author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483 /third_party/rust/glslopt
parent    Initial commit. (diff)
download  firefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
          firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1. (upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/glslopt')
-rw-r--r-- third_party/rust/glslopt/.cargo-checksum.json | 1
-rw-r--r-- third_party/rust/glslopt/Cargo.toml | 23
-rw-r--r-- third_party/rust/glslopt/README.md | 20
-rw-r--r-- third_party/rust/glslopt/build.rs | 199
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/CMakeLists.txt | 171
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/README.md | 100
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Main.cpp | 159
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Readme | 13
-rwxr-xr-x third_party/rust/glslopt/glsl-optimizer/generateParsers.sh | 9
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/GL/gl.h | 2103
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/GL/glext.h | 12832
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/KHR/khrplatform.h | 292
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c11/threads.h | 73
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c11/threads_posix.h | 396
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c11/threads_win32.h | 653
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c11_compat.h | 27
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h | 49
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c99_compat.h | 183
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/c99_math.h | 211
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/include/no_extern_c.h | 48
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/license.txt | 21
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/builtin_type_macros.h | 185
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/README | 228
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/TODO | 12
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast.h | 1401
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_array_index.cpp | 364
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_expr.cpp | 95
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_function.cpp | 2512
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp | 8997
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_type.cpp | 1012
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.cpp | 7677
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.h | 84
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_int64.h | 1196
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_types.cpp | 474
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_variables.cpp | 1624
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/float64.glsl | 1818
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/generate_ir.cpp | 33
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/README | 30
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c | 3216
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l | 621
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c | 4912
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h | 116
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y | 2546
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c | 182
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h | 277
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp.c | 256
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c | 57
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h | 47
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp | 4750
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.ll | 793
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp | 813
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.h | 94
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.cpp | 6208
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.h | 262
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.yy | 3122
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp | 2411
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h | 1060
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp | 294
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h | 113
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp | 80
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/int64.glsl | 121
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.cpp | 2237
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.h | 2589
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp | 207
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.h | 127
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp | 99
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.h | 33
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.cpp | 648
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.h | 243
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp | 778
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h | 31
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_clone.cpp | 455
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp | 1153
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_equals.cpp | 211
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp | 84
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h | 43
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.h | 185
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.py | 818
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h | 2087
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h | 334
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function.cpp | 407
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp | 75
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp | 360
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_inlining.h | 35
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp | 421
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h | 218
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp | 466
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_optimization.h | 193
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp | 1978
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h | 105
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp | 675
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.h | 96
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.cpp | 1169
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.h | 33
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp | 316
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h | 88
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp | 441
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_uniform.h | 221
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp | 139
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.h | 26
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_validate.cpp | 1129
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp | 152
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h | 91
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_visitor.h | 98
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_atomics.cpp | 353
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_functions.cpp | 339
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp | 538
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp | 294
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h | 82
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp | 573
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp | 311
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniforms.cpp | 1767
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.cpp | 3188
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.h | 298
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.cpp | 4988
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.h | 218
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.cpp | 376
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.h | 112
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/list.h | 777
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.cpp | 845
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.h | 244
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_unroll.cpp | 591
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp | 572
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp | 447
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.h | 70
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_builtins.cpp | 64
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp | 157
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp | 235
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard.cpp | 201
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp | 154
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_distance.cpp | 685
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp | 333
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp | 1914
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_int64.cpp | 391
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_jumps.cpp | 1046
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp | 441
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp | 318
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp | 91
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp | 182
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp | 943
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp | 1311
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_precision.cpp | 721
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp | 517
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp | 124
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp | 461
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp | 103
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp | 1142
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp | 567
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp | 240
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp | 102
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector.cpp | 228
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp | 188
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp | 147
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp | 146
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp | 222
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/main.cpp | 107
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h | 61
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp | 1061
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp | 505
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp | 88
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp | 212
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp | 527
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp | 235
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp | 745
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp | 81
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp | 620
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp | 203
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp | 358
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp | 152
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp | 103
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp | 123
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp | 466
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp | 127
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_minmax.cpp | 507
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp | 337
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp | 124
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp | 377
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp | 119
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp | 419
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp | 407
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/program.h | 56
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp | 125
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.cpp | 220
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.h | 178
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.cpp | 1340
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.h | 50
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.cpp | 263
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.h | 40
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.cpp | 620
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.h | 55
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp | 289
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h | 114
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp | 42
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.h | 177
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/test_optpass.h | 29
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/xxd.py | 111
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.cpp | 2954
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.h | 1380
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.c | 302
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.h | 902
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_info.h | 358
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/gallium/auxiliary/util/u_half.h | 143
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_compiler.h | 179
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_config.h | 192
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_defines.h | 1305
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_format.h | 587
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_state.h | 980
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mapi/glapi/glapi.h | 188
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/config.h | 322
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.c | 1920
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.h | 460
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/dd.h | 1503
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/debug_output.h | 107
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/draw.h | 227
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/enums.h | 58
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/errors.h | 118
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions.h | 125
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.c | 50
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.h | 490
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/formats.h | 749
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glheader.h | 164
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glthread.h | 142
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/hash.h | 181
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/macros.h | 800
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/menums.h | 189
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mesa_private.h | 56
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mtypes.h | 5281
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/shaderobj.h | 265
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/uniforms.h | 523
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/main/version.h | 66
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/math/m_matrix.h | 218
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/dummy_errors.c | 30
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/ir_to_mesa.h | 59
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_instruction.h | 293
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_parameter.h | 244
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_statevars.h | 156
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/program.h | 169
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.c | 314
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.h | 62
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/mesa/vbo/vbo.h | 162
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/bitscan.h | 326
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/bitset.h | 261
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/blob.c | 368
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/blob.h | 418
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/compiler.h | 76
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/crc32.c | 134
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/crc32.h | 55
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/debug.c | 114
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/debug.h | 53
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/detect_os.h | 131
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.c | 1344
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.h | 321
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/fast_urem_by_const.h | 74
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/fnv1a.h | 61
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/format/u_format.h | 1665
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/futex.h | 113
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/half_float.c | 213
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/half_float.h | 85
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.c | 859
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.h | 183
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/list.h | 249
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/macros.h | 349
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.c | 51
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.h | 64
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1_test.c | 65
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/os_memory.h | 74
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_aligned.h | 128
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_stdc.h | 60
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.c | 184
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.h | 104
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/os_time.h | 130
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.c | 921
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.h | 609
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h | 148
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/set.c | 572
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/set.h | 127
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/sha1/README | 62
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.c | 174
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.h | 53
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/simple_mtx.h | 148
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.c | 1475
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.h | 65
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.c | 148
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.h | 104
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/strndup.h | 60
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/strtod.c | 85
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/strtod.h | 52
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_atomic.h | 268
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.c | 440
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.h | 460
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_dynarray.h | 214
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_endian.h | 89
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_math.c | 139
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h | 828
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_memory.h | 99
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_queue.h | 277
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_string.h | 130
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/u_thread.h | 256
-rw-r--r-- third_party/rust/glslopt/glsl-optimizer/src/util/xxhash.h | 1435
-rw-r--r-- third_party/rust/glslopt/src/bindings.rs | 135
-rw-r--r-- third_party/rust/glslopt/src/lib.rs | 102
-rw-r--r-- third_party/rust/glslopt/wrapper.hpp | 1
302 files changed, 184061 insertions, 0 deletions
diff --git a/third_party/rust/glslopt/.cargo-checksum.json b/third_party/rust/glslopt/.cargo-checksum.json
new file mode 100644
index 0000000000..4e4e911018
--- /dev/null
+++ b/third_party/rust/glslopt/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"5e6232fe2ab7866ef301c1dacaa3135a02e2fd69744c9372eceffbc3c1fd1c32","README.md":"4468e08c64c19977707d792bfab0080e35ff927b64990eab77873f8ba056ba1c","build.rs":"6a64610018701781af182c418a4355c9ac5d99d000be9457f0e38a7dadf7542a","glsl-optimizer/CMakeLists.txt":"42ce94744e82ffa000da8b64d81fc140e293b9f5da7dd4cf6b49e7404a2448d9","glsl-optimizer/README.md":"b18eef11a92d267d88a937b1154f7670ee433c730b102fdf7e2da0b02722b146","glsl-optimizer/contrib/glslopt/Main.cpp":"14ba213210c62e234b8d9b0052105fed28eedd83d535ebe85acc10bda7322dd4","glsl-optimizer/contrib/glslopt/Readme":"65d2a6f1aa1dc61e903e090cdade027abad33e02e7c9c81e07dc80508acadec4","glsl-optimizer/generateParsers.sh":"878a97db5d3b69eb3b4c3a95780763b373cfcc0c02e0b28894f162dbbd1b8848","glsl-optimizer/include/GL/gl.h":"1989b51365b6d7d0c48ff6e8b181ef75e2cdf71bfb1626b1cc4362e2f54854a3","glsl-optimizer/include/GL/glext.h":"2ac3681045a35a2194a81a960cad395c04bef1c8a20ef46b799fb24af3ec5f70","glsl-optimizer/include/KHR/khrplatform.h":"1448141a0c054d7f46edfb63f4fe6c203acf9591974049481c32442fb03fd6ed","glsl-optimizer/include/c11/threads.h":"56e9e592b28df19f0db432125223cb3eb5c0c1f960c22db96a15692e14776337","glsl-optimizer/include/c11/threads_posix.h":"f8ad2b69fa472e332b50572c1b2dcc1c8a0fa783a1199aad245398d3df421b4b","glsl-optimizer/include/c11/threads_win32.h":"95bf19d7fc14d328a016889afd583e4c49c050a93bcfb114bd2e9130a4532488","glsl-optimizer/include/c11_compat.h":"103fedb48f658d36cb416c9c9e5ea4d70dff181aab551fcb1028107d098ffa3e","glsl-optimizer/include/c99_alloca.h":"96ffde34c6cabd17e41df0ea8b79b034ce8f406a60ef58fe8f068af406d8b194","glsl-optimizer/include/c99_compat.h":"aafad02f1ea90a7857636913ea21617a0fcd6197256dcfc6dd97bb3410ba892e","glsl-optimizer/include/c99_math.h":"9730d800899f1e3a605f58e19451cd016385024a05a5300e1ed9c7aeeb1c3463","glsl-optimizer/include/no_extern_c.h":"40069dbb6dd2843658d442f926e609c7799b9c296046a90b62b570774fd618f5","glsl-optimizer/license.txt":"e26a745226f4a46b3ca00ffbe8be18507362189a2863d04b4f563ba176a9a836","glsl-optimizer/src/compiler/builtin_type_macros.h":"5b4fc4d4da7b07f997b6eb569e37db79fa0735286575ef1fab08d419e76776ff","glsl-optimizer/src/compiler/glsl/README":"e7d408b621c1b605857c4cab63902f615edb06b530142b91ac040808df6e22f7","glsl-optimizer/src/compiler/glsl/TODO":"dd3b7a098e6f9c85ca8c99ce6dea49d65bb75d4cea243b917f29e4ad2c974603","glsl-optimizer/src/compiler/glsl/ast.h":"3e68ff374350c49211a9931f7f55a485d8d89fc4b21caaffbf6655009ad95bf8","glsl-optimizer/src/compiler/glsl/ast_array_index.cpp":"92b4d501f33e0544c00d14e4f8837753afd916c2b42e076ccc95c9e8fc37ba94","glsl-optimizer/src/compiler/glsl/ast_expr.cpp":"afd712a7b1beb2b633888f4a0911b0a8e4ae5eb5ab9c1e3f247d518cdaaa56d6","glsl-optimizer/src/compiler/glsl/ast_function.cpp":"74f4fbd490e366b37f4715168bb3465ecd9334d4130942f75dcc8e80e8e7f027","glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp":"d0f798eb09271d41d068b9e7b18220d37f1ed0083300ab51eba30989698fe23d","glsl-optimizer/src/compiler/glsl/ast_type.cpp":"8eb790b24b26dfb72bdc333744b566c26d8464c5d47d20eae659461f5c4899f7","glsl-optimizer/src/compiler/glsl/builtin_functions.cpp":"454189d643c220fcb49116ee5c8a34f7b349aa67564040deb8607f6a41a15e70","glsl-optimizer/src/compiler/glsl/builtin_functions.h":"a37cad7ed09b522c5b8bec7b80115a36846e7ba6e0874a2a858e32f7f202c665","glsl-optimizer/src/compiler/glsl/builtin_int64.h":"619def6f3aebf180da3944ef08f159ab12a58b24767e41d8b985ac37ded54d62","glsl-optimizer/src/compiler/glsl/builtin_types.cpp":"afec060b62d6f3b00bfbf94e9fa5f96341ce096c128d1eef322791e6ed9cea4d","glsl-optimizer/s
rc/compiler/glsl/builtin_variables.cpp":"6563bfb1345cbca4c77e00eef09ad152f3e1dc271d246a08c5ce9e1f4ce4250a","glsl-optimizer/src/compiler/glsl/float64.glsl":"1072fd888be48c2a7a5117cd2d92a65f034965a66375f598bb856bff5d7be766","glsl-optimizer/src/compiler/glsl/generate_ir.cpp":"e5f0175370a0d07f93c48d3f0f1b8233d12c64a7b02de02dcc753ef7b398ef0f","glsl-optimizer/src/compiler/glsl/glcpp/README":"a0332a1b221d047e9cce5181a64d4ac4056046fd878360ec8ae3a7b1e062bcff","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c":"2d179879b1ffe84f58875eee5b0c19b6bae9c973b0c48e6bcd99978f2f501c80","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l":"e4c5744c837200dafd7c15a912d13f650308ea552454d4fa67271bc0a5bde118","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c":"03494f9ce1cb82260506e2559e73a3eeb622c4bd51b65eaa0a2c3351862bd4c8","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h":"264d9a18421cde255ce34a0a62b3d8e73465359f0d167e64aa3973062aae5bdd","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y":"fafb66e3a8f149d19e085f18a4273ba6d4c11af9e9a01d665cc784dddf97b79f","glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c":"37ed294403c2abfd17fd999d1ae8d11b170e5e9c878979fefac74a31195c96b0","glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h":"85ac8b444bcbd0822b66448a1da407b6ae5467b649f5afaf5c58325bd7569468","glsl-optimizer/src/compiler/glsl/glcpp/pp.c":"a52d94f1bcb3fb2747a95709c4a77c25de7eea8354d2b83bb18efd96976a4473","glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c":"d11aeb3acfe966d1b78f1ee49804093f2434214c41391d139ffcb67b69dc9862","glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h":"abbf1f36ec5a92d035bfbb841b9452287d147616e56373cdbee1c0e55af46406","glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp":"272b9fc1383d72b81bfc03fa11fdf82270ed91a294e523f9ce2b4554bd3effa9","glsl-optimizer/src/compiler/glsl/glsl_lexer.ll":"2b57d9f9eb830c3d7961d4533048a158ee6f458c8d05c65bea7b7cfbc36e4458","glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp":"f8095d20629d0af70be930b0612e169edb274551a1d25a3cd1bf9995a11ce2e8","glsl-optimizer/src/compiler/glsl/glsl_optimizer.h":"22e843b4ec53ba5f6cd85ca5f7bad33922dca8061b19fb512d46f1caca8d4757","glsl-optimizer/src/compiler/glsl/glsl_parser.cpp":"126baf368d525aba301854e3d91ba60b5aee32e1102376af71416f32cb95ec48","glsl-optimizer/src/compiler/glsl/glsl_parser.h":"2ea9a50716098a8f7bef782d2a030d757b68da73afb01b4d4940d3e8381d44e8","glsl-optimizer/src/compiler/glsl/glsl_parser.yy":"6b1fd1576b29fce005dff744a6dbd0219e4c695c361d61864e1f3a8d6fa6b764","glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp":"aad64b5b66467da650091430681e8c6a820cf3cadc4db3c160bf2f15875390ae","glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h":"71fd0e92bbdb193dfb067d7bfdb1200d77392be2fbd0cbfc9ca89d1bb4c7e741","glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp":"6660fb83c0ddddbbd64581d46ccfdb9c84bfaa99d13348c289e6442ab00df046","glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h":"24682b8304e0ea3f6318ddb8c859686bd1faee23cd0511d1760977ae975d41bf","glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp":"72a039b0fcab4161788def9e4bedac7ac06a20d8e13146529c6d246bd5202afd","glsl-optimizer/src/compiler/glsl/int64.glsl":"303dbe95dde44b91aee3e38b115b92028400d6a92f9268975d607471984e13eb","glsl-optimizer/src/compiler/glsl/ir.cpp":"2b4741cce90b5d4abff5d719c7324e2693c67294d4d99736cb241554adb281bc","glsl-optimizer/src/compiler/glsl/ir.h":"990b1c74447c4eb4835353ccb0ed9aea644f97fc1129ef1739cd935075d85d2e","glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp":"8cdc1cffe01e42e0566fa2193a75f789628e8025ad1b82
f0ee6f204451b7f9f7","glsl-optimizer/src/compiler/glsl/ir_array_refcount.h":"75f06ec81342b379096ca52e1dc0fd5f19a11ff8e9b58203c20628179d644c12","glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp":"1e2920b1c0ecb08424c745c558f84d0d7e44b74585cf2cc2265dc4dfede3fa2f","glsl-optimizer/src/compiler/glsl/ir_basic_block.h":"81be7da0fc0ee547cd13ec60c1fcd7d3ce3d70d7e5e988f01a3b43a827acdf05","glsl-optimizer/src/compiler/glsl/ir_builder.cpp":"daba29c5a1efdd5a9754f420eb3e2ebdf73485273497f40d4863dadeddb23c0d","glsl-optimizer/src/compiler/glsl/ir_builder.h":"2822e74dd3f6e3df8b300af27d5b11ea2dd99d0e5e7ca809b7bbcce9833c483c","glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp":"8c6df5abf2fe313363f285f171c19ca6c8ee4f3bc2ed79d33c0c88cc8be45c48","glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h":"799852adc3a0e54d04080655e7cebfa0d3bf5b6ffed5d8414f141380665d4db7","glsl-optimizer/src/compiler/glsl/ir_clone.cpp":"d897a4e1f5bbec4a6a2f15044c1be9a4d13899c73be77335b041049a4589aa5d","glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp":"78bd87ddb09db67f6c499067728d72aef4f16aa02721a99a4b769d1e0cfa9010","glsl-optimizer/src/compiler/glsl/ir_equals.cpp":"bca28533a6310b0fc152b56d80872368f1510dc62ed6e8ac199b9ffa7fac02e7","glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp":"7e918d4e1f237eca01396004015865ce345afe32a876c9dbc6728576a1a7eae4","glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h":"f45b66aa9497520e7e08e612d24b308477c34477fbd963ee9320eac664957f16","glsl-optimizer/src/compiler/glsl/ir_expression_operation.h":"cc9f10727dbd26cac506804f51456302c702650f9eeb59054a7e1575d5cf6687","glsl-optimizer/src/compiler/glsl/ir_expression_operation.py":"7b86c96021b9fbe165957f4ecb0b612fefcde1c2cf3c6d75e3cdb22e369216ba","glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h":"9ad3346416392e3efa11e12ecf2feca7453c5253d241eb96c91dfb85d4f2b971","glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h":"a6826daf496a8b9e89885bc2a161ac3445d501b23c6e0ac33e2c01b506b273c8","glsl-optimizer/src/compiler/glsl/ir_function.cpp":"7537365fc0fbe4b37a26b9a2146cc64d3e9a774d60eab63b65002ad165ae8fc7","glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp":"faddbf112187a048d502716a3fb82570a322299ba2a3abd79388382c82040bfc","glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp":"9176973eaf5c0a984701f953bb7a80f37dca43d59b5bce50fc69b3f02f2902d7","glsl-optimizer/src/compiler/glsl/ir_function_inlining.h":"9739493f99c489987d650762fccdd3fb3d432f6481d67f6c799176685bd59632","glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp":"3725861fbe2b98e0617f52d3b14cf6d3b25fb5ec00f5ef5d308b03642f592767","glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h":"e0560210e966c0c31e4ca843e80ea154e64db5a444b8c2df845b6ba5b3a43fc1","glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp":"caf7ce2cd9494aadd3c58bcf77f29de58368dc9e347a362bbf37f8bda9509b80","glsl-optimizer/src/compiler/glsl/ir_optimization.h":"8b3dcfc7f9e96b21a8dd47a0040d90be483a9e67a2cdce3a697188fb758d4630","glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp":"f8e34a983452be0dcb5a695e9c8e895eead24f9e540992a8afe510ae85da4c4c","glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h":"1ad1bd3efd1ace39051c13f904c05fd80425d329444f9a8d47fd6d948faf46e0","glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp":"643f5a68aae3fb37267fd793f1216d1cfdeb2c09338c26b1f30e4c6deaef4de5","glsl-optimizer/src/compiler/glsl/ir_print_visitor.h":"4573eb93268a2654c14b505253dd651e2695d43dc745904d824da18305269b95","glsl-optimizer/src/compiler/
glsl/ir_reader.cpp":"06bfba802c8354e5a8b2334b6d78d6297de18235bedd3f8fbb382c89870b02f2","glsl-optimizer/src/compiler/glsl/ir_reader.h":"63e3f7f1597936a7011d5b520e171b197bf82bee6c1560d822c3edf5aaa6f9e9","glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp":"84b5c5d746555adca85759c2912fe48010232b7c1c0bd2cf03bd04067a85e66f","glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h":"fd8c561b71085d3211fff85ed514fecb299d8ce19a04bc063419a55b6d840525","glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp":"ab9f115ce9e7f312d9c7978340ced0dc4ae6d13a80e08442ba9709d11d50cae5","glsl-optimizer/src/compiler/glsl/ir_uniform.h":"683ae6896b1a08470c090be5f822fc31cd434eab9216e954b9bba24a46975109","glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp":"9c1620c45f2fc071fe5ed828472040b14c5f42effe06aa0e3b8352c95ef78786","glsl-optimizer/src/compiler/glsl/ir_unused_structs.h":"13387b49c23093575276b25b9dfd31fedd8f131c5c4f3128ab04cf03e15b5295","glsl-optimizer/src/compiler/glsl/ir_validate.cpp":"6b232be5999a86ea278f4f15b2832d76843246509118d924243055a3b9b0299f","glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp":"2764a3cad937d53f36db7447c3a5b98b04bf153acf81074d971857fc5bca460d","glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h":"b0668e3eb1501ef65e38fe12830742ecb3d28e6039f30e366c8924efc29b4a39","glsl-optimizer/src/compiler/glsl/ir_visitor.h":"f21b3534c3d66d5fb707d1581fece7e1eb043523afbaedf89918cfb031c6df94","glsl-optimizer/src/compiler/glsl/link_atomics.cpp":"360f0209e11f367ba358223597b0a118bae095bff16337cf03f1fb89c5b80ca6","glsl-optimizer/src/compiler/glsl/link_functions.cpp":"de7895da8aa33a1e3c2c1eb2fdaf267ab5d1fbfdb79ae2e67f95211e946e294c","glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp":"1926cfa73810704eb19b916c1b2cdb9321155e2f98b2a0a57c7c3c6e960540cd","glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp":"1e14e06ca3b2c1089cfba2e8eaf0c1f373d9d6374b6082f320962dd71ae09611","glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h":"fd58c155af645295bb6aec08797889de586f4d919731de2bce57e8dce59bb048","glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp":"09589f49776dce32e6c4044937de7e0c839a9754ad31960148f8f9e010658997","glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp":"bf98e08c12db466acf9623cbeb8fa8e3b4002512722e7a6521287f558a099f37","glsl-optimizer/src/compiler/glsl/link_uniforms.cpp":"84bad5b1377362cecf259b05124239be5220b03ce1c0c61b59bd9a47e4379af2","glsl-optimizer/src/compiler/glsl/link_varyings.cpp":"a5f1a53e7c80d635515fe808ff223d89fef1767abb0f2b7aa28fa6773dca353f","glsl-optimizer/src/compiler/glsl/link_varyings.h":"b9dbe018f038df69763df2e928742ce81bbc6e3aaba26f50621e30a6d9aa6220","glsl-optimizer/src/compiler/glsl/linker.cpp":"40b1ecd5d4f6c7f13d5a87ce390561a51fdf6f3fcd9b2197b9c88b03a773ba94","glsl-optimizer/src/compiler/glsl/linker.h":"ecf94b4ad75ef461c27c557fda4bd25f34c91930822b8e1d729ec84520d4a049","glsl-optimizer/src/compiler/glsl/linker_util.cpp":"1663ad88e2a369305659aeeffaedb5bd752cf76340a2ba5797fc0bf600633cf9","glsl-optimizer/src/compiler/glsl/linker_util.h":"6db788daf9c8e87ae2764b61a8b37ebe419e69c1b82ddee01986e37c978c6993","glsl-optimizer/src/compiler/glsl/list.h":"b1f46ce0e552fe7c45b2a19408a9d97662e23e4b182ab335491c26f8cf25886f","glsl-optimizer/src/compiler/glsl/loop_analysis.cpp":"57ecd573477c68091c7cc99537faa7139a8f395935e3d4f10144cefdefb5a611","glsl-optimizer/src/compiler/glsl/loop_analysis.h":"a85f045a038ee5b5176063e85d7988865862c44ab0580f771b993a042d0b69cc","glsl-optimizer/src/compiler/glsl/loop_unroll.cpp":"bd4292ea2809f5a
669bcb76ceaa1ac365772dcd638c579c3ed10275214901a54","glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp":"8cfbef140d9c4b4d2f57bfa05c9c374d31a121d0f87afce94333f049023b654a","glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp":"1ae221c3c7a95aeb867207e7a742be635f91b406c157747bfd6ddf10274d97fb","glsl-optimizer/src/compiler/glsl/lower_buffer_access.h":"807886953a576a323591798cbca5e2df24295ea893b28affd8ffb5926cebaa04","glsl-optimizer/src/compiler/glsl/lower_builtins.cpp":"4d81afc32cf58e1481fcb5e42888ab93dbe6820310a20ff7a9982b77b2152d9b","glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp":"608403f0eeeedf21cfcd3014116e0f44e28cbdf6c4c32aac7e613e64e30205e1","glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp":"179905cd47a294122adeb5b0abfed6f2f67782dcde21b544d1ee2c1985154e66","glsl-optimizer/src/compiler/glsl/lower_discard.cpp":"3b361b2db0004d544d64611cb50d5a6e364cf6c5f2e60c449085d7d753dd7fb0","glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp":"f5c29b6a27690bb5c91f196d1a1cf9f6be4f1025292311fe2dac561ce6774dee","glsl-optimizer/src/compiler/glsl/lower_distance.cpp":"a118c85493d5d22b2c059a930c51a5854896d4b1dade76598eaa985e5a3dff8c","glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp":"469e617757fd1728709cce021aac5c8da05ee503bf5366977bdc4ef7a6d83950","glsl-optimizer/src/compiler/glsl/lower_instructions.cpp":"defd043e8576437c1ef63c7d0bf5f828068bbfb5fdbec16457a9c191a1e9242d","glsl-optimizer/src/compiler/glsl/lower_int64.cpp":"d1ed41196880dd53c7b13e2782f9423f8442bf1d46186e8be92b1b66218a83ee","glsl-optimizer/src/compiler/glsl/lower_jumps.cpp":"34de7b493f281589fb0c2c0f6e885d0a0fabbe7a4e97a73de374dd714777a58c","glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp":"dff7a308edc4846c348ed4225c6699a9c75abac68d88f41f85954276552779f4","glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp":"16063ac127bff75a68272070ab11c21c25101edbff62b4c68f4983b4cd941af0","glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp":"3b00773399135aea85746a5a68b96ef000bc6841be1a2c8e6f25c516628b0949","glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp":"a0fc9975d5aa1617e21fc6c353659a9802da9e83779a3eef4ec584f74b4dadc5","glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp":"7550099d4ae123d71541c2fc88bc04fbfe9271ec75d7e210987d1c8cac3cf3ea","glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp":"79a13d161fe505a410ab948d92769395708693ec888153630fa240e5b97e356f","glsl-optimizer/src/compiler/glsl/lower_precision.cpp":"f82a185b879872b977a1787d8061b9a80bc4cf8db1b970db6efba2ad9cc20fa2","glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp":"ea2dccf50a83bc19391bf6b7ab6aa53c0005f427af4066d25140340af9a4beef","glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp":"f69fa53650eeb6f2944fce4d36a6e0a423e6705f3a3bd3389c7fadb83cfc8802","glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp":"b196c9d424c0569f3e85d75c2d125af21566cb113d69036db87c0990703e0fa7","glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp":"4d247f244272adc8250fd888d8d932a140dd5de4d1efc7a58492c3c2b8291527","glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp":"89bdbc6c1669230c644c0857db1ce2781ec61d349ecd08c7914146e1f4750a4a","glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp":"fce930f29ac9405b297d1f749d68f59506b89c70b4ee1b1ab8cf49a34cc71ecf","glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp":"3c67d851a11a55fad1c49a550f3a0cfe50892d33a3f238ce266cd829eba510a8","glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp":"f5ec666b73e1
415cbab32519a53605ed385f3b03e889560373dbce69dda5000e","glsl-optimizer/src/compiler/glsl/lower_vector.cpp":"f7c13f5572ebe09b6a71553133b2cf003cd4b77b9657600672ee3b21bf890725","glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp":"b05793da6dd620a531b43df5af8b2ecbc37b9db0c88910f5724ea10bcd057e19","glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp":"fee772ec17eea5e86a529bf9c5fa2ee0d29a5982bb75ebc6d68ed36cd19aa299","glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp":"690e8715182e03fead5cc5a35251fb4f41b357e4c71a1dfbc4bd7be19862b56d","glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp":"58c0e8b270e4bbde54250be03cdb2f36966bcafb785372ad2e2b786835df7f9f","glsl-optimizer/src/compiler/glsl/main.cpp":"ae5e88abbbc8a12f769e1296bad938b9d7398cc6da0d3d0caeceeeb876536850","glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h":"f5054944bfd068810629080d0ea11df78b3f57a8f86df75e13ca50157ad1964d","glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp":"25f45b20e1972ee8c789177a1aeda6e4286c25db2eae3a43ff83029ae64969c0","glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp":"19d3ce0e815438f4df9ab2890e767b03a4f3f191b53bb30c0217cf2ae6a95430","glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp":"0e44e0e126711a3725c1f3a2aa65ff03c381fed08680ffc30101aae60f716c4e","glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp":"a088d04d9b45f9e55e235835648f614c89b7803c03a6d4f6a6d1a6bc1f0228bd","glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp":"8a9440d77ecd6dcf13e683cbb99943aab6311c8fd4b5f6a9189a8d4f270746f4","glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp":"63d3ccd4dd09f19c9cf1a2f51592111bed41284504f29f3c0de4cadebc439a37","glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp":"ffa0f50863995e0d2e31f55a52e82319edc71e520987bebd7f7e561ea331c64b","glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp":"84e8747b948232f01dd56b428b9315f96f9511f605f240119fc446fae28981a9","glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp":"761523e88f5b3ba785170f4d7205e94fa99acb7e74d29efbe40e1c010e1dbdb3","glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp":"fd1ba2da7337d4e5dad17f5c2d73d9cc8880305f423e85d64cf94553588fa401","glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp":"969a598b4df322baf222258a66cd64a326ea20e5b3125be9d8d1771f522c69e0","glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp":"774cae6536d02edf26e996a2a895e1f62d5098f16dc96b44798b4fc731a9a95f","glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp":"3696a5c55f02e20056e085bc2714f73ac992f221b6f3387d655068e86b512046","glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp":"44f0fe05b49329667671f88c96dc86ab3fe1459ff7b87f2b2d88de2d49829f9f","glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp":"fb56a33c90419a01676b57cbd91d0674a54cca40e6defaacc88dd33facebc131","glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp":"ac406eb35e379c357641d6c5749f50c65961455924d3dc884e2b90046fa92c5c","glsl-optimizer/src/compiler/glsl/opt_minmax.cpp":"8abd59d3b14ef60ff14a9c69660e6945f5cf10b97edb4afebe56be3f81d96316","glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp":"8bb6329dc0f299042368fc81934c2df019b45ab9f7aa0415d4e57b8d1ff98c9f","glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp":"222c73e2ac7a938ebb6428cc6c780c908ff6156d8ff935b04fed93a48fc10496","glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp":"2edc79cc13f3177934e0443ad62f5976a1991f01f86ea303a803434849b13a47","glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp":"015d0abddfe507f67c4b96c82988d861d018ededf7bf055e2bc
be9ea92da694e","glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp":"46d28ac983ea244a4315bdc0e8892979ec4d1f9b9a96ac8a8a08006d9bc5e878","glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp":"d80ee43bb97d9f016fb9c5e1e06f5b2afa569811f368ba067be794ec11d085fb","glsl-optimizer/src/compiler/glsl/program.h":"2982447e2abd35371e273ad87951722782a8b21c08294f67c39d987da1e1c55f","glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp":"080943e21baa32494723a2eefb185915d2daae1f46d6df420145c5ad6857e119","glsl-optimizer/src/compiler/glsl/s_expression.cpp":"1ced972bc6ecc8eab4116ea71fb0212ab9ae5bcc0be3b47aa5d9d903566b3af1","glsl-optimizer/src/compiler/glsl/s_expression.h":"65b847e30e22a809b57d0bc70243049c99d9c6318803c5b8d0826aba55dc217e","glsl-optimizer/src/compiler/glsl/serialize.cpp":"be0eb4251348a9d921acb839a5c48c6023a2e9d116d602bb0432787ab623655d","glsl-optimizer/src/compiler/glsl/serialize.h":"57425732eba1233d928e5f07f88b623ce65af46b3bb034bf147f0a4b7f94f9a1","glsl-optimizer/src/compiler/glsl/shader_cache.cpp":"e0c5c433f2df3fccdf1d61281bfcb0ee5633433339b97c697d64db99611cbaaf","glsl-optimizer/src/compiler/glsl/shader_cache.h":"9217164d8d7f54aca0fe5922c7187095a6ae0cb703b196b79805aeef07a7e697","glsl-optimizer/src/compiler/glsl/standalone.cpp":"8e6c416a14d631261917a5fe4cc91880c287b22b2dfd70eb22028289a8fa5364","glsl-optimizer/src/compiler/glsl/standalone.h":"a7c397d1dfdd1e7fb2cfe99db35cd9df93251e642059208533202b7f20497f83","glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp":"970d14b7a9d58e5270321f97bf5d57795558b1c570a56678e04a65b26c60bf4f","glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h":"d921a617ea82b9e49413314492a645c44356de503581b1be3f1b57de236e480d","glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp":"d824bf5b839bd39498dc9e457103cdbe3e5289ddf7564107c27b1505948dd31f","glsl-optimizer/src/compiler/glsl/string_to_uint_map.h":"e2f18e66359c9d620e085de7f4a334a47df9c66e65a5bfe8b734c627bec04104","glsl-optimizer/src/compiler/glsl/test_optpass.h":"b27b8f35f5387e7ce4982bb51c7b63ccf14f91757f3108a5d02ed006925bb8a0","glsl-optimizer/src/compiler/glsl/xxd.py":"376484142f27f45090ea8203ae2621abf73f06175cb0ee8d96f44a3b9327f4bd","glsl-optimizer/src/compiler/glsl_types.cpp":"044bb6754f45419a3151e7a25c39202a82009ae3c6bc54ff7f0bb4258a5deefe","glsl-optimizer/src/compiler/glsl_types.h":"fd899a42f34ddeb8601bc3cd6c5e3aed82fc8aef4042dde1b39b3c01e1dcc219","glsl-optimizer/src/compiler/shader_enums.c":"436bff5216b11b0980bdfada5885fc6ac9afa2037a3027fcd6eea2a8635597ac","glsl-optimizer/src/compiler/shader_enums.h":"13220442a5c02e83540cf2c0ad4f8417b2fbda5f2586dec4e92082544c937cdd","glsl-optimizer/src/compiler/shader_info.h":"4c5453e81197ca83593ee4f365074b23530f2ab21c78e1733b63dec6f344c12a","glsl-optimizer/src/gallium/auxiliary/util/u_half.h":"3c2b37bda3ccb64387e44b723d29cf9046decab1a893bf42d842e9603398bdee","glsl-optimizer/src/gallium/include/pipe/p_compiler.h":"c75620096ce8523dae90599e50aa2ef6468d3b0e368a77795edeb20dd1abfc0c","glsl-optimizer/src/gallium/include/pipe/p_config.h":"a27692fc35f9e55df3224b7529e66b3001e911e94e6bc5f8f569e493e1ee3fb7","glsl-optimizer/src/gallium/include/pipe/p_defines.h":"be26d68c0acc67c5e44788c6299716a9eee415fd81d7d747e3738a829e3b6b38","glsl-optimizer/src/gallium/include/pipe/p_format.h":"5674215fc41d27496f037cf837717daefbf23ebb38d40ace7c0c414bc08182b0","glsl-optimizer/src/gallium/include/pipe/p_state.h":"d600593aba5f5a17072a6c38f6baa81e01c7994b0174250f7e433bb41684b702","glsl-optimizer/src/mapi/glapi/glapi.h":"73632a625c0ddabc401205e8b5a81eb8af8506868efe4b170d7979ec3619e9c5","glsl-o
ptimizer/src/mesa/main/config.h":"5800259373099e5405de2eb52619f9de242552a479902a3a642a333c8cb3c1e7","glsl-optimizer/src/mesa/main/context.c":"2f3208473d99c94f734b1137ba91889d4a1babb9e7534bf1dc85d851ee98274e","glsl-optimizer/src/mesa/main/context.h":"cc7e4194797db9d007f01884e23d786c453b3860821f7f2ddcdf0f1bf3f8ffb1","glsl-optimizer/src/mesa/main/dd.h":"6a964acd06b6c2d88700e69fb75fe3c6b3b3d45bbc41db24f3f897a29695fe0c","glsl-optimizer/src/mesa/main/debug_output.h":"7312422e90b8c0e34028ac27280e438139b5cba525c99deb3ac883cd3d87e452","glsl-optimizer/src/mesa/main/draw.h":"7eaef3a9e27a60ea6f7937109bf3a6190b831162fde0479abb12077ce27c353d","glsl-optimizer/src/mesa/main/enums.h":"87d562a6764f51c014a2274fa7c3aca17c04441537ddd56b2554f13c6fffea92","glsl-optimizer/src/mesa/main/errors.h":"c79444b5df289c90fbb22a33b2d0c23917d9fc4510960088f0b79e53bb56b1b2","glsl-optimizer/src/mesa/main/extensions.h":"a38b2f87cc93c513994281350d69e06c84ff8eded5313ec0a1be33f375e0ebbd","glsl-optimizer/src/mesa/main/extensions_table.c":"17642d1a8c9a0bf2bd61060052d33ff14a005d2b962e6cf91465797a50851e85","glsl-optimizer/src/mesa/main/extensions_table.h":"2c879571c238d2e14461031ac740372fd0f9ac3a34c0d5541bb9b7ed4c0376c8","glsl-optimizer/src/mesa/main/formats.h":"02e2f7ec3e39286cf9f27e2641043e6df8ecb1dfde9e643313210e214af2a929","glsl-optimizer/src/mesa/main/glheader.h":"58217b33eead6aa6b23cd4a291cefeaa6cb84e465f4960daffca97c44d6d1c35","glsl-optimizer/src/mesa/main/glthread.h":"51fb2711f77e7eafcfc52d29d5b844978832b24c930d88accd48d143a6eb9c6f","glsl-optimizer/src/mesa/main/hash.h":"7e7f782034c16a8e693de48e00c31d4a90b0129f4029fd074033d7d16ccbe718","glsl-optimizer/src/mesa/main/macros.h":"73d15ddfd64f2b57b9b2ffeeb993b9c2c0899a80563e9d6ff337b11ccbe6eee5","glsl-optimizer/src/mesa/main/menums.h":"5dfac0e2279d60b0cd0c7b9fc2a5021620d0f6282ed2e738c420214e3af152d3","glsl-optimizer/src/mesa/main/mesa_private.h":"edda678b93438944279a551f663b8858ad84814a9fc88ba9672ef195599c24ae","glsl-optimizer/src/mesa/main/mtypes.h":"6efddefa099e4d2e3fdd97f0055644f47aba21711385edfeabc2d9b0676f2eec","glsl-optimizer/src/mesa/main/shaderobj.h":"9f0dfe96d0c2154201adef942bd36053533ac7b2492fb3786acda5bea514c75e","glsl-optimizer/src/mesa/main/uniforms.h":"4e331e6ad6e9cbded978b4082dbe0a57c1f8f01327446bb6892bfc179976c38b","glsl-optimizer/src/mesa/main/version.h":"9d0a13a758099302dc55cf7d045791834a89b0f9d4cf17b2692259b369a8a9a1","glsl-optimizer/src/mesa/math/m_matrix.h":"a37b19f182e070db3df93b0ede43c22fb8be8c2906504133ee6dbd7db1185d8b","glsl-optimizer/src/mesa/program/dummy_errors.c":"1820e305515b4c5e041f5e1623266a48ec8f076a155310be7d60637101f593e4","glsl-optimizer/src/mesa/program/ir_to_mesa.h":"b47f58d22e3ca2ae42d52501ea769d15c4476834944fa97eeccd3a3439211d00","glsl-optimizer/src/mesa/program/prog_instruction.h":"ab3832152a7e144b59e5a2264b2c29db56d93be31e76bbd958527a56771b40eb","glsl-optimizer/src/mesa/program/prog_parameter.h":"ba18c743284eadbc837c2c364c73e5d372321a7637a76e589d8d39fe8b5de225","glsl-optimizer/src/mesa/program/prog_statevars.h":"fc413698f84bc52d45fdeae0471934ee9904bfb7eac1a2b5f70446e54bcbbdca","glsl-optimizer/src/mesa/program/program.h":"1f01026a4eff440a3f122fd9b519d03546fe7f7d8be60dca834e95a2f8fbbfd2","glsl-optimizer/src/mesa/program/symbol_table.c":"6611cb9f078035bf5ff8c9112093a6c7d99f8af99a3931d0c07f227cc72283ea","glsl-optimizer/src/mesa/program/symbol_table.h":"631dc35ac48d5e87962d45507461920f6575610960ffcc42a08cefeb43300cda","glsl-optimizer/src/mesa/vbo/vbo.h":"6eb1dcd9a08c92f276c5fe08da184ff9d455d1be421913b8ad732a7b65e858fb","glsl-optimizer/src/util/bi
tscan.h":"d4fcb47b57a50d70cb97f99ca3e619bc06282a877768a435e009775ce8d77f36","glsl-optimizer/src/util/bitset.h":"c40f78515c6230fed18345c6751ce33833a49da7a27901c7e6d7340cbdcbc5e7","glsl-optimizer/src/util/blob.c":"8f729846f66efc9c15485cc5fc24c6ec861fc1fecb2f652573f2a237d481b791","glsl-optimizer/src/util/blob.h":"93e1eaac866b9a7cd6fc03b533c18fb2edf0e97f03395eff4f3a605c4fc14d0c","glsl-optimizer/src/util/compiler.h":"79e3bf40a5bab704e6c949f23a1352759607bb57d80e5d8df2ef159755f10b68","glsl-optimizer/src/util/crc32.c":"2f3467a046b3a76784ecb9aa55d527698c8607fd0b12c622f6691aaa77b58505","glsl-optimizer/src/util/crc32.h":"59bd81865e51042b73a86f8fb117c312418df095fed2d828c5c1d1c8b6fc6cd4","glsl-optimizer/src/util/debug.c":"c3d68e9752ccc19e66c669562cd113cf1d0ac83cbb30174789e7fb8d1df58f9c","glsl-optimizer/src/util/debug.h":"50068d745c4199ccbd33d68dd4c8a36d2b5179c7869a21e75906ddd0718ca456","glsl-optimizer/src/util/detect_os.h":"343a8790d17a3710c6dd015ee367f84e3902ff3f2e36faca2bf93f9d725d3574","glsl-optimizer/src/util/disk_cache.c":"f533937e5a4fffe76e2739ef4b6b1e1da097d96d63eb808e68ebbc7027641c23","glsl-optimizer/src/util/disk_cache.h":"e83314fb14134a8e079b15e470a6376ba5a8253701f048c890a62b7e55d64bc8","glsl-optimizer/src/util/fast_urem_by_const.h":"e108fce804616c47d071dfe4a04163eec1126e448ed1aa89abb6b3a6d772bd5b","glsl-optimizer/src/util/fnv1a.h":"ab2596f19c6adf431ae27618f62c5743e24ad23ef83bb359a4c4c218245ab459","glsl-optimizer/src/util/format/u_format.h":"4cdfc0c59cbc99a092e5ec5a396910f2d93b9643e5d8141050b011e66f11e45b","glsl-optimizer/src/util/futex.h":"26f7c9d86e9ffef4c0fa2761f1aaa1918337302e20bd6ca10e61dc3c47356deb","glsl-optimizer/src/util/half_float.c":"11bc2584493d5d9d46e8c8a619a0307cf150bf5ab5d0f96bb764b061dc37a00e","glsl-optimizer/src/util/half_float.h":"7f7c380f126da1400a91758cc0392f24bf967bce1672890b62be26fe9fbd922b","glsl-optimizer/src/util/hash_table.c":"0ca40352e35dedab0a84c64c903f1b16d47e950bb5f43b4d22bb57d499bfea6e","glsl-optimizer/src/util/hash_table.h":"217191bb360592e2232f187473c10287d2cda8ae6fa5c53d0ef74c8c206118b4","glsl-optimizer/src/util/list.h":"9fab03c6a78186bb5f173269f825f6ce976b409d931852e3d93bac632e07989a","glsl-optimizer/src/util/macros.h":"63faf65b51058c483b17f1f77da51d1c53c8beab52678cb6bd01f1228a63b6b0","glsl-optimizer/src/util/mesa-sha1.c":"00c692ec353ebc02c06c57c5a71de0ab7a119f86a4146f452e65ec87e4944417","glsl-optimizer/src/util/mesa-sha1.h":"bff4c29f4bf7cdbcefb30fa0c996a7604a380eba8976467c2a60e7cd328f7e26","glsl-optimizer/src/util/mesa-sha1_test.c":"25da89a59d51469f77b4c468ca23ffdce0a7a1166a70b6cc23026a6800b0143c","glsl-optimizer/src/util/os_memory.h":"64555faf1760ae6954f42c83727c38dfc4c278e9152115779ffaad58b42adacf","glsl-optimizer/src/util/os_memory_aligned.h":"12d86fa94be38c13f7eeebdf313795e1267dd5a7187d2f0072e0e896f41702f6","glsl-optimizer/src/util/os_memory_stdc.h":"07360363b88c927065e10df71bebf6c8a0cc3b9167c9dfce55f2d65f11e6f787","glsl-optimizer/src/util/os_misc.c":"a9936e613ec84803abd59ad47c192c8e3939993c950ac91973fdc4cec1801bb8","glsl-optimizer/src/util/os_misc.h":"cc68eb12e05b5e749c54298cb4a6f4cd20cc5af7db3403e70b3c27b56090c740","glsl-optimizer/src/util/os_time.h":"73e775f7335244ff5964c678c27eedf1aea6abea44c4169d327ea8c7ce4a3a88","glsl-optimizer/src/util/ralloc.c":"4b51189595ef67bcef52c40cbf654d969041dbd15e15d4a893ad494ac060aeca","glsl-optimizer/src/util/ralloc.h":"e573c45875ff1530f0dbee9a93ae55535fdac8d5cc88a79ebc327c688824bde5","glsl-optimizer/src/util/rounding.h":"fe22a2a198057b5442de8034968a68a70909811aa12e1096ced221415f237e2b","glsl-optimizer/src/util/set.c":"8
6f8c9a830bead5a5a79bc970b0ff97809312af07b3beb39ef9d90af04d40a1b","glsl-optimizer/src/util/set.h":"3e39ca161e7ed4ec7c436cc9c7919ed9a55ed1b71edbf2caf6f9bcfd9bc578ed","glsl-optimizer/src/util/sha1/README":"00af7419af05247081858acb2902efd99fcda2ce16e331079f701645bb3729c0","glsl-optimizer/src/util/sha1/sha1.c":"1403bbe0aad42ba3e6be7e09f7cad87a6a8c4ad5b63962f7b92b9f37d8133b04","glsl-optimizer/src/util/sha1/sha1.h":"68d9f240eab2918026ecdf22be36811abbd4f1389f6c36e31258041aeaedd247","glsl-optimizer/src/util/simple_mtx.h":"12c6c3c4b7db9168bc656d5b3c65912075084d2b388c415d5c3d3f5953a9d6c7","glsl-optimizer/src/util/softfloat.c":"a97e51a96fe5e6a052c02aa6bbec683fe73fb88a8c087d9c930503e2120d8a2e","glsl-optimizer/src/util/softfloat.h":"66664b0250e83bf5dd4cc743acd119d076efcea624a0eab3d6b60718e6ee8811","glsl-optimizer/src/util/string_buffer.c":"63a1d1b1e34926c88ea00159cafbcd56568b805c4f64d1e8c97169fe313921fc","glsl-optimizer/src/util/string_buffer.h":"7b88d1b1d9c6cfb8e93331813535c127289437c75f822029e9a3bca8ea6b52ee","glsl-optimizer/src/util/strndup.h":"0273c4fdb7482cd7746881a63d3998648c6d63415ba85af1d1860f0e0dc504c6","glsl-optimizer/src/util/strtod.c":"5cf610d8a37373cf37cfb7aae903525d943b2674b1f32594c70b0eb19a8c9697","glsl-optimizer/src/util/strtod.h":"237396def4e264d35ed4bedea00ef9a4ceab6d7a11a18c770d9747d22c69ed2d","glsl-optimizer/src/util/u_atomic.h":"c02e809526c6c09ba8fe51f50b2490d1b6c8e5c7f3c4031ae958250d098fc3bb","glsl-optimizer/src/util/u_debug.c":"8c060e379b816618f3dd22c9ea523c68b9425c76c36a7dfe5d6d375b337f5f4a","glsl-optimizer/src/util/u_debug.h":"e11e26edd9b9e4e6f8e6a435e69f4d9edda27e9a379f68f4c82ea2525aaaea68","glsl-optimizer/src/util/u_dynarray.h":"853d0fa6ff2261614488be624deb8a2b01e57c2c8eabc28578cbeed4ccc95694","glsl-optimizer/src/util/u_endian.h":"3ccea7e529740318d8a4b05c00db3adc9d1e292a52bdc56a05c9fae99209720f","glsl-optimizer/src/util/u_math.c":"c868a8c0886dc78f1b06b13404ba8b253090449045774dd56893ac9d75795184","glsl-optimizer/src/util/u_math.h":"ae235e8f73fea1b83e68e4141244666280094b3871ce299023b4c2daa1d7555c","glsl-optimizer/src/util/u_memory.h":"c5db17c724c70283ddbe04165722f6988d4e0eb9aa3602ae472feff016649af9","glsl-optimizer/src/util/u_queue.h":"92930ce236c0528a98b695f5cea8c5c6aa9683beaf71a2227bdc5d33d1b21506","glsl-optimizer/src/util/u_string.h":"c5a2f4ef576d1547bda12c4ea219179fefa54414977743ac094abcaf696ef6ca","glsl-optimizer/src/util/u_thread.h":"00b708459b27f9910d18db92c18cc65cfc618ac2b3cd144e45f8640057b10d58","glsl-optimizer/src/util/xxhash.h":"2f2aff2fc6c0c929f52cf6ae7314122124c5be026d41ad1c357608383c4a37ad","src/bindings.rs":"79993db2058bde39f99ef483d02560d33b1cb882f6a552319e8b86eb6f9021e1","src/lib.rs":"04be1554cd829eb40864b06d80b491dd48117a4e3a601c7d482117f7a0391e67","wrapper.hpp":"f3ea34cc496f7d90b9bfcada3250b37b314c3524dac693b2ece9517bc7d274ac"},"package":"74a3f5c04450dfdadb4b08f6e5ee6f5110f674de1acbd6199bfec68392a8cbaf"} \ No newline at end of file
diff --git a/third_party/rust/glslopt/Cargo.toml b/third_party/rust/glslopt/Cargo.toml
new file mode 100644
index 0000000000..2ecdbb976a
--- /dev/null
+++ b/third_party/rust/glslopt/Cargo.toml
@@ -0,0 +1,23 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+edition = "2018"
+name = "glslopt"
+version = "0.1.9"
+authors = ["Jamie Nicol <jnicol@mozilla.com>"]
+description = "Optimizes GLSL shader code"
+keywords = ["opengl", "gl", "gles", "glsl", "shader"]
+license = "MIT"
+repository = "https://github.com/jamienicol/glslopt-rs"
+[build-dependencies.cc]
+version = "1.0"
diff --git a/third_party/rust/glslopt/README.md b/third_party/rust/glslopt/README.md
new file mode 100644
index 0000000000..95997604a7
--- /dev/null
+++ b/third_party/rust/glslopt/README.md
@@ -0,0 +1,20 @@
+# glslopt-rs
+
+Rust bindings to [glsl-optimizer](https://github.com/jamienicol/glsl-optimizer).
+
+## Updating glsl-optimizer
+
+To update the version of glsl-optimizer, update the git submodule:
+
+```sh
+git submodule update --remote glsl-optimizer
+```
+
+Then, if required, regenerate the bindings:
+
+```sh
+cargo install bindgen
+bindgen wrapper.hpp -o src/bindings.rs
+```
+
+Then commit the changes.
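+
+## Example usage
+
+A minimal sketch of driving the optimizer through the raw bindings. The exact
+item names below are assumptions: they correspond to the C functions declared
+in glsl-optimizer's `glsl_optimizer.h`, but the names bindgen emits into
+`src/bindings.rs` (and any safe wrappers in `src/lib.rs`) may differ:
+
+```rust
+use std::ffi::{CStr, CString};
+
+fn optimize_fragment(source: &str) -> Result<String, String> {
+    let src = CString::new(source).map_err(|e| e.to_string())?;
+    unsafe {
+        // Assumed binding names; bindgen may prefix the enum variants.
+        let ctx = glslopt_initialize(kGlslTargetOpenGL);
+        let shader = glslopt_optimize(ctx, kGlslOptShaderFragment, src.as_ptr(), 0);
+        // The output/log strings are owned by the shader object, so copy
+        // them out before deleting it.
+        let result = if glslopt_get_status(shader) {
+            Ok(CStr::from_ptr(glslopt_get_output(shader)).to_string_lossy().into_owned())
+        } else {
+            Err(CStr::from_ptr(glslopt_get_log(shader)).to_string_lossy().into_owned())
+        };
+        glslopt_shader_delete(shader);
+        glslopt_cleanup(ctx);
+        result
+    }
+}
+```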
diff --git a/third_party/rust/glslopt/build.rs b/third_party/rust/glslopt/build.rs
new file mode 100644
index 0000000000..2e7e5ed855
--- /dev/null
+++ b/third_party/rust/glslopt/build.rs
@@ -0,0 +1,199 @@
+use cc;
+
+/// Adds the required definitions to build mesa/glsl-optimizer for the
+/// target platform.
+fn configure(build: &mut cc::Build) -> &mut cc::Build {
+ build.define("__STDC_FORMAT_MACROS", None);
+ if cfg!(target_os = "linux") {
+ build.define("_GNU_SOURCE", None);
+ build.define("HAVE_ENDIAN_H", None);
+ }
+ if cfg!(any(target_os = "freebsd", target_os = "dragonfly", target_os = "openbsd")) {
+ build.define("PTHREAD_SETAFFINITY_IN_NP_HEADER", None);
+ }
+ if cfg!(target_os = "windows") {
+ build.define("_USE_MATH_DEFINES", None);
+ } else {
+ build.define("HAVE_PTHREAD", None);
+ build.define("HAVE_TIMESPEC_GET", None);
+ }
+
+ // Avoid using e.g. moz_malloc in Gecko builds.
+ build.define("MOZ_INCLUDE_MOZALLOC_H", None);
+ // Avoid using e.g. mozalloc_abort in Gecko builds.
+ build.define("mozilla_throw_gcc_h", None);
+
+ build
+}
+
+fn main() {
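+    // Build the GLSL preprocessor (glcpp) and the shared C utility sources as one static library.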
+ configure(&mut cc::Build::new())
+ .warnings(false)
+ .include("glsl-optimizer/include")
+ .include("glsl-optimizer/src/mesa")
+ .include("glsl-optimizer/src/mapi")
+ .include("glsl-optimizer/src/compiler")
+ .include("glsl-optimizer/src/compiler/glsl")
+ .include("glsl-optimizer/src/gallium/auxiliary")
+ .include("glsl-optimizer/src/gallium/include")
+ .include("glsl-optimizer/src")
+ .include("glsl-optimizer/src/util")
+ .file("glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c")
+ .file("glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c")
+ .file("glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c")
+ .file("glsl-optimizer/src/compiler/glsl/glcpp/pp.c")
+ .file("glsl-optimizer/src/util/blob.c")
+ .file("glsl-optimizer/src/util/half_float.c")
+ .file("glsl-optimizer/src/util/hash_table.c")
+ .file("glsl-optimizer/src/util/mesa-sha1.c")
+ .file("glsl-optimizer/src/util/os_misc.c")
+ .file("glsl-optimizer/src/util/ralloc.c")
+ .file("glsl-optimizer/src/util/set.c")
+ .file("glsl-optimizer/src/util/sha1/sha1.c")
+ .file("glsl-optimizer/src/util/softfloat.c")
+ .file("glsl-optimizer/src/util/string_buffer.c")
+ .file("glsl-optimizer/src/util/strtod.c")
+ .file("glsl-optimizer/src/util/u_debug.c")
+ .compile("glcpp");
+
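+    // Build the handful of C support sources needed from Mesa proper.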
+ configure(&mut cc::Build::new())
+ .warnings(false)
+ .include("glsl-optimizer/include")
+ .include("glsl-optimizer/src/mesa")
+ .include("glsl-optimizer/src/mapi")
+ .include("glsl-optimizer/src/compiler")
+ .include("glsl-optimizer/src/compiler/glsl")
+ .include("glsl-optimizer/src/gallium/auxiliary")
+ .include("glsl-optimizer/src/gallium/include")
+ .include("glsl-optimizer/src")
+ .include("glsl-optimizer/src/util")
+ .file("glsl-optimizer/src/mesa/program/dummy_errors.c")
+ .file("glsl-optimizer/src/mesa/program/symbol_table.c")
+ .file("glsl-optimizer/src/mesa/main/extensions_table.c")
+ .file("glsl-optimizer/src/compiler/shader_enums.c")
+ .compile("mesa");
+
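+    // Build the C++ GLSL compiler and the optimizer passes themselves.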
+ configure(&mut cc::Build::new())
+ .cpp(true)
+ .warnings(false)
+ .include("glsl-optimizer/include")
+ .include("glsl-optimizer/src/mesa")
+ .include("glsl-optimizer/src/mapi")
+ .include("glsl-optimizer/src/compiler")
+ .include("glsl-optimizer/src/compiler/glsl")
+ .include("glsl-optimizer/src/gallium/auxiliary")
+ .include("glsl-optimizer/src/gallium/include")
+ .include("glsl-optimizer/src")
+ .include("glsl-optimizer/src/util")
+ .file("glsl-optimizer/src/compiler/glsl_types.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ast_array_index.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ast_expr.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ast_function.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ast_type.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/builtin_functions.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/builtin_types.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/builtin_variables.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/generate_ir.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/glsl_parser.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_builder.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_clone.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_equals.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_function.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_reader.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_validate.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/ir.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_atomics.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_functions.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_uniforms.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/link_varyings.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/linker_util.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/linker.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/loop_analysis.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/loop_unroll.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_builtins.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_discard.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_distance.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_instructions.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_int64.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_jumps.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_precision.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_vector.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_minmax.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/s_expression.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/serialize.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/shader_cache.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp")
+ .file("glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp")
+ .compile("glsl_optimizer");
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/CMakeLists.txt b/third_party/rust/glslopt/glsl-optimizer/CMakeLists.txt
new file mode 100644
index 0000000000..1ae76bbed9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/CMakeLists.txt
@@ -0,0 +1,171 @@
+cmake_minimum_required(VERSION 3.0)
+
+project(glsl_optimizer VERSION 0.1
+ DESCRIPTION "GLSL Optimizer"
+ LANGUAGES C CXX)
+
+set(CMAKE_CXX_STANDARD 11)
+
+include_directories(include)
+include_directories(src/mesa)
+include_directories(src/mapi)
+include_directories(src/compiler)
+include_directories(src/compiler/glsl)
+include_directories(src/gallium/auxiliary)
+include_directories(src/gallium/include)
+include_directories(src)
+include_directories(src/util)
+
+add_definitions(-D__STDC_FORMAT_MACROS)
+if(${CMAKE_SYSTEM_NAME} MATCHES "Linux")
+ add_definitions(-D_GNU_SOURCE)
+ add_definitions(-DHAVE_ENDIAN_H)
+endif()
+if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
+ add_definitions(-D_USE_MATH_DEFINES)
+else()
+ add_definitions(-DHAVE_PTHREAD)
+ add_definitions(-DHAVE_TIMESPEC_GET)
+endif()
+
+add_library(glcpp STATIC "src/compiler/glsl/glcpp/glcpp-lex.c"
+ "src/compiler/glsl/glcpp/glcpp-parse.c"
+ "src/compiler/glsl/glcpp/glcpp.h"
+ "src/compiler/glsl/glcpp/pp_standalone_scaffolding.c"
+ "src/compiler/glsl/glcpp/pp.c"
+ "src/util/blob.c"
+ "src/util/half_float.c"
+ "src/util/hash_table.c"
+ "src/util/mesa-sha1.c"
+ "src/util/os_misc.c"
+ "src/util/ralloc.c"
+ "src/util/set.c"
+ "src/util/sha1/sha1.c"
+ "src/util/softfloat.c"
+ "src/util/string_buffer.c"
+ "src/util/strtod.c"
+ "src/util/u_debug.c")
+
+add_library(mesa STATIC "src/mesa/program/dummy_errors.c"
+ "src/mesa/program/symbol_table.c"
+ "src/mesa/main/extensions_table.c")
+
+add_library(glsl_optimizer STATIC "src/compiler/glsl_types.cpp"
+ "src/compiler/glsl/ast_array_index.cpp"
+ "src/compiler/glsl/ast_expr.cpp"
+ "src/compiler/glsl/ast_function.cpp"
+ "src/compiler/glsl/ast_to_hir.cpp"
+ "src/compiler/glsl/ast_type.cpp"
+ "src/compiler/glsl/builtin_functions.cpp"
+ "src/compiler/glsl/builtin_int64.h"
+ "src/compiler/glsl/builtin_types.cpp"
+ "src/compiler/glsl/builtin_variables.cpp"
+ "src/compiler/glsl/generate_ir.cpp"
+ "src/compiler/glsl/glsl_lexer.cpp"
+ "src/compiler/glsl/glsl_optimizer.cpp"
+ "src/compiler/glsl/glsl_parser_extras.cpp"
+ "src/compiler/glsl/glsl_parser.cpp"
+ "src/compiler/glsl/glsl_symbol_table.cpp"
+ "src/compiler/glsl/hir_field_selection.cpp"
+ "src/compiler/glsl/ir_array_refcount.cpp"
+ "src/compiler/glsl/ir_basic_block.cpp"
+ "src/compiler/glsl/ir_builder.cpp"
+ "src/compiler/glsl/ir_clone.cpp"
+ "src/compiler/glsl/ir_constant_expression.cpp"
+ "src/compiler/glsl/ir_equals.cpp"
+ "src/compiler/glsl/ir_expression_flattening.cpp"
+ "src/compiler/glsl/ir_function_can_inline.cpp"
+ "src/compiler/glsl/ir_function_detect_recursion.cpp"
+ "src/compiler/glsl/ir_function.cpp"
+ "src/compiler/glsl/ir_hierarchical_visitor.cpp"
+ "src/compiler/glsl/ir_hv_accept.cpp"
+ "src/compiler/glsl/ir_print_glsl_visitor.cpp"
+ "src/compiler/glsl/ir_print_visitor.cpp"
+ "src/compiler/glsl/ir_reader.cpp"
+ "src/compiler/glsl/ir_rvalue_visitor.cpp"
+ "src/compiler/glsl/ir_set_program_inouts.cpp"
+ "src/compiler/glsl/ir_unused_structs.cpp"
+ "src/compiler/glsl/ir_validate.cpp"
+ "src/compiler/glsl/ir_variable_refcount.cpp"
+ "src/compiler/glsl/ir_visitor.h"
+ "src/compiler/glsl/ir.cpp"
+ "src/compiler/glsl/link_atomics.cpp"
+ "src/compiler/glsl/link_functions.cpp"
+ "src/compiler/glsl/link_interface_blocks.cpp"
+ "src/compiler/glsl/link_uniform_block_active_visitor.cpp"
+ "src/compiler/glsl/link_uniform_blocks.cpp"
+ "src/compiler/glsl/link_uniform_initializers.cpp"
+ "src/compiler/glsl/link_uniforms.cpp"
+ "src/compiler/glsl/link_varyings.cpp"
+ "src/compiler/glsl/linker_util.cpp"
+ "src/compiler/glsl/linker.cpp"
+ "src/compiler/glsl/list.h"
+ "src/compiler/glsl/loop_analysis.cpp"
+ "src/compiler/glsl/loop_unroll.cpp"
+ "src/compiler/glsl/lower_blend_equation_advanced.cpp"
+ "src/compiler/glsl/lower_buffer_access.cpp"
+ "src/compiler/glsl/lower_builtins.cpp"
+ "src/compiler/glsl/lower_const_arrays_to_uniforms.cpp"
+ "src/compiler/glsl/lower_cs_derived.cpp"
+ "src/compiler/glsl/lower_discard_flow.cpp"
+ "src/compiler/glsl/lower_discard.cpp"
+ "src/compiler/glsl/lower_distance.cpp"
+ "src/compiler/glsl/lower_if_to_cond_assign.cpp"
+ "src/compiler/glsl/lower_instructions.cpp"
+ "src/compiler/glsl/lower_int64.cpp"
+ "src/compiler/glsl/lower_jumps.cpp"
+ "src/compiler/glsl/lower_mat_op_to_vec.cpp"
+ "src/compiler/glsl/lower_named_interface_blocks.cpp"
+ "src/compiler/glsl/lower_offset_array.cpp"
+ "src/compiler/glsl/lower_output_reads.cpp"
+ "src/compiler/glsl/lower_packed_varyings.cpp"
+ "src/compiler/glsl/lower_packing_builtins.cpp"
+ "src/compiler/glsl/lower_precision.cpp"
+ "src/compiler/glsl/lower_shared_reference.cpp"
+ "src/compiler/glsl/lower_subroutine.cpp"
+ "src/compiler/glsl/lower_tess_level.cpp"
+ "src/compiler/glsl/lower_texture_projection.cpp"
+ "src/compiler/glsl/lower_ubo_reference.cpp"
+ "src/compiler/glsl/lower_variable_index_to_cond_assign.cpp"
+ "src/compiler/glsl/lower_vec_index_to_cond_assign.cpp"
+ "src/compiler/glsl/lower_vec_index_to_swizzle.cpp"
+ "src/compiler/glsl/lower_vector_derefs.cpp"
+ "src/compiler/glsl/lower_vector_insert.cpp"
+ "src/compiler/glsl/lower_vector.cpp"
+ "src/compiler/glsl/lower_vertex_id.cpp"
+ "src/compiler/glsl/lower_xfb_varying.cpp"
+ "src/compiler/glsl/opt_algebraic.cpp"
+ "src/compiler/glsl/opt_array_splitting.cpp"
+ "src/compiler/glsl/opt_conditional_discard.cpp"
+ "src/compiler/glsl/opt_constant_folding.cpp"
+ "src/compiler/glsl/opt_constant_propagation.cpp"
+ "src/compiler/glsl/opt_constant_variable.cpp"
+ "src/compiler/glsl/opt_copy_propagation_elements.cpp"
+ "src/compiler/glsl/opt_dead_builtin_variables.cpp"
+ "src/compiler/glsl/opt_dead_builtin_varyings.cpp"
+ "src/compiler/glsl/opt_dead_code_local.cpp"
+ "src/compiler/glsl/opt_dead_code.cpp"
+ "src/compiler/glsl/opt_dead_functions.cpp"
+ "src/compiler/glsl/opt_flatten_nested_if_blocks.cpp"
+ "src/compiler/glsl/opt_flip_matrices.cpp"
+ "src/compiler/glsl/opt_function_inlining.cpp"
+ "src/compiler/glsl/opt_if_simplification.cpp"
+ "src/compiler/glsl/opt_minmax.cpp"
+ "src/compiler/glsl/opt_rebalance_tree.cpp"
+ "src/compiler/glsl/opt_redundant_jumps.cpp"
+ "src/compiler/glsl/opt_structure_splitting.cpp"
+ "src/compiler/glsl/opt_swizzle.cpp"
+ "src/compiler/glsl/opt_tree_grafting.cpp"
+ "src/compiler/glsl/opt_vectorize.cpp"
+ "src/compiler/glsl/propagate_invariance.cpp"
+ "src/compiler/glsl/s_expression.cpp"
+ "src/compiler/glsl/serialize.cpp"
+ "src/compiler/glsl/shader_cache.cpp"
+ "src/compiler/glsl/standalone_scaffolding.cpp"
+ "src/compiler/glsl/string_to_uint_map.cpp"
+ "src/compiler/shader_enums.c")
+
+target_link_libraries(glsl_optimizer glcpp mesa)
+
+add_executable(glslopt "contrib/glslopt/Main.cpp")
+target_link_libraries(glslopt glsl_optimizer)
diff --git a/third_party/rust/glslopt/glsl-optimizer/README.md b/third_party/rust/glslopt/glsl-optimizer/README.md
new file mode 100644
index 0000000000..5948a69e6f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/README.md
@@ -0,0 +1,100 @@
+GLSL optimizer
+==============
+
+> :warning: As of mid-2016, this project is unlikely to see significant development. At Unity we are moving to a different
+shader compilation pipeline in which glsl-optimizer is not used, so from my side there won't be significant work done on it. :warning:
+
+
+A C++ library that takes GLSL shaders, does some GPU-independent optimizations on them
+and outputs GLSL or Metal source back. Optimizations include function inlining, dead code removal, copy propagation,
+constant folding, constant propagation, arithmetic optimizations and so on.
+
+Apparently quite a few mobile platforms are pretty bad at optimizing shaders, and
+unfortunately they *also* lack offline shader compilers. So running a GLSL optimizer offline
+beforehand can make shaders run much faster on such platforms. See performance numbers
+in [this blog post](http://aras-p.info/blog/2010/09/29/glsl-optimizer/).
+
+Even for drivers that have decent shader optimization, the GLSL optimizer can be useful to strip away
+dead code, make shaders smaller and do uniform/input reflection offline.
+
+Almost all of the actual code is [Mesa 3D's GLSL](http://cgit.freedesktop.org/mesa/mesa/log/)
+compiler; all this library does is spit out optimized GLSL or Metal back, and add GLES-style precision
+handling to the optimizer.
+
+This GLSL optimizer is made for [Unity's](http://unity3d.com/) purposes and has been built in
+since Unity 3.0.
+
+GLSL Optimizer is licensed according to the terms of the MIT license.
+
+See [change log here](Changelog.md).
+
+
+Usage
+-----
+
+Visual Studio 2010 (Windows, x86/x64) and Xcode 5+ (Mac, i386) project files for a static
+library are provided in `projects/vs2010/glsl_optimizer.sln` and `projects/xcode5/glsl_optimizer_lib`
+respectively.
+
+> Note: only the VS and Xcode project files are maintained and should always work.
+> There's also a CMake and gyp build system for Linux et al., and some stuff in the contrib folder -
+> all of that may or may not work.
+
+For Linux you can use CMake: just type `cmake . && make` in the root directory.
+This will build the optimizer library and some executable binaries.
+
+Interface for the library is `src/glsl/glsl_optimizer.h`. General usage is:
+
+    glslopt_ctx* ctx = glslopt_initialize(targetVersion);
+
+    // One context can be reused to optimize many shaders:
+    glslopt_shader* shader = glslopt_optimize(ctx, shaderType, shaderSource, options);
+    if (glslopt_get_status(shader)) {
+        const char* newSource = glslopt_get_output(shader);
+    } else {
+        const char* errorLog = glslopt_get_log(shader);
+    }
+    glslopt_shader_delete(shader);
+    glslopt_cleanup(ctx);
+
+The strings returned by `glslopt_get_output` and `glslopt_get_log` are owned by
+the shader object, so copy them out before calling `glslopt_shader_delete`.
+
+
+Tests
+-----
+
+There's a test suite for catching regressions; see the `tests` folder. In VS, build
+and run the `glsl_optimizer_tests` project; in Xcode, use the `projects/xcode5/glsl_optimizer_tests`
+project. The test executable requires the path to the `tests` folder as an argument.
+
+Each test comes as three text files: input, expected IR dump, and expected optimized
+GLSL dump. GLES3 tests are also converted into Metal.
+
+If you're making changes to the project and want pull requests to be accepted easily, I'd
+appreciate it if there were no test suite regressions. If you are implementing a
+feature, it would be cool to add tests to cover it as well!
+
+
+Notes
+-----
+
+* GLSL versions 1.10 and 1.20 are supported. 1.10 is the default; use `#version 120` to specify
+1.20. Higher GLSL versions might work, but aren't tested at the moment.
+* GLSL ES versions 1.00 and 3.00 are supported.
+
+
+Dev Notes
+---------
+
+Pulling Mesa upstream:
+
+ git fetch upstream
+ git merge upstream/master
+ sh removeDeletedByUs.sh
+ # inspect files, git rm unneeded ones, fix conflicts etc.
+ # git commit
+
+Rebuilding flex/bison parsers:
+
+* When .y/.l files are changed, the parsers are *not* rebuilt automatically.
+* Run ./generateParsers.sh to do that. You'll need bison & flex (on Mac, do "Install Command Line Tools" from Xcode).
+* I use bison 2.3 and flex 2.5.35 (on OS X 10.8/10.9).
+
diff --git a/third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Main.cpp b/third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Main.cpp
new file mode 100644
index 0000000000..4f38dfe0dc
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Main.cpp
@@ -0,0 +1,159 @@
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include "glsl_optimizer.h"
+
+static glslopt_ctx* gContext = 0;
+
+static int printhelp(const char* msg)
+{
+ if (msg) printf("%s\n\n\n", msg);
+ printf("Usage: glslopt <-f|-v> <input shader> [<output shader>]\n");
+ printf("\t-f : fragment shader (default)\n");
+ printf("\t-v : vertex shader\n");
+ printf("\t-1 : target OpenGL (default)\n");
+ printf("\t-2 : target OpenGL ES 2.0\n");
+ printf("\t-3 : target OpenGL ES 3.0\n");
+ printf("\n\tIf no output specified, output is to [input].out.\n");
+ return 1;
+}
+
+static bool init(glslopt_target target)
+{
+ gContext = glslopt_initialize(target);
+ if( !gContext )
+ return false;
+ return true;
+}
+
+static void term()
+{
+ glslopt_cleanup(gContext);
+}
+
+static char* loadFile(const char* filename)
+{
+ FILE* file = fopen(filename, "rt");
+ if( !file )
+ {
+ printf("Failed to open %s for reading\n", filename);
+ return 0;
+ }
+
+ fseek(file, 0, SEEK_END);
+ const int size = ftell(file);
+ fseek(file, 0, SEEK_SET);
+
+ char* result = new char[size+1];
+ const int count = (int)fread(result, 1, size, file);
+ result[count] = 0;
+
+ fclose(file);
+ return result;
+}
+
+static bool saveFile(const char* filename, const char* data)
+{
+ int size = (int)strlen(data);
+
+ FILE* file = fopen(filename, "wt");
+ if( !file )
+ {
+ printf( "Failed to open %s for writing\n", filename);
+ return false;
+ }
+
+ if( 1 != fwrite(data,size,1,file) )
+ {
+ printf( "Failed to write to %s\n", filename);
+ fclose(file);
+ return false;
+ }
+
+ fclose(file);
+ return true;
+}
+
+static bool compileShader(const char* dstfilename, const char* srcfilename, bool vertexShader)
+{
+ const char* originalShader = loadFile(srcfilename);
+ if( !originalShader )
+ return false;
+
+ const glslopt_shader_type type = vertexShader ? kGlslOptShaderVertex : kGlslOptShaderFragment;
+
+	glslopt_shader* shader = glslopt_optimize(gContext, type, originalShader, 0);
+	if( !glslopt_get_status(shader) )
+	{
+		printf( "Failed to compile %s:\n\n%s\n", srcfilename, glslopt_get_log(shader));
+		glslopt_shader_delete(shader);
+		delete[] originalShader;
+		return false;
+	}
+
+	// The optimized source is owned by the shader object, so write it to
+	// disk before deleting the shader.
+	const char* optimizedShader = glslopt_get_output(shader);
+	const bool ok = saveFile(dstfilename, optimizedShader);
+
+	glslopt_shader_delete(shader);
+	delete[] originalShader;
+	return ok;
+}
+
+int main(int argc, char* argv[])
+{
+ if( argc < 3 )
+ return printhelp(NULL);
+
+ bool vertexShader = false, freename = false;
+ glslopt_target languageTarget = kGlslTargetOpenGL;
+ const char* source = 0;
+ char* dest = 0;
+
+ for( int i=1; i < argc; i++ )
+ {
+ if( argv[i][0] == '-' )
+ {
+ if( 0 == strcmp("-v", argv[i]) )
+ vertexShader = true;
+ else if( 0 == strcmp("-f", argv[i]) )
+ vertexShader = false;
+ else if( 0 == strcmp("-1", argv[i]) )
+ languageTarget = kGlslTargetOpenGL;
+ else if( 0 == strcmp("-2", argv[i]) )
+ languageTarget = kGlslTargetOpenGLES20;
+ else if( 0 == strcmp("-3", argv[i]) )
+ languageTarget = kGlslTargetOpenGLES30;
+ }
+ else
+ {
+ if( source == 0 )
+ source = argv[i];
+ else if( dest == 0 )
+ dest = argv[i];
+ }
+ }
+
+ if( !source )
+ return printhelp("Must give a source");
+
+ if( !init(languageTarget) )
+ {
+ printf("Failed to initialize glslopt!\n");
+ return 1;
+ }
+
+ if ( !dest ) {
+ dest = (char *) calloc(strlen(source)+5, sizeof(char));
+ snprintf(dest, strlen(source)+5, "%s.out", source);
+ freename = true;
+ }
+
+ int result = 0;
+ if( !compileShader(dest, source, vertexShader) )
+ result = 1;
+
+ if( freename ) free(dest);
+
+ term();
+ return result;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Readme b/third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Readme
new file mode 100644
index 0000000000..7d28ecdda6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/contrib/glslopt/Readme
@@ -0,0 +1,13 @@
+This is a small sample program to get you up and running quickly; it transforms an input text file
+into an output text file. The input file should be a fully preprocessed GLSL shader, and the
+output is the optimized shader in text form.
+
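+For example, to optimize a fragment shader (when no output file is given, the
+result is written to <input>.out):
+
+    glslopt -f myshader.frag
+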
+The project file is generated by BadgerConfig.
+
+VS2005
+BadgerConfig
+
+
+
+
+Jim Tilander, Santa Monica 2010
diff --git a/third_party/rust/glslopt/glsl-optimizer/generateParsers.sh b/third_party/rust/glslopt/glsl-optimizer/generateParsers.sh
new file mode 100755
index 0000000000..1b4e635136
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/generateParsers.sh
@@ -0,0 +1,9 @@
+#! /bin/sh
+flex --nounistd -osrc/compiler/glsl/glcpp/glcpp-lex.c src/compiler/glsl/glcpp/glcpp-lex.l
+flex --nounistd -osrc/compiler/glsl/glsl_lexer.cpp src/compiler/glsl/glsl_lexer.ll
+bison -v -o "src/compiler/glsl/glcpp/glcpp-parse.c" -p "glcpp_parser_" --defines=src/compiler/glsl/glcpp/glcpp-parse.h src/compiler/glsl/glcpp/glcpp-parse.y
+bison -v -o "src/compiler/glsl/glsl_parser.cpp" -p "_mesa_glsl_" --defines=src/compiler/glsl/glsl_parser.h src/compiler/glsl/glsl_parser.yy
+
+python "src/compiler/glsl/ir_expression_operation.py" "enum" >src/compiler/glsl/ir_expression_operation.h
+python "src/compiler/glsl/ir_expression_operation.py" "strings" >src/compiler/glsl/ir_expression_operation_strings.h
+python "src/compiler/glsl/ir_expression_operation.py" "constant" >src/compiler/glsl/ir_expression_operation_constant.h
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/GL/gl.h b/third_party/rust/glslopt/glsl-optimizer/include/GL/gl.h
new file mode 100644
index 0000000000..2518dfbb40
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/GL/gl.h
@@ -0,0 +1,2103 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef __gl_h_
+#define __gl_h_
+
+/**********************************************************************
+ * Begin system-specific stuff.
+ */
+
+#if defined(_WIN32) && !defined(__WIN32__) && !defined(__CYGWIN__)
+#define __WIN32__
+#endif
+
+#if defined(__WIN32__) && !defined(__CYGWIN__)
+# if (defined(_MSC_VER) || defined(__MINGW32__)) && defined(BUILD_GL32) /* tag specifying we're building mesa as a DLL */
+# define GLAPI __declspec(dllexport)
+# elif (defined(_MSC_VER) || defined(__MINGW32__)) && defined(_DLL) /* tag specifying we're building for DLL runtime support */
+# define GLAPI __declspec(dllimport)
+# else /* for use with static link lib build of Win32 edition only */
+# define GLAPI extern
+# endif
+# if defined(__MINGW32__) && defined(GL_NO_STDCALL) || defined(UNDER_CE) /* The generated DLLs by MingW with STDCALL are not compatible with the ones done by Microsoft's compilers */
+# define GLAPIENTRY
+# else
+# define GLAPIENTRY __stdcall
+# endif
+#elif defined(__CYGWIN__) && defined(USE_OPENGL32) /* use native windows opengl32 */
+# define GLAPI extern
+# define GLAPIENTRY __stdcall
+#elif (defined(__GNUC__) && __GNUC__ >= 4) || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
+# define GLAPI __attribute__((visibility("default")))
+# define GLAPIENTRY
+#endif /* WIN32 && !CYGWIN */
+
+/*
+ * WINDOWS: Include windows.h here to define APIENTRY.
+ * It is also useful when applications include this file by
+ * including only glut.h, since glut.h depends on windows.h.
+ * Applications needing to include windows.h with parms other
+ * than "WIN32_LEAN_AND_MEAN" may include windows.h before
+ * glut.h or gl.h.
+ */
+#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+#include <windows.h>
+#endif
+
+#ifndef GLAPI
+#define GLAPI extern
+#endif
+
+#ifndef GLAPIENTRY
+#define GLAPIENTRY
+#endif
+
+#ifndef APIENTRY
+#define APIENTRY GLAPIENTRY
+#endif
+
+/* "P" suffix to be used for a pointer to a function */
+#ifndef APIENTRYP
+#define APIENTRYP APIENTRY *
+#endif
+
+#ifndef GLAPIENTRYP
+#define GLAPIENTRYP GLAPIENTRY *
+#endif
+
+/*
+ * End system-specific stuff.
+ **********************************************************************/
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+#define GL_VERSION_1_1 1
+#define GL_VERSION_1_2 1
+#define GL_VERSION_1_3 1
+#define GL_ARB_imaging 1
+
+
+/*
+ * Datatypes
+ */
+typedef unsigned int GLenum;
+typedef unsigned char GLboolean;
+typedef unsigned int GLbitfield;
+typedef void GLvoid;
+typedef signed char GLbyte; /* 1-byte signed */
+typedef short GLshort; /* 2-byte signed */
+typedef int GLint; /* 4-byte signed */
+typedef unsigned char GLubyte; /* 1-byte unsigned */
+typedef unsigned short GLushort; /* 2-byte unsigned */
+typedef unsigned int GLuint; /* 4-byte unsigned */
+typedef int GLsizei; /* 4-byte signed */
+typedef float GLfloat; /* single precision float */
+typedef float GLclampf; /* single precision float in [0,1] */
+typedef double GLdouble; /* double precision float */
+typedef double GLclampd; /* double precision float in [0,1] */
+
+
+
+/*
+ * Constants
+ */
+
+/* Boolean values */
+#define GL_FALSE 0
+#define GL_TRUE 1
+
+/* Data types */
+#define GL_BYTE 0x1400
+#define GL_UNSIGNED_BYTE 0x1401
+#define GL_SHORT 0x1402
+#define GL_UNSIGNED_SHORT 0x1403
+#define GL_INT 0x1404
+#define GL_UNSIGNED_INT 0x1405
+#define GL_FLOAT 0x1406
+#define GL_2_BYTES 0x1407
+#define GL_3_BYTES 0x1408
+#define GL_4_BYTES 0x1409
+#define GL_DOUBLE 0x140A
+
+/* Primitives */
+#define GL_POINTS 0x0000
+#define GL_LINES 0x0001
+#define GL_LINE_LOOP 0x0002
+#define GL_LINE_STRIP 0x0003
+#define GL_TRIANGLES 0x0004
+#define GL_TRIANGLE_STRIP 0x0005
+#define GL_TRIANGLE_FAN 0x0006
+#define GL_QUADS 0x0007
+#define GL_QUAD_STRIP 0x0008
+#define GL_POLYGON 0x0009
+
+/* Vertex Arrays */
+#define GL_VERTEX_ARRAY 0x8074
+#define GL_NORMAL_ARRAY 0x8075
+#define GL_COLOR_ARRAY 0x8076
+#define GL_INDEX_ARRAY 0x8077
+#define GL_TEXTURE_COORD_ARRAY 0x8078
+#define GL_EDGE_FLAG_ARRAY 0x8079
+#define GL_VERTEX_ARRAY_SIZE 0x807A
+#define GL_VERTEX_ARRAY_TYPE 0x807B
+#define GL_VERTEX_ARRAY_STRIDE 0x807C
+#define GL_NORMAL_ARRAY_TYPE 0x807E
+#define GL_NORMAL_ARRAY_STRIDE 0x807F
+#define GL_COLOR_ARRAY_SIZE 0x8081
+#define GL_COLOR_ARRAY_TYPE 0x8082
+#define GL_COLOR_ARRAY_STRIDE 0x8083
+#define GL_INDEX_ARRAY_TYPE 0x8085
+#define GL_INDEX_ARRAY_STRIDE 0x8086
+#define GL_TEXTURE_COORD_ARRAY_SIZE 0x8088
+#define GL_TEXTURE_COORD_ARRAY_TYPE 0x8089
+#define GL_TEXTURE_COORD_ARRAY_STRIDE 0x808A
+#define GL_EDGE_FLAG_ARRAY_STRIDE 0x808C
+#define GL_VERTEX_ARRAY_POINTER 0x808E
+#define GL_NORMAL_ARRAY_POINTER 0x808F
+#define GL_COLOR_ARRAY_POINTER 0x8090
+#define GL_INDEX_ARRAY_POINTER 0x8091
+#define GL_TEXTURE_COORD_ARRAY_POINTER 0x8092
+#define GL_EDGE_FLAG_ARRAY_POINTER 0x8093
+#define GL_V2F 0x2A20
+#define GL_V3F 0x2A21
+#define GL_C4UB_V2F 0x2A22
+#define GL_C4UB_V3F 0x2A23
+#define GL_C3F_V3F 0x2A24
+#define GL_N3F_V3F 0x2A25
+#define GL_C4F_N3F_V3F 0x2A26
+#define GL_T2F_V3F 0x2A27
+#define GL_T4F_V4F 0x2A28
+#define GL_T2F_C4UB_V3F 0x2A29
+#define GL_T2F_C3F_V3F 0x2A2A
+#define GL_T2F_N3F_V3F 0x2A2B
+#define GL_T2F_C4F_N3F_V3F 0x2A2C
+#define GL_T4F_C4F_N3F_V4F 0x2A2D
+
+/* Matrix Mode */
+#define GL_MATRIX_MODE 0x0BA0
+#define GL_MODELVIEW 0x1700
+#define GL_PROJECTION 0x1701
+#define GL_TEXTURE 0x1702
+
+/* Points */
+#define GL_POINT_SMOOTH 0x0B10
+#define GL_POINT_SIZE 0x0B11
+#define GL_POINT_SIZE_GRANULARITY 0x0B13
+#define GL_POINT_SIZE_RANGE 0x0B12
+
+/* Lines */
+#define GL_LINE_SMOOTH 0x0B20
+#define GL_LINE_STIPPLE 0x0B24
+#define GL_LINE_STIPPLE_PATTERN 0x0B25
+#define GL_LINE_STIPPLE_REPEAT 0x0B26
+#define GL_LINE_WIDTH 0x0B21
+#define GL_LINE_WIDTH_GRANULARITY 0x0B23
+#define GL_LINE_WIDTH_RANGE 0x0B22
+
+/* Polygons */
+#define GL_POINT 0x1B00
+#define GL_LINE 0x1B01
+#define GL_FILL 0x1B02
+#define GL_CW 0x0900
+#define GL_CCW 0x0901
+#define GL_FRONT 0x0404
+#define GL_BACK 0x0405
+#define GL_POLYGON_MODE 0x0B40
+#define GL_POLYGON_SMOOTH 0x0B41
+#define GL_POLYGON_STIPPLE 0x0B42
+#define GL_EDGE_FLAG 0x0B43
+#define GL_CULL_FACE 0x0B44
+#define GL_CULL_FACE_MODE 0x0B45
+#define GL_FRONT_FACE 0x0B46
+#define GL_POLYGON_OFFSET_FACTOR 0x8038
+#define GL_POLYGON_OFFSET_UNITS 0x2A00
+#define GL_POLYGON_OFFSET_POINT 0x2A01
+#define GL_POLYGON_OFFSET_LINE 0x2A02
+#define GL_POLYGON_OFFSET_FILL 0x8037
+
+/* Display Lists */
+#define GL_COMPILE 0x1300
+#define GL_COMPILE_AND_EXECUTE 0x1301
+#define GL_LIST_BASE 0x0B32
+#define GL_LIST_INDEX 0x0B33
+#define GL_LIST_MODE 0x0B30
+
+/* Depth buffer */
+#define GL_NEVER 0x0200
+#define GL_LESS 0x0201
+#define GL_EQUAL 0x0202
+#define GL_LEQUAL 0x0203
+#define GL_GREATER 0x0204
+#define GL_NOTEQUAL 0x0205
+#define GL_GEQUAL 0x0206
+#define GL_ALWAYS 0x0207
+#define GL_DEPTH_TEST 0x0B71
+#define GL_DEPTH_BITS 0x0D56
+#define GL_DEPTH_CLEAR_VALUE 0x0B73
+#define GL_DEPTH_FUNC 0x0B74
+#define GL_DEPTH_RANGE 0x0B70
+#define GL_DEPTH_WRITEMASK 0x0B72
+#define GL_DEPTH_COMPONENT 0x1902
+
+/* Lighting */
+#define GL_LIGHTING 0x0B50
+#define GL_LIGHT0 0x4000
+#define GL_LIGHT1 0x4001
+#define GL_LIGHT2 0x4002
+#define GL_LIGHT3 0x4003
+#define GL_LIGHT4 0x4004
+#define GL_LIGHT5 0x4005
+#define GL_LIGHT6 0x4006
+#define GL_LIGHT7 0x4007
+#define GL_SPOT_EXPONENT 0x1205
+#define GL_SPOT_CUTOFF 0x1206
+#define GL_CONSTANT_ATTENUATION 0x1207
+#define GL_LINEAR_ATTENUATION 0x1208
+#define GL_QUADRATIC_ATTENUATION 0x1209
+#define GL_AMBIENT 0x1200
+#define GL_DIFFUSE 0x1201
+#define GL_SPECULAR 0x1202
+#define GL_SHININESS 0x1601
+#define GL_EMISSION 0x1600
+#define GL_POSITION 0x1203
+#define GL_SPOT_DIRECTION 0x1204
+#define GL_AMBIENT_AND_DIFFUSE 0x1602
+#define GL_COLOR_INDEXES 0x1603
+#define GL_LIGHT_MODEL_TWO_SIDE 0x0B52
+#define GL_LIGHT_MODEL_LOCAL_VIEWER 0x0B51
+#define GL_LIGHT_MODEL_AMBIENT 0x0B53
+#define GL_FRONT_AND_BACK 0x0408
+#define GL_SHADE_MODEL 0x0B54
+#define GL_FLAT 0x1D00
+#define GL_SMOOTH 0x1D01
+#define GL_COLOR_MATERIAL 0x0B57
+#define GL_COLOR_MATERIAL_FACE 0x0B55
+#define GL_COLOR_MATERIAL_PARAMETER 0x0B56
+#define GL_NORMALIZE 0x0BA1
+
+/* User clipping planes */
+#define GL_CLIP_PLANE0 0x3000
+#define GL_CLIP_PLANE1 0x3001
+#define GL_CLIP_PLANE2 0x3002
+#define GL_CLIP_PLANE3 0x3003
+#define GL_CLIP_PLANE4 0x3004
+#define GL_CLIP_PLANE5 0x3005
+
+/* Accumulation buffer */
+#define GL_ACCUM_RED_BITS 0x0D58
+#define GL_ACCUM_GREEN_BITS 0x0D59
+#define GL_ACCUM_BLUE_BITS 0x0D5A
+#define GL_ACCUM_ALPHA_BITS 0x0D5B
+#define GL_ACCUM_CLEAR_VALUE 0x0B80
+#define GL_ACCUM 0x0100
+#define GL_ADD 0x0104
+#define GL_LOAD 0x0101
+#define GL_MULT 0x0103
+#define GL_RETURN 0x0102
+
+/* Alpha testing */
+#define GL_ALPHA_TEST 0x0BC0
+#define GL_ALPHA_TEST_REF 0x0BC2
+#define GL_ALPHA_TEST_FUNC 0x0BC1
+
+/* Blending */
+#define GL_BLEND 0x0BE2
+#define GL_BLEND_SRC 0x0BE1
+#define GL_BLEND_DST 0x0BE0
+#define GL_ZERO 0
+#define GL_ONE 1
+#define GL_SRC_COLOR 0x0300
+#define GL_ONE_MINUS_SRC_COLOR 0x0301
+#define GL_SRC_ALPHA 0x0302
+#define GL_ONE_MINUS_SRC_ALPHA 0x0303
+#define GL_DST_ALPHA 0x0304
+#define GL_ONE_MINUS_DST_ALPHA 0x0305
+#define GL_DST_COLOR 0x0306
+#define GL_ONE_MINUS_DST_COLOR 0x0307
+#define GL_SRC_ALPHA_SATURATE 0x0308
+
+/* Render Mode */
+#define GL_FEEDBACK 0x1C01
+#define GL_RENDER 0x1C00
+#define GL_SELECT 0x1C02
+
+/* Feedback */
+#define GL_2D 0x0600
+#define GL_3D 0x0601
+#define GL_3D_COLOR 0x0602
+#define GL_3D_COLOR_TEXTURE 0x0603
+#define GL_4D_COLOR_TEXTURE 0x0604
+#define GL_POINT_TOKEN 0x0701
+#define GL_LINE_TOKEN 0x0702
+#define GL_LINE_RESET_TOKEN 0x0707
+#define GL_POLYGON_TOKEN 0x0703
+#define GL_BITMAP_TOKEN 0x0704
+#define GL_DRAW_PIXEL_TOKEN 0x0705
+#define GL_COPY_PIXEL_TOKEN 0x0706
+#define GL_PASS_THROUGH_TOKEN 0x0700
+#define GL_FEEDBACK_BUFFER_POINTER 0x0DF0
+#define GL_FEEDBACK_BUFFER_SIZE 0x0DF1
+#define GL_FEEDBACK_BUFFER_TYPE 0x0DF2
+
+/* Selection */
+#define GL_SELECTION_BUFFER_POINTER 0x0DF3
+#define GL_SELECTION_BUFFER_SIZE 0x0DF4
+
+/* Fog */
+#define GL_FOG 0x0B60
+#define GL_FOG_MODE 0x0B65
+#define GL_FOG_DENSITY 0x0B62
+#define GL_FOG_COLOR 0x0B66
+#define GL_FOG_INDEX 0x0B61
+#define GL_FOG_START 0x0B63
+#define GL_FOG_END 0x0B64
+#define GL_LINEAR 0x2601
+#define GL_EXP 0x0800
+#define GL_EXP2 0x0801
+
+/* Logic Ops */
+#define GL_LOGIC_OP 0x0BF1
+#define GL_INDEX_LOGIC_OP 0x0BF1
+#define GL_COLOR_LOGIC_OP 0x0BF2
+#define GL_LOGIC_OP_MODE 0x0BF0
+#define GL_CLEAR 0x1500
+#define GL_SET 0x150F
+#define GL_COPY 0x1503
+#define GL_COPY_INVERTED 0x150C
+#define GL_NOOP 0x1505
+#define GL_INVERT 0x150A
+#define GL_AND 0x1501
+#define GL_NAND 0x150E
+#define GL_OR 0x1507
+#define GL_NOR 0x1508
+#define GL_XOR 0x1506
+#define GL_EQUIV 0x1509
+#define GL_AND_REVERSE 0x1502
+#define GL_AND_INVERTED 0x1504
+#define GL_OR_REVERSE 0x150B
+#define GL_OR_INVERTED 0x150D
+
+/* Stencil */
+#define GL_STENCIL_BITS 0x0D57
+#define GL_STENCIL_TEST 0x0B90
+#define GL_STENCIL_CLEAR_VALUE 0x0B91
+#define GL_STENCIL_FUNC 0x0B92
+#define GL_STENCIL_VALUE_MASK 0x0B93
+#define GL_STENCIL_FAIL 0x0B94
+#define GL_STENCIL_PASS_DEPTH_FAIL 0x0B95
+#define GL_STENCIL_PASS_DEPTH_PASS 0x0B96
+#define GL_STENCIL_REF 0x0B97
+#define GL_STENCIL_WRITEMASK 0x0B98
+#define GL_STENCIL_INDEX 0x1901
+#define GL_KEEP 0x1E00
+#define GL_REPLACE 0x1E01
+#define GL_INCR 0x1E02
+#define GL_DECR 0x1E03
+
+/* Buffers, Pixel Drawing/Reading */
+#define GL_NONE 0
+#define GL_LEFT 0x0406
+#define GL_RIGHT 0x0407
+/*GL_FRONT 0x0404 */
+/*GL_BACK 0x0405 */
+/*GL_FRONT_AND_BACK 0x0408 */
+#define GL_FRONT_LEFT 0x0400
+#define GL_FRONT_RIGHT 0x0401
+#define GL_BACK_LEFT 0x0402
+#define GL_BACK_RIGHT 0x0403
+#define GL_AUX0 0x0409
+#define GL_AUX1 0x040A
+#define GL_AUX2 0x040B
+#define GL_AUX3 0x040C
+#define GL_COLOR_INDEX 0x1900
+#define GL_RED 0x1903
+#define GL_GREEN 0x1904
+#define GL_BLUE 0x1905
+#define GL_ALPHA 0x1906
+#define GL_LUMINANCE 0x1909
+#define GL_LUMINANCE_ALPHA 0x190A
+#define GL_ALPHA_BITS 0x0D55
+#define GL_RED_BITS 0x0D52
+#define GL_GREEN_BITS 0x0D53
+#define GL_BLUE_BITS 0x0D54
+#define GL_INDEX_BITS 0x0D51
+#define GL_SUBPIXEL_BITS 0x0D50
+#define GL_AUX_BUFFERS 0x0C00
+#define GL_READ_BUFFER 0x0C02
+#define GL_DRAW_BUFFER 0x0C01
+#define GL_DOUBLEBUFFER 0x0C32
+#define GL_STEREO 0x0C33
+#define GL_BITMAP 0x1A00
+#define GL_COLOR 0x1800
+#define GL_DEPTH 0x1801
+#define GL_STENCIL 0x1802
+#define GL_DITHER 0x0BD0
+#define GL_RGB 0x1907
+#define GL_RGBA 0x1908
+
+/* Implementation limits */
+#define GL_MAX_LIST_NESTING 0x0B31
+#define GL_MAX_EVAL_ORDER 0x0D30
+#define GL_MAX_LIGHTS 0x0D31
+#define GL_MAX_CLIP_PLANES 0x0D32
+#define GL_MAX_TEXTURE_SIZE 0x0D33
+#define GL_MAX_PIXEL_MAP_TABLE 0x0D34
+#define GL_MAX_ATTRIB_STACK_DEPTH 0x0D35
+#define GL_MAX_MODELVIEW_STACK_DEPTH 0x0D36
+#define GL_MAX_NAME_STACK_DEPTH 0x0D37
+#define GL_MAX_PROJECTION_STACK_DEPTH 0x0D38
+#define GL_MAX_TEXTURE_STACK_DEPTH 0x0D39
+#define GL_MAX_VIEWPORT_DIMS 0x0D3A
+#define GL_MAX_CLIENT_ATTRIB_STACK_DEPTH 0x0D3B
+
+/* Gets */
+#define GL_ATTRIB_STACK_DEPTH 0x0BB0
+#define GL_CLIENT_ATTRIB_STACK_DEPTH 0x0BB1
+#define GL_COLOR_CLEAR_VALUE 0x0C22
+#define GL_COLOR_WRITEMASK 0x0C23
+#define GL_CURRENT_INDEX 0x0B01
+#define GL_CURRENT_COLOR 0x0B00
+#define GL_CURRENT_NORMAL 0x0B02
+#define GL_CURRENT_RASTER_COLOR 0x0B04
+#define GL_CURRENT_RASTER_DISTANCE 0x0B09
+#define GL_CURRENT_RASTER_INDEX 0x0B05
+#define GL_CURRENT_RASTER_POSITION 0x0B07
+#define GL_CURRENT_RASTER_TEXTURE_COORDS 0x0B06
+#define GL_CURRENT_RASTER_POSITION_VALID 0x0B08
+#define GL_CURRENT_TEXTURE_COORDS 0x0B03
+#define GL_INDEX_CLEAR_VALUE 0x0C20
+#define GL_INDEX_MODE 0x0C30
+#define GL_INDEX_WRITEMASK 0x0C21
+#define GL_MODELVIEW_MATRIX 0x0BA6
+#define GL_MODELVIEW_STACK_DEPTH 0x0BA3
+#define GL_NAME_STACK_DEPTH 0x0D70
+#define GL_PROJECTION_MATRIX 0x0BA7
+#define GL_PROJECTION_STACK_DEPTH 0x0BA4
+#define GL_RENDER_MODE 0x0C40
+#define GL_RGBA_MODE 0x0C31
+#define GL_TEXTURE_MATRIX 0x0BA8
+#define GL_TEXTURE_STACK_DEPTH 0x0BA5
+#define GL_VIEWPORT 0x0BA2
+
+/* Evaluators */
+#define GL_AUTO_NORMAL 0x0D80
+#define GL_MAP1_COLOR_4 0x0D90
+#define GL_MAP1_INDEX 0x0D91
+#define GL_MAP1_NORMAL 0x0D92
+#define GL_MAP1_TEXTURE_COORD_1 0x0D93
+#define GL_MAP1_TEXTURE_COORD_2 0x0D94
+#define GL_MAP1_TEXTURE_COORD_3 0x0D95
+#define GL_MAP1_TEXTURE_COORD_4 0x0D96
+#define GL_MAP1_VERTEX_3 0x0D97
+#define GL_MAP1_VERTEX_4 0x0D98
+#define GL_MAP2_COLOR_4 0x0DB0
+#define GL_MAP2_INDEX 0x0DB1
+#define GL_MAP2_NORMAL 0x0DB2
+#define GL_MAP2_TEXTURE_COORD_1 0x0DB3
+#define GL_MAP2_TEXTURE_COORD_2 0x0DB4
+#define GL_MAP2_TEXTURE_COORD_3 0x0DB5
+#define GL_MAP2_TEXTURE_COORD_4 0x0DB6
+#define GL_MAP2_VERTEX_3 0x0DB7
+#define GL_MAP2_VERTEX_4 0x0DB8
+#define GL_MAP1_GRID_DOMAIN 0x0DD0
+#define GL_MAP1_GRID_SEGMENTS 0x0DD1
+#define GL_MAP2_GRID_DOMAIN 0x0DD2
+#define GL_MAP2_GRID_SEGMENTS 0x0DD3
+#define GL_COEFF 0x0A00
+#define GL_ORDER 0x0A01
+#define GL_DOMAIN 0x0A02
+
+/* Hints */
+#define GL_PERSPECTIVE_CORRECTION_HINT 0x0C50
+#define GL_POINT_SMOOTH_HINT 0x0C51
+#define GL_LINE_SMOOTH_HINT 0x0C52
+#define GL_POLYGON_SMOOTH_HINT 0x0C53
+#define GL_FOG_HINT 0x0C54
+#define GL_DONT_CARE 0x1100
+#define GL_FASTEST 0x1101
+#define GL_NICEST 0x1102
+
+/* Scissor box */
+#define GL_SCISSOR_BOX 0x0C10
+#define GL_SCISSOR_TEST 0x0C11
+
+/* Pixel Mode / Transfer */
+#define GL_MAP_COLOR 0x0D10
+#define GL_MAP_STENCIL 0x0D11
+#define GL_INDEX_SHIFT 0x0D12
+#define GL_INDEX_OFFSET 0x0D13
+#define GL_RED_SCALE 0x0D14
+#define GL_RED_BIAS 0x0D15
+#define GL_GREEN_SCALE 0x0D18
+#define GL_GREEN_BIAS 0x0D19
+#define GL_BLUE_SCALE 0x0D1A
+#define GL_BLUE_BIAS 0x0D1B
+#define GL_ALPHA_SCALE 0x0D1C
+#define GL_ALPHA_BIAS 0x0D1D
+#define GL_DEPTH_SCALE 0x0D1E
+#define GL_DEPTH_BIAS 0x0D1F
+#define GL_PIXEL_MAP_S_TO_S_SIZE 0x0CB1
+#define GL_PIXEL_MAP_I_TO_I_SIZE 0x0CB0
+#define GL_PIXEL_MAP_I_TO_R_SIZE 0x0CB2
+#define GL_PIXEL_MAP_I_TO_G_SIZE 0x0CB3
+#define GL_PIXEL_MAP_I_TO_B_SIZE 0x0CB4
+#define GL_PIXEL_MAP_I_TO_A_SIZE 0x0CB5
+#define GL_PIXEL_MAP_R_TO_R_SIZE 0x0CB6
+#define GL_PIXEL_MAP_G_TO_G_SIZE 0x0CB7
+#define GL_PIXEL_MAP_B_TO_B_SIZE 0x0CB8
+#define GL_PIXEL_MAP_A_TO_A_SIZE 0x0CB9
+#define GL_PIXEL_MAP_S_TO_S 0x0C71
+#define GL_PIXEL_MAP_I_TO_I 0x0C70
+#define GL_PIXEL_MAP_I_TO_R 0x0C72
+#define GL_PIXEL_MAP_I_TO_G 0x0C73
+#define GL_PIXEL_MAP_I_TO_B 0x0C74
+#define GL_PIXEL_MAP_I_TO_A 0x0C75
+#define GL_PIXEL_MAP_R_TO_R 0x0C76
+#define GL_PIXEL_MAP_G_TO_G 0x0C77
+#define GL_PIXEL_MAP_B_TO_B 0x0C78
+#define GL_PIXEL_MAP_A_TO_A 0x0C79
+#define GL_PACK_ALIGNMENT 0x0D05
+#define GL_PACK_LSB_FIRST 0x0D01
+#define GL_PACK_ROW_LENGTH 0x0D02
+#define GL_PACK_SKIP_PIXELS 0x0D04
+#define GL_PACK_SKIP_ROWS 0x0D03
+#define GL_PACK_SWAP_BYTES 0x0D00
+#define GL_UNPACK_ALIGNMENT 0x0CF5
+#define GL_UNPACK_LSB_FIRST 0x0CF1
+#define GL_UNPACK_ROW_LENGTH 0x0CF2
+#define GL_UNPACK_SKIP_PIXELS 0x0CF4
+#define GL_UNPACK_SKIP_ROWS 0x0CF3
+#define GL_UNPACK_SWAP_BYTES 0x0CF0
+#define GL_ZOOM_X 0x0D16
+#define GL_ZOOM_Y 0x0D17
+
+/* Texture mapping */
+#define GL_TEXTURE_ENV 0x2300
+#define GL_TEXTURE_ENV_MODE 0x2200
+#define GL_TEXTURE_1D 0x0DE0
+#define GL_TEXTURE_2D 0x0DE1
+#define GL_TEXTURE_WRAP_S 0x2802
+#define GL_TEXTURE_WRAP_T 0x2803
+#define GL_TEXTURE_MAG_FILTER 0x2800
+#define GL_TEXTURE_MIN_FILTER 0x2801
+#define GL_TEXTURE_ENV_COLOR 0x2201
+#define GL_TEXTURE_GEN_S 0x0C60
+#define GL_TEXTURE_GEN_T 0x0C61
+#define GL_TEXTURE_GEN_R 0x0C62
+#define GL_TEXTURE_GEN_Q 0x0C63
+#define GL_TEXTURE_GEN_MODE 0x2500
+#define GL_TEXTURE_BORDER_COLOR 0x1004
+#define GL_TEXTURE_WIDTH 0x1000
+#define GL_TEXTURE_HEIGHT 0x1001
+#define GL_TEXTURE_BORDER 0x1005
+#define GL_TEXTURE_COMPONENTS 0x1003
+#define GL_TEXTURE_RED_SIZE 0x805C
+#define GL_TEXTURE_GREEN_SIZE 0x805D
+#define GL_TEXTURE_BLUE_SIZE 0x805E
+#define GL_TEXTURE_ALPHA_SIZE 0x805F
+#define GL_TEXTURE_LUMINANCE_SIZE 0x8060
+#define GL_TEXTURE_INTENSITY_SIZE 0x8061
+#define GL_NEAREST_MIPMAP_NEAREST 0x2700
+#define GL_NEAREST_MIPMAP_LINEAR 0x2702
+#define GL_LINEAR_MIPMAP_NEAREST 0x2701
+#define GL_LINEAR_MIPMAP_LINEAR 0x2703
+#define GL_OBJECT_LINEAR 0x2401
+#define GL_OBJECT_PLANE 0x2501
+#define GL_EYE_LINEAR 0x2400
+#define GL_EYE_PLANE 0x2502
+#define GL_SPHERE_MAP 0x2402
+#define GL_DECAL 0x2101
+#define GL_MODULATE 0x2100
+#define GL_NEAREST 0x2600
+#define GL_REPEAT 0x2901
+#define GL_CLAMP 0x2900
+#define GL_S 0x2000
+#define GL_T 0x2001
+#define GL_R 0x2002
+#define GL_Q 0x2003
+
+/* Utility */
+#define GL_VENDOR 0x1F00
+#define GL_RENDERER 0x1F01
+#define GL_VERSION 0x1F02
+#define GL_EXTENSIONS 0x1F03
+
+/* Errors */
+#define GL_NO_ERROR 0
+#define GL_INVALID_ENUM 0x0500
+#define GL_INVALID_VALUE 0x0501
+#define GL_INVALID_OPERATION 0x0502
+#define GL_STACK_OVERFLOW 0x0503
+#define GL_STACK_UNDERFLOW 0x0504
+#define GL_OUT_OF_MEMORY 0x0505
+
+/* glPush/PopAttrib bits */
+#define GL_CURRENT_BIT 0x00000001
+#define GL_POINT_BIT 0x00000002
+#define GL_LINE_BIT 0x00000004
+#define GL_POLYGON_BIT 0x00000008
+#define GL_POLYGON_STIPPLE_BIT 0x00000010
+#define GL_PIXEL_MODE_BIT 0x00000020
+#define GL_LIGHTING_BIT 0x00000040
+#define GL_FOG_BIT 0x00000080
+#define GL_DEPTH_BUFFER_BIT 0x00000100
+#define GL_ACCUM_BUFFER_BIT 0x00000200
+#define GL_STENCIL_BUFFER_BIT 0x00000400
+#define GL_VIEWPORT_BIT 0x00000800
+#define GL_TRANSFORM_BIT 0x00001000
+#define GL_ENABLE_BIT 0x00002000
+#define GL_COLOR_BUFFER_BIT 0x00004000
+#define GL_HINT_BIT 0x00008000
+#define GL_EVAL_BIT 0x00010000
+#define GL_LIST_BIT 0x00020000
+#define GL_TEXTURE_BIT 0x00040000
+#define GL_SCISSOR_BIT 0x00080000
+#define GL_ALL_ATTRIB_BITS 0xFFFFFFFF
+
+
+/* OpenGL 1.1 */
+#define GL_PROXY_TEXTURE_1D 0x8063
+#define GL_PROXY_TEXTURE_2D 0x8064
+#define GL_TEXTURE_PRIORITY 0x8066
+#define GL_TEXTURE_RESIDENT 0x8067
+#define GL_TEXTURE_BINDING_1D 0x8068
+#define GL_TEXTURE_BINDING_2D 0x8069
+#define GL_TEXTURE_INTERNAL_FORMAT 0x1003
+#define GL_ALPHA4 0x803B
+#define GL_ALPHA8 0x803C
+#define GL_ALPHA12 0x803D
+#define GL_ALPHA16 0x803E
+#define GL_LUMINANCE4 0x803F
+#define GL_LUMINANCE8 0x8040
+#define GL_LUMINANCE12 0x8041
+#define GL_LUMINANCE16 0x8042
+#define GL_LUMINANCE4_ALPHA4 0x8043
+#define GL_LUMINANCE6_ALPHA2 0x8044
+#define GL_LUMINANCE8_ALPHA8 0x8045
+#define GL_LUMINANCE12_ALPHA4 0x8046
+#define GL_LUMINANCE12_ALPHA12 0x8047
+#define GL_LUMINANCE16_ALPHA16 0x8048
+#define GL_INTENSITY 0x8049
+#define GL_INTENSITY4 0x804A
+#define GL_INTENSITY8 0x804B
+#define GL_INTENSITY12 0x804C
+#define GL_INTENSITY16 0x804D
+#define GL_R3_G3_B2 0x2A10
+#define GL_RGB4 0x804F
+#define GL_RGB5 0x8050
+#define GL_RGB8 0x8051
+#define GL_RGB10 0x8052
+#define GL_RGB12 0x8053
+#define GL_RGB16 0x8054
+#define GL_RGBA2 0x8055
+#define GL_RGBA4 0x8056
+#define GL_RGB5_A1 0x8057
+#define GL_RGBA8 0x8058
+#define GL_RGB10_A2 0x8059
+#define GL_RGBA12 0x805A
+#define GL_RGBA16 0x805B
+#define GL_CLIENT_PIXEL_STORE_BIT 0x00000001
+#define GL_CLIENT_VERTEX_ARRAY_BIT 0x00000002
+#define GL_ALL_CLIENT_ATTRIB_BITS 0xFFFFFFFF
+#define GL_CLIENT_ALL_ATTRIB_BITS 0xFFFFFFFF
+
+
+
+/*
+ * Miscellaneous
+ */
+
+GLAPI void GLAPIENTRY glClearIndex( GLfloat c );
+
+GLAPI void GLAPIENTRY glClearColor( GLclampf red, GLclampf green, GLclampf blue, GLclampf alpha );
+
+GLAPI void GLAPIENTRY glClear( GLbitfield mask );
+
+GLAPI void GLAPIENTRY glIndexMask( GLuint mask );
+
+GLAPI void GLAPIENTRY glColorMask( GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha );
+
+GLAPI void GLAPIENTRY glAlphaFunc( GLenum func, GLclampf ref );
+
+GLAPI void GLAPIENTRY glBlendFunc( GLenum sfactor, GLenum dfactor );
+
+GLAPI void GLAPIENTRY glLogicOp( GLenum opcode );
+
+GLAPI void GLAPIENTRY glCullFace( GLenum mode );
+
+GLAPI void GLAPIENTRY glFrontFace( GLenum mode );
+
+GLAPI void GLAPIENTRY glPointSize( GLfloat size );
+
+GLAPI void GLAPIENTRY glLineWidth( GLfloat width );
+
+GLAPI void GLAPIENTRY glLineStipple( GLint factor, GLushort pattern );
+
+GLAPI void GLAPIENTRY glPolygonMode( GLenum face, GLenum mode );
+
+GLAPI void GLAPIENTRY glPolygonOffset( GLfloat factor, GLfloat units );
+
+GLAPI void GLAPIENTRY glPolygonStipple( const GLubyte *mask );
+
+GLAPI void GLAPIENTRY glGetPolygonStipple( GLubyte *mask );
+
+GLAPI void GLAPIENTRY glEdgeFlag( GLboolean flag );
+
+GLAPI void GLAPIENTRY glEdgeFlagv( const GLboolean *flag );
+
+GLAPI void GLAPIENTRY glScissor( GLint x, GLint y, GLsizei width, GLsizei height);
+
+GLAPI void GLAPIENTRY glClipPlane( GLenum plane, const GLdouble *equation );
+
+GLAPI void GLAPIENTRY glGetClipPlane( GLenum plane, GLdouble *equation );
+
+GLAPI void GLAPIENTRY glDrawBuffer( GLenum mode );
+
+GLAPI void GLAPIENTRY glReadBuffer( GLenum mode );
+
+GLAPI void GLAPIENTRY glEnable( GLenum cap );
+
+GLAPI void GLAPIENTRY glDisable( GLenum cap );
+
+GLAPI GLboolean GLAPIENTRY glIsEnabled( GLenum cap );
+
+
+GLAPI void GLAPIENTRY glEnableClientState( GLenum cap ); /* 1.1 */
+
+GLAPI void GLAPIENTRY glDisableClientState( GLenum cap ); /* 1.1 */
+
+
+GLAPI void GLAPIENTRY glGetBooleanv( GLenum pname, GLboolean *params );
+
+GLAPI void GLAPIENTRY glGetDoublev( GLenum pname, GLdouble *params );
+
+GLAPI void GLAPIENTRY glGetFloatv( GLenum pname, GLfloat *params );
+
+GLAPI void GLAPIENTRY glGetIntegerv( GLenum pname, GLint *params );
+
+
+GLAPI void GLAPIENTRY glPushAttrib( GLbitfield mask );
+
+GLAPI void GLAPIENTRY glPopAttrib( void );
+
+
+GLAPI void GLAPIENTRY glPushClientAttrib( GLbitfield mask ); /* 1.1 */
+
+GLAPI void GLAPIENTRY glPopClientAttrib( void ); /* 1.1 */
+
+
+GLAPI GLint GLAPIENTRY glRenderMode( GLenum mode );
+
+GLAPI GLenum GLAPIENTRY glGetError( void );
+
+GLAPI const GLubyte * GLAPIENTRY glGetString( GLenum name );
+
+GLAPI void GLAPIENTRY glFinish( void );
+
+GLAPI void GLAPIENTRY glFlush( void );
+
+GLAPI void GLAPIENTRY glHint( GLenum target, GLenum mode );
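+
+/* Usage sketch (illustrative only; assumes a current GL context created
+ * elsewhere, e.g. via GLX/WGL/EGL -- context creation is outside this API):
+ *
+ *   glClearColor(0.2f, 0.2f, 0.2f, 1.0f);
+ *   glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
+ *   glEnable(GL_DEPTH_TEST);
+ *   ...
+ *   if (glGetError() != GL_NO_ERROR) { ... handle the error ... }
+ */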
+
+
+/*
+ * Depth Buffer
+ */
+
+GLAPI void GLAPIENTRY glClearDepth( GLclampd depth );
+
+GLAPI void GLAPIENTRY glDepthFunc( GLenum func );
+
+GLAPI void GLAPIENTRY glDepthMask( GLboolean flag );
+
+GLAPI void GLAPIENTRY glDepthRange( GLclampd near_val, GLclampd far_val );
+
+
+/*
+ * Accumulation Buffer
+ */
+
+GLAPI void GLAPIENTRY glClearAccum( GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha );
+
+GLAPI void GLAPIENTRY glAccum( GLenum op, GLfloat value );
+
+
+/*
+ * Transformation
+ */
+
+GLAPI void GLAPIENTRY glMatrixMode( GLenum mode );
+
+GLAPI void GLAPIENTRY glOrtho( GLdouble left, GLdouble right,
+ GLdouble bottom, GLdouble top,
+ GLdouble near_val, GLdouble far_val );
+
+GLAPI void GLAPIENTRY glFrustum( GLdouble left, GLdouble right,
+ GLdouble bottom, GLdouble top,
+ GLdouble near_val, GLdouble far_val );
+
+GLAPI void GLAPIENTRY glViewport( GLint x, GLint y,
+ GLsizei width, GLsizei height );
+
+GLAPI void GLAPIENTRY glPushMatrix( void );
+
+GLAPI void GLAPIENTRY glPopMatrix( void );
+
+GLAPI void GLAPIENTRY glLoadIdentity( void );
+
+GLAPI void GLAPIENTRY glLoadMatrixd( const GLdouble *m );
+GLAPI void GLAPIENTRY glLoadMatrixf( const GLfloat *m );
+
+GLAPI void GLAPIENTRY glMultMatrixd( const GLdouble *m );
+GLAPI void GLAPIENTRY glMultMatrixf( const GLfloat *m );
+
+GLAPI void GLAPIENTRY glRotated( GLdouble angle,
+ GLdouble x, GLdouble y, GLdouble z );
+GLAPI void GLAPIENTRY glRotatef( GLfloat angle,
+ GLfloat x, GLfloat y, GLfloat z );
+
+GLAPI void GLAPIENTRY glScaled( GLdouble x, GLdouble y, GLdouble z );
+GLAPI void GLAPIENTRY glScalef( GLfloat x, GLfloat y, GLfloat z );
+
+GLAPI void GLAPIENTRY glTranslated( GLdouble x, GLdouble y, GLdouble z );
+GLAPI void GLAPIENTRY glTranslatef( GLfloat x, GLfloat y, GLfloat z );
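+
+/* Usage sketch (illustrative): a typical fixed-function matrix-stack setup.
+ * Assumes a current GL context; the numeric values are arbitrary examples.
+ *
+ *   glMatrixMode(GL_PROJECTION);
+ *   glLoadIdentity();
+ *   glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
+ *   glMatrixMode(GL_MODELVIEW);
+ *   glLoadIdentity();
+ *   glPushMatrix();                       /* save the current modelview */
+ *   glTranslatef(0.5f, 0.0f, 0.0f);
+ *   glRotatef(45.0f, 0.0f, 0.0f, 1.0f);   /* ... draw the object ...    */
+ *   glPopMatrix();                        /* restore                    */
+ */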
+
+
+/*
+ * Display Lists
+ */
+
+GLAPI GLboolean GLAPIENTRY glIsList( GLuint list );
+
+GLAPI void GLAPIENTRY glDeleteLists( GLuint list, GLsizei range );
+
+GLAPI GLuint GLAPIENTRY glGenLists( GLsizei range );
+
+GLAPI void GLAPIENTRY glNewList( GLuint list, GLenum mode );
+
+GLAPI void GLAPIENTRY glEndList( void );
+
+GLAPI void GLAPIENTRY glCallList( GLuint list );
+
+GLAPI void GLAPIENTRY glCallLists( GLsizei n, GLenum type,
+ const GLvoid *lists );
+
+GLAPI void GLAPIENTRY glListBase( GLuint base );
+
+
+/*
+ * Drawing Functions
+ */
+
+GLAPI void GLAPIENTRY glBegin( GLenum mode );
+
+GLAPI void GLAPIENTRY glEnd( void );
+
+
+GLAPI void GLAPIENTRY glVertex2d( GLdouble x, GLdouble y );
+GLAPI void GLAPIENTRY glVertex2f( GLfloat x, GLfloat y );
+GLAPI void GLAPIENTRY glVertex2i( GLint x, GLint y );
+GLAPI void GLAPIENTRY glVertex2s( GLshort x, GLshort y );
+
+GLAPI void GLAPIENTRY glVertex3d( GLdouble x, GLdouble y, GLdouble z );
+GLAPI void GLAPIENTRY glVertex3f( GLfloat x, GLfloat y, GLfloat z );
+GLAPI void GLAPIENTRY glVertex3i( GLint x, GLint y, GLint z );
+GLAPI void GLAPIENTRY glVertex3s( GLshort x, GLshort y, GLshort z );
+
+GLAPI void GLAPIENTRY glVertex4d( GLdouble x, GLdouble y, GLdouble z, GLdouble w );
+GLAPI void GLAPIENTRY glVertex4f( GLfloat x, GLfloat y, GLfloat z, GLfloat w );
+GLAPI void GLAPIENTRY glVertex4i( GLint x, GLint y, GLint z, GLint w );
+GLAPI void GLAPIENTRY glVertex4s( GLshort x, GLshort y, GLshort z, GLshort w );
+
+GLAPI void GLAPIENTRY glVertex2dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glVertex2fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glVertex2iv( const GLint *v );
+GLAPI void GLAPIENTRY glVertex2sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glVertex3dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glVertex3fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glVertex3iv( const GLint *v );
+GLAPI void GLAPIENTRY glVertex3sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glVertex4dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glVertex4fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glVertex4iv( const GLint *v );
+GLAPI void GLAPIENTRY glVertex4sv( const GLshort *v );
+
+
+GLAPI void GLAPIENTRY glNormal3b( GLbyte nx, GLbyte ny, GLbyte nz );
+GLAPI void GLAPIENTRY glNormal3d( GLdouble nx, GLdouble ny, GLdouble nz );
+GLAPI void GLAPIENTRY glNormal3f( GLfloat nx, GLfloat ny, GLfloat nz );
+GLAPI void GLAPIENTRY glNormal3i( GLint nx, GLint ny, GLint nz );
+GLAPI void GLAPIENTRY glNormal3s( GLshort nx, GLshort ny, GLshort nz );
+
+GLAPI void GLAPIENTRY glNormal3bv( const GLbyte *v );
+GLAPI void GLAPIENTRY glNormal3dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glNormal3fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glNormal3iv( const GLint *v );
+GLAPI void GLAPIENTRY glNormal3sv( const GLshort *v );
+
+
+GLAPI void GLAPIENTRY glIndexd( GLdouble c );
+GLAPI void GLAPIENTRY glIndexf( GLfloat c );
+GLAPI void GLAPIENTRY glIndexi( GLint c );
+GLAPI void GLAPIENTRY glIndexs( GLshort c );
+GLAPI void GLAPIENTRY glIndexub( GLubyte c ); /* 1.1 */
+
+GLAPI void GLAPIENTRY glIndexdv( const GLdouble *c );
+GLAPI void GLAPIENTRY glIndexfv( const GLfloat *c );
+GLAPI void GLAPIENTRY glIndexiv( const GLint *c );
+GLAPI void GLAPIENTRY glIndexsv( const GLshort *c );
+GLAPI void GLAPIENTRY glIndexubv( const GLubyte *c ); /* 1.1 */
+
+GLAPI void GLAPIENTRY glColor3b( GLbyte red, GLbyte green, GLbyte blue );
+GLAPI void GLAPIENTRY glColor3d( GLdouble red, GLdouble green, GLdouble blue );
+GLAPI void GLAPIENTRY glColor3f( GLfloat red, GLfloat green, GLfloat blue );
+GLAPI void GLAPIENTRY glColor3i( GLint red, GLint green, GLint blue );
+GLAPI void GLAPIENTRY glColor3s( GLshort red, GLshort green, GLshort blue );
+GLAPI void GLAPIENTRY glColor3ub( GLubyte red, GLubyte green, GLubyte blue );
+GLAPI void GLAPIENTRY glColor3ui( GLuint red, GLuint green, GLuint blue );
+GLAPI void GLAPIENTRY glColor3us( GLushort red, GLushort green, GLushort blue );
+
+GLAPI void GLAPIENTRY glColor4b( GLbyte red, GLbyte green,
+ GLbyte blue, GLbyte alpha );
+GLAPI void GLAPIENTRY glColor4d( GLdouble red, GLdouble green,
+ GLdouble blue, GLdouble alpha );
+GLAPI void GLAPIENTRY glColor4f( GLfloat red, GLfloat green,
+ GLfloat blue, GLfloat alpha );
+GLAPI void GLAPIENTRY glColor4i( GLint red, GLint green,
+ GLint blue, GLint alpha );
+GLAPI void GLAPIENTRY glColor4s( GLshort red, GLshort green,
+ GLshort blue, GLshort alpha );
+GLAPI void GLAPIENTRY glColor4ub( GLubyte red, GLubyte green,
+ GLubyte blue, GLubyte alpha );
+GLAPI void GLAPIENTRY glColor4ui( GLuint red, GLuint green,
+ GLuint blue, GLuint alpha );
+GLAPI void GLAPIENTRY glColor4us( GLushort red, GLushort green,
+ GLushort blue, GLushort alpha );
+
+
+GLAPI void GLAPIENTRY glColor3bv( const GLbyte *v );
+GLAPI void GLAPIENTRY glColor3dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glColor3fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glColor3iv( const GLint *v );
+GLAPI void GLAPIENTRY glColor3sv( const GLshort *v );
+GLAPI void GLAPIENTRY glColor3ubv( const GLubyte *v );
+GLAPI void GLAPIENTRY glColor3uiv( const GLuint *v );
+GLAPI void GLAPIENTRY glColor3usv( const GLushort *v );
+
+GLAPI void GLAPIENTRY glColor4bv( const GLbyte *v );
+GLAPI void GLAPIENTRY glColor4dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glColor4fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glColor4iv( const GLint *v );
+GLAPI void GLAPIENTRY glColor4sv( const GLshort *v );
+GLAPI void GLAPIENTRY glColor4ubv( const GLubyte *v );
+GLAPI void GLAPIENTRY glColor4uiv( const GLuint *v );
+GLAPI void GLAPIENTRY glColor4usv( const GLushort *v );
+
+
+GLAPI void GLAPIENTRY glTexCoord1d( GLdouble s );
+GLAPI void GLAPIENTRY glTexCoord1f( GLfloat s );
+GLAPI void GLAPIENTRY glTexCoord1i( GLint s );
+GLAPI void GLAPIENTRY glTexCoord1s( GLshort s );
+
+GLAPI void GLAPIENTRY glTexCoord2d( GLdouble s, GLdouble t );
+GLAPI void GLAPIENTRY glTexCoord2f( GLfloat s, GLfloat t );
+GLAPI void GLAPIENTRY glTexCoord2i( GLint s, GLint t );
+GLAPI void GLAPIENTRY glTexCoord2s( GLshort s, GLshort t );
+
+GLAPI void GLAPIENTRY glTexCoord3d( GLdouble s, GLdouble t, GLdouble r );
+GLAPI void GLAPIENTRY glTexCoord3f( GLfloat s, GLfloat t, GLfloat r );
+GLAPI void GLAPIENTRY glTexCoord3i( GLint s, GLint t, GLint r );
+GLAPI void GLAPIENTRY glTexCoord3s( GLshort s, GLshort t, GLshort r );
+
+GLAPI void GLAPIENTRY glTexCoord4d( GLdouble s, GLdouble t, GLdouble r, GLdouble q );
+GLAPI void GLAPIENTRY glTexCoord4f( GLfloat s, GLfloat t, GLfloat r, GLfloat q );
+GLAPI void GLAPIENTRY glTexCoord4i( GLint s, GLint t, GLint r, GLint q );
+GLAPI void GLAPIENTRY glTexCoord4s( GLshort s, GLshort t, GLshort r, GLshort q );
+
+GLAPI void GLAPIENTRY glTexCoord1dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glTexCoord1fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glTexCoord1iv( const GLint *v );
+GLAPI void GLAPIENTRY glTexCoord1sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glTexCoord2dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glTexCoord2fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glTexCoord2iv( const GLint *v );
+GLAPI void GLAPIENTRY glTexCoord2sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glTexCoord3dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glTexCoord3fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glTexCoord3iv( const GLint *v );
+GLAPI void GLAPIENTRY glTexCoord3sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glTexCoord4dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glTexCoord4fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glTexCoord4iv( const GLint *v );
+GLAPI void GLAPIENTRY glTexCoord4sv( const GLshort *v );
+
+
+GLAPI void GLAPIENTRY glRasterPos2d( GLdouble x, GLdouble y );
+GLAPI void GLAPIENTRY glRasterPos2f( GLfloat x, GLfloat y );
+GLAPI void GLAPIENTRY glRasterPos2i( GLint x, GLint y );
+GLAPI void GLAPIENTRY glRasterPos2s( GLshort x, GLshort y );
+
+GLAPI void GLAPIENTRY glRasterPos3d( GLdouble x, GLdouble y, GLdouble z );
+GLAPI void GLAPIENTRY glRasterPos3f( GLfloat x, GLfloat y, GLfloat z );
+GLAPI void GLAPIENTRY glRasterPos3i( GLint x, GLint y, GLint z );
+GLAPI void GLAPIENTRY glRasterPos3s( GLshort x, GLshort y, GLshort z );
+
+GLAPI void GLAPIENTRY glRasterPos4d( GLdouble x, GLdouble y, GLdouble z, GLdouble w );
+GLAPI void GLAPIENTRY glRasterPos4f( GLfloat x, GLfloat y, GLfloat z, GLfloat w );
+GLAPI void GLAPIENTRY glRasterPos4i( GLint x, GLint y, GLint z, GLint w );
+GLAPI void GLAPIENTRY glRasterPos4s( GLshort x, GLshort y, GLshort z, GLshort w );
+
+GLAPI void GLAPIENTRY glRasterPos2dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glRasterPos2fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glRasterPos2iv( const GLint *v );
+GLAPI void GLAPIENTRY glRasterPos2sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glRasterPos3dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glRasterPos3fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glRasterPos3iv( const GLint *v );
+GLAPI void GLAPIENTRY glRasterPos3sv( const GLshort *v );
+
+GLAPI void GLAPIENTRY glRasterPos4dv( const GLdouble *v );
+GLAPI void GLAPIENTRY glRasterPos4fv( const GLfloat *v );
+GLAPI void GLAPIENTRY glRasterPos4iv( const GLint *v );
+GLAPI void GLAPIENTRY glRasterPos4sv( const GLshort *v );
+
+
+GLAPI void GLAPIENTRY glRectd( GLdouble x1, GLdouble y1, GLdouble x2, GLdouble y2 );
+GLAPI void GLAPIENTRY glRectf( GLfloat x1, GLfloat y1, GLfloat x2, GLfloat y2 );
+GLAPI void GLAPIENTRY glRecti( GLint x1, GLint y1, GLint x2, GLint y2 );
+GLAPI void GLAPIENTRY glRects( GLshort x1, GLshort y1, GLshort x2, GLshort y2 );
+
+
+GLAPI void GLAPIENTRY glRectdv( const GLdouble *v1, const GLdouble *v2 );
+GLAPI void GLAPIENTRY glRectfv( const GLfloat *v1, const GLfloat *v2 );
+GLAPI void GLAPIENTRY glRectiv( const GLint *v1, const GLint *v2 );
+GLAPI void GLAPIENTRY glRectsv( const GLshort *v1, const GLshort *v2 );
+
+
+/*
+ * Vertex Arrays (1.1)
+ */
+
+GLAPI void GLAPIENTRY glVertexPointer( GLint size, GLenum type,
+ GLsizei stride, const GLvoid *ptr );
+
+GLAPI void GLAPIENTRY glNormalPointer( GLenum type, GLsizei stride,
+ const GLvoid *ptr );
+
+GLAPI void GLAPIENTRY glColorPointer( GLint size, GLenum type,
+ GLsizei stride, const GLvoid *ptr );
+
+GLAPI void GLAPIENTRY glIndexPointer( GLenum type, GLsizei stride,
+ const GLvoid *ptr );
+
+GLAPI void GLAPIENTRY glTexCoordPointer( GLint size, GLenum type,
+ GLsizei stride, const GLvoid *ptr );
+
+GLAPI void GLAPIENTRY glEdgeFlagPointer( GLsizei stride, const GLvoid *ptr );
+
+GLAPI void GLAPIENTRY glGetPointerv( GLenum pname, GLvoid **params );
+
+GLAPI void GLAPIENTRY glArrayElement( GLint i );
+
+GLAPI void GLAPIENTRY glDrawArrays( GLenum mode, GLint first, GLsizei count );
+
+GLAPI void GLAPIENTRY glDrawElements( GLenum mode, GLsizei count,
+ GLenum type, const GLvoid *indices );
+
+GLAPI void GLAPIENTRY glInterleavedArrays( GLenum format, GLsizei stride,
+ const GLvoid *pointer );
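+
+/* Usage sketch (illustrative): drawing a triangle from client-side arrays.
+ * Assumes a current GL context; `verts' is a caller-provided example array.
+ *
+ *   static const GLfloat verts[] = { 0.f, 0.f,  1.f, 0.f,  1.f, 1.f };
+ *   glEnableClientState(GL_VERTEX_ARRAY);
+ *   glVertexPointer(2, GL_FLOAT, 0, verts);
+ *   glDrawArrays(GL_TRIANGLES, 0, 3);
+ *   glDisableClientState(GL_VERTEX_ARRAY);
+ */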
+
+/*
+ * Lighting
+ */
+
+GLAPI void GLAPIENTRY glShadeModel( GLenum mode );
+
+GLAPI void GLAPIENTRY glLightf( GLenum light, GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glLighti( GLenum light, GLenum pname, GLint param );
+GLAPI void GLAPIENTRY glLightfv( GLenum light, GLenum pname,
+ const GLfloat *params );
+GLAPI void GLAPIENTRY glLightiv( GLenum light, GLenum pname,
+ const GLint *params );
+
+GLAPI void GLAPIENTRY glGetLightfv( GLenum light, GLenum pname,
+ GLfloat *params );
+GLAPI void GLAPIENTRY glGetLightiv( GLenum light, GLenum pname,
+ GLint *params );
+
+GLAPI void GLAPIENTRY glLightModelf( GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glLightModeli( GLenum pname, GLint param );
+GLAPI void GLAPIENTRY glLightModelfv( GLenum pname, const GLfloat *params );
+GLAPI void GLAPIENTRY glLightModeliv( GLenum pname, const GLint *params );
+
+GLAPI void GLAPIENTRY glMaterialf( GLenum face, GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glMateriali( GLenum face, GLenum pname, GLint param );
+GLAPI void GLAPIENTRY glMaterialfv( GLenum face, GLenum pname, const GLfloat *params );
+GLAPI void GLAPIENTRY glMaterialiv( GLenum face, GLenum pname, const GLint *params );
+
+GLAPI void GLAPIENTRY glGetMaterialfv( GLenum face, GLenum pname, GLfloat *params );
+GLAPI void GLAPIENTRY glGetMaterialiv( GLenum face, GLenum pname, GLint *params );
+
+GLAPI void GLAPIENTRY glColorMaterial( GLenum face, GLenum mode );
+
+
+/*
+ * Raster functions
+ */
+
+GLAPI void GLAPIENTRY glPixelZoom( GLfloat xfactor, GLfloat yfactor );
+
+GLAPI void GLAPIENTRY glPixelStoref( GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glPixelStorei( GLenum pname, GLint param );
+
+GLAPI void GLAPIENTRY glPixelTransferf( GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glPixelTransferi( GLenum pname, GLint param );
+
+GLAPI void GLAPIENTRY glPixelMapfv( GLenum map, GLsizei mapsize,
+ const GLfloat *values );
+GLAPI void GLAPIENTRY glPixelMapuiv( GLenum map, GLsizei mapsize,
+ const GLuint *values );
+GLAPI void GLAPIENTRY glPixelMapusv( GLenum map, GLsizei mapsize,
+ const GLushort *values );
+
+GLAPI void GLAPIENTRY glGetPixelMapfv( GLenum map, GLfloat *values );
+GLAPI void GLAPIENTRY glGetPixelMapuiv( GLenum map, GLuint *values );
+GLAPI void GLAPIENTRY glGetPixelMapusv( GLenum map, GLushort *values );
+
+GLAPI void GLAPIENTRY glBitmap( GLsizei width, GLsizei height,
+ GLfloat xorig, GLfloat yorig,
+ GLfloat xmove, GLfloat ymove,
+ const GLubyte *bitmap );
+
+GLAPI void GLAPIENTRY glReadPixels( GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ GLvoid *pixels );
+
+GLAPI void GLAPIENTRY glDrawPixels( GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid *pixels );
+
+GLAPI void GLAPIENTRY glCopyPixels( GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLenum type );
+
+/*
+ * Stenciling
+ */
+
+GLAPI void GLAPIENTRY glStencilFunc( GLenum func, GLint ref, GLuint mask );
+
+GLAPI void GLAPIENTRY glStencilMask( GLuint mask );
+
+GLAPI void GLAPIENTRY glStencilOp( GLenum fail, GLenum zfail, GLenum zpass );
+
+GLAPI void GLAPIENTRY glClearStencil( GLint s );
+
+
+
+/*
+ * Texture mapping
+ */
+
+GLAPI void GLAPIENTRY glTexGend( GLenum coord, GLenum pname, GLdouble param );
+GLAPI void GLAPIENTRY glTexGenf( GLenum coord, GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glTexGeni( GLenum coord, GLenum pname, GLint param );
+
+GLAPI void GLAPIENTRY glTexGendv( GLenum coord, GLenum pname, const GLdouble *params );
+GLAPI void GLAPIENTRY glTexGenfv( GLenum coord, GLenum pname, const GLfloat *params );
+GLAPI void GLAPIENTRY glTexGeniv( GLenum coord, GLenum pname, const GLint *params );
+
+GLAPI void GLAPIENTRY glGetTexGendv( GLenum coord, GLenum pname, GLdouble *params );
+GLAPI void GLAPIENTRY glGetTexGenfv( GLenum coord, GLenum pname, GLfloat *params );
+GLAPI void GLAPIENTRY glGetTexGeniv( GLenum coord, GLenum pname, GLint *params );
+
+
+GLAPI void GLAPIENTRY glTexEnvf( GLenum target, GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glTexEnvi( GLenum target, GLenum pname, GLint param );
+
+GLAPI void GLAPIENTRY glTexEnvfv( GLenum target, GLenum pname, const GLfloat *params );
+GLAPI void GLAPIENTRY glTexEnviv( GLenum target, GLenum pname, const GLint *params );
+
+GLAPI void GLAPIENTRY glGetTexEnvfv( GLenum target, GLenum pname, GLfloat *params );
+GLAPI void GLAPIENTRY glGetTexEnviv( GLenum target, GLenum pname, GLint *params );
+
+
+GLAPI void GLAPIENTRY glTexParameterf( GLenum target, GLenum pname, GLfloat param );
+GLAPI void GLAPIENTRY glTexParameteri( GLenum target, GLenum pname, GLint param );
+
+GLAPI void GLAPIENTRY glTexParameterfv( GLenum target, GLenum pname,
+ const GLfloat *params );
+GLAPI void GLAPIENTRY glTexParameteriv( GLenum target, GLenum pname,
+ const GLint *params );
+
+GLAPI void GLAPIENTRY glGetTexParameterfv( GLenum target,
+ GLenum pname, GLfloat *params);
+GLAPI void GLAPIENTRY glGetTexParameteriv( GLenum target,
+ GLenum pname, GLint *params );
+
+GLAPI void GLAPIENTRY glGetTexLevelParameterfv( GLenum target, GLint level,
+ GLenum pname, GLfloat *params );
+GLAPI void GLAPIENTRY glGetTexLevelParameteriv( GLenum target, GLint level,
+ GLenum pname, GLint *params );
+
+
+GLAPI void GLAPIENTRY glTexImage1D( GLenum target, GLint level,
+ GLint internalFormat,
+ GLsizei width, GLint border,
+ GLenum format, GLenum type,
+ const GLvoid *pixels );
+
+GLAPI void GLAPIENTRY glTexImage2D( GLenum target, GLint level,
+ GLint internalFormat,
+ GLsizei width, GLsizei height,
+ GLint border, GLenum format, GLenum type,
+ const GLvoid *pixels );
+
+GLAPI void GLAPIENTRY glGetTexImage( GLenum target, GLint level,
+ GLenum format, GLenum type,
+ GLvoid *pixels );
+
+
+/* 1.1 functions */
+
+GLAPI void GLAPIENTRY glGenTextures( GLsizei n, GLuint *textures );
+
+GLAPI void GLAPIENTRY glDeleteTextures( GLsizei n, const GLuint *textures);
+
+GLAPI void GLAPIENTRY glBindTexture( GLenum target, GLuint texture );
+
+GLAPI void GLAPIENTRY glPrioritizeTextures( GLsizei n,
+ const GLuint *textures,
+ const GLclampf *priorities );
+
+GLAPI GLboolean GLAPIENTRY glAreTexturesResident( GLsizei n,
+ const GLuint *textures,
+ GLboolean *residences );
+
+GLAPI GLboolean GLAPIENTRY glIsTexture( GLuint texture );
+
+
+GLAPI void GLAPIENTRY glTexSubImage1D( GLenum target, GLint level,
+ GLint xoffset,
+ GLsizei width, GLenum format,
+ GLenum type, const GLvoid *pixels );
+
+
+GLAPI void GLAPIENTRY glTexSubImage2D( GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid *pixels );
+
+
+GLAPI void GLAPIENTRY glCopyTexImage1D( GLenum target, GLint level,
+ GLenum internalformat,
+ GLint x, GLint y,
+ GLsizei width, GLint border );
+
+
+GLAPI void GLAPIENTRY glCopyTexImage2D( GLenum target, GLint level,
+ GLenum internalformat,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height,
+ GLint border );
+
+
+GLAPI void GLAPIENTRY glCopyTexSubImage1D( GLenum target, GLint level,
+ GLint xoffset, GLint x, GLint y,
+ GLsizei width );
+
+
+GLAPI void GLAPIENTRY glCopyTexSubImage2D( GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height );
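+
+/* Usage sketch (illustrative): creating and specifying a texture object
+ * with the 1.1 API. Assumes a current GL context; `pixels' stands for a
+ * caller-provided 64x64 RGBA buffer (64*64*4 bytes).
+ *
+ *   GLuint tex;
+ *   glGenTextures(1, &tex);
+ *   glBindTexture(GL_TEXTURE_2D, tex);
+ *   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
+ *   glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+ *   glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 64, 64, 0,
+ *                GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+ */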
+
+
+/*
+ * Evaluators
+ */
+
+GLAPI void GLAPIENTRY glMap1d( GLenum target, GLdouble u1, GLdouble u2,
+ GLint stride,
+ GLint order, const GLdouble *points );
+GLAPI void GLAPIENTRY glMap1f( GLenum target, GLfloat u1, GLfloat u2,
+ GLint stride,
+ GLint order, const GLfloat *points );
+
+GLAPI void GLAPIENTRY glMap2d( GLenum target,
+ GLdouble u1, GLdouble u2, GLint ustride, GLint uorder,
+ GLdouble v1, GLdouble v2, GLint vstride, GLint vorder,
+ const GLdouble *points );
+GLAPI void GLAPIENTRY glMap2f( GLenum target,
+ GLfloat u1, GLfloat u2, GLint ustride, GLint uorder,
+ GLfloat v1, GLfloat v2, GLint vstride, GLint vorder,
+ const GLfloat *points );
+
+GLAPI void GLAPIENTRY glGetMapdv( GLenum target, GLenum query, GLdouble *v );
+GLAPI void GLAPIENTRY glGetMapfv( GLenum target, GLenum query, GLfloat *v );
+GLAPI void GLAPIENTRY glGetMapiv( GLenum target, GLenum query, GLint *v );
+
+GLAPI void GLAPIENTRY glEvalCoord1d( GLdouble u );
+GLAPI void GLAPIENTRY glEvalCoord1f( GLfloat u );
+
+GLAPI void GLAPIENTRY glEvalCoord1dv( const GLdouble *u );
+GLAPI void GLAPIENTRY glEvalCoord1fv( const GLfloat *u );
+
+GLAPI void GLAPIENTRY glEvalCoord2d( GLdouble u, GLdouble v );
+GLAPI void GLAPIENTRY glEvalCoord2f( GLfloat u, GLfloat v );
+
+GLAPI void GLAPIENTRY glEvalCoord2dv( const GLdouble *u );
+GLAPI void GLAPIENTRY glEvalCoord2fv( const GLfloat *u );
+
+GLAPI void GLAPIENTRY glMapGrid1d( GLint un, GLdouble u1, GLdouble u2 );
+GLAPI void GLAPIENTRY glMapGrid1f( GLint un, GLfloat u1, GLfloat u2 );
+
+GLAPI void GLAPIENTRY glMapGrid2d( GLint un, GLdouble u1, GLdouble u2,
+ GLint vn, GLdouble v1, GLdouble v2 );
+GLAPI void GLAPIENTRY glMapGrid2f( GLint un, GLfloat u1, GLfloat u2,
+ GLint vn, GLfloat v1, GLfloat v2 );
+
+GLAPI void GLAPIENTRY glEvalPoint1( GLint i );
+
+GLAPI void GLAPIENTRY glEvalPoint2( GLint i, GLint j );
+
+GLAPI void GLAPIENTRY glEvalMesh1( GLenum mode, GLint i1, GLint i2 );
+
+GLAPI void GLAPIENTRY glEvalMesh2( GLenum mode, GLint i1, GLint i2, GLint j1, GLint j2 );
+
+
+/*
+ * Fog
+ */
+
+GLAPI void GLAPIENTRY glFogf( GLenum pname, GLfloat param );
+
+GLAPI void GLAPIENTRY glFogi( GLenum pname, GLint param );
+
+GLAPI void GLAPIENTRY glFogfv( GLenum pname, const GLfloat *params );
+
+GLAPI void GLAPIENTRY glFogiv( GLenum pname, const GLint *params );
+
+
+/*
+ * Selection and Feedback
+ */
+
+GLAPI void GLAPIENTRY glFeedbackBuffer( GLsizei size, GLenum type, GLfloat *buffer );
+
+GLAPI void GLAPIENTRY glPassThrough( GLfloat token );
+
+GLAPI void GLAPIENTRY glSelectBuffer( GLsizei size, GLuint *buffer );
+
+GLAPI void GLAPIENTRY glInitNames( void );
+
+GLAPI void GLAPIENTRY glLoadName( GLuint name );
+
+GLAPI void GLAPIENTRY glPushName( GLuint name );
+
+GLAPI void GLAPIENTRY glPopName( void );
+
+
+
+/*
+ * OpenGL 1.2
+ */
+
+#define GL_RESCALE_NORMAL 0x803A
+#define GL_CLAMP_TO_EDGE 0x812F
+#define GL_MAX_ELEMENTS_VERTICES 0x80E8
+#define GL_MAX_ELEMENTS_INDICES 0x80E9
+#define GL_BGR 0x80E0
+#define GL_BGRA 0x80E1
+#define GL_UNSIGNED_BYTE_3_3_2 0x8032
+#define GL_UNSIGNED_BYTE_2_3_3_REV 0x8362
+#define GL_UNSIGNED_SHORT_5_6_5 0x8363
+#define GL_UNSIGNED_SHORT_5_6_5_REV 0x8364
+#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GL_UNSIGNED_SHORT_4_4_4_4_REV 0x8365
+#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366
+#define GL_UNSIGNED_INT_8_8_8_8 0x8035
+#define GL_UNSIGNED_INT_8_8_8_8_REV 0x8367
+#define GL_UNSIGNED_INT_10_10_10_2 0x8036
+#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368
+#define GL_LIGHT_MODEL_COLOR_CONTROL 0x81F8
+#define GL_SINGLE_COLOR 0x81F9
+#define GL_SEPARATE_SPECULAR_COLOR 0x81FA
+#define GL_TEXTURE_MIN_LOD 0x813A
+#define GL_TEXTURE_MAX_LOD 0x813B
+#define GL_TEXTURE_BASE_LEVEL 0x813C
+#define GL_TEXTURE_MAX_LEVEL 0x813D
+#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12
+#define GL_SMOOTH_POINT_SIZE_GRANULARITY 0x0B13
+#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22
+#define GL_SMOOTH_LINE_WIDTH_GRANULARITY 0x0B23
+#define GL_ALIASED_POINT_SIZE_RANGE 0x846D
+#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GL_PACK_SKIP_IMAGES 0x806B
+#define GL_PACK_IMAGE_HEIGHT 0x806C
+#define GL_UNPACK_SKIP_IMAGES 0x806D
+#define GL_UNPACK_IMAGE_HEIGHT 0x806E
+#define GL_TEXTURE_3D 0x806F
+#define GL_PROXY_TEXTURE_3D 0x8070
+#define GL_TEXTURE_DEPTH 0x8071
+#define GL_TEXTURE_WRAP_R 0x8072
+#define GL_MAX_3D_TEXTURE_SIZE 0x8073
+#define GL_TEXTURE_BINDING_3D 0x806A
+
+GLAPI void GLAPIENTRY glDrawRangeElements( GLenum mode, GLuint start,
+ GLuint end, GLsizei count, GLenum type, const GLvoid *indices );
+
+GLAPI void GLAPIENTRY glTexImage3D( GLenum target, GLint level,
+ GLint internalFormat,
+ GLsizei width, GLsizei height,
+ GLsizei depth, GLint border,
+ GLenum format, GLenum type,
+ const GLvoid *pixels );
+
+GLAPI void GLAPIENTRY glTexSubImage3D( GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint zoffset, GLsizei width,
+ GLsizei height, GLsizei depth,
+ GLenum format,
+ GLenum type, const GLvoid *pixels);
+
+GLAPI void GLAPIENTRY glCopyTexSubImage3D( GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLint zoffset, GLint x,
+ GLint y, GLsizei width,
+ GLsizei height );
+
+typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const GLvoid *indices);
+typedef void (APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const GLvoid *pixels);
+typedef void (APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const GLvoid *pixels);
+typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
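+
+/* Usage sketch (illustrative): the PFN...PROC typedefs above exist so that
+ * applications can resolve 1.2+ entry points at run time instead of linking
+ * them directly. The loader call is platform-specific; glXGetProcAddress is
+ * assumed here (X11; Windows uses wglGetProcAddress). w, h, d and texels
+ * stand for caller-provided values.
+ *
+ *   PFNGLTEXIMAGE3DPROC p_glTexImage3D = (PFNGLTEXIMAGE3DPROC)
+ *       glXGetProcAddress((const GLubyte *) "glTexImage3D");
+ *   if (p_glTexImage3D)
+ *       p_glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA8, w, h, d, 0,
+ *                      GL_RGBA, GL_UNSIGNED_BYTE, texels);
+ */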
+
+
+/*
+ * GL_ARB_imaging
+ */
+
+#define GL_CONSTANT_COLOR 0x8001
+#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GL_CONSTANT_ALPHA 0x8003
+#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GL_COLOR_TABLE 0x80D0
+#define GL_POST_CONVOLUTION_COLOR_TABLE 0x80D1
+#define GL_POST_COLOR_MATRIX_COLOR_TABLE 0x80D2
+#define GL_PROXY_COLOR_TABLE 0x80D3
+#define GL_PROXY_POST_CONVOLUTION_COLOR_TABLE 0x80D4
+#define GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE 0x80D5
+#define GL_COLOR_TABLE_SCALE 0x80D6
+#define GL_COLOR_TABLE_BIAS 0x80D7
+#define GL_COLOR_TABLE_FORMAT 0x80D8
+#define GL_COLOR_TABLE_WIDTH 0x80D9
+#define GL_COLOR_TABLE_RED_SIZE 0x80DA
+#define GL_COLOR_TABLE_GREEN_SIZE 0x80DB
+#define GL_COLOR_TABLE_BLUE_SIZE 0x80DC
+#define GL_COLOR_TABLE_ALPHA_SIZE 0x80DD
+#define GL_COLOR_TABLE_LUMINANCE_SIZE 0x80DE
+#define GL_COLOR_TABLE_INTENSITY_SIZE 0x80DF
+#define GL_CONVOLUTION_1D 0x8010
+#define GL_CONVOLUTION_2D 0x8011
+#define GL_SEPARABLE_2D 0x8012
+#define GL_CONVOLUTION_BORDER_MODE 0x8013
+#define GL_CONVOLUTION_FILTER_SCALE 0x8014
+#define GL_CONVOLUTION_FILTER_BIAS 0x8015
+#define GL_REDUCE 0x8016
+#define GL_CONVOLUTION_FORMAT 0x8017
+#define GL_CONVOLUTION_WIDTH 0x8018
+#define GL_CONVOLUTION_HEIGHT 0x8019
+#define GL_MAX_CONVOLUTION_WIDTH 0x801A
+#define GL_MAX_CONVOLUTION_HEIGHT 0x801B
+#define GL_POST_CONVOLUTION_RED_SCALE 0x801C
+#define GL_POST_CONVOLUTION_GREEN_SCALE 0x801D
+#define GL_POST_CONVOLUTION_BLUE_SCALE 0x801E
+#define GL_POST_CONVOLUTION_ALPHA_SCALE 0x801F
+#define GL_POST_CONVOLUTION_RED_BIAS 0x8020
+#define GL_POST_CONVOLUTION_GREEN_BIAS 0x8021
+#define GL_POST_CONVOLUTION_BLUE_BIAS 0x8022
+#define GL_POST_CONVOLUTION_ALPHA_BIAS 0x8023
+#define GL_CONSTANT_BORDER 0x8151
+#define GL_REPLICATE_BORDER 0x8153
+#define GL_CONVOLUTION_BORDER_COLOR 0x8154
+#define GL_COLOR_MATRIX 0x80B1
+#define GL_COLOR_MATRIX_STACK_DEPTH 0x80B2
+#define GL_MAX_COLOR_MATRIX_STACK_DEPTH 0x80B3
+#define GL_POST_COLOR_MATRIX_RED_SCALE 0x80B4
+#define GL_POST_COLOR_MATRIX_GREEN_SCALE 0x80B5
+#define GL_POST_COLOR_MATRIX_BLUE_SCALE 0x80B6
+#define GL_POST_COLOR_MATRIX_ALPHA_SCALE 0x80B7
+#define GL_POST_COLOR_MATRIX_RED_BIAS 0x80B8
+#define GL_POST_COLOR_MATRIX_GREEN_BIAS 0x80B9
+#define GL_POST_COLOR_MATRIX_BLUE_BIAS 0x80BA
+#define GL_POST_COLOR_MATRIX_ALPHA_BIAS 0x80BB
+#define GL_HISTOGRAM 0x8024
+#define GL_PROXY_HISTOGRAM 0x8025
+#define GL_HISTOGRAM_WIDTH 0x8026
+#define GL_HISTOGRAM_FORMAT 0x8027
+#define GL_HISTOGRAM_RED_SIZE 0x8028
+#define GL_HISTOGRAM_GREEN_SIZE 0x8029
+#define GL_HISTOGRAM_BLUE_SIZE 0x802A
+#define GL_HISTOGRAM_ALPHA_SIZE 0x802B
+#define GL_HISTOGRAM_LUMINANCE_SIZE 0x802C
+#define GL_HISTOGRAM_SINK 0x802D
+#define GL_MINMAX 0x802E
+#define GL_MINMAX_FORMAT 0x802F
+#define GL_MINMAX_SINK 0x8030
+#define GL_TABLE_TOO_LARGE 0x8031
+#define GL_BLEND_EQUATION 0x8009
+#define GL_MIN 0x8007
+#define GL_MAX 0x8008
+#define GL_FUNC_ADD 0x8006
+#define GL_FUNC_SUBTRACT 0x800A
+#define GL_FUNC_REVERSE_SUBTRACT 0x800B
+#define GL_BLEND_COLOR 0x8005
+
+
+GLAPI void GLAPIENTRY glColorTable( GLenum target, GLenum internalformat,
+ GLsizei width, GLenum format,
+ GLenum type, const GLvoid *table );
+
+GLAPI void GLAPIENTRY glColorSubTable( GLenum target,
+ GLsizei start, GLsizei count,
+ GLenum format, GLenum type,
+ const GLvoid *data );
+
+GLAPI void GLAPIENTRY glColorTableParameteriv(GLenum target, GLenum pname,
+ const GLint *params);
+
+GLAPI void GLAPIENTRY glColorTableParameterfv(GLenum target, GLenum pname,
+ const GLfloat *params);
+
+GLAPI void GLAPIENTRY glCopyColorSubTable( GLenum target, GLsizei start,
+ GLint x, GLint y, GLsizei width );
+
+GLAPI void GLAPIENTRY glCopyColorTable( GLenum target, GLenum internalformat,
+ GLint x, GLint y, GLsizei width );
+
+GLAPI void GLAPIENTRY glGetColorTable( GLenum target, GLenum format,
+ GLenum type, GLvoid *table );
+
+GLAPI void GLAPIENTRY glGetColorTableParameterfv( GLenum target, GLenum pname,
+ GLfloat *params );
+
+GLAPI void GLAPIENTRY glGetColorTableParameteriv( GLenum target, GLenum pname,
+ GLint *params );
+
+GLAPI void GLAPIENTRY glBlendEquation( GLenum mode );
+
+GLAPI void GLAPIENTRY glBlendColor( GLclampf red, GLclampf green,
+ GLclampf blue, GLclampf alpha );
+
+GLAPI void GLAPIENTRY glHistogram( GLenum target, GLsizei width,
+ GLenum internalformat, GLboolean sink );
+
+GLAPI void GLAPIENTRY glResetHistogram( GLenum target );
+
+GLAPI void GLAPIENTRY glGetHistogram( GLenum target, GLboolean reset,
+ GLenum format, GLenum type,
+ GLvoid *values );
+
+GLAPI void GLAPIENTRY glGetHistogramParameterfv( GLenum target, GLenum pname,
+ GLfloat *params );
+
+GLAPI void GLAPIENTRY glGetHistogramParameteriv( GLenum target, GLenum pname,
+ GLint *params );
+
+GLAPI void GLAPIENTRY glMinmax( GLenum target, GLenum internalformat,
+ GLboolean sink );
+
+GLAPI void GLAPIENTRY glResetMinmax( GLenum target );
+
+GLAPI void GLAPIENTRY glGetMinmax( GLenum target, GLboolean reset,
+                                   GLenum format, GLenum type,
+ GLvoid *values );
+
+GLAPI void GLAPIENTRY glGetMinmaxParameterfv( GLenum target, GLenum pname,
+ GLfloat *params );
+
+GLAPI void GLAPIENTRY glGetMinmaxParameteriv( GLenum target, GLenum pname,
+ GLint *params );
+
+GLAPI void GLAPIENTRY glConvolutionFilter1D( GLenum target,
+ GLenum internalformat, GLsizei width, GLenum format, GLenum type,
+ const GLvoid *image );
+
+GLAPI void GLAPIENTRY glConvolutionFilter2D( GLenum target,
+ GLenum internalformat, GLsizei width, GLsizei height, GLenum format,
+ GLenum type, const GLvoid *image );
+
+GLAPI void GLAPIENTRY glConvolutionParameterf( GLenum target, GLenum pname,
+ GLfloat params );
+
+GLAPI void GLAPIENTRY glConvolutionParameterfv( GLenum target, GLenum pname,
+ const GLfloat *params );
+
+GLAPI void GLAPIENTRY glConvolutionParameteri( GLenum target, GLenum pname,
+ GLint params );
+
+GLAPI void GLAPIENTRY glConvolutionParameteriv( GLenum target, GLenum pname,
+ const GLint *params );
+
+GLAPI void GLAPIENTRY glCopyConvolutionFilter1D( GLenum target,
+ GLenum internalformat, GLint x, GLint y, GLsizei width );
+
+GLAPI void GLAPIENTRY glCopyConvolutionFilter2D( GLenum target,
+ GLenum internalformat, GLint x, GLint y, GLsizei width,
+ GLsizei height);
+
+GLAPI void GLAPIENTRY glGetConvolutionFilter( GLenum target, GLenum format,
+ GLenum type, GLvoid *image );
+
+GLAPI void GLAPIENTRY glGetConvolutionParameterfv( GLenum target, GLenum pname,
+ GLfloat *params );
+
+GLAPI void GLAPIENTRY glGetConvolutionParameteriv( GLenum target, GLenum pname,
+ GLint *params );
+
+GLAPI void GLAPIENTRY glSeparableFilter2D( GLenum target,
+ GLenum internalformat, GLsizei width, GLsizei height, GLenum format,
+ GLenum type, const GLvoid *row, const GLvoid *column );
+
+GLAPI void GLAPIENTRY glGetSeparableFilter( GLenum target, GLenum format,
+ GLenum type, GLvoid *row, GLvoid *column, GLvoid *span );
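+
+/* Usage sketch (illustrative): constant-color blending from GL_ARB_imaging.
+ * Assumes a current GL context and an implementation that advertises
+ * GL_ARB_imaging (check the extension string before relying on it).
+ *
+ *   glEnable(GL_BLEND);
+ *   glBlendEquation(GL_FUNC_ADD);
+ *   glBlendFunc(GL_CONSTANT_ALPHA, GL_ONE_MINUS_CONSTANT_ALPHA);
+ *   glBlendColor(0.0f, 0.0f, 0.0f, 0.25f);   /* 25% source, 75% dest */
+ */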
+
+
+
+
+/*
+ * OpenGL 1.3
+ */
+
+/* multitexture */
+#define GL_TEXTURE0 0x84C0
+#define GL_TEXTURE1 0x84C1
+#define GL_TEXTURE2 0x84C2
+#define GL_TEXTURE3 0x84C3
+#define GL_TEXTURE4 0x84C4
+#define GL_TEXTURE5 0x84C5
+#define GL_TEXTURE6 0x84C6
+#define GL_TEXTURE7 0x84C7
+#define GL_TEXTURE8 0x84C8
+#define GL_TEXTURE9 0x84C9
+#define GL_TEXTURE10 0x84CA
+#define GL_TEXTURE11 0x84CB
+#define GL_TEXTURE12 0x84CC
+#define GL_TEXTURE13 0x84CD
+#define GL_TEXTURE14 0x84CE
+#define GL_TEXTURE15 0x84CF
+#define GL_TEXTURE16 0x84D0
+#define GL_TEXTURE17 0x84D1
+#define GL_TEXTURE18 0x84D2
+#define GL_TEXTURE19 0x84D3
+#define GL_TEXTURE20 0x84D4
+#define GL_TEXTURE21 0x84D5
+#define GL_TEXTURE22 0x84D6
+#define GL_TEXTURE23 0x84D7
+#define GL_TEXTURE24 0x84D8
+#define GL_TEXTURE25 0x84D9
+#define GL_TEXTURE26 0x84DA
+#define GL_TEXTURE27 0x84DB
+#define GL_TEXTURE28 0x84DC
+#define GL_TEXTURE29 0x84DD
+#define GL_TEXTURE30 0x84DE
+#define GL_TEXTURE31 0x84DF
+#define GL_ACTIVE_TEXTURE 0x84E0
+#define GL_CLIENT_ACTIVE_TEXTURE 0x84E1
+#define GL_MAX_TEXTURE_UNITS 0x84E2
+/* texture_cube_map */
+#define GL_NORMAL_MAP 0x8511
+#define GL_REFLECTION_MAP 0x8512
+#define GL_TEXTURE_CUBE_MAP 0x8513
+#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GL_PROXY_TEXTURE_CUBE_MAP 0x851B
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+/* texture_compression */
+#define GL_COMPRESSED_ALPHA 0x84E9
+#define GL_COMPRESSED_LUMINANCE 0x84EA
+#define GL_COMPRESSED_LUMINANCE_ALPHA 0x84EB
+#define GL_COMPRESSED_INTENSITY 0x84EC
+#define GL_COMPRESSED_RGB 0x84ED
+#define GL_COMPRESSED_RGBA 0x84EE
+#define GL_TEXTURE_COMPRESSION_HINT 0x84EF
+#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE 0x86A0
+#define GL_TEXTURE_COMPRESSED 0x86A1
+#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+/* multisample */
+#define GL_MULTISAMPLE 0x809D
+#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GL_SAMPLE_ALPHA_TO_ONE 0x809F
+#define GL_SAMPLE_COVERAGE 0x80A0
+#define GL_SAMPLE_BUFFERS 0x80A8
+#define GL_SAMPLES 0x80A9
+#define GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GL_MULTISAMPLE_BIT 0x20000000
+/* transpose_matrix */
+#define GL_TRANSPOSE_MODELVIEW_MATRIX 0x84E3
+#define GL_TRANSPOSE_PROJECTION_MATRIX 0x84E4
+#define GL_TRANSPOSE_TEXTURE_MATRIX 0x84E5
+#define GL_TRANSPOSE_COLOR_MATRIX 0x84E6
+/* texture_env_combine */
+#define GL_COMBINE 0x8570
+#define GL_COMBINE_RGB 0x8571
+#define GL_COMBINE_ALPHA 0x8572
+#define GL_SOURCE0_RGB 0x8580
+#define GL_SOURCE1_RGB 0x8581
+#define GL_SOURCE2_RGB 0x8582
+#define GL_SOURCE0_ALPHA 0x8588
+#define GL_SOURCE1_ALPHA 0x8589
+#define GL_SOURCE2_ALPHA 0x858A
+#define GL_OPERAND0_RGB 0x8590
+#define GL_OPERAND1_RGB 0x8591
+#define GL_OPERAND2_RGB 0x8592
+#define GL_OPERAND0_ALPHA 0x8598
+#define GL_OPERAND1_ALPHA 0x8599
+#define GL_OPERAND2_ALPHA 0x859A
+#define GL_RGB_SCALE 0x8573
+#define GL_ADD_SIGNED 0x8574
+#define GL_INTERPOLATE 0x8575
+#define GL_SUBTRACT 0x84E7
+#define GL_CONSTANT 0x8576
+#define GL_PRIMARY_COLOR 0x8577
+#define GL_PREVIOUS 0x8578
+/* texture_env_dot3 */
+#define GL_DOT3_RGB 0x86AE
+#define GL_DOT3_RGBA 0x86AF
+/* texture_border_clamp */
+#define GL_CLAMP_TO_BORDER 0x812D
+
+GLAPI void GLAPIENTRY glActiveTexture( GLenum texture );
+
+GLAPI void GLAPIENTRY glClientActiveTexture( GLenum texture );
+
+GLAPI void GLAPIENTRY glCompressedTexImage1D( GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *data );
+
+GLAPI void GLAPIENTRY glCompressedTexImage2D( GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *data );
+
+GLAPI void GLAPIENTRY glCompressedTexImage3D( GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *data );
+
+GLAPI void GLAPIENTRY glCompressedTexSubImage1D( GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *data );
+
+GLAPI void GLAPIENTRY glCompressedTexSubImage2D( GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *data );
+
+GLAPI void GLAPIENTRY glCompressedTexSubImage3D( GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *data );
+
+GLAPI void GLAPIENTRY glGetCompressedTexImage( GLenum target, GLint lod, GLvoid *img );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1d( GLenum target, GLdouble s );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1dv( GLenum target, const GLdouble *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1f( GLenum target, GLfloat s );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1fv( GLenum target, const GLfloat *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1i( GLenum target, GLint s );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1iv( GLenum target, const GLint *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1s( GLenum target, GLshort s );
+
+GLAPI void GLAPIENTRY glMultiTexCoord1sv( GLenum target, const GLshort *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2d( GLenum target, GLdouble s, GLdouble t );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2dv( GLenum target, const GLdouble *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2f( GLenum target, GLfloat s, GLfloat t );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2fv( GLenum target, const GLfloat *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2i( GLenum target, GLint s, GLint t );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2iv( GLenum target, const GLint *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2s( GLenum target, GLshort s, GLshort t );
+
+GLAPI void GLAPIENTRY glMultiTexCoord2sv( GLenum target, const GLshort *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3d( GLenum target, GLdouble s, GLdouble t, GLdouble r );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3dv( GLenum target, const GLdouble *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3f( GLenum target, GLfloat s, GLfloat t, GLfloat r );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3fv( GLenum target, const GLfloat *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3i( GLenum target, GLint s, GLint t, GLint r );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3iv( GLenum target, const GLint *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3s( GLenum target, GLshort s, GLshort t, GLshort r );
+
+GLAPI void GLAPIENTRY glMultiTexCoord3sv( GLenum target, const GLshort *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4d( GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4dv( GLenum target, const GLdouble *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4f( GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4fv( GLenum target, const GLfloat *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4i( GLenum target, GLint s, GLint t, GLint r, GLint q );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4iv( GLenum target, const GLint *v );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4s( GLenum target, GLshort s, GLshort t, GLshort r, GLshort q );
+
+GLAPI void GLAPIENTRY glMultiTexCoord4sv( GLenum target, const GLshort *v );
+
+
+GLAPI void GLAPIENTRY glLoadTransposeMatrixd( const GLdouble m[16] );
+
+GLAPI void GLAPIENTRY glLoadTransposeMatrixf( const GLfloat m[16] );
+
+GLAPI void GLAPIENTRY glMultTransposeMatrixd( const GLdouble m[16] );
+
+GLAPI void GLAPIENTRY glMultTransposeMatrixf( const GLfloat m[16] );
+
+GLAPI void GLAPIENTRY glSampleCoverage( GLclampf value, GLboolean invert );
+
+
+typedef void (APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLclampf value, GLboolean invert);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const GLvoid *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const GLvoid *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE1DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const GLvoid *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const GLvoid *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const GLvoid *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const GLvoid *data);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXIMAGEPROC) (GLenum target, GLint level, GLvoid *img);
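+
+/* Usage sketch (illustrative): fixed-function multitexturing with two
+ * units. Assumes a current GL context; base_tex and detail_tex stand for
+ * previously created texture objects.
+ *
+ *   glActiveTexture(GL_TEXTURE0);
+ *   glBindTexture(GL_TEXTURE_2D, base_tex);
+ *   glEnable(GL_TEXTURE_2D);
+ *   glActiveTexture(GL_TEXTURE1);
+ *   glBindTexture(GL_TEXTURE_2D, detail_tex);
+ *   glEnable(GL_TEXTURE_2D);
+ *   glBegin(GL_TRIANGLES);
+ *   glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 0.0f);
+ *   glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 0.0f);
+ *   glVertex2f(-1.0f, -1.0f);
+ *   ... (remaining vertices) ...
+ *   glEnd();
+ */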
+
+
+
+/*
+ * GL_ARB_multitexture (ARB extension 1 and OpenGL 1.2.1)
+ */
+#ifndef GL_ARB_multitexture
+#define GL_ARB_multitexture 1
+
+#define GL_TEXTURE0_ARB 0x84C0
+#define GL_TEXTURE1_ARB 0x84C1
+#define GL_TEXTURE2_ARB 0x84C2
+#define GL_TEXTURE3_ARB 0x84C3
+#define GL_TEXTURE4_ARB 0x84C4
+#define GL_TEXTURE5_ARB 0x84C5
+#define GL_TEXTURE6_ARB 0x84C6
+#define GL_TEXTURE7_ARB 0x84C7
+#define GL_TEXTURE8_ARB 0x84C8
+#define GL_TEXTURE9_ARB 0x84C9
+#define GL_TEXTURE10_ARB 0x84CA
+#define GL_TEXTURE11_ARB 0x84CB
+#define GL_TEXTURE12_ARB 0x84CC
+#define GL_TEXTURE13_ARB 0x84CD
+#define GL_TEXTURE14_ARB 0x84CE
+#define GL_TEXTURE15_ARB 0x84CF
+#define GL_TEXTURE16_ARB 0x84D0
+#define GL_TEXTURE17_ARB 0x84D1
+#define GL_TEXTURE18_ARB 0x84D2
+#define GL_TEXTURE19_ARB 0x84D3
+#define GL_TEXTURE20_ARB 0x84D4
+#define GL_TEXTURE21_ARB 0x84D5
+#define GL_TEXTURE22_ARB 0x84D6
+#define GL_TEXTURE23_ARB 0x84D7
+#define GL_TEXTURE24_ARB 0x84D8
+#define GL_TEXTURE25_ARB 0x84D9
+#define GL_TEXTURE26_ARB 0x84DA
+#define GL_TEXTURE27_ARB 0x84DB
+#define GL_TEXTURE28_ARB 0x84DC
+#define GL_TEXTURE29_ARB 0x84DD
+#define GL_TEXTURE30_ARB 0x84DE
+#define GL_TEXTURE31_ARB 0x84DF
+#define GL_ACTIVE_TEXTURE_ARB 0x84E0
+#define GL_CLIENT_ACTIVE_TEXTURE_ARB 0x84E1
+#define GL_MAX_TEXTURE_UNITS_ARB 0x84E2
+
+GLAPI void GLAPIENTRY glActiveTextureARB(GLenum texture);
+GLAPI void GLAPIENTRY glClientActiveTextureARB(GLenum texture);
+GLAPI void GLAPIENTRY glMultiTexCoord1dARB(GLenum target, GLdouble s);
+GLAPI void GLAPIENTRY glMultiTexCoord1dvARB(GLenum target, const GLdouble *v);
+GLAPI void GLAPIENTRY glMultiTexCoord1fARB(GLenum target, GLfloat s);
+GLAPI void GLAPIENTRY glMultiTexCoord1fvARB(GLenum target, const GLfloat *v);
+GLAPI void GLAPIENTRY glMultiTexCoord1iARB(GLenum target, GLint s);
+GLAPI void GLAPIENTRY glMultiTexCoord1ivARB(GLenum target, const GLint *v);
+GLAPI void GLAPIENTRY glMultiTexCoord1sARB(GLenum target, GLshort s);
+GLAPI void GLAPIENTRY glMultiTexCoord1svARB(GLenum target, const GLshort *v);
+GLAPI void GLAPIENTRY glMultiTexCoord2dARB(GLenum target, GLdouble s, GLdouble t);
+GLAPI void GLAPIENTRY glMultiTexCoord2dvARB(GLenum target, const GLdouble *v);
+GLAPI void GLAPIENTRY glMultiTexCoord2fARB(GLenum target, GLfloat s, GLfloat t);
+GLAPI void GLAPIENTRY glMultiTexCoord2fvARB(GLenum target, const GLfloat *v);
+GLAPI void GLAPIENTRY glMultiTexCoord2iARB(GLenum target, GLint s, GLint t);
+GLAPI void GLAPIENTRY glMultiTexCoord2ivARB(GLenum target, const GLint *v);
+GLAPI void GLAPIENTRY glMultiTexCoord2sARB(GLenum target, GLshort s, GLshort t);
+GLAPI void GLAPIENTRY glMultiTexCoord2svARB(GLenum target, const GLshort *v);
+GLAPI void GLAPIENTRY glMultiTexCoord3dARB(GLenum target, GLdouble s, GLdouble t, GLdouble r);
+GLAPI void GLAPIENTRY glMultiTexCoord3dvARB(GLenum target, const GLdouble *v);
+GLAPI void GLAPIENTRY glMultiTexCoord3fARB(GLenum target, GLfloat s, GLfloat t, GLfloat r);
+GLAPI void GLAPIENTRY glMultiTexCoord3fvARB(GLenum target, const GLfloat *v);
+GLAPI void GLAPIENTRY glMultiTexCoord3iARB(GLenum target, GLint s, GLint t, GLint r);
+GLAPI void GLAPIENTRY glMultiTexCoord3ivARB(GLenum target, const GLint *v);
+GLAPI void GLAPIENTRY glMultiTexCoord3sARB(GLenum target, GLshort s, GLshort t, GLshort r);
+GLAPI void GLAPIENTRY glMultiTexCoord3svARB(GLenum target, const GLshort *v);
+GLAPI void GLAPIENTRY glMultiTexCoord4dARB(GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+GLAPI void GLAPIENTRY glMultiTexCoord4dvARB(GLenum target, const GLdouble *v);
+GLAPI void GLAPIENTRY glMultiTexCoord4fARB(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+GLAPI void GLAPIENTRY glMultiTexCoord4fvARB(GLenum target, const GLfloat *v);
+GLAPI void GLAPIENTRY glMultiTexCoord4iARB(GLenum target, GLint s, GLint t, GLint r, GLint q);
+GLAPI void GLAPIENTRY glMultiTexCoord4ivARB(GLenum target, const GLint *v);
+GLAPI void GLAPIENTRY glMultiTexCoord4sARB(GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+GLAPI void GLAPIENTRY glMultiTexCoord4svARB(GLenum target, const GLshort *v);
+
+typedef void (APIENTRYP PFNGLACTIVETEXTUREARBPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLCLIENTACTIVETEXTUREARBPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1DARBPROC) (GLenum target, GLdouble s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1FARBPROC) (GLenum target, GLfloat s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1IARBPROC) (GLenum target, GLint s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1SARBPROC) (GLenum target, GLshort s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1SVARBPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2DARBPROC) (GLenum target, GLdouble s, GLdouble t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2FARBPROC) (GLenum target, GLfloat s, GLfloat t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2IARBPROC) (GLenum target, GLint s, GLint t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2SARBPROC) (GLenum target, GLshort s, GLshort t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2SVARBPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3DARBPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3FARBPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3IARBPROC) (GLenum target, GLint s, GLint t, GLint r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3SARBPROC) (GLenum target, GLshort s, GLshort t, GLshort r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3SVARBPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4DARBPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4FARBPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4IARBPROC) (GLenum target, GLint s, GLint t, GLint r, GLint q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4SARBPROC) (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4SVARBPROC) (GLenum target, const GLshort *v);
+
+#endif /* GL_ARB_multitexture */
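+
+/* Usage sketch (illustrative): checking for an extension at run time before
+ * calling its entry points. Note that a plain strstr() can false-positive
+ * on extensions whose names share a prefix; a tokenized comparison against
+ * the space-separated extension string is safer.
+ *
+ *   #include <string.h>
+ *   const char *exts = (const char *) glGetString(GL_EXTENSIONS);
+ *   if (exts != NULL && strstr(exts, "GL_ARB_multitexture") != NULL) {
+ *       ... glActiveTextureARB() and friends may be used ...
+ *   }
+ */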
+
+
+
+/*
+ * Define this token if you want "old-style" header file behaviour (extensions
+ * defined in gl.h). Otherwise, extensions will be included from glext.h.
+ */
+#if defined(GL_GLEXT_LEGACY)
+
+/* All extensions that used to be here are now found in glext.h */
+
+#else /* GL_GLEXT_LEGACY */
+
+#include <GL/glext.h>
+
+#endif /* GL_GLEXT_LEGACY */
+
+
+
+/*
+ * ???. GL_MESA_packed_depth_stencil
+ * XXX obsolete
+ */
+#ifndef GL_MESA_packed_depth_stencil
+#define GL_MESA_packed_depth_stencil 1
+
+#define GL_DEPTH_STENCIL_MESA 0x8750
+#define GL_UNSIGNED_INT_24_8_MESA 0x8751
+#define GL_UNSIGNED_INT_8_24_REV_MESA 0x8752
+#define GL_UNSIGNED_SHORT_15_1_MESA 0x8753
+#define GL_UNSIGNED_SHORT_1_15_REV_MESA 0x8754
+
+#endif /* GL_MESA_packed_depth_stencil */
+
+
+#ifndef GL_ATI_blend_equation_separate
+#define GL_ATI_blend_equation_separate 1
+
+#define GL_ALPHA_BLEND_EQUATION_ATI 0x883D
+
+GLAPI void GLAPIENTRY glBlendEquationSeparateATI( GLenum modeRGB, GLenum modeA );
+typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEATIPROC) (GLenum modeRGB, GLenum modeA);
+
+#endif /* GL_ATI_blend_equation_separate */
+
+
+/* GL_OES_EGL_image */
+#if !defined(GL_OES_EGL_image) && !defined(GL_EXT_EGL_image_storage)
+typedef void* GLeglImageOES;
+#endif
+
+#ifndef GL_OES_EGL_image
+#define GL_OES_EGL_image 1
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glEGLImageTargetTexture2DOES (GLenum target, GLeglImageOES image);
+GLAPI void APIENTRY glEGLImageTargetRenderbufferStorageOES (GLenum target, GLeglImageOES image);
+#endif
+typedef void (APIENTRYP PFNGLEGLIMAGETARGETTEXTURE2DOESPROC) (GLenum target, GLeglImageOES image);
+typedef void (APIENTRYP PFNGLEGLIMAGETARGETRENDERBUFFERSTORAGEOESPROC) (GLenum target, GLeglImageOES image);
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __gl_h_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/GL/glext.h b/third_party/rust/glslopt/glsl-optimizer/include/GL/glext.h
new file mode 100644
index 0000000000..e8694ae096
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/GL/glext.h
@@ -0,0 +1,12832 @@
+#ifndef __gl_glext_h_
+#define __gl_glext_h_ 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+** Copyright (c) 2013-2018 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+/*
+** This header is generated from the Khronos OpenGL / OpenGL ES XML
+** API Registry. The current version of the Registry, generator scripts
+** used to make the header, and the header can be found at
+** https://github.com/KhronosGroup/OpenGL-Registry
+*/
+
+#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+#include <windows.h>
+#endif
+
+#ifndef APIENTRY
+#define APIENTRY
+#endif
+#ifndef APIENTRYP
+#define APIENTRYP APIENTRY *
+#endif
+#ifndef GLAPI
+#define GLAPI extern
+#endif
+
+#define GL_GLEXT_VERSION 20190911
+
+#include <KHR/khrplatform.h>
+
+/* Generated C header for:
+ * API: gl
+ * Profile: compatibility
+ * Versions considered: .*
+ * Versions emitted: 1\.[2-9]|[234]\.[0-9]
+ * Default extensions included: gl
+ * Additional extensions included: _nomatch_^
+ * Extensions removed: _nomatch_^
+ */
+
+#ifndef GL_VERSION_1_2
+#define GL_VERSION_1_2 1
+#define GL_UNSIGNED_BYTE_3_3_2 0x8032
+#define GL_UNSIGNED_SHORT_4_4_4_4 0x8033
+#define GL_UNSIGNED_SHORT_5_5_5_1 0x8034
+#define GL_UNSIGNED_INT_8_8_8_8 0x8035
+#define GL_UNSIGNED_INT_10_10_10_2 0x8036
+#define GL_TEXTURE_BINDING_3D 0x806A
+#define GL_PACK_SKIP_IMAGES 0x806B
+#define GL_PACK_IMAGE_HEIGHT 0x806C
+#define GL_UNPACK_SKIP_IMAGES 0x806D
+#define GL_UNPACK_IMAGE_HEIGHT 0x806E
+#define GL_TEXTURE_3D 0x806F
+#define GL_PROXY_TEXTURE_3D 0x8070
+#define GL_TEXTURE_DEPTH 0x8071
+#define GL_TEXTURE_WRAP_R 0x8072
+#define GL_MAX_3D_TEXTURE_SIZE 0x8073
+#define GL_UNSIGNED_BYTE_2_3_3_REV 0x8362
+#define GL_UNSIGNED_SHORT_5_6_5 0x8363
+#define GL_UNSIGNED_SHORT_5_6_5_REV 0x8364
+#define GL_UNSIGNED_SHORT_4_4_4_4_REV 0x8365
+#define GL_UNSIGNED_SHORT_1_5_5_5_REV 0x8366
+#define GL_UNSIGNED_INT_8_8_8_8_REV 0x8367
+#define GL_UNSIGNED_INT_2_10_10_10_REV 0x8368
+#define GL_BGR 0x80E0
+#define GL_BGRA 0x80E1
+#define GL_MAX_ELEMENTS_VERTICES 0x80E8
+#define GL_MAX_ELEMENTS_INDICES 0x80E9
+#define GL_CLAMP_TO_EDGE 0x812F
+#define GL_TEXTURE_MIN_LOD 0x813A
+#define GL_TEXTURE_MAX_LOD 0x813B
+#define GL_TEXTURE_BASE_LEVEL 0x813C
+#define GL_TEXTURE_MAX_LEVEL 0x813D
+#define GL_SMOOTH_POINT_SIZE_RANGE 0x0B12
+#define GL_SMOOTH_POINT_SIZE_GRANULARITY 0x0B13
+#define GL_SMOOTH_LINE_WIDTH_RANGE 0x0B22
+#define GL_SMOOTH_LINE_WIDTH_GRANULARITY 0x0B23
+#define GL_ALIASED_LINE_WIDTH_RANGE 0x846E
+#define GL_RESCALE_NORMAL 0x803A
+#define GL_LIGHT_MODEL_COLOR_CONTROL 0x81F8
+#define GL_SINGLE_COLOR 0x81F9
+#define GL_SEPARATE_SPECULAR_COLOR 0x81FA
+#define GL_ALIASED_POINT_SIZE_RANGE 0x846D
+typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
+typedef void (APIENTRYP PFNGLTEXIMAGE3DPROC) (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawRangeElements (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
+GLAPI void APIENTRY glTexImage3D (GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glCopyTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_VERSION_1_2 */
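+
+/* Illustrative sketch (editorial, not part of the Khronos-generated header):
+ * entry points beyond GL 1.1 are typically resolved at run time through the
+ * PFNGL...PROC typedefs above rather than linked directly. GetGLProcAddress
+ * below is a hypothetical stand-in for the platform loader
+ * (wglGetProcAddress / glXGetProcAddress / eglGetProcAddress). */
+#if 0
+static PFNGLDRAWRANGEELEMENTSPROC pglDrawRangeElements;
+static PFNGLTEXIMAGE3DPROC        pglTexImage3D;
+
+static int load_gl12(void)
+{
+    /* Cast the loader result to the matching PFNGL...PROC type. */
+    pglDrawRangeElements =
+        (PFNGLDRAWRANGEELEMENTSPROC) GetGLProcAddress("glDrawRangeElements");
+    pglTexImage3D =
+        (PFNGLTEXIMAGE3DPROC) GetGLProcAddress("glTexImage3D");
+    return pglDrawRangeElements != 0 && pglTexImage3D != 0;
+}
+#endif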
+
+#ifndef GL_VERSION_1_3
+#define GL_VERSION_1_3 1
+#define GL_TEXTURE0 0x84C0
+#define GL_TEXTURE1 0x84C1
+#define GL_TEXTURE2 0x84C2
+#define GL_TEXTURE3 0x84C3
+#define GL_TEXTURE4 0x84C4
+#define GL_TEXTURE5 0x84C5
+#define GL_TEXTURE6 0x84C6
+#define GL_TEXTURE7 0x84C7
+#define GL_TEXTURE8 0x84C8
+#define GL_TEXTURE9 0x84C9
+#define GL_TEXTURE10 0x84CA
+#define GL_TEXTURE11 0x84CB
+#define GL_TEXTURE12 0x84CC
+#define GL_TEXTURE13 0x84CD
+#define GL_TEXTURE14 0x84CE
+#define GL_TEXTURE15 0x84CF
+#define GL_TEXTURE16 0x84D0
+#define GL_TEXTURE17 0x84D1
+#define GL_TEXTURE18 0x84D2
+#define GL_TEXTURE19 0x84D3
+#define GL_TEXTURE20 0x84D4
+#define GL_TEXTURE21 0x84D5
+#define GL_TEXTURE22 0x84D6
+#define GL_TEXTURE23 0x84D7
+#define GL_TEXTURE24 0x84D8
+#define GL_TEXTURE25 0x84D9
+#define GL_TEXTURE26 0x84DA
+#define GL_TEXTURE27 0x84DB
+#define GL_TEXTURE28 0x84DC
+#define GL_TEXTURE29 0x84DD
+#define GL_TEXTURE30 0x84DE
+#define GL_TEXTURE31 0x84DF
+#define GL_ACTIVE_TEXTURE 0x84E0
+#define GL_MULTISAMPLE 0x809D
+#define GL_SAMPLE_ALPHA_TO_COVERAGE 0x809E
+#define GL_SAMPLE_ALPHA_TO_ONE 0x809F
+#define GL_SAMPLE_COVERAGE 0x80A0
+#define GL_SAMPLE_BUFFERS 0x80A8
+#define GL_SAMPLES 0x80A9
+#define GL_SAMPLE_COVERAGE_VALUE 0x80AA
+#define GL_SAMPLE_COVERAGE_INVERT 0x80AB
+#define GL_TEXTURE_CUBE_MAP 0x8513
+#define GL_TEXTURE_BINDING_CUBE_MAP 0x8514
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X 0x8515
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X 0x8516
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y 0x8517
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y 0x8518
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z 0x8519
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z 0x851A
+#define GL_PROXY_TEXTURE_CUBE_MAP 0x851B
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE 0x851C
+#define GL_COMPRESSED_RGB 0x84ED
+#define GL_COMPRESSED_RGBA 0x84EE
+#define GL_TEXTURE_COMPRESSION_HINT 0x84EF
+#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE 0x86A0
+#define GL_TEXTURE_COMPRESSED 0x86A1
+#define GL_NUM_COMPRESSED_TEXTURE_FORMATS 0x86A2
+#define GL_COMPRESSED_TEXTURE_FORMATS 0x86A3
+#define GL_CLAMP_TO_BORDER 0x812D
+#define GL_CLIENT_ACTIVE_TEXTURE 0x84E1
+#define GL_MAX_TEXTURE_UNITS 0x84E2
+#define GL_TRANSPOSE_MODELVIEW_MATRIX 0x84E3
+#define GL_TRANSPOSE_PROJECTION_MATRIX 0x84E4
+#define GL_TRANSPOSE_TEXTURE_MATRIX 0x84E5
+#define GL_TRANSPOSE_COLOR_MATRIX 0x84E6
+#define GL_MULTISAMPLE_BIT 0x20000000
+#define GL_NORMAL_MAP 0x8511
+#define GL_REFLECTION_MAP 0x8512
+#define GL_COMPRESSED_ALPHA 0x84E9
+#define GL_COMPRESSED_LUMINANCE 0x84EA
+#define GL_COMPRESSED_LUMINANCE_ALPHA 0x84EB
+#define GL_COMPRESSED_INTENSITY 0x84EC
+#define GL_COMBINE 0x8570
+#define GL_COMBINE_RGB 0x8571
+#define GL_COMBINE_ALPHA 0x8572
+#define GL_SOURCE0_RGB 0x8580
+#define GL_SOURCE1_RGB 0x8581
+#define GL_SOURCE2_RGB 0x8582
+#define GL_SOURCE0_ALPHA 0x8588
+#define GL_SOURCE1_ALPHA 0x8589
+#define GL_SOURCE2_ALPHA 0x858A
+#define GL_OPERAND0_RGB 0x8590
+#define GL_OPERAND1_RGB 0x8591
+#define GL_OPERAND2_RGB 0x8592
+#define GL_OPERAND0_ALPHA 0x8598
+#define GL_OPERAND1_ALPHA 0x8599
+#define GL_OPERAND2_ALPHA 0x859A
+#define GL_RGB_SCALE 0x8573
+#define GL_ADD_SIGNED 0x8574
+#define GL_INTERPOLATE 0x8575
+#define GL_SUBTRACT 0x84E7
+#define GL_CONSTANT 0x8576
+#define GL_PRIMARY_COLOR 0x8577
+#define GL_PREVIOUS 0x8578
+#define GL_DOT3_RGB 0x86AE
+#define GL_DOT3_RGBA 0x86AF
+typedef void (APIENTRYP PFNGLACTIVETEXTUREPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLSAMPLECOVERAGEPROC) (GLfloat value, GLboolean invert);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE1DPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE1DPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXIMAGEPROC) (GLenum target, GLint level, void *img);
+typedef void (APIENTRYP PFNGLCLIENTACTIVETEXTUREPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1DPROC) (GLenum target, GLdouble s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1DVPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1FPROC) (GLenum target, GLfloat s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1FVPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1IPROC) (GLenum target, GLint s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1IVPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1SPROC) (GLenum target, GLshort s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1SVPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2DPROC) (GLenum target, GLdouble s, GLdouble t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2DVPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2FPROC) (GLenum target, GLfloat s, GLfloat t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2FVPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2IPROC) (GLenum target, GLint s, GLint t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2IVPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2SPROC) (GLenum target, GLshort s, GLshort t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2SVPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3DPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3DVPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3FPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3FVPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3IPROC) (GLenum target, GLint s, GLint t, GLint r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3IVPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3SPROC) (GLenum target, GLshort s, GLshort t, GLshort r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3SVPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4DPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4DVPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4FPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4FVPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4IPROC) (GLenum target, GLint s, GLint t, GLint r, GLint q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4IVPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4SPROC) (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4SVPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXFPROC) (const GLfloat *m);
+typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXDPROC) (const GLdouble *m);
+typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXFPROC) (const GLfloat *m);
+typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXDPROC) (const GLdouble *m);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glActiveTexture (GLenum texture);
+GLAPI void APIENTRY glSampleCoverage (GLfloat value, GLboolean invert);
+GLAPI void APIENTRY glCompressedTexImage3D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexImage2D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexImage1D (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexSubImage3D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexSubImage2D (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexSubImage1D (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glGetCompressedTexImage (GLenum target, GLint level, void *img);
+GLAPI void APIENTRY glClientActiveTexture (GLenum texture);
+GLAPI void APIENTRY glMultiTexCoord1d (GLenum target, GLdouble s);
+GLAPI void APIENTRY glMultiTexCoord1dv (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord1f (GLenum target, GLfloat s);
+GLAPI void APIENTRY glMultiTexCoord1fv (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord1i (GLenum target, GLint s);
+GLAPI void APIENTRY glMultiTexCoord1iv (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord1s (GLenum target, GLshort s);
+GLAPI void APIENTRY glMultiTexCoord1sv (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glMultiTexCoord2d (GLenum target, GLdouble s, GLdouble t);
+GLAPI void APIENTRY glMultiTexCoord2dv (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord2f (GLenum target, GLfloat s, GLfloat t);
+GLAPI void APIENTRY glMultiTexCoord2fv (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord2i (GLenum target, GLint s, GLint t);
+GLAPI void APIENTRY glMultiTexCoord2iv (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord2s (GLenum target, GLshort s, GLshort t);
+GLAPI void APIENTRY glMultiTexCoord2sv (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glMultiTexCoord3d (GLenum target, GLdouble s, GLdouble t, GLdouble r);
+GLAPI void APIENTRY glMultiTexCoord3dv (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord3f (GLenum target, GLfloat s, GLfloat t, GLfloat r);
+GLAPI void APIENTRY glMultiTexCoord3fv (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord3i (GLenum target, GLint s, GLint t, GLint r);
+GLAPI void APIENTRY glMultiTexCoord3iv (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord3s (GLenum target, GLshort s, GLshort t, GLshort r);
+GLAPI void APIENTRY glMultiTexCoord3sv (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glMultiTexCoord4d (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+GLAPI void APIENTRY glMultiTexCoord4dv (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord4f (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+GLAPI void APIENTRY glMultiTexCoord4fv (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord4i (GLenum target, GLint s, GLint t, GLint r, GLint q);
+GLAPI void APIENTRY glMultiTexCoord4iv (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord4s (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+GLAPI void APIENTRY glMultiTexCoord4sv (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glLoadTransposeMatrixf (const GLfloat *m);
+GLAPI void APIENTRY glLoadTransposeMatrixd (const GLdouble *m);
+GLAPI void APIENTRY glMultTransposeMatrixf (const GLfloat *m);
+GLAPI void APIENTRY glMultTransposeMatrixd (const GLdouble *m);
+#endif
+#endif /* GL_VERSION_1_3 */
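+
+/* Illustrative sketch (editorial): the GL_TEXTUREi tokens above are
+ * contiguous enum values, so a texture unit can be selected by arithmetic
+ * on GL_TEXTURE0. Assumes <GL/gl.h> is included (for glBindTexture and
+ * GL_TEXTURE_2D) and that glActiveTexture was resolved at run time. */
+#if 0
+static void bind_to_unit(GLuint tex, int unit)
+{
+    /* GL_TEXTURE0 .. GL_TEXTURE31 are consecutive. */
+    glActiveTexture(GL_TEXTURE0 + (GLenum) unit);
+    glBindTexture(GL_TEXTURE_2D, tex);
+}
+#endif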
+
+#ifndef GL_VERSION_1_4
+#define GL_VERSION_1_4 1
+#define GL_BLEND_DST_RGB 0x80C8
+#define GL_BLEND_SRC_RGB 0x80C9
+#define GL_BLEND_DST_ALPHA 0x80CA
+#define GL_BLEND_SRC_ALPHA 0x80CB
+#define GL_POINT_FADE_THRESHOLD_SIZE 0x8128
+#define GL_DEPTH_COMPONENT16 0x81A5
+#define GL_DEPTH_COMPONENT24 0x81A6
+#define GL_DEPTH_COMPONENT32 0x81A7
+#define GL_MIRRORED_REPEAT 0x8370
+#define GL_MAX_TEXTURE_LOD_BIAS 0x84FD
+#define GL_TEXTURE_LOD_BIAS 0x8501
+#define GL_INCR_WRAP 0x8507
+#define GL_DECR_WRAP 0x8508
+#define GL_TEXTURE_DEPTH_SIZE 0x884A
+#define GL_TEXTURE_COMPARE_MODE 0x884C
+#define GL_TEXTURE_COMPARE_FUNC 0x884D
+#define GL_POINT_SIZE_MIN 0x8126
+#define GL_POINT_SIZE_MAX 0x8127
+#define GL_POINT_DISTANCE_ATTENUATION 0x8129
+#define GL_GENERATE_MIPMAP 0x8191
+#define GL_GENERATE_MIPMAP_HINT 0x8192
+#define GL_FOG_COORDINATE_SOURCE 0x8450
+#define GL_FOG_COORDINATE 0x8451
+#define GL_FRAGMENT_DEPTH 0x8452
+#define GL_CURRENT_FOG_COORDINATE 0x8453
+#define GL_FOG_COORDINATE_ARRAY_TYPE 0x8454
+#define GL_FOG_COORDINATE_ARRAY_STRIDE 0x8455
+#define GL_FOG_COORDINATE_ARRAY_POINTER 0x8456
+#define GL_FOG_COORDINATE_ARRAY 0x8457
+#define GL_COLOR_SUM 0x8458
+#define GL_CURRENT_SECONDARY_COLOR 0x8459
+#define GL_SECONDARY_COLOR_ARRAY_SIZE 0x845A
+#define GL_SECONDARY_COLOR_ARRAY_TYPE 0x845B
+#define GL_SECONDARY_COLOR_ARRAY_STRIDE 0x845C
+#define GL_SECONDARY_COLOR_ARRAY_POINTER 0x845D
+#define GL_SECONDARY_COLOR_ARRAY 0x845E
+#define GL_TEXTURE_FILTER_CONTROL 0x8500
+#define GL_DEPTH_TEXTURE_MODE 0x884B
+#define GL_COMPARE_R_TO_TEXTURE 0x884E
+#define GL_BLEND_COLOR 0x8005
+#define GL_BLEND_EQUATION 0x8009
+#define GL_CONSTANT_COLOR 0x8001
+#define GL_ONE_MINUS_CONSTANT_COLOR 0x8002
+#define GL_CONSTANT_ALPHA 0x8003
+#define GL_ONE_MINUS_CONSTANT_ALPHA 0x8004
+#define GL_FUNC_ADD 0x8006
+#define GL_FUNC_REVERSE_SUBTRACT 0x800B
+#define GL_FUNC_SUBTRACT 0x800A
+#define GL_MIN 0x8007
+#define GL_MAX 0x8008
+typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei drawcount);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFVPROC) (GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERIPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERIVPROC) (GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLFOGCOORDFPROC) (GLfloat coord);
+typedef void (APIENTRYP PFNGLFOGCOORDFVPROC) (const GLfloat *coord);
+typedef void (APIENTRYP PFNGLFOGCOORDDPROC) (GLdouble coord);
+typedef void (APIENTRYP PFNGLFOGCOORDDVPROC) (const GLdouble *coord);
+typedef void (APIENTRYP PFNGLFOGCOORDPOINTERPROC) (GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BPROC) (GLbyte red, GLbyte green, GLbyte blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BVPROC) (const GLbyte *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DPROC) (GLdouble red, GLdouble green, GLdouble blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DVPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FPROC) (GLfloat red, GLfloat green, GLfloat blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FVPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IPROC) (GLint red, GLint green, GLint blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IVPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SPROC) (GLshort red, GLshort green, GLshort blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SVPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBPROC) (GLubyte red, GLubyte green, GLubyte blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBVPROC) (const GLubyte *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIPROC) (GLuint red, GLuint green, GLuint blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIVPROC) (const GLuint *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USPROC) (GLushort red, GLushort green, GLushort blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USVPROC) (const GLushort *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLORPOINTERPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLWINDOWPOS2DPROC) (GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2DVPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2FPROC) (GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2FVPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2IPROC) (GLint x, GLint y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2IVPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2SPROC) (GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2SVPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3DPROC) (GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3DVPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3FPROC) (GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3FVPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3IPROC) (GLint x, GLint y, GLint z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3IVPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3SPROC) (GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3SVPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLBLENDCOLORPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+typedef void (APIENTRYP PFNGLBLENDEQUATIONPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendFuncSeparate (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+GLAPI void APIENTRY glMultiDrawArrays (GLenum mode, const GLint *first, const GLsizei *count, GLsizei drawcount);
+GLAPI void APIENTRY glMultiDrawElements (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount);
+GLAPI void APIENTRY glPointParameterf (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glPointParameterfv (GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glPointParameteri (GLenum pname, GLint param);
+GLAPI void APIENTRY glPointParameteriv (GLenum pname, const GLint *params);
+GLAPI void APIENTRY glFogCoordf (GLfloat coord);
+GLAPI void APIENTRY glFogCoordfv (const GLfloat *coord);
+GLAPI void APIENTRY glFogCoordd (GLdouble coord);
+GLAPI void APIENTRY glFogCoorddv (const GLdouble *coord);
+GLAPI void APIENTRY glFogCoordPointer (GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glSecondaryColor3b (GLbyte red, GLbyte green, GLbyte blue);
+GLAPI void APIENTRY glSecondaryColor3bv (const GLbyte *v);
+GLAPI void APIENTRY glSecondaryColor3d (GLdouble red, GLdouble green, GLdouble blue);
+GLAPI void APIENTRY glSecondaryColor3dv (const GLdouble *v);
+GLAPI void APIENTRY glSecondaryColor3f (GLfloat red, GLfloat green, GLfloat blue);
+GLAPI void APIENTRY glSecondaryColor3fv (const GLfloat *v);
+GLAPI void APIENTRY glSecondaryColor3i (GLint red, GLint green, GLint blue);
+GLAPI void APIENTRY glSecondaryColor3iv (const GLint *v);
+GLAPI void APIENTRY glSecondaryColor3s (GLshort red, GLshort green, GLshort blue);
+GLAPI void APIENTRY glSecondaryColor3sv (const GLshort *v);
+GLAPI void APIENTRY glSecondaryColor3ub (GLubyte red, GLubyte green, GLubyte blue);
+GLAPI void APIENTRY glSecondaryColor3ubv (const GLubyte *v);
+GLAPI void APIENTRY glSecondaryColor3ui (GLuint red, GLuint green, GLuint blue);
+GLAPI void APIENTRY glSecondaryColor3uiv (const GLuint *v);
+GLAPI void APIENTRY glSecondaryColor3us (GLushort red, GLushort green, GLushort blue);
+GLAPI void APIENTRY glSecondaryColor3usv (const GLushort *v);
+GLAPI void APIENTRY glSecondaryColorPointer (GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glWindowPos2d (GLdouble x, GLdouble y);
+GLAPI void APIENTRY glWindowPos2dv (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos2f (GLfloat x, GLfloat y);
+GLAPI void APIENTRY glWindowPos2fv (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos2i (GLint x, GLint y);
+GLAPI void APIENTRY glWindowPos2iv (const GLint *v);
+GLAPI void APIENTRY glWindowPos2s (GLshort x, GLshort y);
+GLAPI void APIENTRY glWindowPos2sv (const GLshort *v);
+GLAPI void APIENTRY glWindowPos3d (GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glWindowPos3dv (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos3f (GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glWindowPos3fv (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos3i (GLint x, GLint y, GLint z);
+GLAPI void APIENTRY glWindowPos3iv (const GLint *v);
+GLAPI void APIENTRY glWindowPos3s (GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glWindowPos3sv (const GLshort *v);
+GLAPI void APIENTRY glBlendColor (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+GLAPI void APIENTRY glBlendEquation (GLenum mode);
+#endif
+#endif /* GL_VERSION_1_4 */
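+
+/* Illustrative sketch (editorial): GL 1.4's glBlendFuncSeparate lets the
+ * RGB and alpha channels blend with different factors, e.g. conventional
+ * alpha blending that still accumulates a usable destination alpha for
+ * later compositing. Blend-factor tokens come from <GL/gl.h>. */
+#if 0
+static void enable_alpha_blending(void)
+{
+    glEnable(GL_BLEND);
+    glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB   */
+                        GL_ONE,       GL_ONE_MINUS_SRC_ALPHA); /* alpha */
+}
+#endif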
+
+#ifndef GL_VERSION_1_5
+#define GL_VERSION_1_5 1
+typedef khronos_ssize_t GLsizeiptr;
+typedef khronos_intptr_t GLintptr;
+#define GL_BUFFER_SIZE 0x8764
+#define GL_BUFFER_USAGE 0x8765
+#define GL_QUERY_COUNTER_BITS 0x8864
+#define GL_CURRENT_QUERY 0x8865
+#define GL_QUERY_RESULT 0x8866
+#define GL_QUERY_RESULT_AVAILABLE 0x8867
+#define GL_ARRAY_BUFFER 0x8892
+#define GL_ELEMENT_ARRAY_BUFFER 0x8893
+#define GL_ARRAY_BUFFER_BINDING 0x8894
+#define GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
+#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING 0x889F
+#define GL_READ_ONLY 0x88B8
+#define GL_WRITE_ONLY 0x88B9
+#define GL_READ_WRITE 0x88BA
+#define GL_BUFFER_ACCESS 0x88BB
+#define GL_BUFFER_MAPPED 0x88BC
+#define GL_BUFFER_MAP_POINTER 0x88BD
+#define GL_STREAM_DRAW 0x88E0
+#define GL_STREAM_READ 0x88E1
+#define GL_STREAM_COPY 0x88E2
+#define GL_STATIC_DRAW 0x88E4
+#define GL_STATIC_READ 0x88E5
+#define GL_STATIC_COPY 0x88E6
+#define GL_DYNAMIC_DRAW 0x88E8
+#define GL_DYNAMIC_READ 0x88E9
+#define GL_DYNAMIC_COPY 0x88EA
+#define GL_SAMPLES_PASSED 0x8914
+#define GL_SRC1_ALPHA 0x8589
+#define GL_VERTEX_ARRAY_BUFFER_BINDING 0x8896
+#define GL_NORMAL_ARRAY_BUFFER_BINDING 0x8897
+#define GL_COLOR_ARRAY_BUFFER_BINDING 0x8898
+#define GL_INDEX_ARRAY_BUFFER_BINDING 0x8899
+#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING 0x889A
+#define GL_EDGE_FLAG_ARRAY_BUFFER_BINDING 0x889B
+#define GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING 0x889C
+#define GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING 0x889D
+#define GL_WEIGHT_ARRAY_BUFFER_BINDING 0x889E
+#define GL_FOG_COORD_SRC 0x8450
+#define GL_FOG_COORD 0x8451
+#define GL_CURRENT_FOG_COORD 0x8453
+#define GL_FOG_COORD_ARRAY_TYPE 0x8454
+#define GL_FOG_COORD_ARRAY_STRIDE 0x8455
+#define GL_FOG_COORD_ARRAY_POINTER 0x8456
+#define GL_FOG_COORD_ARRAY 0x8457
+#define GL_FOG_COORD_ARRAY_BUFFER_BINDING 0x889D
+#define GL_SRC0_RGB 0x8580
+#define GL_SRC1_RGB 0x8581
+#define GL_SRC2_RGB 0x8582
+#define GL_SRC0_ALPHA 0x8588
+#define GL_SRC2_ALPHA 0x858A
+typedef void (APIENTRYP PFNGLGENQUERIESPROC) (GLsizei n, GLuint *ids);
+typedef void (APIENTRYP PFNGLDELETEQUERIESPROC) (GLsizei n, const GLuint *ids);
+typedef GLboolean (APIENTRYP PFNGLISQUERYPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLBEGINQUERYPROC) (GLenum target, GLuint id);
+typedef void (APIENTRYP PFNGLENDQUERYPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLGETQUERYIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTIVPROC) (GLuint id, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTUIVPROC) (GLuint id, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
+typedef void (APIENTRYP PFNGLDELETEBUFFERSPROC) (GLsizei n, const GLuint *buffers);
+typedef void (APIENTRYP PFNGLGENBUFFERSPROC) (GLsizei n, GLuint *buffers);
+typedef GLboolean (APIENTRYP PFNGLISBUFFERPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLBUFFERDATAPROC) (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
+typedef void (APIENTRYP PFNGLBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
+typedef void (APIENTRYP PFNGLGETBUFFERSUBDATAPROC) (GLenum target, GLintptr offset, GLsizeiptr size, void *data);
+typedef void *(APIENTRYP PFNGLMAPBUFFERPROC) (GLenum target, GLenum access);
+typedef GLboolean (APIENTRYP PFNGLUNMAPBUFFERPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETBUFFERPOINTERVPROC) (GLenum target, GLenum pname, void **params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenQueries (GLsizei n, GLuint *ids);
+GLAPI void APIENTRY glDeleteQueries (GLsizei n, const GLuint *ids);
+GLAPI GLboolean APIENTRY glIsQuery (GLuint id);
+GLAPI void APIENTRY glBeginQuery (GLenum target, GLuint id);
+GLAPI void APIENTRY glEndQuery (GLenum target);
+GLAPI void APIENTRY glGetQueryiv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetQueryObjectiv (GLuint id, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetQueryObjectuiv (GLuint id, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glBindBuffer (GLenum target, GLuint buffer);
+GLAPI void APIENTRY glDeleteBuffers (GLsizei n, const GLuint *buffers);
+GLAPI void APIENTRY glGenBuffers (GLsizei n, GLuint *buffers);
+GLAPI GLboolean APIENTRY glIsBuffer (GLuint buffer);
+GLAPI void APIENTRY glBufferData (GLenum target, GLsizeiptr size, const void *data, GLenum usage);
+GLAPI void APIENTRY glBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, const void *data);
+GLAPI void APIENTRY glGetBufferSubData (GLenum target, GLintptr offset, GLsizeiptr size, void *data);
+GLAPI void *APIENTRY glMapBuffer (GLenum target, GLenum access);
+GLAPI GLboolean APIENTRY glUnmapBuffer (GLenum target);
+GLAPI void APIENTRY glGetBufferParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetBufferPointerv (GLenum target, GLenum pname, void **params);
+#endif
+#endif /* GL_VERSION_1_5 */
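+
+/* Illustrative sketch (editorial): the GL 1.5 buffer-object API above is
+ * used by generating a name, binding it to a target such as
+ * GL_ARRAY_BUFFER, and uploading data with a usage hint. Assumes the entry
+ * points were resolved at run time as in the earlier sketch. */
+#if 0
+static GLuint make_static_vbo(const void *verts, GLsizeiptr bytes)
+{
+    GLuint buf = 0;
+    glGenBuffers(1, &buf);
+    glBindBuffer(GL_ARRAY_BUFFER, buf);
+    glBufferData(GL_ARRAY_BUFFER, bytes, verts, GL_STATIC_DRAW);
+    glBindBuffer(GL_ARRAY_BUFFER, 0); /* unbind; data stays in the buffer */
+    return buf;
+}
+#endif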
+
+#ifndef GL_VERSION_2_0
+#define GL_VERSION_2_0 1
+typedef char GLchar;
+#define GL_BLEND_EQUATION_RGB 0x8009
+#define GL_VERTEX_ATTRIB_ARRAY_ENABLED 0x8622
+#define GL_VERTEX_ATTRIB_ARRAY_SIZE 0x8623
+#define GL_VERTEX_ATTRIB_ARRAY_STRIDE 0x8624
+#define GL_VERTEX_ATTRIB_ARRAY_TYPE 0x8625
+#define GL_CURRENT_VERTEX_ATTRIB 0x8626
+#define GL_VERTEX_PROGRAM_POINT_SIZE 0x8642
+#define GL_VERTEX_ATTRIB_ARRAY_POINTER 0x8645
+#define GL_STENCIL_BACK_FUNC 0x8800
+#define GL_STENCIL_BACK_FAIL 0x8801
+#define GL_STENCIL_BACK_PASS_DEPTH_FAIL 0x8802
+#define GL_STENCIL_BACK_PASS_DEPTH_PASS 0x8803
+#define GL_MAX_DRAW_BUFFERS 0x8824
+#define GL_DRAW_BUFFER0 0x8825
+#define GL_DRAW_BUFFER1 0x8826
+#define GL_DRAW_BUFFER2 0x8827
+#define GL_DRAW_BUFFER3 0x8828
+#define GL_DRAW_BUFFER4 0x8829
+#define GL_DRAW_BUFFER5 0x882A
+#define GL_DRAW_BUFFER6 0x882B
+#define GL_DRAW_BUFFER7 0x882C
+#define GL_DRAW_BUFFER8 0x882D
+#define GL_DRAW_BUFFER9 0x882E
+#define GL_DRAW_BUFFER10 0x882F
+#define GL_DRAW_BUFFER11 0x8830
+#define GL_DRAW_BUFFER12 0x8831
+#define GL_DRAW_BUFFER13 0x8832
+#define GL_DRAW_BUFFER14 0x8833
+#define GL_DRAW_BUFFER15 0x8834
+#define GL_BLEND_EQUATION_ALPHA 0x883D
+#define GL_MAX_VERTEX_ATTRIBS 0x8869
+#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED 0x886A
+#define GL_MAX_TEXTURE_IMAGE_UNITS 0x8872
+#define GL_FRAGMENT_SHADER 0x8B30
+#define GL_VERTEX_SHADER 0x8B31
+#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS 0x8B49
+#define GL_MAX_VERTEX_UNIFORM_COMPONENTS 0x8B4A
+#define GL_MAX_VARYING_FLOATS 0x8B4B
+#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS 0x8B4C
+#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS 0x8B4D
+#define GL_SHADER_TYPE 0x8B4F
+#define GL_FLOAT_VEC2 0x8B50
+#define GL_FLOAT_VEC3 0x8B51
+#define GL_FLOAT_VEC4 0x8B52
+#define GL_INT_VEC2 0x8B53
+#define GL_INT_VEC3 0x8B54
+#define GL_INT_VEC4 0x8B55
+#define GL_BOOL 0x8B56
+#define GL_BOOL_VEC2 0x8B57
+#define GL_BOOL_VEC3 0x8B58
+#define GL_BOOL_VEC4 0x8B59
+#define GL_FLOAT_MAT2 0x8B5A
+#define GL_FLOAT_MAT3 0x8B5B
+#define GL_FLOAT_MAT4 0x8B5C
+#define GL_SAMPLER_1D 0x8B5D
+#define GL_SAMPLER_2D 0x8B5E
+#define GL_SAMPLER_3D 0x8B5F
+#define GL_SAMPLER_CUBE 0x8B60
+#define GL_SAMPLER_1D_SHADOW 0x8B61
+#define GL_SAMPLER_2D_SHADOW 0x8B62
+#define GL_DELETE_STATUS 0x8B80
+#define GL_COMPILE_STATUS 0x8B81
+#define GL_LINK_STATUS 0x8B82
+#define GL_VALIDATE_STATUS 0x8B83
+#define GL_INFO_LOG_LENGTH 0x8B84
+#define GL_ATTACHED_SHADERS 0x8B85
+#define GL_ACTIVE_UNIFORMS 0x8B86
+#define GL_ACTIVE_UNIFORM_MAX_LENGTH 0x8B87
+#define GL_SHADER_SOURCE_LENGTH 0x8B88
+#define GL_ACTIVE_ATTRIBUTES 0x8B89
+#define GL_ACTIVE_ATTRIBUTE_MAX_LENGTH 0x8B8A
+#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT 0x8B8B
+#define GL_SHADING_LANGUAGE_VERSION 0x8B8C
+#define GL_CURRENT_PROGRAM 0x8B8D
+#define GL_POINT_SPRITE_COORD_ORIGIN 0x8CA0
+#define GL_LOWER_LEFT 0x8CA1
+#define GL_UPPER_LEFT 0x8CA2
+#define GL_STENCIL_BACK_REF 0x8CA3
+#define GL_STENCIL_BACK_VALUE_MASK 0x8CA4
+#define GL_STENCIL_BACK_WRITEMASK 0x8CA5
+#define GL_VERTEX_PROGRAM_TWO_SIDE 0x8643
+#define GL_POINT_SPRITE 0x8861
+#define GL_COORD_REPLACE 0x8862
+#define GL_MAX_TEXTURE_COORDS 0x8871
+typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEPROC) (GLenum modeRGB, GLenum modeAlpha);
+typedef void (APIENTRYP PFNGLDRAWBUFFERSPROC) (GLsizei n, const GLenum *bufs);
+typedef void (APIENTRYP PFNGLSTENCILOPSEPARATEPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+typedef void (APIENTRYP PFNGLSTENCILFUNCSEPARATEPROC) (GLenum face, GLenum func, GLint ref, GLuint mask);
+typedef void (APIENTRYP PFNGLSTENCILMASKSEPARATEPROC) (GLenum face, GLuint mask);
+typedef void (APIENTRYP PFNGLATTACHSHADERPROC) (GLuint program, GLuint shader);
+typedef void (APIENTRYP PFNGLBINDATTRIBLOCATIONPROC) (GLuint program, GLuint index, const GLchar *name);
+typedef void (APIENTRYP PFNGLCOMPILESHADERPROC) (GLuint shader);
+typedef GLuint (APIENTRYP PFNGLCREATEPROGRAMPROC) (void);
+typedef GLuint (APIENTRYP PFNGLCREATESHADERPROC) (GLenum type);
+typedef void (APIENTRYP PFNGLDELETEPROGRAMPROC) (GLuint program);
+typedef void (APIENTRYP PFNGLDELETESHADERPROC) (GLuint shader);
+typedef void (APIENTRYP PFNGLDETACHSHADERPROC) (GLuint program, GLuint shader);
+typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYPROC) (GLuint index);
+typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYPROC) (GLuint index);
+typedef void (APIENTRYP PFNGLGETACTIVEATTRIBPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+typedef void (APIENTRYP PFNGLGETATTACHEDSHADERSPROC) (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
+typedef GLint (APIENTRYP PFNGLGETATTRIBLOCATIONPROC) (GLuint program, const GLchar *name);
+typedef void (APIENTRYP PFNGLGETPROGRAMIVPROC) (GLuint program, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMINFOLOGPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+typedef void (APIENTRYP PFNGLGETSHADERIVPROC) (GLuint shader, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETSHADERINFOLOGPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+typedef void (APIENTRYP PFNGLGETSHADERSOURCEPROC) (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
+typedef GLint (APIENTRYP PFNGLGETUNIFORMLOCATIONPROC) (GLuint program, const GLchar *name);
+typedef void (APIENTRYP PFNGLGETUNIFORMFVPROC) (GLuint program, GLint location, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETUNIFORMIVPROC) (GLuint program, GLint location, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVPROC) (GLuint index, GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVPROC) (GLuint index, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIVPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVPROC) (GLuint index, GLenum pname, void **pointer);
+typedef GLboolean (APIENTRYP PFNGLISPROGRAMPROC) (GLuint program);
+typedef GLboolean (APIENTRYP PFNGLISSHADERPROC) (GLuint shader);
+typedef void (APIENTRYP PFNGLLINKPROGRAMPROC) (GLuint program);
+typedef void (APIENTRYP PFNGLSHADERSOURCEPROC) (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
+typedef void (APIENTRYP PFNGLUSEPROGRAMPROC) (GLuint program);
+typedef void (APIENTRYP PFNGLUNIFORM1FPROC) (GLint location, GLfloat v0);
+typedef void (APIENTRYP PFNGLUNIFORM2FPROC) (GLint location, GLfloat v0, GLfloat v1);
+typedef void (APIENTRYP PFNGLUNIFORM3FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (APIENTRYP PFNGLUNIFORM4FPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (APIENTRYP PFNGLUNIFORM1IPROC) (GLint location, GLint v0);
+typedef void (APIENTRYP PFNGLUNIFORM2IPROC) (GLint location, GLint v0, GLint v1);
+typedef void (APIENTRYP PFNGLUNIFORM3IPROC) (GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (APIENTRYP PFNGLUNIFORM4IPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (APIENTRYP PFNGLUNIFORM1FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM2FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM3FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM4FVPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM1IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORM2IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORM3IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORM4IVPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLVALIDATEPROGRAMPROC) (GLuint program);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1DPROC) (GLuint index, GLdouble x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1FPROC) (GLuint index, GLfloat x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1SPROC) (GLuint index, GLshort x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2DPROC) (GLuint index, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2FPROC) (GLuint index, GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2SPROC) (GLuint index, GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3SPROC) (GLuint index, GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NBVPROC) (GLuint index, const GLbyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NIVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NSVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBPROC) (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBVPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUIVPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUSVPROC) (GLuint index, const GLushort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4BVPROC) (GLuint index, const GLbyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4FPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4IVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SPROC) (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4UIVPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4USVPROC) (GLuint index, const GLushort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendEquationSeparate (GLenum modeRGB, GLenum modeAlpha);
+GLAPI void APIENTRY glDrawBuffers (GLsizei n, const GLenum *bufs);
+GLAPI void APIENTRY glStencilOpSeparate (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+GLAPI void APIENTRY glStencilFuncSeparate (GLenum face, GLenum func, GLint ref, GLuint mask);
+GLAPI void APIENTRY glStencilMaskSeparate (GLenum face, GLuint mask);
+GLAPI void APIENTRY glAttachShader (GLuint program, GLuint shader);
+GLAPI void APIENTRY glBindAttribLocation (GLuint program, GLuint index, const GLchar *name);
+GLAPI void APIENTRY glCompileShader (GLuint shader);
+GLAPI GLuint APIENTRY glCreateProgram (void);
+GLAPI GLuint APIENTRY glCreateShader (GLenum type);
+GLAPI void APIENTRY glDeleteProgram (GLuint program);
+GLAPI void APIENTRY glDeleteShader (GLuint shader);
+GLAPI void APIENTRY glDetachShader (GLuint program, GLuint shader);
+GLAPI void APIENTRY glDisableVertexAttribArray (GLuint index);
+GLAPI void APIENTRY glEnableVertexAttribArray (GLuint index);
+GLAPI void APIENTRY glGetActiveAttrib (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+GLAPI void APIENTRY glGetActiveUniform (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLint *size, GLenum *type, GLchar *name);
+GLAPI void APIENTRY glGetAttachedShaders (GLuint program, GLsizei maxCount, GLsizei *count, GLuint *shaders);
+GLAPI GLint APIENTRY glGetAttribLocation (GLuint program, const GLchar *name);
+GLAPI void APIENTRY glGetProgramiv (GLuint program, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetProgramInfoLog (GLuint program, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+GLAPI void APIENTRY glGetShaderiv (GLuint shader, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetShaderInfoLog (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+GLAPI void APIENTRY glGetShaderSource (GLuint shader, GLsizei bufSize, GLsizei *length, GLchar *source);
+GLAPI GLint APIENTRY glGetUniformLocation (GLuint program, const GLchar *name);
+GLAPI void APIENTRY glGetUniformfv (GLuint program, GLint location, GLfloat *params);
+GLAPI void APIENTRY glGetUniformiv (GLuint program, GLint location, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribdv (GLuint index, GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glGetVertexAttribfv (GLuint index, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetVertexAttribiv (GLuint index, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribPointerv (GLuint index, GLenum pname, void **pointer);
+GLAPI GLboolean APIENTRY glIsProgram (GLuint program);
+GLAPI GLboolean APIENTRY glIsShader (GLuint shader);
+GLAPI void APIENTRY glLinkProgram (GLuint program);
+GLAPI void APIENTRY glShaderSource (GLuint shader, GLsizei count, const GLchar *const*string, const GLint *length);
+GLAPI void APIENTRY glUseProgram (GLuint program);
+GLAPI void APIENTRY glUniform1f (GLint location, GLfloat v0);
+GLAPI void APIENTRY glUniform2f (GLint location, GLfloat v0, GLfloat v1);
+GLAPI void APIENTRY glUniform3f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+GLAPI void APIENTRY glUniform4f (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GLAPI void APIENTRY glUniform1i (GLint location, GLint v0);
+GLAPI void APIENTRY glUniform2i (GLint location, GLint v0, GLint v1);
+GLAPI void APIENTRY glUniform3i (GLint location, GLint v0, GLint v1, GLint v2);
+GLAPI void APIENTRY glUniform4i (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GLAPI void APIENTRY glUniform1fv (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform2fv (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform3fv (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform4fv (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform1iv (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniform2iv (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniform3iv (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniform4iv (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniformMatrix2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glValidateProgram (GLuint program);
+GLAPI void APIENTRY glVertexAttrib1d (GLuint index, GLdouble x);
+GLAPI void APIENTRY glVertexAttrib1dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib1f (GLuint index, GLfloat x);
+GLAPI void APIENTRY glVertexAttrib1fv (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib1s (GLuint index, GLshort x);
+GLAPI void APIENTRY glVertexAttrib1sv (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib2d (GLuint index, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glVertexAttrib2dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib2f (GLuint index, GLfloat x, GLfloat y);
+GLAPI void APIENTRY glVertexAttrib2fv (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib2s (GLuint index, GLshort x, GLshort y);
+GLAPI void APIENTRY glVertexAttrib2sv (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib3d (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glVertexAttrib3dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib3f (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glVertexAttrib3fv (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib3s (GLuint index, GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glVertexAttrib3sv (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4Nbv (GLuint index, const GLbyte *v);
+GLAPI void APIENTRY glVertexAttrib4Niv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttrib4Nsv (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4Nub (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+GLAPI void APIENTRY glVertexAttrib4Nubv (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttrib4Nuiv (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttrib4Nusv (GLuint index, const GLushort *v);
+GLAPI void APIENTRY glVertexAttrib4bv (GLuint index, const GLbyte *v);
+GLAPI void APIENTRY glVertexAttrib4d (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glVertexAttrib4dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib4f (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glVertexAttrib4fv (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib4iv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttrib4s (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+GLAPI void APIENTRY glVertexAttrib4sv (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4ubv (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttrib4uiv (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttrib4usv (GLuint index, const GLushort *v);
+GLAPI void APIENTRY glVertexAttribPointer (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+#endif
+#endif /* GL_VERSION_2_0 */
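+
+/* Illustrative sketch (editorial): the GL 2.0 shader API above follows a
+ * create / source / compile / query pattern, with GL_COMPILE_STATUS and
+ * glGetShaderInfoLog used to check and report the result. */
+#if 0
+static GLuint compile_shader(GLenum type, const GLchar *src)
+{
+    GLuint sh = glCreateShader(type); /* GL_VERTEX_SHADER etc. */
+    GLint ok = 0;
+    glShaderSource(sh, 1, &src, 0);   /* one NUL-terminated string */
+    glCompileShader(sh);
+    glGetShaderiv(sh, GL_COMPILE_STATUS, &ok);
+    if (!ok) {
+        GLchar log[1024];
+        glGetShaderInfoLog(sh, sizeof log, 0, log);
+        /* report `log` through the host's error channel, then clean up */
+        glDeleteShader(sh);
+        return 0;
+    }
+    return sh;
+}
+#endif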
+
+#ifndef GL_VERSION_2_1
+#define GL_VERSION_2_1 1
+#define GL_PIXEL_PACK_BUFFER 0x88EB
+#define GL_PIXEL_UNPACK_BUFFER 0x88EC
+#define GL_PIXEL_PACK_BUFFER_BINDING 0x88ED
+#define GL_PIXEL_UNPACK_BUFFER_BINDING 0x88EF
+#define GL_FLOAT_MAT2x3 0x8B65
+#define GL_FLOAT_MAT2x4 0x8B66
+#define GL_FLOAT_MAT3x2 0x8B67
+#define GL_FLOAT_MAT3x4 0x8B68
+#define GL_FLOAT_MAT4x2 0x8B69
+#define GL_FLOAT_MAT4x3 0x8B6A
+#define GL_SRGB 0x8C40
+#define GL_SRGB8 0x8C41
+#define GL_SRGB_ALPHA 0x8C42
+#define GL_SRGB8_ALPHA8 0x8C43
+#define GL_COMPRESSED_SRGB 0x8C48
+#define GL_COMPRESSED_SRGB_ALPHA 0x8C49
+#define GL_CURRENT_RASTER_SECONDARY_COLOR 0x845F
+#define GL_SLUMINANCE_ALPHA 0x8C44
+#define GL_SLUMINANCE8_ALPHA8 0x8C45
+#define GL_SLUMINANCE 0x8C46
+#define GL_SLUMINANCE8 0x8C47
+#define GL_COMPRESSED_SLUMINANCE 0x8C4A
+#define GL_COMPRESSED_SLUMINANCE_ALPHA 0x8C4B
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X2FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X4FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X3FVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glUniformMatrix2x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix3x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix2x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix4x2fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix3x4fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix4x3fv (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+#endif
+#endif /* GL_VERSION_2_1 */
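+
+/* Editorial note: GL 2.1 adds the non-square matrix uniform setters above;
+ * e.g. glUniformMatrix2x3fv uploads GLSL mat2x3 uniforms (2 columns,
+ * 3 rows), consuming 6 floats per matrix in column-major order unless
+ * transpose is GL_TRUE. */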
+
+#ifndef GL_VERSION_3_0
+#define GL_VERSION_3_0 1
+typedef khronos_uint16_t GLhalf;
+#define GL_COMPARE_REF_TO_TEXTURE 0x884E
+#define GL_CLIP_DISTANCE0 0x3000
+#define GL_CLIP_DISTANCE1 0x3001
+#define GL_CLIP_DISTANCE2 0x3002
+#define GL_CLIP_DISTANCE3 0x3003
+#define GL_CLIP_DISTANCE4 0x3004
+#define GL_CLIP_DISTANCE5 0x3005
+#define GL_CLIP_DISTANCE6 0x3006
+#define GL_CLIP_DISTANCE7 0x3007
+#define GL_MAX_CLIP_DISTANCES 0x0D32
+#define GL_MAJOR_VERSION 0x821B
+#define GL_MINOR_VERSION 0x821C
+#define GL_NUM_EXTENSIONS 0x821D
+#define GL_CONTEXT_FLAGS 0x821E
+#define GL_COMPRESSED_RED 0x8225
+#define GL_COMPRESSED_RG 0x8226
+#define GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT 0x00000001
+#define GL_RGBA32F 0x8814
+#define GL_RGB32F 0x8815
+#define GL_RGBA16F 0x881A
+#define GL_RGB16F 0x881B
+#define GL_VERTEX_ATTRIB_ARRAY_INTEGER 0x88FD
+#define GL_MAX_ARRAY_TEXTURE_LAYERS 0x88FF
+#define GL_MIN_PROGRAM_TEXEL_OFFSET 0x8904
+#define GL_MAX_PROGRAM_TEXEL_OFFSET 0x8905
+#define GL_CLAMP_READ_COLOR 0x891C
+#define GL_FIXED_ONLY 0x891D
+#define GL_MAX_VARYING_COMPONENTS 0x8B4B
+#define GL_TEXTURE_1D_ARRAY 0x8C18
+#define GL_PROXY_TEXTURE_1D_ARRAY 0x8C19
+#define GL_TEXTURE_2D_ARRAY 0x8C1A
+#define GL_PROXY_TEXTURE_2D_ARRAY 0x8C1B
+#define GL_TEXTURE_BINDING_1D_ARRAY 0x8C1C
+#define GL_TEXTURE_BINDING_2D_ARRAY 0x8C1D
+#define GL_R11F_G11F_B10F 0x8C3A
+#define GL_UNSIGNED_INT_10F_11F_11F_REV 0x8C3B
+#define GL_RGB9_E5 0x8C3D
+#define GL_UNSIGNED_INT_5_9_9_9_REV 0x8C3E
+#define GL_TEXTURE_SHARED_SIZE 0x8C3F
+#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH 0x8C76
+#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE 0x8C7F
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS 0x8C80
+#define GL_TRANSFORM_FEEDBACK_VARYINGS 0x8C83
+#define GL_TRANSFORM_FEEDBACK_BUFFER_START 0x8C84
+#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE 0x8C85
+#define GL_PRIMITIVES_GENERATED 0x8C87
+#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN 0x8C88
+#define GL_RASTERIZER_DISCARD 0x8C89
+#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS 0x8C8A
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS 0x8C8B
+#define GL_INTERLEAVED_ATTRIBS 0x8C8C
+#define GL_SEPARATE_ATTRIBS 0x8C8D
+#define GL_TRANSFORM_FEEDBACK_BUFFER 0x8C8E
+#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING 0x8C8F
+#define GL_RGBA32UI 0x8D70
+#define GL_RGB32UI 0x8D71
+#define GL_RGBA16UI 0x8D76
+#define GL_RGB16UI 0x8D77
+#define GL_RGBA8UI 0x8D7C
+#define GL_RGB8UI 0x8D7D
+#define GL_RGBA32I 0x8D82
+#define GL_RGB32I 0x8D83
+#define GL_RGBA16I 0x8D88
+#define GL_RGB16I 0x8D89
+#define GL_RGBA8I 0x8D8E
+#define GL_RGB8I 0x8D8F
+#define GL_RED_INTEGER 0x8D94
+#define GL_GREEN_INTEGER 0x8D95
+#define GL_BLUE_INTEGER 0x8D96
+#define GL_RGB_INTEGER 0x8D98
+#define GL_RGBA_INTEGER 0x8D99
+#define GL_BGR_INTEGER 0x8D9A
+#define GL_BGRA_INTEGER 0x8D9B
+#define GL_SAMPLER_1D_ARRAY 0x8DC0
+#define GL_SAMPLER_2D_ARRAY 0x8DC1
+#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3
+#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
+#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
+#define GL_UNSIGNED_INT_VEC2 0x8DC6
+#define GL_UNSIGNED_INT_VEC3 0x8DC7
+#define GL_UNSIGNED_INT_VEC4 0x8DC8
+#define GL_INT_SAMPLER_1D 0x8DC9
+#define GL_INT_SAMPLER_2D 0x8DCA
+#define GL_INT_SAMPLER_3D 0x8DCB
+#define GL_INT_SAMPLER_CUBE 0x8DCC
+#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE
+#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
+#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1
+#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
+#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
+#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
+#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6
+#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
+#define GL_QUERY_WAIT 0x8E13
+#define GL_QUERY_NO_WAIT 0x8E14
+#define GL_QUERY_BY_REGION_WAIT 0x8E15
+#define GL_QUERY_BY_REGION_NO_WAIT 0x8E16
+#define GL_BUFFER_ACCESS_FLAGS 0x911F
+#define GL_BUFFER_MAP_LENGTH 0x9120
+#define GL_BUFFER_MAP_OFFSET 0x9121
+#define GL_DEPTH_COMPONENT32F 0x8CAC
+#define GL_DEPTH32F_STENCIL8 0x8CAD
+#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV 0x8DAD
+#define GL_INVALID_FRAMEBUFFER_OPERATION 0x0506
+#define GL_FRAMEBUFFER_ATTACHMENT_COLOR_ENCODING 0x8210
+#define GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE 0x8211
+#define GL_FRAMEBUFFER_ATTACHMENT_RED_SIZE 0x8212
+#define GL_FRAMEBUFFER_ATTACHMENT_GREEN_SIZE 0x8213
+#define GL_FRAMEBUFFER_ATTACHMENT_BLUE_SIZE 0x8214
+#define GL_FRAMEBUFFER_ATTACHMENT_ALPHA_SIZE 0x8215
+#define GL_FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE 0x8216
+#define GL_FRAMEBUFFER_ATTACHMENT_STENCIL_SIZE 0x8217
+#define GL_FRAMEBUFFER_DEFAULT 0x8218
+#define GL_FRAMEBUFFER_UNDEFINED 0x8219
+#define GL_DEPTH_STENCIL_ATTACHMENT 0x821A
+#define GL_MAX_RENDERBUFFER_SIZE 0x84E8
+#define GL_DEPTH_STENCIL 0x84F9
+#define GL_UNSIGNED_INT_24_8 0x84FA
+#define GL_DEPTH24_STENCIL8 0x88F0
+#define GL_TEXTURE_STENCIL_SIZE 0x88F1
+#define GL_TEXTURE_RED_TYPE 0x8C10
+#define GL_TEXTURE_GREEN_TYPE 0x8C11
+#define GL_TEXTURE_BLUE_TYPE 0x8C12
+#define GL_TEXTURE_ALPHA_TYPE 0x8C13
+#define GL_TEXTURE_DEPTH_TYPE 0x8C16
+#define GL_UNSIGNED_NORMALIZED 0x8C17
+#define GL_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_DRAW_FRAMEBUFFER_BINDING 0x8CA6
+#define GL_RENDERBUFFER_BINDING 0x8CA7
+#define GL_READ_FRAMEBUFFER 0x8CA8
+#define GL_DRAW_FRAMEBUFFER 0x8CA9
+#define GL_READ_FRAMEBUFFER_BINDING 0x8CAA
+#define GL_RENDERBUFFER_SAMPLES 0x8CAB
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE 0x8CD0
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME 0x8CD1
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL 0x8CD2
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE 0x8CD3
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER 0x8CD4
+#define GL_FRAMEBUFFER_COMPLETE 0x8CD5
+#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT 0x8CD6
+#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT 0x8CD7
+#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER 0x8CDB
+#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER 0x8CDC
+#define GL_FRAMEBUFFER_UNSUPPORTED 0x8CDD
+#define GL_MAX_COLOR_ATTACHMENTS 0x8CDF
+#define GL_COLOR_ATTACHMENT0 0x8CE0
+#define GL_COLOR_ATTACHMENT1 0x8CE1
+#define GL_COLOR_ATTACHMENT2 0x8CE2
+#define GL_COLOR_ATTACHMENT3 0x8CE3
+#define GL_COLOR_ATTACHMENT4 0x8CE4
+#define GL_COLOR_ATTACHMENT5 0x8CE5
+#define GL_COLOR_ATTACHMENT6 0x8CE6
+#define GL_COLOR_ATTACHMENT7 0x8CE7
+#define GL_COLOR_ATTACHMENT8 0x8CE8
+#define GL_COLOR_ATTACHMENT9 0x8CE9
+#define GL_COLOR_ATTACHMENT10 0x8CEA
+#define GL_COLOR_ATTACHMENT11 0x8CEB
+#define GL_COLOR_ATTACHMENT12 0x8CEC
+#define GL_COLOR_ATTACHMENT13 0x8CED
+#define GL_COLOR_ATTACHMENT14 0x8CEE
+#define GL_COLOR_ATTACHMENT15 0x8CEF
+#define GL_COLOR_ATTACHMENT16 0x8CF0
+#define GL_COLOR_ATTACHMENT17 0x8CF1
+#define GL_COLOR_ATTACHMENT18 0x8CF2
+#define GL_COLOR_ATTACHMENT19 0x8CF3
+#define GL_COLOR_ATTACHMENT20 0x8CF4
+#define GL_COLOR_ATTACHMENT21 0x8CF5
+#define GL_COLOR_ATTACHMENT22 0x8CF6
+#define GL_COLOR_ATTACHMENT23 0x8CF7
+#define GL_COLOR_ATTACHMENT24 0x8CF8
+#define GL_COLOR_ATTACHMENT25 0x8CF9
+#define GL_COLOR_ATTACHMENT26 0x8CFA
+#define GL_COLOR_ATTACHMENT27 0x8CFB
+#define GL_COLOR_ATTACHMENT28 0x8CFC
+#define GL_COLOR_ATTACHMENT29 0x8CFD
+#define GL_COLOR_ATTACHMENT30 0x8CFE
+#define GL_COLOR_ATTACHMENT31 0x8CFF
+#define GL_DEPTH_ATTACHMENT 0x8D00
+#define GL_STENCIL_ATTACHMENT 0x8D20
+#define GL_FRAMEBUFFER 0x8D40
+#define GL_RENDERBUFFER 0x8D41
+#define GL_RENDERBUFFER_WIDTH 0x8D42
+#define GL_RENDERBUFFER_HEIGHT 0x8D43
+#define GL_RENDERBUFFER_INTERNAL_FORMAT 0x8D44
+#define GL_STENCIL_INDEX1 0x8D46
+#define GL_STENCIL_INDEX4 0x8D47
+#define GL_STENCIL_INDEX8 0x8D48
+#define GL_STENCIL_INDEX16 0x8D49
+#define GL_RENDERBUFFER_RED_SIZE 0x8D50
+#define GL_RENDERBUFFER_GREEN_SIZE 0x8D51
+#define GL_RENDERBUFFER_BLUE_SIZE 0x8D52
+#define GL_RENDERBUFFER_ALPHA_SIZE 0x8D53
+#define GL_RENDERBUFFER_DEPTH_SIZE 0x8D54
+#define GL_RENDERBUFFER_STENCIL_SIZE 0x8D55
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE 0x8D56
+#define GL_MAX_SAMPLES 0x8D57
+#define GL_INDEX 0x8222
+#define GL_TEXTURE_LUMINANCE_TYPE 0x8C14
+#define GL_TEXTURE_INTENSITY_TYPE 0x8C15
+#define GL_FRAMEBUFFER_SRGB 0x8DB9
+#define GL_HALF_FLOAT 0x140B
+#define GL_MAP_READ_BIT 0x0001
+#define GL_MAP_WRITE_BIT 0x0002
+#define GL_MAP_INVALIDATE_RANGE_BIT 0x0004
+#define GL_MAP_INVALIDATE_BUFFER_BIT 0x0008
+#define GL_MAP_FLUSH_EXPLICIT_BIT 0x0010
+#define GL_MAP_UNSYNCHRONIZED_BIT 0x0020
+#define GL_COMPRESSED_RED_RGTC1 0x8DBB
+#define GL_COMPRESSED_SIGNED_RED_RGTC1 0x8DBC
+#define GL_COMPRESSED_RG_RGTC2 0x8DBD
+#define GL_COMPRESSED_SIGNED_RG_RGTC2 0x8DBE
+#define GL_RG 0x8227
+#define GL_RG_INTEGER 0x8228
+#define GL_R8 0x8229
+#define GL_R16 0x822A
+#define GL_RG8 0x822B
+#define GL_RG16 0x822C
+#define GL_R16F 0x822D
+#define GL_R32F 0x822E
+#define GL_RG16F 0x822F
+#define GL_RG32F 0x8230
+#define GL_R8I 0x8231
+#define GL_R8UI 0x8232
+#define GL_R16I 0x8233
+#define GL_R16UI 0x8234
+#define GL_R32I 0x8235
+#define GL_R32UI 0x8236
+#define GL_RG8I 0x8237
+#define GL_RG8UI 0x8238
+#define GL_RG16I 0x8239
+#define GL_RG16UI 0x823A
+#define GL_RG32I 0x823B
+#define GL_RG32UI 0x823C
+#define GL_VERTEX_ARRAY_BINDING 0x85B5
+#define GL_CLAMP_VERTEX_COLOR 0x891A
+#define GL_CLAMP_FRAGMENT_COLOR 0x891B
+#define GL_ALPHA_INTEGER 0x8D97
+typedef void (APIENTRYP PFNGLCOLORMASKIPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+typedef void (APIENTRYP PFNGLGETBOOLEANI_VPROC) (GLenum target, GLuint index, GLboolean *data);
+typedef void (APIENTRYP PFNGLGETINTEGERI_VPROC) (GLenum target, GLuint index, GLint *data);
+typedef void (APIENTRYP PFNGLENABLEIPROC) (GLenum target, GLuint index);
+typedef void (APIENTRYP PFNGLDISABLEIPROC) (GLenum target, GLuint index);
+typedef GLboolean (APIENTRYP PFNGLISENABLEDIPROC) (GLenum target, GLuint index);
+typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKPROC) (GLenum primitiveMode);
+typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKPROC) (void);
+typedef void (APIENTRYP PFNGLBINDBUFFERRANGEPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLBINDBUFFERBASEPROC) (GLenum target, GLuint index, GLuint buffer);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
+typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+typedef void (APIENTRYP PFNGLCLAMPCOLORPROC) (GLenum target, GLenum clamp);
+typedef void (APIENTRYP PFNGLBEGINCONDITIONALRENDERPROC) (GLuint id, GLenum mode);
+typedef void (APIENTRYP PFNGLENDCONDITIONALRENDERPROC) (void);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBIPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIIVPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIUIVPROC) (GLuint index, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IPROC) (GLuint index, GLint x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IPROC) (GLuint index, GLint x, GLint y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IPROC) (GLuint index, GLint x, GLint y, GLint z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIPROC) (GLuint index, GLuint x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIPROC) (GLuint index, GLuint x, GLuint y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIVPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIVPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIVPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIVPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4BVPROC) (GLuint index, const GLbyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4SVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UBVPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4USVPROC) (GLuint index, const GLushort *v);
+typedef void (APIENTRYP PFNGLGETUNIFORMUIVPROC) (GLuint program, GLint location, GLuint *params);
+typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONPROC) (GLuint program, GLuint color, const GLchar *name);
+typedef GLint (APIENTRYP PFNGLGETFRAGDATALOCATIONPROC) (GLuint program, const GLchar *name);
+typedef void (APIENTRYP PFNGLUNIFORM1UIPROC) (GLint location, GLuint v0);
+typedef void (APIENTRYP PFNGLUNIFORM2UIPROC) (GLint location, GLuint v0, GLuint v1);
+typedef void (APIENTRYP PFNGLUNIFORM3UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2);
+typedef void (APIENTRYP PFNGLUNIFORM4UIPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (APIENTRYP PFNGLUNIFORM1UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLUNIFORM2UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLUNIFORM3UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLUNIFORM4UIVPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETTEXPARAMETERIIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETTEXPARAMETERIUIVPROC) (GLenum target, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLCLEARBUFFERIVPROC) (GLenum buffer, GLint drawbuffer, const GLint *value);
+typedef void (APIENTRYP PFNGLCLEARBUFFERUIVPROC) (GLenum buffer, GLint drawbuffer, const GLuint *value);
+typedef void (APIENTRYP PFNGLCLEARBUFFERFVPROC) (GLenum buffer, GLint drawbuffer, const GLfloat *value);
+typedef void (APIENTRYP PFNGLCLEARBUFFERFIPROC) (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+typedef const GLubyte *(APIENTRYP PFNGLGETSTRINGIPROC) (GLenum name, GLuint index);
+typedef GLboolean (APIENTRYP PFNGLISRENDERBUFFERPROC) (GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLBINDRENDERBUFFERPROC) (GLenum target, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLDELETERENDERBUFFERSPROC) (GLsizei n, const GLuint *renderbuffers);
+typedef void (APIENTRYP PFNGLGENRENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers);
+typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef GLboolean (APIENTRYP PFNGLISFRAMEBUFFERPROC) (GLuint framebuffer);
+typedef void (APIENTRYP PFNGLBINDFRAMEBUFFERPROC) (GLenum target, GLuint framebuffer);
+typedef void (APIENTRYP PFNGLDELETEFRAMEBUFFERSPROC) (GLsizei n, const GLuint *framebuffers);
+typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers);
+typedef GLenum (APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE1DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFERPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGENERATEMIPMAPPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLBLITFRAMEBUFFERPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+typedef void *(APIENTRYP PFNGLMAPBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
+typedef void (APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEPROC) (GLenum target, GLintptr offset, GLsizeiptr length);
+typedef void (APIENTRYP PFNGLBINDVERTEXARRAYPROC) (GLuint array);
+typedef void (APIENTRYP PFNGLDELETEVERTEXARRAYSPROC) (GLsizei n, const GLuint *arrays);
+typedef void (APIENTRYP PFNGLGENVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays);
+typedef GLboolean (APIENTRYP PFNGLISVERTEXARRAYPROC) (GLuint array);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorMaski (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+GLAPI void APIENTRY glGetBooleani_v (GLenum target, GLuint index, GLboolean *data);
+GLAPI void APIENTRY glGetIntegeri_v (GLenum target, GLuint index, GLint *data);
+GLAPI void APIENTRY glEnablei (GLenum target, GLuint index);
+GLAPI void APIENTRY glDisablei (GLenum target, GLuint index);
+GLAPI GLboolean APIENTRY glIsEnabledi (GLenum target, GLuint index);
+GLAPI void APIENTRY glBeginTransformFeedback (GLenum primitiveMode);
+GLAPI void APIENTRY glEndTransformFeedback (void);
+GLAPI void APIENTRY glBindBufferRange (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glBindBufferBase (GLenum target, GLuint index, GLuint buffer);
+GLAPI void APIENTRY glTransformFeedbackVaryings (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
+GLAPI void APIENTRY glGetTransformFeedbackVarying (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+GLAPI void APIENTRY glClampColor (GLenum target, GLenum clamp);
+GLAPI void APIENTRY glBeginConditionalRender (GLuint id, GLenum mode);
+GLAPI void APIENTRY glEndConditionalRender (void);
+GLAPI void APIENTRY glVertexAttribIPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glGetVertexAttribIiv (GLuint index, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribIuiv (GLuint index, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glVertexAttribI1i (GLuint index, GLint x);
+GLAPI void APIENTRY glVertexAttribI2i (GLuint index, GLint x, GLint y);
+GLAPI void APIENTRY glVertexAttribI3i (GLuint index, GLint x, GLint y, GLint z);
+GLAPI void APIENTRY glVertexAttribI4i (GLuint index, GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glVertexAttribI1ui (GLuint index, GLuint x);
+GLAPI void APIENTRY glVertexAttribI2ui (GLuint index, GLuint x, GLuint y);
+GLAPI void APIENTRY glVertexAttribI3ui (GLuint index, GLuint x, GLuint y, GLuint z);
+GLAPI void APIENTRY glVertexAttribI4ui (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+GLAPI void APIENTRY glVertexAttribI1iv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI2iv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI3iv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI4iv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI1uiv (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI2uiv (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI3uiv (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI4uiv (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI4bv (GLuint index, const GLbyte *v);
+GLAPI void APIENTRY glVertexAttribI4sv (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttribI4ubv (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttribI4usv (GLuint index, const GLushort *v);
+GLAPI void APIENTRY glGetUniformuiv (GLuint program, GLint location, GLuint *params);
+GLAPI void APIENTRY glBindFragDataLocation (GLuint program, GLuint color, const GLchar *name);
+GLAPI GLint APIENTRY glGetFragDataLocation (GLuint program, const GLchar *name);
+GLAPI void APIENTRY glUniform1ui (GLint location, GLuint v0);
+GLAPI void APIENTRY glUniform2ui (GLint location, GLuint v0, GLuint v1);
+GLAPI void APIENTRY glUniform3ui (GLint location, GLuint v0, GLuint v1, GLuint v2);
+GLAPI void APIENTRY glUniform4ui (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GLAPI void APIENTRY glUniform1uiv (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glUniform2uiv (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glUniform3uiv (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glUniform4uiv (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glTexParameterIiv (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glTexParameterIuiv (GLenum target, GLenum pname, const GLuint *params);
+GLAPI void APIENTRY glGetTexParameterIiv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetTexParameterIuiv (GLenum target, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glClearBufferiv (GLenum buffer, GLint drawbuffer, const GLint *value);
+GLAPI void APIENTRY glClearBufferuiv (GLenum buffer, GLint drawbuffer, const GLuint *value);
+GLAPI void APIENTRY glClearBufferfv (GLenum buffer, GLint drawbuffer, const GLfloat *value);
+GLAPI void APIENTRY glClearBufferfi (GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+GLAPI const GLubyte *APIENTRY glGetStringi (GLenum name, GLuint index);
+GLAPI GLboolean APIENTRY glIsRenderbuffer (GLuint renderbuffer);
+GLAPI void APIENTRY glBindRenderbuffer (GLenum target, GLuint renderbuffer);
+GLAPI void APIENTRY glDeleteRenderbuffers (GLsizei n, const GLuint *renderbuffers);
+GLAPI void APIENTRY glGenRenderbuffers (GLsizei n, GLuint *renderbuffers);
+GLAPI void APIENTRY glRenderbufferStorage (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetRenderbufferParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI GLboolean APIENTRY glIsFramebuffer (GLuint framebuffer);
+GLAPI void APIENTRY glBindFramebuffer (GLenum target, GLuint framebuffer);
+GLAPI void APIENTRY glDeleteFramebuffers (GLsizei n, const GLuint *framebuffers);
+GLAPI void APIENTRY glGenFramebuffers (GLsizei n, GLuint *framebuffers);
+GLAPI GLenum APIENTRY glCheckFramebufferStatus (GLenum target);
+GLAPI void APIENTRY glFramebufferTexture1D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GLAPI void APIENTRY glFramebufferTexture2D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GLAPI void APIENTRY glFramebufferTexture3D (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+GLAPI void APIENTRY glFramebufferRenderbuffer (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+GLAPI void APIENTRY glGetFramebufferAttachmentParameteriv (GLenum target, GLenum attachment, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGenerateMipmap (GLenum target);
+GLAPI void APIENTRY glBlitFramebuffer (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+GLAPI void APIENTRY glRenderbufferStorageMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glFramebufferTextureLayer (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+GLAPI void *APIENTRY glMapBufferRange (GLenum target, GLintptr offset, GLsizeiptr length, GLbitfield access);
+GLAPI void APIENTRY glFlushMappedBufferRange (GLenum target, GLintptr offset, GLsizeiptr length);
+GLAPI void APIENTRY glBindVertexArray (GLuint array);
+GLAPI void APIENTRY glDeleteVertexArrays (GLsizei n, const GLuint *arrays);
+GLAPI void APIENTRY glGenVertexArrays (GLsizei n, GLuint *arrays);
+GLAPI GLboolean APIENTRY glIsVertexArray (GLuint array);
+#endif
+#endif /* GL_VERSION_3_0 */
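+
+/* Usage sketch for the GL 3.0 framebuffer-object entry points declared
+ * above: a minimal off-screen render target built from two renderbuffers.
+ * This assumes the entry points have already been resolved (via the
+ * PFNGL*PROC pointer types or GL_GLEXT_PROTOTYPES); the function name and
+ * error handling are illustrative only. Guarded with #if 0 so the header
+ * compiles unchanged. */
+#if 0
+static GLuint make_offscreen_target(GLsizei width, GLsizei height)
+{
+    GLuint fbo, color_rb, depth_rb;
+
+    glGenFramebuffers(1, &fbo);
+    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
+
+    /* Color attachment backed by a renderbuffer. */
+    glGenRenderbuffers(1, &color_rb);
+    glBindRenderbuffer(GL_RENDERBUFFER, color_rb);
+    glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, width, height);
+    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
+                              GL_RENDERBUFFER, color_rb);
+
+    /* Combined depth/stencil attachment, using the packed format above. */
+    glGenRenderbuffers(1, &depth_rb);
+    glBindRenderbuffer(GL_RENDERBUFFER, depth_rb);
+    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, width, height);
+    glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT,
+                              GL_RENDERBUFFER, depth_rb);
+
+    if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
+        return 0; /* caller is expected to delete the objects on failure */
+    return fbo;
+}
+#endif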
+
+#ifndef GL_VERSION_3_1
+#define GL_VERSION_3_1 1
+#define GL_SAMPLER_2D_RECT 0x8B63
+#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64
+#define GL_SAMPLER_BUFFER 0x8DC2
+#define GL_INT_SAMPLER_2D_RECT 0x8DCD
+#define GL_INT_SAMPLER_BUFFER 0x8DD0
+#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8
+#define GL_TEXTURE_BUFFER 0x8C2A
+#define GL_MAX_TEXTURE_BUFFER_SIZE 0x8C2B
+#define GL_TEXTURE_BINDING_BUFFER 0x8C2C
+#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING 0x8C2D
+#define GL_TEXTURE_RECTANGLE 0x84F5
+#define GL_TEXTURE_BINDING_RECTANGLE 0x84F6
+#define GL_PROXY_TEXTURE_RECTANGLE 0x84F7
+#define GL_MAX_RECTANGLE_TEXTURE_SIZE 0x84F8
+#define GL_R8_SNORM 0x8F94
+#define GL_RG8_SNORM 0x8F95
+#define GL_RGB8_SNORM 0x8F96
+#define GL_RGBA8_SNORM 0x8F97
+#define GL_R16_SNORM 0x8F98
+#define GL_RG16_SNORM 0x8F99
+#define GL_RGB16_SNORM 0x8F9A
+#define GL_RGBA16_SNORM 0x8F9B
+#define GL_SIGNED_NORMALIZED 0x8F9C
+#define GL_PRIMITIVE_RESTART 0x8F9D
+#define GL_PRIMITIVE_RESTART_INDEX 0x8F9E
+#define GL_COPY_READ_BUFFER 0x8F36
+#define GL_COPY_WRITE_BUFFER 0x8F37
+#define GL_UNIFORM_BUFFER 0x8A11
+#define GL_UNIFORM_BUFFER_BINDING 0x8A28
+#define GL_UNIFORM_BUFFER_START 0x8A29
+#define GL_UNIFORM_BUFFER_SIZE 0x8A2A
+#define GL_MAX_VERTEX_UNIFORM_BLOCKS 0x8A2B
+#define GL_MAX_GEOMETRY_UNIFORM_BLOCKS 0x8A2C
+#define GL_MAX_FRAGMENT_UNIFORM_BLOCKS 0x8A2D
+#define GL_MAX_COMBINED_UNIFORM_BLOCKS 0x8A2E
+#define GL_MAX_UNIFORM_BUFFER_BINDINGS 0x8A2F
+#define GL_MAX_UNIFORM_BLOCK_SIZE 0x8A30
+#define GL_MAX_COMBINED_VERTEX_UNIFORM_COMPONENTS 0x8A31
+#define GL_MAX_COMBINED_GEOMETRY_UNIFORM_COMPONENTS 0x8A32
+#define GL_MAX_COMBINED_FRAGMENT_UNIFORM_COMPONENTS 0x8A33
+#define GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT 0x8A34
+#define GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH 0x8A35
+#define GL_ACTIVE_UNIFORM_BLOCKS 0x8A36
+#define GL_UNIFORM_TYPE 0x8A37
+#define GL_UNIFORM_SIZE 0x8A38
+#define GL_UNIFORM_NAME_LENGTH 0x8A39
+#define GL_UNIFORM_BLOCK_INDEX 0x8A3A
+#define GL_UNIFORM_OFFSET 0x8A3B
+#define GL_UNIFORM_ARRAY_STRIDE 0x8A3C
+#define GL_UNIFORM_MATRIX_STRIDE 0x8A3D
+#define GL_UNIFORM_IS_ROW_MAJOR 0x8A3E
+#define GL_UNIFORM_BLOCK_BINDING 0x8A3F
+#define GL_UNIFORM_BLOCK_DATA_SIZE 0x8A40
+#define GL_UNIFORM_BLOCK_NAME_LENGTH 0x8A41
+#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS 0x8A42
+#define GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES 0x8A43
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_VERTEX_SHADER 0x8A44
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_GEOMETRY_SHADER 0x8A45
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_FRAGMENT_SHADER 0x8A46
+#define GL_INVALID_INDEX 0xFFFFFFFFu
+typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount);
+typedef void (APIENTRYP PFNGLTEXBUFFERPROC) (GLenum target, GLenum internalformat, GLuint buffer);
+typedef void (APIENTRYP PFNGLPRIMITIVERESTARTINDEXPROC) (GLuint index);
+typedef void (APIENTRYP PFNGLCOPYBUFFERSUBDATAPROC) (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLGETUNIFORMINDICESPROC) (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices);
+typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMSIVPROC) (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMNAMEPROC) (GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformName);
+typedef GLuint (APIENTRYP PFNGLGETUNIFORMBLOCKINDEXPROC) (GLuint program, const GLchar *uniformBlockName);
+typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKIVPROC) (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMBLOCKNAMEPROC) (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName);
+typedef void (APIENTRYP PFNGLUNIFORMBLOCKBINDINGPROC) (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawArraysInstanced (GLenum mode, GLint first, GLsizei count, GLsizei instancecount);
+GLAPI void APIENTRY glDrawElementsInstanced (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount);
+GLAPI void APIENTRY glTexBuffer (GLenum target, GLenum internalformat, GLuint buffer);
+GLAPI void APIENTRY glPrimitiveRestartIndex (GLuint index);
+GLAPI void APIENTRY glCopyBufferSubData (GLenum readTarget, GLenum writeTarget, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+GLAPI void APIENTRY glGetUniformIndices (GLuint program, GLsizei uniformCount, const GLchar *const*uniformNames, GLuint *uniformIndices);
+GLAPI void APIENTRY glGetActiveUniformsiv (GLuint program, GLsizei uniformCount, const GLuint *uniformIndices, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetActiveUniformName (GLuint program, GLuint uniformIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformName);
+GLAPI GLuint APIENTRY glGetUniformBlockIndex (GLuint program, const GLchar *uniformBlockName);
+GLAPI void APIENTRY glGetActiveUniformBlockiv (GLuint program, GLuint uniformBlockIndex, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetActiveUniformBlockName (GLuint program, GLuint uniformBlockIndex, GLsizei bufSize, GLsizei *length, GLchar *uniformBlockName);
+GLAPI void APIENTRY glUniformBlockBinding (GLuint program, GLuint uniformBlockIndex, GLuint uniformBlockBinding);
+#endif
+#endif /* GL_VERSION_3_1 */
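+
+/* Usage sketch for the GL 3.1 uniform-block introspection entry points
+ * above: binding a named block to binding point 0. "SceneData", "program"
+ * and "scene_ubo" are hypothetical names; glBindBufferBase comes from the
+ * GL 3.0 section earlier in this header. Compiled out with #if 0. */
+#if 0
+GLuint block = glGetUniformBlockIndex(program, "SceneData");
+if (block != GL_INVALID_INDEX) {
+    /* Route the block through binding point 0, then attach the buffer. */
+    glUniformBlockBinding(program, block, 0);
+    glBindBufferBase(GL_UNIFORM_BUFFER, 0, scene_ubo);
+}
+#endif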
+
+#ifndef GL_VERSION_3_2
+#define GL_VERSION_3_2 1
+typedef struct __GLsync *GLsync;
+typedef khronos_uint64_t GLuint64;
+typedef khronos_int64_t GLint64;
+#define GL_CONTEXT_CORE_PROFILE_BIT 0x00000001
+#define GL_CONTEXT_COMPATIBILITY_PROFILE_BIT 0x00000002
+#define GL_LINES_ADJACENCY 0x000A
+#define GL_LINE_STRIP_ADJACENCY 0x000B
+#define GL_TRIANGLES_ADJACENCY 0x000C
+#define GL_TRIANGLE_STRIP_ADJACENCY 0x000D
+#define GL_PROGRAM_POINT_SIZE 0x8642
+#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS 0x8C29
+#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED 0x8DA7
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS 0x8DA8
+#define GL_GEOMETRY_SHADER 0x8DD9
+#define GL_GEOMETRY_VERTICES_OUT 0x8916
+#define GL_GEOMETRY_INPUT_TYPE 0x8917
+#define GL_GEOMETRY_OUTPUT_TYPE 0x8918
+#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS 0x8DDF
+#define GL_MAX_GEOMETRY_OUTPUT_VERTICES 0x8DE0
+#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 0x8DE1
+#define GL_MAX_VERTEX_OUTPUT_COMPONENTS 0x9122
+#define GL_MAX_GEOMETRY_INPUT_COMPONENTS 0x9123
+#define GL_MAX_GEOMETRY_OUTPUT_COMPONENTS 0x9124
+#define GL_MAX_FRAGMENT_INPUT_COMPONENTS 0x9125
+#define GL_CONTEXT_PROFILE_MASK 0x9126
+#define GL_DEPTH_CLAMP 0x864F
+#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION 0x8E4C
+#define GL_FIRST_VERTEX_CONVENTION 0x8E4D
+#define GL_LAST_VERTEX_CONVENTION 0x8E4E
+#define GL_PROVOKING_VERTEX 0x8E4F
+#define GL_TEXTURE_CUBE_MAP_SEAMLESS 0x884F
+#define GL_MAX_SERVER_WAIT_TIMEOUT 0x9111
+#define GL_OBJECT_TYPE 0x9112
+#define GL_SYNC_CONDITION 0x9113
+#define GL_SYNC_STATUS 0x9114
+#define GL_SYNC_FLAGS 0x9115
+#define GL_SYNC_FENCE 0x9116
+#define GL_SYNC_GPU_COMMANDS_COMPLETE 0x9117
+#define GL_UNSIGNALED 0x9118
+#define GL_SIGNALED 0x9119
+#define GL_ALREADY_SIGNALED 0x911A
+#define GL_TIMEOUT_EXPIRED 0x911B
+#define GL_CONDITION_SATISFIED 0x911C
+#define GL_WAIT_FAILED 0x911D
+#define GL_TIMEOUT_IGNORED 0xFFFFFFFFFFFFFFFFull
+#define GL_SYNC_FLUSH_COMMANDS_BIT 0x00000001
+#define GL_SAMPLE_POSITION 0x8E50
+#define GL_SAMPLE_MASK 0x8E51
+#define GL_SAMPLE_MASK_VALUE 0x8E52
+#define GL_MAX_SAMPLE_MASK_WORDS 0x8E59
+#define GL_TEXTURE_2D_MULTISAMPLE 0x9100
+#define GL_PROXY_TEXTURE_2D_MULTISAMPLE 0x9101
+#define GL_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9102
+#define GL_PROXY_TEXTURE_2D_MULTISAMPLE_ARRAY 0x9103
+#define GL_TEXTURE_BINDING_2D_MULTISAMPLE 0x9104
+#define GL_TEXTURE_BINDING_2D_MULTISAMPLE_ARRAY 0x9105
+#define GL_TEXTURE_SAMPLES 0x9106
+#define GL_TEXTURE_FIXED_SAMPLE_LOCATIONS 0x9107
+#define GL_SAMPLER_2D_MULTISAMPLE 0x9108
+#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A
+#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B
+#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C
+#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D
+#define GL_MAX_COLOR_TEXTURE_SAMPLES 0x910E
+#define GL_MAX_DEPTH_TEXTURE_SAMPLES 0x910F
+#define GL_MAX_INTEGER_SAMPLES 0x9110
+typedef void (APIENTRYP PFNGLDRAWELEMENTSBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSBASEVERTEXPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSBASEVERTEXPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount, const GLint *basevertex);
+typedef void (APIENTRYP PFNGLPROVOKINGVERTEXPROC) (GLenum mode);
+typedef GLsync (APIENTRYP PFNGLFENCESYNCPROC) (GLenum condition, GLbitfield flags);
+typedef GLboolean (APIENTRYP PFNGLISSYNCPROC) (GLsync sync);
+typedef void (APIENTRYP PFNGLDELETESYNCPROC) (GLsync sync);
+typedef GLenum (APIENTRYP PFNGLCLIENTWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout);
+typedef void (APIENTRYP PFNGLWAITSYNCPROC) (GLsync sync, GLbitfield flags, GLuint64 timeout);
+typedef void (APIENTRYP PFNGLGETINTEGER64VPROC) (GLenum pname, GLint64 *data);
+typedef void (APIENTRYP PFNGLGETSYNCIVPROC) (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
+typedef void (APIENTRYP PFNGLGETINTEGER64I_VPROC) (GLenum target, GLuint index, GLint64 *data);
+typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERI64VPROC) (GLenum target, GLenum pname, GLint64 *params);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLTEXIMAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLTEXIMAGE3DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLGETMULTISAMPLEFVPROC) (GLenum pname, GLuint index, GLfloat *val);
+typedef void (APIENTRYP PFNGLSAMPLEMASKIPROC) (GLuint maskNumber, GLbitfield mask);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawElementsBaseVertex (GLenum mode, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+GLAPI void APIENTRY glDrawRangeElementsBaseVertex (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices, GLint basevertex);
+GLAPI void APIENTRY glDrawElementsInstancedBaseVertex (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex);
+GLAPI void APIENTRY glMultiDrawElementsBaseVertex (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei drawcount, const GLint *basevertex);
+GLAPI void APIENTRY glProvokingVertex (GLenum mode);
+GLAPI GLsync APIENTRY glFenceSync (GLenum condition, GLbitfield flags);
+GLAPI GLboolean APIENTRY glIsSync (GLsync sync);
+GLAPI void APIENTRY glDeleteSync (GLsync sync);
+GLAPI GLenum APIENTRY glClientWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout);
+GLAPI void APIENTRY glWaitSync (GLsync sync, GLbitfield flags, GLuint64 timeout);
+GLAPI void APIENTRY glGetInteger64v (GLenum pname, GLint64 *data);
+GLAPI void APIENTRY glGetSynciv (GLsync sync, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
+GLAPI void APIENTRY glGetInteger64i_v (GLenum target, GLuint index, GLint64 *data);
+GLAPI void APIENTRY glGetBufferParameteri64v (GLenum target, GLenum pname, GLint64 *params);
+GLAPI void APIENTRY glFramebufferTexture (GLenum target, GLenum attachment, GLuint texture, GLint level);
+GLAPI void APIENTRY glTexImage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glTexImage3DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glGetMultisamplefv (GLenum pname, GLuint index, GLfloat *val);
+GLAPI void APIENTRY glSampleMaski (GLuint maskNumber, GLbitfield mask);
+#endif
+#endif /* GL_VERSION_3_2 */
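+
+/* Usage sketch for the GL 3.2 sync objects above: the canonical client-side
+ * wait loop. GL_SYNC_FLUSH_COMMANDS_BIT ensures the fence is actually
+ * submitted on the first wait; the timeout value is illustrative.
+ * Compiled out with #if 0. */
+#if 0
+GLsync fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+GLenum status;
+do {
+    status = glClientWaitSync(fence, GL_SYNC_FLUSH_COMMANDS_BIT,
+                              1000000 /* 1 ms, in nanoseconds */);
+} while (status == GL_TIMEOUT_EXPIRED);
+/* status is now GL_ALREADY_SIGNALED, GL_CONDITION_SATISFIED, or
+ * GL_WAIT_FAILED. */
+glDeleteSync(fence);
+#endif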
+
+#ifndef GL_VERSION_3_3
+#define GL_VERSION_3_3 1
+#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR 0x88FE
+#define GL_SRC1_COLOR 0x88F9
+#define GL_ONE_MINUS_SRC1_COLOR 0x88FA
+#define GL_ONE_MINUS_SRC1_ALPHA 0x88FB
+#define GL_MAX_DUAL_SOURCE_DRAW_BUFFERS 0x88FC
+#define GL_ANY_SAMPLES_PASSED 0x8C2F
+#define GL_SAMPLER_BINDING 0x8919
+#define GL_RGB10_A2UI 0x906F
+#define GL_TEXTURE_SWIZZLE_R 0x8E42
+#define GL_TEXTURE_SWIZZLE_G 0x8E43
+#define GL_TEXTURE_SWIZZLE_B 0x8E44
+#define GL_TEXTURE_SWIZZLE_A 0x8E45
+#define GL_TEXTURE_SWIZZLE_RGBA 0x8E46
+#define GL_TIME_ELAPSED 0x88BF
+#define GL_TIMESTAMP 0x8E28
+#define GL_INT_2_10_10_10_REV 0x8D9F
+typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONINDEXEDPROC) (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name);
+typedef GLint (APIENTRYP PFNGLGETFRAGDATAINDEXPROC) (GLuint program, const GLchar *name);
+typedef void (APIENTRYP PFNGLGENSAMPLERSPROC) (GLsizei count, GLuint *samplers);
+typedef void (APIENTRYP PFNGLDELETESAMPLERSPROC) (GLsizei count, const GLuint *samplers);
+typedef GLboolean (APIENTRYP PFNGLISSAMPLERPROC) (GLuint sampler);
+typedef void (APIENTRYP PFNGLBINDSAMPLERPROC) (GLuint unit, GLuint sampler);
+typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIPROC) (GLuint sampler, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, const GLint *param);
+typedef void (APIENTRYP PFNGLSAMPLERPARAMETERFPROC) (GLuint sampler, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, const GLfloat *param);
+typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, const GLint *param);
+typedef void (APIENTRYP PFNGLSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, const GLuint *param);
+typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIVPROC) (GLuint sampler, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIIVPROC) (GLuint sampler, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERFVPROC) (GLuint sampler, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETSAMPLERPARAMETERIUIVPROC) (GLuint sampler, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLQUERYCOUNTERPROC) (GLuint id, GLenum target);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTI64VPROC) (GLuint id, GLenum pname, GLint64 *params);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTUI64VPROC) (GLuint id, GLenum pname, GLuint64 *params);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBDIVISORPROC) (GLuint index, GLuint divisor);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP1UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP1UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP2UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP2UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP3UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP3UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP4UIPROC) (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBP4UIVPROC) (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+typedef void (APIENTRYP PFNGLVERTEXP2UIPROC) (GLenum type, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXP2UIVPROC) (GLenum type, const GLuint *value);
+typedef void (APIENTRYP PFNGLVERTEXP3UIPROC) (GLenum type, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXP3UIVPROC) (GLenum type, const GLuint *value);
+typedef void (APIENTRYP PFNGLVERTEXP4UIPROC) (GLenum type, GLuint value);
+typedef void (APIENTRYP PFNGLVERTEXP4UIVPROC) (GLenum type, const GLuint *value);
+typedef void (APIENTRYP PFNGLTEXCOORDP1UIPROC) (GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP1UIVPROC) (GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP2UIPROC) (GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP2UIVPROC) (GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP3UIPROC) (GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP3UIVPROC) (GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP4UIPROC) (GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLTEXCOORDP4UIVPROC) (GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP1UIPROC) (GLenum texture, GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP1UIVPROC) (GLenum texture, GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP2UIPROC) (GLenum texture, GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP2UIVPROC) (GLenum texture, GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP3UIPROC) (GLenum texture, GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP3UIVPROC) (GLenum texture, GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP4UIPROC) (GLenum texture, GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDP4UIVPROC) (GLenum texture, GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLNORMALP3UIPROC) (GLenum type, GLuint coords);
+typedef void (APIENTRYP PFNGLNORMALP3UIVPROC) (GLenum type, const GLuint *coords);
+typedef void (APIENTRYP PFNGLCOLORP3UIPROC) (GLenum type, GLuint color);
+typedef void (APIENTRYP PFNGLCOLORP3UIVPROC) (GLenum type, const GLuint *color);
+typedef void (APIENTRYP PFNGLCOLORP4UIPROC) (GLenum type, GLuint color);
+typedef void (APIENTRYP PFNGLCOLORP4UIVPROC) (GLenum type, const GLuint *color);
+typedef void (APIENTRYP PFNGLSECONDARYCOLORP3UIPROC) (GLenum type, GLuint color);
+typedef void (APIENTRYP PFNGLSECONDARYCOLORP3UIVPROC) (GLenum type, const GLuint *color);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindFragDataLocationIndexed (GLuint program, GLuint colorNumber, GLuint index, const GLchar *name);
+GLAPI GLint APIENTRY glGetFragDataIndex (GLuint program, const GLchar *name);
+GLAPI void APIENTRY glGenSamplers (GLsizei count, GLuint *samplers);
+GLAPI void APIENTRY glDeleteSamplers (GLsizei count, const GLuint *samplers);
+GLAPI GLboolean APIENTRY glIsSampler (GLuint sampler);
+GLAPI void APIENTRY glBindSampler (GLuint unit, GLuint sampler);
+GLAPI void APIENTRY glSamplerParameteri (GLuint sampler, GLenum pname, GLint param);
+GLAPI void APIENTRY glSamplerParameteriv (GLuint sampler, GLenum pname, const GLint *param);
+GLAPI void APIENTRY glSamplerParameterf (GLuint sampler, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glSamplerParameterfv (GLuint sampler, GLenum pname, const GLfloat *param);
+GLAPI void APIENTRY glSamplerParameterIiv (GLuint sampler, GLenum pname, const GLint *param);
+GLAPI void APIENTRY glSamplerParameterIuiv (GLuint sampler, GLenum pname, const GLuint *param);
+GLAPI void APIENTRY glGetSamplerParameteriv (GLuint sampler, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetSamplerParameterIiv (GLuint sampler, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetSamplerParameterfv (GLuint sampler, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetSamplerParameterIuiv (GLuint sampler, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glQueryCounter (GLuint id, GLenum target);
+GLAPI void APIENTRY glGetQueryObjecti64v (GLuint id, GLenum pname, GLint64 *params);
+GLAPI void APIENTRY glGetQueryObjectui64v (GLuint id, GLenum pname, GLuint64 *params);
+GLAPI void APIENTRY glVertexAttribDivisor (GLuint index, GLuint divisor);
+GLAPI void APIENTRY glVertexAttribP1ui (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+GLAPI void APIENTRY glVertexAttribP1uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+GLAPI void APIENTRY glVertexAttribP2ui (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+GLAPI void APIENTRY glVertexAttribP2uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+GLAPI void APIENTRY glVertexAttribP3ui (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+GLAPI void APIENTRY glVertexAttribP3uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+GLAPI void APIENTRY glVertexAttribP4ui (GLuint index, GLenum type, GLboolean normalized, GLuint value);
+GLAPI void APIENTRY glVertexAttribP4uiv (GLuint index, GLenum type, GLboolean normalized, const GLuint *value);
+GLAPI void APIENTRY glVertexP2ui (GLenum type, GLuint value);
+GLAPI void APIENTRY glVertexP2uiv (GLenum type, const GLuint *value);
+GLAPI void APIENTRY glVertexP3ui (GLenum type, GLuint value);
+GLAPI void APIENTRY glVertexP3uiv (GLenum type, const GLuint *value);
+GLAPI void APIENTRY glVertexP4ui (GLenum type, GLuint value);
+GLAPI void APIENTRY glVertexP4uiv (GLenum type, const GLuint *value);
+GLAPI void APIENTRY glTexCoordP1ui (GLenum type, GLuint coords);
+GLAPI void APIENTRY glTexCoordP1uiv (GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glTexCoordP2ui (GLenum type, GLuint coords);
+GLAPI void APIENTRY glTexCoordP2uiv (GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glTexCoordP3ui (GLenum type, GLuint coords);
+GLAPI void APIENTRY glTexCoordP3uiv (GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glTexCoordP4ui (GLenum type, GLuint coords);
+GLAPI void APIENTRY glTexCoordP4uiv (GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glMultiTexCoordP1ui (GLenum texture, GLenum type, GLuint coords);
+GLAPI void APIENTRY glMultiTexCoordP1uiv (GLenum texture, GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glMultiTexCoordP2ui (GLenum texture, GLenum type, GLuint coords);
+GLAPI void APIENTRY glMultiTexCoordP2uiv (GLenum texture, GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glMultiTexCoordP3ui (GLenum texture, GLenum type, GLuint coords);
+GLAPI void APIENTRY glMultiTexCoordP3uiv (GLenum texture, GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glMultiTexCoordP4ui (GLenum texture, GLenum type, GLuint coords);
+GLAPI void APIENTRY glMultiTexCoordP4uiv (GLenum texture, GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glNormalP3ui (GLenum type, GLuint coords);
+GLAPI void APIENTRY glNormalP3uiv (GLenum type, const GLuint *coords);
+GLAPI void APIENTRY glColorP3ui (GLenum type, GLuint color);
+GLAPI void APIENTRY glColorP3uiv (GLenum type, const GLuint *color);
+GLAPI void APIENTRY glColorP4ui (GLenum type, GLuint color);
+GLAPI void APIENTRY glColorP4uiv (GLenum type, const GLuint *color);
+GLAPI void APIENTRY glSecondaryColorP3ui (GLenum type, GLuint color);
+GLAPI void APIENTRY glSecondaryColorP3uiv (GLenum type, const GLuint *color);
+#endif
+#endif /* GL_VERSION_3_3 */
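+
+/* Usage sketch for the GL 3.3 sampler objects above: sampling state that is
+ * decoupled from any particular texture and applied per texture unit.
+ * "unit" is a hypothetical texture-unit index; the filter and wrap enums
+ * come from earlier sections of this header. Compiled out with #if 0. */
+#if 0
+GLuint sampler;
+glGenSamplers(1, &sampler);
+glSamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+glSamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+glSamplerParameteri(sampler, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
+/* Overrides the bound texture's own sampling state on this unit. */
+glBindSampler(unit, sampler);
+#endif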
+
+#ifndef GL_VERSION_4_0
+#define GL_VERSION_4_0 1
+#define GL_SAMPLE_SHADING 0x8C36
+#define GL_MIN_SAMPLE_SHADING_VALUE 0x8C37
+#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5E
+#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET 0x8E5F
+#define GL_TEXTURE_CUBE_MAP_ARRAY 0x9009
+#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY 0x900A
+#define GL_PROXY_TEXTURE_CUBE_MAP_ARRAY 0x900B
+#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F
+#define GL_DRAW_INDIRECT_BUFFER 0x8F3F
+#define GL_DRAW_INDIRECT_BUFFER_BINDING 0x8F43
+#define GL_GEOMETRY_SHADER_INVOCATIONS 0x887F
+#define GL_MAX_GEOMETRY_SHADER_INVOCATIONS 0x8E5A
+#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET 0x8E5B
+#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET 0x8E5C
+#define GL_FRAGMENT_INTERPOLATION_OFFSET_BITS 0x8E5D
+#define GL_MAX_VERTEX_STREAMS 0x8E71
+#define GL_DOUBLE_VEC2 0x8FFC
+#define GL_DOUBLE_VEC3 0x8FFD
+#define GL_DOUBLE_VEC4 0x8FFE
+#define GL_DOUBLE_MAT2 0x8F46
+#define GL_DOUBLE_MAT3 0x8F47
+#define GL_DOUBLE_MAT4 0x8F48
+#define GL_DOUBLE_MAT2x3 0x8F49
+#define GL_DOUBLE_MAT2x4 0x8F4A
+#define GL_DOUBLE_MAT3x2 0x8F4B
+#define GL_DOUBLE_MAT3x4 0x8F4C
+#define GL_DOUBLE_MAT4x2 0x8F4D
+#define GL_DOUBLE_MAT4x3 0x8F4E
+#define GL_ACTIVE_SUBROUTINES 0x8DE5
+#define GL_ACTIVE_SUBROUTINE_UNIFORMS 0x8DE6
+#define GL_ACTIVE_SUBROUTINE_UNIFORM_LOCATIONS 0x8E47
+#define GL_ACTIVE_SUBROUTINE_MAX_LENGTH 0x8E48
+#define GL_ACTIVE_SUBROUTINE_UNIFORM_MAX_LENGTH 0x8E49
+#define GL_MAX_SUBROUTINES 0x8DE7
+#define GL_MAX_SUBROUTINE_UNIFORM_LOCATIONS 0x8DE8
+#define GL_NUM_COMPATIBLE_SUBROUTINES 0x8E4A
+#define GL_COMPATIBLE_SUBROUTINES 0x8E4B
+#define GL_PATCHES 0x000E
+#define GL_PATCH_VERTICES 0x8E72
+#define GL_PATCH_DEFAULT_INNER_LEVEL 0x8E73
+#define GL_PATCH_DEFAULT_OUTER_LEVEL 0x8E74
+#define GL_TESS_CONTROL_OUTPUT_VERTICES 0x8E75
+#define GL_TESS_GEN_MODE 0x8E76
+#define GL_TESS_GEN_SPACING 0x8E77
+#define GL_TESS_GEN_VERTEX_ORDER 0x8E78
+#define GL_TESS_GEN_POINT_MODE 0x8E79
+#define GL_ISOLINES 0x8E7A
+#define GL_FRACTIONAL_ODD 0x8E7B
+#define GL_FRACTIONAL_EVEN 0x8E7C
+#define GL_MAX_PATCH_VERTICES 0x8E7D
+#define GL_MAX_TESS_GEN_LEVEL 0x8E7E
+#define GL_MAX_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E7F
+#define GL_MAX_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E80
+#define GL_MAX_TESS_CONTROL_TEXTURE_IMAGE_UNITS 0x8E81
+#define GL_MAX_TESS_EVALUATION_TEXTURE_IMAGE_UNITS 0x8E82
+#define GL_MAX_TESS_CONTROL_OUTPUT_COMPONENTS 0x8E83
+#define GL_MAX_TESS_PATCH_COMPONENTS 0x8E84
+#define GL_MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS 0x8E85
+#define GL_MAX_TESS_EVALUATION_OUTPUT_COMPONENTS 0x8E86
+#define GL_MAX_TESS_CONTROL_UNIFORM_BLOCKS 0x8E89
+#define GL_MAX_TESS_EVALUATION_UNIFORM_BLOCKS 0x8E8A
+#define GL_MAX_TESS_CONTROL_INPUT_COMPONENTS 0x886C
+#define GL_MAX_TESS_EVALUATION_INPUT_COMPONENTS 0x886D
+#define GL_MAX_COMBINED_TESS_CONTROL_UNIFORM_COMPONENTS 0x8E1E
+#define GL_MAX_COMBINED_TESS_EVALUATION_UNIFORM_COMPONENTS 0x8E1F
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_CONTROL_SHADER 0x84F0
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_TESS_EVALUATION_SHADER 0x84F1
+#define GL_TESS_EVALUATION_SHADER 0x8E87
+#define GL_TESS_CONTROL_SHADER 0x8E88
+#define GL_TRANSFORM_FEEDBACK 0x8E22
+#define GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED 0x8E23
+#define GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE 0x8E24
+#define GL_TRANSFORM_FEEDBACK_BINDING 0x8E25
+#define GL_MAX_TRANSFORM_FEEDBACK_BUFFERS 0x8E70
+typedef void (APIENTRYP PFNGLMINSAMPLESHADINGPROC) (GLfloat value);
+typedef void (APIENTRYP PFNGLBLENDEQUATIONIPROC) (GLuint buf, GLenum mode);
+typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEIPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+typedef void (APIENTRYP PFNGLBLENDFUNCIPROC) (GLuint buf, GLenum src, GLenum dst);
+typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEIPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+typedef void (APIENTRYP PFNGLDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect);
+typedef void (APIENTRYP PFNGLUNIFORM1DPROC) (GLint location, GLdouble x);
+typedef void (APIENTRYP PFNGLUNIFORM2DPROC) (GLint location, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLUNIFORM3DPROC) (GLint location, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLUNIFORM4DPROC) (GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLUNIFORM1DVPROC) (GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORM2DVPROC) (GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORM3DVPROC) (GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORM4DVPROC) (GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X3DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2X4DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X2DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3X4DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X2DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4X3DVPROC) (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLGETUNIFORMDVPROC) (GLuint program, GLint location, GLdouble *params);
+typedef GLint (APIENTRYP PFNGLGETSUBROUTINEUNIFORMLOCATIONPROC) (GLuint program, GLenum shadertype, const GLchar *name);
+typedef GLuint (APIENTRYP PFNGLGETSUBROUTINEINDEXPROC) (GLuint program, GLenum shadertype, const GLchar *name);
+typedef void (APIENTRYP PFNGLGETACTIVESUBROUTINEUNIFORMIVPROC) (GLuint program, GLenum shadertype, GLuint index, GLenum pname, GLint *values);
+typedef void (APIENTRYP PFNGLGETACTIVESUBROUTINEUNIFORMNAMEPROC) (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name);
+typedef void (APIENTRYP PFNGLGETACTIVESUBROUTINENAMEPROC) (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name);
+typedef void (APIENTRYP PFNGLUNIFORMSUBROUTINESUIVPROC) (GLenum shadertype, GLsizei count, const GLuint *indices);
+typedef void (APIENTRYP PFNGLGETUNIFORMSUBROUTINEUIVPROC) (GLenum shadertype, GLint location, GLuint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMSTAGEIVPROC) (GLuint program, GLenum shadertype, GLenum pname, GLint *values);
+typedef void (APIENTRYP PFNGLPATCHPARAMETERIPROC) (GLenum pname, GLint value);
+typedef void (APIENTRYP PFNGLPATCHPARAMETERFVPROC) (GLenum pname, const GLfloat *values);
+typedef void (APIENTRYP PFNGLBINDTRANSFORMFEEDBACKPROC) (GLenum target, GLuint id);
+typedef void (APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSPROC) (GLsizei n, const GLuint *ids);
+typedef void (APIENTRYP PFNGLGENTRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids);
+typedef GLboolean (APIENTRYP PFNGLISTRANSFORMFEEDBACKPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKPROC) (void);
+typedef void (APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKPROC) (void);
+typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKPROC) (GLenum mode, GLuint id);
+typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKSTREAMPROC) (GLenum mode, GLuint id, GLuint stream);
+typedef void (APIENTRYP PFNGLBEGINQUERYINDEXEDPROC) (GLenum target, GLuint index, GLuint id);
+typedef void (APIENTRYP PFNGLENDQUERYINDEXEDPROC) (GLenum target, GLuint index);
+typedef void (APIENTRYP PFNGLGETQUERYINDEXEDIVPROC) (GLenum target, GLuint index, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMinSampleShading (GLfloat value);
+GLAPI void APIENTRY glBlendEquationi (GLuint buf, GLenum mode);
+GLAPI void APIENTRY glBlendEquationSeparatei (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+GLAPI void APIENTRY glBlendFunci (GLuint buf, GLenum src, GLenum dst);
+GLAPI void APIENTRY glBlendFuncSeparatei (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+GLAPI void APIENTRY glDrawArraysIndirect (GLenum mode, const void *indirect);
+GLAPI void APIENTRY glDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect);
+GLAPI void APIENTRY glUniform1d (GLint location, GLdouble x);
+GLAPI void APIENTRY glUniform2d (GLint location, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glUniform3d (GLint location, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glUniform4d (GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glUniform1dv (GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glUniform2dv (GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glUniform3dv (GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glUniform4dv (GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix2dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix3dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix4dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix2x3dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix2x4dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix3x2dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix3x4dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix4x2dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glUniformMatrix4x3dv (GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glGetUniformdv (GLuint program, GLint location, GLdouble *params);
+GLAPI GLint APIENTRY glGetSubroutineUniformLocation (GLuint program, GLenum shadertype, const GLchar *name);
+GLAPI GLuint APIENTRY glGetSubroutineIndex (GLuint program, GLenum shadertype, const GLchar *name);
+GLAPI void APIENTRY glGetActiveSubroutineUniformiv (GLuint program, GLenum shadertype, GLuint index, GLenum pname, GLint *values);
+GLAPI void APIENTRY glGetActiveSubroutineUniformName (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name);
+GLAPI void APIENTRY glGetActiveSubroutineName (GLuint program, GLenum shadertype, GLuint index, GLsizei bufsize, GLsizei *length, GLchar *name);
+GLAPI void APIENTRY glUniformSubroutinesuiv (GLenum shadertype, GLsizei count, const GLuint *indices);
+GLAPI void APIENTRY glGetUniformSubroutineuiv (GLenum shadertype, GLint location, GLuint *params);
+GLAPI void APIENTRY glGetProgramStageiv (GLuint program, GLenum shadertype, GLenum pname, GLint *values);
+GLAPI void APIENTRY glPatchParameteri (GLenum pname, GLint value);
+GLAPI void APIENTRY glPatchParameterfv (GLenum pname, const GLfloat *values);
+GLAPI void APIENTRY glBindTransformFeedback (GLenum target, GLuint id);
+GLAPI void APIENTRY glDeleteTransformFeedbacks (GLsizei n, const GLuint *ids);
+GLAPI void APIENTRY glGenTransformFeedbacks (GLsizei n, GLuint *ids);
+GLAPI GLboolean APIENTRY glIsTransformFeedback (GLuint id);
+GLAPI void APIENTRY glPauseTransformFeedback (void);
+GLAPI void APIENTRY glResumeTransformFeedback (void);
+GLAPI void APIENTRY glDrawTransformFeedback (GLenum mode, GLuint id);
+GLAPI void APIENTRY glDrawTransformFeedbackStream (GLenum mode, GLuint id, GLuint stream);
+GLAPI void APIENTRY glBeginQueryIndexed (GLenum target, GLuint index, GLuint id);
+GLAPI void APIENTRY glEndQueryIndexed (GLenum target, GLuint index);
+GLAPI void APIENTRY glGetQueryIndexediv (GLenum target, GLuint index, GLenum pname, GLint *params);
+#endif
+#endif /* GL_VERSION_4_0 */
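+
+/* Usage sketch for the GL 4.0 transform-feedback objects above: capturing
+ * geometry once and replaying it without a CPU-visible primitive count.
+ * "xfb" is a hypothetical name, and the feedback buffers are assumed to be
+ * bound already; glBeginTransformFeedback/glEndTransformFeedback are
+ * declared in the GL 3.0 section earlier in this header. Compiled out
+ * with #if 0. */
+#if 0
+GLuint xfb;
+glGenTransformFeedbacks(1, &xfb);
+glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, xfb);
+glBeginTransformFeedback(GL_POINTS);
+/* ... issue draw calls that write into the bound feedback buffers ... */
+glEndTransformFeedback();
+/* Later: redraw exactly what was captured into xfb. */
+glDrawTransformFeedback(GL_POINTS, xfb);
+#endif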
+
+#ifndef GL_VERSION_4_1
+#define GL_VERSION_4_1 1
+#define GL_FIXED 0x140C
+#define GL_IMPLEMENTATION_COLOR_READ_TYPE 0x8B9A
+#define GL_IMPLEMENTATION_COLOR_READ_FORMAT 0x8B9B
+#define GL_LOW_FLOAT 0x8DF0
+#define GL_MEDIUM_FLOAT 0x8DF1
+#define GL_HIGH_FLOAT 0x8DF2
+#define GL_LOW_INT 0x8DF3
+#define GL_MEDIUM_INT 0x8DF4
+#define GL_HIGH_INT 0x8DF5
+#define GL_SHADER_COMPILER 0x8DFA
+#define GL_SHADER_BINARY_FORMATS 0x8DF8
+#define GL_NUM_SHADER_BINARY_FORMATS 0x8DF9
+#define GL_MAX_VERTEX_UNIFORM_VECTORS 0x8DFB
+#define GL_MAX_VARYING_VECTORS 0x8DFC
+#define GL_MAX_FRAGMENT_UNIFORM_VECTORS 0x8DFD
+#define GL_RGB565 0x8D62
+#define GL_PROGRAM_BINARY_RETRIEVABLE_HINT 0x8257
+#define GL_PROGRAM_BINARY_LENGTH 0x8741
+#define GL_NUM_PROGRAM_BINARY_FORMATS 0x87FE
+#define GL_PROGRAM_BINARY_FORMATS 0x87FF
+#define GL_VERTEX_SHADER_BIT 0x00000001
+#define GL_FRAGMENT_SHADER_BIT 0x00000002
+#define GL_GEOMETRY_SHADER_BIT 0x00000004
+#define GL_TESS_CONTROL_SHADER_BIT 0x00000008
+#define GL_TESS_EVALUATION_SHADER_BIT 0x00000010
+#define GL_ALL_SHADER_BITS 0xFFFFFFFF
+#define GL_PROGRAM_SEPARABLE 0x8258
+#define GL_ACTIVE_PROGRAM 0x8259
+#define GL_PROGRAM_PIPELINE_BINDING 0x825A
+#define GL_MAX_VIEWPORTS 0x825B
+#define GL_VIEWPORT_SUBPIXEL_BITS 0x825C
+#define GL_VIEWPORT_BOUNDS_RANGE 0x825D
+#define GL_LAYER_PROVOKING_VERTEX 0x825E
+#define GL_VIEWPORT_INDEX_PROVOKING_VERTEX 0x825F
+#define GL_UNDEFINED_VERTEX 0x8260
+typedef void (APIENTRYP PFNGLRELEASESHADERCOMPILERPROC) (void);
+typedef void (APIENTRYP PFNGLSHADERBINARYPROC) (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length);
+typedef void (APIENTRYP PFNGLGETSHADERPRECISIONFORMATPROC) (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
+typedef void (APIENTRYP PFNGLDEPTHRANGEFPROC) (GLfloat n, GLfloat f);
+typedef void (APIENTRYP PFNGLCLEARDEPTHFPROC) (GLfloat d);
+typedef void (APIENTRYP PFNGLGETPROGRAMBINARYPROC) (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
+typedef void (APIENTRYP PFNGLPROGRAMBINARYPROC) (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETERIPROC) (GLuint program, GLenum pname, GLint value);
+typedef void (APIENTRYP PFNGLUSEPROGRAMSTAGESPROC) (GLuint pipeline, GLbitfield stages, GLuint program);
+typedef void (APIENTRYP PFNGLACTIVESHADERPROGRAMPROC) (GLuint pipeline, GLuint program);
+typedef GLuint (APIENTRYP PFNGLCREATESHADERPROGRAMVPROC) (GLenum type, GLsizei count, const GLchar *const*strings);
+typedef void (APIENTRYP PFNGLBINDPROGRAMPIPELINEPROC) (GLuint pipeline);
+typedef void (APIENTRYP PFNGLDELETEPROGRAMPIPELINESPROC) (GLsizei n, const GLuint *pipelines);
+typedef void (APIENTRYP PFNGLGENPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines);
+typedef GLboolean (APIENTRYP PFNGLISPROGRAMPIPELINEPROC) (GLuint pipeline);
+typedef void (APIENTRYP PFNGLGETPROGRAMPIPELINEIVPROC) (GLuint pipeline, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IPROC) (GLuint program, GLint location, GLint v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FPROC) (GLuint program, GLint location, GLfloat v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DPROC) (GLuint program, GLint location, GLdouble v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIPROC) (GLuint program, GLint location, GLuint v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IPROC) (GLuint program, GLint location, GLint v0, GLint v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DPROC) (GLuint program, GLint location, GLdouble v0, GLdouble v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DPROC) (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IVPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FVPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DPROC) (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2, GLdouble v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DVPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIVPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3DVPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLVALIDATEPROGRAMPIPELINEPROC) (GLuint pipeline);
+typedef void (APIENTRYP PFNGLGETPROGRAMPIPELINEINFOLOGPROC) (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DPROC) (GLuint index, GLdouble x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DPROC) (GLuint index, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBLPOINTERPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLDVPROC) (GLuint index, GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLVIEWPORTARRAYVPROC) (GLuint first, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVIEWPORTINDEXEDFPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
+typedef void (APIENTRYP PFNGLVIEWPORTINDEXEDFVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLSCISSORARRAYVPROC) (GLuint first, GLsizei count, const GLint *v);
+typedef void (APIENTRYP PFNGLSCISSORINDEXEDPROC) (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLSCISSORINDEXEDVPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLDEPTHRANGEARRAYVPROC) (GLuint first, GLsizei count, const GLdouble *v);
+typedef void (APIENTRYP PFNGLDEPTHRANGEINDEXEDPROC) (GLuint index, GLdouble n, GLdouble f);
+typedef void (APIENTRYP PFNGLGETFLOATI_VPROC) (GLenum target, GLuint index, GLfloat *data);
+typedef void (APIENTRYP PFNGLGETDOUBLEI_VPROC) (GLenum target, GLuint index, GLdouble *data);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glReleaseShaderCompiler (void);
+GLAPI void APIENTRY glShaderBinary (GLsizei count, const GLuint *shaders, GLenum binaryformat, const void *binary, GLsizei length);
+GLAPI void APIENTRY glGetShaderPrecisionFormat (GLenum shadertype, GLenum precisiontype, GLint *range, GLint *precision);
+GLAPI void APIENTRY glDepthRangef (GLfloat n, GLfloat f);
+GLAPI void APIENTRY glClearDepthf (GLfloat d);
+GLAPI void APIENTRY glGetProgramBinary (GLuint program, GLsizei bufSize, GLsizei *length, GLenum *binaryFormat, void *binary);
+GLAPI void APIENTRY glProgramBinary (GLuint program, GLenum binaryFormat, const void *binary, GLsizei length);
+GLAPI void APIENTRY glProgramParameteri (GLuint program, GLenum pname, GLint value);
+GLAPI void APIENTRY glUseProgramStages (GLuint pipeline, GLbitfield stages, GLuint program);
+GLAPI void APIENTRY glActiveShaderProgram (GLuint pipeline, GLuint program);
+GLAPI GLuint APIENTRY glCreateShaderProgramv (GLenum type, GLsizei count, const GLchar *const*strings);
+GLAPI void APIENTRY glBindProgramPipeline (GLuint pipeline);
+GLAPI void APIENTRY glDeleteProgramPipelines (GLsizei n, const GLuint *pipelines);
+GLAPI void APIENTRY glGenProgramPipelines (GLsizei n, GLuint *pipelines);
+GLAPI GLboolean APIENTRY glIsProgramPipeline (GLuint pipeline);
+GLAPI void APIENTRY glGetProgramPipelineiv (GLuint pipeline, GLenum pname, GLint *params);
+GLAPI void APIENTRY glProgramUniform1i (GLuint program, GLint location, GLint v0);
+GLAPI void APIENTRY glProgramUniform1iv (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform1f (GLuint program, GLint location, GLfloat v0);
+GLAPI void APIENTRY glProgramUniform1fv (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform1d (GLuint program, GLint location, GLdouble v0);
+GLAPI void APIENTRY glProgramUniform1dv (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform1ui (GLuint program, GLint location, GLuint v0);
+GLAPI void APIENTRY glProgramUniform1uiv (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniform2i (GLuint program, GLint location, GLint v0, GLint v1);
+GLAPI void APIENTRY glProgramUniform2iv (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform2f (GLuint program, GLint location, GLfloat v0, GLfloat v1);
+GLAPI void APIENTRY glProgramUniform2fv (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform2d (GLuint program, GLint location, GLdouble v0, GLdouble v1);
+GLAPI void APIENTRY glProgramUniform2dv (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform2ui (GLuint program, GLint location, GLuint v0, GLuint v1);
+GLAPI void APIENTRY glProgramUniform2uiv (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniform3i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
+GLAPI void APIENTRY glProgramUniform3iv (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform3f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+GLAPI void APIENTRY glProgramUniform3fv (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform3d (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2);
+GLAPI void APIENTRY glProgramUniform3dv (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform3ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
+GLAPI void APIENTRY glProgramUniform3uiv (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniform4i (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GLAPI void APIENTRY glProgramUniform4iv (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform4f (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GLAPI void APIENTRY glProgramUniform4fv (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform4d (GLuint program, GLint location, GLdouble v0, GLdouble v1, GLdouble v2, GLdouble v3);
+GLAPI void APIENTRY glProgramUniform4dv (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform4ui (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GLAPI void APIENTRY glProgramUniform4uiv (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniformMatrix2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix2dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix3dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix4dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x2fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x4fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x3fv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x3dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x2dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x4dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x2dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x4dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x3dv (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glValidateProgramPipeline (GLuint pipeline);
+GLAPI void APIENTRY glGetProgramPipelineInfoLog (GLuint pipeline, GLsizei bufSize, GLsizei *length, GLchar *infoLog);
+GLAPI void APIENTRY glVertexAttribL1d (GLuint index, GLdouble x);
+GLAPI void APIENTRY glVertexAttribL2d (GLuint index, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glVertexAttribL3d (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glVertexAttribL4d (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glVertexAttribL1dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribL2dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribL3dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribL4dv (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribLPointer (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glGetVertexAttribLdv (GLuint index, GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glViewportArrayv (GLuint first, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glViewportIndexedf (GLuint index, GLfloat x, GLfloat y, GLfloat w, GLfloat h);
+GLAPI void APIENTRY glViewportIndexedfv (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glScissorArrayv (GLuint first, GLsizei count, const GLint *v);
+GLAPI void APIENTRY glScissorIndexed (GLuint index, GLint left, GLint bottom, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glScissorIndexedv (GLuint index, const GLint *v);
+GLAPI void APIENTRY glDepthRangeArrayv (GLuint first, GLsizei count, const GLdouble *v);
+GLAPI void APIENTRY glDepthRangeIndexed (GLuint index, GLdouble n, GLdouble f);
+GLAPI void APIENTRY glGetFloati_v (GLenum target, GLuint index, GLfloat *data);
+GLAPI void APIENTRY glGetDoublei_v (GLenum target, GLuint index, GLdouble *data);
+#endif
+#endif /* GL_VERSION_4_1 */
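+
+/*
+ * Usage sketch (illustrative only, not part of the upstream Khronos header;
+ * assumes a GL 4.1 context, GL_GLEXT_PROTOTYPES, and hypothetical shader
+ * sources vs_src/fs_src and uniform name "u_alpha"): separable programs let
+ * each stage live in its own program object, mixed and matched through a
+ * program pipeline, with uniforms set per program via glProgramUniform*
+ * instead of glUseProgram followed by glUniform*.
+ *
+ *     const GLchar *vs_src = "...";   hypothetical vertex shader source
+ *     const GLchar *fs_src = "...";   hypothetical fragment shader source
+ *     GLuint vs = glCreateShaderProgramv(GL_VERTEX_SHADER, 1, &vs_src);
+ *     GLuint fs = glCreateShaderProgramv(GL_FRAGMENT_SHADER, 1, &fs_src);
+ *
+ *     GLuint pipe;
+ *     glGenProgramPipelines(1, &pipe);
+ *     glUseProgramStages(pipe, GL_VERTEX_SHADER_BIT, vs);
+ *     glUseProgramStages(pipe, GL_FRAGMENT_SHADER_BIT, fs);
+ *     glBindProgramPipeline(pipe);
+ *
+ *     glProgramUniform1f(fs, glGetUniformLocation(fs, "u_alpha"), 0.5f);
+ */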
+
+#ifndef GL_VERSION_4_2
+#define GL_VERSION_4_2 1
+#define GL_COPY_READ_BUFFER_BINDING 0x8F36
+#define GL_COPY_WRITE_BUFFER_BINDING 0x8F37
+#define GL_TRANSFORM_FEEDBACK_ACTIVE 0x8E24
+#define GL_TRANSFORM_FEEDBACK_PAUSED 0x8E23
+#define GL_UNPACK_COMPRESSED_BLOCK_WIDTH 0x9127
+#define GL_UNPACK_COMPRESSED_BLOCK_HEIGHT 0x9128
+#define GL_UNPACK_COMPRESSED_BLOCK_DEPTH 0x9129
+#define GL_UNPACK_COMPRESSED_BLOCK_SIZE 0x912A
+#define GL_PACK_COMPRESSED_BLOCK_WIDTH 0x912B
+#define GL_PACK_COMPRESSED_BLOCK_HEIGHT 0x912C
+#define GL_PACK_COMPRESSED_BLOCK_DEPTH 0x912D
+#define GL_PACK_COMPRESSED_BLOCK_SIZE 0x912E
+#define GL_NUM_SAMPLE_COUNTS 0x9380
+#define GL_MIN_MAP_BUFFER_ALIGNMENT 0x90BC
+#define GL_ATOMIC_COUNTER_BUFFER 0x92C0
+#define GL_ATOMIC_COUNTER_BUFFER_BINDING 0x92C1
+#define GL_ATOMIC_COUNTER_BUFFER_START 0x92C2
+#define GL_ATOMIC_COUNTER_BUFFER_SIZE 0x92C3
+#define GL_ATOMIC_COUNTER_BUFFER_DATA_SIZE 0x92C4
+#define GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTERS 0x92C5
+#define GL_ATOMIC_COUNTER_BUFFER_ACTIVE_ATOMIC_COUNTER_INDICES 0x92C6
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_VERTEX_SHADER 0x92C7
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_CONTROL_SHADER 0x92C8
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TESS_EVALUATION_SHADER 0x92C9
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_GEOMETRY_SHADER 0x92CA
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_FRAGMENT_SHADER 0x92CB
+#define GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS 0x92CC
+#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTER_BUFFERS 0x92CD
+#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTER_BUFFERS 0x92CE
+#define GL_MAX_GEOMETRY_ATOMIC_COUNTER_BUFFERS 0x92CF
+#define GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS 0x92D0
+#define GL_MAX_COMBINED_ATOMIC_COUNTER_BUFFERS 0x92D1
+#define GL_MAX_VERTEX_ATOMIC_COUNTERS 0x92D2
+#define GL_MAX_TESS_CONTROL_ATOMIC_COUNTERS 0x92D3
+#define GL_MAX_TESS_EVALUATION_ATOMIC_COUNTERS 0x92D4
+#define GL_MAX_GEOMETRY_ATOMIC_COUNTERS 0x92D5
+#define GL_MAX_FRAGMENT_ATOMIC_COUNTERS 0x92D6
+#define GL_MAX_COMBINED_ATOMIC_COUNTERS 0x92D7
+#define GL_MAX_ATOMIC_COUNTER_BUFFER_SIZE 0x92D8
+#define GL_MAX_ATOMIC_COUNTER_BUFFER_BINDINGS 0x92DC
+#define GL_ACTIVE_ATOMIC_COUNTER_BUFFERS 0x92D9
+#define GL_UNIFORM_ATOMIC_COUNTER_BUFFER_INDEX 0x92DA
+#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB
+#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT 0x00000001
+#define GL_ELEMENT_ARRAY_BARRIER_BIT 0x00000002
+#define GL_UNIFORM_BARRIER_BIT 0x00000004
+#define GL_TEXTURE_FETCH_BARRIER_BIT 0x00000008
+#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT 0x00000020
+#define GL_COMMAND_BARRIER_BIT 0x00000040
+#define GL_PIXEL_BUFFER_BARRIER_BIT 0x00000080
+#define GL_TEXTURE_UPDATE_BARRIER_BIT 0x00000100
+#define GL_BUFFER_UPDATE_BARRIER_BIT 0x00000200
+#define GL_FRAMEBUFFER_BARRIER_BIT 0x00000400
+#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT 0x00000800
+#define GL_ATOMIC_COUNTER_BARRIER_BIT 0x00001000
+#define GL_ALL_BARRIER_BITS 0xFFFFFFFF
+#define GL_MAX_IMAGE_UNITS 0x8F38
+#define GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS 0x8F39
+#define GL_IMAGE_BINDING_NAME 0x8F3A
+#define GL_IMAGE_BINDING_LEVEL 0x8F3B
+#define GL_IMAGE_BINDING_LAYERED 0x8F3C
+#define GL_IMAGE_BINDING_LAYER 0x8F3D
+#define GL_IMAGE_BINDING_ACCESS 0x8F3E
+#define GL_IMAGE_1D 0x904C
+#define GL_IMAGE_2D 0x904D
+#define GL_IMAGE_3D 0x904E
+#define GL_IMAGE_2D_RECT 0x904F
+#define GL_IMAGE_CUBE 0x9050
+#define GL_IMAGE_BUFFER 0x9051
+#define GL_IMAGE_1D_ARRAY 0x9052
+#define GL_IMAGE_2D_ARRAY 0x9053
+#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054
+#define GL_IMAGE_2D_MULTISAMPLE 0x9055
+#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056
+#define GL_INT_IMAGE_1D 0x9057
+#define GL_INT_IMAGE_2D 0x9058
+#define GL_INT_IMAGE_3D 0x9059
+#define GL_INT_IMAGE_2D_RECT 0x905A
+#define GL_INT_IMAGE_CUBE 0x905B
+#define GL_INT_IMAGE_BUFFER 0x905C
+#define GL_INT_IMAGE_1D_ARRAY 0x905D
+#define GL_INT_IMAGE_2D_ARRAY 0x905E
+#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F
+#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060
+#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061
+#define GL_UNSIGNED_INT_IMAGE_1D 0x9062
+#define GL_UNSIGNED_INT_IMAGE_2D 0x9063
+#define GL_UNSIGNED_INT_IMAGE_3D 0x9064
+#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065
+#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066
+#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067
+#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068
+#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069
+#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C
+#define GL_MAX_IMAGE_SAMPLES 0x906D
+#define GL_IMAGE_BINDING_FORMAT 0x906E
+#define GL_IMAGE_FORMAT_COMPATIBILITY_TYPE 0x90C7
+#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_SIZE 0x90C8
+#define GL_IMAGE_FORMAT_COMPATIBILITY_BY_CLASS 0x90C9
+#define GL_MAX_VERTEX_IMAGE_UNIFORMS 0x90CA
+#define GL_MAX_TESS_CONTROL_IMAGE_UNIFORMS 0x90CB
+#define GL_MAX_TESS_EVALUATION_IMAGE_UNIFORMS 0x90CC
+#define GL_MAX_GEOMETRY_IMAGE_UNIFORMS 0x90CD
+#define GL_MAX_FRAGMENT_IMAGE_UNIFORMS 0x90CE
+#define GL_MAX_COMBINED_IMAGE_UNIFORMS 0x90CF
+#define GL_COMPRESSED_RGBA_BPTC_UNORM 0x8E8C
+#define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM 0x8E8D
+#define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT 0x8E8E
+#define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT 0x8E8F
+#define GL_TEXTURE_IMMUTABLE_FORMAT 0x912F
+typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDBASEINSTANCEPROC) (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEINSTANCEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDBASEVERTEXBASEINSTANCEPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance);
+typedef void (APIENTRYP PFNGLGETINTERNALFORMATIVPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
+typedef void (APIENTRYP PFNGLGETACTIVEATOMICCOUNTERBUFFERIVPROC) (GLuint program, GLuint bufferIndex, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLBINDIMAGETEXTUREPROC) (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format);
+typedef void (APIENTRYP PFNGLMEMORYBARRIERPROC) (GLbitfield barriers);
+typedef void (APIENTRYP PFNGLTEXSTORAGE1DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (APIENTRYP PFNGLTEXSTORAGE2DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLTEXSTORAGE3DPROC) (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKINSTANCEDPROC) (GLenum mode, GLuint id, GLsizei instancecount);
+typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKSTREAMINSTANCEDPROC) (GLenum mode, GLuint id, GLuint stream, GLsizei instancecount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawArraysInstancedBaseInstance (GLenum mode, GLint first, GLsizei count, GLsizei instancecount, GLuint baseinstance);
+GLAPI void APIENTRY glDrawElementsInstancedBaseInstance (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLuint baseinstance);
+GLAPI void APIENTRY glDrawElementsInstancedBaseVertexBaseInstance (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei instancecount, GLint basevertex, GLuint baseinstance);
+GLAPI void APIENTRY glGetInternalformativ (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint *params);
+GLAPI void APIENTRY glGetActiveAtomicCounterBufferiv (GLuint program, GLuint bufferIndex, GLenum pname, GLint *params);
+GLAPI void APIENTRY glBindImageTexture (GLuint unit, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLenum format);
+GLAPI void APIENTRY glMemoryBarrier (GLbitfield barriers);
+GLAPI void APIENTRY glTexStorage1D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+GLAPI void APIENTRY glTexStorage2D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glTexStorage3D (GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GLAPI void APIENTRY glDrawTransformFeedbackInstanced (GLenum mode, GLuint id, GLsizei instancecount);
+GLAPI void APIENTRY glDrawTransformFeedbackStreamInstanced (GLenum mode, GLuint id, GLuint stream, GLsizei instancecount);
+#endif
+#endif /* GL_VERSION_4_2 */
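+
+/*
+ * Usage sketch (illustrative only, not part of the upstream Khronos header;
+ * assumes a GL 4.2 context and a hypothetical 256x256 RGBA pixel buffer
+ * `pixels`): glTexStorage2D allocates every requested mip level up front and
+ * marks the texture immutable-format, after which only glTexSubImage*-style
+ * updates are legal, never a reallocation via glTexImage*.
+ *
+ *     GLuint tex;
+ *     glGenTextures(1, &tex);
+ *     glBindTexture(GL_TEXTURE_2D, tex);
+ *     glTexStorage2D(GL_TEXTURE_2D, 9, GL_RGBA8, 256, 256);   9 levels: 256..1
+ *     glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 256, 256,
+ *                     GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+ *
+ *     GLint immutable = 0;   now reports GL_TRUE
+ *     glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_IMMUTABLE_FORMAT, &immutable);
+ */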
+
+#ifndef GL_VERSION_4_3
+#define GL_VERSION_4_3 1
+typedef void (APIENTRY *GLDEBUGPROC)(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, const void *userParam);
+#define GL_NUM_SHADING_LANGUAGE_VERSIONS 0x82E9
+#define GL_VERTEX_ATTRIB_ARRAY_LONG 0x874E
+#define GL_COMPRESSED_RGB8_ETC2 0x9274
+#define GL_COMPRESSED_SRGB8_ETC2 0x9275
+#define GL_COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9276
+#define GL_COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2 0x9277
+#define GL_COMPRESSED_RGBA8_ETC2_EAC 0x9278
+#define GL_COMPRESSED_SRGB8_ALPHA8_ETC2_EAC 0x9279
+#define GL_COMPRESSED_R11_EAC 0x9270
+#define GL_COMPRESSED_SIGNED_R11_EAC 0x9271
+#define GL_COMPRESSED_RG11_EAC 0x9272
+#define GL_COMPRESSED_SIGNED_RG11_EAC 0x9273
+#define GL_PRIMITIVE_RESTART_FIXED_INDEX 0x8D69
+#define GL_ANY_SAMPLES_PASSED_CONSERVATIVE 0x8D6A
+#define GL_MAX_ELEMENT_INDEX 0x8D6B
+#define GL_COMPUTE_SHADER 0x91B9
+#define GL_MAX_COMPUTE_UNIFORM_BLOCKS 0x91BB
+#define GL_MAX_COMPUTE_TEXTURE_IMAGE_UNITS 0x91BC
+#define GL_MAX_COMPUTE_IMAGE_UNIFORMS 0x91BD
+#define GL_MAX_COMPUTE_SHARED_MEMORY_SIZE 0x8262
+#define GL_MAX_COMPUTE_UNIFORM_COMPONENTS 0x8263
+#define GL_MAX_COMPUTE_ATOMIC_COUNTER_BUFFERS 0x8264
+#define GL_MAX_COMPUTE_ATOMIC_COUNTERS 0x8265
+#define GL_MAX_COMBINED_COMPUTE_UNIFORM_COMPONENTS 0x8266
+#define GL_MAX_COMPUTE_WORK_GROUP_INVOCATIONS 0x90EB
+#define GL_MAX_COMPUTE_WORK_GROUP_COUNT 0x91BE
+#define GL_MAX_COMPUTE_WORK_GROUP_SIZE 0x91BF
+#define GL_COMPUTE_WORK_GROUP_SIZE 0x8267
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_COMPUTE_SHADER 0x90EC
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_COMPUTE_SHADER 0x90ED
+#define GL_DISPATCH_INDIRECT_BUFFER 0x90EE
+#define GL_DISPATCH_INDIRECT_BUFFER_BINDING 0x90EF
+#define GL_COMPUTE_SHADER_BIT 0x00000020
+#define GL_DEBUG_OUTPUT_SYNCHRONOUS 0x8242
+#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH 0x8243
+#define GL_DEBUG_CALLBACK_FUNCTION 0x8244
+#define GL_DEBUG_CALLBACK_USER_PARAM 0x8245
+#define GL_DEBUG_SOURCE_API 0x8246
+#define GL_DEBUG_SOURCE_WINDOW_SYSTEM 0x8247
+#define GL_DEBUG_SOURCE_SHADER_COMPILER 0x8248
+#define GL_DEBUG_SOURCE_THIRD_PARTY 0x8249
+#define GL_DEBUG_SOURCE_APPLICATION 0x824A
+#define GL_DEBUG_SOURCE_OTHER 0x824B
+#define GL_DEBUG_TYPE_ERROR 0x824C
+#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR 0x824D
+#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR 0x824E
+#define GL_DEBUG_TYPE_PORTABILITY 0x824F
+#define GL_DEBUG_TYPE_PERFORMANCE 0x8250
+#define GL_DEBUG_TYPE_OTHER 0x8251
+#define GL_MAX_DEBUG_MESSAGE_LENGTH 0x9143
+#define GL_MAX_DEBUG_LOGGED_MESSAGES 0x9144
+#define GL_DEBUG_LOGGED_MESSAGES 0x9145
+#define GL_DEBUG_SEVERITY_HIGH 0x9146
+#define GL_DEBUG_SEVERITY_MEDIUM 0x9147
+#define GL_DEBUG_SEVERITY_LOW 0x9148
+#define GL_DEBUG_TYPE_MARKER 0x8268
+#define GL_DEBUG_TYPE_PUSH_GROUP 0x8269
+#define GL_DEBUG_TYPE_POP_GROUP 0x826A
+#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B
+#define GL_MAX_DEBUG_GROUP_STACK_DEPTH 0x826C
+#define GL_DEBUG_GROUP_STACK_DEPTH 0x826D
+#define GL_BUFFER 0x82E0
+#define GL_SHADER 0x82E1
+#define GL_PROGRAM 0x82E2
+#define GL_QUERY 0x82E3
+#define GL_PROGRAM_PIPELINE 0x82E4
+#define GL_SAMPLER 0x82E6
+#define GL_MAX_LABEL_LENGTH 0x82E8
+#define GL_DEBUG_OUTPUT 0x92E0
+#define GL_CONTEXT_FLAG_DEBUG_BIT 0x00000002
+#define GL_MAX_UNIFORM_LOCATIONS 0x826E
+#define GL_FRAMEBUFFER_DEFAULT_WIDTH 0x9310
+#define GL_FRAMEBUFFER_DEFAULT_HEIGHT 0x9311
+#define GL_FRAMEBUFFER_DEFAULT_LAYERS 0x9312
+#define GL_FRAMEBUFFER_DEFAULT_SAMPLES 0x9313
+#define GL_FRAMEBUFFER_DEFAULT_FIXED_SAMPLE_LOCATIONS 0x9314
+#define GL_MAX_FRAMEBUFFER_WIDTH 0x9315
+#define GL_MAX_FRAMEBUFFER_HEIGHT 0x9316
+#define GL_MAX_FRAMEBUFFER_LAYERS 0x9317
+#define GL_MAX_FRAMEBUFFER_SAMPLES 0x9318
+#define GL_INTERNALFORMAT_SUPPORTED 0x826F
+#define GL_INTERNALFORMAT_PREFERRED 0x8270
+#define GL_INTERNALFORMAT_RED_SIZE 0x8271
+#define GL_INTERNALFORMAT_GREEN_SIZE 0x8272
+#define GL_INTERNALFORMAT_BLUE_SIZE 0x8273
+#define GL_INTERNALFORMAT_ALPHA_SIZE 0x8274
+#define GL_INTERNALFORMAT_DEPTH_SIZE 0x8275
+#define GL_INTERNALFORMAT_STENCIL_SIZE 0x8276
+#define GL_INTERNALFORMAT_SHARED_SIZE 0x8277
+#define GL_INTERNALFORMAT_RED_TYPE 0x8278
+#define GL_INTERNALFORMAT_GREEN_TYPE 0x8279
+#define GL_INTERNALFORMAT_BLUE_TYPE 0x827A
+#define GL_INTERNALFORMAT_ALPHA_TYPE 0x827B
+#define GL_INTERNALFORMAT_DEPTH_TYPE 0x827C
+#define GL_INTERNALFORMAT_STENCIL_TYPE 0x827D
+#define GL_MAX_WIDTH 0x827E
+#define GL_MAX_HEIGHT 0x827F
+#define GL_MAX_DEPTH 0x8280
+#define GL_MAX_LAYERS 0x8281
+#define GL_MAX_COMBINED_DIMENSIONS 0x8282
+#define GL_COLOR_COMPONENTS 0x8283
+#define GL_DEPTH_COMPONENTS 0x8284
+#define GL_STENCIL_COMPONENTS 0x8285
+#define GL_COLOR_RENDERABLE 0x8286
+#define GL_DEPTH_RENDERABLE 0x8287
+#define GL_STENCIL_RENDERABLE 0x8288
+#define GL_FRAMEBUFFER_RENDERABLE 0x8289
+#define GL_FRAMEBUFFER_RENDERABLE_LAYERED 0x828A
+#define GL_FRAMEBUFFER_BLEND 0x828B
+#define GL_READ_PIXELS 0x828C
+#define GL_READ_PIXELS_FORMAT 0x828D
+#define GL_READ_PIXELS_TYPE 0x828E
+#define GL_TEXTURE_IMAGE_FORMAT 0x828F
+#define GL_TEXTURE_IMAGE_TYPE 0x8290
+#define GL_GET_TEXTURE_IMAGE_FORMAT 0x8291
+#define GL_GET_TEXTURE_IMAGE_TYPE 0x8292
+#define GL_MIPMAP 0x8293
+#define GL_MANUAL_GENERATE_MIPMAP 0x8294
+#define GL_AUTO_GENERATE_MIPMAP 0x8295
+#define GL_COLOR_ENCODING 0x8296
+#define GL_SRGB_READ 0x8297
+#define GL_SRGB_WRITE 0x8298
+#define GL_FILTER 0x829A
+#define GL_VERTEX_TEXTURE 0x829B
+#define GL_TESS_CONTROL_TEXTURE 0x829C
+#define GL_TESS_EVALUATION_TEXTURE 0x829D
+#define GL_GEOMETRY_TEXTURE 0x829E
+#define GL_FRAGMENT_TEXTURE 0x829F
+#define GL_COMPUTE_TEXTURE 0x82A0
+#define GL_TEXTURE_SHADOW 0x82A1
+#define GL_TEXTURE_GATHER 0x82A2
+#define GL_TEXTURE_GATHER_SHADOW 0x82A3
+#define GL_SHADER_IMAGE_LOAD 0x82A4
+#define GL_SHADER_IMAGE_STORE 0x82A5
+#define GL_SHADER_IMAGE_ATOMIC 0x82A6
+#define GL_IMAGE_TEXEL_SIZE 0x82A7
+#define GL_IMAGE_COMPATIBILITY_CLASS 0x82A8
+#define GL_IMAGE_PIXEL_FORMAT 0x82A9
+#define GL_IMAGE_PIXEL_TYPE 0x82AA
+#define GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_TEST 0x82AC
+#define GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_TEST 0x82AD
+#define GL_SIMULTANEOUS_TEXTURE_AND_DEPTH_WRITE 0x82AE
+#define GL_SIMULTANEOUS_TEXTURE_AND_STENCIL_WRITE 0x82AF
+#define GL_TEXTURE_COMPRESSED_BLOCK_WIDTH 0x82B1
+#define GL_TEXTURE_COMPRESSED_BLOCK_HEIGHT 0x82B2
+#define GL_TEXTURE_COMPRESSED_BLOCK_SIZE 0x82B3
+#define GL_CLEAR_BUFFER 0x82B4
+#define GL_TEXTURE_VIEW 0x82B5
+#define GL_VIEW_COMPATIBILITY_CLASS 0x82B6
+#define GL_FULL_SUPPORT 0x82B7
+#define GL_CAVEAT_SUPPORT 0x82B8
+#define GL_IMAGE_CLASS_4_X_32 0x82B9
+#define GL_IMAGE_CLASS_2_X_32 0x82BA
+#define GL_IMAGE_CLASS_1_X_32 0x82BB
+#define GL_IMAGE_CLASS_4_X_16 0x82BC
+#define GL_IMAGE_CLASS_2_X_16 0x82BD
+#define GL_IMAGE_CLASS_1_X_16 0x82BE
+#define GL_IMAGE_CLASS_4_X_8 0x82BF
+#define GL_IMAGE_CLASS_2_X_8 0x82C0
+#define GL_IMAGE_CLASS_1_X_8 0x82C1
+#define GL_IMAGE_CLASS_11_11_10 0x82C2
+#define GL_IMAGE_CLASS_10_10_10_2 0x82C3
+#define GL_VIEW_CLASS_128_BITS 0x82C4
+#define GL_VIEW_CLASS_96_BITS 0x82C5
+#define GL_VIEW_CLASS_64_BITS 0x82C6
+#define GL_VIEW_CLASS_48_BITS 0x82C7
+#define GL_VIEW_CLASS_32_BITS 0x82C8
+#define GL_VIEW_CLASS_24_BITS 0x82C9
+#define GL_VIEW_CLASS_16_BITS 0x82CA
+#define GL_VIEW_CLASS_8_BITS 0x82CB
+#define GL_VIEW_CLASS_S3TC_DXT1_RGB 0x82CC
+#define GL_VIEW_CLASS_S3TC_DXT1_RGBA 0x82CD
+#define GL_VIEW_CLASS_S3TC_DXT3_RGBA 0x82CE
+#define GL_VIEW_CLASS_S3TC_DXT5_RGBA 0x82CF
+#define GL_VIEW_CLASS_RGTC1_RED 0x82D0
+#define GL_VIEW_CLASS_RGTC2_RG 0x82D1
+#define GL_VIEW_CLASS_BPTC_UNORM 0x82D2
+#define GL_VIEW_CLASS_BPTC_FLOAT 0x82D3
+#define GL_UNIFORM 0x92E1
+#define GL_UNIFORM_BLOCK 0x92E2
+#define GL_PROGRAM_INPUT 0x92E3
+#define GL_PROGRAM_OUTPUT 0x92E4
+#define GL_BUFFER_VARIABLE 0x92E5
+#define GL_SHADER_STORAGE_BLOCK 0x92E6
+#define GL_VERTEX_SUBROUTINE 0x92E8
+#define GL_TESS_CONTROL_SUBROUTINE 0x92E9
+#define GL_TESS_EVALUATION_SUBROUTINE 0x92EA
+#define GL_GEOMETRY_SUBROUTINE 0x92EB
+#define GL_FRAGMENT_SUBROUTINE 0x92EC
+#define GL_COMPUTE_SUBROUTINE 0x92ED
+#define GL_VERTEX_SUBROUTINE_UNIFORM 0x92EE
+#define GL_TESS_CONTROL_SUBROUTINE_UNIFORM 0x92EF
+#define GL_TESS_EVALUATION_SUBROUTINE_UNIFORM 0x92F0
+#define GL_GEOMETRY_SUBROUTINE_UNIFORM 0x92F1
+#define GL_FRAGMENT_SUBROUTINE_UNIFORM 0x92F2
+#define GL_COMPUTE_SUBROUTINE_UNIFORM 0x92F3
+#define GL_TRANSFORM_FEEDBACK_VARYING 0x92F4
+#define GL_ACTIVE_RESOURCES 0x92F5
+#define GL_MAX_NAME_LENGTH 0x92F6
+#define GL_MAX_NUM_ACTIVE_VARIABLES 0x92F7
+#define GL_MAX_NUM_COMPATIBLE_SUBROUTINES 0x92F8
+#define GL_NAME_LENGTH 0x92F9
+#define GL_TYPE 0x92FA
+#define GL_ARRAY_SIZE 0x92FB
+#define GL_OFFSET 0x92FC
+#define GL_BLOCK_INDEX 0x92FD
+#define GL_ARRAY_STRIDE 0x92FE
+#define GL_MATRIX_STRIDE 0x92FF
+#define GL_IS_ROW_MAJOR 0x9300
+#define GL_ATOMIC_COUNTER_BUFFER_INDEX 0x9301
+#define GL_BUFFER_BINDING 0x9302
+#define GL_BUFFER_DATA_SIZE 0x9303
+#define GL_NUM_ACTIVE_VARIABLES 0x9304
+#define GL_ACTIVE_VARIABLES 0x9305
+#define GL_REFERENCED_BY_VERTEX_SHADER 0x9306
+#define GL_REFERENCED_BY_TESS_CONTROL_SHADER 0x9307
+#define GL_REFERENCED_BY_TESS_EVALUATION_SHADER 0x9308
+#define GL_REFERENCED_BY_GEOMETRY_SHADER 0x9309
+#define GL_REFERENCED_BY_FRAGMENT_SHADER 0x930A
+#define GL_REFERENCED_BY_COMPUTE_SHADER 0x930B
+#define GL_TOP_LEVEL_ARRAY_SIZE 0x930C
+#define GL_TOP_LEVEL_ARRAY_STRIDE 0x930D
+#define GL_LOCATION 0x930E
+#define GL_LOCATION_INDEX 0x930F
+#define GL_IS_PER_PATCH 0x92E7
+#define GL_SHADER_STORAGE_BUFFER 0x90D2
+#define GL_SHADER_STORAGE_BUFFER_BINDING 0x90D3
+#define GL_SHADER_STORAGE_BUFFER_START 0x90D4
+#define GL_SHADER_STORAGE_BUFFER_SIZE 0x90D5
+#define GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS 0x90D6
+#define GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS 0x90D7
+#define GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS 0x90D8
+#define GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS 0x90D9
+#define GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS 0x90DA
+#define GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS 0x90DB
+#define GL_MAX_COMBINED_SHADER_STORAGE_BLOCKS 0x90DC
+#define GL_MAX_SHADER_STORAGE_BUFFER_BINDINGS 0x90DD
+#define GL_MAX_SHADER_STORAGE_BLOCK_SIZE 0x90DE
+#define GL_SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT 0x90DF
+#define GL_SHADER_STORAGE_BARRIER_BIT 0x00002000
+#define GL_MAX_COMBINED_SHADER_OUTPUT_RESOURCES 0x8F39
+#define GL_DEPTH_STENCIL_TEXTURE_MODE 0x90EA
+#define GL_TEXTURE_BUFFER_OFFSET 0x919D
+#define GL_TEXTURE_BUFFER_SIZE 0x919E
+#define GL_TEXTURE_BUFFER_OFFSET_ALIGNMENT 0x919F
+#define GL_TEXTURE_VIEW_MIN_LEVEL 0x82DB
+#define GL_TEXTURE_VIEW_NUM_LEVELS 0x82DC
+#define GL_TEXTURE_VIEW_MIN_LAYER 0x82DD
+#define GL_TEXTURE_VIEW_NUM_LAYERS 0x82DE
+#define GL_TEXTURE_IMMUTABLE_LEVELS 0x82DF
+#define GL_VERTEX_ATTRIB_BINDING 0x82D4
+#define GL_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D5
+#define GL_VERTEX_BINDING_DIVISOR 0x82D6
+#define GL_VERTEX_BINDING_OFFSET 0x82D7
+#define GL_VERTEX_BINDING_STRIDE 0x82D8
+#define GL_MAX_VERTEX_ATTRIB_RELATIVE_OFFSET 0x82D9
+#define GL_MAX_VERTEX_ATTRIB_BINDINGS 0x82DA
+#define GL_VERTEX_BINDING_BUFFER 0x8F4F
+#define GL_DISPLAY_LIST 0x82E7
+typedef void (APIENTRYP PFNGLCLEARBUFFERDATAPROC) (GLenum target, GLenum internalformat, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLCLEARBUFFERSUBDATAPROC) (GLenum target, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLDISPATCHCOMPUTEPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z);
+typedef void (APIENTRYP PFNGLDISPATCHCOMPUTEINDIRECTPROC) (GLintptr indirect);
+typedef void (APIENTRYP PFNGLCOPYIMAGESUBDATAPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERPARAMETERIPROC) (GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETINTERNALFORMATI64VPROC) (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint64 *params);
+typedef void (APIENTRYP PFNGLINVALIDATETEXSUBIMAGEPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (APIENTRYP PFNGLINVALIDATETEXIMAGEPROC) (GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLINVALIDATEBUFFERSUBDATAPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length);
+typedef void (APIENTRYP PFNGLINVALIDATEBUFFERDATAPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLINVALIDATEFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments);
+typedef void (APIENTRYP PFNGLINVALIDATESUBFRAMEBUFFERPROC) (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTPROC) (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLGETPROGRAMINTERFACEIVPROC) (GLuint program, GLenum programInterface, GLenum pname, GLint *params);
+typedef GLuint (APIENTRYP PFNGLGETPROGRAMRESOURCEINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name);
+typedef void (APIENTRYP PFNGLGETPROGRAMRESOURCENAMEPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name);
+typedef void (APIENTRYP PFNGLGETPROGRAMRESOURCEIVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params);
+typedef GLint (APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONPROC) (GLuint program, GLenum programInterface, const GLchar *name);
+typedef GLint (APIENTRYP PFNGLGETPROGRAMRESOURCELOCATIONINDEXPROC) (GLuint program, GLenum programInterface, const GLchar *name);
+typedef void (APIENTRYP PFNGLSHADERSTORAGEBLOCKBINDINGPROC) (GLuint program, GLuint storageBlockIndex, GLuint storageBlockBinding);
+typedef void (APIENTRYP PFNGLTEXBUFFERRANGEPROC) (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLTEXSTORAGE2DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLTEXSTORAGE3DMULTISAMPLEPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLTEXTUREVIEWPROC) (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
+typedef void (APIENTRYP PFNGLBINDVERTEXBUFFERPROC) (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBIFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBLFORMATPROC) (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBBINDINGPROC) (GLuint attribindex, GLuint bindingindex);
+typedef void (APIENTRYP PFNGLVERTEXBINDINGDIVISORPROC) (GLuint bindingindex, GLuint divisor);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGECONTROLPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKPROC) (GLDEBUGPROC callback, const void *userParam);
+typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
+typedef void (APIENTRYP PFNGLPUSHDEBUGGROUPPROC) (GLenum source, GLuint id, GLsizei length, const GLchar *message);
+typedef void (APIENTRYP PFNGLPOPDEBUGGROUPPROC) (void);
+typedef void (APIENTRYP PFNGLOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
+typedef void (APIENTRYP PFNGLGETOBJECTLABELPROC) (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label);
+typedef void (APIENTRYP PFNGLOBJECTPTRLABELPROC) (const void *ptr, GLsizei length, const GLchar *label);
+typedef void (APIENTRYP PFNGLGETOBJECTPTRLABELPROC) (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glClearBufferData (GLenum target, GLenum internalformat, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glClearBufferSubData (GLenum target, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glDispatchCompute (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z);
+GLAPI void APIENTRY glDispatchComputeIndirect (GLintptr indirect);
+GLAPI void APIENTRY glCopyImageSubData (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
+GLAPI void APIENTRY glFramebufferParameteri (GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glGetFramebufferParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetInternalformati64v (GLenum target, GLenum internalformat, GLenum pname, GLsizei bufSize, GLint64 *params);
+GLAPI void APIENTRY glInvalidateTexSubImage (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth);
+GLAPI void APIENTRY glInvalidateTexImage (GLuint texture, GLint level);
+GLAPI void APIENTRY glInvalidateBufferSubData (GLuint buffer, GLintptr offset, GLsizeiptr length);
+GLAPI void APIENTRY glInvalidateBufferData (GLuint buffer);
+GLAPI void APIENTRY glInvalidateFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments);
+GLAPI void APIENTRY glInvalidateSubFramebuffer (GLenum target, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glMultiDrawArraysIndirect (GLenum mode, const void *indirect, GLsizei drawcount, GLsizei stride);
+GLAPI void APIENTRY glMultiDrawElementsIndirect (GLenum mode, GLenum type, const void *indirect, GLsizei drawcount, GLsizei stride);
+GLAPI void APIENTRY glGetProgramInterfaceiv (GLuint program, GLenum programInterface, GLenum pname, GLint *params);
+GLAPI GLuint APIENTRY glGetProgramResourceIndex (GLuint program, GLenum programInterface, const GLchar *name);
+GLAPI void APIENTRY glGetProgramResourceName (GLuint program, GLenum programInterface, GLuint index, GLsizei bufSize, GLsizei *length, GLchar *name);
+GLAPI void APIENTRY glGetProgramResourceiv (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLint *params);
+GLAPI GLint APIENTRY glGetProgramResourceLocation (GLuint program, GLenum programInterface, const GLchar *name);
+GLAPI GLint APIENTRY glGetProgramResourceLocationIndex (GLuint program, GLenum programInterface, const GLchar *name);
+GLAPI void APIENTRY glShaderStorageBlockBinding (GLuint program, GLuint storageBlockIndex, GLuint storageBlockBinding);
+GLAPI void APIENTRY glTexBufferRange (GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glTexStorage2DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glTexStorage3DMultisample (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glTextureView (GLuint texture, GLenum target, GLuint origtexture, GLenum internalformat, GLuint minlevel, GLuint numlevels, GLuint minlayer, GLuint numlayers);
+GLAPI void APIENTRY glBindVertexBuffer (GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
+GLAPI void APIENTRY glVertexAttribFormat (GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexAttribIFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexAttribLFormat (GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexAttribBinding (GLuint attribindex, GLuint bindingindex);
+GLAPI void APIENTRY glVertexBindingDivisor (GLuint bindingindex, GLuint divisor);
+GLAPI void APIENTRY glDebugMessageControl (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
+GLAPI void APIENTRY glDebugMessageInsert (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
+GLAPI void APIENTRY glDebugMessageCallback (GLDEBUGPROC callback, const void *userParam);
+GLAPI GLuint APIENTRY glGetDebugMessageLog (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
+GLAPI void APIENTRY glPushDebugGroup (GLenum source, GLuint id, GLsizei length, const GLchar *message);
+GLAPI void APIENTRY glPopDebugGroup (void);
+GLAPI void APIENTRY glObjectLabel (GLenum identifier, GLuint name, GLsizei length, const GLchar *label);
+GLAPI void APIENTRY glGetObjectLabel (GLenum identifier, GLuint name, GLsizei bufSize, GLsizei *length, GLchar *label);
+GLAPI void APIENTRY glObjectPtrLabel (const void *ptr, GLsizei length, const GLchar *label);
+GLAPI void APIENTRY glGetObjectPtrLabel (const void *ptr, GLsizei bufSize, GLsizei *length, GLchar *label);
+#endif
+#endif /* GL_VERSION_4_3 */
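+
+/*
+ * Usage sketch (illustrative only, not part of the upstream Khronos header;
+ * assumes a GL 4.3 debug context, GL_GLEXT_PROTOTYPES, and <stdio.h>):
+ * installing a GLDEBUGPROC callback routes driver messages to the
+ * application; GL_DEBUG_OUTPUT_SYNCHRONOUS makes them arrive on the thread
+ * of the erring call, and glDebugMessageControl filters by source, type,
+ * and severity.
+ *
+ *     static void APIENTRY on_debug(GLenum source, GLenum type, GLuint id,
+ *                                   GLenum severity, GLsizei length,
+ *                                   const GLchar *message, const void *user)
+ *     {
+ *         fprintf(stderr, "GL[0x%04x]: %.*s\n", severity, (int)length, message);
+ *     }
+ *
+ *     glEnable(GL_DEBUG_OUTPUT);
+ *     glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS);
+ *     glDebugMessageCallback(on_debug, NULL);
+ *     glDebugMessageControl(GL_DONT_CARE, GL_DONT_CARE,   drop notifications
+ *                           GL_DEBUG_SEVERITY_NOTIFICATION, 0, NULL, GL_FALSE);
+ */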
+
+#ifndef GL_VERSION_4_4
+#define GL_VERSION_4_4 1
+#define GL_MAX_VERTEX_ATTRIB_STRIDE 0x82E5
+#define GL_PRIMITIVE_RESTART_FOR_PATCHES_SUPPORTED 0x8221
+#define GL_TEXTURE_BUFFER_BINDING 0x8C2A
+#define GL_MAP_PERSISTENT_BIT 0x0040
+#define GL_MAP_COHERENT_BIT 0x0080
+#define GL_DYNAMIC_STORAGE_BIT 0x0100
+#define GL_CLIENT_STORAGE_BIT 0x0200
+#define GL_CLIENT_MAPPED_BUFFER_BARRIER_BIT 0x00004000
+#define GL_BUFFER_IMMUTABLE_STORAGE 0x821F
+#define GL_BUFFER_STORAGE_FLAGS 0x8220
+#define GL_CLEAR_TEXTURE 0x9365
+#define GL_LOCATION_COMPONENT 0x934A
+#define GL_TRANSFORM_FEEDBACK_BUFFER_INDEX 0x934B
+#define GL_TRANSFORM_FEEDBACK_BUFFER_STRIDE 0x934C
+#define GL_QUERY_BUFFER 0x9192
+#define GL_QUERY_BUFFER_BARRIER_BIT 0x00008000
+#define GL_QUERY_BUFFER_BINDING 0x9193
+#define GL_QUERY_RESULT_NO_WAIT 0x9194
+#define GL_MIRROR_CLAMP_TO_EDGE 0x8743
+typedef void (APIENTRYP PFNGLBUFFERSTORAGEPROC) (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags);
+typedef void (APIENTRYP PFNGLCLEARTEXIMAGEPROC) (GLuint texture, GLint level, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLCLEARTEXSUBIMAGEPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLBINDBUFFERSBASEPROC) (GLenum target, GLuint first, GLsizei count, const GLuint *buffers);
+typedef void (APIENTRYP PFNGLBINDBUFFERSRANGEPROC) (GLenum target, GLuint first, GLsizei count, const GLuint *buffers, const GLintptr *offsets, const GLsizeiptr *sizes);
+typedef void (APIENTRYP PFNGLBINDTEXTURESPROC) (GLuint first, GLsizei count, const GLuint *textures);
+typedef void (APIENTRYP PFNGLBINDSAMPLERSPROC) (GLuint first, GLsizei count, const GLuint *samplers);
+typedef void (APIENTRYP PFNGLBINDIMAGETEXTURESPROC) (GLuint first, GLsizei count, const GLuint *textures);
+typedef void (APIENTRYP PFNGLBINDVERTEXBUFFERSPROC) (GLuint first, GLsizei count, const GLuint *buffers, const GLintptr *offsets, const GLsizei *strides);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBufferStorage (GLenum target, GLsizeiptr size, const void *data, GLbitfield flags);
+GLAPI void APIENTRY glClearTexImage (GLuint texture, GLint level, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glClearTexSubImage (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glBindBuffersBase (GLenum target, GLuint first, GLsizei count, const GLuint *buffers);
+GLAPI void APIENTRY glBindBuffersRange (GLenum target, GLuint first, GLsizei count, const GLuint *buffers, const GLintptr *offsets, const GLsizeiptr *sizes);
+GLAPI void APIENTRY glBindTextures (GLuint first, GLsizei count, const GLuint *textures);
+GLAPI void APIENTRY glBindSamplers (GLuint first, GLsizei count, const GLuint *samplers);
+GLAPI void APIENTRY glBindImageTextures (GLuint first, GLsizei count, const GLuint *textures);
+GLAPI void APIENTRY glBindVertexBuffers (GLuint first, GLsizei count, const GLuint *buffers, const GLintptr *offsets, const GLsizei *strides);
+#endif
+#endif /* GL_VERSION_4_4 */
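+
+/*
+ * Usage sketch (illustrative only, not part of the upstream Khronos header;
+ * assumes a GL 4.4 context and a buffer object already bound to
+ * GL_ARRAY_BUFFER): glBufferStorage creates an immutable-size data store, and
+ * GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT lets the pointer returned by
+ * glMapBufferRange stay mapped while the GL reads from the buffer, so
+ * per-frame vertex streaming needs no map/unmap cycle.
+ *
+ *     const GLsizeiptr size = 1 << 20;   1 MiB, arbitrary for this sketch
+ *     GLbitfield flags = GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT |
+ *                        GL_MAP_COHERENT_BIT;
+ *     glBufferStorage(GL_ARRAY_BUFFER, size, NULL, flags);
+ *     void *ptr = glMapBufferRange(GL_ARRAY_BUFFER, 0, size, flags);
+ *     ... write vertex data through ptr each frame; never unmapped ...
+ */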
+
+#ifndef GL_VERSION_4_5
+#define GL_VERSION_4_5 1
+#define GL_CONTEXT_LOST 0x0507
+#define GL_NEGATIVE_ONE_TO_ONE 0x935E
+#define GL_ZERO_TO_ONE 0x935F
+#define GL_CLIP_ORIGIN 0x935C
+#define GL_CLIP_DEPTH_MODE 0x935D
+#define GL_QUERY_WAIT_INVERTED 0x8E17
+#define GL_QUERY_NO_WAIT_INVERTED 0x8E18
+#define GL_QUERY_BY_REGION_WAIT_INVERTED 0x8E19
+#define GL_QUERY_BY_REGION_NO_WAIT_INVERTED 0x8E1A
+#define GL_MAX_CULL_DISTANCES 0x82F9
+#define GL_MAX_COMBINED_CLIP_AND_CULL_DISTANCES 0x82FA
+#define GL_TEXTURE_TARGET 0x1006
+#define GL_QUERY_TARGET 0x82EA
+#define GL_GUILTY_CONTEXT_RESET 0x8253
+#define GL_INNOCENT_CONTEXT_RESET 0x8254
+#define GL_UNKNOWN_CONTEXT_RESET 0x8255
+#define GL_RESET_NOTIFICATION_STRATEGY 0x8256
+#define GL_LOSE_CONTEXT_ON_RESET 0x8252
+#define GL_NO_RESET_NOTIFICATION 0x8261
+#define GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT 0x00000004
+#define GL_CONTEXT_RELEASE_BEHAVIOR 0x82FB
+#define GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH 0x82FC
+typedef void (APIENTRYP PFNGLCLIPCONTROLPROC) (GLenum origin, GLenum depth);
+typedef void (APIENTRYP PFNGLCREATETRANSFORMFEEDBACKSPROC) (GLsizei n, GLuint *ids);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKBUFFERBASEPROC) (GLuint xfb, GLuint index, GLuint buffer);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKBUFFERRANGEPROC) (GLuint xfb, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKIVPROC) (GLuint xfb, GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKI_VPROC) (GLuint xfb, GLenum pname, GLuint index, GLint *param);
+typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKI64_VPROC) (GLuint xfb, GLenum pname, GLuint index, GLint64 *param);
+typedef void (APIENTRYP PFNGLCREATEBUFFERSPROC) (GLsizei n, GLuint *buffers);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERSTORAGEPROC) (GLuint buffer, GLsizeiptr size, const void *data, GLbitfield flags);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERDATAPROC) (GLuint buffer, GLsizeiptr size, const void *data, GLenum usage);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERSUBDATAPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+typedef void (APIENTRYP PFNGLCOPYNAMEDBUFFERSUBDATAPROC) (GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLCLEARNAMEDBUFFERDATAPROC) (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLCLEARNAMEDBUFFERSUBDATAPROC) (GLuint buffer, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data);
+typedef void *(APIENTRYP PFNGLMAPNAMEDBUFFERPROC) (GLuint buffer, GLenum access);
+typedef void *(APIENTRYP PFNGLMAPNAMEDBUFFERRANGEPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length, GLbitfield access);
+typedef GLboolean (APIENTRYP PFNGLUNMAPNAMEDBUFFERPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPARAMETERIVPROC) (GLuint buffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPARAMETERI64VPROC) (GLuint buffer, GLenum pname, GLint64 *params);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPOINTERVPROC) (GLuint buffer, GLenum pname, void **params);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERSUBDATAPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, void *data);
+typedef void (APIENTRYP PFNGLCREATEFRAMEBUFFERSPROC) (GLsizei n, GLuint *framebuffers);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERRENDERBUFFERPROC) (GLuint framebuffer, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERPARAMETERIPROC) (GLuint framebuffer, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURELAYERPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint layer);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERDRAWBUFFERPROC) (GLuint framebuffer, GLenum buf);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERDRAWBUFFERSPROC) (GLuint framebuffer, GLsizei n, const GLenum *bufs);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERREADBUFFERPROC) (GLuint framebuffer, GLenum src);
+typedef void (APIENTRYP PFNGLINVALIDATENAMEDFRAMEBUFFERDATAPROC) (GLuint framebuffer, GLsizei numAttachments, const GLenum *attachments);
+typedef void (APIENTRYP PFNGLINVALIDATENAMEDFRAMEBUFFERSUBDATAPROC) (GLuint framebuffer, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLCLEARNAMEDFRAMEBUFFERIVPROC) (GLuint framebuffer, GLenum buffer, GLint drawbuffer, const GLint *value);
+typedef void (APIENTRYP PFNGLCLEARNAMEDFRAMEBUFFERUIVPROC) (GLuint framebuffer, GLenum buffer, GLint drawbuffer, const GLuint *value);
+typedef void (APIENTRYP PFNGLCLEARNAMEDFRAMEBUFFERFVPROC) (GLuint framebuffer, GLenum buffer, GLint drawbuffer, const GLfloat *value);
+typedef void (APIENTRYP PFNGLCLEARNAMEDFRAMEBUFFERFIPROC) (GLuint framebuffer, GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+typedef void (APIENTRYP PFNGLBLITNAMEDFRAMEBUFFERPROC) (GLuint readFramebuffer, GLuint drawFramebuffer, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+typedef GLenum (APIENTRYP PFNGLCHECKNAMEDFRAMEBUFFERSTATUSPROC) (GLuint framebuffer, GLenum target);
+typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVPROC) (GLuint framebuffer, GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVPROC) (GLuint framebuffer, GLenum attachment, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLCREATERENDERBUFFERSPROC) (GLsizei n, GLuint *renderbuffers);
+typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEPROC) (GLuint renderbuffer, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEPROC) (GLuint renderbuffer, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETNAMEDRENDERBUFFERPARAMETERIVPROC) (GLuint renderbuffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLCREATETEXTURESPROC) (GLenum target, GLsizei n, GLuint *textures);
+typedef void (APIENTRYP PFNGLTEXTUREBUFFERPROC) (GLuint texture, GLenum internalformat, GLuint buffer);
+typedef void (APIENTRYP PFNGLTEXTUREBUFFERRANGEPROC) (GLuint texture, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE1DPROC) (GLuint texture, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE2DPROC) (GLuint texture, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE3DPROC) (GLuint texture, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE2DMULTISAMPLEPROC) (GLuint texture, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE3DMULTISAMPLEPROC) (GLuint texture, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE1DPROC) (GLuint texture, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE2DPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE3DPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE1DPROC) (GLuint texture, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE2DPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE3DPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE1DPROC) (GLuint texture, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE2DPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE3DPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERFPROC) (GLuint texture, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERFVPROC) (GLuint texture, GLenum pname, const GLfloat *param);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIPROC) (GLuint texture, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIIVPROC) (GLuint texture, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIUIVPROC) (GLuint texture, GLenum pname, const GLuint *params);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIVPROC) (GLuint texture, GLenum pname, const GLint *param);
+typedef void (APIENTRYP PFNGLGENERATETEXTUREMIPMAPPROC) (GLuint texture);
+typedef void (APIENTRYP PFNGLBINDTEXTUREUNITPROC) (GLuint unit, GLuint texture);
+typedef void (APIENTRYP PFNGLGETTEXTUREIMAGEPROC) (GLuint texture, GLint level, GLenum format, GLenum type, GLsizei bufSize, void *pixels);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXTUREIMAGEPROC) (GLuint texture, GLint level, GLsizei bufSize, void *pixels);
+typedef void (APIENTRYP PFNGLGETTEXTURELEVELPARAMETERFVPROC) (GLuint texture, GLint level, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETTEXTURELEVELPARAMETERIVPROC) (GLuint texture, GLint level, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERFVPROC) (GLuint texture, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIIVPROC) (GLuint texture, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIUIVPROC) (GLuint texture, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIVPROC) (GLuint texture, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLCREATEVERTEXARRAYSPROC) (GLsizei n, GLuint *arrays);
+typedef void (APIENTRYP PFNGLDISABLEVERTEXARRAYATTRIBPROC) (GLuint vaobj, GLuint index);
+typedef void (APIENTRYP PFNGLENABLEVERTEXARRAYATTRIBPROC) (GLuint vaobj, GLuint index);
+typedef void (APIENTRYP PFNGLVERTEXARRAYELEMENTBUFFERPROC) (GLuint vaobj, GLuint buffer);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXBUFFERPROC) (GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXBUFFERSPROC) (GLuint vaobj, GLuint first, GLsizei count, const GLuint *buffers, const GLintptr *offsets, const GLsizei *strides);
+typedef void (APIENTRYP PFNGLVERTEXARRAYATTRIBBINDINGPROC) (GLuint vaobj, GLuint attribindex, GLuint bindingindex);
+typedef void (APIENTRYP PFNGLVERTEXARRAYATTRIBFORMATPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYATTRIBIFORMATPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYATTRIBLFORMATPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYBINDINGDIVISORPROC) (GLuint vaobj, GLuint bindingindex, GLuint divisor);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYIVPROC) (GLuint vaobj, GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYINDEXEDIVPROC) (GLuint vaobj, GLuint index, GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYINDEXED64IVPROC) (GLuint vaobj, GLuint index, GLenum pname, GLint64 *param);
+typedef void (APIENTRYP PFNGLCREATESAMPLERSPROC) (GLsizei n, GLuint *samplers);
+typedef void (APIENTRYP PFNGLCREATEPROGRAMPIPELINESPROC) (GLsizei n, GLuint *pipelines);
+typedef void (APIENTRYP PFNGLCREATEQUERIESPROC) (GLenum target, GLsizei n, GLuint *ids);
+typedef void (APIENTRYP PFNGLGETQUERYBUFFEROBJECTI64VPROC) (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+typedef void (APIENTRYP PFNGLGETQUERYBUFFEROBJECTIVPROC) (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+typedef void (APIENTRYP PFNGLGETQUERYBUFFEROBJECTUI64VPROC) (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+typedef void (APIENTRYP PFNGLGETQUERYBUFFEROBJECTUIVPROC) (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+typedef void (APIENTRYP PFNGLMEMORYBARRIERBYREGIONPROC) (GLbitfield barriers);
+typedef void (APIENTRYP PFNGLGETTEXTURESUBIMAGEPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, GLsizei bufSize, void *pixels);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXTURESUBIMAGEPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei bufSize, void *pixels);
+typedef GLenum (APIENTRYP PFNGLGETGRAPHICSRESETSTATUSPROC) (void);
+typedef void (APIENTRYP PFNGLGETNCOMPRESSEDTEXIMAGEPROC) (GLenum target, GLint lod, GLsizei bufSize, void *pixels);
+typedef void (APIENTRYP PFNGLGETNTEXIMAGEPROC) (GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void *pixels);
+typedef void (APIENTRYP PFNGLGETNUNIFORMDVPROC) (GLuint program, GLint location, GLsizei bufSize, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMFVPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMIVPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMUIVPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params);
+typedef void (APIENTRYP PFNGLREADNPIXELSPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
+typedef void (APIENTRYP PFNGLGETNMAPDVPROC) (GLenum target, GLenum query, GLsizei bufSize, GLdouble *v);
+typedef void (APIENTRYP PFNGLGETNMAPFVPROC) (GLenum target, GLenum query, GLsizei bufSize, GLfloat *v);
+typedef void (APIENTRYP PFNGLGETNMAPIVPROC) (GLenum target, GLenum query, GLsizei bufSize, GLint *v);
+typedef void (APIENTRYP PFNGLGETNPIXELMAPFVPROC) (GLenum map, GLsizei bufSize, GLfloat *values);
+typedef void (APIENTRYP PFNGLGETNPIXELMAPUIVPROC) (GLenum map, GLsizei bufSize, GLuint *values);
+typedef void (APIENTRYP PFNGLGETNPIXELMAPUSVPROC) (GLenum map, GLsizei bufSize, GLushort *values);
+typedef void (APIENTRYP PFNGLGETNPOLYGONSTIPPLEPROC) (GLsizei bufSize, GLubyte *pattern);
+typedef void (APIENTRYP PFNGLGETNCOLORTABLEPROC) (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *table);
+typedef void (APIENTRYP PFNGLGETNCONVOLUTIONFILTERPROC) (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *image);
+typedef void (APIENTRYP PFNGLGETNSEPARABLEFILTERPROC) (GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, void *row, GLsizei columnBufSize, void *column, void *span);
+typedef void (APIENTRYP PFNGLGETNHISTOGRAMPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+typedef void (APIENTRYP PFNGLGETNMINMAXPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+typedef void (APIENTRYP PFNGLTEXTUREBARRIERPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glClipControl (GLenum origin, GLenum depth);
+GLAPI void APIENTRY glCreateTransformFeedbacks (GLsizei n, GLuint *ids);
+GLAPI void APIENTRY glTransformFeedbackBufferBase (GLuint xfb, GLuint index, GLuint buffer);
+GLAPI void APIENTRY glTransformFeedbackBufferRange (GLuint xfb, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glGetTransformFeedbackiv (GLuint xfb, GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetTransformFeedbacki_v (GLuint xfb, GLenum pname, GLuint index, GLint *param);
+GLAPI void APIENTRY glGetTransformFeedbacki64_v (GLuint xfb, GLenum pname, GLuint index, GLint64 *param);
+GLAPI void APIENTRY glCreateBuffers (GLsizei n, GLuint *buffers);
+GLAPI void APIENTRY glNamedBufferStorage (GLuint buffer, GLsizeiptr size, const void *data, GLbitfield flags);
+GLAPI void APIENTRY glNamedBufferData (GLuint buffer, GLsizeiptr size, const void *data, GLenum usage);
+GLAPI void APIENTRY glNamedBufferSubData (GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+GLAPI void APIENTRY glCopyNamedBufferSubData (GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+GLAPI void APIENTRY glClearNamedBufferData (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glClearNamedBufferSubData (GLuint buffer, GLenum internalformat, GLintptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data);
+GLAPI void *APIENTRY glMapNamedBuffer (GLuint buffer, GLenum access);
+GLAPI void *APIENTRY glMapNamedBufferRange (GLuint buffer, GLintptr offset, GLsizeiptr length, GLbitfield access);
+GLAPI GLboolean APIENTRY glUnmapNamedBuffer (GLuint buffer);
+GLAPI void APIENTRY glFlushMappedNamedBufferRange (GLuint buffer, GLintptr offset, GLsizeiptr length);
+GLAPI void APIENTRY glGetNamedBufferParameteriv (GLuint buffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetNamedBufferParameteri64v (GLuint buffer, GLenum pname, GLint64 *params);
+GLAPI void APIENTRY glGetNamedBufferPointerv (GLuint buffer, GLenum pname, void **params);
+GLAPI void APIENTRY glGetNamedBufferSubData (GLuint buffer, GLintptr offset, GLsizeiptr size, void *data);
+GLAPI void APIENTRY glCreateFramebuffers (GLsizei n, GLuint *framebuffers);
+GLAPI void APIENTRY glNamedFramebufferRenderbuffer (GLuint framebuffer, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+GLAPI void APIENTRY glNamedFramebufferParameteri (GLuint framebuffer, GLenum pname, GLint param);
+GLAPI void APIENTRY glNamedFramebufferTexture (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level);
+GLAPI void APIENTRY glNamedFramebufferTextureLayer (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint layer);
+GLAPI void APIENTRY glNamedFramebufferDrawBuffer (GLuint framebuffer, GLenum buf);
+GLAPI void APIENTRY glNamedFramebufferDrawBuffers (GLuint framebuffer, GLsizei n, const GLenum *bufs);
+GLAPI void APIENTRY glNamedFramebufferReadBuffer (GLuint framebuffer, GLenum src);
+GLAPI void APIENTRY glInvalidateNamedFramebufferData (GLuint framebuffer, GLsizei numAttachments, const GLenum *attachments);
+GLAPI void APIENTRY glInvalidateNamedFramebufferSubData (GLuint framebuffer, GLsizei numAttachments, const GLenum *attachments, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glClearNamedFramebufferiv (GLuint framebuffer, GLenum buffer, GLint drawbuffer, const GLint *value);
+GLAPI void APIENTRY glClearNamedFramebufferuiv (GLuint framebuffer, GLenum buffer, GLint drawbuffer, const GLuint *value);
+GLAPI void APIENTRY glClearNamedFramebufferfv (GLuint framebuffer, GLenum buffer, GLint drawbuffer, const GLfloat *value);
+GLAPI void APIENTRY glClearNamedFramebufferfi (GLuint framebuffer, GLenum buffer, GLint drawbuffer, GLfloat depth, GLint stencil);
+GLAPI void APIENTRY glBlitNamedFramebuffer (GLuint readFramebuffer, GLuint drawFramebuffer, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+GLAPI GLenum APIENTRY glCheckNamedFramebufferStatus (GLuint framebuffer, GLenum target);
+GLAPI void APIENTRY glGetNamedFramebufferParameteriv (GLuint framebuffer, GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetNamedFramebufferAttachmentParameteriv (GLuint framebuffer, GLenum attachment, GLenum pname, GLint *params);
+GLAPI void APIENTRY glCreateRenderbuffers (GLsizei n, GLuint *renderbuffers);
+GLAPI void APIENTRY glNamedRenderbufferStorage (GLuint renderbuffer, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glNamedRenderbufferStorageMultisample (GLuint renderbuffer, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetNamedRenderbufferParameteriv (GLuint renderbuffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glCreateTextures (GLenum target, GLsizei n, GLuint *textures);
+GLAPI void APIENTRY glTextureBuffer (GLuint texture, GLenum internalformat, GLuint buffer);
+GLAPI void APIENTRY glTextureBufferRange (GLuint texture, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glTextureStorage1D (GLuint texture, GLsizei levels, GLenum internalformat, GLsizei width);
+GLAPI void APIENTRY glTextureStorage2D (GLuint texture, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glTextureStorage3D (GLuint texture, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GLAPI void APIENTRY glTextureStorage2DMultisample (GLuint texture, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glTextureStorage3DMultisample (GLuint texture, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glTextureSubImage1D (GLuint texture, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTextureSubImage2D (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTextureSubImage3D (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glCompressedTextureSubImage1D (GLuint texture, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTextureSubImage2D (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTextureSubImage3D (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCopyTextureSubImage1D (GLuint texture, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glCopyTextureSubImage2D (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glCopyTextureSubImage3D (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glTextureParameterf (GLuint texture, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glTextureParameterfv (GLuint texture, GLenum pname, const GLfloat *param);
+GLAPI void APIENTRY glTextureParameteri (GLuint texture, GLenum pname, GLint param);
+GLAPI void APIENTRY glTextureParameterIiv (GLuint texture, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glTextureParameterIuiv (GLuint texture, GLenum pname, const GLuint *params);
+GLAPI void APIENTRY glTextureParameteriv (GLuint texture, GLenum pname, const GLint *param);
+GLAPI void APIENTRY glGenerateTextureMipmap (GLuint texture);
+GLAPI void APIENTRY glBindTextureUnit (GLuint unit, GLuint texture);
+GLAPI void APIENTRY glGetTextureImage (GLuint texture, GLint level, GLenum format, GLenum type, GLsizei bufSize, void *pixels);
+GLAPI void APIENTRY glGetCompressedTextureImage (GLuint texture, GLint level, GLsizei bufSize, void *pixels);
+GLAPI void APIENTRY glGetTextureLevelParameterfv (GLuint texture, GLint level, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetTextureLevelParameteriv (GLuint texture, GLint level, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetTextureParameterfv (GLuint texture, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetTextureParameterIiv (GLuint texture, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetTextureParameterIuiv (GLuint texture, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glGetTextureParameteriv (GLuint texture, GLenum pname, GLint *params);
+GLAPI void APIENTRY glCreateVertexArrays (GLsizei n, GLuint *arrays);
+GLAPI void APIENTRY glDisableVertexArrayAttrib (GLuint vaobj, GLuint index);
+GLAPI void APIENTRY glEnableVertexArrayAttrib (GLuint vaobj, GLuint index);
+GLAPI void APIENTRY glVertexArrayElementBuffer (GLuint vaobj, GLuint buffer);
+GLAPI void APIENTRY glVertexArrayVertexBuffer (GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
+GLAPI void APIENTRY glVertexArrayVertexBuffers (GLuint vaobj, GLuint first, GLsizei count, const GLuint *buffers, const GLintptr *offsets, const GLsizei *strides);
+GLAPI void APIENTRY glVertexArrayAttribBinding (GLuint vaobj, GLuint attribindex, GLuint bindingindex);
+GLAPI void APIENTRY glVertexArrayAttribFormat (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexArrayAttribIFormat (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexArrayAttribLFormat (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexArrayBindingDivisor (GLuint vaobj, GLuint bindingindex, GLuint divisor);
+GLAPI void APIENTRY glGetVertexArrayiv (GLuint vaobj, GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetVertexArrayIndexediv (GLuint vaobj, GLuint index, GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetVertexArrayIndexed64iv (GLuint vaobj, GLuint index, GLenum pname, GLint64 *param);
+GLAPI void APIENTRY glCreateSamplers (GLsizei n, GLuint *samplers);
+GLAPI void APIENTRY glCreateProgramPipelines (GLsizei n, GLuint *pipelines);
+GLAPI void APIENTRY glCreateQueries (GLenum target, GLsizei n, GLuint *ids);
+GLAPI void APIENTRY glGetQueryBufferObjecti64v (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+GLAPI void APIENTRY glGetQueryBufferObjectiv (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+GLAPI void APIENTRY glGetQueryBufferObjectui64v (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+GLAPI void APIENTRY glGetQueryBufferObjectuiv (GLuint id, GLuint buffer, GLenum pname, GLintptr offset);
+GLAPI void APIENTRY glMemoryBarrierByRegion (GLbitfield barriers);
+GLAPI void APIENTRY glGetTextureSubImage (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, GLsizei bufSize, void *pixels);
+GLAPI void APIENTRY glGetCompressedTextureSubImage (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei bufSize, void *pixels);
+GLAPI GLenum APIENTRY glGetGraphicsResetStatus (void);
+GLAPI void APIENTRY glGetnCompressedTexImage (GLenum target, GLint lod, GLsizei bufSize, void *pixels);
+GLAPI void APIENTRY glGetnTexImage (GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void *pixels);
+GLAPI void APIENTRY glGetnUniformdv (GLuint program, GLint location, GLsizei bufSize, GLdouble *params);
+GLAPI void APIENTRY glGetnUniformfv (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
+GLAPI void APIENTRY glGetnUniformiv (GLuint program, GLint location, GLsizei bufSize, GLint *params);
+GLAPI void APIENTRY glGetnUniformuiv (GLuint program, GLint location, GLsizei bufSize, GLuint *params);
+GLAPI void APIENTRY glReadnPixels (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
+GLAPI void APIENTRY glGetnMapdv (GLenum target, GLenum query, GLsizei bufSize, GLdouble *v);
+GLAPI void APIENTRY glGetnMapfv (GLenum target, GLenum query, GLsizei bufSize, GLfloat *v);
+GLAPI void APIENTRY glGetnMapiv (GLenum target, GLenum query, GLsizei bufSize, GLint *v);
+GLAPI void APIENTRY glGetnPixelMapfv (GLenum map, GLsizei bufSize, GLfloat *values);
+GLAPI void APIENTRY glGetnPixelMapuiv (GLenum map, GLsizei bufSize, GLuint *values);
+GLAPI void APIENTRY glGetnPixelMapusv (GLenum map, GLsizei bufSize, GLushort *values);
+GLAPI void APIENTRY glGetnPolygonStipple (GLsizei bufSize, GLubyte *pattern);
+GLAPI void APIENTRY glGetnColorTable (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *table);
+GLAPI void APIENTRY glGetnConvolutionFilter (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *image);
+GLAPI void APIENTRY glGetnSeparableFilter (GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, void *row, GLsizei columnBufSize, void *column, void *span);
+GLAPI void APIENTRY glGetnHistogram (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+GLAPI void APIENTRY glGetnMinmax (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+GLAPI void APIENTRY glTextureBarrier (void);
+#endif
+#endif /* GL_VERSION_4_5 */
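+
+/*
+ * Illustrative sketch (editorial addition, not part of the upstream Khronos
+ * registry output): typical use of the GL 4.5 direct-state-access entry
+ * points declared above. Assumes a loader has already resolved the function
+ * pointers and a 4.5 context is current; buffer contents and sizes are
+ * placeholders.
+ */
+#if 0
+static GLuint make_static_vbo(const void *data, GLsizeiptr size)
+{
+    GLuint buf;
+    /* Create and initialize the buffer without ever binding it. */
+    glCreateBuffers(1, &buf);
+    glNamedBufferStorage(buf, size, data, 0 /* immutable, no CPU access */);
+    return buf;
+}
+
+static GLuint make_rgba8_texture(GLsizei w, GLsizei h, const void *pixels)
+{
+    GLuint tex;
+    glCreateTextures(GL_TEXTURE_2D, 1, &tex);
+    glTextureStorage2D(tex, 1, GL_RGBA8, w, h);
+    glTextureSubImage2D(tex, 0, 0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
+    glBindTextureUnit(0, tex); /* replaces glActiveTexture + glBindTexture */
+    return tex;
+}
+#endif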
+
+#ifndef GL_VERSION_4_6
+#define GL_VERSION_4_6 1
+#define GL_SHADER_BINARY_FORMAT_SPIR_V 0x9551
+#define GL_SPIR_V_BINARY 0x9552
+#define GL_PARAMETER_BUFFER 0x80EE
+#define GL_PARAMETER_BUFFER_BINDING 0x80EF
+#define GL_CONTEXT_FLAG_NO_ERROR_BIT 0x00000008
+#define GL_VERTICES_SUBMITTED 0x82EE
+#define GL_PRIMITIVES_SUBMITTED 0x82EF
+#define GL_VERTEX_SHADER_INVOCATIONS 0x82F0
+#define GL_TESS_CONTROL_SHADER_PATCHES 0x82F1
+#define GL_TESS_EVALUATION_SHADER_INVOCATIONS 0x82F2
+#define GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED 0x82F3
+#define GL_FRAGMENT_SHADER_INVOCATIONS 0x82F4
+#define GL_COMPUTE_SHADER_INVOCATIONS 0x82F5
+#define GL_CLIPPING_INPUT_PRIMITIVES 0x82F6
+#define GL_CLIPPING_OUTPUT_PRIMITIVES 0x82F7
+#define GL_POLYGON_OFFSET_CLAMP 0x8E1B
+#define GL_SPIR_V_EXTENSIONS 0x9553
+#define GL_NUM_SPIR_V_EXTENSIONS 0x9554
+#define GL_TEXTURE_MAX_ANISOTROPY 0x84FE
+#define GL_MAX_TEXTURE_MAX_ANISOTROPY 0x84FF
+#define GL_TRANSFORM_FEEDBACK_OVERFLOW 0x82EC
+#define GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW 0x82ED
+typedef void (APIENTRYP PFNGLSPECIALIZESHADERPROC) (GLuint shader, const GLchar *pEntryPoint, GLuint numSpecializationConstants, const GLuint *pConstantIndex, const GLuint *pConstantValue);
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTCOUNTPROC) (GLenum mode, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTPROC) (GLenum mode, GLenum type, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLPOLYGONOFFSETCLAMPPROC) (GLfloat factor, GLfloat units, GLfloat clamp);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSpecializeShader (GLuint shader, const GLchar *pEntryPoint, GLuint numSpecializationConstants, const GLuint *pConstantIndex, const GLuint *pConstantValue);
+GLAPI void APIENTRY glMultiDrawArraysIndirectCount (GLenum mode, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+GLAPI void APIENTRY glMultiDrawElementsIndirectCount (GLenum mode, GLenum type, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+GLAPI void APIENTRY glPolygonOffsetClamp (GLfloat factor, GLfloat units, GLfloat clamp);
+#endif
+#endif /* GL_VERSION_4_6 */
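+
+/*
+ * Illustrative sketch (editorial addition): loading a SPIR-V module through
+ * the GL 4.6 entry points declared above. glShaderBinary and the shader
+ * object come from earlier sections of this header; the blob, its size and
+ * the "main" entry point name are placeholders.
+ */
+#if 0
+static void load_spirv(GLuint shader, const void *blob, GLsizei len)
+{
+    /* Upload the SPIR-V binary, then specialize it in place of compiling. */
+    glShaderBinary(1, &shader, GL_SHADER_BINARY_FORMAT_SPIR_V, blob, len);
+    glSpecializeShader(shader, "main", 0, NULL, NULL);
+}
+#endif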
+
+#ifndef GL_ARB_ES2_compatibility
+#define GL_ARB_ES2_compatibility 1
+#endif /* GL_ARB_ES2_compatibility */
+
+#ifndef GL_ARB_ES3_1_compatibility
+#define GL_ARB_ES3_1_compatibility 1
+#endif /* GL_ARB_ES3_1_compatibility */
+
+#ifndef GL_ARB_ES3_2_compatibility
+#define GL_ARB_ES3_2_compatibility 1
+#define GL_PRIMITIVE_BOUNDING_BOX_ARB 0x92BE
+#define GL_MULTISAMPLE_LINE_WIDTH_RANGE_ARB 0x9381
+#define GL_MULTISAMPLE_LINE_WIDTH_GRANULARITY_ARB 0x9382
+typedef void (APIENTRYP PFNGLPRIMITIVEBOUNDINGBOXARBPROC) (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPrimitiveBoundingBoxARB (GLfloat minX, GLfloat minY, GLfloat minZ, GLfloat minW, GLfloat maxX, GLfloat maxY, GLfloat maxZ, GLfloat maxW);
+#endif
+#endif /* GL_ARB_ES3_2_compatibility */
+
+#ifndef GL_ARB_ES3_compatibility
+#define GL_ARB_ES3_compatibility 1
+#endif /* GL_ARB_ES3_compatibility */
+
+#ifndef GL_ARB_arrays_of_arrays
+#define GL_ARB_arrays_of_arrays 1
+#endif /* GL_ARB_arrays_of_arrays */
+
+#ifndef GL_ARB_base_instance
+#define GL_ARB_base_instance 1
+#endif /* GL_ARB_base_instance */
+
+#ifndef GL_ARB_bindless_texture
+#define GL_ARB_bindless_texture 1
+typedef khronos_uint64_t GLuint64EXT;
+#define GL_UNSIGNED_INT64_ARB 0x140F
+typedef GLuint64 (APIENTRYP PFNGLGETTEXTUREHANDLEARBPROC) (GLuint texture);
+typedef GLuint64 (APIENTRYP PFNGLGETTEXTURESAMPLERHANDLEARBPROC) (GLuint texture, GLuint sampler);
+typedef void (APIENTRYP PFNGLMAKETEXTUREHANDLERESIDENTARBPROC) (GLuint64 handle);
+typedef void (APIENTRYP PFNGLMAKETEXTUREHANDLENONRESIDENTARBPROC) (GLuint64 handle);
+typedef GLuint64 (APIENTRYP PFNGLGETIMAGEHANDLEARBPROC) (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+typedef void (APIENTRYP PFNGLMAKEIMAGEHANDLERESIDENTARBPROC) (GLuint64 handle, GLenum access);
+typedef void (APIENTRYP PFNGLMAKEIMAGEHANDLENONRESIDENTARBPROC) (GLuint64 handle);
+typedef void (APIENTRYP PFNGLUNIFORMHANDLEUI64ARBPROC) (GLint location, GLuint64 value);
+typedef void (APIENTRYP PFNGLUNIFORMHANDLEUI64VARBPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64ARBPROC) (GLuint program, GLint location, GLuint64 value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+typedef GLboolean (APIENTRYP PFNGLISTEXTUREHANDLERESIDENTARBPROC) (GLuint64 handle);
+typedef GLboolean (APIENTRYP PFNGLISIMAGEHANDLERESIDENTARBPROC) (GLuint64 handle);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1UI64ARBPROC) (GLuint index, GLuint64EXT x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1UI64VARBPROC) (GLuint index, const GLuint64EXT *v);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLUI64VARBPROC) (GLuint index, GLenum pname, GLuint64EXT *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLuint64 APIENTRY glGetTextureHandleARB (GLuint texture);
+GLAPI GLuint64 APIENTRY glGetTextureSamplerHandleARB (GLuint texture, GLuint sampler);
+GLAPI void APIENTRY glMakeTextureHandleResidentARB (GLuint64 handle);
+GLAPI void APIENTRY glMakeTextureHandleNonResidentARB (GLuint64 handle);
+GLAPI GLuint64 APIENTRY glGetImageHandleARB (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+GLAPI void APIENTRY glMakeImageHandleResidentARB (GLuint64 handle, GLenum access);
+GLAPI void APIENTRY glMakeImageHandleNonResidentARB (GLuint64 handle);
+GLAPI void APIENTRY glUniformHandleui64ARB (GLint location, GLuint64 value);
+GLAPI void APIENTRY glUniformHandleui64vARB (GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glProgramUniformHandleui64ARB (GLuint program, GLint location, GLuint64 value);
+GLAPI void APIENTRY glProgramUniformHandleui64vARB (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+GLAPI GLboolean APIENTRY glIsTextureHandleResidentARB (GLuint64 handle);
+GLAPI GLboolean APIENTRY glIsImageHandleResidentARB (GLuint64 handle);
+GLAPI void APIENTRY glVertexAttribL1ui64ARB (GLuint index, GLuint64EXT x);
+GLAPI void APIENTRY glVertexAttribL1ui64vARB (GLuint index, const GLuint64EXT *v);
+GLAPI void APIENTRY glGetVertexAttribLui64vARB (GLuint index, GLenum pname, GLuint64EXT *params);
+#endif
+#endif /* GL_ARB_bindless_texture */
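+
+/*
+ * Illustrative sketch (editorial addition): the usual ARB_bindless_texture
+ * flow using the declarations above. A handle must be made resident before
+ * any shader dereferences it, and it stays valid for the texture's lifetime.
+ * The program/location pair is a placeholder.
+ */
+#if 0
+static GLuint64 publish_texture(GLuint program, GLint location, GLuint tex)
+{
+    GLuint64 handle = glGetTextureHandleARB(tex);
+    glMakeTextureHandleResidentARB(handle);
+    /* Hand the 64-bit handle to a bindless sampler uniform in the shader. */
+    glProgramUniformHandleui64ARB(program, location, handle);
+    return handle;
+}
+#endif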
+
+#ifndef GL_ARB_blend_func_extended
+#define GL_ARB_blend_func_extended 1
+#endif /* GL_ARB_blend_func_extended */
+
+#ifndef GL_ARB_buffer_storage
+#define GL_ARB_buffer_storage 1
+#endif /* GL_ARB_buffer_storage */
+
+#ifndef GL_ARB_cl_event
+#define GL_ARB_cl_event 1
+struct _cl_context;
+struct _cl_event;
+#define GL_SYNC_CL_EVENT_ARB 0x8240
+#define GL_SYNC_CL_EVENT_COMPLETE_ARB 0x8241
+typedef GLsync (APIENTRYP PFNGLCREATESYNCFROMCLEVENTARBPROC) (struct _cl_context *context, struct _cl_event *event, GLbitfield flags);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLsync APIENTRY glCreateSyncFromCLeventARB (struct _cl_context *context, struct _cl_event *event, GLbitfield flags);
+#endif
+#endif /* GL_ARB_cl_event */
+
+#ifndef GL_ARB_clear_buffer_object
+#define GL_ARB_clear_buffer_object 1
+#endif /* GL_ARB_clear_buffer_object */
+
+#ifndef GL_ARB_clear_texture
+#define GL_ARB_clear_texture 1
+#endif /* GL_ARB_clear_texture */
+
+#ifndef GL_ARB_clip_control
+#define GL_ARB_clip_control 1
+#endif /* GL_ARB_clip_control */
+
+#ifndef GL_ARB_color_buffer_float
+#define GL_ARB_color_buffer_float 1
+#define GL_RGBA_FLOAT_MODE_ARB 0x8820
+#define GL_CLAMP_VERTEX_COLOR_ARB 0x891A
+#define GL_CLAMP_FRAGMENT_COLOR_ARB 0x891B
+#define GL_CLAMP_READ_COLOR_ARB 0x891C
+#define GL_FIXED_ONLY_ARB 0x891D
+typedef void (APIENTRYP PFNGLCLAMPCOLORARBPROC) (GLenum target, GLenum clamp);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glClampColorARB (GLenum target, GLenum clamp);
+#endif
+#endif /* GL_ARB_color_buffer_float */
+
+#ifndef GL_ARB_compatibility
+#define GL_ARB_compatibility 1
+#endif /* GL_ARB_compatibility */
+
+#ifndef GL_ARB_compressed_texture_pixel_storage
+#define GL_ARB_compressed_texture_pixel_storage 1
+#endif /* GL_ARB_compressed_texture_pixel_storage */
+
+#ifndef GL_ARB_compute_shader
+#define GL_ARB_compute_shader 1
+#endif /* GL_ARB_compute_shader */
+
+#ifndef GL_ARB_compute_variable_group_size
+#define GL_ARB_compute_variable_group_size 1
+#define GL_MAX_COMPUTE_VARIABLE_GROUP_INVOCATIONS_ARB 0x9344
+#define GL_MAX_COMPUTE_FIXED_GROUP_INVOCATIONS_ARB 0x90EB
+#define GL_MAX_COMPUTE_VARIABLE_GROUP_SIZE_ARB 0x9345
+#define GL_MAX_COMPUTE_FIXED_GROUP_SIZE_ARB 0x91BF
+typedef void (APIENTRYP PFNGLDISPATCHCOMPUTEGROUPSIZEARBPROC) (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z, GLuint group_size_x, GLuint group_size_y, GLuint group_size_z);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDispatchComputeGroupSizeARB (GLuint num_groups_x, GLuint num_groups_y, GLuint num_groups_z, GLuint group_size_x, GLuint group_size_y, GLuint group_size_z);
+#endif
+#endif /* GL_ARB_compute_variable_group_size */
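+
+/*
+ * Illustrative sketch (editorial addition): ARB_compute_variable_group_size
+ * lets the local group size be chosen at dispatch time instead of being
+ * fixed in the shader (which declares layout(local_size_variable) instead).
+ * The counts below are placeholders.
+ */
+#if 0
+static void dispatch_variable(GLuint groups_x, GLuint local_x)
+{
+    /* 1-D dispatch: groups_x groups of local_x invocations each. */
+    glDispatchComputeGroupSizeARB(groups_x, 1, 1, local_x, 1, 1);
+}
+#endif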
+
+#ifndef GL_ARB_conditional_render_inverted
+#define GL_ARB_conditional_render_inverted 1
+#endif /* GL_ARB_conditional_render_inverted */
+
+#ifndef GL_ARB_conservative_depth
+#define GL_ARB_conservative_depth 1
+#endif /* GL_ARB_conservative_depth */
+
+#ifndef GL_ARB_copy_buffer
+#define GL_ARB_copy_buffer 1
+#endif /* GL_ARB_copy_buffer */
+
+#ifndef GL_ARB_copy_image
+#define GL_ARB_copy_image 1
+#endif /* GL_ARB_copy_image */
+
+#ifndef GL_ARB_cull_distance
+#define GL_ARB_cull_distance 1
+#endif /* GL_ARB_cull_distance */
+
+#ifndef GL_ARB_debug_output
+#define GL_ARB_debug_output 1
+typedef void (APIENTRY *GLDEBUGPROCARB)(GLenum source,GLenum type,GLuint id,GLenum severity,GLsizei length,const GLchar *message,const void *userParam);
+#define GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB 0x8242
+#define GL_DEBUG_NEXT_LOGGED_MESSAGE_LENGTH_ARB 0x8243
+#define GL_DEBUG_CALLBACK_FUNCTION_ARB 0x8244
+#define GL_DEBUG_CALLBACK_USER_PARAM_ARB 0x8245
+#define GL_DEBUG_SOURCE_API_ARB 0x8246
+#define GL_DEBUG_SOURCE_WINDOW_SYSTEM_ARB 0x8247
+#define GL_DEBUG_SOURCE_SHADER_COMPILER_ARB 0x8248
+#define GL_DEBUG_SOURCE_THIRD_PARTY_ARB 0x8249
+#define GL_DEBUG_SOURCE_APPLICATION_ARB 0x824A
+#define GL_DEBUG_SOURCE_OTHER_ARB 0x824B
+#define GL_DEBUG_TYPE_ERROR_ARB 0x824C
+#define GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR_ARB 0x824D
+#define GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR_ARB 0x824E
+#define GL_DEBUG_TYPE_PORTABILITY_ARB 0x824F
+#define GL_DEBUG_TYPE_PERFORMANCE_ARB 0x8250
+#define GL_DEBUG_TYPE_OTHER_ARB 0x8251
+#define GL_MAX_DEBUG_MESSAGE_LENGTH_ARB 0x9143
+#define GL_MAX_DEBUG_LOGGED_MESSAGES_ARB 0x9144
+#define GL_DEBUG_LOGGED_MESSAGES_ARB 0x9145
+#define GL_DEBUG_SEVERITY_HIGH_ARB 0x9146
+#define GL_DEBUG_SEVERITY_MEDIUM_ARB 0x9147
+#define GL_DEBUG_SEVERITY_LOW_ARB 0x9148
+typedef void (APIENTRYP PFNGLDEBUGMESSAGECONTROLARBPROC) (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTARBPROC) (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKARBPROC) (GLDEBUGPROCARB callback, const void *userParam);
+typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGARBPROC) (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDebugMessageControlARB (GLenum source, GLenum type, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
+GLAPI void APIENTRY glDebugMessageInsertARB (GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *buf);
+GLAPI void APIENTRY glDebugMessageCallbackARB (GLDEBUGPROCARB callback, const void *userParam);
+GLAPI GLuint APIENTRY glGetDebugMessageLogARB (GLuint count, GLsizei bufSize, GLenum *sources, GLenum *types, GLuint *ids, GLenum *severities, GLsizei *lengths, GLchar *messageLog);
+#endif
+#endif /* GL_ARB_debug_output */
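+
+/*
+ * Illustrative sketch (editorial addition): installing an ARB_debug_output
+ * callback with the declarations above (assumes <stdio.h>). For reliable
+ * delivery the context should be created with the debug flag; synchronous
+ * output keeps the callback on the API-calling thread.
+ */
+#if 0
+static void APIENTRY on_debug(GLenum source, GLenum type, GLuint id,
+                              GLenum severity, GLsizei length,
+                              const GLchar *message, const void *userParam)
+{
+    (void)source; (void)type; (void)id;
+    (void)severity; (void)length; (void)userParam;
+    fprintf(stderr, "GL: %s\n", message);
+}
+
+static void install_debug_callback(void)
+{
+    glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
+    glDebugMessageCallbackARB(on_debug, NULL);
+    /* Enable every source, type and severity. */
+    glDebugMessageControlARB(GL_DONT_CARE, GL_DONT_CARE, GL_DONT_CARE,
+                             0, NULL, GL_TRUE);
+}
+#endif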
+
+#ifndef GL_ARB_depth_buffer_float
+#define GL_ARB_depth_buffer_float 1
+#endif /* GL_ARB_depth_buffer_float */
+
+#ifndef GL_ARB_depth_clamp
+#define GL_ARB_depth_clamp 1
+#endif /* GL_ARB_depth_clamp */
+
+#ifndef GL_ARB_depth_texture
+#define GL_ARB_depth_texture 1
+#define GL_DEPTH_COMPONENT16_ARB 0x81A5
+#define GL_DEPTH_COMPONENT24_ARB 0x81A6
+#define GL_DEPTH_COMPONENT32_ARB 0x81A7
+#define GL_TEXTURE_DEPTH_SIZE_ARB 0x884A
+#define GL_DEPTH_TEXTURE_MODE_ARB 0x884B
+#endif /* GL_ARB_depth_texture */
+
+#ifndef GL_ARB_derivative_control
+#define GL_ARB_derivative_control 1
+#endif /* GL_ARB_derivative_control */
+
+#ifndef GL_ARB_direct_state_access
+#define GL_ARB_direct_state_access 1
+#endif /* GL_ARB_direct_state_access */
+
+#ifndef GL_ARB_draw_buffers
+#define GL_ARB_draw_buffers 1
+#define GL_MAX_DRAW_BUFFERS_ARB 0x8824
+#define GL_DRAW_BUFFER0_ARB 0x8825
+#define GL_DRAW_BUFFER1_ARB 0x8826
+#define GL_DRAW_BUFFER2_ARB 0x8827
+#define GL_DRAW_BUFFER3_ARB 0x8828
+#define GL_DRAW_BUFFER4_ARB 0x8829
+#define GL_DRAW_BUFFER5_ARB 0x882A
+#define GL_DRAW_BUFFER6_ARB 0x882B
+#define GL_DRAW_BUFFER7_ARB 0x882C
+#define GL_DRAW_BUFFER8_ARB 0x882D
+#define GL_DRAW_BUFFER9_ARB 0x882E
+#define GL_DRAW_BUFFER10_ARB 0x882F
+#define GL_DRAW_BUFFER11_ARB 0x8830
+#define GL_DRAW_BUFFER12_ARB 0x8831
+#define GL_DRAW_BUFFER13_ARB 0x8832
+#define GL_DRAW_BUFFER14_ARB 0x8833
+#define GL_DRAW_BUFFER15_ARB 0x8834
+typedef void (APIENTRYP PFNGLDRAWBUFFERSARBPROC) (GLsizei n, const GLenum *bufs);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawBuffersARB (GLsizei n, const GLenum *bufs);
+#endif
+#endif /* GL_ARB_draw_buffers */
+
+#ifndef GL_ARB_draw_buffers_blend
+#define GL_ARB_draw_buffers_blend 1
+typedef void (APIENTRYP PFNGLBLENDEQUATIONIARBPROC) (GLuint buf, GLenum mode);
+typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEIARBPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+typedef void (APIENTRYP PFNGLBLENDFUNCIARBPROC) (GLuint buf, GLenum src, GLenum dst);
+typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEIARBPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendEquationiARB (GLuint buf, GLenum mode);
+GLAPI void APIENTRY glBlendEquationSeparateiARB (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+GLAPI void APIENTRY glBlendFunciARB (GLuint buf, GLenum src, GLenum dst);
+GLAPI void APIENTRY glBlendFuncSeparateiARB (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+#endif
+#endif /* GL_ARB_draw_buffers_blend */
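+
+/*
+ * Illustrative sketch (editorial addition): ARB_draw_buffers_blend applies a
+ * distinct blend configuration per color attachment, here additive blending
+ * on attachment 1 while attachment 0 keeps classic alpha blending.
+ */
+#if 0
+static void set_per_buffer_blending(void)
+{
+    glBlendFunciARB(0, GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
+    glBlendFunciARB(1, GL_ONE, GL_ONE);
+    glBlendEquationiARB(1, GL_FUNC_ADD);
+}
+#endif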
+
+#ifndef GL_ARB_draw_elements_base_vertex
+#define GL_ARB_draw_elements_base_vertex 1
+#endif /* GL_ARB_draw_elements_base_vertex */
+
+#ifndef GL_ARB_draw_indirect
+#define GL_ARB_draw_indirect 1
+#endif /* GL_ARB_draw_indirect */
+
+#ifndef GL_ARB_draw_instanced
+#define GL_ARB_draw_instanced 1
+typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDARBPROC) (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDARBPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawArraysInstancedARB (GLenum mode, GLint first, GLsizei count, GLsizei primcount);
+GLAPI void APIENTRY glDrawElementsInstancedARB (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#endif
+#endif /* GL_ARB_draw_instanced */
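+
+/*
+ * Illustrative sketch (editorial addition): ARB_draw_instanced issues one
+ * draw call that the GPU repeats primcount times; the shader tells instances
+ * apart via gl_InstanceIDARB. The vertex and instance counts are placeholders.
+ */
+#if 0
+static void draw_grass(GLsizei blade_vertices, GLsizei blades)
+{
+    /* One VAO bind, one call, `blades` instances. */
+    glDrawArraysInstancedARB(GL_TRIANGLE_STRIP, 0, blade_vertices, blades);
+}
+#endif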
+
+#ifndef GL_ARB_enhanced_layouts
+#define GL_ARB_enhanced_layouts 1
+#endif /* GL_ARB_enhanced_layouts */
+
+#ifndef GL_ARB_explicit_attrib_location
+#define GL_ARB_explicit_attrib_location 1
+#endif /* GL_ARB_explicit_attrib_location */
+
+#ifndef GL_ARB_explicit_uniform_location
+#define GL_ARB_explicit_uniform_location 1
+#endif /* GL_ARB_explicit_uniform_location */
+
+#ifndef GL_ARB_fragment_coord_conventions
+#define GL_ARB_fragment_coord_conventions 1
+#endif /* GL_ARB_fragment_coord_conventions */
+
+#ifndef GL_ARB_fragment_layer_viewport
+#define GL_ARB_fragment_layer_viewport 1
+#endif /* GL_ARB_fragment_layer_viewport */
+
+#ifndef GL_ARB_fragment_program
+#define GL_ARB_fragment_program 1
+#define GL_FRAGMENT_PROGRAM_ARB 0x8804
+#define GL_PROGRAM_FORMAT_ASCII_ARB 0x8875
+#define GL_PROGRAM_LENGTH_ARB 0x8627
+#define GL_PROGRAM_FORMAT_ARB 0x8876
+#define GL_PROGRAM_BINDING_ARB 0x8677
+#define GL_PROGRAM_INSTRUCTIONS_ARB 0x88A0
+#define GL_MAX_PROGRAM_INSTRUCTIONS_ARB 0x88A1
+#define GL_PROGRAM_NATIVE_INSTRUCTIONS_ARB 0x88A2
+#define GL_MAX_PROGRAM_NATIVE_INSTRUCTIONS_ARB 0x88A3
+#define GL_PROGRAM_TEMPORARIES_ARB 0x88A4
+#define GL_MAX_PROGRAM_TEMPORARIES_ARB 0x88A5
+#define GL_PROGRAM_NATIVE_TEMPORARIES_ARB 0x88A6
+#define GL_MAX_PROGRAM_NATIVE_TEMPORARIES_ARB 0x88A7
+#define GL_PROGRAM_PARAMETERS_ARB 0x88A8
+#define GL_MAX_PROGRAM_PARAMETERS_ARB 0x88A9
+#define GL_PROGRAM_NATIVE_PARAMETERS_ARB 0x88AA
+#define GL_MAX_PROGRAM_NATIVE_PARAMETERS_ARB 0x88AB
+#define GL_PROGRAM_ATTRIBS_ARB 0x88AC
+#define GL_MAX_PROGRAM_ATTRIBS_ARB 0x88AD
+#define GL_PROGRAM_NATIVE_ATTRIBS_ARB 0x88AE
+#define GL_MAX_PROGRAM_NATIVE_ATTRIBS_ARB 0x88AF
+#define GL_MAX_PROGRAM_LOCAL_PARAMETERS_ARB 0x88B4
+#define GL_MAX_PROGRAM_ENV_PARAMETERS_ARB 0x88B5
+#define GL_PROGRAM_UNDER_NATIVE_LIMITS_ARB 0x88B6
+#define GL_PROGRAM_ALU_INSTRUCTIONS_ARB 0x8805
+#define GL_PROGRAM_TEX_INSTRUCTIONS_ARB 0x8806
+#define GL_PROGRAM_TEX_INDIRECTIONS_ARB 0x8807
+#define GL_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB 0x8808
+#define GL_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB 0x8809
+#define GL_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB 0x880A
+#define GL_MAX_PROGRAM_ALU_INSTRUCTIONS_ARB 0x880B
+#define GL_MAX_PROGRAM_TEX_INSTRUCTIONS_ARB 0x880C
+#define GL_MAX_PROGRAM_TEX_INDIRECTIONS_ARB 0x880D
+#define GL_MAX_PROGRAM_NATIVE_ALU_INSTRUCTIONS_ARB 0x880E
+#define GL_MAX_PROGRAM_NATIVE_TEX_INSTRUCTIONS_ARB 0x880F
+#define GL_MAX_PROGRAM_NATIVE_TEX_INDIRECTIONS_ARB 0x8810
+#define GL_PROGRAM_STRING_ARB 0x8628
+#define GL_PROGRAM_ERROR_POSITION_ARB 0x864B
+#define GL_CURRENT_MATRIX_ARB 0x8641
+#define GL_TRANSPOSE_CURRENT_MATRIX_ARB 0x88B7
+#define GL_CURRENT_MATRIX_STACK_DEPTH_ARB 0x8640
+#define GL_MAX_PROGRAM_MATRICES_ARB 0x862F
+#define GL_MAX_PROGRAM_MATRIX_STACK_DEPTH_ARB 0x862E
+#define GL_MAX_TEXTURE_COORDS_ARB 0x8871
+#define GL_MAX_TEXTURE_IMAGE_UNITS_ARB 0x8872
+#define GL_PROGRAM_ERROR_STRING_ARB 0x8874
+#define GL_MATRIX0_ARB 0x88C0
+#define GL_MATRIX1_ARB 0x88C1
+#define GL_MATRIX2_ARB 0x88C2
+#define GL_MATRIX3_ARB 0x88C3
+#define GL_MATRIX4_ARB 0x88C4
+#define GL_MATRIX5_ARB 0x88C5
+#define GL_MATRIX6_ARB 0x88C6
+#define GL_MATRIX7_ARB 0x88C7
+#define GL_MATRIX8_ARB 0x88C8
+#define GL_MATRIX9_ARB 0x88C9
+#define GL_MATRIX10_ARB 0x88CA
+#define GL_MATRIX11_ARB 0x88CB
+#define GL_MATRIX12_ARB 0x88CC
+#define GL_MATRIX13_ARB 0x88CD
+#define GL_MATRIX14_ARB 0x88CE
+#define GL_MATRIX15_ARB 0x88CF
+#define GL_MATRIX16_ARB 0x88D0
+#define GL_MATRIX17_ARB 0x88D1
+#define GL_MATRIX18_ARB 0x88D2
+#define GL_MATRIX19_ARB 0x88D3
+#define GL_MATRIX20_ARB 0x88D4
+#define GL_MATRIX21_ARB 0x88D5
+#define GL_MATRIX22_ARB 0x88D6
+#define GL_MATRIX23_ARB 0x88D7
+#define GL_MATRIX24_ARB 0x88D8
+#define GL_MATRIX25_ARB 0x88D9
+#define GL_MATRIX26_ARB 0x88DA
+#define GL_MATRIX27_ARB 0x88DB
+#define GL_MATRIX28_ARB 0x88DC
+#define GL_MATRIX29_ARB 0x88DD
+#define GL_MATRIX30_ARB 0x88DE
+#define GL_MATRIX31_ARB 0x88DF
+typedef void (APIENTRYP PFNGLPROGRAMSTRINGARBPROC) (GLenum target, GLenum format, GLsizei len, const void *string);
+typedef void (APIENTRYP PFNGLBINDPROGRAMARBPROC) (GLenum target, GLuint program);
+typedef void (APIENTRYP PFNGLDELETEPROGRAMSARBPROC) (GLsizei n, const GLuint *programs);
+typedef void (APIENTRYP PFNGLGENPROGRAMSARBPROC) (GLsizei n, GLuint *programs);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4DARBPROC) (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4DVARBPROC) (GLenum target, GLuint index, const GLdouble *params);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4FARBPROC) (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETER4FVARBPROC) (GLenum target, GLuint index, const GLfloat *params);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4DARBPROC) (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4DVARBPROC) (GLenum target, GLuint index, const GLdouble *params);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4FARBPROC) (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETER4FVARBPROC) (GLenum target, GLuint index, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERDVARBPROC) (GLenum target, GLuint index, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERFVARBPROC) (GLenum target, GLuint index, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERDVARBPROC) (GLenum target, GLuint index, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERFVARBPROC) (GLenum target, GLuint index, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMIVARBPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMSTRINGARBPROC) (GLenum target, GLenum pname, void *string);
+typedef GLboolean (APIENTRYP PFNGLISPROGRAMARBPROC) (GLuint program);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramStringARB (GLenum target, GLenum format, GLsizei len, const void *string);
+GLAPI void APIENTRY glBindProgramARB (GLenum target, GLuint program);
+GLAPI void APIENTRY glDeleteProgramsARB (GLsizei n, const GLuint *programs);
+GLAPI void APIENTRY glGenProgramsARB (GLsizei n, GLuint *programs);
+GLAPI void APIENTRY glProgramEnvParameter4dARB (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glProgramEnvParameter4dvARB (GLenum target, GLuint index, const GLdouble *params);
+GLAPI void APIENTRY glProgramEnvParameter4fARB (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glProgramEnvParameter4fvARB (GLenum target, GLuint index, const GLfloat *params);
+GLAPI void APIENTRY glProgramLocalParameter4dARB (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glProgramLocalParameter4dvARB (GLenum target, GLuint index, const GLdouble *params);
+GLAPI void APIENTRY glProgramLocalParameter4fARB (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glProgramLocalParameter4fvARB (GLenum target, GLuint index, const GLfloat *params);
+GLAPI void APIENTRY glGetProgramEnvParameterdvARB (GLenum target, GLuint index, GLdouble *params);
+GLAPI void APIENTRY glGetProgramEnvParameterfvARB (GLenum target, GLuint index, GLfloat *params);
+GLAPI void APIENTRY glGetProgramLocalParameterdvARB (GLenum target, GLuint index, GLdouble *params);
+GLAPI void APIENTRY glGetProgramLocalParameterfvARB (GLenum target, GLuint index, GLfloat *params);
+GLAPI void APIENTRY glGetProgramivARB (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetProgramStringARB (GLenum target, GLenum pname, void *string);
+GLAPI GLboolean APIENTRY glIsProgramARB (GLuint program);
+#endif
+#endif /* GL_ARB_fragment_program */
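+
+/*
+ * Illustrative sketch (editorial addition): the legacy ARB_fragment_program
+ * assembly path declared above. The program text is a trivial pass-through;
+ * real programs use the full ARBfp1.0 instruction set.
+ */
+#if 0
+static GLuint load_arbfp(void)
+{
+    static const char src[] =
+        "!!ARBfp1.0\n"
+        "MOV result.color, fragment.color;\n"
+        "END\n";
+    GLuint prog;
+    glGenProgramsARB(1, &prog);
+    glBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, prog);
+    glProgramStringARB(GL_FRAGMENT_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
+                       (GLsizei)(sizeof(src) - 1), src);
+    /* On failure, GL_PROGRAM_ERROR_POSITION_ARB / _STRING_ARB locate it. */
+    return prog;
+}
+#endif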
+
+#ifndef GL_ARB_fragment_program_shadow
+#define GL_ARB_fragment_program_shadow 1
+#endif /* GL_ARB_fragment_program_shadow */
+
+#ifndef GL_ARB_fragment_shader
+#define GL_ARB_fragment_shader 1
+#define GL_FRAGMENT_SHADER_ARB 0x8B30
+#define GL_MAX_FRAGMENT_UNIFORM_COMPONENTS_ARB 0x8B49
+#define GL_FRAGMENT_SHADER_DERIVATIVE_HINT_ARB 0x8B8B
+#endif /* GL_ARB_fragment_shader */
+
+#ifndef GL_ARB_fragment_shader_interlock
+#define GL_ARB_fragment_shader_interlock 1
+#endif /* GL_ARB_fragment_shader_interlock */
+
+#ifndef GL_ARB_framebuffer_no_attachments
+#define GL_ARB_framebuffer_no_attachments 1
+#endif /* GL_ARB_framebuffer_no_attachments */
+
+#ifndef GL_ARB_framebuffer_object
+#define GL_ARB_framebuffer_object 1
+#endif /* GL_ARB_framebuffer_object */
+
+#ifndef GL_ARB_framebuffer_sRGB
+#define GL_ARB_framebuffer_sRGB 1
+#endif /* GL_ARB_framebuffer_sRGB */
+
+#ifndef GL_ARB_geometry_shader4
+#define GL_ARB_geometry_shader4 1
+#define GL_LINES_ADJACENCY_ARB 0x000A
+#define GL_LINE_STRIP_ADJACENCY_ARB 0x000B
+#define GL_TRIANGLES_ADJACENCY_ARB 0x000C
+#define GL_TRIANGLE_STRIP_ADJACENCY_ARB 0x000D
+#define GL_PROGRAM_POINT_SIZE_ARB 0x8642
+#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_ARB 0x8C29
+#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_ARB 0x8DA7
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_ARB 0x8DA8
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_ARB 0x8DA9
+#define GL_GEOMETRY_SHADER_ARB 0x8DD9
+#define GL_GEOMETRY_VERTICES_OUT_ARB 0x8DDA
+#define GL_GEOMETRY_INPUT_TYPE_ARB 0x8DDB
+#define GL_GEOMETRY_OUTPUT_TYPE_ARB 0x8DDC
+#define GL_MAX_GEOMETRY_VARYING_COMPONENTS_ARB 0x8DDD
+#define GL_MAX_VERTEX_VARYING_COMPONENTS_ARB 0x8DDE
+#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_ARB 0x8DDF
+#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_ARB 0x8DE0
+#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_ARB 0x8DE1
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETERIARBPROC) (GLuint program, GLenum pname, GLint value);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREARBPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYERARBPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREFACEARBPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramParameteriARB (GLuint program, GLenum pname, GLint value);
+GLAPI void APIENTRY glFramebufferTextureARB (GLenum target, GLenum attachment, GLuint texture, GLint level);
+GLAPI void APIENTRY glFramebufferTextureLayerARB (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+GLAPI void APIENTRY glFramebufferTextureFaceARB (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face);
+#endif
+#endif /* GL_ARB_geometry_shader4 */
+
+#ifndef GL_ARB_get_program_binary
+#define GL_ARB_get_program_binary 1
+#endif /* GL_ARB_get_program_binary */
+
+#ifndef GL_ARB_get_texture_sub_image
+#define GL_ARB_get_texture_sub_image 1
+#endif /* GL_ARB_get_texture_sub_image */
+
+#ifndef GL_ARB_gl_spirv
+#define GL_ARB_gl_spirv 1
+#define GL_SHADER_BINARY_FORMAT_SPIR_V_ARB 0x9551
+#define GL_SPIR_V_BINARY_ARB 0x9552
+typedef void (APIENTRYP PFNGLSPECIALIZESHADERARBPROC) (GLuint shader, const GLchar *pEntryPoint, GLuint numSpecializationConstants, const GLuint *pConstantIndex, const GLuint *pConstantValue);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSpecializeShaderARB (GLuint shader, const GLchar *pEntryPoint, GLuint numSpecializationConstants, const GLuint *pConstantIndex, const GLuint *pConstantValue);
+#endif
+#endif /* GL_ARB_gl_spirv */
+
+#ifndef GL_ARB_gpu_shader5
+#define GL_ARB_gpu_shader5 1
+#endif /* GL_ARB_gpu_shader5 */
+
+#ifndef GL_ARB_gpu_shader_fp64
+#define GL_ARB_gpu_shader_fp64 1
+#endif /* GL_ARB_gpu_shader_fp64 */
+
+#ifndef GL_ARB_gpu_shader_int64
+#define GL_ARB_gpu_shader_int64 1
+#define GL_INT64_ARB 0x140E
+#define GL_INT64_VEC2_ARB 0x8FE9
+#define GL_INT64_VEC3_ARB 0x8FEA
+#define GL_INT64_VEC4_ARB 0x8FEB
+#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FF5
+#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FF6
+#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FF7
+typedef void (APIENTRYP PFNGLUNIFORM1I64ARBPROC) (GLint location, GLint64 x);
+typedef void (APIENTRYP PFNGLUNIFORM2I64ARBPROC) (GLint location, GLint64 x, GLint64 y);
+typedef void (APIENTRYP PFNGLUNIFORM3I64ARBPROC) (GLint location, GLint64 x, GLint64 y, GLint64 z);
+typedef void (APIENTRYP PFNGLUNIFORM4I64ARBPROC) (GLint location, GLint64 x, GLint64 y, GLint64 z, GLint64 w);
+typedef void (APIENTRYP PFNGLUNIFORM1I64VARBPROC) (GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM2I64VARBPROC) (GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM3I64VARBPROC) (GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM4I64VARBPROC) (GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM1UI64ARBPROC) (GLint location, GLuint64 x);
+typedef void (APIENTRYP PFNGLUNIFORM2UI64ARBPROC) (GLint location, GLuint64 x, GLuint64 y);
+typedef void (APIENTRYP PFNGLUNIFORM3UI64ARBPROC) (GLint location, GLuint64 x, GLuint64 y, GLuint64 z);
+typedef void (APIENTRYP PFNGLUNIFORM4UI64ARBPROC) (GLint location, GLuint64 x, GLuint64 y, GLuint64 z, GLuint64 w);
+typedef void (APIENTRYP PFNGLUNIFORM1UI64VARBPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM2UI64VARBPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM3UI64VARBPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLUNIFORM4UI64VARBPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLGETUNIFORMI64VARBPROC) (GLuint program, GLint location, GLint64 *params);
+typedef void (APIENTRYP PFNGLGETUNIFORMUI64VARBPROC) (GLuint program, GLint location, GLuint64 *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMI64VARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLint64 *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMUI64VARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint64 *params);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1I64ARBPROC) (GLuint program, GLint location, GLint64 x);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2I64ARBPROC) (GLuint program, GLint location, GLint64 x, GLint64 y);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3I64ARBPROC) (GLuint program, GLint location, GLint64 x, GLint64 y, GLint64 z);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4I64ARBPROC) (GLuint program, GLint location, GLint64 x, GLint64 y, GLint64 z, GLint64 w);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1I64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2I64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3I64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4I64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UI64ARBPROC) (GLuint program, GLint location, GLuint64 x);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UI64ARBPROC) (GLuint program, GLint location, GLuint64 x, GLuint64 y);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UI64ARBPROC) (GLuint program, GLint location, GLuint64 x, GLuint64 y, GLuint64 z);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UI64ARBPROC) (GLuint program, GLint location, GLuint64 x, GLuint64 y, GLuint64 z, GLuint64 w);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UI64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UI64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UI64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UI64VARBPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glUniform1i64ARB (GLint location, GLint64 x);
+GLAPI void APIENTRY glUniform2i64ARB (GLint location, GLint64 x, GLint64 y);
+GLAPI void APIENTRY glUniform3i64ARB (GLint location, GLint64 x, GLint64 y, GLint64 z);
+GLAPI void APIENTRY glUniform4i64ARB (GLint location, GLint64 x, GLint64 y, GLint64 z, GLint64 w);
+GLAPI void APIENTRY glUniform1i64vARB (GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glUniform2i64vARB (GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glUniform3i64vARB (GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glUniform4i64vARB (GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glUniform1ui64ARB (GLint location, GLuint64 x);
+GLAPI void APIENTRY glUniform2ui64ARB (GLint location, GLuint64 x, GLuint64 y);
+GLAPI void APIENTRY glUniform3ui64ARB (GLint location, GLuint64 x, GLuint64 y, GLuint64 z);
+GLAPI void APIENTRY glUniform4ui64ARB (GLint location, GLuint64 x, GLuint64 y, GLuint64 z, GLuint64 w);
+GLAPI void APIENTRY glUniform1ui64vARB (GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glUniform2ui64vARB (GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glUniform3ui64vARB (GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glUniform4ui64vARB (GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glGetUniformi64vARB (GLuint program, GLint location, GLint64 *params);
+GLAPI void APIENTRY glGetUniformui64vARB (GLuint program, GLint location, GLuint64 *params);
+GLAPI void APIENTRY glGetnUniformi64vARB (GLuint program, GLint location, GLsizei bufSize, GLint64 *params);
+GLAPI void APIENTRY glGetnUniformui64vARB (GLuint program, GLint location, GLsizei bufSize, GLuint64 *params);
+GLAPI void APIENTRY glProgramUniform1i64ARB (GLuint program, GLint location, GLint64 x);
+GLAPI void APIENTRY glProgramUniform2i64ARB (GLuint program, GLint location, GLint64 x, GLint64 y);
+GLAPI void APIENTRY glProgramUniform3i64ARB (GLuint program, GLint location, GLint64 x, GLint64 y, GLint64 z);
+GLAPI void APIENTRY glProgramUniform4i64ARB (GLuint program, GLint location, GLint64 x, GLint64 y, GLint64 z, GLint64 w);
+GLAPI void APIENTRY glProgramUniform1i64vARB (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glProgramUniform2i64vARB (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glProgramUniform3i64vARB (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glProgramUniform4i64vARB (GLuint program, GLint location, GLsizei count, const GLint64 *value);
+GLAPI void APIENTRY glProgramUniform1ui64ARB (GLuint program, GLint location, GLuint64 x);
+GLAPI void APIENTRY glProgramUniform2ui64ARB (GLuint program, GLint location, GLuint64 x, GLuint64 y);
+GLAPI void APIENTRY glProgramUniform3ui64ARB (GLuint program, GLint location, GLuint64 x, GLuint64 y, GLuint64 z);
+GLAPI void APIENTRY glProgramUniform4ui64ARB (GLuint program, GLint location, GLuint64 x, GLuint64 y, GLuint64 z, GLuint64 w);
+GLAPI void APIENTRY glProgramUniform1ui64vARB (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glProgramUniform2ui64vARB (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glProgramUniform3ui64vARB (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glProgramUniform4ui64vARB (GLuint program, GLint location, GLsizei count, const GLuint64 *value);
+#endif
+#endif /* GL_ARB_gpu_shader_int64 */
+
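+/*
+ * Example: setting a 64-bit integer uniform through the entry points
+ * declared above. A minimal sketch, guarded out of compilation; the
+ * get_gl_proc() loader and example_* helper are illustrative names, not
+ * part of this header (use wglGetProcAddress, glXGetProcAddress, etc.).
+ */
+#if 0
+extern void (*get_gl_proc(const char *name))(void);
+
+static void example_uniform_int64(GLint location)
+{
+    /* A NULL result means the driver does not export
+     * GL_ARB_gpu_shader_int64. */
+    PFNGLUNIFORM1I64ARBPROC uniform1i64 =
+        (PFNGLUNIFORM1I64ARBPROC) get_gl_proc("glUniform1i64ARB");
+    if (uniform1i64)
+        uniform1i64(location, (GLint64) 0x123456789ABCLL);
+}
+#endif
+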
+#ifndef GL_ARB_half_float_pixel
+#define GL_ARB_half_float_pixel 1
+typedef khronos_uint16_t GLhalfARB;
+#define GL_HALF_FLOAT_ARB 0x140B
+#endif /* GL_ARB_half_float_pixel */
+
+#ifndef GL_ARB_half_float_vertex
+#define GL_ARB_half_float_vertex 1
+#endif /* GL_ARB_half_float_vertex */
+
+#ifndef GL_ARB_imaging
+#define GL_ARB_imaging 1
+#define GL_CONVOLUTION_1D 0x8010
+#define GL_CONVOLUTION_2D 0x8011
+#define GL_SEPARABLE_2D 0x8012
+#define GL_CONVOLUTION_BORDER_MODE 0x8013
+#define GL_CONVOLUTION_FILTER_SCALE 0x8014
+#define GL_CONVOLUTION_FILTER_BIAS 0x8015
+#define GL_REDUCE 0x8016
+#define GL_CONVOLUTION_FORMAT 0x8017
+#define GL_CONVOLUTION_WIDTH 0x8018
+#define GL_CONVOLUTION_HEIGHT 0x8019
+#define GL_MAX_CONVOLUTION_WIDTH 0x801A
+#define GL_MAX_CONVOLUTION_HEIGHT 0x801B
+#define GL_POST_CONVOLUTION_RED_SCALE 0x801C
+#define GL_POST_CONVOLUTION_GREEN_SCALE 0x801D
+#define GL_POST_CONVOLUTION_BLUE_SCALE 0x801E
+#define GL_POST_CONVOLUTION_ALPHA_SCALE 0x801F
+#define GL_POST_CONVOLUTION_RED_BIAS 0x8020
+#define GL_POST_CONVOLUTION_GREEN_BIAS 0x8021
+#define GL_POST_CONVOLUTION_BLUE_BIAS 0x8022
+#define GL_POST_CONVOLUTION_ALPHA_BIAS 0x8023
+#define GL_HISTOGRAM 0x8024
+#define GL_PROXY_HISTOGRAM 0x8025
+#define GL_HISTOGRAM_WIDTH 0x8026
+#define GL_HISTOGRAM_FORMAT 0x8027
+#define GL_HISTOGRAM_RED_SIZE 0x8028
+#define GL_HISTOGRAM_GREEN_SIZE 0x8029
+#define GL_HISTOGRAM_BLUE_SIZE 0x802A
+#define GL_HISTOGRAM_ALPHA_SIZE 0x802B
+#define GL_HISTOGRAM_LUMINANCE_SIZE 0x802C
+#define GL_HISTOGRAM_SINK 0x802D
+#define GL_MINMAX 0x802E
+#define GL_MINMAX_FORMAT 0x802F
+#define GL_MINMAX_SINK 0x8030
+#define GL_TABLE_TOO_LARGE 0x8031
+#define GL_COLOR_MATRIX 0x80B1
+#define GL_COLOR_MATRIX_STACK_DEPTH 0x80B2
+#define GL_MAX_COLOR_MATRIX_STACK_DEPTH 0x80B3
+#define GL_POST_COLOR_MATRIX_RED_SCALE 0x80B4
+#define GL_POST_COLOR_MATRIX_GREEN_SCALE 0x80B5
+#define GL_POST_COLOR_MATRIX_BLUE_SCALE 0x80B6
+#define GL_POST_COLOR_MATRIX_ALPHA_SCALE 0x80B7
+#define GL_POST_COLOR_MATRIX_RED_BIAS 0x80B8
+#define GL_POST_COLOR_MATRIX_GREEN_BIAS 0x80B9
+#define GL_POST_COLOR_MATRIX_BLUE_BIAS 0x80BA
+#define GL_POST_COLOR_MATRIX_ALPHA_BIAS 0x80BB
+#define GL_COLOR_TABLE 0x80D0
+#define GL_POST_CONVOLUTION_COLOR_TABLE 0x80D1
+#define GL_POST_COLOR_MATRIX_COLOR_TABLE 0x80D2
+#define GL_PROXY_COLOR_TABLE 0x80D3
+#define GL_PROXY_POST_CONVOLUTION_COLOR_TABLE 0x80D4
+#define GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE 0x80D5
+#define GL_COLOR_TABLE_SCALE 0x80D6
+#define GL_COLOR_TABLE_BIAS 0x80D7
+#define GL_COLOR_TABLE_FORMAT 0x80D8
+#define GL_COLOR_TABLE_WIDTH 0x80D9
+#define GL_COLOR_TABLE_RED_SIZE 0x80DA
+#define GL_COLOR_TABLE_GREEN_SIZE 0x80DB
+#define GL_COLOR_TABLE_BLUE_SIZE 0x80DC
+#define GL_COLOR_TABLE_ALPHA_SIZE 0x80DD
+#define GL_COLOR_TABLE_LUMINANCE_SIZE 0x80DE
+#define GL_COLOR_TABLE_INTENSITY_SIZE 0x80DF
+#define GL_CONSTANT_BORDER 0x8151
+#define GL_REPLICATE_BORDER 0x8153
+#define GL_CONVOLUTION_BORDER_COLOR 0x8154
+typedef void (APIENTRYP PFNGLCOLORTABLEPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *table);
+typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLCOPYCOLORTABLEPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPROC) (GLenum target, GLenum format, GLenum type, void *table);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLCOLORSUBTABLEPROC) (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLCOPYCOLORSUBTABLEPROC) (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER1DPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *image);
+typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER2DPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *image);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFPROC) (GLenum target, GLenum pname, GLfloat params);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFVPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIPROC) (GLenum target, GLenum pname, GLint params);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIVPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER1DPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER2DPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONFILTERPROC) (GLenum target, GLenum format, GLenum type, void *image);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETSEPARABLEFILTERPROC) (GLenum target, GLenum format, GLenum type, void *row, void *column, void *span);
+typedef void (APIENTRYP PFNGLSEPARABLEFILTER2DPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *row, const void *column);
+typedef void (APIENTRYP PFNGLGETHISTOGRAMPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMINMAXPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERFVPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERIVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLHISTOGRAMPROC) (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink);
+typedef void (APIENTRYP PFNGLMINMAXPROC) (GLenum target, GLenum internalformat, GLboolean sink);
+typedef void (APIENTRYP PFNGLRESETHISTOGRAMPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLRESETMINMAXPROC) (GLenum target);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorTable (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *table);
+GLAPI void APIENTRY glColorTableParameterfv (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glColorTableParameteriv (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glCopyColorTable (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glGetColorTable (GLenum target, GLenum format, GLenum type, void *table);
+GLAPI void APIENTRY glGetColorTableParameterfv (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetColorTableParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glColorSubTable (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glCopyColorSubTable (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glConvolutionFilter1D (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *image);
+GLAPI void APIENTRY glConvolutionFilter2D (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *image);
+GLAPI void APIENTRY glConvolutionParameterf (GLenum target, GLenum pname, GLfloat params);
+GLAPI void APIENTRY glConvolutionParameterfv (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glConvolutionParameteri (GLenum target, GLenum pname, GLint params);
+GLAPI void APIENTRY glConvolutionParameteriv (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glCopyConvolutionFilter1D (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glCopyConvolutionFilter2D (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetConvolutionFilter (GLenum target, GLenum format, GLenum type, void *image);
+GLAPI void APIENTRY glGetConvolutionParameterfv (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetConvolutionParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetSeparableFilter (GLenum target, GLenum format, GLenum type, void *row, void *column, void *span);
+GLAPI void APIENTRY glSeparableFilter2D (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *row, const void *column);
+GLAPI void APIENTRY glGetHistogram (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+GLAPI void APIENTRY glGetHistogramParameterfv (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetHistogramParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMinmax (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+GLAPI void APIENTRY glGetMinmaxParameterfv (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMinmaxParameteriv (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glHistogram (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink);
+GLAPI void APIENTRY glMinmax (GLenum target, GLenum internalformat, GLboolean sink);
+GLAPI void APIENTRY glResetHistogram (GLenum target);
+GLAPI void APIENTRY glResetMinmax (GLenum target);
+#endif
+#endif /* GL_ARB_imaging */
+
+#ifndef GL_ARB_indirect_parameters
+#define GL_ARB_indirect_parameters 1
+#define GL_PARAMETER_BUFFER_ARB 0x80EE
+#define GL_PARAMETER_BUFFER_BINDING_ARB 0x80EF
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTCOUNTARBPROC) (GLenum mode, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTCOUNTARBPROC) (GLenum mode, GLenum type, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiDrawArraysIndirectCountARB (GLenum mode, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+GLAPI void APIENTRY glMultiDrawElementsIndirectCountARB (GLenum mode, GLenum type, const void *indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+#endif
+#endif /* GL_ARB_indirect_parameters */
+
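+/*
+ * Example: letting the GPU supply the draw count through the entry point
+ * declared above. A minimal sketch, guarded out of compilation; the p_*
+ * pointer is an illustrative name assumed to be resolved by a loader, and
+ * the indirect and parameter buffers are assumed to be bound to
+ * GL_DRAW_INDIRECT_BUFFER and GL_PARAMETER_BUFFER_ARB already.
+ */
+#if 0
+static PFNGLMULTIDRAWARRAYSINDIRECTCOUNTARBPROC p_glMultiDrawArraysIndirectCountARB;
+
+static void example_gpu_draw_count(GLsizei max_draws)
+{
+    /* drawcount is a byte offset into the parameter buffer holding the
+     * real count, which the GL clamps to max_draws; stride 0 means the
+     * indirect commands are tightly packed. */
+    p_glMultiDrawArraysIndirectCountARB(GL_TRIANGLES,
+                                        (const void *) 0, /* indirect offset */
+                                        (GLintptr) 0,     /* drawcount offset */
+                                        max_draws, 0);
+}
+#endif
+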
+#ifndef GL_ARB_instanced_arrays
+#define GL_ARB_instanced_arrays 1
+#define GL_VERTEX_ATTRIB_ARRAY_DIVISOR_ARB 0x88FE
+typedef void (APIENTRYP PFNGLVERTEXATTRIBDIVISORARBPROC) (GLuint index, GLuint divisor);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttribDivisorARB (GLuint index, GLuint divisor);
+#endif
+#endif /* GL_ARB_instanced_arrays */
+
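+/*
+ * Example: marking a vertex attribute as per-instance with the call
+ * declared above. A minimal sketch, guarded out of compilation; the p_*
+ * pointer is an illustrative name assumed to be resolved by a loader.
+ */
+#if 0
+static PFNGLVERTEXATTRIBDIVISORARBPROC p_glVertexAttribDivisorARB;
+
+static void example_instanced_attrib(GLuint attrib)
+{
+    /* Divisor 1: the attribute advances once per instance instead of
+     * once per vertex; divisor 0 restores the per-vertex default. */
+    p_glVertexAttribDivisorARB(attrib, 1);
+}
+#endif
+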
+#ifndef GL_ARB_internalformat_query
+#define GL_ARB_internalformat_query 1
+#endif /* GL_ARB_internalformat_query */
+
+#ifndef GL_ARB_internalformat_query2
+#define GL_ARB_internalformat_query2 1
+#define GL_SRGB_DECODE_ARB 0x8299
+#define GL_VIEW_CLASS_EAC_R11 0x9383
+#define GL_VIEW_CLASS_EAC_RG11 0x9384
+#define GL_VIEW_CLASS_ETC2_RGB 0x9385
+#define GL_VIEW_CLASS_ETC2_RGBA 0x9386
+#define GL_VIEW_CLASS_ETC2_EAC_RGBA 0x9387
+#define GL_VIEW_CLASS_ASTC_4x4_RGBA 0x9388
+#define GL_VIEW_CLASS_ASTC_5x4_RGBA 0x9389
+#define GL_VIEW_CLASS_ASTC_5x5_RGBA 0x938A
+#define GL_VIEW_CLASS_ASTC_6x5_RGBA 0x938B
+#define GL_VIEW_CLASS_ASTC_6x6_RGBA 0x938C
+#define GL_VIEW_CLASS_ASTC_8x5_RGBA 0x938D
+#define GL_VIEW_CLASS_ASTC_8x6_RGBA 0x938E
+#define GL_VIEW_CLASS_ASTC_8x8_RGBA 0x938F
+#define GL_VIEW_CLASS_ASTC_10x5_RGBA 0x9390
+#define GL_VIEW_CLASS_ASTC_10x6_RGBA 0x9391
+#define GL_VIEW_CLASS_ASTC_10x8_RGBA 0x9392
+#define GL_VIEW_CLASS_ASTC_10x10_RGBA 0x9393
+#define GL_VIEW_CLASS_ASTC_12x10_RGBA 0x9394
+#define GL_VIEW_CLASS_ASTC_12x12_RGBA 0x9395
+#endif /* GL_ARB_internalformat_query2 */
+
+#ifndef GL_ARB_invalidate_subdata
+#define GL_ARB_invalidate_subdata 1
+#endif /* GL_ARB_invalidate_subdata */
+
+#ifndef GL_ARB_map_buffer_alignment
+#define GL_ARB_map_buffer_alignment 1
+#endif /* GL_ARB_map_buffer_alignment */
+
+#ifndef GL_ARB_map_buffer_range
+#define GL_ARB_map_buffer_range 1
+#endif /* GL_ARB_map_buffer_range */
+
+#ifndef GL_ARB_matrix_palette
+#define GL_ARB_matrix_palette 1
+#define GL_MATRIX_PALETTE_ARB 0x8840
+#define GL_MAX_MATRIX_PALETTE_STACK_DEPTH_ARB 0x8841
+#define GL_MAX_PALETTE_MATRICES_ARB 0x8842
+#define GL_CURRENT_PALETTE_MATRIX_ARB 0x8843
+#define GL_MATRIX_INDEX_ARRAY_ARB 0x8844
+#define GL_CURRENT_MATRIX_INDEX_ARB 0x8845
+#define GL_MATRIX_INDEX_ARRAY_SIZE_ARB 0x8846
+#define GL_MATRIX_INDEX_ARRAY_TYPE_ARB 0x8847
+#define GL_MATRIX_INDEX_ARRAY_STRIDE_ARB 0x8848
+#define GL_MATRIX_INDEX_ARRAY_POINTER_ARB 0x8849
+typedef void (APIENTRYP PFNGLCURRENTPALETTEMATRIXARBPROC) (GLint index);
+typedef void (APIENTRYP PFNGLMATRIXINDEXUBVARBPROC) (GLint size, const GLubyte *indices);
+typedef void (APIENTRYP PFNGLMATRIXINDEXUSVARBPROC) (GLint size, const GLushort *indices);
+typedef void (APIENTRYP PFNGLMATRIXINDEXUIVARBPROC) (GLint size, const GLuint *indices);
+typedef void (APIENTRYP PFNGLMATRIXINDEXPOINTERARBPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCurrentPaletteMatrixARB (GLint index);
+GLAPI void APIENTRY glMatrixIndexubvARB (GLint size, const GLubyte *indices);
+GLAPI void APIENTRY glMatrixIndexusvARB (GLint size, const GLushort *indices);
+GLAPI void APIENTRY glMatrixIndexuivARB (GLint size, const GLuint *indices);
+GLAPI void APIENTRY glMatrixIndexPointerARB (GLint size, GLenum type, GLsizei stride, const void *pointer);
+#endif
+#endif /* GL_ARB_matrix_palette */
+
+#ifndef GL_ARB_multi_bind
+#define GL_ARB_multi_bind 1
+#endif /* GL_ARB_multi_bind */
+
+#ifndef GL_ARB_multi_draw_indirect
+#define GL_ARB_multi_draw_indirect 1
+#endif /* GL_ARB_multi_draw_indirect */
+
+#ifndef GL_ARB_multisample
+#define GL_ARB_multisample 1
+#define GL_MULTISAMPLE_ARB 0x809D
+#define GL_SAMPLE_ALPHA_TO_COVERAGE_ARB 0x809E
+#define GL_SAMPLE_ALPHA_TO_ONE_ARB 0x809F
+#define GL_SAMPLE_COVERAGE_ARB 0x80A0
+#define GL_SAMPLE_BUFFERS_ARB 0x80A8
+#define GL_SAMPLES_ARB 0x80A9
+#define GL_SAMPLE_COVERAGE_VALUE_ARB 0x80AA
+#define GL_SAMPLE_COVERAGE_INVERT_ARB 0x80AB
+#define GL_MULTISAMPLE_BIT_ARB 0x20000000
+typedef void (APIENTRYP PFNGLSAMPLECOVERAGEARBPROC) (GLfloat value, GLboolean invert);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSampleCoverageARB (GLfloat value, GLboolean invert);
+#endif
+#endif /* GL_ARB_multisample */
+
+#ifndef GL_ARB_multitexture
+#define GL_ARB_multitexture 1
+#define GL_TEXTURE0_ARB 0x84C0
+#define GL_TEXTURE1_ARB 0x84C1
+#define GL_TEXTURE2_ARB 0x84C2
+#define GL_TEXTURE3_ARB 0x84C3
+#define GL_TEXTURE4_ARB 0x84C4
+#define GL_TEXTURE5_ARB 0x84C5
+#define GL_TEXTURE6_ARB 0x84C6
+#define GL_TEXTURE7_ARB 0x84C7
+#define GL_TEXTURE8_ARB 0x84C8
+#define GL_TEXTURE9_ARB 0x84C9
+#define GL_TEXTURE10_ARB 0x84CA
+#define GL_TEXTURE11_ARB 0x84CB
+#define GL_TEXTURE12_ARB 0x84CC
+#define GL_TEXTURE13_ARB 0x84CD
+#define GL_TEXTURE14_ARB 0x84CE
+#define GL_TEXTURE15_ARB 0x84CF
+#define GL_TEXTURE16_ARB 0x84D0
+#define GL_TEXTURE17_ARB 0x84D1
+#define GL_TEXTURE18_ARB 0x84D2
+#define GL_TEXTURE19_ARB 0x84D3
+#define GL_TEXTURE20_ARB 0x84D4
+#define GL_TEXTURE21_ARB 0x84D5
+#define GL_TEXTURE22_ARB 0x84D6
+#define GL_TEXTURE23_ARB 0x84D7
+#define GL_TEXTURE24_ARB 0x84D8
+#define GL_TEXTURE25_ARB 0x84D9
+#define GL_TEXTURE26_ARB 0x84DA
+#define GL_TEXTURE27_ARB 0x84DB
+#define GL_TEXTURE28_ARB 0x84DC
+#define GL_TEXTURE29_ARB 0x84DD
+#define GL_TEXTURE30_ARB 0x84DE
+#define GL_TEXTURE31_ARB 0x84DF
+#define GL_ACTIVE_TEXTURE_ARB 0x84E0
+#define GL_CLIENT_ACTIVE_TEXTURE_ARB 0x84E1
+#define GL_MAX_TEXTURE_UNITS_ARB 0x84E2
+typedef void (APIENTRYP PFNGLACTIVETEXTUREARBPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLCLIENTACTIVETEXTUREARBPROC) (GLenum texture);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1DARBPROC) (GLenum target, GLdouble s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1FARBPROC) (GLenum target, GLfloat s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1IARBPROC) (GLenum target, GLint s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1SARBPROC) (GLenum target, GLshort s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1SVARBPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2DARBPROC) (GLenum target, GLdouble s, GLdouble t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2FARBPROC) (GLenum target, GLfloat s, GLfloat t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2IARBPROC) (GLenum target, GLint s, GLint t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2SARBPROC) (GLenum target, GLshort s, GLshort t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2SVARBPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3DARBPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3FARBPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3IARBPROC) (GLenum target, GLint s, GLint t, GLint r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3SARBPROC) (GLenum target, GLshort s, GLshort t, GLshort r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3SVARBPROC) (GLenum target, const GLshort *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4DARBPROC) (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4DVARBPROC) (GLenum target, const GLdouble *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4FARBPROC) (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4FVARBPROC) (GLenum target, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4IARBPROC) (GLenum target, GLint s, GLint t, GLint r, GLint q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4IVARBPROC) (GLenum target, const GLint *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4SARBPROC) (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4SVARBPROC) (GLenum target, const GLshort *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glActiveTextureARB (GLenum texture);
+GLAPI void APIENTRY glClientActiveTextureARB (GLenum texture);
+GLAPI void APIENTRY glMultiTexCoord1dARB (GLenum target, GLdouble s);
+GLAPI void APIENTRY glMultiTexCoord1dvARB (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord1fARB (GLenum target, GLfloat s);
+GLAPI void APIENTRY glMultiTexCoord1fvARB (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord1iARB (GLenum target, GLint s);
+GLAPI void APIENTRY glMultiTexCoord1ivARB (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord1sARB (GLenum target, GLshort s);
+GLAPI void APIENTRY glMultiTexCoord1svARB (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glMultiTexCoord2dARB (GLenum target, GLdouble s, GLdouble t);
+GLAPI void APIENTRY glMultiTexCoord2dvARB (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord2fARB (GLenum target, GLfloat s, GLfloat t);
+GLAPI void APIENTRY glMultiTexCoord2fvARB (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord2iARB (GLenum target, GLint s, GLint t);
+GLAPI void APIENTRY glMultiTexCoord2ivARB (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord2sARB (GLenum target, GLshort s, GLshort t);
+GLAPI void APIENTRY glMultiTexCoord2svARB (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glMultiTexCoord3dARB (GLenum target, GLdouble s, GLdouble t, GLdouble r);
+GLAPI void APIENTRY glMultiTexCoord3dvARB (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord3fARB (GLenum target, GLfloat s, GLfloat t, GLfloat r);
+GLAPI void APIENTRY glMultiTexCoord3fvARB (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord3iARB (GLenum target, GLint s, GLint t, GLint r);
+GLAPI void APIENTRY glMultiTexCoord3ivARB (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord3sARB (GLenum target, GLshort s, GLshort t, GLshort r);
+GLAPI void APIENTRY glMultiTexCoord3svARB (GLenum target, const GLshort *v);
+GLAPI void APIENTRY glMultiTexCoord4dARB (GLenum target, GLdouble s, GLdouble t, GLdouble r, GLdouble q);
+GLAPI void APIENTRY glMultiTexCoord4dvARB (GLenum target, const GLdouble *v);
+GLAPI void APIENTRY glMultiTexCoord4fARB (GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+GLAPI void APIENTRY glMultiTexCoord4fvARB (GLenum target, const GLfloat *v);
+GLAPI void APIENTRY glMultiTexCoord4iARB (GLenum target, GLint s, GLint t, GLint r, GLint q);
+GLAPI void APIENTRY glMultiTexCoord4ivARB (GLenum target, const GLint *v);
+GLAPI void APIENTRY glMultiTexCoord4sARB (GLenum target, GLshort s, GLshort t, GLshort r, GLshort q);
+GLAPI void APIENTRY glMultiTexCoord4svARB (GLenum target, const GLshort *v);
+#endif
+#endif /* GL_ARB_multitexture */
+
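+/*
+ * Example: addressing a second texture unit with the calls declared
+ * above. A minimal sketch, guarded out of compilation; the p_* pointers
+ * are illustrative names assumed to be resolved by a loader, and
+ * glBindTexture comes from core GL 1.1.
+ */
+#if 0
+static PFNGLACTIVETEXTUREARBPROC   p_glActiveTextureARB;
+static PFNGLMULTITEXCOORD2FARBPROC p_glMultiTexCoord2fARB;
+
+static void example_second_unit(GLuint texture)
+{
+    p_glActiveTextureARB(GL_TEXTURE1_ARB); /* texture state now targets unit 1 */
+    glBindTexture(GL_TEXTURE_2D, texture);
+    /* Immediate-mode coordinate for unit 1 (legacy fixed function). */
+    p_glMultiTexCoord2fARB(GL_TEXTURE1_ARB, 0.5f, 0.5f);
+}
+#endif
+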
+#ifndef GL_ARB_occlusion_query
+#define GL_ARB_occlusion_query 1
+#define GL_QUERY_COUNTER_BITS_ARB 0x8864
+#define GL_CURRENT_QUERY_ARB 0x8865
+#define GL_QUERY_RESULT_ARB 0x8866
+#define GL_QUERY_RESULT_AVAILABLE_ARB 0x8867
+#define GL_SAMPLES_PASSED_ARB 0x8914
+typedef void (APIENTRYP PFNGLGENQUERIESARBPROC) (GLsizei n, GLuint *ids);
+typedef void (APIENTRYP PFNGLDELETEQUERIESARBPROC) (GLsizei n, const GLuint *ids);
+typedef GLboolean (APIENTRYP PFNGLISQUERYARBPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLBEGINQUERYARBPROC) (GLenum target, GLuint id);
+typedef void (APIENTRYP PFNGLENDQUERYARBPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLGETQUERYIVARBPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTIVARBPROC) (GLuint id, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTUIVARBPROC) (GLuint id, GLenum pname, GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenQueriesARB (GLsizei n, GLuint *ids);
+GLAPI void APIENTRY glDeleteQueriesARB (GLsizei n, const GLuint *ids);
+GLAPI GLboolean APIENTRY glIsQueryARB (GLuint id);
+GLAPI void APIENTRY glBeginQueryARB (GLenum target, GLuint id);
+GLAPI void APIENTRY glEndQueryARB (GLenum target);
+GLAPI void APIENTRY glGetQueryivARB (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetQueryObjectivARB (GLuint id, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetQueryObjectuivARB (GLuint id, GLenum pname, GLuint *params);
+#endif
+#endif /* GL_ARB_occlusion_query */
+
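+/*
+ * Example: counting samples that pass the depth test with the query API
+ * declared above. A minimal sketch, guarded out of compilation; the p_*
+ * pointers and the draw_scene callback are illustrative, with the
+ * pointers assumed to be resolved by a loader.
+ */
+#if 0
+static PFNGLGENQUERIESARBPROC        p_glGenQueriesARB;
+static PFNGLBEGINQUERYARBPROC        p_glBeginQueryARB;
+static PFNGLENDQUERYARBPROC          p_glEndQueryARB;
+static PFNGLGETQUERYOBJECTUIVARBPROC p_glGetQueryObjectuivARB;
+
+static GLuint example_count_passing_samples(void (*draw_scene)(void))
+{
+    GLuint query, result = 0;
+    p_glGenQueriesARB(1, &query);
+    p_glBeginQueryARB(GL_SAMPLES_PASSED_ARB, query);
+    draw_scene();
+    p_glEndQueryARB(GL_SAMPLES_PASSED_ARB);
+    /* This read blocks until the GPU finishes; poll
+     * GL_QUERY_RESULT_AVAILABLE_ARB first to avoid the stall. */
+    p_glGetQueryObjectuivARB(query, GL_QUERY_RESULT_ARB, &result);
+    return result;
+}
+#endif
+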
+#ifndef GL_ARB_occlusion_query2
+#define GL_ARB_occlusion_query2 1
+#endif /* GL_ARB_occlusion_query2 */
+
+#ifndef GL_ARB_parallel_shader_compile
+#define GL_ARB_parallel_shader_compile 1
+#define GL_MAX_SHADER_COMPILER_THREADS_ARB 0x91B0
+#define GL_COMPLETION_STATUS_ARB 0x91B1
+typedef void (APIENTRYP PFNGLMAXSHADERCOMPILERTHREADSARBPROC) (GLuint count);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMaxShaderCompilerThreadsARB (GLuint count);
+#endif
+#endif /* GL_ARB_parallel_shader_compile */
+
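+/*
+ * Example: a non-blocking compile status check using the tokens above. A
+ * minimal sketch, guarded out of compilation; the p_* pointer is an
+ * illustrative name assumed to be resolved by a loader, and
+ * glCompileShader / glGetShaderiv come from core GL 2.0.
+ */
+#if 0
+static PFNGLMAXSHADERCOMPILERTHREADSARBPROC p_glMaxShaderCompilerThreadsARB;
+
+static int example_compile_async(GLuint shader)
+{
+    GLint done = GL_FALSE;
+    p_glMaxShaderCompilerThreadsARB(4); /* hint: up to four worker threads */
+    glCompileShader(shader);
+    /* Returns immediately; keep doing other work until this turns true. */
+    glGetShaderiv(shader, GL_COMPLETION_STATUS_ARB, &done);
+    return done == GL_TRUE;
+}
+#endif
+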
+#ifndef GL_ARB_pipeline_statistics_query
+#define GL_ARB_pipeline_statistics_query 1
+#define GL_VERTICES_SUBMITTED_ARB 0x82EE
+#define GL_PRIMITIVES_SUBMITTED_ARB 0x82EF
+#define GL_VERTEX_SHADER_INVOCATIONS_ARB 0x82F0
+#define GL_TESS_CONTROL_SHADER_PATCHES_ARB 0x82F1
+#define GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB 0x82F2
+#define GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB 0x82F3
+#define GL_FRAGMENT_SHADER_INVOCATIONS_ARB 0x82F4
+#define GL_COMPUTE_SHADER_INVOCATIONS_ARB 0x82F5
+#define GL_CLIPPING_INPUT_PRIMITIVES_ARB 0x82F6
+#define GL_CLIPPING_OUTPUT_PRIMITIVES_ARB 0x82F7
+#endif /* GL_ARB_pipeline_statistics_query */
+
+#ifndef GL_ARB_pixel_buffer_object
+#define GL_ARB_pixel_buffer_object 1
+#define GL_PIXEL_PACK_BUFFER_ARB 0x88EB
+#define GL_PIXEL_UNPACK_BUFFER_ARB 0x88EC
+#define GL_PIXEL_PACK_BUFFER_BINDING_ARB 0x88ED
+#define GL_PIXEL_UNPACK_BUFFER_BINDING_ARB 0x88EF
+#endif /* GL_ARB_pixel_buffer_object */
+
+#ifndef GL_ARB_point_parameters
+#define GL_ARB_point_parameters 1
+#define GL_POINT_SIZE_MIN_ARB 0x8126
+#define GL_POINT_SIZE_MAX_ARB 0x8127
+#define GL_POINT_FADE_THRESHOLD_SIZE_ARB 0x8128
+#define GL_POINT_DISTANCE_ATTENUATION_ARB 0x8129
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFARBPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFVARBPROC) (GLenum pname, const GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPointParameterfARB (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glPointParameterfvARB (GLenum pname, const GLfloat *params);
+#endif
+#endif /* GL_ARB_point_parameters */
+
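+/*
+ * Example: distance-attenuated point sizes via the call declared above.
+ * A minimal sketch, guarded out of compilation; the p_* pointer is an
+ * illustrative name assumed to be resolved by a loader.
+ */
+#if 0
+static PFNGLPOINTPARAMETERFVARBPROC p_glPointParameterfvARB;
+
+static void example_point_attenuation(void)
+{
+    /* Derived size = size / sqrt(a + b*d + c*d*d) for eye distance d,
+     * clamped between GL_POINT_SIZE_MIN_ARB and GL_POINT_SIZE_MAX_ARB. */
+    static const GLfloat abc[3] = { 1.0f, 0.0f, 0.01f };
+    p_glPointParameterfvARB(GL_POINT_DISTANCE_ATTENUATION_ARB, abc);
+}
+#endif
+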
+#ifndef GL_ARB_point_sprite
+#define GL_ARB_point_sprite 1
+#define GL_POINT_SPRITE_ARB 0x8861
+#define GL_COORD_REPLACE_ARB 0x8862
+#endif /* GL_ARB_point_sprite */
+
+#ifndef GL_ARB_polygon_offset_clamp
+#define GL_ARB_polygon_offset_clamp 1
+#endif /* GL_ARB_polygon_offset_clamp */
+
+#ifndef GL_ARB_post_depth_coverage
+#define GL_ARB_post_depth_coverage 1
+#endif /* GL_ARB_post_depth_coverage */
+
+#ifndef GL_ARB_program_interface_query
+#define GL_ARB_program_interface_query 1
+#endif /* GL_ARB_program_interface_query */
+
+#ifndef GL_ARB_provoking_vertex
+#define GL_ARB_provoking_vertex 1
+#endif /* GL_ARB_provoking_vertex */
+
+#ifndef GL_ARB_query_buffer_object
+#define GL_ARB_query_buffer_object 1
+#endif /* GL_ARB_query_buffer_object */
+
+#ifndef GL_ARB_robust_buffer_access_behavior
+#define GL_ARB_robust_buffer_access_behavior 1
+#endif /* GL_ARB_robust_buffer_access_behavior */
+
+#ifndef GL_ARB_robustness
+#define GL_ARB_robustness 1
+#define GL_CONTEXT_FLAG_ROBUST_ACCESS_BIT_ARB 0x00000004
+#define GL_LOSE_CONTEXT_ON_RESET_ARB 0x8252
+#define GL_GUILTY_CONTEXT_RESET_ARB 0x8253
+#define GL_INNOCENT_CONTEXT_RESET_ARB 0x8254
+#define GL_UNKNOWN_CONTEXT_RESET_ARB 0x8255
+#define GL_RESET_NOTIFICATION_STRATEGY_ARB 0x8256
+#define GL_NO_RESET_NOTIFICATION_ARB 0x8261
+typedef GLenum (APIENTRYP PFNGLGETGRAPHICSRESETSTATUSARBPROC) (void);
+typedef void (APIENTRYP PFNGLGETNTEXIMAGEARBPROC) (GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void *img);
+typedef void (APIENTRYP PFNGLREADNPIXELSARBPROC) (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
+typedef void (APIENTRYP PFNGLGETNCOMPRESSEDTEXIMAGEARBPROC) (GLenum target, GLint lod, GLsizei bufSize, void *img);
+typedef void (APIENTRYP PFNGLGETNUNIFORMFVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMIVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLint *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMUIVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLuint *params);
+typedef void (APIENTRYP PFNGLGETNUNIFORMDVARBPROC) (GLuint program, GLint location, GLsizei bufSize, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETNMAPDVARBPROC) (GLenum target, GLenum query, GLsizei bufSize, GLdouble *v);
+typedef void (APIENTRYP PFNGLGETNMAPFVARBPROC) (GLenum target, GLenum query, GLsizei bufSize, GLfloat *v);
+typedef void (APIENTRYP PFNGLGETNMAPIVARBPROC) (GLenum target, GLenum query, GLsizei bufSize, GLint *v);
+typedef void (APIENTRYP PFNGLGETNPIXELMAPFVARBPROC) (GLenum map, GLsizei bufSize, GLfloat *values);
+typedef void (APIENTRYP PFNGLGETNPIXELMAPUIVARBPROC) (GLenum map, GLsizei bufSize, GLuint *values);
+typedef void (APIENTRYP PFNGLGETNPIXELMAPUSVARBPROC) (GLenum map, GLsizei bufSize, GLushort *values);
+typedef void (APIENTRYP PFNGLGETNPOLYGONSTIPPLEARBPROC) (GLsizei bufSize, GLubyte *pattern);
+typedef void (APIENTRYP PFNGLGETNCOLORTABLEARBPROC) (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *table);
+typedef void (APIENTRYP PFNGLGETNCONVOLUTIONFILTERARBPROC) (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *image);
+typedef void (APIENTRYP PFNGLGETNSEPARABLEFILTERARBPROC) (GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, void *row, GLsizei columnBufSize, void *column, void *span);
+typedef void (APIENTRYP PFNGLGETNHISTOGRAMARBPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+typedef void (APIENTRYP PFNGLGETNMINMAXARBPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLenum APIENTRY glGetGraphicsResetStatusARB (void);
+GLAPI void APIENTRY glGetnTexImageARB (GLenum target, GLint level, GLenum format, GLenum type, GLsizei bufSize, void *img);
+GLAPI void APIENTRY glReadnPixelsARB (GLint x, GLint y, GLsizei width, GLsizei height, GLenum format, GLenum type, GLsizei bufSize, void *data);
+GLAPI void APIENTRY glGetnCompressedTexImageARB (GLenum target, GLint lod, GLsizei bufSize, void *img);
+GLAPI void APIENTRY glGetnUniformfvARB (GLuint program, GLint location, GLsizei bufSize, GLfloat *params);
+GLAPI void APIENTRY glGetnUniformivARB (GLuint program, GLint location, GLsizei bufSize, GLint *params);
+GLAPI void APIENTRY glGetnUniformuivARB (GLuint program, GLint location, GLsizei bufSize, GLuint *params);
+GLAPI void APIENTRY glGetnUniformdvARB (GLuint program, GLint location, GLsizei bufSize, GLdouble *params);
+GLAPI void APIENTRY glGetnMapdvARB (GLenum target, GLenum query, GLsizei bufSize, GLdouble *v);
+GLAPI void APIENTRY glGetnMapfvARB (GLenum target, GLenum query, GLsizei bufSize, GLfloat *v);
+GLAPI void APIENTRY glGetnMapivARB (GLenum target, GLenum query, GLsizei bufSize, GLint *v);
+GLAPI void APIENTRY glGetnPixelMapfvARB (GLenum map, GLsizei bufSize, GLfloat *values);
+GLAPI void APIENTRY glGetnPixelMapuivARB (GLenum map, GLsizei bufSize, GLuint *values);
+GLAPI void APIENTRY glGetnPixelMapusvARB (GLenum map, GLsizei bufSize, GLushort *values);
+GLAPI void APIENTRY glGetnPolygonStippleARB (GLsizei bufSize, GLubyte *pattern);
+GLAPI void APIENTRY glGetnColorTableARB (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *table);
+GLAPI void APIENTRY glGetnConvolutionFilterARB (GLenum target, GLenum format, GLenum type, GLsizei bufSize, void *image);
+GLAPI void APIENTRY glGetnSeparableFilterARB (GLenum target, GLenum format, GLenum type, GLsizei rowBufSize, void *row, GLsizei columnBufSize, void *column, void *span);
+GLAPI void APIENTRY glGetnHistogramARB (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+GLAPI void APIENTRY glGetnMinmaxARB (GLenum target, GLboolean reset, GLenum format, GLenum type, GLsizei bufSize, void *values);
+#endif
+#endif /* GL_ARB_robustness */
+
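+/*
+ * Example: bounded readback plus reset detection with the entry points
+ * declared above. A minimal sketch, guarded out of compilation; the p_*
+ * pointers are illustrative names assumed to be resolved by a loader on
+ * a context created with robust access.
+ */
+#if 0
+static PFNGLGETGRAPHICSRESETSTATUSARBPROC p_glGetGraphicsResetStatusARB;
+static PFNGLREADNPIXELSARBPROC            p_glReadnPixelsARB;
+
+static int example_robust_readback(GLsizei w, GLsizei h,
+                                   void *buf, GLsizei bufSize)
+{
+    /* Unlike glReadPixels, the "n" variant never writes past bufSize. */
+    p_glReadnPixelsARB(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, bufSize, buf);
+    /* Anything other than GL_NO_ERROR means the context was lost and
+     * must be recreated. */
+    return p_glGetGraphicsResetStatusARB() == GL_NO_ERROR;
+}
+#endif
+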
+#ifndef GL_ARB_robustness_isolation
+#define GL_ARB_robustness_isolation 1
+#endif /* GL_ARB_robustness_isolation */
+
+#ifndef GL_ARB_sample_locations
+#define GL_ARB_sample_locations 1
+#define GL_SAMPLE_LOCATION_SUBPIXEL_BITS_ARB 0x933D
+#define GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_ARB 0x933E
+#define GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_ARB 0x933F
+#define GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_ARB 0x9340
+#define GL_SAMPLE_LOCATION_ARB 0x8E50
+#define GL_PROGRAMMABLE_SAMPLE_LOCATION_ARB 0x9341
+#define GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_ARB 0x9342
+#define GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_ARB 0x9343
+typedef void (APIENTRYP PFNGLFRAMEBUFFERSAMPLELOCATIONSFVARBPROC) (GLenum target, GLuint start, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERSAMPLELOCATIONSFVARBPROC) (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLEVALUATEDEPTHVALUESARBPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferSampleLocationsfvARB (GLenum target, GLuint start, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glNamedFramebufferSampleLocationsfvARB (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glEvaluateDepthValuesARB (void);
+#endif
+#endif /* GL_ARB_sample_locations */
+
+#ifndef GL_ARB_sample_shading
+#define GL_ARB_sample_shading 1
+#define GL_SAMPLE_SHADING_ARB 0x8C36
+#define GL_MIN_SAMPLE_SHADING_VALUE_ARB 0x8C37
+typedef void (APIENTRYP PFNGLMINSAMPLESHADINGARBPROC) (GLfloat value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMinSampleShadingARB (GLfloat value);
+#endif
+#endif /* GL_ARB_sample_shading */
+
+#ifndef GL_ARB_sampler_objects
+#define GL_ARB_sampler_objects 1
+#endif /* GL_ARB_sampler_objects */
+
+#ifndef GL_ARB_seamless_cube_map
+#define GL_ARB_seamless_cube_map 1
+#endif /* GL_ARB_seamless_cube_map */
+
+#ifndef GL_ARB_seamless_cubemap_per_texture
+#define GL_ARB_seamless_cubemap_per_texture 1
+#endif /* GL_ARB_seamless_cubemap_per_texture */
+
+#ifndef GL_ARB_separate_shader_objects
+#define GL_ARB_separate_shader_objects 1
+#endif /* GL_ARB_separate_shader_objects */
+
+#ifndef GL_ARB_shader_atomic_counter_ops
+#define GL_ARB_shader_atomic_counter_ops 1
+#endif /* GL_ARB_shader_atomic_counter_ops */
+
+#ifndef GL_ARB_shader_atomic_counters
+#define GL_ARB_shader_atomic_counters 1
+#endif /* GL_ARB_shader_atomic_counters */
+
+#ifndef GL_ARB_shader_ballot
+#define GL_ARB_shader_ballot 1
+#endif /* GL_ARB_shader_ballot */
+
+#ifndef GL_ARB_shader_bit_encoding
+#define GL_ARB_shader_bit_encoding 1
+#endif /* GL_ARB_shader_bit_encoding */
+
+#ifndef GL_ARB_shader_clock
+#define GL_ARB_shader_clock 1
+#endif /* GL_ARB_shader_clock */
+
+#ifndef GL_ARB_shader_draw_parameters
+#define GL_ARB_shader_draw_parameters 1
+#endif /* GL_ARB_shader_draw_parameters */
+
+#ifndef GL_ARB_shader_group_vote
+#define GL_ARB_shader_group_vote 1
+#endif /* GL_ARB_shader_group_vote */
+
+#ifndef GL_ARB_shader_image_load_store
+#define GL_ARB_shader_image_load_store 1
+#endif /* GL_ARB_shader_image_load_store */
+
+#ifndef GL_ARB_shader_image_size
+#define GL_ARB_shader_image_size 1
+#endif /* GL_ARB_shader_image_size */
+
+#ifndef GL_ARB_shader_objects
+#define GL_ARB_shader_objects 1
+#ifdef __APPLE__
+#ifdef BUILDING_MESA
+/* Avoid uint <-> void* warnings */
+typedef unsigned long GLhandleARB;
+#else
+typedef void *GLhandleARB;
+#endif
+#else
+typedef unsigned int GLhandleARB;
+#endif
+typedef char GLcharARB;
+#define GL_PROGRAM_OBJECT_ARB 0x8B40
+#define GL_SHADER_OBJECT_ARB 0x8B48
+#define GL_OBJECT_TYPE_ARB 0x8B4E
+#define GL_OBJECT_SUBTYPE_ARB 0x8B4F
+#define GL_FLOAT_VEC2_ARB 0x8B50
+#define GL_FLOAT_VEC3_ARB 0x8B51
+#define GL_FLOAT_VEC4_ARB 0x8B52
+#define GL_INT_VEC2_ARB 0x8B53
+#define GL_INT_VEC3_ARB 0x8B54
+#define GL_INT_VEC4_ARB 0x8B55
+#define GL_BOOL_ARB 0x8B56
+#define GL_BOOL_VEC2_ARB 0x8B57
+#define GL_BOOL_VEC3_ARB 0x8B58
+#define GL_BOOL_VEC4_ARB 0x8B59
+#define GL_FLOAT_MAT2_ARB 0x8B5A
+#define GL_FLOAT_MAT3_ARB 0x8B5B
+#define GL_FLOAT_MAT4_ARB 0x8B5C
+#define GL_SAMPLER_1D_ARB 0x8B5D
+#define GL_SAMPLER_2D_ARB 0x8B5E
+#define GL_SAMPLER_3D_ARB 0x8B5F
+#define GL_SAMPLER_CUBE_ARB 0x8B60
+#define GL_SAMPLER_1D_SHADOW_ARB 0x8B61
+#define GL_SAMPLER_2D_SHADOW_ARB 0x8B62
+#define GL_SAMPLER_2D_RECT_ARB 0x8B63
+#define GL_SAMPLER_2D_RECT_SHADOW_ARB 0x8B64
+#define GL_OBJECT_DELETE_STATUS_ARB 0x8B80
+#define GL_OBJECT_COMPILE_STATUS_ARB 0x8B81
+#define GL_OBJECT_LINK_STATUS_ARB 0x8B82
+#define GL_OBJECT_VALIDATE_STATUS_ARB 0x8B83
+#define GL_OBJECT_INFO_LOG_LENGTH_ARB 0x8B84
+#define GL_OBJECT_ATTACHED_OBJECTS_ARB 0x8B85
+#define GL_OBJECT_ACTIVE_UNIFORMS_ARB 0x8B86
+#define GL_OBJECT_ACTIVE_UNIFORM_MAX_LENGTH_ARB 0x8B87
+#define GL_OBJECT_SHADER_SOURCE_LENGTH_ARB 0x8B88
+typedef void (APIENTRYP PFNGLDELETEOBJECTARBPROC) (GLhandleARB obj);
+typedef GLhandleARB (APIENTRYP PFNGLGETHANDLEARBPROC) (GLenum pname);
+typedef void (APIENTRYP PFNGLDETACHOBJECTARBPROC) (GLhandleARB containerObj, GLhandleARB attachedObj);
+typedef GLhandleARB (APIENTRYP PFNGLCREATESHADEROBJECTARBPROC) (GLenum shaderType);
+typedef void (APIENTRYP PFNGLSHADERSOURCEARBPROC) (GLhandleARB shaderObj, GLsizei count, const GLcharARB **string, const GLint *length);
+typedef void (APIENTRYP PFNGLCOMPILESHADERARBPROC) (GLhandleARB shaderObj);
+typedef GLhandleARB (APIENTRYP PFNGLCREATEPROGRAMOBJECTARBPROC) (void);
+typedef void (APIENTRYP PFNGLATTACHOBJECTARBPROC) (GLhandleARB containerObj, GLhandleARB obj);
+typedef void (APIENTRYP PFNGLLINKPROGRAMARBPROC) (GLhandleARB programObj);
+typedef void (APIENTRYP PFNGLUSEPROGRAMOBJECTARBPROC) (GLhandleARB programObj);
+typedef void (APIENTRYP PFNGLVALIDATEPROGRAMARBPROC) (GLhandleARB programObj);
+typedef void (APIENTRYP PFNGLUNIFORM1FARBPROC) (GLint location, GLfloat v0);
+typedef void (APIENTRYP PFNGLUNIFORM2FARBPROC) (GLint location, GLfloat v0, GLfloat v1);
+typedef void (APIENTRYP PFNGLUNIFORM3FARBPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (APIENTRYP PFNGLUNIFORM4FARBPROC) (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (APIENTRYP PFNGLUNIFORM1IARBPROC) (GLint location, GLint v0);
+typedef void (APIENTRYP PFNGLUNIFORM2IARBPROC) (GLint location, GLint v0, GLint v1);
+typedef void (APIENTRYP PFNGLUNIFORM3IARBPROC) (GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (APIENTRYP PFNGLUNIFORM4IARBPROC) (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (APIENTRYP PFNGLUNIFORM1FVARBPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM2FVARBPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM3FVARBPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM4FVARBPROC) (GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORM1IVARBPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORM2IVARBPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORM3IVARBPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORM4IVARBPROC) (GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX2FVARBPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX3FVARBPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLUNIFORMMATRIX4FVARBPROC) (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLGETOBJECTPARAMETERFVARBPROC) (GLhandleARB obj, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETOBJECTPARAMETERIVARBPROC) (GLhandleARB obj, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETINFOLOGARBPROC) (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *infoLog);
+typedef void (APIENTRYP PFNGLGETATTACHEDOBJECTSARBPROC) (GLhandleARB containerObj, GLsizei maxCount, GLsizei *count, GLhandleARB *obj);
+typedef GLint (APIENTRYP PFNGLGETUNIFORMLOCATIONARBPROC) (GLhandleARB programObj, const GLcharARB *name);
+typedef void (APIENTRYP PFNGLGETACTIVEUNIFORMARBPROC) (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name);
+typedef void (APIENTRYP PFNGLGETUNIFORMFVARBPROC) (GLhandleARB programObj, GLint location, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETUNIFORMIVARBPROC) (GLhandleARB programObj, GLint location, GLint *params);
+typedef void (APIENTRYP PFNGLGETSHADERSOURCEARBPROC) (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *source);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDeleteObjectARB (GLhandleARB obj);
+GLAPI GLhandleARB APIENTRY glGetHandleARB (GLenum pname);
+GLAPI void APIENTRY glDetachObjectARB (GLhandleARB containerObj, GLhandleARB attachedObj);
+GLAPI GLhandleARB APIENTRY glCreateShaderObjectARB (GLenum shaderType);
+GLAPI void APIENTRY glShaderSourceARB (GLhandleARB shaderObj, GLsizei count, const GLcharARB **string, const GLint *length);
+GLAPI void APIENTRY glCompileShaderARB (GLhandleARB shaderObj);
+GLAPI GLhandleARB APIENTRY glCreateProgramObjectARB (void);
+GLAPI void APIENTRY glAttachObjectARB (GLhandleARB containerObj, GLhandleARB obj);
+GLAPI void APIENTRY glLinkProgramARB (GLhandleARB programObj);
+GLAPI void APIENTRY glUseProgramObjectARB (GLhandleARB programObj);
+GLAPI void APIENTRY glValidateProgramARB (GLhandleARB programObj);
+GLAPI void APIENTRY glUniform1fARB (GLint location, GLfloat v0);
+GLAPI void APIENTRY glUniform2fARB (GLint location, GLfloat v0, GLfloat v1);
+GLAPI void APIENTRY glUniform3fARB (GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+GLAPI void APIENTRY glUniform4fARB (GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GLAPI void APIENTRY glUniform1iARB (GLint location, GLint v0);
+GLAPI void APIENTRY glUniform2iARB (GLint location, GLint v0, GLint v1);
+GLAPI void APIENTRY glUniform3iARB (GLint location, GLint v0, GLint v1, GLint v2);
+GLAPI void APIENTRY glUniform4iARB (GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GLAPI void APIENTRY glUniform1fvARB (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform2fvARB (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform3fvARB (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform4fvARB (GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glUniform1ivARB (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniform2ivARB (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniform3ivARB (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniform4ivARB (GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glUniformMatrix2fvARB (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix3fvARB (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glUniformMatrix4fvARB (GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glGetObjectParameterfvARB (GLhandleARB obj, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetObjectParameterivARB (GLhandleARB obj, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetInfoLogARB (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *infoLog);
+GLAPI void APIENTRY glGetAttachedObjectsARB (GLhandleARB containerObj, GLsizei maxCount, GLsizei *count, GLhandleARB *obj);
+GLAPI GLint APIENTRY glGetUniformLocationARB (GLhandleARB programObj, const GLcharARB *name);
+GLAPI void APIENTRY glGetActiveUniformARB (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name);
+GLAPI void APIENTRY glGetUniformfvARB (GLhandleARB programObj, GLint location, GLfloat *params);
+GLAPI void APIENTRY glGetUniformivARB (GLhandleARB programObj, GLint location, GLint *params);
+GLAPI void APIENTRY glGetShaderSourceARB (GLhandleARB obj, GLsizei maxLength, GLsizei *length, GLcharARB *source);
+#endif
+#endif /* GL_ARB_shader_objects */
+
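+/*
+ * Example: the compile-and-link sequence through the ARB object API
+ * declared above. A minimal sketch, guarded out of compilation; the p_*
+ * pointers are illustrative names assumed to be resolved by a loader,
+ * and the shader-type tokens come from the GL_ARB_vertex_shader and
+ * GL_ARB_fragment_shader sections of this header.
+ */
+#if 0
+static PFNGLCREATESHADEROBJECTARBPROC   p_glCreateShaderObjectARB;
+static PFNGLSHADERSOURCEARBPROC         p_glShaderSourceARB;
+static PFNGLCOMPILESHADERARBPROC        p_glCompileShaderARB;
+static PFNGLCREATEPROGRAMOBJECTARBPROC  p_glCreateProgramObjectARB;
+static PFNGLATTACHOBJECTARBPROC         p_glAttachObjectARB;
+static PFNGLLINKPROGRAMARBPROC          p_glLinkProgramARB;
+static PFNGLGETOBJECTPARAMETERIVARBPROC p_glGetObjectParameterivARB;
+
+static GLhandleARB example_build_program(const GLcharARB *vs_src,
+                                         const GLcharARB *fs_src)
+{
+    GLint ok = GL_FALSE;
+    GLhandleARB vs   = p_glCreateShaderObjectARB(GL_VERTEX_SHADER_ARB);
+    GLhandleARB fs   = p_glCreateShaderObjectARB(GL_FRAGMENT_SHADER_ARB);
+    GLhandleARB prog = p_glCreateProgramObjectARB();
+
+    p_glShaderSourceARB(vs, 1, &vs_src, (const GLint *) 0); /* NUL-terminated */
+    p_glCompileShaderARB(vs);
+    p_glShaderSourceARB(fs, 1, &fs_src, (const GLint *) 0);
+    p_glCompileShaderARB(fs);
+
+    p_glAttachObjectARB(prog, vs);
+    p_glAttachObjectARB(prog, fs);
+    p_glLinkProgramARB(prog);
+    p_glGetObjectParameterivARB(prog, GL_OBJECT_LINK_STATUS_ARB, &ok);
+    return ok ? prog : (GLhandleARB) 0;
+}
+#endif
+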
+#ifndef GL_ARB_shader_precision
+#define GL_ARB_shader_precision 1
+#endif /* GL_ARB_shader_precision */
+
+#ifndef GL_ARB_shader_stencil_export
+#define GL_ARB_shader_stencil_export 1
+#endif /* GL_ARB_shader_stencil_export */
+
+#ifndef GL_ARB_shader_storage_buffer_object
+#define GL_ARB_shader_storage_buffer_object 1
+#endif /* GL_ARB_shader_storage_buffer_object */
+
+#ifndef GL_ARB_shader_subroutine
+#define GL_ARB_shader_subroutine 1
+#endif /* GL_ARB_shader_subroutine */
+
+#ifndef GL_ARB_shader_texture_image_samples
+#define GL_ARB_shader_texture_image_samples 1
+#endif /* GL_ARB_shader_texture_image_samples */
+
+#ifndef GL_ARB_shader_texture_lod
+#define GL_ARB_shader_texture_lod 1
+#endif /* GL_ARB_shader_texture_lod */
+
+#ifndef GL_ARB_shader_viewport_layer_array
+#define GL_ARB_shader_viewport_layer_array 1
+#endif /* GL_ARB_shader_viewport_layer_array */
+
+#ifndef GL_ARB_shading_language_100
+#define GL_ARB_shading_language_100 1
+#define GL_SHADING_LANGUAGE_VERSION_ARB 0x8B8C
+#endif /* GL_ARB_shading_language_100 */
+
+#ifndef GL_ARB_shading_language_420pack
+#define GL_ARB_shading_language_420pack 1
+#endif /* GL_ARB_shading_language_420pack */
+
+#ifndef GL_ARB_shading_language_include
+#define GL_ARB_shading_language_include 1
+#define GL_SHADER_INCLUDE_ARB 0x8DAE
+#define GL_NAMED_STRING_LENGTH_ARB 0x8DE9
+#define GL_NAMED_STRING_TYPE_ARB 0x8DEA
+typedef void (APIENTRYP PFNGLNAMEDSTRINGARBPROC) (GLenum type, GLint namelen, const GLchar *name, GLint stringlen, const GLchar *string);
+typedef void (APIENTRYP PFNGLDELETENAMEDSTRINGARBPROC) (GLint namelen, const GLchar *name);
+typedef void (APIENTRYP PFNGLCOMPILESHADERINCLUDEARBPROC) (GLuint shader, GLsizei count, const GLchar *const*path, const GLint *length);
+typedef GLboolean (APIENTRYP PFNGLISNAMEDSTRINGARBPROC) (GLint namelen, const GLchar *name);
+typedef void (APIENTRYP PFNGLGETNAMEDSTRINGARBPROC) (GLint namelen, const GLchar *name, GLsizei bufSize, GLint *stringlen, GLchar *string);
+typedef void (APIENTRYP PFNGLGETNAMEDSTRINGIVARBPROC) (GLint namelen, const GLchar *name, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glNamedStringARB (GLenum type, GLint namelen, const GLchar *name, GLint stringlen, const GLchar *string);
+GLAPI void APIENTRY glDeleteNamedStringARB (GLint namelen, const GLchar *name);
+GLAPI void APIENTRY glCompileShaderIncludeARB (GLuint shader, GLsizei count, const GLchar *const*path, const GLint *length);
+GLAPI GLboolean APIENTRY glIsNamedStringARB (GLint namelen, const GLchar *name);
+GLAPI void APIENTRY glGetNamedStringARB (GLint namelen, const GLchar *name, GLsizei bufSize, GLint *stringlen, GLchar *string);
+GLAPI void APIENTRY glGetNamedStringivARB (GLint namelen, const GLchar *name, GLenum pname, GLint *params);
+#endif
+#endif /* GL_ARB_shading_language_include */
+
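+/*
+ * Example: registering a named string so shaders can #include it, using
+ * the entry points declared above. A minimal sketch, guarded out of
+ * compilation; the p_* pointers and the "/lib/math.glsl" name are
+ * illustrative, with the pointers assumed to be resolved by a loader.
+ */
+#if 0
+static PFNGLNAMEDSTRINGARBPROC          p_glNamedStringARB;
+static PFNGLCOMPILESHADERINCLUDEARBPROC p_glCompileShaderIncludeARB;
+
+static void example_shader_include(GLuint shader)
+{
+    static const GLchar body[] = "float sq(float x) { return x * x; }\n";
+    static const GLchar *const paths[] = { "/" }; /* search-path roots */
+    /* Lengths of -1: both strings are NUL-terminated. Names must look
+     * like absolute paths. */
+    p_glNamedStringARB(GL_SHADER_INCLUDE_ARB, -1, "/lib/math.glsl", -1, body);
+    p_glCompileShaderIncludeARB(shader, 1, paths, (const GLint *) 0);
+}
+#endif
+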
+#ifndef GL_ARB_shading_language_packing
+#define GL_ARB_shading_language_packing 1
+#endif /* GL_ARB_shading_language_packing */
+
+#ifndef GL_ARB_shadow
+#define GL_ARB_shadow 1
+#define GL_TEXTURE_COMPARE_MODE_ARB 0x884C
+#define GL_TEXTURE_COMPARE_FUNC_ARB 0x884D
+#define GL_COMPARE_R_TO_TEXTURE_ARB 0x884E
+#endif /* GL_ARB_shadow */
+
+#ifndef GL_ARB_shadow_ambient
+#define GL_ARB_shadow_ambient 1
+#define GL_TEXTURE_COMPARE_FAIL_VALUE_ARB 0x80BF
+#endif /* GL_ARB_shadow_ambient */
+
+#ifndef GL_ARB_sparse_buffer
+#define GL_ARB_sparse_buffer 1
+#define GL_SPARSE_STORAGE_BIT_ARB 0x0400
+#define GL_SPARSE_BUFFER_PAGE_SIZE_ARB 0x82F8
+typedef void (APIENTRYP PFNGLBUFFERPAGECOMMITMENTARBPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLboolean commit);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERPAGECOMMITMENTEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERPAGECOMMITMENTARBPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBufferPageCommitmentARB (GLenum target, GLintptr offset, GLsizeiptr size, GLboolean commit);
+GLAPI void APIENTRY glNamedBufferPageCommitmentEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
+GLAPI void APIENTRY glNamedBufferPageCommitmentARB (GLuint buffer, GLintptr offset, GLsizeiptr size, GLboolean commit);
+#endif
+#endif /* GL_ARB_sparse_buffer */
+
+#ifndef GL_ARB_sparse_texture
+#define GL_ARB_sparse_texture 1
+#define GL_TEXTURE_SPARSE_ARB 0x91A6
+#define GL_VIRTUAL_PAGE_SIZE_INDEX_ARB 0x91A7
+#define GL_NUM_SPARSE_LEVELS_ARB 0x91AA
+#define GL_NUM_VIRTUAL_PAGE_SIZES_ARB 0x91A8
+#define GL_VIRTUAL_PAGE_SIZE_X_ARB 0x9195
+#define GL_VIRTUAL_PAGE_SIZE_Y_ARB 0x9196
+#define GL_VIRTUAL_PAGE_SIZE_Z_ARB 0x9197
+#define GL_MAX_SPARSE_TEXTURE_SIZE_ARB 0x9198
+#define GL_MAX_SPARSE_3D_TEXTURE_SIZE_ARB 0x9199
+#define GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS_ARB 0x919A
+#define GL_SPARSE_TEXTURE_FULL_ARRAY_CUBE_MIPMAPS_ARB 0x91A9
+typedef void (APIENTRYP PFNGLTEXPAGECOMMITMENTARBPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexPageCommitmentARB (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+#endif
+#endif /* GL_ARB_sparse_texture */
+
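+/*
+ * Example: committing one region of a sparse texture with the call
+ * declared above. A minimal sketch, guarded out of compilation; the p_*
+ * pointer is an illustrative name assumed to be resolved by a loader,
+ * and the texture is assumed to have GL_TEXTURE_SPARSE_ARB set to
+ * GL_TRUE before its immutable storage was allocated.
+ */
+#if 0
+static PFNGLTEXPAGECOMMITMENTARBPROC p_glTexPageCommitmentARB;
+
+static void example_commit_region(GLsizei page_w, GLsizei page_h)
+{
+    /* Offsets and sizes must be multiples of the virtual page size
+     * (GL_VIRTUAL_PAGE_SIZE_X/Y/Z_ARB); GL_TRUE commits, GL_FALSE
+     * releases the backing pages. */
+    p_glTexPageCommitmentARB(GL_TEXTURE_2D, 0 /* level */,
+                             0, 0, 0, page_w, page_h, 1, GL_TRUE);
+}
+#endif
+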
+#ifndef GL_ARB_sparse_texture2
+#define GL_ARB_sparse_texture2 1
+#endif /* GL_ARB_sparse_texture2 */
+
+#ifndef GL_ARB_sparse_texture_clamp
+#define GL_ARB_sparse_texture_clamp 1
+#endif /* GL_ARB_sparse_texture_clamp */
+
+#ifndef GL_ARB_spirv_extensions
+#define GL_ARB_spirv_extensions 1
+#endif /* GL_ARB_spirv_extensions */
+
+#ifndef GL_ARB_stencil_texturing
+#define GL_ARB_stencil_texturing 1
+#endif /* GL_ARB_stencil_texturing */
+
+#ifndef GL_ARB_sync
+#define GL_ARB_sync 1
+#endif /* GL_ARB_sync */
+
+#ifndef GL_ARB_tessellation_shader
+#define GL_ARB_tessellation_shader 1
+#endif /* GL_ARB_tessellation_shader */
+
+#ifndef GL_ARB_texture_barrier
+#define GL_ARB_texture_barrier 1
+#endif /* GL_ARB_texture_barrier */
+
+#ifndef GL_ARB_texture_border_clamp
+#define GL_ARB_texture_border_clamp 1
+#define GL_CLAMP_TO_BORDER_ARB 0x812D
+#endif /* GL_ARB_texture_border_clamp */
+
+#ifndef GL_ARB_texture_buffer_object
+#define GL_ARB_texture_buffer_object 1
+#define GL_TEXTURE_BUFFER_ARB 0x8C2A
+#define GL_MAX_TEXTURE_BUFFER_SIZE_ARB 0x8C2B
+#define GL_TEXTURE_BINDING_BUFFER_ARB 0x8C2C
+#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_ARB 0x8C2D
+#define GL_TEXTURE_BUFFER_FORMAT_ARB 0x8C2E
+typedef void (APIENTRYP PFNGLTEXBUFFERARBPROC) (GLenum target, GLenum internalformat, GLuint buffer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexBufferARB (GLenum target, GLenum internalformat, GLuint buffer);
+#endif
+#endif /* GL_ARB_texture_buffer_object */
+
+#ifndef GL_ARB_texture_buffer_object_rgb32
+#define GL_ARB_texture_buffer_object_rgb32 1
+#endif /* GL_ARB_texture_buffer_object_rgb32 */
+
+#ifndef GL_ARB_texture_buffer_range
+#define GL_ARB_texture_buffer_range 1
+#endif /* GL_ARB_texture_buffer_range */
+
+#ifndef GL_ARB_texture_compression
+#define GL_ARB_texture_compression 1
+#define GL_COMPRESSED_ALPHA_ARB 0x84E9
+#define GL_COMPRESSED_LUMINANCE_ARB 0x84EA
+#define GL_COMPRESSED_LUMINANCE_ALPHA_ARB 0x84EB
+#define GL_COMPRESSED_INTENSITY_ARB 0x84EC
+#define GL_COMPRESSED_RGB_ARB 0x84ED
+#define GL_COMPRESSED_RGBA_ARB 0x84EE
+#define GL_TEXTURE_COMPRESSION_HINT_ARB 0x84EF
+#define GL_TEXTURE_COMPRESSED_IMAGE_SIZE_ARB 0x86A0
+#define GL_TEXTURE_COMPRESSED_ARB 0x86A1
+#define GL_NUM_COMPRESSED_TEXTURE_FORMATS_ARB 0x86A2
+#define GL_COMPRESSED_TEXTURE_FORMATS_ARB 0x86A3
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE3DARBPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE2DARBPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXIMAGE1DARBPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE3DARBPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE2DARBPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXSUBIMAGE1DARBPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXIMAGEARBPROC) (GLenum target, GLint level, void *img);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCompressedTexImage3DARB (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexImage2DARB (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexImage1DARB (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexSubImage3DARB (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexSubImage2DARB (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glCompressedTexSubImage1DARB (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *data);
+GLAPI void APIENTRY glGetCompressedTexImageARB (GLenum target, GLint level, void *img);
+#endif
+#endif /* GL_ARB_texture_compression */
+
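+/*
+ * Illustrative sketch, not part of glext.h: uploading one pre-compressed
+ * mipmap level through the ARB entry point declared above (assuming
+ * GL_GLEXT_PROTOTYPES is defined). `blob` and `blobSize` are assumed to come
+ * from the application's own asset pipeline, and the format token must match
+ * how the blob was encoded; the BPTC token used here is defined in the next
+ * block. `border` must be 0 for compressed images.
+ */
+static void upload_compressed_level(GLuint tex, const void *blob, GLsizei blobSize,
+                                    GLsizei width, GLsizei height)
+{
+    glBindTexture(GL_TEXTURE_2D, tex);
+    glCompressedTexImage2DARB(GL_TEXTURE_2D, 0, GL_COMPRESSED_RGBA_BPTC_UNORM_ARB,
+                              width, height, 0, blobSize, blob);
+}
+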
+#ifndef GL_ARB_texture_compression_bptc
+#define GL_ARB_texture_compression_bptc 1
+#define GL_COMPRESSED_RGBA_BPTC_UNORM_ARB 0x8E8C
+#define GL_COMPRESSED_SRGB_ALPHA_BPTC_UNORM_ARB 0x8E8D
+#define GL_COMPRESSED_RGB_BPTC_SIGNED_FLOAT_ARB 0x8E8E
+#define GL_COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT_ARB 0x8E8F
+#endif /* GL_ARB_texture_compression_bptc */
+
+#ifndef GL_ARB_texture_compression_rgtc
+#define GL_ARB_texture_compression_rgtc 1
+#endif /* GL_ARB_texture_compression_rgtc */
+
+#ifndef GL_ARB_texture_cube_map
+#define GL_ARB_texture_cube_map 1
+#define GL_NORMAL_MAP_ARB 0x8511
+#define GL_REFLECTION_MAP_ARB 0x8512
+#define GL_TEXTURE_CUBE_MAP_ARB 0x8513
+#define GL_TEXTURE_BINDING_CUBE_MAP_ARB 0x8514
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB 0x8515
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB 0x8516
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB 0x8517
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB 0x8518
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB 0x8519
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB 0x851A
+#define GL_PROXY_TEXTURE_CUBE_MAP_ARB 0x851B
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE_ARB 0x851C
+#endif /* GL_ARB_texture_cube_map */
+
+#ifndef GL_ARB_texture_cube_map_array
+#define GL_ARB_texture_cube_map_array 1
+#define GL_TEXTURE_CUBE_MAP_ARRAY_ARB 0x9009
+#define GL_TEXTURE_BINDING_CUBE_MAP_ARRAY_ARB 0x900A
+#define GL_PROXY_TEXTURE_CUBE_MAP_ARRAY_ARB 0x900B
+#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
+#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
+#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F
+#endif /* GL_ARB_texture_cube_map_array */
+
+#ifndef GL_ARB_texture_env_add
+#define GL_ARB_texture_env_add 1
+#endif /* GL_ARB_texture_env_add */
+
+#ifndef GL_ARB_texture_env_combine
+#define GL_ARB_texture_env_combine 1
+#define GL_COMBINE_ARB 0x8570
+#define GL_COMBINE_RGB_ARB 0x8571
+#define GL_COMBINE_ALPHA_ARB 0x8572
+#define GL_SOURCE0_RGB_ARB 0x8580
+#define GL_SOURCE1_RGB_ARB 0x8581
+#define GL_SOURCE2_RGB_ARB 0x8582
+#define GL_SOURCE0_ALPHA_ARB 0x8588
+#define GL_SOURCE1_ALPHA_ARB 0x8589
+#define GL_SOURCE2_ALPHA_ARB 0x858A
+#define GL_OPERAND0_RGB_ARB 0x8590
+#define GL_OPERAND1_RGB_ARB 0x8591
+#define GL_OPERAND2_RGB_ARB 0x8592
+#define GL_OPERAND0_ALPHA_ARB 0x8598
+#define GL_OPERAND1_ALPHA_ARB 0x8599
+#define GL_OPERAND2_ALPHA_ARB 0x859A
+#define GL_RGB_SCALE_ARB 0x8573
+#define GL_ADD_SIGNED_ARB 0x8574
+#define GL_INTERPOLATE_ARB 0x8575
+#define GL_SUBTRACT_ARB 0x84E7
+#define GL_CONSTANT_ARB 0x8576
+#define GL_PRIMARY_COLOR_ARB 0x8577
+#define GL_PREVIOUS_ARB 0x8578
+#endif /* GL_ARB_texture_env_combine */
+
+#ifndef GL_ARB_texture_env_crossbar
+#define GL_ARB_texture_env_crossbar 1
+#endif /* GL_ARB_texture_env_crossbar */
+
+#ifndef GL_ARB_texture_env_dot3
+#define GL_ARB_texture_env_dot3 1
+#define GL_DOT3_RGB_ARB 0x86AE
+#define GL_DOT3_RGBA_ARB 0x86AF
+#endif /* GL_ARB_texture_env_dot3 */
+
+#ifndef GL_ARB_texture_filter_anisotropic
+#define GL_ARB_texture_filter_anisotropic 1
+#endif /* GL_ARB_texture_filter_anisotropic */
+
+#ifndef GL_ARB_texture_filter_minmax
+#define GL_ARB_texture_filter_minmax 1
+#define GL_TEXTURE_REDUCTION_MODE_ARB 0x9366
+#define GL_WEIGHTED_AVERAGE_ARB 0x9367
+#endif /* GL_ARB_texture_filter_minmax */
+
+#ifndef GL_ARB_texture_float
+#define GL_ARB_texture_float 1
+#define GL_TEXTURE_RED_TYPE_ARB 0x8C10
+#define GL_TEXTURE_GREEN_TYPE_ARB 0x8C11
+#define GL_TEXTURE_BLUE_TYPE_ARB 0x8C12
+#define GL_TEXTURE_ALPHA_TYPE_ARB 0x8C13
+#define GL_TEXTURE_LUMINANCE_TYPE_ARB 0x8C14
+#define GL_TEXTURE_INTENSITY_TYPE_ARB 0x8C15
+#define GL_TEXTURE_DEPTH_TYPE_ARB 0x8C16
+#define GL_UNSIGNED_NORMALIZED_ARB 0x8C17
+#define GL_RGBA32F_ARB 0x8814
+#define GL_RGB32F_ARB 0x8815
+#define GL_ALPHA32F_ARB 0x8816
+#define GL_INTENSITY32F_ARB 0x8817
+#define GL_LUMINANCE32F_ARB 0x8818
+#define GL_LUMINANCE_ALPHA32F_ARB 0x8819
+#define GL_RGBA16F_ARB 0x881A
+#define GL_RGB16F_ARB 0x881B
+#define GL_ALPHA16F_ARB 0x881C
+#define GL_INTENSITY16F_ARB 0x881D
+#define GL_LUMINANCE16F_ARB 0x881E
+#define GL_LUMINANCE_ALPHA16F_ARB 0x881F
+#endif /* GL_ARB_texture_float */
+
+#ifndef GL_ARB_texture_gather
+#define GL_ARB_texture_gather 1
+#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_ARB 0x8E5E
+#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_ARB 0x8E5F
+#define GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB 0x8F9F
+#endif /* GL_ARB_texture_gather */
+
+#ifndef GL_ARB_texture_mirror_clamp_to_edge
+#define GL_ARB_texture_mirror_clamp_to_edge 1
+#endif /* GL_ARB_texture_mirror_clamp_to_edge */
+
+#ifndef GL_ARB_texture_mirrored_repeat
+#define GL_ARB_texture_mirrored_repeat 1
+#define GL_MIRRORED_REPEAT_ARB 0x8370
+#endif /* GL_ARB_texture_mirrored_repeat */
+
+#ifndef GL_ARB_texture_multisample
+#define GL_ARB_texture_multisample 1
+#endif /* GL_ARB_texture_multisample */
+
+#ifndef GL_ARB_texture_non_power_of_two
+#define GL_ARB_texture_non_power_of_two 1
+#endif /* GL_ARB_texture_non_power_of_two */
+
+#ifndef GL_ARB_texture_query_levels
+#define GL_ARB_texture_query_levels 1
+#endif /* GL_ARB_texture_query_levels */
+
+#ifndef GL_ARB_texture_query_lod
+#define GL_ARB_texture_query_lod 1
+#endif /* GL_ARB_texture_query_lod */
+
+#ifndef GL_ARB_texture_rectangle
+#define GL_ARB_texture_rectangle 1
+#define GL_TEXTURE_RECTANGLE_ARB 0x84F5
+#define GL_TEXTURE_BINDING_RECTANGLE_ARB 0x84F6
+#define GL_PROXY_TEXTURE_RECTANGLE_ARB 0x84F7
+#define GL_MAX_RECTANGLE_TEXTURE_SIZE_ARB 0x84F8
+#endif /* GL_ARB_texture_rectangle */
+
+#ifndef GL_ARB_texture_rg
+#define GL_ARB_texture_rg 1
+#endif /* GL_ARB_texture_rg */
+
+#ifndef GL_ARB_texture_rgb10_a2ui
+#define GL_ARB_texture_rgb10_a2ui 1
+#endif /* GL_ARB_texture_rgb10_a2ui */
+
+#ifndef GL_ARB_texture_stencil8
+#define GL_ARB_texture_stencil8 1
+#endif /* GL_ARB_texture_stencil8 */
+
+#ifndef GL_ARB_texture_storage
+#define GL_ARB_texture_storage 1
+#endif /* GL_ARB_texture_storage */
+
+#ifndef GL_ARB_texture_storage_multisample
+#define GL_ARB_texture_storage_multisample 1
+#endif /* GL_ARB_texture_storage_multisample */
+
+#ifndef GL_ARB_texture_swizzle
+#define GL_ARB_texture_swizzle 1
+#endif /* GL_ARB_texture_swizzle */
+
+#ifndef GL_ARB_texture_view
+#define GL_ARB_texture_view 1
+#endif /* GL_ARB_texture_view */
+
+#ifndef GL_ARB_timer_query
+#define GL_ARB_timer_query 1
+#endif /* GL_ARB_timer_query */
+
+#ifndef GL_ARB_transform_feedback2
+#define GL_ARB_transform_feedback2 1
+#endif /* GL_ARB_transform_feedback2 */
+
+#ifndef GL_ARB_transform_feedback3
+#define GL_ARB_transform_feedback3 1
+#endif /* GL_ARB_transform_feedback3 */
+
+#ifndef GL_ARB_transform_feedback_instanced
+#define GL_ARB_transform_feedback_instanced 1
+#endif /* GL_ARB_transform_feedback_instanced */
+
+#ifndef GL_ARB_transform_feedback_overflow_query
+#define GL_ARB_transform_feedback_overflow_query 1
+#define GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB 0x82EC
+#define GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB 0x82ED
+#endif /* GL_ARB_transform_feedback_overflow_query */
+
+#ifndef GL_ARB_transpose_matrix
+#define GL_ARB_transpose_matrix 1
+#define GL_TRANSPOSE_MODELVIEW_MATRIX_ARB 0x84E3
+#define GL_TRANSPOSE_PROJECTION_MATRIX_ARB 0x84E4
+#define GL_TRANSPOSE_TEXTURE_MATRIX_ARB 0x84E5
+#define GL_TRANSPOSE_COLOR_MATRIX_ARB 0x84E6
+typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXFARBPROC) (const GLfloat *m);
+typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXDARBPROC) (const GLdouble *m);
+typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXFARBPROC) (const GLfloat *m);
+typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXDARBPROC) (const GLdouble *m);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glLoadTransposeMatrixfARB (const GLfloat *m);
+GLAPI void APIENTRY glLoadTransposeMatrixdARB (const GLdouble *m);
+GLAPI void APIENTRY glMultTransposeMatrixfARB (const GLfloat *m);
+GLAPI void APIENTRY glMultTransposeMatrixdARB (const GLdouble *m);
+#endif
+#endif /* GL_ARB_transpose_matrix */
+
+#ifndef GL_ARB_uniform_buffer_object
+#define GL_ARB_uniform_buffer_object 1
+#endif /* GL_ARB_uniform_buffer_object */
+
+#ifndef GL_ARB_vertex_array_bgra
+#define GL_ARB_vertex_array_bgra 1
+#endif /* GL_ARB_vertex_array_bgra */
+
+#ifndef GL_ARB_vertex_array_object
+#define GL_ARB_vertex_array_object 1
+#endif /* GL_ARB_vertex_array_object */
+
+#ifndef GL_ARB_vertex_attrib_64bit
+#define GL_ARB_vertex_attrib_64bit 1
+#endif /* GL_ARB_vertex_attrib_64bit */
+
+#ifndef GL_ARB_vertex_attrib_binding
+#define GL_ARB_vertex_attrib_binding 1
+#endif /* GL_ARB_vertex_attrib_binding */
+
+#ifndef GL_ARB_vertex_blend
+#define GL_ARB_vertex_blend 1
+#define GL_MAX_VERTEX_UNITS_ARB 0x86A4
+#define GL_ACTIVE_VERTEX_UNITS_ARB 0x86A5
+#define GL_WEIGHT_SUM_UNITY_ARB 0x86A6
+#define GL_VERTEX_BLEND_ARB 0x86A7
+#define GL_CURRENT_WEIGHT_ARB 0x86A8
+#define GL_WEIGHT_ARRAY_TYPE_ARB 0x86A9
+#define GL_WEIGHT_ARRAY_STRIDE_ARB 0x86AA
+#define GL_WEIGHT_ARRAY_SIZE_ARB 0x86AB
+#define GL_WEIGHT_ARRAY_POINTER_ARB 0x86AC
+#define GL_WEIGHT_ARRAY_ARB 0x86AD
+#define GL_MODELVIEW0_ARB 0x1700
+#define GL_MODELVIEW1_ARB 0x850A
+#define GL_MODELVIEW2_ARB 0x8722
+#define GL_MODELVIEW3_ARB 0x8723
+#define GL_MODELVIEW4_ARB 0x8724
+#define GL_MODELVIEW5_ARB 0x8725
+#define GL_MODELVIEW6_ARB 0x8726
+#define GL_MODELVIEW7_ARB 0x8727
+#define GL_MODELVIEW8_ARB 0x8728
+#define GL_MODELVIEW9_ARB 0x8729
+#define GL_MODELVIEW10_ARB 0x872A
+#define GL_MODELVIEW11_ARB 0x872B
+#define GL_MODELVIEW12_ARB 0x872C
+#define GL_MODELVIEW13_ARB 0x872D
+#define GL_MODELVIEW14_ARB 0x872E
+#define GL_MODELVIEW15_ARB 0x872F
+#define GL_MODELVIEW16_ARB 0x8730
+#define GL_MODELVIEW17_ARB 0x8731
+#define GL_MODELVIEW18_ARB 0x8732
+#define GL_MODELVIEW19_ARB 0x8733
+#define GL_MODELVIEW20_ARB 0x8734
+#define GL_MODELVIEW21_ARB 0x8735
+#define GL_MODELVIEW22_ARB 0x8736
+#define GL_MODELVIEW23_ARB 0x8737
+#define GL_MODELVIEW24_ARB 0x8738
+#define GL_MODELVIEW25_ARB 0x8739
+#define GL_MODELVIEW26_ARB 0x873A
+#define GL_MODELVIEW27_ARB 0x873B
+#define GL_MODELVIEW28_ARB 0x873C
+#define GL_MODELVIEW29_ARB 0x873D
+#define GL_MODELVIEW30_ARB 0x873E
+#define GL_MODELVIEW31_ARB 0x873F
+typedef void (APIENTRYP PFNGLWEIGHTBVARBPROC) (GLint size, const GLbyte *weights);
+typedef void (APIENTRYP PFNGLWEIGHTSVARBPROC) (GLint size, const GLshort *weights);
+typedef void (APIENTRYP PFNGLWEIGHTIVARBPROC) (GLint size, const GLint *weights);
+typedef void (APIENTRYP PFNGLWEIGHTFVARBPROC) (GLint size, const GLfloat *weights);
+typedef void (APIENTRYP PFNGLWEIGHTDVARBPROC) (GLint size, const GLdouble *weights);
+typedef void (APIENTRYP PFNGLWEIGHTUBVARBPROC) (GLint size, const GLubyte *weights);
+typedef void (APIENTRYP PFNGLWEIGHTUSVARBPROC) (GLint size, const GLushort *weights);
+typedef void (APIENTRYP PFNGLWEIGHTUIVARBPROC) (GLint size, const GLuint *weights);
+typedef void (APIENTRYP PFNGLWEIGHTPOINTERARBPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLVERTEXBLENDARBPROC) (GLint count);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glWeightbvARB (GLint size, const GLbyte *weights);
+GLAPI void APIENTRY glWeightsvARB (GLint size, const GLshort *weights);
+GLAPI void APIENTRY glWeightivARB (GLint size, const GLint *weights);
+GLAPI void APIENTRY glWeightfvARB (GLint size, const GLfloat *weights);
+GLAPI void APIENTRY glWeightdvARB (GLint size, const GLdouble *weights);
+GLAPI void APIENTRY glWeightubvARB (GLint size, const GLubyte *weights);
+GLAPI void APIENTRY glWeightusvARB (GLint size, const GLushort *weights);
+GLAPI void APIENTRY glWeightuivARB (GLint size, const GLuint *weights);
+GLAPI void APIENTRY glWeightPointerARB (GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glVertexBlendARB (GLint count);
+#endif
+#endif /* GL_ARB_vertex_blend */
+
+#ifndef GL_ARB_vertex_buffer_object
+#define GL_ARB_vertex_buffer_object 1
+typedef khronos_ssize_t GLsizeiptrARB;
+typedef khronos_intptr_t GLintptrARB;
+#define GL_BUFFER_SIZE_ARB 0x8764
+#define GL_BUFFER_USAGE_ARB 0x8765
+#define GL_ARRAY_BUFFER_ARB 0x8892
+#define GL_ELEMENT_ARRAY_BUFFER_ARB 0x8893
+#define GL_ARRAY_BUFFER_BINDING_ARB 0x8894
+#define GL_ELEMENT_ARRAY_BUFFER_BINDING_ARB 0x8895
+#define GL_VERTEX_ARRAY_BUFFER_BINDING_ARB 0x8896
+#define GL_NORMAL_ARRAY_BUFFER_BINDING_ARB 0x8897
+#define GL_COLOR_ARRAY_BUFFER_BINDING_ARB 0x8898
+#define GL_INDEX_ARRAY_BUFFER_BINDING_ARB 0x8899
+#define GL_TEXTURE_COORD_ARRAY_BUFFER_BINDING_ARB 0x889A
+#define GL_EDGE_FLAG_ARRAY_BUFFER_BINDING_ARB 0x889B
+#define GL_SECONDARY_COLOR_ARRAY_BUFFER_BINDING_ARB 0x889C
+#define GL_FOG_COORDINATE_ARRAY_BUFFER_BINDING_ARB 0x889D
+#define GL_WEIGHT_ARRAY_BUFFER_BINDING_ARB 0x889E
+#define GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING_ARB 0x889F
+#define GL_READ_ONLY_ARB 0x88B8
+#define GL_WRITE_ONLY_ARB 0x88B9
+#define GL_READ_WRITE_ARB 0x88BA
+#define GL_BUFFER_ACCESS_ARB 0x88BB
+#define GL_BUFFER_MAPPED_ARB 0x88BC
+#define GL_BUFFER_MAP_POINTER_ARB 0x88BD
+#define GL_STREAM_DRAW_ARB 0x88E0
+#define GL_STREAM_READ_ARB 0x88E1
+#define GL_STREAM_COPY_ARB 0x88E2
+#define GL_STATIC_DRAW_ARB 0x88E4
+#define GL_STATIC_READ_ARB 0x88E5
+#define GL_STATIC_COPY_ARB 0x88E6
+#define GL_DYNAMIC_DRAW_ARB 0x88E8
+#define GL_DYNAMIC_READ_ARB 0x88E9
+#define GL_DYNAMIC_COPY_ARB 0x88EA
+typedef void (APIENTRYP PFNGLBINDBUFFERARBPROC) (GLenum target, GLuint buffer);
+typedef void (APIENTRYP PFNGLDELETEBUFFERSARBPROC) (GLsizei n, const GLuint *buffers);
+typedef void (APIENTRYP PFNGLGENBUFFERSARBPROC) (GLsizei n, GLuint *buffers);
+typedef GLboolean (APIENTRYP PFNGLISBUFFERARBPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLBUFFERDATAARBPROC) (GLenum target, GLsizeiptrARB size, const void *data, GLenum usage);
+typedef void (APIENTRYP PFNGLBUFFERSUBDATAARBPROC) (GLenum target, GLintptrARB offset, GLsizeiptrARB size, const void *data);
+typedef void (APIENTRYP PFNGLGETBUFFERSUBDATAARBPROC) (GLenum target, GLintptrARB offset, GLsizeiptrARB size, void *data);
+typedef void *(APIENTRYP PFNGLMAPBUFFERARBPROC) (GLenum target, GLenum access);
+typedef GLboolean (APIENTRYP PFNGLUNMAPBUFFERARBPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERIVARBPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETBUFFERPOINTERVARBPROC) (GLenum target, GLenum pname, void **params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindBufferARB (GLenum target, GLuint buffer);
+GLAPI void APIENTRY glDeleteBuffersARB (GLsizei n, const GLuint *buffers);
+GLAPI void APIENTRY glGenBuffersARB (GLsizei n, GLuint *buffers);
+GLAPI GLboolean APIENTRY glIsBufferARB (GLuint buffer);
+GLAPI void APIENTRY glBufferDataARB (GLenum target, GLsizeiptrARB size, const void *data, GLenum usage);
+GLAPI void APIENTRY glBufferSubDataARB (GLenum target, GLintptrARB offset, GLsizeiptrARB size, const void *data);
+GLAPI void APIENTRY glGetBufferSubDataARB (GLenum target, GLintptrARB offset, GLsizeiptrARB size, void *data);
+GLAPI void *APIENTRY glMapBufferARB (GLenum target, GLenum access);
+GLAPI GLboolean APIENTRY glUnmapBufferARB (GLenum target);
+GLAPI void APIENTRY glGetBufferParameterivARB (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetBufferPointervARB (GLenum target, GLenum pname, void **params);
+#endif
+#endif /* GL_ARB_vertex_buffer_object */
+
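+/*
+ * Illustrative sketch, not part of glext.h: the PFNGL...ARBPROC typedefs above
+ * exist so applications can resolve extension entry points at run time rather
+ * than linking them. The loader call is platform-specific; wglGetProcAddress
+ * is assumed here (Windows), with glXGetProcAddress playing the same role
+ * elsewhere.
+ */
+static PFNGLGENBUFFERSARBPROC pglGenBuffersARB;
+static PFNGLBINDBUFFERARBPROC pglBindBufferARB;
+static PFNGLBUFFERDATAARBPROC pglBufferDataARB;
+
+static void init_vbo_entry_points(void)
+{
+    pglGenBuffersARB = (PFNGLGENBUFFERSARBPROC) wglGetProcAddress("glGenBuffersARB");
+    pglBindBufferARB = (PFNGLBINDBUFFERARBPROC) wglGetProcAddress("glBindBufferARB");
+    pglBufferDataARB = (PFNGLBUFFERDATAARBPROC) wglGetProcAddress("glBufferDataARB");
+}
+
+/* Typical use: create a static vertex buffer from client memory. */
+static GLuint make_static_vbo(const void *data, GLsizeiptrARB size)
+{
+    GLuint buf = 0;
+    pglGenBuffersARB(1, &buf);
+    pglBindBufferARB(GL_ARRAY_BUFFER_ARB, buf);
+    pglBufferDataARB(GL_ARRAY_BUFFER_ARB, size, data, GL_STATIC_DRAW_ARB);
+    return buf;
+}
+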
+#ifndef GL_ARB_vertex_program
+#define GL_ARB_vertex_program 1
+#define GL_COLOR_SUM_ARB 0x8458
+#define GL_VERTEX_PROGRAM_ARB 0x8620
+#define GL_VERTEX_ATTRIB_ARRAY_ENABLED_ARB 0x8622
+#define GL_VERTEX_ATTRIB_ARRAY_SIZE_ARB 0x8623
+#define GL_VERTEX_ATTRIB_ARRAY_STRIDE_ARB 0x8624
+#define GL_VERTEX_ATTRIB_ARRAY_TYPE_ARB 0x8625
+#define GL_CURRENT_VERTEX_ATTRIB_ARB 0x8626
+#define GL_VERTEX_PROGRAM_POINT_SIZE_ARB 0x8642
+#define GL_VERTEX_PROGRAM_TWO_SIDE_ARB 0x8643
+#define GL_VERTEX_ATTRIB_ARRAY_POINTER_ARB 0x8645
+#define GL_MAX_VERTEX_ATTRIBS_ARB 0x8869
+#define GL_VERTEX_ATTRIB_ARRAY_NORMALIZED_ARB 0x886A
+#define GL_PROGRAM_ADDRESS_REGISTERS_ARB 0x88B0
+#define GL_MAX_PROGRAM_ADDRESS_REGISTERS_ARB 0x88B1
+#define GL_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB 0x88B2
+#define GL_MAX_PROGRAM_NATIVE_ADDRESS_REGISTERS_ARB 0x88B3
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1DARBPROC) (GLuint index, GLdouble x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVARBPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1FARBPROC) (GLuint index, GLfloat x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVARBPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1SARBPROC) (GLuint index, GLshort x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVARBPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2DARBPROC) (GLuint index, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVARBPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2FARBPROC) (GLuint index, GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVARBPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2SARBPROC) (GLuint index, GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVARBPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3DARBPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVARBPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3FARBPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVARBPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3SARBPROC) (GLuint index, GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVARBPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NBVARBPROC) (GLuint index, const GLbyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NIVARBPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NSVARBPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBARBPROC) (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUBVARBPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUIVARBPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4NUSVARBPROC) (GLuint index, const GLushort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4BVARBPROC) (GLuint index, const GLbyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4DARBPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVARBPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4FARBPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVARBPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4IVARBPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SARBPROC) (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVARBPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVARBPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4UIVARBPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4USVARBPROC) (GLuint index, const GLushort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERARBPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBARRAYARBPROC) (GLuint index);
+typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBARRAYARBPROC) (GLuint index);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVARBPROC) (GLuint index, GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVARBPROC) (GLuint index, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIVARBPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVARBPROC) (GLuint index, GLenum pname, void **pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttrib1dARB (GLuint index, GLdouble x);
+GLAPI void APIENTRY glVertexAttrib1dvARB (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib1fARB (GLuint index, GLfloat x);
+GLAPI void APIENTRY glVertexAttrib1fvARB (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib1sARB (GLuint index, GLshort x);
+GLAPI void APIENTRY glVertexAttrib1svARB (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib2dARB (GLuint index, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glVertexAttrib2dvARB (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib2fARB (GLuint index, GLfloat x, GLfloat y);
+GLAPI void APIENTRY glVertexAttrib2fvARB (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib2sARB (GLuint index, GLshort x, GLshort y);
+GLAPI void APIENTRY glVertexAttrib2svARB (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib3dARB (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glVertexAttrib3dvARB (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib3fARB (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glVertexAttrib3fvARB (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib3sARB (GLuint index, GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glVertexAttrib3svARB (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4NbvARB (GLuint index, const GLbyte *v);
+GLAPI void APIENTRY glVertexAttrib4NivARB (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttrib4NsvARB (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4NubARB (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+GLAPI void APIENTRY glVertexAttrib4NubvARB (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttrib4NuivARB (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttrib4NusvARB (GLuint index, const GLushort *v);
+GLAPI void APIENTRY glVertexAttrib4bvARB (GLuint index, const GLbyte *v);
+GLAPI void APIENTRY glVertexAttrib4dARB (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glVertexAttrib4dvARB (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib4fARB (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glVertexAttrib4fvARB (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib4ivARB (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttrib4sARB (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+GLAPI void APIENTRY glVertexAttrib4svARB (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4ubvARB (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttrib4uivARB (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttrib4usvARB (GLuint index, const GLushort *v);
+GLAPI void APIENTRY glVertexAttribPointerARB (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glEnableVertexAttribArrayARB (GLuint index);
+GLAPI void APIENTRY glDisableVertexAttribArrayARB (GLuint index);
+GLAPI void APIENTRY glGetVertexAttribdvARB (GLuint index, GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glGetVertexAttribfvARB (GLuint index, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetVertexAttribivARB (GLuint index, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribPointervARB (GLuint index, GLenum pname, void **pointer);
+#endif
+#endif /* GL_ARB_vertex_program */
+
+#ifndef GL_ARB_vertex_shader
+#define GL_ARB_vertex_shader 1
+#define GL_VERTEX_SHADER_ARB 0x8B31
+#define GL_MAX_VERTEX_UNIFORM_COMPONENTS_ARB 0x8B4A
+#define GL_MAX_VARYING_FLOATS_ARB 0x8B4B
+#define GL_MAX_VERTEX_TEXTURE_IMAGE_UNITS_ARB 0x8B4C
+#define GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS_ARB 0x8B4D
+#define GL_OBJECT_ACTIVE_ATTRIBUTES_ARB 0x8B89
+#define GL_OBJECT_ACTIVE_ATTRIBUTE_MAX_LENGTH_ARB 0x8B8A
+typedef void (APIENTRYP PFNGLBINDATTRIBLOCATIONARBPROC) (GLhandleARB programObj, GLuint index, const GLcharARB *name);
+typedef void (APIENTRYP PFNGLGETACTIVEATTRIBARBPROC) (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name);
+typedef GLint (APIENTRYP PFNGLGETATTRIBLOCATIONARBPROC) (GLhandleARB programObj, const GLcharARB *name);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindAttribLocationARB (GLhandleARB programObj, GLuint index, const GLcharARB *name);
+GLAPI void APIENTRY glGetActiveAttribARB (GLhandleARB programObj, GLuint index, GLsizei maxLength, GLsizei *length, GLint *size, GLenum *type, GLcharARB *name);
+GLAPI GLint APIENTRY glGetAttribLocationARB (GLhandleARB programObj, const GLcharARB *name);
+#endif
+#endif /* GL_ARB_vertex_shader */
+
+#ifndef GL_ARB_vertex_type_10f_11f_11f_rev
+#define GL_ARB_vertex_type_10f_11f_11f_rev 1
+#endif /* GL_ARB_vertex_type_10f_11f_11f_rev */
+
+#ifndef GL_ARB_vertex_type_2_10_10_10_rev
+#define GL_ARB_vertex_type_2_10_10_10_rev 1
+#endif /* GL_ARB_vertex_type_2_10_10_10_rev */
+
+#ifndef GL_ARB_viewport_array
+#define GL_ARB_viewport_array 1
+#endif /* GL_ARB_viewport_array */
+
+#ifndef GL_ARB_window_pos
+#define GL_ARB_window_pos 1
+typedef void (APIENTRYP PFNGLWINDOWPOS2DARBPROC) (GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2DVARBPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2FARBPROC) (GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2FVARBPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2IARBPROC) (GLint x, GLint y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2IVARBPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2SARBPROC) (GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2SVARBPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3DARBPROC) (GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3DVARBPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3FARBPROC) (GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3FVARBPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3IARBPROC) (GLint x, GLint y, GLint z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3IVARBPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3SARBPROC) (GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3SVARBPROC) (const GLshort *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glWindowPos2dARB (GLdouble x, GLdouble y);
+GLAPI void APIENTRY glWindowPos2dvARB (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos2fARB (GLfloat x, GLfloat y);
+GLAPI void APIENTRY glWindowPos2fvARB (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos2iARB (GLint x, GLint y);
+GLAPI void APIENTRY glWindowPos2ivARB (const GLint *v);
+GLAPI void APIENTRY glWindowPos2sARB (GLshort x, GLshort y);
+GLAPI void APIENTRY glWindowPos2svARB (const GLshort *v);
+GLAPI void APIENTRY glWindowPos3dARB (GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glWindowPos3dvARB (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos3fARB (GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glWindowPos3fvARB (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos3iARB (GLint x, GLint y, GLint z);
+GLAPI void APIENTRY glWindowPos3ivARB (const GLint *v);
+GLAPI void APIENTRY glWindowPos3sARB (GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glWindowPos3svARB (const GLshort *v);
+#endif
+#endif /* GL_ARB_window_pos */
+
+#ifndef GL_KHR_blend_equation_advanced
+#define GL_KHR_blend_equation_advanced 1
+#define GL_MULTIPLY_KHR 0x9294
+#define GL_SCREEN_KHR 0x9295
+#define GL_OVERLAY_KHR 0x9296
+#define GL_DARKEN_KHR 0x9297
+#define GL_LIGHTEN_KHR 0x9298
+#define GL_COLORDODGE_KHR 0x9299
+#define GL_COLORBURN_KHR 0x929A
+#define GL_HARDLIGHT_KHR 0x929B
+#define GL_SOFTLIGHT_KHR 0x929C
+#define GL_DIFFERENCE_KHR 0x929E
+#define GL_EXCLUSION_KHR 0x92A0
+#define GL_HSL_HUE_KHR 0x92AD
+#define GL_HSL_SATURATION_KHR 0x92AE
+#define GL_HSL_COLOR_KHR 0x92AF
+#define GL_HSL_LUMINOSITY_KHR 0x92B0
+typedef void (APIENTRYP PFNGLBLENDBARRIERKHRPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendBarrierKHR (void);
+#endif
+#endif /* GL_KHR_blend_equation_advanced */
+
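+/*
+ * Illustrative sketch, not part of glext.h: the advanced blend modes above are
+ * selected through the ordinary glBlendEquation call (assuming
+ * GL_GLEXT_PROTOTYPES). Without GL_KHR_blend_equation_advanced_coherent (next
+ * block), glBlendBarrierKHR must separate draws that overlap in framebuffer
+ * space so earlier blending results become visible to later ones.
+ */
+extern void draw_first_layer(void);   /* hypothetical application callbacks */
+extern void draw_second_layer(void);
+
+static void draw_multiply_blended(void)
+{
+    glEnable(GL_BLEND);
+    glBlendEquation(GL_MULTIPLY_KHR);
+    draw_first_layer();
+    glBlendBarrierKHR();   /* order blending between overlapping geometry */
+    draw_second_layer();
+}
+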
+#ifndef GL_KHR_blend_equation_advanced_coherent
+#define GL_KHR_blend_equation_advanced_coherent 1
+#define GL_BLEND_ADVANCED_COHERENT_KHR 0x9285
+#endif /* GL_KHR_blend_equation_advanced_coherent */
+
+#ifndef GL_KHR_context_flush_control
+#define GL_KHR_context_flush_control 1
+#endif /* GL_KHR_context_flush_control */
+
+#ifndef GL_KHR_debug
+#define GL_KHR_debug 1
+#endif /* GL_KHR_debug */
+
+#ifndef GL_KHR_no_error
+#define GL_KHR_no_error 1
+#define GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR 0x00000008
+#endif /* GL_KHR_no_error */
+
+#ifndef GL_KHR_parallel_shader_compile
+#define GL_KHR_parallel_shader_compile 1
+#define GL_MAX_SHADER_COMPILER_THREADS_KHR 0x91B0
+#define GL_COMPLETION_STATUS_KHR 0x91B1
+typedef void (APIENTRYP PFNGLMAXSHADERCOMPILERTHREADSKHRPROC) (GLuint count);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMaxShaderCompilerThreadsKHR (GLuint count);
+#endif
+#endif /* GL_KHR_parallel_shader_compile */
+
+#ifndef GL_KHR_robust_buffer_access_behavior
+#define GL_KHR_robust_buffer_access_behavior 1
+#endif /* GL_KHR_robust_buffer_access_behavior */
+
+#ifndef GL_KHR_robustness
+#define GL_KHR_robustness 1
+#define GL_CONTEXT_ROBUST_ACCESS 0x90F3
+#endif /* GL_KHR_robustness */
+
+#ifndef GL_KHR_shader_subgroup
+#define GL_KHR_shader_subgroup 1
+#define GL_SUBGROUP_SIZE_KHR 0x9532
+#define GL_SUBGROUP_SUPPORTED_STAGES_KHR 0x9533
+#define GL_SUBGROUP_SUPPORTED_FEATURES_KHR 0x9534
+#define GL_SUBGROUP_QUAD_ALL_STAGES_KHR 0x9535
+#define GL_SUBGROUP_FEATURE_BASIC_BIT_KHR 0x00000001
+#define GL_SUBGROUP_FEATURE_VOTE_BIT_KHR 0x00000002
+#define GL_SUBGROUP_FEATURE_ARITHMETIC_BIT_KHR 0x00000004
+#define GL_SUBGROUP_FEATURE_BALLOT_BIT_KHR 0x00000008
+#define GL_SUBGROUP_FEATURE_SHUFFLE_BIT_KHR 0x00000010
+#define GL_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT_KHR 0x00000020
+#define GL_SUBGROUP_FEATURE_CLUSTERED_BIT_KHR 0x00000040
+#define GL_SUBGROUP_FEATURE_QUAD_BIT_KHR 0x00000080
+#endif /* GL_KHR_shader_subgroup */
+
+#ifndef GL_KHR_texture_compression_astc_hdr
+#define GL_KHR_texture_compression_astc_hdr 1
+#define GL_COMPRESSED_RGBA_ASTC_4x4_KHR 0x93B0
+#define GL_COMPRESSED_RGBA_ASTC_5x4_KHR 0x93B1
+#define GL_COMPRESSED_RGBA_ASTC_5x5_KHR 0x93B2
+#define GL_COMPRESSED_RGBA_ASTC_6x5_KHR 0x93B3
+#define GL_COMPRESSED_RGBA_ASTC_6x6_KHR 0x93B4
+#define GL_COMPRESSED_RGBA_ASTC_8x5_KHR 0x93B5
+#define GL_COMPRESSED_RGBA_ASTC_8x6_KHR 0x93B6
+#define GL_COMPRESSED_RGBA_ASTC_8x8_KHR 0x93B7
+#define GL_COMPRESSED_RGBA_ASTC_10x5_KHR 0x93B8
+#define GL_COMPRESSED_RGBA_ASTC_10x6_KHR 0x93B9
+#define GL_COMPRESSED_RGBA_ASTC_10x8_KHR 0x93BA
+#define GL_COMPRESSED_RGBA_ASTC_10x10_KHR 0x93BB
+#define GL_COMPRESSED_RGBA_ASTC_12x10_KHR 0x93BC
+#define GL_COMPRESSED_RGBA_ASTC_12x12_KHR 0x93BD
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR 0x93D0
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR 0x93D1
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR 0x93D2
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR 0x93D3
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR 0x93D4
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR 0x93D5
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR 0x93D6
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR 0x93D7
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR 0x93D8
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR 0x93D9
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR 0x93DA
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR 0x93DB
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR 0x93DC
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR 0x93DD
+#endif /* GL_KHR_texture_compression_astc_hdr */
+
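+/*
+ * Illustrative sketch, not part of glext.h: every ASTC block encodes to 128
+ * bits (16 bytes), so the imageSize expected when uploading the formats above
+ * follows directly from the WxH block footprint in the token name (e.g. 8x6).
+ */
+static GLsizei astc_image_size(GLsizei width, GLsizei height,
+                               GLsizei blockW, GLsizei blockH)
+{
+    GLsizei blocksX = (width  + blockW - 1) / blockW;  /* round up */
+    GLsizei blocksY = (height + blockH - 1) / blockH;
+    return blocksX * blocksY * 16;                     /* 16 bytes per block */
+}
+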
+#ifndef GL_KHR_texture_compression_astc_ldr
+#define GL_KHR_texture_compression_astc_ldr 1
+#endif /* GL_KHR_texture_compression_astc_ldr */
+
+#ifndef GL_KHR_texture_compression_astc_sliced_3d
+#define GL_KHR_texture_compression_astc_sliced_3d 1
+#endif /* GL_KHR_texture_compression_astc_sliced_3d */
+
+#ifndef GL_OES_byte_coordinates
+#define GL_OES_byte_coordinates 1
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1BOESPROC) (GLenum texture, GLbyte s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1BVOESPROC) (GLenum texture, const GLbyte *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2BOESPROC) (GLenum texture, GLbyte s, GLbyte t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2BVOESPROC) (GLenum texture, const GLbyte *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3BOESPROC) (GLenum texture, GLbyte s, GLbyte t, GLbyte r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3BVOESPROC) (GLenum texture, const GLbyte *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4BOESPROC) (GLenum texture, GLbyte s, GLbyte t, GLbyte r, GLbyte q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4BVOESPROC) (GLenum texture, const GLbyte *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD1BOESPROC) (GLbyte s);
+typedef void (APIENTRYP PFNGLTEXCOORD1BVOESPROC) (const GLbyte *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD2BOESPROC) (GLbyte s, GLbyte t);
+typedef void (APIENTRYP PFNGLTEXCOORD2BVOESPROC) (const GLbyte *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD3BOESPROC) (GLbyte s, GLbyte t, GLbyte r);
+typedef void (APIENTRYP PFNGLTEXCOORD3BVOESPROC) (const GLbyte *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD4BOESPROC) (GLbyte s, GLbyte t, GLbyte r, GLbyte q);
+typedef void (APIENTRYP PFNGLTEXCOORD4BVOESPROC) (const GLbyte *coords);
+typedef void (APIENTRYP PFNGLVERTEX2BOESPROC) (GLbyte x, GLbyte y);
+typedef void (APIENTRYP PFNGLVERTEX2BVOESPROC) (const GLbyte *coords);
+typedef void (APIENTRYP PFNGLVERTEX3BOESPROC) (GLbyte x, GLbyte y, GLbyte z);
+typedef void (APIENTRYP PFNGLVERTEX3BVOESPROC) (const GLbyte *coords);
+typedef void (APIENTRYP PFNGLVERTEX4BOESPROC) (GLbyte x, GLbyte y, GLbyte z, GLbyte w);
+typedef void (APIENTRYP PFNGLVERTEX4BVOESPROC) (const GLbyte *coords);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiTexCoord1bOES (GLenum texture, GLbyte s);
+GLAPI void APIENTRY glMultiTexCoord1bvOES (GLenum texture, const GLbyte *coords);
+GLAPI void APIENTRY glMultiTexCoord2bOES (GLenum texture, GLbyte s, GLbyte t);
+GLAPI void APIENTRY glMultiTexCoord2bvOES (GLenum texture, const GLbyte *coords);
+GLAPI void APIENTRY glMultiTexCoord3bOES (GLenum texture, GLbyte s, GLbyte t, GLbyte r);
+GLAPI void APIENTRY glMultiTexCoord3bvOES (GLenum texture, const GLbyte *coords);
+GLAPI void APIENTRY glMultiTexCoord4bOES (GLenum texture, GLbyte s, GLbyte t, GLbyte r, GLbyte q);
+GLAPI void APIENTRY glMultiTexCoord4bvOES (GLenum texture, const GLbyte *coords);
+GLAPI void APIENTRY glTexCoord1bOES (GLbyte s);
+GLAPI void APIENTRY glTexCoord1bvOES (const GLbyte *coords);
+GLAPI void APIENTRY glTexCoord2bOES (GLbyte s, GLbyte t);
+GLAPI void APIENTRY glTexCoord2bvOES (const GLbyte *coords);
+GLAPI void APIENTRY glTexCoord3bOES (GLbyte s, GLbyte t, GLbyte r);
+GLAPI void APIENTRY glTexCoord3bvOES (const GLbyte *coords);
+GLAPI void APIENTRY glTexCoord4bOES (GLbyte s, GLbyte t, GLbyte r, GLbyte q);
+GLAPI void APIENTRY glTexCoord4bvOES (const GLbyte *coords);
+GLAPI void APIENTRY glVertex2bOES (GLbyte x, GLbyte y);
+GLAPI void APIENTRY glVertex2bvOES (const GLbyte *coords);
+GLAPI void APIENTRY glVertex3bOES (GLbyte x, GLbyte y, GLbyte z);
+GLAPI void APIENTRY glVertex3bvOES (const GLbyte *coords);
+GLAPI void APIENTRY glVertex4bOES (GLbyte x, GLbyte y, GLbyte z, GLbyte w);
+GLAPI void APIENTRY glVertex4bvOES (const GLbyte *coords);
+#endif
+#endif /* GL_OES_byte_coordinates */
+
+#ifndef GL_OES_compressed_paletted_texture
+#define GL_OES_compressed_paletted_texture 1
+#define GL_PALETTE4_RGB8_OES 0x8B90
+#define GL_PALETTE4_RGBA8_OES 0x8B91
+#define GL_PALETTE4_R5_G6_B5_OES 0x8B92
+#define GL_PALETTE4_RGBA4_OES 0x8B93
+#define GL_PALETTE4_RGB5_A1_OES 0x8B94
+#define GL_PALETTE8_RGB8_OES 0x8B95
+#define GL_PALETTE8_RGBA8_OES 0x8B96
+#define GL_PALETTE8_R5_G6_B5_OES 0x8B97
+#define GL_PALETTE8_RGBA4_OES 0x8B98
+#define GL_PALETTE8_RGB5_A1_OES 0x8B99
+#endif /* GL_OES_compressed_paletted_texture */
+
+#ifndef GL_OES_fixed_point
+#define GL_OES_fixed_point 1
+typedef khronos_int32_t GLfixed;
+#define GL_FIXED_OES 0x140C
+typedef void (APIENTRYP PFNGLALPHAFUNCXOESPROC) (GLenum func, GLfixed ref);
+typedef void (APIENTRYP PFNGLCLEARCOLORXOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+typedef void (APIENTRYP PFNGLCLEARDEPTHXOESPROC) (GLfixed depth);
+typedef void (APIENTRYP PFNGLCLIPPLANEXOESPROC) (GLenum plane, const GLfixed *equation);
+typedef void (APIENTRYP PFNGLCOLOR4XOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+typedef void (APIENTRYP PFNGLDEPTHRANGEXOESPROC) (GLfixed n, GLfixed f);
+typedef void (APIENTRYP PFNGLFOGXOESPROC) (GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLFOGXVOESPROC) (GLenum pname, const GLfixed *param);
+typedef void (APIENTRYP PFNGLFRUSTUMXOESPROC) (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f);
+typedef void (APIENTRYP PFNGLGETCLIPPLANEXOESPROC) (GLenum plane, GLfixed *equation);
+typedef void (APIENTRYP PFNGLGETFIXEDVOESPROC) (GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLGETTEXENVXVOESPROC) (GLenum target, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLGETTEXPARAMETERXVOESPROC) (GLenum target, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLLIGHTMODELXOESPROC) (GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLLIGHTMODELXVOESPROC) (GLenum pname, const GLfixed *param);
+typedef void (APIENTRYP PFNGLLIGHTXOESPROC) (GLenum light, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLLIGHTXVOESPROC) (GLenum light, GLenum pname, const GLfixed *params);
+typedef void (APIENTRYP PFNGLLINEWIDTHXOESPROC) (GLfixed width);
+typedef void (APIENTRYP PFNGLLOADMATRIXXOESPROC) (const GLfixed *m);
+typedef void (APIENTRYP PFNGLMATERIALXOESPROC) (GLenum face, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLMATERIALXVOESPROC) (GLenum face, GLenum pname, const GLfixed *param);
+typedef void (APIENTRYP PFNGLMULTMATRIXXOESPROC) (const GLfixed *m);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4XOESPROC) (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q);
+typedef void (APIENTRYP PFNGLNORMAL3XOESPROC) (GLfixed nx, GLfixed ny, GLfixed nz);
+typedef void (APIENTRYP PFNGLORTHOXOESPROC) (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERXVOESPROC) (GLenum pname, const GLfixed *params);
+typedef void (APIENTRYP PFNGLPOINTSIZEXOESPROC) (GLfixed size);
+typedef void (APIENTRYP PFNGLPOLYGONOFFSETXOESPROC) (GLfixed factor, GLfixed units);
+typedef void (APIENTRYP PFNGLROTATEXOESPROC) (GLfixed angle, GLfixed x, GLfixed y, GLfixed z);
+typedef void (APIENTRYP PFNGLSCALEXOESPROC) (GLfixed x, GLfixed y, GLfixed z);
+typedef void (APIENTRYP PFNGLTEXENVXOESPROC) (GLenum target, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLTEXENVXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params);
+typedef void (APIENTRYP PFNGLTEXPARAMETERXOESPROC) (GLenum target, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLTEXPARAMETERXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params);
+typedef void (APIENTRYP PFNGLTRANSLATEXOESPROC) (GLfixed x, GLfixed y, GLfixed z);
+typedef void (APIENTRYP PFNGLACCUMXOESPROC) (GLenum op, GLfixed value);
+typedef void (APIENTRYP PFNGLBITMAPXOESPROC) (GLsizei width, GLsizei height, GLfixed xorig, GLfixed yorig, GLfixed xmove, GLfixed ymove, const GLubyte *bitmap);
+typedef void (APIENTRYP PFNGLBLENDCOLORXOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+typedef void (APIENTRYP PFNGLCLEARACCUMXOESPROC) (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+typedef void (APIENTRYP PFNGLCOLOR3XOESPROC) (GLfixed red, GLfixed green, GLfixed blue);
+typedef void (APIENTRYP PFNGLCOLOR3XVOESPROC) (const GLfixed *components);
+typedef void (APIENTRYP PFNGLCOLOR4XVOESPROC) (const GLfixed *components);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERXOESPROC) (GLenum target, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERXVOESPROC) (GLenum target, GLenum pname, const GLfixed *params);
+typedef void (APIENTRYP PFNGLEVALCOORD1XOESPROC) (GLfixed u);
+typedef void (APIENTRYP PFNGLEVALCOORD1XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLEVALCOORD2XOESPROC) (GLfixed u, GLfixed v);
+typedef void (APIENTRYP PFNGLEVALCOORD2XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLFEEDBACKBUFFERXOESPROC) (GLsizei n, GLenum type, const GLfixed *buffer);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERXVOESPROC) (GLenum target, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERXVOESPROC) (GLenum target, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLGETLIGHTXOESPROC) (GLenum light, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLGETMAPXVOESPROC) (GLenum target, GLenum query, GLfixed *v);
+typedef void (APIENTRYP PFNGLGETMATERIALXOESPROC) (GLenum face, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLGETPIXELMAPXVPROC) (GLenum map, GLint size, GLfixed *values);
+typedef void (APIENTRYP PFNGLGETTEXGENXVOESPROC) (GLenum coord, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLGETTEXLEVELPARAMETERXVOESPROC) (GLenum target, GLint level, GLenum pname, GLfixed *params);
+typedef void (APIENTRYP PFNGLINDEXXOESPROC) (GLfixed component);
+typedef void (APIENTRYP PFNGLINDEXXVOESPROC) (const GLfixed *component);
+typedef void (APIENTRYP PFNGLLOADTRANSPOSEMATRIXXOESPROC) (const GLfixed *m);
+typedef void (APIENTRYP PFNGLMAP1XOESPROC) (GLenum target, GLfixed u1, GLfixed u2, GLint stride, GLint order, GLfixed points);
+typedef void (APIENTRYP PFNGLMAP2XOESPROC) (GLenum target, GLfixed u1, GLfixed u2, GLint ustride, GLint uorder, GLfixed v1, GLfixed v2, GLint vstride, GLint vorder, GLfixed points);
+typedef void (APIENTRYP PFNGLMAPGRID1XOESPROC) (GLint n, GLfixed u1, GLfixed u2);
+typedef void (APIENTRYP PFNGLMAPGRID2XOESPROC) (GLint n, GLfixed u1, GLfixed u2, GLfixed v1, GLfixed v2);
+typedef void (APIENTRYP PFNGLMULTTRANSPOSEMATRIXXOESPROC) (const GLfixed *m);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1XOESPROC) (GLenum texture, GLfixed s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1XVOESPROC) (GLenum texture, const GLfixed *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2XOESPROC) (GLenum texture, GLfixed s, GLfixed t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2XVOESPROC) (GLenum texture, const GLfixed *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3XOESPROC) (GLenum texture, GLfixed s, GLfixed t, GLfixed r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3XVOESPROC) (GLenum texture, const GLfixed *coords);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4XVOESPROC) (GLenum texture, const GLfixed *coords);
+typedef void (APIENTRYP PFNGLNORMAL3XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLPASSTHROUGHXOESPROC) (GLfixed token);
+typedef void (APIENTRYP PFNGLPIXELMAPXPROC) (GLenum map, GLint size, const GLfixed *values);
+typedef void (APIENTRYP PFNGLPIXELSTOREXPROC) (GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLPIXELTRANSFERXOESPROC) (GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLPIXELZOOMXOESPROC) (GLfixed xfactor, GLfixed yfactor);
+typedef void (APIENTRYP PFNGLPRIORITIZETEXTURESXOESPROC) (GLsizei n, const GLuint *textures, const GLfixed *priorities);
+typedef void (APIENTRYP PFNGLRASTERPOS2XOESPROC) (GLfixed x, GLfixed y);
+typedef void (APIENTRYP PFNGLRASTERPOS2XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLRASTERPOS3XOESPROC) (GLfixed x, GLfixed y, GLfixed z);
+typedef void (APIENTRYP PFNGLRASTERPOS3XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLRASTERPOS4XOESPROC) (GLfixed x, GLfixed y, GLfixed z, GLfixed w);
+typedef void (APIENTRYP PFNGLRASTERPOS4XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLRECTXOESPROC) (GLfixed x1, GLfixed y1, GLfixed x2, GLfixed y2);
+typedef void (APIENTRYP PFNGLRECTXVOESPROC) (const GLfixed *v1, const GLfixed *v2);
+typedef void (APIENTRYP PFNGLTEXCOORD1XOESPROC) (GLfixed s);
+typedef void (APIENTRYP PFNGLTEXCOORD1XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD2XOESPROC) (GLfixed s, GLfixed t);
+typedef void (APIENTRYP PFNGLTEXCOORD2XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD3XOESPROC) (GLfixed s, GLfixed t, GLfixed r);
+typedef void (APIENTRYP PFNGLTEXCOORD3XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLTEXCOORD4XOESPROC) (GLfixed s, GLfixed t, GLfixed r, GLfixed q);
+typedef void (APIENTRYP PFNGLTEXCOORD4XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLTEXGENXOESPROC) (GLenum coord, GLenum pname, GLfixed param);
+typedef void (APIENTRYP PFNGLTEXGENXVOESPROC) (GLenum coord, GLenum pname, const GLfixed *params);
+typedef void (APIENTRYP PFNGLVERTEX2XOESPROC) (GLfixed x);
+typedef void (APIENTRYP PFNGLVERTEX2XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLVERTEX3XOESPROC) (GLfixed x, GLfixed y);
+typedef void (APIENTRYP PFNGLVERTEX3XVOESPROC) (const GLfixed *coords);
+typedef void (APIENTRYP PFNGLVERTEX4XOESPROC) (GLfixed x, GLfixed y, GLfixed z);
+typedef void (APIENTRYP PFNGLVERTEX4XVOESPROC) (const GLfixed *coords);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glAlphaFuncxOES (GLenum func, GLfixed ref);
+GLAPI void APIENTRY glClearColorxOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+GLAPI void APIENTRY glClearDepthxOES (GLfixed depth);
+GLAPI void APIENTRY glClipPlanexOES (GLenum plane, const GLfixed *equation);
+GLAPI void APIENTRY glColor4xOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+GLAPI void APIENTRY glDepthRangexOES (GLfixed n, GLfixed f);
+GLAPI void APIENTRY glFogxOES (GLenum pname, GLfixed param);
+GLAPI void APIENTRY glFogxvOES (GLenum pname, const GLfixed *param);
+GLAPI void APIENTRY glFrustumxOES (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f);
+GLAPI void APIENTRY glGetClipPlanexOES (GLenum plane, GLfixed *equation);
+GLAPI void APIENTRY glGetFixedvOES (GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glGetTexEnvxvOES (GLenum target, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glGetTexParameterxvOES (GLenum target, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glLightModelxOES (GLenum pname, GLfixed param);
+GLAPI void APIENTRY glLightModelxvOES (GLenum pname, const GLfixed *param);
+GLAPI void APIENTRY glLightxOES (GLenum light, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glLightxvOES (GLenum light, GLenum pname, const GLfixed *params);
+GLAPI void APIENTRY glLineWidthxOES (GLfixed width);
+GLAPI void APIENTRY glLoadMatrixxOES (const GLfixed *m);
+GLAPI void APIENTRY glMaterialxOES (GLenum face, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glMaterialxvOES (GLenum face, GLenum pname, const GLfixed *param);
+GLAPI void APIENTRY glMultMatrixxOES (const GLfixed *m);
+GLAPI void APIENTRY glMultiTexCoord4xOES (GLenum texture, GLfixed s, GLfixed t, GLfixed r, GLfixed q);
+GLAPI void APIENTRY glNormal3xOES (GLfixed nx, GLfixed ny, GLfixed nz);
+GLAPI void APIENTRY glOrthoxOES (GLfixed l, GLfixed r, GLfixed b, GLfixed t, GLfixed n, GLfixed f);
+GLAPI void APIENTRY glPointParameterxvOES (GLenum pname, const GLfixed *params);
+GLAPI void APIENTRY glPointSizexOES (GLfixed size);
+GLAPI void APIENTRY glPolygonOffsetxOES (GLfixed factor, GLfixed units);
+GLAPI void APIENTRY glRotatexOES (GLfixed angle, GLfixed x, GLfixed y, GLfixed z);
+GLAPI void APIENTRY glScalexOES (GLfixed x, GLfixed y, GLfixed z);
+GLAPI void APIENTRY glTexEnvxOES (GLenum target, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glTexEnvxvOES (GLenum target, GLenum pname, const GLfixed *params);
+GLAPI void APIENTRY glTexParameterxOES (GLenum target, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glTexParameterxvOES (GLenum target, GLenum pname, const GLfixed *params);
+GLAPI void APIENTRY glTranslatexOES (GLfixed x, GLfixed y, GLfixed z);
+GLAPI void APIENTRY glAccumxOES (GLenum op, GLfixed value);
+GLAPI void APIENTRY glBitmapxOES (GLsizei width, GLsizei height, GLfixed xorig, GLfixed yorig, GLfixed xmove, GLfixed ymove, const GLubyte *bitmap);
+GLAPI void APIENTRY glBlendColorxOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+GLAPI void APIENTRY glClearAccumxOES (GLfixed red, GLfixed green, GLfixed blue, GLfixed alpha);
+GLAPI void APIENTRY glColor3xOES (GLfixed red, GLfixed green, GLfixed blue);
+GLAPI void APIENTRY glColor3xvOES (const GLfixed *components);
+GLAPI void APIENTRY glColor4xvOES (const GLfixed *components);
+GLAPI void APIENTRY glConvolutionParameterxOES (GLenum target, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glConvolutionParameterxvOES (GLenum target, GLenum pname, const GLfixed *params);
+GLAPI void APIENTRY glEvalCoord1xOES (GLfixed u);
+GLAPI void APIENTRY glEvalCoord1xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glEvalCoord2xOES (GLfixed u, GLfixed v);
+GLAPI void APIENTRY glEvalCoord2xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glFeedbackBufferxOES (GLsizei n, GLenum type, const GLfixed *buffer);
+GLAPI void APIENTRY glGetConvolutionParameterxvOES (GLenum target, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glGetHistogramParameterxvOES (GLenum target, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glGetLightxOES (GLenum light, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glGetMapxvOES (GLenum target, GLenum query, GLfixed *v);
+GLAPI void APIENTRY glGetMaterialxOES (GLenum face, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glGetPixelMapxv (GLenum map, GLint size, GLfixed *values);
+GLAPI void APIENTRY glGetTexGenxvOES (GLenum coord, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glGetTexLevelParameterxvOES (GLenum target, GLint level, GLenum pname, GLfixed *params);
+GLAPI void APIENTRY glIndexxOES (GLfixed component);
+GLAPI void APIENTRY glIndexxvOES (const GLfixed *component);
+GLAPI void APIENTRY glLoadTransposeMatrixxOES (const GLfixed *m);
+GLAPI void APIENTRY glMap1xOES (GLenum target, GLfixed u1, GLfixed u2, GLint stride, GLint order, GLfixed points);
+GLAPI void APIENTRY glMap2xOES (GLenum target, GLfixed u1, GLfixed u2, GLint ustride, GLint uorder, GLfixed v1, GLfixed v2, GLint vstride, GLint vorder, GLfixed points);
+GLAPI void APIENTRY glMapGrid1xOES (GLint n, GLfixed u1, GLfixed u2);
+GLAPI void APIENTRY glMapGrid2xOES (GLint n, GLfixed u1, GLfixed u2, GLfixed v1, GLfixed v2);
+GLAPI void APIENTRY glMultTransposeMatrixxOES (const GLfixed *m);
+GLAPI void APIENTRY glMultiTexCoord1xOES (GLenum texture, GLfixed s);
+GLAPI void APIENTRY glMultiTexCoord1xvOES (GLenum texture, const GLfixed *coords);
+GLAPI void APIENTRY glMultiTexCoord2xOES (GLenum texture, GLfixed s, GLfixed t);
+GLAPI void APIENTRY glMultiTexCoord2xvOES (GLenum texture, const GLfixed *coords);
+GLAPI void APIENTRY glMultiTexCoord3xOES (GLenum texture, GLfixed s, GLfixed t, GLfixed r);
+GLAPI void APIENTRY glMultiTexCoord3xvOES (GLenum texture, const GLfixed *coords);
+GLAPI void APIENTRY glMultiTexCoord4xvOES (GLenum texture, const GLfixed *coords);
+GLAPI void APIENTRY glNormal3xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glPassThroughxOES (GLfixed token);
+GLAPI void APIENTRY glPixelMapx (GLenum map, GLint size, const GLfixed *values);
+GLAPI void APIENTRY glPixelStorex (GLenum pname, GLfixed param);
+GLAPI void APIENTRY glPixelTransferxOES (GLenum pname, GLfixed param);
+GLAPI void APIENTRY glPixelZoomxOES (GLfixed xfactor, GLfixed yfactor);
+GLAPI void APIENTRY glPrioritizeTexturesxOES (GLsizei n, const GLuint *textures, const GLfixed *priorities);
+GLAPI void APIENTRY glRasterPos2xOES (GLfixed x, GLfixed y);
+GLAPI void APIENTRY glRasterPos2xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glRasterPos3xOES (GLfixed x, GLfixed y, GLfixed z);
+GLAPI void APIENTRY glRasterPos3xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glRasterPos4xOES (GLfixed x, GLfixed y, GLfixed z, GLfixed w);
+GLAPI void APIENTRY glRasterPos4xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glRectxOES (GLfixed x1, GLfixed y1, GLfixed x2, GLfixed y2);
+GLAPI void APIENTRY glRectxvOES (const GLfixed *v1, const GLfixed *v2);
+GLAPI void APIENTRY glTexCoord1xOES (GLfixed s);
+GLAPI void APIENTRY glTexCoord1xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glTexCoord2xOES (GLfixed s, GLfixed t);
+GLAPI void APIENTRY glTexCoord2xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glTexCoord3xOES (GLfixed s, GLfixed t, GLfixed r);
+GLAPI void APIENTRY glTexCoord3xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glTexCoord4xOES (GLfixed s, GLfixed t, GLfixed r, GLfixed q);
+GLAPI void APIENTRY glTexCoord4xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glTexGenxOES (GLenum coord, GLenum pname, GLfixed param);
+GLAPI void APIENTRY glTexGenxvOES (GLenum coord, GLenum pname, const GLfixed *params);
+GLAPI void APIENTRY glVertex2xOES (GLfixed x);
+GLAPI void APIENTRY glVertex2xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glVertex3xOES (GLfixed x, GLfixed y);
+GLAPI void APIENTRY glVertex3xvOES (const GLfixed *coords);
+GLAPI void APIENTRY glVertex4xOES (GLfixed x, GLfixed y, GLfixed z);
+GLAPI void APIENTRY glVertex4xvOES (const GLfixed *coords);
+#endif
+#endif /* GL_OES_fixed_point */
+
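+/*
+ * Illustrative sketch, not part of glext.h: GLfixed, defined above as
+ * khronos_int32_t, is a signed 16.16 fixed-point value, so conversion to and
+ * from float is a scale by 65536.
+ */
+static GLfixed float_to_fixed(float f)   { return (GLfixed)(f * 65536.0f); }
+static float   fixed_to_float(GLfixed x) { return (float)x / 65536.0f; }
+
+/* Example: glClearColorxOES(float_to_fixed(0.25f), 0, 0, float_to_fixed(1.0f)); */
+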
+#ifndef GL_OES_query_matrix
+#define GL_OES_query_matrix 1
+typedef GLbitfield (APIENTRYP PFNGLQUERYMATRIXXOESPROC) (GLfixed *mantissa, GLint *exponent);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLbitfield APIENTRY glQueryMatrixxOES (GLfixed *mantissa, GLint *exponent);
+#endif
+#endif /* GL_OES_query_matrix */
+
+#ifndef GL_OES_read_format
+#define GL_OES_read_format 1
+#define GL_IMPLEMENTATION_COLOR_READ_TYPE_OES 0x8B9A
+#define GL_IMPLEMENTATION_COLOR_READ_FORMAT_OES 0x8B9B
+#endif /* GL_OES_read_format */
+
+#ifndef GL_OES_single_precision
+#define GL_OES_single_precision 1
+typedef void (APIENTRYP PFNGLCLEARDEPTHFOESPROC) (GLclampf depth);
+typedef void (APIENTRYP PFNGLCLIPPLANEFOESPROC) (GLenum plane, const GLfloat *equation);
+typedef void (APIENTRYP PFNGLDEPTHRANGEFOESPROC) (GLclampf n, GLclampf f);
+typedef void (APIENTRYP PFNGLFRUSTUMFOESPROC) (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f);
+typedef void (APIENTRYP PFNGLGETCLIPPLANEFOESPROC) (GLenum plane, GLfloat *equation);
+typedef void (APIENTRYP PFNGLORTHOFOESPROC) (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glClearDepthfOES (GLclampf depth);
+GLAPI void APIENTRY glClipPlanefOES (GLenum plane, const GLfloat *equation);
+GLAPI void APIENTRY glDepthRangefOES (GLclampf n, GLclampf f);
+GLAPI void APIENTRY glFrustumfOES (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f);
+GLAPI void APIENTRY glGetClipPlanefOES (GLenum plane, GLfloat *equation);
+GLAPI void APIENTRY glOrthofOES (GLfloat l, GLfloat r, GLfloat b, GLfloat t, GLfloat n, GLfloat f);
+#endif
+#endif /* GL_OES_single_precision */
+
+#ifndef GL_3DFX_multisample
+#define GL_3DFX_multisample 1
+#define GL_MULTISAMPLE_3DFX 0x86B2
+#define GL_SAMPLE_BUFFERS_3DFX 0x86B3
+#define GL_SAMPLES_3DFX 0x86B4
+#define GL_MULTISAMPLE_BIT_3DFX 0x20000000
+#endif /* GL_3DFX_multisample */
+
+#ifndef GL_3DFX_tbuffer
+#define GL_3DFX_tbuffer 1
+typedef void (APIENTRYP PFNGLTBUFFERMASK3DFXPROC) (GLuint mask);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTbufferMask3DFX (GLuint mask);
+#endif
+#endif /* GL_3DFX_tbuffer */
+
+#ifndef GL_3DFX_texture_compression_FXT1
+#define GL_3DFX_texture_compression_FXT1 1
+#define GL_COMPRESSED_RGB_FXT1_3DFX 0x86B0
+#define GL_COMPRESSED_RGBA_FXT1_3DFX 0x86B1
+#endif /* GL_3DFX_texture_compression_FXT1 */
+
+#ifndef GL_AMD_blend_minmax_factor
+#define GL_AMD_blend_minmax_factor 1
+#define GL_FACTOR_MIN_AMD 0x901C
+#define GL_FACTOR_MAX_AMD 0x901D
+#endif /* GL_AMD_blend_minmax_factor */
+
+#ifndef GL_AMD_conservative_depth
+#define GL_AMD_conservative_depth 1
+#endif /* GL_AMD_conservative_depth */
+
+#ifndef GL_AMD_debug_output
+#define GL_AMD_debug_output 1
+typedef void (APIENTRY *GLDEBUGPROCAMD)(GLuint id, GLenum category, GLenum severity, GLsizei length, const GLchar *message, void *userParam);
+#define GL_MAX_DEBUG_MESSAGE_LENGTH_AMD 0x9143
+#define GL_MAX_DEBUG_LOGGED_MESSAGES_AMD 0x9144
+#define GL_DEBUG_LOGGED_MESSAGES_AMD 0x9145
+#define GL_DEBUG_SEVERITY_HIGH_AMD 0x9146
+#define GL_DEBUG_SEVERITY_MEDIUM_AMD 0x9147
+#define GL_DEBUG_SEVERITY_LOW_AMD 0x9148
+#define GL_DEBUG_CATEGORY_API_ERROR_AMD 0x9149
+#define GL_DEBUG_CATEGORY_WINDOW_SYSTEM_AMD 0x914A
+#define GL_DEBUG_CATEGORY_DEPRECATION_AMD 0x914B
+#define GL_DEBUG_CATEGORY_UNDEFINED_BEHAVIOR_AMD 0x914C
+#define GL_DEBUG_CATEGORY_PERFORMANCE_AMD 0x914D
+#define GL_DEBUG_CATEGORY_SHADER_COMPILER_AMD 0x914E
+#define GL_DEBUG_CATEGORY_APPLICATION_AMD 0x914F
+#define GL_DEBUG_CATEGORY_OTHER_AMD 0x9150
+typedef void (APIENTRYP PFNGLDEBUGMESSAGEENABLEAMDPROC) (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGEINSERTAMDPROC) (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf);
+typedef void (APIENTRYP PFNGLDEBUGMESSAGECALLBACKAMDPROC) (GLDEBUGPROCAMD callback, void *userParam);
+typedef GLuint (APIENTRYP PFNGLGETDEBUGMESSAGELOGAMDPROC) (GLuint count, GLsizei bufsize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDebugMessageEnableAMD (GLenum category, GLenum severity, GLsizei count, const GLuint *ids, GLboolean enabled);
+GLAPI void APIENTRY glDebugMessageInsertAMD (GLenum category, GLenum severity, GLuint id, GLsizei length, const GLchar *buf);
+GLAPI void APIENTRY glDebugMessageCallbackAMD (GLDEBUGPROCAMD callback, void *userParam);
+GLAPI GLuint APIENTRY glGetDebugMessageLogAMD (GLuint count, GLsizei bufsize, GLenum *categories, GLuint *severities, GLuint *ids, GLsizei *lengths, GLchar *message);
+#endif
+#endif /* GL_AMD_debug_output */
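+
+/* Illustrative usage sketch (not part of the Khronos header): registering an
+ * AMD debug callback. Assumes the extension is advertised, the entry points
+ * were resolved through the platform's GetProcAddress mechanism, and
+ * <stdio.h> for fprintf.
+ *
+ *   static void APIENTRY onDebugAMD(GLuint id, GLenum category, GLenum severity,
+ *                                   GLsizei length, const GLchar *message,
+ *                                   void *userParam)
+ *   {
+ *       (void)userParam;
+ *       fprintf(stderr, "[GL id=%u cat=0x%X sev=0x%X] %.*s\n",
+ *               (unsigned)id, category, severity, (int)length, message);
+ *   }
+ *
+ *   glDebugMessageCallbackAMD(onDebugAMD, NULL);
+ *   // A category/severity of 0 acts as a "don't care" wildcard here
+ *   // (see the GL_AMD_debug_output extension spec):
+ *   glDebugMessageEnableAMD(0, 0, 0, NULL, GL_TRUE);
+ */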
+
+#ifndef GL_AMD_depth_clamp_separate
+#define GL_AMD_depth_clamp_separate 1
+#define GL_DEPTH_CLAMP_NEAR_AMD 0x901E
+#define GL_DEPTH_CLAMP_FAR_AMD 0x901F
+#endif /* GL_AMD_depth_clamp_separate */
+
+#ifndef GL_AMD_draw_buffers_blend
+#define GL_AMD_draw_buffers_blend 1
+typedef void (APIENTRYP PFNGLBLENDFUNCINDEXEDAMDPROC) (GLuint buf, GLenum src, GLenum dst);
+typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEINDEXEDAMDPROC) (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+typedef void (APIENTRYP PFNGLBLENDEQUATIONINDEXEDAMDPROC) (GLuint buf, GLenum mode);
+typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEINDEXEDAMDPROC) (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendFuncIndexedAMD (GLuint buf, GLenum src, GLenum dst);
+GLAPI void APIENTRY glBlendFuncSeparateIndexedAMD (GLuint buf, GLenum srcRGB, GLenum dstRGB, GLenum srcAlpha, GLenum dstAlpha);
+GLAPI void APIENTRY glBlendEquationIndexedAMD (GLuint buf, GLenum mode);
+GLAPI void APIENTRY glBlendEquationSeparateIndexedAMD (GLuint buf, GLenum modeRGB, GLenum modeAlpha);
+#endif
+#endif /* GL_AMD_draw_buffers_blend */
+
+#ifndef GL_AMD_framebuffer_multisample_advanced
+#define GL_AMD_framebuffer_multisample_advanced 1
+#define GL_RENDERBUFFER_STORAGE_SAMPLES_AMD 0x91B2
+#define GL_MAX_COLOR_FRAMEBUFFER_SAMPLES_AMD 0x91B3
+#define GL_MAX_COLOR_FRAMEBUFFER_STORAGE_SAMPLES_AMD 0x91B4
+#define GL_MAX_DEPTH_STENCIL_FRAMEBUFFER_SAMPLES_AMD 0x91B5
+#define GL_NUM_SUPPORTED_MULTISAMPLE_MODES_AMD 0x91B6
+#define GL_SUPPORTED_MULTISAMPLE_MODES_AMD 0x91B7
+typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEADVANCEDAMDPROC) (GLenum target, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEADVANCEDAMDPROC) (GLuint renderbuffer, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glRenderbufferStorageMultisampleAdvancedAMD (GLenum target, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glNamedRenderbufferStorageMultisampleAdvancedAMD (GLuint renderbuffer, GLsizei samples, GLsizei storageSamples, GLenum internalformat, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_AMD_framebuffer_multisample_advanced */
+
+#ifndef GL_AMD_framebuffer_sample_positions
+#define GL_AMD_framebuffer_sample_positions 1
+#define GL_SUBSAMPLE_DISTANCE_AMD 0x883F
+#define GL_PIXELS_PER_SAMPLE_PATTERN_X_AMD 0x91AE
+#define GL_PIXELS_PER_SAMPLE_PATTERN_Y_AMD 0x91AF
+#define GL_ALL_PIXELS_AMD 0xFFFFFFFF
+typedef void (APIENTRYP PFNGLFRAMEBUFFERSAMPLEPOSITIONSFVAMDPROC) (GLenum target, GLuint numsamples, GLuint pixelindex, const GLfloat *values);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERSAMPLEPOSITIONSFVAMDPROC) (GLuint framebuffer, GLuint numsamples, GLuint pixelindex, const GLfloat *values);
+typedef void (APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERFVAMDPROC) (GLenum target, GLenum pname, GLuint numsamples, GLuint pixelindex, GLsizei size, GLfloat *values);
+typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERPARAMETERFVAMDPROC) (GLuint framebuffer, GLenum pname, GLuint numsamples, GLuint pixelindex, GLsizei size, GLfloat *values);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferSamplePositionsfvAMD (GLenum target, GLuint numsamples, GLuint pixelindex, const GLfloat *values);
+GLAPI void APIENTRY glNamedFramebufferSamplePositionsfvAMD (GLuint framebuffer, GLuint numsamples, GLuint pixelindex, const GLfloat *values);
+GLAPI void APIENTRY glGetFramebufferParameterfvAMD (GLenum target, GLenum pname, GLuint numsamples, GLuint pixelindex, GLsizei size, GLfloat *values);
+GLAPI void APIENTRY glGetNamedFramebufferParameterfvAMD (GLuint framebuffer, GLenum pname, GLuint numsamples, GLuint pixelindex, GLsizei size, GLfloat *values);
+#endif
+#endif /* GL_AMD_framebuffer_sample_positions */
+
+#ifndef GL_AMD_gcn_shader
+#define GL_AMD_gcn_shader 1
+#endif /* GL_AMD_gcn_shader */
+
+#ifndef GL_AMD_gpu_shader_half_float
+#define GL_AMD_gpu_shader_half_float 1
+#define GL_FLOAT16_NV 0x8FF8
+#define GL_FLOAT16_VEC2_NV 0x8FF9
+#define GL_FLOAT16_VEC3_NV 0x8FFA
+#define GL_FLOAT16_VEC4_NV 0x8FFB
+#define GL_FLOAT16_MAT2_AMD 0x91C5
+#define GL_FLOAT16_MAT3_AMD 0x91C6
+#define GL_FLOAT16_MAT4_AMD 0x91C7
+#define GL_FLOAT16_MAT2x3_AMD 0x91C8
+#define GL_FLOAT16_MAT2x4_AMD 0x91C9
+#define GL_FLOAT16_MAT3x2_AMD 0x91CA
+#define GL_FLOAT16_MAT3x4_AMD 0x91CB
+#define GL_FLOAT16_MAT4x2_AMD 0x91CC
+#define GL_FLOAT16_MAT4x3_AMD 0x91CD
+#endif /* GL_AMD_gpu_shader_half_float */
+
+#ifndef GL_AMD_gpu_shader_int16
+#define GL_AMD_gpu_shader_int16 1
+#endif /* GL_AMD_gpu_shader_int16 */
+
+#ifndef GL_AMD_gpu_shader_int64
+#define GL_AMD_gpu_shader_int64 1
+typedef khronos_int64_t GLint64EXT;
+#define GL_INT64_NV 0x140E
+#define GL_UNSIGNED_INT64_NV 0x140F
+#define GL_INT8_NV 0x8FE0
+#define GL_INT8_VEC2_NV 0x8FE1
+#define GL_INT8_VEC3_NV 0x8FE2
+#define GL_INT8_VEC4_NV 0x8FE3
+#define GL_INT16_NV 0x8FE4
+#define GL_INT16_VEC2_NV 0x8FE5
+#define GL_INT16_VEC3_NV 0x8FE6
+#define GL_INT16_VEC4_NV 0x8FE7
+#define GL_INT64_VEC2_NV 0x8FE9
+#define GL_INT64_VEC3_NV 0x8FEA
+#define GL_INT64_VEC4_NV 0x8FEB
+#define GL_UNSIGNED_INT8_NV 0x8FEC
+#define GL_UNSIGNED_INT8_VEC2_NV 0x8FED
+#define GL_UNSIGNED_INT8_VEC3_NV 0x8FEE
+#define GL_UNSIGNED_INT8_VEC4_NV 0x8FEF
+#define GL_UNSIGNED_INT16_NV 0x8FF0
+#define GL_UNSIGNED_INT16_VEC2_NV 0x8FF1
+#define GL_UNSIGNED_INT16_VEC3_NV 0x8FF2
+#define GL_UNSIGNED_INT16_VEC4_NV 0x8FF3
+#define GL_UNSIGNED_INT64_VEC2_NV 0x8FF5
+#define GL_UNSIGNED_INT64_VEC3_NV 0x8FF6
+#define GL_UNSIGNED_INT64_VEC4_NV 0x8FF7
+typedef void (APIENTRYP PFNGLUNIFORM1I64NVPROC) (GLint location, GLint64EXT x);
+typedef void (APIENTRYP PFNGLUNIFORM2I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y);
+typedef void (APIENTRYP PFNGLUNIFORM3I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (APIENTRYP PFNGLUNIFORM4I64NVPROC) (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (APIENTRYP PFNGLUNIFORM1I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM2I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM3I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM4I64VNVPROC) (GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM1UI64NVPROC) (GLint location, GLuint64EXT x);
+typedef void (APIENTRYP PFNGLUNIFORM2UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y);
+typedef void (APIENTRYP PFNGLUNIFORM3UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (APIENTRYP PFNGLUNIFORM4UI64NVPROC) (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (APIENTRYP PFNGLUNIFORM1UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM2UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM3UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLUNIFORM4UI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLGETUNIFORMI64VNVPROC) (GLuint program, GLint location, GLint64EXT *params);
+typedef void (APIENTRYP PFNGLGETUNIFORMUI64VNVPROC) (GLuint program, GLint location, GLuint64EXT *params);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1I64NVPROC) (GLuint program, GLint location, GLint64EXT x);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4I64NVPROC) (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4I64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UI64NVPROC) (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glUniform1i64NV (GLint location, GLint64EXT x);
+GLAPI void APIENTRY glUniform2i64NV (GLint location, GLint64EXT x, GLint64EXT y);
+GLAPI void APIENTRY glUniform3i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GLAPI void APIENTRY glUniform4i64NV (GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GLAPI void APIENTRY glUniform1i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glUniform2i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glUniform3i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glUniform4i64vNV (GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glUniform1ui64NV (GLint location, GLuint64EXT x);
+GLAPI void APIENTRY glUniform2ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y);
+GLAPI void APIENTRY glUniform3ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GLAPI void APIENTRY glUniform4ui64NV (GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GLAPI void APIENTRY glUniform1ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glUniform2ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glUniform3ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glUniform4ui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glGetUniformi64vNV (GLuint program, GLint location, GLint64EXT *params);
+GLAPI void APIENTRY glGetUniformui64vNV (GLuint program, GLint location, GLuint64EXT *params);
+GLAPI void APIENTRY glProgramUniform1i64NV (GLuint program, GLint location, GLint64EXT x);
+GLAPI void APIENTRY glProgramUniform2i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y);
+GLAPI void APIENTRY glProgramUniform3i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GLAPI void APIENTRY glProgramUniform4i64NV (GLuint program, GLint location, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GLAPI void APIENTRY glProgramUniform1i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glProgramUniform2i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glProgramUniform3i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glProgramUniform4i64vNV (GLuint program, GLint location, GLsizei count, const GLint64EXT *value);
+GLAPI void APIENTRY glProgramUniform1ui64NV (GLuint program, GLint location, GLuint64EXT x);
+GLAPI void APIENTRY glProgramUniform2ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y);
+GLAPI void APIENTRY glProgramUniform3ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GLAPI void APIENTRY glProgramUniform4ui64NV (GLuint program, GLint location, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GLAPI void APIENTRY glProgramUniform1ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glProgramUniform2ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glProgramUniform3ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glProgramUniform4ui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#endif
+#endif /* GL_AMD_gpu_shader_int64 */
+
+#ifndef GL_AMD_interleaved_elements
+#define GL_AMD_interleaved_elements 1
+#define GL_VERTEX_ELEMENT_SWIZZLE_AMD 0x91A4
+#define GL_VERTEX_ID_SWIZZLE_AMD 0x91A5
+typedef void (APIENTRYP PFNGLVERTEXATTRIBPARAMETERIAMDPROC) (GLuint index, GLenum pname, GLint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttribParameteriAMD (GLuint index, GLenum pname, GLint param);
+#endif
+#endif /* GL_AMD_interleaved_elements */
+
+#ifndef GL_AMD_multi_draw_indirect
+#define GL_AMD_multi_draw_indirect 1
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTAMDPROC) (GLenum mode, const void *indirect, GLsizei primcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTAMDPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei primcount, GLsizei stride);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiDrawArraysIndirectAMD (GLenum mode, const void *indirect, GLsizei primcount, GLsizei stride);
+GLAPI void APIENTRY glMultiDrawElementsIndirectAMD (GLenum mode, GLenum type, const void *indirect, GLsizei primcount, GLsizei stride);
+#endif
+#endif /* GL_AMD_multi_draw_indirect */
+
+#ifndef GL_AMD_name_gen_delete
+#define GL_AMD_name_gen_delete 1
+#define GL_DATA_BUFFER_AMD 0x9151
+#define GL_PERFORMANCE_MONITOR_AMD 0x9152
+#define GL_QUERY_OBJECT_AMD 0x9153
+#define GL_VERTEX_ARRAY_OBJECT_AMD 0x9154
+#define GL_SAMPLER_OBJECT_AMD 0x9155
+typedef void (APIENTRYP PFNGLGENNAMESAMDPROC) (GLenum identifier, GLuint num, GLuint *names);
+typedef void (APIENTRYP PFNGLDELETENAMESAMDPROC) (GLenum identifier, GLuint num, const GLuint *names);
+typedef GLboolean (APIENTRYP PFNGLISNAMEAMDPROC) (GLenum identifier, GLuint name);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenNamesAMD (GLenum identifier, GLuint num, GLuint *names);
+GLAPI void APIENTRY glDeleteNamesAMD (GLenum identifier, GLuint num, const GLuint *names);
+GLAPI GLboolean APIENTRY glIsNameAMD (GLenum identifier, GLuint name);
+#endif
+#endif /* GL_AMD_name_gen_delete */
+
+#ifndef GL_AMD_occlusion_query_event
+#define GL_AMD_occlusion_query_event 1
+#define GL_OCCLUSION_QUERY_EVENT_MASK_AMD 0x874F
+#define GL_QUERY_DEPTH_PASS_EVENT_BIT_AMD 0x00000001
+#define GL_QUERY_DEPTH_FAIL_EVENT_BIT_AMD 0x00000002
+#define GL_QUERY_STENCIL_FAIL_EVENT_BIT_AMD 0x00000004
+#define GL_QUERY_DEPTH_BOUNDS_FAIL_EVENT_BIT_AMD 0x00000008
+#define GL_QUERY_ALL_EVENT_BITS_AMD 0xFFFFFFFF
+typedef void (APIENTRYP PFNGLQUERYOBJECTPARAMETERUIAMDPROC) (GLenum target, GLuint id, GLenum pname, GLuint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glQueryObjectParameteruiAMD (GLenum target, GLuint id, GLenum pname, GLuint param);
+#endif
+#endif /* GL_AMD_occlusion_query_event */
+
+#ifndef GL_AMD_performance_monitor
+#define GL_AMD_performance_monitor 1
+#define GL_COUNTER_TYPE_AMD 0x8BC0
+#define GL_COUNTER_RANGE_AMD 0x8BC1
+#define GL_UNSIGNED_INT64_AMD 0x8BC2
+#define GL_PERCENTAGE_AMD 0x8BC3
+#define GL_PERFMON_RESULT_AVAILABLE_AMD 0x8BC4
+#define GL_PERFMON_RESULT_SIZE_AMD 0x8BC5
+#define GL_PERFMON_RESULT_AMD 0x8BC6
+typedef void (APIENTRYP PFNGLGETPERFMONITORGROUPSAMDPROC) (GLint *numGroups, GLsizei groupsSize, GLuint *groups);
+typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERSAMDPROC) (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters);
+typedef void (APIENTRYP PFNGLGETPERFMONITORGROUPSTRINGAMDPROC) (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString);
+typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERSTRINGAMDPROC) (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString);
+typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERINFOAMDPROC) (GLuint group, GLuint counter, GLenum pname, void *data);
+typedef void (APIENTRYP PFNGLGENPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors);
+typedef void (APIENTRYP PFNGLDELETEPERFMONITORSAMDPROC) (GLsizei n, GLuint *monitors);
+typedef void (APIENTRYP PFNGLSELECTPERFMONITORCOUNTERSAMDPROC) (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList);
+typedef void (APIENTRYP PFNGLBEGINPERFMONITORAMDPROC) (GLuint monitor);
+typedef void (APIENTRYP PFNGLENDPERFMONITORAMDPROC) (GLuint monitor);
+typedef void (APIENTRYP PFNGLGETPERFMONITORCOUNTERDATAAMDPROC) (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetPerfMonitorGroupsAMD (GLint *numGroups, GLsizei groupsSize, GLuint *groups);
+GLAPI void APIENTRY glGetPerfMonitorCountersAMD (GLuint group, GLint *numCounters, GLint *maxActiveCounters, GLsizei counterSize, GLuint *counters);
+GLAPI void APIENTRY glGetPerfMonitorGroupStringAMD (GLuint group, GLsizei bufSize, GLsizei *length, GLchar *groupString);
+GLAPI void APIENTRY glGetPerfMonitorCounterStringAMD (GLuint group, GLuint counter, GLsizei bufSize, GLsizei *length, GLchar *counterString);
+GLAPI void APIENTRY glGetPerfMonitorCounterInfoAMD (GLuint group, GLuint counter, GLenum pname, void *data);
+GLAPI void APIENTRY glGenPerfMonitorsAMD (GLsizei n, GLuint *monitors);
+GLAPI void APIENTRY glDeletePerfMonitorsAMD (GLsizei n, GLuint *monitors);
+GLAPI void APIENTRY glSelectPerfMonitorCountersAMD (GLuint monitor, GLboolean enable, GLuint group, GLint numCounters, GLuint *counterList);
+GLAPI void APIENTRY glBeginPerfMonitorAMD (GLuint monitor);
+GLAPI void APIENTRY glEndPerfMonitorAMD (GLuint monitor);
+GLAPI void APIENTRY glGetPerfMonitorCounterDataAMD (GLuint monitor, GLenum pname, GLsizei dataSize, GLuint *data, GLint *bytesWritten);
+#endif
+#endif /* GL_AMD_performance_monitor */
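+
+/* Illustrative usage sketch (not part of the Khronos header): the typical
+ * GL_AMD_performance_monitor round trip -- enumerate a group and counter,
+ * select it on a monitor object, bracket the workload, then read results.
+ *
+ *   GLuint monitor, group = 0, counter = 0;
+ *   GLint numGroups = 0, numCounters = 0, maxActive = 0;
+ *   glGetPerfMonitorGroupsAMD(&numGroups, 1, &group);
+ *   glGetPerfMonitorCountersAMD(group, &numCounters, &maxActive, 1, &counter);
+ *   glGenPerfMonitorsAMD(1, &monitor);
+ *   glSelectPerfMonitorCountersAMD(monitor, GL_TRUE, group, 1, &counter);
+ *   glBeginPerfMonitorAMD(monitor);
+ *   // ... issue the GL commands to be measured ...
+ *   glEndPerfMonitorAMD(monitor);
+ *   GLuint avail = 0, data[64];
+ *   GLint written = 0;
+ *   glGetPerfMonitorCounterDataAMD(monitor, GL_PERFMON_RESULT_AVAILABLE_AMD,
+ *                                  sizeof(avail), &avail, NULL);
+ *   if (avail)
+ *       glGetPerfMonitorCounterDataAMD(monitor, GL_PERFMON_RESULT_AMD,
+ *                                      sizeof(data), data, &written);
+ */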
+
+#ifndef GL_AMD_pinned_memory
+#define GL_AMD_pinned_memory 1
+#define GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD 0x9160
+#endif /* GL_AMD_pinned_memory */
+
+#ifndef GL_AMD_query_buffer_object
+#define GL_AMD_query_buffer_object 1
+#define GL_QUERY_BUFFER_AMD 0x9192
+#define GL_QUERY_BUFFER_BINDING_AMD 0x9193
+#define GL_QUERY_RESULT_NO_WAIT_AMD 0x9194
+#endif /* GL_AMD_query_buffer_object */
+
+#ifndef GL_AMD_sample_positions
+#define GL_AMD_sample_positions 1
+typedef void (APIENTRYP PFNGLSETMULTISAMPLEFVAMDPROC) (GLenum pname, GLuint index, const GLfloat *val);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSetMultisamplefvAMD (GLenum pname, GLuint index, const GLfloat *val);
+#endif
+#endif /* GL_AMD_sample_positions */
+
+#ifndef GL_AMD_seamless_cubemap_per_texture
+#define GL_AMD_seamless_cubemap_per_texture 1
+#endif /* GL_AMD_seamless_cubemap_per_texture */
+
+#ifndef GL_AMD_shader_atomic_counter_ops
+#define GL_AMD_shader_atomic_counter_ops 1
+#endif /* GL_AMD_shader_atomic_counter_ops */
+
+#ifndef GL_AMD_shader_ballot
+#define GL_AMD_shader_ballot 1
+#endif /* GL_AMD_shader_ballot */
+
+#ifndef GL_AMD_shader_explicit_vertex_parameter
+#define GL_AMD_shader_explicit_vertex_parameter 1
+#endif /* GL_AMD_shader_explicit_vertex_parameter */
+
+#ifndef GL_AMD_shader_gpu_shader_half_float_fetch
+#define GL_AMD_shader_gpu_shader_half_float_fetch 1
+#endif /* GL_AMD_shader_gpu_shader_half_float_fetch */
+
+#ifndef GL_AMD_shader_image_load_store_lod
+#define GL_AMD_shader_image_load_store_lod 1
+#endif /* GL_AMD_shader_image_load_store_lod */
+
+#ifndef GL_AMD_shader_stencil_export
+#define GL_AMD_shader_stencil_export 1
+#endif /* GL_AMD_shader_stencil_export */
+
+#ifndef GL_AMD_shader_trinary_minmax
+#define GL_AMD_shader_trinary_minmax 1
+#endif /* GL_AMD_shader_trinary_minmax */
+
+#ifndef GL_AMD_sparse_texture
+#define GL_AMD_sparse_texture 1
+#define GL_VIRTUAL_PAGE_SIZE_X_AMD 0x9195
+#define GL_VIRTUAL_PAGE_SIZE_Y_AMD 0x9196
+#define GL_VIRTUAL_PAGE_SIZE_Z_AMD 0x9197
+#define GL_MAX_SPARSE_TEXTURE_SIZE_AMD 0x9198
+#define GL_MAX_SPARSE_3D_TEXTURE_SIZE_AMD 0x9199
+#define GL_MAX_SPARSE_ARRAY_TEXTURE_LAYERS 0x919A
+#define GL_MIN_SPARSE_LEVEL_AMD 0x919B
+#define GL_MIN_LOD_WARNING_AMD 0x919C
+#define GL_TEXTURE_STORAGE_SPARSE_BIT_AMD 0x00000001
+typedef void (APIENTRYP PFNGLTEXSTORAGESPARSEAMDPROC) (GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGESPARSEAMDPROC) (GLuint texture, GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexStorageSparseAMD (GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags);
+GLAPI void APIENTRY glTextureStorageSparseAMD (GLuint texture, GLenum target, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLsizei layers, GLbitfield flags);
+#endif
+#endif /* GL_AMD_sparse_texture */
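+
+/* Illustrative usage sketch (not part of the Khronos header): reserving
+ * virtual storage for a sparse 2D texture; per the extension spec, pages are
+ * then committed by TexSubImage uploads (NULL data decommits). The texture
+ * name `tex` and the sizes are assumed application state.
+ *
+ *   glBindTexture(GL_TEXTURE_2D, tex);
+ *   glTexStorageSparseAMD(GL_TEXTURE_2D, GL_RGBA8,
+ *                         16384, 16384, 1, 1,  // width, height, depth, layers
+ *                         GL_TEXTURE_STORAGE_SPARSE_BIT_AMD);
+ */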
+
+#ifndef GL_AMD_stencil_operation_extended
+#define GL_AMD_stencil_operation_extended 1
+#define GL_SET_AMD 0x874A
+#define GL_REPLACE_VALUE_AMD 0x874B
+#define GL_STENCIL_OP_VALUE_AMD 0x874C
+#define GL_STENCIL_BACK_OP_VALUE_AMD 0x874D
+typedef void (APIENTRYP PFNGLSTENCILOPVALUEAMDPROC) (GLenum face, GLuint value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glStencilOpValueAMD (GLenum face, GLuint value);
+#endif
+#endif /* GL_AMD_stencil_operation_extended */
+
+#ifndef GL_AMD_texture_gather_bias_lod
+#define GL_AMD_texture_gather_bias_lod 1
+#endif /* GL_AMD_texture_gather_bias_lod */
+
+#ifndef GL_AMD_texture_texture4
+#define GL_AMD_texture_texture4 1
+#endif /* GL_AMD_texture_texture4 */
+
+#ifndef GL_AMD_transform_feedback3_lines_triangles
+#define GL_AMD_transform_feedback3_lines_triangles 1
+#endif /* GL_AMD_transform_feedback3_lines_triangles */
+
+#ifndef GL_AMD_transform_feedback4
+#define GL_AMD_transform_feedback4 1
+#define GL_STREAM_RASTERIZATION_AMD 0x91A0
+#endif /* GL_AMD_transform_feedback4 */
+
+#ifndef GL_AMD_vertex_shader_layer
+#define GL_AMD_vertex_shader_layer 1
+#endif /* GL_AMD_vertex_shader_layer */
+
+#ifndef GL_AMD_vertex_shader_tessellator
+#define GL_AMD_vertex_shader_tessellator 1
+#define GL_SAMPLER_BUFFER_AMD 0x9001
+#define GL_INT_SAMPLER_BUFFER_AMD 0x9002
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER_AMD 0x9003
+#define GL_TESSELLATION_MODE_AMD 0x9004
+#define GL_TESSELLATION_FACTOR_AMD 0x9005
+#define GL_DISCRETE_AMD 0x9006
+#define GL_CONTINUOUS_AMD 0x9007
+typedef void (APIENTRYP PFNGLTESSELLATIONFACTORAMDPROC) (GLfloat factor);
+typedef void (APIENTRYP PFNGLTESSELLATIONMODEAMDPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTessellationFactorAMD (GLfloat factor);
+GLAPI void APIENTRY glTessellationModeAMD (GLenum mode);
+#endif
+#endif /* GL_AMD_vertex_shader_tessellator */
+
+#ifndef GL_AMD_vertex_shader_viewport_index
+#define GL_AMD_vertex_shader_viewport_index 1
+#endif /* GL_AMD_vertex_shader_viewport_index */
+
+#ifndef GL_APPLE_aux_depth_stencil
+#define GL_APPLE_aux_depth_stencil 1
+#define GL_AUX_DEPTH_STENCIL_APPLE 0x8A14
+#endif /* GL_APPLE_aux_depth_stencil */
+
+#ifndef GL_APPLE_client_storage
+#define GL_APPLE_client_storage 1
+#define GL_UNPACK_CLIENT_STORAGE_APPLE 0x85B2
+#endif /* GL_APPLE_client_storage */
+
+#ifndef GL_APPLE_element_array
+#define GL_APPLE_element_array 1
+#define GL_ELEMENT_ARRAY_APPLE 0x8A0C
+#define GL_ELEMENT_ARRAY_TYPE_APPLE 0x8A0D
+#define GL_ELEMENT_ARRAY_POINTER_APPLE 0x8A0E
+typedef void (APIENTRYP PFNGLELEMENTPOINTERAPPLEPROC) (GLenum type, const void *pointer);
+typedef void (APIENTRYP PFNGLDRAWELEMENTARRAYAPPLEPROC) (GLenum mode, GLint first, GLsizei count);
+typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTARRAYAPPLEPROC) (GLenum mode, GLuint start, GLuint end, GLint first, GLsizei count);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTARRAYAPPLEPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
+typedef void (APIENTRYP PFNGLMULTIDRAWRANGEELEMENTARRAYAPPLEPROC) (GLenum mode, GLuint start, GLuint end, const GLint *first, const GLsizei *count, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glElementPointerAPPLE (GLenum type, const void *pointer);
+GLAPI void APIENTRY glDrawElementArrayAPPLE (GLenum mode, GLint first, GLsizei count);
+GLAPI void APIENTRY glDrawRangeElementArrayAPPLE (GLenum mode, GLuint start, GLuint end, GLint first, GLsizei count);
+GLAPI void APIENTRY glMultiDrawElementArrayAPPLE (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
+GLAPI void APIENTRY glMultiDrawRangeElementArrayAPPLE (GLenum mode, GLuint start, GLuint end, const GLint *first, const GLsizei *count, GLsizei primcount);
+#endif
+#endif /* GL_APPLE_element_array */
+
+#ifndef GL_APPLE_fence
+#define GL_APPLE_fence 1
+#define GL_DRAW_PIXELS_APPLE 0x8A0A
+#define GL_FENCE_APPLE 0x8A0B
+typedef void (APIENTRYP PFNGLGENFENCESAPPLEPROC) (GLsizei n, GLuint *fences);
+typedef void (APIENTRYP PFNGLDELETEFENCESAPPLEPROC) (GLsizei n, const GLuint *fences);
+typedef void (APIENTRYP PFNGLSETFENCEAPPLEPROC) (GLuint fence);
+typedef GLboolean (APIENTRYP PFNGLISFENCEAPPLEPROC) (GLuint fence);
+typedef GLboolean (APIENTRYP PFNGLTESTFENCEAPPLEPROC) (GLuint fence);
+typedef void (APIENTRYP PFNGLFINISHFENCEAPPLEPROC) (GLuint fence);
+typedef GLboolean (APIENTRYP PFNGLTESTOBJECTAPPLEPROC) (GLenum object, GLuint name);
+typedef void (APIENTRYP PFNGLFINISHOBJECTAPPLEPROC) (GLenum object, GLint name);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenFencesAPPLE (GLsizei n, GLuint *fences);
+GLAPI void APIENTRY glDeleteFencesAPPLE (GLsizei n, const GLuint *fences);
+GLAPI void APIENTRY glSetFenceAPPLE (GLuint fence);
+GLAPI GLboolean APIENTRY glIsFenceAPPLE (GLuint fence);
+GLAPI GLboolean APIENTRY glTestFenceAPPLE (GLuint fence);
+GLAPI void APIENTRY glFinishFenceAPPLE (GLuint fence);
+GLAPI GLboolean APIENTRY glTestObjectAPPLE (GLenum object, GLuint name);
+GLAPI void APIENTRY glFinishObjectAPPLE (GLenum object, GLint name);
+#endif
+#endif /* GL_APPLE_fence */
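+
+/* Illustrative usage sketch (not part of the Khronos header): APPLE fences
+ * are one-shot markers in the command stream -- set one after queued work,
+ * then poll or block until the GPU has passed it.
+ *
+ *   GLuint fence;
+ *   glGenFencesAPPLE(1, &fence);
+ *   // ... issue rendering commands ...
+ *   glSetFenceAPPLE(fence);
+ *   if (!glTestFenceAPPLE(fence))   // non-blocking poll
+ *       glFinishFenceAPPLE(fence);  // block until complete
+ *   glDeleteFencesAPPLE(1, &fence);
+ */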
+
+#ifndef GL_APPLE_float_pixels
+#define GL_APPLE_float_pixels 1
+#define GL_HALF_APPLE 0x140B
+#define GL_RGBA_FLOAT32_APPLE 0x8814
+#define GL_RGB_FLOAT32_APPLE 0x8815
+#define GL_ALPHA_FLOAT32_APPLE 0x8816
+#define GL_INTENSITY_FLOAT32_APPLE 0x8817
+#define GL_LUMINANCE_FLOAT32_APPLE 0x8818
+#define GL_LUMINANCE_ALPHA_FLOAT32_APPLE 0x8819
+#define GL_RGBA_FLOAT16_APPLE 0x881A
+#define GL_RGB_FLOAT16_APPLE 0x881B
+#define GL_ALPHA_FLOAT16_APPLE 0x881C
+#define GL_INTENSITY_FLOAT16_APPLE 0x881D
+#define GL_LUMINANCE_FLOAT16_APPLE 0x881E
+#define GL_LUMINANCE_ALPHA_FLOAT16_APPLE 0x881F
+#define GL_COLOR_FLOAT_APPLE 0x8A0F
+#endif /* GL_APPLE_float_pixels */
+
+#ifndef GL_APPLE_flush_buffer_range
+#define GL_APPLE_flush_buffer_range 1
+#define GL_BUFFER_SERIALIZED_MODIFY_APPLE 0x8A12
+#define GL_BUFFER_FLUSHING_UNMAP_APPLE 0x8A13
+typedef void (APIENTRYP PFNGLBUFFERPARAMETERIAPPLEPROC) (GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLFLUSHMAPPEDBUFFERRANGEAPPLEPROC) (GLenum target, GLintptr offset, GLsizeiptr size);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBufferParameteriAPPLE (GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glFlushMappedBufferRangeAPPLE (GLenum target, GLintptr offset, GLsizeiptr size);
+#endif
+#endif /* GL_APPLE_flush_buffer_range */
+
+#ifndef GL_APPLE_object_purgeable
+#define GL_APPLE_object_purgeable 1
+#define GL_BUFFER_OBJECT_APPLE 0x85B3
+#define GL_RELEASED_APPLE 0x8A19
+#define GL_VOLATILE_APPLE 0x8A1A
+#define GL_RETAINED_APPLE 0x8A1B
+#define GL_UNDEFINED_APPLE 0x8A1C
+#define GL_PURGEABLE_APPLE 0x8A1D
+typedef GLenum (APIENTRYP PFNGLOBJECTPURGEABLEAPPLEPROC) (GLenum objectType, GLuint name, GLenum option);
+typedef GLenum (APIENTRYP PFNGLOBJECTUNPURGEABLEAPPLEPROC) (GLenum objectType, GLuint name, GLenum option);
+typedef void (APIENTRYP PFNGLGETOBJECTPARAMETERIVAPPLEPROC) (GLenum objectType, GLuint name, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLenum APIENTRY glObjectPurgeableAPPLE (GLenum objectType, GLuint name, GLenum option);
+GLAPI GLenum APIENTRY glObjectUnpurgeableAPPLE (GLenum objectType, GLuint name, GLenum option);
+GLAPI void APIENTRY glGetObjectParameterivAPPLE (GLenum objectType, GLuint name, GLenum pname, GLint *params);
+#endif
+#endif /* GL_APPLE_object_purgeable */
+
+#ifndef GL_APPLE_rgb_422
+#define GL_APPLE_rgb_422 1
+#define GL_RGB_422_APPLE 0x8A1F
+#define GL_UNSIGNED_SHORT_8_8_APPLE 0x85BA
+#define GL_UNSIGNED_SHORT_8_8_REV_APPLE 0x85BB
+#define GL_RGB_RAW_422_APPLE 0x8A51
+#endif /* GL_APPLE_rgb_422 */
+
+#ifndef GL_APPLE_row_bytes
+#define GL_APPLE_row_bytes 1
+#define GL_PACK_ROW_BYTES_APPLE 0x8A15
+#define GL_UNPACK_ROW_BYTES_APPLE 0x8A16
+#endif /* GL_APPLE_row_bytes */
+
+#ifndef GL_APPLE_specular_vector
+#define GL_APPLE_specular_vector 1
+#define GL_LIGHT_MODEL_SPECULAR_VECTOR_APPLE 0x85B0
+#endif /* GL_APPLE_specular_vector */
+
+#ifndef GL_APPLE_texture_range
+#define GL_APPLE_texture_range 1
+#define GL_TEXTURE_RANGE_LENGTH_APPLE 0x85B7
+#define GL_TEXTURE_RANGE_POINTER_APPLE 0x85B8
+#define GL_TEXTURE_STORAGE_HINT_APPLE 0x85BC
+#define GL_STORAGE_PRIVATE_APPLE 0x85BD
+#define GL_STORAGE_CACHED_APPLE 0x85BE
+#define GL_STORAGE_SHARED_APPLE 0x85BF
+typedef void (APIENTRYP PFNGLTEXTURERANGEAPPLEPROC) (GLenum target, GLsizei length, const void *pointer);
+typedef void (APIENTRYP PFNGLGETTEXPARAMETERPOINTERVAPPLEPROC) (GLenum target, GLenum pname, void **params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTextureRangeAPPLE (GLenum target, GLsizei length, const void *pointer);
+GLAPI void APIENTRY glGetTexParameterPointervAPPLE (GLenum target, GLenum pname, void **params);
+#endif
+#endif /* GL_APPLE_texture_range */
+
+#ifndef GL_APPLE_transform_hint
+#define GL_APPLE_transform_hint 1
+#define GL_TRANSFORM_HINT_APPLE 0x85B1
+#endif /* GL_APPLE_transform_hint */
+
+#ifndef GL_APPLE_vertex_array_object
+#define GL_APPLE_vertex_array_object 1
+#define GL_VERTEX_ARRAY_BINDING_APPLE 0x85B5
+typedef void (APIENTRYP PFNGLBINDVERTEXARRAYAPPLEPROC) (GLuint array);
+typedef void (APIENTRYP PFNGLDELETEVERTEXARRAYSAPPLEPROC) (GLsizei n, const GLuint *arrays);
+typedef void (APIENTRYP PFNGLGENVERTEXARRAYSAPPLEPROC) (GLsizei n, GLuint *arrays);
+typedef GLboolean (APIENTRYP PFNGLISVERTEXARRAYAPPLEPROC) (GLuint array);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindVertexArrayAPPLE (GLuint array);
+GLAPI void APIENTRY glDeleteVertexArraysAPPLE (GLsizei n, const GLuint *arrays);
+GLAPI void APIENTRY glGenVertexArraysAPPLE (GLsizei n, GLuint *arrays);
+GLAPI GLboolean APIENTRY glIsVertexArrayAPPLE (GLuint array);
+#endif
+#endif /* GL_APPLE_vertex_array_object */
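+
+/* Illustrative usage sketch (not part of the Khronos header): the APPLE VAO
+ * entry points mirror the later core/ARB vertex array object API.
+ *
+ *   GLuint vao;
+ *   glGenVertexArraysAPPLE(1, &vao);
+ *   glBindVertexArrayAPPLE(vao);
+ *   // ... vertex pointers / enables set here are captured by the VAO ...
+ *   glBindVertexArrayAPPLE(0);      // unbind; rebind before drawing
+ *   glDeleteVertexArraysAPPLE(1, &vao);
+ */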
+
+#ifndef GL_APPLE_vertex_array_range
+#define GL_APPLE_vertex_array_range 1
+#define GL_VERTEX_ARRAY_RANGE_APPLE 0x851D
+#define GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE 0x851E
+#define GL_VERTEX_ARRAY_STORAGE_HINT_APPLE 0x851F
+#define GL_VERTEX_ARRAY_RANGE_POINTER_APPLE 0x8521
+#define GL_STORAGE_CLIENT_APPLE 0x85B4
+typedef void (APIENTRYP PFNGLVERTEXARRAYRANGEAPPLEPROC) (GLsizei length, void *pointer);
+typedef void (APIENTRYP PFNGLFLUSHVERTEXARRAYRANGEAPPLEPROC) (GLsizei length, void *pointer);
+typedef void (APIENTRYP PFNGLVERTEXARRAYPARAMETERIAPPLEPROC) (GLenum pname, GLint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexArrayRangeAPPLE (GLsizei length, void *pointer);
+GLAPI void APIENTRY glFlushVertexArrayRangeAPPLE (GLsizei length, void *pointer);
+GLAPI void APIENTRY glVertexArrayParameteriAPPLE (GLenum pname, GLint param);
+#endif
+#endif /* GL_APPLE_vertex_array_range */
+
+#ifndef GL_APPLE_vertex_program_evaluators
+#define GL_APPLE_vertex_program_evaluators 1
+#define GL_VERTEX_ATTRIB_MAP1_APPLE 0x8A00
+#define GL_VERTEX_ATTRIB_MAP2_APPLE 0x8A01
+#define GL_VERTEX_ATTRIB_MAP1_SIZE_APPLE 0x8A02
+#define GL_VERTEX_ATTRIB_MAP1_COEFF_APPLE 0x8A03
+#define GL_VERTEX_ATTRIB_MAP1_ORDER_APPLE 0x8A04
+#define GL_VERTEX_ATTRIB_MAP1_DOMAIN_APPLE 0x8A05
+#define GL_VERTEX_ATTRIB_MAP2_SIZE_APPLE 0x8A06
+#define GL_VERTEX_ATTRIB_MAP2_COEFF_APPLE 0x8A07
+#define GL_VERTEX_ATTRIB_MAP2_ORDER_APPLE 0x8A08
+#define GL_VERTEX_ATTRIB_MAP2_DOMAIN_APPLE 0x8A09
+typedef void (APIENTRYP PFNGLENABLEVERTEXATTRIBAPPLEPROC) (GLuint index, GLenum pname);
+typedef void (APIENTRYP PFNGLDISABLEVERTEXATTRIBAPPLEPROC) (GLuint index, GLenum pname);
+typedef GLboolean (APIENTRYP PFNGLISVERTEXATTRIBENABLEDAPPLEPROC) (GLuint index, GLenum pname);
+typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB1DAPPLEPROC) (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint stride, GLint order, const GLdouble *points);
+typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB1FAPPLEPROC) (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint stride, GLint order, const GLfloat *points);
+typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB2DAPPLEPROC) (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, const GLdouble *points);
+typedef void (APIENTRYP PFNGLMAPVERTEXATTRIB2FAPPLEPROC) (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, const GLfloat *points);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glEnableVertexAttribAPPLE (GLuint index, GLenum pname);
+GLAPI void APIENTRY glDisableVertexAttribAPPLE (GLuint index, GLenum pname);
+GLAPI GLboolean APIENTRY glIsVertexAttribEnabledAPPLE (GLuint index, GLenum pname);
+GLAPI void APIENTRY glMapVertexAttrib1dAPPLE (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint stride, GLint order, const GLdouble *points);
+GLAPI void APIENTRY glMapVertexAttrib1fAPPLE (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint stride, GLint order, const GLfloat *points);
+GLAPI void APIENTRY glMapVertexAttrib2dAPPLE (GLuint index, GLuint size, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, const GLdouble *points);
+GLAPI void APIENTRY glMapVertexAttrib2fAPPLE (GLuint index, GLuint size, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, const GLfloat *points);
+#endif
+#endif /* GL_APPLE_vertex_program_evaluators */
+
+#ifndef GL_APPLE_ycbcr_422
+#define GL_APPLE_ycbcr_422 1
+#define GL_YCBCR_422_APPLE 0x85B9
+#endif /* GL_APPLE_ycbcr_422 */
+
+#ifndef GL_ATI_draw_buffers
+#define GL_ATI_draw_buffers 1
+#define GL_MAX_DRAW_BUFFERS_ATI 0x8824
+#define GL_DRAW_BUFFER0_ATI 0x8825
+#define GL_DRAW_BUFFER1_ATI 0x8826
+#define GL_DRAW_BUFFER2_ATI 0x8827
+#define GL_DRAW_BUFFER3_ATI 0x8828
+#define GL_DRAW_BUFFER4_ATI 0x8829
+#define GL_DRAW_BUFFER5_ATI 0x882A
+#define GL_DRAW_BUFFER6_ATI 0x882B
+#define GL_DRAW_BUFFER7_ATI 0x882C
+#define GL_DRAW_BUFFER8_ATI 0x882D
+#define GL_DRAW_BUFFER9_ATI 0x882E
+#define GL_DRAW_BUFFER10_ATI 0x882F
+#define GL_DRAW_BUFFER11_ATI 0x8830
+#define GL_DRAW_BUFFER12_ATI 0x8831
+#define GL_DRAW_BUFFER13_ATI 0x8832
+#define GL_DRAW_BUFFER14_ATI 0x8833
+#define GL_DRAW_BUFFER15_ATI 0x8834
+typedef void (APIENTRYP PFNGLDRAWBUFFERSATIPROC) (GLsizei n, const GLenum *bufs);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawBuffersATI (GLsizei n, const GLenum *bufs);
+#endif
+#endif /* GL_ATI_draw_buffers */
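+
+/* Illustrative usage sketch (not part of the Khronos header): the vendor
+ * precursor to core glDrawBuffers. Note that GL_DRAW_BUFFERi_ATI and
+ * GL_MAX_DRAW_BUFFERS_ATI are glGetIntegerv query tokens; the values passed
+ * to glDrawBuffersATI are ordinary draw-buffer enums.
+ *
+ *   GLint maxBufs = 0;
+ *   glGetIntegerv(GL_MAX_DRAW_BUFFERS_ATI, &maxBufs);
+ *   const GLenum bufs[2] = { GL_FRONT_LEFT, GL_AUX0 };
+ *   glDrawBuffersATI(2, bufs);
+ */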
+
+#ifndef GL_ATI_element_array
+#define GL_ATI_element_array 1
+#define GL_ELEMENT_ARRAY_ATI 0x8768
+#define GL_ELEMENT_ARRAY_TYPE_ATI 0x8769
+#define GL_ELEMENT_ARRAY_POINTER_ATI 0x876A
+typedef void (APIENTRYP PFNGLELEMENTPOINTERATIPROC) (GLenum type, const void *pointer);
+typedef void (APIENTRYP PFNGLDRAWELEMENTARRAYATIPROC) (GLenum mode, GLsizei count);
+typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTARRAYATIPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glElementPointerATI (GLenum type, const void *pointer);
+GLAPI void APIENTRY glDrawElementArrayATI (GLenum mode, GLsizei count);
+GLAPI void APIENTRY glDrawRangeElementArrayATI (GLenum mode, GLuint start, GLuint end, GLsizei count);
+#endif
+#endif /* GL_ATI_element_array */
+
+#ifndef GL_ATI_envmap_bumpmap
+#define GL_ATI_envmap_bumpmap 1
+#define GL_BUMP_ROT_MATRIX_ATI 0x8775
+#define GL_BUMP_ROT_MATRIX_SIZE_ATI 0x8776
+#define GL_BUMP_NUM_TEX_UNITS_ATI 0x8777
+#define GL_BUMP_TEX_UNITS_ATI 0x8778
+#define GL_DUDV_ATI 0x8779
+#define GL_DU8DV8_ATI 0x877A
+#define GL_BUMP_ENVMAP_ATI 0x877B
+#define GL_BUMP_TARGET_ATI 0x877C
+typedef void (APIENTRYP PFNGLTEXBUMPPARAMETERIVATIPROC) (GLenum pname, const GLint *param);
+typedef void (APIENTRYP PFNGLTEXBUMPPARAMETERFVATIPROC) (GLenum pname, const GLfloat *param);
+typedef void (APIENTRYP PFNGLGETTEXBUMPPARAMETERIVATIPROC) (GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETTEXBUMPPARAMETERFVATIPROC) (GLenum pname, GLfloat *param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexBumpParameterivATI (GLenum pname, const GLint *param);
+GLAPI void APIENTRY glTexBumpParameterfvATI (GLenum pname, const GLfloat *param);
+GLAPI void APIENTRY glGetTexBumpParameterivATI (GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetTexBumpParameterfvATI (GLenum pname, GLfloat *param);
+#endif
+#endif /* GL_ATI_envmap_bumpmap */
+
+#ifndef GL_ATI_fragment_shader
+#define GL_ATI_fragment_shader 1
+#define GL_FRAGMENT_SHADER_ATI 0x8920
+#define GL_REG_0_ATI 0x8921
+#define GL_REG_1_ATI 0x8922
+#define GL_REG_2_ATI 0x8923
+#define GL_REG_3_ATI 0x8924
+#define GL_REG_4_ATI 0x8925
+#define GL_REG_5_ATI 0x8926
+#define GL_REG_6_ATI 0x8927
+#define GL_REG_7_ATI 0x8928
+#define GL_REG_8_ATI 0x8929
+#define GL_REG_9_ATI 0x892A
+#define GL_REG_10_ATI 0x892B
+#define GL_REG_11_ATI 0x892C
+#define GL_REG_12_ATI 0x892D
+#define GL_REG_13_ATI 0x892E
+#define GL_REG_14_ATI 0x892F
+#define GL_REG_15_ATI 0x8930
+#define GL_REG_16_ATI 0x8931
+#define GL_REG_17_ATI 0x8932
+#define GL_REG_18_ATI 0x8933
+#define GL_REG_19_ATI 0x8934
+#define GL_REG_20_ATI 0x8935
+#define GL_REG_21_ATI 0x8936
+#define GL_REG_22_ATI 0x8937
+#define GL_REG_23_ATI 0x8938
+#define GL_REG_24_ATI 0x8939
+#define GL_REG_25_ATI 0x893A
+#define GL_REG_26_ATI 0x893B
+#define GL_REG_27_ATI 0x893C
+#define GL_REG_28_ATI 0x893D
+#define GL_REG_29_ATI 0x893E
+#define GL_REG_30_ATI 0x893F
+#define GL_REG_31_ATI 0x8940
+#define GL_CON_0_ATI 0x8941
+#define GL_CON_1_ATI 0x8942
+#define GL_CON_2_ATI 0x8943
+#define GL_CON_3_ATI 0x8944
+#define GL_CON_4_ATI 0x8945
+#define GL_CON_5_ATI 0x8946
+#define GL_CON_6_ATI 0x8947
+#define GL_CON_7_ATI 0x8948
+#define GL_CON_8_ATI 0x8949
+#define GL_CON_9_ATI 0x894A
+#define GL_CON_10_ATI 0x894B
+#define GL_CON_11_ATI 0x894C
+#define GL_CON_12_ATI 0x894D
+#define GL_CON_13_ATI 0x894E
+#define GL_CON_14_ATI 0x894F
+#define GL_CON_15_ATI 0x8950
+#define GL_CON_16_ATI 0x8951
+#define GL_CON_17_ATI 0x8952
+#define GL_CON_18_ATI 0x8953
+#define GL_CON_19_ATI 0x8954
+#define GL_CON_20_ATI 0x8955
+#define GL_CON_21_ATI 0x8956
+#define GL_CON_22_ATI 0x8957
+#define GL_CON_23_ATI 0x8958
+#define GL_CON_24_ATI 0x8959
+#define GL_CON_25_ATI 0x895A
+#define GL_CON_26_ATI 0x895B
+#define GL_CON_27_ATI 0x895C
+#define GL_CON_28_ATI 0x895D
+#define GL_CON_29_ATI 0x895E
+#define GL_CON_30_ATI 0x895F
+#define GL_CON_31_ATI 0x8960
+#define GL_MOV_ATI 0x8961
+#define GL_ADD_ATI 0x8963
+#define GL_MUL_ATI 0x8964
+#define GL_SUB_ATI 0x8965
+#define GL_DOT3_ATI 0x8966
+#define GL_DOT4_ATI 0x8967
+#define GL_MAD_ATI 0x8968
+#define GL_LERP_ATI 0x8969
+#define GL_CND_ATI 0x896A
+#define GL_CND0_ATI 0x896B
+#define GL_DOT2_ADD_ATI 0x896C
+#define GL_SECONDARY_INTERPOLATOR_ATI 0x896D
+#define GL_NUM_FRAGMENT_REGISTERS_ATI 0x896E
+#define GL_NUM_FRAGMENT_CONSTANTS_ATI 0x896F
+#define GL_NUM_PASSES_ATI 0x8970
+#define GL_NUM_INSTRUCTIONS_PER_PASS_ATI 0x8971
+#define GL_NUM_INSTRUCTIONS_TOTAL_ATI 0x8972
+#define GL_NUM_INPUT_INTERPOLATOR_COMPONENTS_ATI 0x8973
+#define GL_NUM_LOOPBACK_COMPONENTS_ATI 0x8974
+#define GL_COLOR_ALPHA_PAIRING_ATI 0x8975
+#define GL_SWIZZLE_STR_ATI 0x8976
+#define GL_SWIZZLE_STQ_ATI 0x8977
+#define GL_SWIZZLE_STR_DR_ATI 0x8978
+#define GL_SWIZZLE_STQ_DQ_ATI 0x8979
+#define GL_SWIZZLE_STRQ_ATI 0x897A
+#define GL_SWIZZLE_STRQ_DQ_ATI 0x897B
+#define GL_RED_BIT_ATI 0x00000001
+#define GL_GREEN_BIT_ATI 0x00000002
+#define GL_BLUE_BIT_ATI 0x00000004
+#define GL_2X_BIT_ATI 0x00000001
+#define GL_4X_BIT_ATI 0x00000002
+#define GL_8X_BIT_ATI 0x00000004
+#define GL_HALF_BIT_ATI 0x00000008
+#define GL_QUARTER_BIT_ATI 0x00000010
+#define GL_EIGHTH_BIT_ATI 0x00000020
+#define GL_SATURATE_BIT_ATI 0x00000040
+#define GL_COMP_BIT_ATI 0x00000002
+#define GL_NEGATE_BIT_ATI 0x00000004
+#define GL_BIAS_BIT_ATI 0x00000008
+typedef GLuint (APIENTRYP PFNGLGENFRAGMENTSHADERSATIPROC) (GLuint range);
+typedef void (APIENTRYP PFNGLBINDFRAGMENTSHADERATIPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLDELETEFRAGMENTSHADERATIPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLBEGINFRAGMENTSHADERATIPROC) (void);
+typedef void (APIENTRYP PFNGLENDFRAGMENTSHADERATIPROC) (void);
+typedef void (APIENTRYP PFNGLPASSTEXCOORDATIPROC) (GLuint dst, GLuint coord, GLenum swizzle);
+typedef void (APIENTRYP PFNGLSAMPLEMAPATIPROC) (GLuint dst, GLuint interp, GLenum swizzle);
+typedef void (APIENTRYP PFNGLCOLORFRAGMENTOP1ATIPROC) (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod);
+typedef void (APIENTRYP PFNGLCOLORFRAGMENTOP2ATIPROC) (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod);
+typedef void (APIENTRYP PFNGLCOLORFRAGMENTOP3ATIPROC) (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod);
+typedef void (APIENTRYP PFNGLALPHAFRAGMENTOP1ATIPROC) (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod);
+typedef void (APIENTRYP PFNGLALPHAFRAGMENTOP2ATIPROC) (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod);
+typedef void (APIENTRYP PFNGLALPHAFRAGMENTOP3ATIPROC) (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod);
+typedef void (APIENTRYP PFNGLSETFRAGMENTSHADERCONSTANTATIPROC) (GLuint dst, const GLfloat *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLuint APIENTRY glGenFragmentShadersATI (GLuint range);
+GLAPI void APIENTRY glBindFragmentShaderATI (GLuint id);
+GLAPI void APIENTRY glDeleteFragmentShaderATI (GLuint id);
+GLAPI void APIENTRY glBeginFragmentShaderATI (void);
+GLAPI void APIENTRY glEndFragmentShaderATI (void);
+GLAPI void APIENTRY glPassTexCoordATI (GLuint dst, GLuint coord, GLenum swizzle);
+GLAPI void APIENTRY glSampleMapATI (GLuint dst, GLuint interp, GLenum swizzle);
+GLAPI void APIENTRY glColorFragmentOp1ATI (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod);
+GLAPI void APIENTRY glColorFragmentOp2ATI (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod);
+GLAPI void APIENTRY glColorFragmentOp3ATI (GLenum op, GLuint dst, GLuint dstMask, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod);
+GLAPI void APIENTRY glAlphaFragmentOp1ATI (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod);
+GLAPI void APIENTRY glAlphaFragmentOp2ATI (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod);
+GLAPI void APIENTRY glAlphaFragmentOp3ATI (GLenum op, GLuint dst, GLuint dstMod, GLuint arg1, GLuint arg1Rep, GLuint arg1Mod, GLuint arg2, GLuint arg2Rep, GLuint arg2Mod, GLuint arg3, GLuint arg3Rep, GLuint arg3Mod);
+GLAPI void APIENTRY glSetFragmentShaderConstantATI (GLuint dst, const GLfloat *value);
+#endif
+#endif /* GL_ATI_fragment_shader */
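+
+/* Illustrative usage sketch (not part of the Khronos header): an ATI fragment
+ * shader is recorded between Begin/End while bound. This minimal single-pass
+ * shader samples texture unit 0 and modulates it with the primary color
+ * (GL_TEXTURE0_ARB and GL_PRIMARY_COLOR_ARB are defined elsewhere in this
+ * header; GL_NONE leaves masks, modifiers, and replication at their defaults).
+ *
+ *   GLuint fs = glGenFragmentShadersATI(1);
+ *   glBindFragmentShaderATI(fs);
+ *   glBeginFragmentShaderATI();
+ *   glSampleMapATI(GL_REG_0_ATI, GL_TEXTURE0_ARB, GL_SWIZZLE_STR_ATI);
+ *   glColorFragmentOp2ATI(GL_MUL_ATI, GL_REG_0_ATI, GL_NONE, GL_NONE,
+ *                         GL_REG_0_ATI, GL_NONE, GL_NONE,
+ *                         GL_PRIMARY_COLOR_ARB, GL_NONE, GL_NONE);
+ *   glEndFragmentShaderATI();
+ */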
+
+#ifndef GL_ATI_map_object_buffer
+#define GL_ATI_map_object_buffer 1
+typedef void *(APIENTRYP PFNGLMAPOBJECTBUFFERATIPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLUNMAPOBJECTBUFFERATIPROC) (GLuint buffer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void *APIENTRY glMapObjectBufferATI (GLuint buffer);
+GLAPI void APIENTRY glUnmapObjectBufferATI (GLuint buffer);
+#endif
+#endif /* GL_ATI_map_object_buffer */
+
+#ifndef GL_ATI_meminfo
+#define GL_ATI_meminfo 1
+#define GL_VBO_FREE_MEMORY_ATI 0x87FB
+#define GL_TEXTURE_FREE_MEMORY_ATI 0x87FC
+#define GL_RENDERBUFFER_FREE_MEMORY_ATI 0x87FD
+#endif /* GL_ATI_meminfo */
+
+#ifndef GL_ATI_pixel_format_float
+#define GL_ATI_pixel_format_float 1
+#define GL_RGBA_FLOAT_MODE_ATI 0x8820
+#define GL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI 0x8835
+#endif /* GL_ATI_pixel_format_float */
+
+#ifndef GL_ATI_pn_triangles
+#define GL_ATI_pn_triangles 1
+#define GL_PN_TRIANGLES_ATI 0x87F0
+#define GL_MAX_PN_TRIANGLES_TESSELATION_LEVEL_ATI 0x87F1
+#define GL_PN_TRIANGLES_POINT_MODE_ATI 0x87F2
+#define GL_PN_TRIANGLES_NORMAL_MODE_ATI 0x87F3
+#define GL_PN_TRIANGLES_TESSELATION_LEVEL_ATI 0x87F4
+#define GL_PN_TRIANGLES_POINT_MODE_LINEAR_ATI 0x87F5
+#define GL_PN_TRIANGLES_POINT_MODE_CUBIC_ATI 0x87F6
+#define GL_PN_TRIANGLES_NORMAL_MODE_LINEAR_ATI 0x87F7
+#define GL_PN_TRIANGLES_NORMAL_MODE_QUADRATIC_ATI 0x87F8
+typedef void (APIENTRYP PFNGLPNTRIANGLESIATIPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLPNTRIANGLESFATIPROC) (GLenum pname, GLfloat param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPNTrianglesiATI (GLenum pname, GLint param);
+GLAPI void APIENTRY glPNTrianglesfATI (GLenum pname, GLfloat param);
+#endif
+#endif /* GL_ATI_pn_triangles */
+
+#ifndef GL_ATI_separate_stencil
+#define GL_ATI_separate_stencil 1
+#define GL_STENCIL_BACK_FUNC_ATI 0x8800
+#define GL_STENCIL_BACK_FAIL_ATI 0x8801
+#define GL_STENCIL_BACK_PASS_DEPTH_FAIL_ATI 0x8802
+#define GL_STENCIL_BACK_PASS_DEPTH_PASS_ATI 0x8803
+typedef void (APIENTRYP PFNGLSTENCILOPSEPARATEATIPROC) (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+typedef void (APIENTRYP PFNGLSTENCILFUNCSEPARATEATIPROC) (GLenum frontfunc, GLenum backfunc, GLint ref, GLuint mask);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glStencilOpSeparateATI (GLenum face, GLenum sfail, GLenum dpfail, GLenum dppass);
+GLAPI void APIENTRY glStencilFuncSeparateATI (GLenum frontfunc, GLenum backfunc, GLint ref, GLuint mask);
+#endif
+#endif /* GL_ATI_separate_stencil */
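+
+/* Illustrative usage sketch (not part of the Khronos header): two-sided
+ * stencil state, the vendor precursor to core glStencilOpSeparate /
+ * glStencilFuncSeparate. Note the ATI func variant takes both faces'
+ * comparison functions at once rather than a face selector.
+ *
+ *   glStencilFuncSeparateATI(GL_ALWAYS, GL_EQUAL, 1, 0xFF);  // front, back
+ *   glStencilOpSeparateATI(GL_BACK, GL_KEEP, GL_KEEP, GL_INCR);
+ */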
+
+#ifndef GL_ATI_text_fragment_shader
+#define GL_ATI_text_fragment_shader 1
+#define GL_TEXT_FRAGMENT_SHADER_ATI 0x8200
+#endif /* GL_ATI_text_fragment_shader */
+
+#ifndef GL_ATI_texture_env_combine3
+#define GL_ATI_texture_env_combine3 1
+#define GL_MODULATE_ADD_ATI 0x8744
+#define GL_MODULATE_SIGNED_ADD_ATI 0x8745
+#define GL_MODULATE_SUBTRACT_ATI 0x8746
+#endif /* GL_ATI_texture_env_combine3 */
+
+#ifndef GL_ATI_texture_float
+#define GL_ATI_texture_float 1
+#define GL_RGBA_FLOAT32_ATI 0x8814
+#define GL_RGB_FLOAT32_ATI 0x8815
+#define GL_ALPHA_FLOAT32_ATI 0x8816
+#define GL_INTENSITY_FLOAT32_ATI 0x8817
+#define GL_LUMINANCE_FLOAT32_ATI 0x8818
+#define GL_LUMINANCE_ALPHA_FLOAT32_ATI 0x8819
+#define GL_RGBA_FLOAT16_ATI 0x881A
+#define GL_RGB_FLOAT16_ATI 0x881B
+#define GL_ALPHA_FLOAT16_ATI 0x881C
+#define GL_INTENSITY_FLOAT16_ATI 0x881D
+#define GL_LUMINANCE_FLOAT16_ATI 0x881E
+#define GL_LUMINANCE_ALPHA_FLOAT16_ATI 0x881F
+#endif /* GL_ATI_texture_float */
+
+#ifndef GL_ATI_texture_mirror_once
+#define GL_ATI_texture_mirror_once 1
+#define GL_MIRROR_CLAMP_ATI 0x8742
+#define GL_MIRROR_CLAMP_TO_EDGE_ATI 0x8743
+#endif /* GL_ATI_texture_mirror_once */
+
+#ifndef GL_ATI_vertex_array_object
+#define GL_ATI_vertex_array_object 1
+#define GL_STATIC_ATI 0x8760
+#define GL_DYNAMIC_ATI 0x8761
+#define GL_PRESERVE_ATI 0x8762
+#define GL_DISCARD_ATI 0x8763
+#define GL_OBJECT_BUFFER_SIZE_ATI 0x8764
+#define GL_OBJECT_BUFFER_USAGE_ATI 0x8765
+#define GL_ARRAY_OBJECT_BUFFER_ATI 0x8766
+#define GL_ARRAY_OBJECT_OFFSET_ATI 0x8767
+typedef GLuint (APIENTRYP PFNGLNEWOBJECTBUFFERATIPROC) (GLsizei size, const void *pointer, GLenum usage);
+typedef GLboolean (APIENTRYP PFNGLISOBJECTBUFFERATIPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLUPDATEOBJECTBUFFERATIPROC) (GLuint buffer, GLuint offset, GLsizei size, const void *pointer, GLenum preserve);
+typedef void (APIENTRYP PFNGLGETOBJECTBUFFERFVATIPROC) (GLuint buffer, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETOBJECTBUFFERIVATIPROC) (GLuint buffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLFREEOBJECTBUFFERATIPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLARRAYOBJECTATIPROC) (GLenum array, GLint size, GLenum type, GLsizei stride, GLuint buffer, GLuint offset);
+typedef void (APIENTRYP PFNGLGETARRAYOBJECTFVATIPROC) (GLenum array, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETARRAYOBJECTIVATIPROC) (GLenum array, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLVARIANTARRAYOBJECTATIPROC) (GLuint id, GLenum type, GLsizei stride, GLuint buffer, GLuint offset);
+typedef void (APIENTRYP PFNGLGETVARIANTARRAYOBJECTFVATIPROC) (GLuint id, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETVARIANTARRAYOBJECTIVATIPROC) (GLuint id, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLuint APIENTRY glNewObjectBufferATI (GLsizei size, const void *pointer, GLenum usage);
+GLAPI GLboolean APIENTRY glIsObjectBufferATI (GLuint buffer);
+GLAPI void APIENTRY glUpdateObjectBufferATI (GLuint buffer, GLuint offset, GLsizei size, const void *pointer, GLenum preserve);
+GLAPI void APIENTRY glGetObjectBufferfvATI (GLuint buffer, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetObjectBufferivATI (GLuint buffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glFreeObjectBufferATI (GLuint buffer);
+GLAPI void APIENTRY glArrayObjectATI (GLenum array, GLint size, GLenum type, GLsizei stride, GLuint buffer, GLuint offset);
+GLAPI void APIENTRY glGetArrayObjectfvATI (GLenum array, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetArrayObjectivATI (GLenum array, GLenum pname, GLint *params);
+GLAPI void APIENTRY glVariantArrayObjectATI (GLuint id, GLenum type, GLsizei stride, GLuint buffer, GLuint offset);
+GLAPI void APIENTRY glGetVariantArrayObjectfvATI (GLuint id, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetVariantArrayObjectivATI (GLuint id, GLenum pname, GLint *params);
+#endif
+#endif /* GL_ATI_vertex_array_object */
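+
+/* Illustrative usage sketch (not part of the Khronos header): object buffers
+ * were ATI's pre-VBO server-side vertex memory. A buffer is created with an
+ * initial data snapshot, then classic client arrays are pointed into it
+ * (`size` and `vertexData` are assumed application state).
+ *
+ *   GLuint buf = glNewObjectBufferATI(size, vertexData, GL_STATIC_ATI);
+ *   glArrayObjectATI(GL_VERTEX_ARRAY, 3, GL_FLOAT, 0, buf, 0);
+ *   // ... glDrawArrays etc. ...
+ *   glFreeObjectBufferATI(buf);
+ */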
+
+#ifndef GL_ATI_vertex_attrib_array_object
+#define GL_ATI_vertex_attrib_array_object 1
+typedef void (APIENTRYP PFNGLVERTEXATTRIBARRAYOBJECTATIPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLuint buffer, GLuint offset);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBARRAYOBJECTFVATIPROC) (GLuint index, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBARRAYOBJECTIVATIPROC) (GLuint index, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttribArrayObjectATI (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLuint buffer, GLuint offset);
+GLAPI void APIENTRY glGetVertexAttribArrayObjectfvATI (GLuint index, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetVertexAttribArrayObjectivATI (GLuint index, GLenum pname, GLint *params);
+#endif
+#endif /* GL_ATI_vertex_attrib_array_object */
+
+#ifndef GL_ATI_vertex_streams
+#define GL_ATI_vertex_streams 1
+#define GL_MAX_VERTEX_STREAMS_ATI 0x876B
+#define GL_VERTEX_STREAM0_ATI 0x876C
+#define GL_VERTEX_STREAM1_ATI 0x876D
+#define GL_VERTEX_STREAM2_ATI 0x876E
+#define GL_VERTEX_STREAM3_ATI 0x876F
+#define GL_VERTEX_STREAM4_ATI 0x8770
+#define GL_VERTEX_STREAM5_ATI 0x8771
+#define GL_VERTEX_STREAM6_ATI 0x8772
+#define GL_VERTEX_STREAM7_ATI 0x8773
+#define GL_VERTEX_SOURCE_ATI 0x8774
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1SATIPROC) (GLenum stream, GLshort x);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1SVATIPROC) (GLenum stream, const GLshort *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1IATIPROC) (GLenum stream, GLint x);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1IVATIPROC) (GLenum stream, const GLint *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1FATIPROC) (GLenum stream, GLfloat x);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1FVATIPROC) (GLenum stream, const GLfloat *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1DATIPROC) (GLenum stream, GLdouble x);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM1DVATIPROC) (GLenum stream, const GLdouble *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2SATIPROC) (GLenum stream, GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2SVATIPROC) (GLenum stream, const GLshort *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2IATIPROC) (GLenum stream, GLint x, GLint y);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2IVATIPROC) (GLenum stream, const GLint *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2FATIPROC) (GLenum stream, GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2FVATIPROC) (GLenum stream, const GLfloat *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2DATIPROC) (GLenum stream, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM2DVATIPROC) (GLenum stream, const GLdouble *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3SATIPROC) (GLenum stream, GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3SVATIPROC) (GLenum stream, const GLshort *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3IATIPROC) (GLenum stream, GLint x, GLint y, GLint z);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3IVATIPROC) (GLenum stream, const GLint *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3FATIPROC) (GLenum stream, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3FVATIPROC) (GLenum stream, const GLfloat *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3DATIPROC) (GLenum stream, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM3DVATIPROC) (GLenum stream, const GLdouble *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4SATIPROC) (GLenum stream, GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4SVATIPROC) (GLenum stream, const GLshort *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4IATIPROC) (GLenum stream, GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4IVATIPROC) (GLenum stream, const GLint *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4FATIPROC) (GLenum stream, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4FVATIPROC) (GLenum stream, const GLfloat *coords);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4DATIPROC) (GLenum stream, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLVERTEXSTREAM4DVATIPROC) (GLenum stream, const GLdouble *coords);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3BATIPROC) (GLenum stream, GLbyte nx, GLbyte ny, GLbyte nz);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3BVATIPROC) (GLenum stream, const GLbyte *coords);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3SATIPROC) (GLenum stream, GLshort nx, GLshort ny, GLshort nz);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3SVATIPROC) (GLenum stream, const GLshort *coords);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3IATIPROC) (GLenum stream, GLint nx, GLint ny, GLint nz);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3IVATIPROC) (GLenum stream, const GLint *coords);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3FATIPROC) (GLenum stream, GLfloat nx, GLfloat ny, GLfloat nz);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3FVATIPROC) (GLenum stream, const GLfloat *coords);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3DATIPROC) (GLenum stream, GLdouble nx, GLdouble ny, GLdouble nz);
+typedef void (APIENTRYP PFNGLNORMALSTREAM3DVATIPROC) (GLenum stream, const GLdouble *coords);
+typedef void (APIENTRYP PFNGLCLIENTACTIVEVERTEXSTREAMATIPROC) (GLenum stream);
+typedef void (APIENTRYP PFNGLVERTEXBLENDENVIATIPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLVERTEXBLENDENVFATIPROC) (GLenum pname, GLfloat param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexStream1sATI (GLenum stream, GLshort x);
+GLAPI void APIENTRY glVertexStream1svATI (GLenum stream, const GLshort *coords);
+GLAPI void APIENTRY glVertexStream1iATI (GLenum stream, GLint x);
+GLAPI void APIENTRY glVertexStream1ivATI (GLenum stream, const GLint *coords);
+GLAPI void APIENTRY glVertexStream1fATI (GLenum stream, GLfloat x);
+GLAPI void APIENTRY glVertexStream1fvATI (GLenum stream, const GLfloat *coords);
+GLAPI void APIENTRY glVertexStream1dATI (GLenum stream, GLdouble x);
+GLAPI void APIENTRY glVertexStream1dvATI (GLenum stream, const GLdouble *coords);
+GLAPI void APIENTRY glVertexStream2sATI (GLenum stream, GLshort x, GLshort y);
+GLAPI void APIENTRY glVertexStream2svATI (GLenum stream, const GLshort *coords);
+GLAPI void APIENTRY glVertexStream2iATI (GLenum stream, GLint x, GLint y);
+GLAPI void APIENTRY glVertexStream2ivATI (GLenum stream, const GLint *coords);
+GLAPI void APIENTRY glVertexStream2fATI (GLenum stream, GLfloat x, GLfloat y);
+GLAPI void APIENTRY glVertexStream2fvATI (GLenum stream, const GLfloat *coords);
+GLAPI void APIENTRY glVertexStream2dATI (GLenum stream, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glVertexStream2dvATI (GLenum stream, const GLdouble *coords);
+GLAPI void APIENTRY glVertexStream3sATI (GLenum stream, GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glVertexStream3svATI (GLenum stream, const GLshort *coords);
+GLAPI void APIENTRY glVertexStream3iATI (GLenum stream, GLint x, GLint y, GLint z);
+GLAPI void APIENTRY glVertexStream3ivATI (GLenum stream, const GLint *coords);
+GLAPI void APIENTRY glVertexStream3fATI (GLenum stream, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glVertexStream3fvATI (GLenum stream, const GLfloat *coords);
+GLAPI void APIENTRY glVertexStream3dATI (GLenum stream, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glVertexStream3dvATI (GLenum stream, const GLdouble *coords);
+GLAPI void APIENTRY glVertexStream4sATI (GLenum stream, GLshort x, GLshort y, GLshort z, GLshort w);
+GLAPI void APIENTRY glVertexStream4svATI (GLenum stream, const GLshort *coords);
+GLAPI void APIENTRY glVertexStream4iATI (GLenum stream, GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glVertexStream4ivATI (GLenum stream, const GLint *coords);
+GLAPI void APIENTRY glVertexStream4fATI (GLenum stream, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glVertexStream4fvATI (GLenum stream, const GLfloat *coords);
+GLAPI void APIENTRY glVertexStream4dATI (GLenum stream, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glVertexStream4dvATI (GLenum stream, const GLdouble *coords);
+GLAPI void APIENTRY glNormalStream3bATI (GLenum stream, GLbyte nx, GLbyte ny, GLbyte nz);
+GLAPI void APIENTRY glNormalStream3bvATI (GLenum stream, const GLbyte *coords);
+GLAPI void APIENTRY glNormalStream3sATI (GLenum stream, GLshort nx, GLshort ny, GLshort nz);
+GLAPI void APIENTRY glNormalStream3svATI (GLenum stream, const GLshort *coords);
+GLAPI void APIENTRY glNormalStream3iATI (GLenum stream, GLint nx, GLint ny, GLint nz);
+GLAPI void APIENTRY glNormalStream3ivATI (GLenum stream, const GLint *coords);
+GLAPI void APIENTRY glNormalStream3fATI (GLenum stream, GLfloat nx, GLfloat ny, GLfloat nz);
+GLAPI void APIENTRY glNormalStream3fvATI (GLenum stream, const GLfloat *coords);
+GLAPI void APIENTRY glNormalStream3dATI (GLenum stream, GLdouble nx, GLdouble ny, GLdouble nz);
+GLAPI void APIENTRY glNormalStream3dvATI (GLenum stream, const GLdouble *coords);
+GLAPI void APIENTRY glClientActiveVertexStreamATI (GLenum stream);
+GLAPI void APIENTRY glVertexBlendEnviATI (GLenum pname, GLint param);
+GLAPI void APIENTRY glVertexBlendEnvfATI (GLenum pname, GLfloat param);
+#endif
+#endif /* GL_ATI_vertex_streams */
+
+#ifndef GL_EXT_422_pixels
+#define GL_EXT_422_pixels 1
+#define GL_422_EXT 0x80CC
+#define GL_422_REV_EXT 0x80CD
+#define GL_422_AVERAGE_EXT 0x80CE
+#define GL_422_REV_AVERAGE_EXT 0x80CF
+#endif /* GL_EXT_422_pixels */
+
+#ifndef GL_EXT_EGL_image_storage
+#define GL_EXT_EGL_image_storage 1
+typedef void *GLeglImageOES;
+typedef void (APIENTRYP PFNGLEGLIMAGETARGETTEXSTORAGEEXTPROC) (GLenum target, GLeglImageOES image, const GLint* attrib_list);
+typedef void (APIENTRYP PFNGLEGLIMAGETARGETTEXTURESTORAGEEXTPROC) (GLuint texture, GLeglImageOES image, const GLint* attrib_list);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glEGLImageTargetTexStorageEXT (GLenum target, GLeglImageOES image, const GLint* attrib_list);
+GLAPI void APIENTRY glEGLImageTargetTextureStorageEXT (GLuint texture, GLeglImageOES image, const GLint* attrib_list);
+#endif
+#endif /* GL_EXT_EGL_image_storage */
+
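+/*
+ * Example (informative): a sketch of GL_EXT_EGL_image_storage, assuming
+ * an EGLImage was created through the EGL API beforehand and that a
+ * NULL attribute list is acceptable, as the extension allows; egl_image
+ * is a hypothetical application variable holding that EGLImage handle.
+ *
+ *     glBindTexture(GL_TEXTURE_2D, tex);
+ *     glEGLImageTargetTexStorageEXT(GL_TEXTURE_2D,
+ *                                   (GLeglImageOES)egl_image, NULL);
+ *     // tex now has immutable storage backed by the EGLImage
+ */
+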
+#ifndef GL_EXT_EGL_sync
+#define GL_EXT_EGL_sync 1
+#endif /* GL_EXT_EGL_sync */
+
+#ifndef GL_EXT_abgr
+#define GL_EXT_abgr 1
+#define GL_ABGR_EXT 0x8000
+#endif /* GL_EXT_abgr */
+
+#ifndef GL_EXT_bgra
+#define GL_EXT_bgra 1
+#define GL_BGR_EXT 0x80E0
+#define GL_BGRA_EXT 0x80E1
+#endif /* GL_EXT_bgra */
+
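+/*
+ * Example (informative): GL_EXT_bgra adds pixel-format tokens only, so
+ * no new entry points are declared. Typical use is uploading 32-bit
+ * BGRA data (such as a Windows DIB) without swizzling on the CPU:
+ *
+ *     glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
+ *                  GL_BGRA_EXT, GL_UNSIGNED_BYTE, pixels);
+ */
+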
+#ifndef GL_EXT_bindable_uniform
+#define GL_EXT_bindable_uniform 1
+#define GL_MAX_VERTEX_BINDABLE_UNIFORMS_EXT 0x8DE2
+#define GL_MAX_FRAGMENT_BINDABLE_UNIFORMS_EXT 0x8DE3
+#define GL_MAX_GEOMETRY_BINDABLE_UNIFORMS_EXT 0x8DE4
+#define GL_MAX_BINDABLE_UNIFORM_SIZE_EXT 0x8DED
+#define GL_UNIFORM_BUFFER_EXT 0x8DEE
+#define GL_UNIFORM_BUFFER_BINDING_EXT 0x8DEF
+typedef void (APIENTRYP PFNGLUNIFORMBUFFEREXTPROC) (GLuint program, GLint location, GLuint buffer);
+typedef GLint (APIENTRYP PFNGLGETUNIFORMBUFFERSIZEEXTPROC) (GLuint program, GLint location);
+typedef GLintptr (APIENTRYP PFNGLGETUNIFORMOFFSETEXTPROC) (GLuint program, GLint location);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glUniformBufferEXT (GLuint program, GLint location, GLuint buffer);
+GLAPI GLint APIENTRY glGetUniformBufferSizeEXT (GLuint program, GLint location);
+GLAPI GLintptr APIENTRY glGetUniformOffsetEXT (GLuint program, GLint location);
+#endif
+#endif /* GL_EXT_bindable_uniform */
+
+#ifndef GL_EXT_blend_color
+#define GL_EXT_blend_color 1
+#define GL_CONSTANT_COLOR_EXT 0x8001
+#define GL_ONE_MINUS_CONSTANT_COLOR_EXT 0x8002
+#define GL_CONSTANT_ALPHA_EXT 0x8003
+#define GL_ONE_MINUS_CONSTANT_ALPHA_EXT 0x8004
+#define GL_BLEND_COLOR_EXT 0x8005
+typedef void (APIENTRYP PFNGLBLENDCOLOREXTPROC) (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendColorEXT (GLfloat red, GLfloat green, GLfloat blue, GLfloat alpha);
+#endif
+#endif /* GL_EXT_blend_color */
+
+#ifndef GL_EXT_blend_equation_separate
+#define GL_EXT_blend_equation_separate 1
+#define GL_BLEND_EQUATION_RGB_EXT 0x8009
+#define GL_BLEND_EQUATION_ALPHA_EXT 0x883D
+typedef void (APIENTRYP PFNGLBLENDEQUATIONSEPARATEEXTPROC) (GLenum modeRGB, GLenum modeAlpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendEquationSeparateEXT (GLenum modeRGB, GLenum modeAlpha);
+#endif
+#endif /* GL_EXT_blend_equation_separate */
+
+#ifndef GL_EXT_blend_func_separate
+#define GL_EXT_blend_func_separate 1
+#define GL_BLEND_DST_RGB_EXT 0x80C8
+#define GL_BLEND_SRC_RGB_EXT 0x80C9
+#define GL_BLEND_DST_ALPHA_EXT 0x80CA
+#define GL_BLEND_SRC_ALPHA_EXT 0x80CB
+typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEEXTPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendFuncSeparateEXT (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+#endif
+#endif /* GL_EXT_blend_func_separate */
+
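+/*
+ * Example (informative): GL_EXT_blend_func_separate lets the RGB and
+ * alpha channels blend with different factors. The sketch below does
+ * classic source-alpha blending on color while leaving the destination
+ * alpha untouched (dstA' = 0*srcA + 1*dstA):
+ *
+ *     glEnable(GL_BLEND);
+ *     glBlendFuncSeparateEXT(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA,
+ *                            GL_ZERO, GL_ONE);
+ */
+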
+#ifndef GL_EXT_blend_logic_op
+#define GL_EXT_blend_logic_op 1
+#endif /* GL_EXT_blend_logic_op */
+
+#ifndef GL_EXT_blend_minmax
+#define GL_EXT_blend_minmax 1
+#define GL_MIN_EXT 0x8007
+#define GL_MAX_EXT 0x8008
+#define GL_FUNC_ADD_EXT 0x8006
+#define GL_BLEND_EQUATION_EXT 0x8009
+typedef void (APIENTRYP PFNGLBLENDEQUATIONEXTPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendEquationEXT (GLenum mode);
+#endif
+#endif /* GL_EXT_blend_minmax */
+
+#ifndef GL_EXT_blend_subtract
+#define GL_EXT_blend_subtract 1
+#define GL_FUNC_SUBTRACT_EXT 0x800A
+#define GL_FUNC_REVERSE_SUBTRACT_EXT 0x800B
+#endif /* GL_EXT_blend_subtract */
+
+#ifndef GL_EXT_clip_volume_hint
+#define GL_EXT_clip_volume_hint 1
+#define GL_CLIP_VOLUME_CLIPPING_HINT_EXT 0x80F0
+#endif /* GL_EXT_clip_volume_hint */
+
+#ifndef GL_EXT_cmyka
+#define GL_EXT_cmyka 1
+#define GL_CMYK_EXT 0x800C
+#define GL_CMYKA_EXT 0x800D
+#define GL_PACK_CMYK_HINT_EXT 0x800E
+#define GL_UNPACK_CMYK_HINT_EXT 0x800F
+#endif /* GL_EXT_cmyka */
+
+#ifndef GL_EXT_color_subtable
+#define GL_EXT_color_subtable 1
+typedef void (APIENTRYP PFNGLCOLORSUBTABLEEXTPROC) (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLCOPYCOLORSUBTABLEEXTPROC) (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorSubTableEXT (GLenum target, GLsizei start, GLsizei count, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glCopyColorSubTableEXT (GLenum target, GLsizei start, GLint x, GLint y, GLsizei width);
+#endif
+#endif /* GL_EXT_color_subtable */
+
+#ifndef GL_EXT_compiled_vertex_array
+#define GL_EXT_compiled_vertex_array 1
+#define GL_ARRAY_ELEMENT_LOCK_FIRST_EXT 0x81A8
+#define GL_ARRAY_ELEMENT_LOCK_COUNT_EXT 0x81A9
+typedef void (APIENTRYP PFNGLLOCKARRAYSEXTPROC) (GLint first, GLsizei count);
+typedef void (APIENTRYP PFNGLUNLOCKARRAYSEXTPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glLockArraysEXT (GLint first, GLsizei count);
+GLAPI void APIENTRY glUnlockArraysEXT (void);
+#endif
+#endif /* GL_EXT_compiled_vertex_array */
+
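+/*
+ * Example (informative): GL_EXT_compiled_vertex_array lets a multi-pass
+ * renderer lock the currently enabled vertex arrays so the driver may
+ * reuse transformed vertices across draws over the same element range:
+ *
+ *     glLockArraysEXT(0, vertex_count);
+ *     glDrawElements(GL_TRIANGLES, n, GL_UNSIGNED_SHORT, indices);  // pass 1
+ *     glDrawElements(GL_TRIANGLES, n, GL_UNSIGNED_SHORT, indices);  // pass 2
+ *     glUnlockArraysEXT();
+ */
+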
+#ifndef GL_EXT_convolution
+#define GL_EXT_convolution 1
+#define GL_CONVOLUTION_1D_EXT 0x8010
+#define GL_CONVOLUTION_2D_EXT 0x8011
+#define GL_SEPARABLE_2D_EXT 0x8012
+#define GL_CONVOLUTION_BORDER_MODE_EXT 0x8013
+#define GL_CONVOLUTION_FILTER_SCALE_EXT 0x8014
+#define GL_CONVOLUTION_FILTER_BIAS_EXT 0x8015
+#define GL_REDUCE_EXT 0x8016
+#define GL_CONVOLUTION_FORMAT_EXT 0x8017
+#define GL_CONVOLUTION_WIDTH_EXT 0x8018
+#define GL_CONVOLUTION_HEIGHT_EXT 0x8019
+#define GL_MAX_CONVOLUTION_WIDTH_EXT 0x801A
+#define GL_MAX_CONVOLUTION_HEIGHT_EXT 0x801B
+#define GL_POST_CONVOLUTION_RED_SCALE_EXT 0x801C
+#define GL_POST_CONVOLUTION_GREEN_SCALE_EXT 0x801D
+#define GL_POST_CONVOLUTION_BLUE_SCALE_EXT 0x801E
+#define GL_POST_CONVOLUTION_ALPHA_SCALE_EXT 0x801F
+#define GL_POST_CONVOLUTION_RED_BIAS_EXT 0x8020
+#define GL_POST_CONVOLUTION_GREEN_BIAS_EXT 0x8021
+#define GL_POST_CONVOLUTION_BLUE_BIAS_EXT 0x8022
+#define GL_POST_CONVOLUTION_ALPHA_BIAS_EXT 0x8023
+typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER1DEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *image);
+typedef void (APIENTRYP PFNGLCONVOLUTIONFILTER2DEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *image);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFEXTPROC) (GLenum target, GLenum pname, GLfloat params);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERFVEXTPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIEXTPROC) (GLenum target, GLenum pname, GLint params);
+typedef void (APIENTRYP PFNGLCONVOLUTIONPARAMETERIVEXTPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER1DEXTPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCOPYCONVOLUTIONFILTER2DEXTPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONFILTEREXTPROC) (GLenum target, GLenum format, GLenum type, void *image);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCONVOLUTIONPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETSEPARABLEFILTEREXTPROC) (GLenum target, GLenum format, GLenum type, void *row, void *column, void *span);
+typedef void (APIENTRYP PFNGLSEPARABLEFILTER2DEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *row, const void *column);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glConvolutionFilter1DEXT (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *image);
+GLAPI void APIENTRY glConvolutionFilter2DEXT (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *image);
+GLAPI void APIENTRY glConvolutionParameterfEXT (GLenum target, GLenum pname, GLfloat params);
+GLAPI void APIENTRY glConvolutionParameterfvEXT (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glConvolutionParameteriEXT (GLenum target, GLenum pname, GLint params);
+GLAPI void APIENTRY glConvolutionParameterivEXT (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glCopyConvolutionFilter1DEXT (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glCopyConvolutionFilter2DEXT (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetConvolutionFilterEXT (GLenum target, GLenum format, GLenum type, void *image);
+GLAPI void APIENTRY glGetConvolutionParameterfvEXT (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetConvolutionParameterivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetSeparableFilterEXT (GLenum target, GLenum format, GLenum type, void *row, void *column, void *span);
+GLAPI void APIENTRY glSeparableFilter2DEXT (GLenum target, GLenum internalformat, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *row, const void *column);
+#endif
+#endif /* GL_EXT_convolution */
+
+#ifndef GL_EXT_coordinate_frame
+#define GL_EXT_coordinate_frame 1
+#define GL_TANGENT_ARRAY_EXT 0x8439
+#define GL_BINORMAL_ARRAY_EXT 0x843A
+#define GL_CURRENT_TANGENT_EXT 0x843B
+#define GL_CURRENT_BINORMAL_EXT 0x843C
+#define GL_TANGENT_ARRAY_TYPE_EXT 0x843E
+#define GL_TANGENT_ARRAY_STRIDE_EXT 0x843F
+#define GL_BINORMAL_ARRAY_TYPE_EXT 0x8440
+#define GL_BINORMAL_ARRAY_STRIDE_EXT 0x8441
+#define GL_TANGENT_ARRAY_POINTER_EXT 0x8442
+#define GL_BINORMAL_ARRAY_POINTER_EXT 0x8443
+#define GL_MAP1_TANGENT_EXT 0x8444
+#define GL_MAP2_TANGENT_EXT 0x8445
+#define GL_MAP1_BINORMAL_EXT 0x8446
+#define GL_MAP2_BINORMAL_EXT 0x8447
+typedef void (APIENTRYP PFNGLTANGENT3BEXTPROC) (GLbyte tx, GLbyte ty, GLbyte tz);
+typedef void (APIENTRYP PFNGLTANGENT3BVEXTPROC) (const GLbyte *v);
+typedef void (APIENTRYP PFNGLTANGENT3DEXTPROC) (GLdouble tx, GLdouble ty, GLdouble tz);
+typedef void (APIENTRYP PFNGLTANGENT3DVEXTPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLTANGENT3FEXTPROC) (GLfloat tx, GLfloat ty, GLfloat tz);
+typedef void (APIENTRYP PFNGLTANGENT3FVEXTPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLTANGENT3IEXTPROC) (GLint tx, GLint ty, GLint tz);
+typedef void (APIENTRYP PFNGLTANGENT3IVEXTPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLTANGENT3SEXTPROC) (GLshort tx, GLshort ty, GLshort tz);
+typedef void (APIENTRYP PFNGLTANGENT3SVEXTPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLBINORMAL3BEXTPROC) (GLbyte bx, GLbyte by, GLbyte bz);
+typedef void (APIENTRYP PFNGLBINORMAL3BVEXTPROC) (const GLbyte *v);
+typedef void (APIENTRYP PFNGLBINORMAL3DEXTPROC) (GLdouble bx, GLdouble by, GLdouble bz);
+typedef void (APIENTRYP PFNGLBINORMAL3DVEXTPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLBINORMAL3FEXTPROC) (GLfloat bx, GLfloat by, GLfloat bz);
+typedef void (APIENTRYP PFNGLBINORMAL3FVEXTPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLBINORMAL3IEXTPROC) (GLint bx, GLint by, GLint bz);
+typedef void (APIENTRYP PFNGLBINORMAL3IVEXTPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLBINORMAL3SEXTPROC) (GLshort bx, GLshort by, GLshort bz);
+typedef void (APIENTRYP PFNGLBINORMAL3SVEXTPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLTANGENTPOINTEREXTPROC) (GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLBINORMALPOINTEREXTPROC) (GLenum type, GLsizei stride, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTangent3bEXT (GLbyte tx, GLbyte ty, GLbyte tz);
+GLAPI void APIENTRY glTangent3bvEXT (const GLbyte *v);
+GLAPI void APIENTRY glTangent3dEXT (GLdouble tx, GLdouble ty, GLdouble tz);
+GLAPI void APIENTRY glTangent3dvEXT (const GLdouble *v);
+GLAPI void APIENTRY glTangent3fEXT (GLfloat tx, GLfloat ty, GLfloat tz);
+GLAPI void APIENTRY glTangent3fvEXT (const GLfloat *v);
+GLAPI void APIENTRY glTangent3iEXT (GLint tx, GLint ty, GLint tz);
+GLAPI void APIENTRY glTangent3ivEXT (const GLint *v);
+GLAPI void APIENTRY glTangent3sEXT (GLshort tx, GLshort ty, GLshort tz);
+GLAPI void APIENTRY glTangent3svEXT (const GLshort *v);
+GLAPI void APIENTRY glBinormal3bEXT (GLbyte bx, GLbyte by, GLbyte bz);
+GLAPI void APIENTRY glBinormal3bvEXT (const GLbyte *v);
+GLAPI void APIENTRY glBinormal3dEXT (GLdouble bx, GLdouble by, GLdouble bz);
+GLAPI void APIENTRY glBinormal3dvEXT (const GLdouble *v);
+GLAPI void APIENTRY glBinormal3fEXT (GLfloat bx, GLfloat by, GLfloat bz);
+GLAPI void APIENTRY glBinormal3fvEXT (const GLfloat *v);
+GLAPI void APIENTRY glBinormal3iEXT (GLint bx, GLint by, GLint bz);
+GLAPI void APIENTRY glBinormal3ivEXT (const GLint *v);
+GLAPI void APIENTRY glBinormal3sEXT (GLshort bx, GLshort by, GLshort bz);
+GLAPI void APIENTRY glBinormal3svEXT (const GLshort *v);
+GLAPI void APIENTRY glTangentPointerEXT (GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glBinormalPointerEXT (GLenum type, GLsizei stride, const void *pointer);
+#endif
+#endif /* GL_EXT_coordinate_frame */
+
+#ifndef GL_EXT_copy_texture
+#define GL_EXT_copy_texture 1
+typedef void (APIENTRYP PFNGLCOPYTEXIMAGE1DEXTPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+typedef void (APIENTRYP PFNGLCOPYTEXIMAGE2DEXTPROC) (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE1DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE2DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLCOPYTEXSUBIMAGE3DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCopyTexImage1DEXT (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+GLAPI void APIENTRY glCopyTexImage2DEXT (GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+GLAPI void APIENTRY glCopyTexSubImage1DEXT (GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glCopyTexSubImage2DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glCopyTexSubImage3DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_EXT_copy_texture */
+
+#ifndef GL_EXT_cull_vertex
+#define GL_EXT_cull_vertex 1
+#define GL_CULL_VERTEX_EXT 0x81AA
+#define GL_CULL_VERTEX_EYE_POSITION_EXT 0x81AB
+#define GL_CULL_VERTEX_OBJECT_POSITION_EXT 0x81AC
+typedef void (APIENTRYP PFNGLCULLPARAMETERDVEXTPROC) (GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLCULLPARAMETERFVEXTPROC) (GLenum pname, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCullParameterdvEXT (GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glCullParameterfvEXT (GLenum pname, GLfloat *params);
+#endif
+#endif /* GL_EXT_cull_vertex */
+
+#ifndef GL_EXT_debug_label
+#define GL_EXT_debug_label 1
+#define GL_PROGRAM_PIPELINE_OBJECT_EXT 0x8A4F
+#define GL_PROGRAM_OBJECT_EXT 0x8B40
+#define GL_SHADER_OBJECT_EXT 0x8B48
+#define GL_BUFFER_OBJECT_EXT 0x9151
+#define GL_QUERY_OBJECT_EXT 0x9153
+#define GL_VERTEX_ARRAY_OBJECT_EXT 0x9154
+typedef void (APIENTRYP PFNGLLABELOBJECTEXTPROC) (GLenum type, GLuint object, GLsizei length, const GLchar *label);
+typedef void (APIENTRYP PFNGLGETOBJECTLABELEXTPROC) (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glLabelObjectEXT (GLenum type, GLuint object, GLsizei length, const GLchar *label);
+GLAPI void APIENTRY glGetObjectLabelEXT (GLenum type, GLuint object, GLsizei bufSize, GLsizei *length, GLchar *label);
+#endif
+#endif /* GL_EXT_debug_label */
+
+#ifndef GL_EXT_debug_marker
+#define GL_EXT_debug_marker 1
+typedef void (APIENTRYP PFNGLINSERTEVENTMARKEREXTPROC) (GLsizei length, const GLchar *marker);
+typedef void (APIENTRYP PFNGLPUSHGROUPMARKEREXTPROC) (GLsizei length, const GLchar *marker);
+typedef void (APIENTRYP PFNGLPOPGROUPMARKEREXTPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glInsertEventMarkerEXT (GLsizei length, const GLchar *marker);
+GLAPI void APIENTRY glPushGroupMarkerEXT (GLsizei length, const GLchar *marker);
+GLAPI void APIENTRY glPopGroupMarkerEXT (void);
+#endif
+#endif /* GL_EXT_debug_marker */
+
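+/*
+ * Example (informative): GL_EXT_debug_marker annotates the command
+ * stream for tools such as apitrace. A length of 0 means the marker
+ * string is null-terminated; draw_shadow_casters() is a hypothetical
+ * application function:
+ *
+ *     glPushGroupMarkerEXT(0, "shadow pass");
+ *     draw_shadow_casters();
+ *     glPopGroupMarkerEXT();
+ */
+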
+#ifndef GL_EXT_depth_bounds_test
+#define GL_EXT_depth_bounds_test 1
+#define GL_DEPTH_BOUNDS_TEST_EXT 0x8890
+#define GL_DEPTH_BOUNDS_EXT 0x8891
+typedef void (APIENTRYP PFNGLDEPTHBOUNDSEXTPROC) (GLclampd zmin, GLclampd zmax);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDepthBoundsEXT (GLclampd zmin, GLclampd zmax);
+#endif
+#endif /* GL_EXT_depth_bounds_test */
+
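+/*
+ * Example (informative): GL_EXT_depth_bounds_test discards fragments
+ * whose stored depth-buffer value falls outside [zmin, zmax] in
+ * window-space depth, commonly used to cull stencil-shadow or light
+ * volumes:
+ *
+ *     glEnable(GL_DEPTH_BOUNDS_TEST_EXT);
+ *     glDepthBoundsEXT(0.25, 0.75);
+ *     // ... draw the light volume ...
+ *     glDisable(GL_DEPTH_BOUNDS_TEST_EXT);
+ */
+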
+#ifndef GL_EXT_direct_state_access
+#define GL_EXT_direct_state_access 1
+#define GL_PROGRAM_MATRIX_EXT 0x8E2D
+#define GL_TRANSPOSE_PROGRAM_MATRIX_EXT 0x8E2E
+#define GL_PROGRAM_MATRIX_STACK_DEPTH_EXT 0x8E2F
+typedef void (APIENTRYP PFNGLMATRIXLOADFEXTPROC) (GLenum mode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXLOADDEXTPROC) (GLenum mode, const GLdouble *m);
+typedef void (APIENTRYP PFNGLMATRIXMULTFEXTPROC) (GLenum mode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXMULTDEXTPROC) (GLenum mode, const GLdouble *m);
+typedef void (APIENTRYP PFNGLMATRIXLOADIDENTITYEXTPROC) (GLenum mode);
+typedef void (APIENTRYP PFNGLMATRIXROTATEFEXTPROC) (GLenum mode, GLfloat angle, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLMATRIXROTATEDEXTPROC) (GLenum mode, GLdouble angle, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLMATRIXSCALEFEXTPROC) (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLMATRIXSCALEDEXTPROC) (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLMATRIXTRANSLATEFEXTPROC) (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLMATRIXTRANSLATEDEXTPROC) (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLMATRIXFRUSTUMEXTPROC) (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
+typedef void (APIENTRYP PFNGLMATRIXORTHOEXTPROC) (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
+typedef void (APIENTRYP PFNGLMATRIXPOPEXTPROC) (GLenum mode);
+typedef void (APIENTRYP PFNGLMATRIXPUSHEXTPROC) (GLenum mode);
+typedef void (APIENTRYP PFNGLCLIENTATTRIBDEFAULTEXTPROC) (GLbitfield mask);
+typedef void (APIENTRYP PFNGLPUSHCLIENTATTRIBDEFAULTEXTPROC) (GLbitfield mask);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERFEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERFVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLCOPYTEXTUREIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+typedef void (APIENTRYP PFNGLCOPYTEXTUREIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETTEXTUREIMAGEEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum format, GLenum type, void *pixels);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERFVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETTEXTURELEVELPARAMETERFVEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETTEXTURELEVELPARAMETERIVEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXTURESUBIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLCOPYTEXTURESUBIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLBINDMULTITEXTUREEXTPROC) (GLenum texunit, GLenum target, GLuint texture);
+typedef void (APIENTRYP PFNGLMULTITEXCOORDPOINTEREXTPROC) (GLenum texunit, GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLMULTITEXENVFEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLMULTITEXENVFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLMULTITEXENVIEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLMULTITEXENVIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLMULTITEXGENDEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLdouble param);
+typedef void (APIENTRYP PFNGLMULTITEXGENDVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, const GLdouble *params);
+typedef void (APIENTRYP PFNGLMULTITEXGENFEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLMULTITEXGENFVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLMULTITEXGENIEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLMULTITEXGENIVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXENVFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXENVIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXGENDVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXGENFVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXGENIVEXTPROC) (GLenum texunit, GLenum coord, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLMULTITEXPARAMETERFEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLMULTITEXPARAMETERFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLMULTITEXIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLMULTITEXIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLMULTITEXSUBIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLMULTITEXSUBIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLCOPYMULTITEXIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+typedef void (APIENTRYP PFNGLCOPYMULTITEXIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+typedef void (APIENTRYP PFNGLCOPYMULTITEXSUBIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLCOPYMULTITEXSUBIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETMULTITEXIMAGEEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum format, GLenum type, void *pixels);
+typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERFVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXLEVELPARAMETERFVEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXLEVELPARAMETERIVEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLMULTITEXIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLMULTITEXSUBIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLCOPYMULTITEXSUBIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLENABLECLIENTSTATEINDEXEDEXTPROC) (GLenum array, GLuint index);
+typedef void (APIENTRYP PFNGLDISABLECLIENTSTATEINDEXEDEXTPROC) (GLenum array, GLuint index);
+typedef void (APIENTRYP PFNGLGETFLOATINDEXEDVEXTPROC) (GLenum target, GLuint index, GLfloat *data);
+typedef void (APIENTRYP PFNGLGETDOUBLEINDEXEDVEXTPROC) (GLenum target, GLuint index, GLdouble *data);
+typedef void (APIENTRYP PFNGLGETPOINTERINDEXEDVEXTPROC) (GLenum target, GLuint index, void **data);
+typedef void (APIENTRYP PFNGLENABLEINDEXEDEXTPROC) (GLenum target, GLuint index);
+typedef void (APIENTRYP PFNGLDISABLEINDEXEDEXTPROC) (GLenum target, GLuint index);
+typedef GLboolean (APIENTRYP PFNGLISENABLEDINDEXEDEXTPROC) (GLenum target, GLuint index);
+typedef void (APIENTRYP PFNGLGETINTEGERINDEXEDVEXTPROC) (GLenum target, GLuint index, GLint *data);
+typedef void (APIENTRYP PFNGLGETBOOLEANINDEXEDVEXTPROC) (GLenum target, GLuint index, GLboolean *data);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTUREIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTUREIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTUREIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE3DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE2DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDTEXTURESUBIMAGE1DEXTPROC) (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDTEXTUREIMAGEEXTPROC) (GLuint texture, GLenum target, GLint lod, void *img);
+typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXSUBIMAGE3DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXSUBIMAGE2DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLCOMPRESSEDMULTITEXSUBIMAGE1DEXTPROC) (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *bits);
+typedef void (APIENTRYP PFNGLGETCOMPRESSEDMULTITEXIMAGEEXTPROC) (GLenum texunit, GLenum target, GLint lod, void *img);
+typedef void (APIENTRYP PFNGLMATRIXLOADTRANSPOSEFEXTPROC) (GLenum mode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXLOADTRANSPOSEDEXTPROC) (GLenum mode, const GLdouble *m);
+typedef void (APIENTRYP PFNGLMATRIXMULTTRANSPOSEFEXTPROC) (GLenum mode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXMULTTRANSPOSEDEXTPROC) (GLenum mode, const GLdouble *m);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERDATAEXTPROC) (GLuint buffer, GLsizeiptr size, const void *data, GLenum usage);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERSUBDATAEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+typedef void *(APIENTRYP PFNGLMAPNAMEDBUFFEREXTPROC) (GLuint buffer, GLenum access);
+typedef GLboolean (APIENTRYP PFNGLUNMAPNAMEDBUFFEREXTPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPARAMETERIVEXTPROC) (GLuint buffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPOINTERVEXTPROC) (GLuint buffer, GLenum pname, void **params);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERSUBDATAEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, void *data);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FEXTPROC) (GLuint program, GLint location, GLfloat v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FEXTPROC) (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IEXTPROC) (GLuint program, GLint location, GLint v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IEXTPROC) (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4FVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4IVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3FVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+typedef void (APIENTRYP PFNGLTEXTUREBUFFEREXTPROC) (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer);
+typedef void (APIENTRYP PFNGLMULTITEXBUFFEREXTPROC) (GLenum texunit, GLenum target, GLenum internalformat, GLuint buffer);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLTEXTUREPARAMETERIUIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETTEXTUREPARAMETERIUIVEXTPROC) (GLuint texture, GLenum target, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLMULTITEXPARAMETERIUIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERIIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMULTITEXPARAMETERIUIVEXTPROC) (GLenum texunit, GLenum target, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIEXTPROC) (GLuint program, GLint location, GLuint v0);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIEXTPROC) (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4UIVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERS4FVEXTPROC) (GLuint program, GLenum target, GLuint index, GLsizei count, const GLfloat *params);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4IEXTPROC) (GLuint program, GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4IVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLint *params);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERSI4IVEXTPROC) (GLuint program, GLenum target, GLuint index, GLsizei count, const GLint *params);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4UIEXTPROC) (GLuint program, GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERI4UIVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLuint *params);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETERSI4UIVEXTPROC) (GLuint program, GLenum target, GLuint index, GLsizei count, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERIIVEXTPROC) (GLuint program, GLenum target, GLuint index, GLint *params);
+typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERIUIVEXTPROC) (GLuint program, GLenum target, GLuint index, GLuint *params);
+typedef void (APIENTRYP PFNGLENABLECLIENTSTATEIEXTPROC) (GLenum array, GLuint index);
+typedef void (APIENTRYP PFNGLDISABLECLIENTSTATEIEXTPROC) (GLenum array, GLuint index);
+typedef void (APIENTRYP PFNGLGETFLOATI_VEXTPROC) (GLenum pname, GLuint index, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETDOUBLEI_VEXTPROC) (GLenum pname, GLuint index, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETPOINTERI_VEXTPROC) (GLenum pname, GLuint index, void **params);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMSTRINGEXTPROC) (GLuint program, GLenum target, GLenum format, GLsizei len, const void *string);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4DEXTPROC) (GLuint program, GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4DVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLdouble *params);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4FEXTPROC) (GLuint program, GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLNAMEDPROGRAMLOCALPARAMETER4FVEXTPROC) (GLuint program, GLenum target, GLuint index, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERDVEXTPROC) (GLuint program, GLenum target, GLuint index, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMLOCALPARAMETERFVEXTPROC) (GLuint program, GLenum target, GLuint index, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMIVEXTPROC) (GLuint program, GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETNAMEDPROGRAMSTRINGEXTPROC) (GLuint program, GLenum target, GLenum pname, void *string);
+typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEEXTPROC) (GLuint renderbuffer, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETNAMEDRENDERBUFFERPARAMETERIVEXTPROC) (GLuint renderbuffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLuint renderbuffer, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLNAMEDRENDERBUFFERSTORAGEMULTISAMPLECOVERAGEEXTPROC) (GLuint renderbuffer, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height);
+typedef GLenum (APIENTRYP PFNGLCHECKNAMEDFRAMEBUFFERSTATUSEXTPROC) (GLuint framebuffer, GLenum target);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURE1DEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURE2DEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURE3DEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERRENDERBUFFEREXTPROC) (GLuint framebuffer, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC) (GLuint framebuffer, GLenum attachment, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGENERATETEXTUREMIPMAPEXTPROC) (GLuint texture, GLenum target);
+typedef void (APIENTRYP PFNGLGENERATEMULTITEXMIPMAPEXTPROC) (GLenum texunit, GLenum target);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERDRAWBUFFEREXTPROC) (GLuint framebuffer, GLenum mode);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERDRAWBUFFERSEXTPROC) (GLuint framebuffer, GLsizei n, const GLenum *bufs);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERREADBUFFEREXTPROC) (GLuint framebuffer, GLenum mode);
+typedef void (APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVEXTPROC) (GLuint framebuffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLNAMEDCOPYBUFFERSUBDATAEXTPROC) (GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREEXTPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTURELAYEREXTPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint layer);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERTEXTUREFACEEXTPROC) (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLenum face);
+typedef void (APIENTRYP PFNGLTEXTURERENDERBUFFEREXTPROC) (GLuint texture, GLenum target, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLMULTITEXRENDERBUFFEREXTPROC) (GLenum texunit, GLenum target, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYCOLOROFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYEDGEFLAGOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYINDEXOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYNORMALOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYTEXCOORDOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYMULTITEXCOORDOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLenum texunit, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYFOGCOORDOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYSECONDARYCOLOROFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBIOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLENABLEVERTEXARRAYEXTPROC) (GLuint vaobj, GLenum array);
+typedef void (APIENTRYP PFNGLDISABLEVERTEXARRAYEXTPROC) (GLuint vaobj, GLenum array);
+typedef void (APIENTRYP PFNGLENABLEVERTEXARRAYATTRIBEXTPROC) (GLuint vaobj, GLuint index);
+typedef void (APIENTRYP PFNGLDISABLEVERTEXARRAYATTRIBEXTPROC) (GLuint vaobj, GLuint index);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYINTEGERVEXTPROC) (GLuint vaobj, GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYPOINTERVEXTPROC) (GLuint vaobj, GLenum pname, void **param);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYINTEGERI_VEXTPROC) (GLuint vaobj, GLuint index, GLenum pname, GLint *param);
+typedef void (APIENTRYP PFNGLGETVERTEXARRAYPOINTERI_VEXTPROC) (GLuint vaobj, GLuint index, GLenum pname, void **param);
+typedef void *(APIENTRYP PFNGLMAPNAMEDBUFFERRANGEEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length, GLbitfield access);
+typedef void (APIENTRYP PFNGLFLUSHMAPPEDNAMEDBUFFERRANGEEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr length);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERSTORAGEEXTPROC) (GLuint buffer, GLsizeiptr size, const void *data, GLbitfield flags);
+typedef void (APIENTRYP PFNGLCLEARNAMEDBUFFERDATAEXTPROC) (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLCLEARNAMEDBUFFERSUBDATAEXTPROC) (GLuint buffer, GLenum internalformat, GLsizeiptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERPARAMETERIEXTPROC) (GLuint framebuffer, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLGETNAMEDFRAMEBUFFERPARAMETERIVEXTPROC) (GLuint framebuffer, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DEXTPROC) (GLuint program, GLint location, GLdouble x);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DEXTPROC) (GLuint program, GLint location, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DEXTPROC) (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DEXTPROC) (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM1DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM2DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM3DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORM4DVEXTPROC) (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X3DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX2X4DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X2DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX3X4DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X2DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMMATRIX4X3DVEXTPROC) (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+typedef void (APIENTRYP PFNGLTEXTUREBUFFERRANGEEXTPROC) (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE1DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE2DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE3DEXTPROC) (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE2DMULTISAMPLEEXTPROC) (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGE3DMULTISAMPLEEXTPROC) (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+typedef void (APIENTRYP PFNGLVERTEXARRAYBINDVERTEXBUFFEREXTPROC) (GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBFORMATEXTPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBIFORMATEXTPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBLFORMATEXTPROC) (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBBINDINGEXTPROC) (GLuint vaobj, GLuint attribindex, GLuint bindingindex);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXBINDINGDIVISOREXTPROC) (GLuint vaobj, GLuint bindingindex, GLuint divisor);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBLOFFSETEXTPROC) (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+typedef void (APIENTRYP PFNGLTEXTUREPAGECOMMITMENTEXTPROC) (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+typedef void (APIENTRYP PFNGLVERTEXARRAYVERTEXATTRIBDIVISOREXTPROC) (GLuint vaobj, GLuint index, GLuint divisor);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMatrixLoadfEXT (GLenum mode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixLoaddEXT (GLenum mode, const GLdouble *m);
+GLAPI void APIENTRY glMatrixMultfEXT (GLenum mode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixMultdEXT (GLenum mode, const GLdouble *m);
+GLAPI void APIENTRY glMatrixLoadIdentityEXT (GLenum mode);
+GLAPI void APIENTRY glMatrixRotatefEXT (GLenum mode, GLfloat angle, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glMatrixRotatedEXT (GLenum mode, GLdouble angle, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glMatrixScalefEXT (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glMatrixScaledEXT (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glMatrixTranslatefEXT (GLenum mode, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glMatrixTranslatedEXT (GLenum mode, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glMatrixFrustumEXT (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
+GLAPI void APIENTRY glMatrixOrthoEXT (GLenum mode, GLdouble left, GLdouble right, GLdouble bottom, GLdouble top, GLdouble zNear, GLdouble zFar);
+GLAPI void APIENTRY glMatrixPopEXT (GLenum mode);
+GLAPI void APIENTRY glMatrixPushEXT (GLenum mode);
+GLAPI void APIENTRY glClientAttribDefaultEXT (GLbitfield mask);
+GLAPI void APIENTRY glPushClientAttribDefaultEXT (GLbitfield mask);
+GLAPI void APIENTRY glTextureParameterfEXT (GLuint texture, GLenum target, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glTextureParameterfvEXT (GLuint texture, GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glTextureParameteriEXT (GLuint texture, GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glTextureParameterivEXT (GLuint texture, GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glTextureImage1DEXT (GLuint texture, GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTextureImage2DEXT (GLuint texture, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTextureSubImage1DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTextureSubImage2DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glCopyTextureImage1DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+GLAPI void APIENTRY glCopyTextureImage2DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+GLAPI void APIENTRY glCopyTextureSubImage1DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glCopyTextureSubImage2DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetTextureImageEXT (GLuint texture, GLenum target, GLint level, GLenum format, GLenum type, void *pixels);
+GLAPI void APIENTRY glGetTextureParameterfvEXT (GLuint texture, GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetTextureParameterivEXT (GLuint texture, GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetTextureLevelParameterfvEXT (GLuint texture, GLenum target, GLint level, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetTextureLevelParameterivEXT (GLuint texture, GLenum target, GLint level, GLenum pname, GLint *params);
+GLAPI void APIENTRY glTextureImage3DEXT (GLuint texture, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTextureSubImage3DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glCopyTextureSubImage3DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glBindMultiTextureEXT (GLenum texunit, GLenum target, GLuint texture);
+GLAPI void APIENTRY glMultiTexCoordPointerEXT (GLenum texunit, GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glMultiTexEnvfEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glMultiTexEnvfvEXT (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glMultiTexEnviEXT (GLenum texunit, GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glMultiTexEnvivEXT (GLenum texunit, GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glMultiTexGendEXT (GLenum texunit, GLenum coord, GLenum pname, GLdouble param);
+GLAPI void APIENTRY glMultiTexGendvEXT (GLenum texunit, GLenum coord, GLenum pname, const GLdouble *params);
+GLAPI void APIENTRY glMultiTexGenfEXT (GLenum texunit, GLenum coord, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glMultiTexGenfvEXT (GLenum texunit, GLenum coord, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glMultiTexGeniEXT (GLenum texunit, GLenum coord, GLenum pname, GLint param);
+GLAPI void APIENTRY glMultiTexGenivEXT (GLenum texunit, GLenum coord, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glGetMultiTexEnvfvEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMultiTexEnvivEXT (GLenum texunit, GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMultiTexGendvEXT (GLenum texunit, GLenum coord, GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glGetMultiTexGenfvEXT (GLenum texunit, GLenum coord, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMultiTexGenivEXT (GLenum texunit, GLenum coord, GLenum pname, GLint *params);
+GLAPI void APIENTRY glMultiTexParameteriEXT (GLenum texunit, GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glMultiTexParameterivEXT (GLenum texunit, GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glMultiTexParameterfEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glMultiTexParameterfvEXT (GLenum texunit, GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glMultiTexImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glMultiTexImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glMultiTexSubImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glMultiTexSubImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glCopyMultiTexImage1DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLint border);
+GLAPI void APIENTRY glCopyMultiTexImage2DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLint x, GLint y, GLsizei width, GLsizei height, GLint border);
+GLAPI void APIENTRY glCopyMultiTexSubImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glCopyMultiTexSubImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetMultiTexImageEXT (GLenum texunit, GLenum target, GLint level, GLenum format, GLenum type, void *pixels);
+GLAPI void APIENTRY glGetMultiTexParameterfvEXT (GLenum texunit, GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMultiTexParameterivEXT (GLenum texunit, GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMultiTexLevelParameterfvEXT (GLenum texunit, GLenum target, GLint level, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMultiTexLevelParameterivEXT (GLenum texunit, GLenum target, GLint level, GLenum pname, GLint *params);
+GLAPI void APIENTRY glMultiTexImage3DEXT (GLenum texunit, GLenum target, GLint level, GLint internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glMultiTexSubImage3DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glCopyMultiTexSubImage3DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glEnableClientStateIndexedEXT (GLenum array, GLuint index);
+GLAPI void APIENTRY glDisableClientStateIndexedEXT (GLenum array, GLuint index);
+GLAPI void APIENTRY glGetFloatIndexedvEXT (GLenum target, GLuint index, GLfloat *data);
+GLAPI void APIENTRY glGetDoubleIndexedvEXT (GLenum target, GLuint index, GLdouble *data);
+GLAPI void APIENTRY glGetPointerIndexedvEXT (GLenum target, GLuint index, void **data);
+GLAPI void APIENTRY glEnableIndexedEXT (GLenum target, GLuint index);
+GLAPI void APIENTRY glDisableIndexedEXT (GLenum target, GLuint index);
+GLAPI GLboolean APIENTRY glIsEnabledIndexedEXT (GLenum target, GLuint index);
+GLAPI void APIENTRY glGetIntegerIndexedvEXT (GLenum target, GLuint index, GLint *data);
+GLAPI void APIENTRY glGetBooleanIndexedvEXT (GLenum target, GLuint index, GLboolean *data);
+GLAPI void APIENTRY glCompressedTextureImage3DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedTextureImage2DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedTextureImage1DEXT (GLuint texture, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedTextureSubImage3DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedTextureSubImage2DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedTextureSubImage1DEXT (GLuint texture, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glGetCompressedTextureImageEXT (GLuint texture, GLenum target, GLint lod, void *img);
+GLAPI void APIENTRY glCompressedMultiTexImage3DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedMultiTexImage2DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLint border, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedMultiTexImage1DEXT (GLenum texunit, GLenum target, GLint level, GLenum internalformat, GLsizei width, GLint border, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedMultiTexSubImage3DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedMultiTexSubImage2DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glCompressedMultiTexSubImage1DEXT (GLenum texunit, GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLsizei imageSize, const void *bits);
+GLAPI void APIENTRY glGetCompressedMultiTexImageEXT (GLenum texunit, GLenum target, GLint lod, void *img);
+GLAPI void APIENTRY glMatrixLoadTransposefEXT (GLenum mode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixLoadTransposedEXT (GLenum mode, const GLdouble *m);
+GLAPI void APIENTRY glMatrixMultTransposefEXT (GLenum mode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixMultTransposedEXT (GLenum mode, const GLdouble *m);
+GLAPI void APIENTRY glNamedBufferDataEXT (GLuint buffer, GLsizeiptr size, const void *data, GLenum usage);
+GLAPI void APIENTRY glNamedBufferSubDataEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+GLAPI void *APIENTRY glMapNamedBufferEXT (GLuint buffer, GLenum access);
+GLAPI GLboolean APIENTRY glUnmapNamedBufferEXT (GLuint buffer);
+GLAPI void APIENTRY glGetNamedBufferParameterivEXT (GLuint buffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetNamedBufferPointervEXT (GLuint buffer, GLenum pname, void **params);
+GLAPI void APIENTRY glGetNamedBufferSubDataEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, void *data);
+GLAPI void APIENTRY glProgramUniform1fEXT (GLuint program, GLint location, GLfloat v0);
+GLAPI void APIENTRY glProgramUniform2fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1);
+GLAPI void APIENTRY glProgramUniform3fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2);
+GLAPI void APIENTRY glProgramUniform4fEXT (GLuint program, GLint location, GLfloat v0, GLfloat v1, GLfloat v2, GLfloat v3);
+GLAPI void APIENTRY glProgramUniform1iEXT (GLuint program, GLint location, GLint v0);
+GLAPI void APIENTRY glProgramUniform2iEXT (GLuint program, GLint location, GLint v0, GLint v1);
+GLAPI void APIENTRY glProgramUniform3iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2);
+GLAPI void APIENTRY glProgramUniform4iEXT (GLuint program, GLint location, GLint v0, GLint v1, GLint v2, GLint v3);
+GLAPI void APIENTRY glProgramUniform1fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform2fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform3fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform4fvEXT (GLuint program, GLint location, GLsizei count, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniform1ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform2ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform3ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniform4ivEXT (GLuint program, GLint location, GLsizei count, const GLint *value);
+GLAPI void APIENTRY glProgramUniformMatrix2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x2fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x4fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x3fvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLfloat *value);
+GLAPI void APIENTRY glTextureBufferEXT (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer);
+GLAPI void APIENTRY glMultiTexBufferEXT (GLenum texunit, GLenum target, GLenum internalformat, GLuint buffer);
+GLAPI void APIENTRY glTextureParameterIivEXT (GLuint texture, GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glTextureParameterIuivEXT (GLuint texture, GLenum target, GLenum pname, const GLuint *params);
+GLAPI void APIENTRY glGetTextureParameterIivEXT (GLuint texture, GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetTextureParameterIuivEXT (GLuint texture, GLenum target, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glMultiTexParameterIivEXT (GLenum texunit, GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glMultiTexParameterIuivEXT (GLenum texunit, GLenum target, GLenum pname, const GLuint *params);
+GLAPI void APIENTRY glGetMultiTexParameterIivEXT (GLenum texunit, GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMultiTexParameterIuivEXT (GLenum texunit, GLenum target, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glProgramUniform1uiEXT (GLuint program, GLint location, GLuint v0);
+GLAPI void APIENTRY glProgramUniform2uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1);
+GLAPI void APIENTRY glProgramUniform3uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2);
+GLAPI void APIENTRY glProgramUniform4uiEXT (GLuint program, GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GLAPI void APIENTRY glProgramUniform1uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniform2uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniform3uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glProgramUniform4uivEXT (GLuint program, GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glNamedProgramLocalParameters4fvEXT (GLuint program, GLenum target, GLuint index, GLsizei count, const GLfloat *params);
+GLAPI void APIENTRY glNamedProgramLocalParameterI4iEXT (GLuint program, GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glNamedProgramLocalParameterI4ivEXT (GLuint program, GLenum target, GLuint index, const GLint *params);
+GLAPI void APIENTRY glNamedProgramLocalParametersI4ivEXT (GLuint program, GLenum target, GLuint index, GLsizei count, const GLint *params);
+GLAPI void APIENTRY glNamedProgramLocalParameterI4uiEXT (GLuint program, GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+GLAPI void APIENTRY glNamedProgramLocalParameterI4uivEXT (GLuint program, GLenum target, GLuint index, const GLuint *params);
+GLAPI void APIENTRY glNamedProgramLocalParametersI4uivEXT (GLuint program, GLenum target, GLuint index, GLsizei count, const GLuint *params);
+GLAPI void APIENTRY glGetNamedProgramLocalParameterIivEXT (GLuint program, GLenum target, GLuint index, GLint *params);
+GLAPI void APIENTRY glGetNamedProgramLocalParameterIuivEXT (GLuint program, GLenum target, GLuint index, GLuint *params);
+GLAPI void APIENTRY glEnableClientStateiEXT (GLenum array, GLuint index);
+GLAPI void APIENTRY glDisableClientStateiEXT (GLenum array, GLuint index);
+GLAPI void APIENTRY glGetFloati_vEXT (GLenum pname, GLuint index, GLfloat *params);
+GLAPI void APIENTRY glGetDoublei_vEXT (GLenum pname, GLuint index, GLdouble *params);
+GLAPI void APIENTRY glGetPointeri_vEXT (GLenum pname, GLuint index, void **params);
+GLAPI void APIENTRY glNamedProgramStringEXT (GLuint program, GLenum target, GLenum format, GLsizei len, const void *string);
+GLAPI void APIENTRY glNamedProgramLocalParameter4dEXT (GLuint program, GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glNamedProgramLocalParameter4dvEXT (GLuint program, GLenum target, GLuint index, const GLdouble *params);
+GLAPI void APIENTRY glNamedProgramLocalParameter4fEXT (GLuint program, GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glNamedProgramLocalParameter4fvEXT (GLuint program, GLenum target, GLuint index, const GLfloat *params);
+GLAPI void APIENTRY glGetNamedProgramLocalParameterdvEXT (GLuint program, GLenum target, GLuint index, GLdouble *params);
+GLAPI void APIENTRY glGetNamedProgramLocalParameterfvEXT (GLuint program, GLenum target, GLuint index, GLfloat *params);
+GLAPI void APIENTRY glGetNamedProgramivEXT (GLuint program, GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetNamedProgramStringEXT (GLuint program, GLenum target, GLenum pname, void *string);
+GLAPI void APIENTRY glNamedRenderbufferStorageEXT (GLuint renderbuffer, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetNamedRenderbufferParameterivEXT (GLuint renderbuffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glNamedRenderbufferStorageMultisampleEXT (GLuint renderbuffer, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glNamedRenderbufferStorageMultisampleCoverageEXT (GLuint renderbuffer, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI GLenum APIENTRY glCheckNamedFramebufferStatusEXT (GLuint framebuffer, GLenum target);
+GLAPI void APIENTRY glNamedFramebufferTexture1DEXT (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GLAPI void APIENTRY glNamedFramebufferTexture2DEXT (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GLAPI void APIENTRY glNamedFramebufferTexture3DEXT (GLuint framebuffer, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+GLAPI void APIENTRY glNamedFramebufferRenderbufferEXT (GLuint framebuffer, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+GLAPI void APIENTRY glGetNamedFramebufferAttachmentParameterivEXT (GLuint framebuffer, GLenum attachment, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGenerateTextureMipmapEXT (GLuint texture, GLenum target);
+GLAPI void APIENTRY glGenerateMultiTexMipmapEXT (GLenum texunit, GLenum target);
+GLAPI void APIENTRY glFramebufferDrawBufferEXT (GLuint framebuffer, GLenum mode);
+GLAPI void APIENTRY glFramebufferDrawBuffersEXT (GLuint framebuffer, GLsizei n, const GLenum *bufs);
+GLAPI void APIENTRY glFramebufferReadBufferEXT (GLuint framebuffer, GLenum mode);
+GLAPI void APIENTRY glGetFramebufferParameterivEXT (GLuint framebuffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glNamedCopyBufferSubDataEXT (GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+GLAPI void APIENTRY glNamedFramebufferTextureEXT (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level);
+GLAPI void APIENTRY glNamedFramebufferTextureLayerEXT (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLint layer);
+GLAPI void APIENTRY glNamedFramebufferTextureFaceEXT (GLuint framebuffer, GLenum attachment, GLuint texture, GLint level, GLenum face);
+GLAPI void APIENTRY glTextureRenderbufferEXT (GLuint texture, GLenum target, GLuint renderbuffer);
+GLAPI void APIENTRY glMultiTexRenderbufferEXT (GLenum texunit, GLenum target, GLuint renderbuffer);
+GLAPI void APIENTRY glVertexArrayVertexOffsetEXT (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayColorOffsetEXT (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayEdgeFlagOffsetEXT (GLuint vaobj, GLuint buffer, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayIndexOffsetEXT (GLuint vaobj, GLuint buffer, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayNormalOffsetEXT (GLuint vaobj, GLuint buffer, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayTexCoordOffsetEXT (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayMultiTexCoordOffsetEXT (GLuint vaobj, GLuint buffer, GLenum texunit, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayFogCoordOffsetEXT (GLuint vaobj, GLuint buffer, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArraySecondaryColorOffsetEXT (GLuint vaobj, GLuint buffer, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayVertexAttribOffsetEXT (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glVertexArrayVertexAttribIOffsetEXT (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glEnableVertexArrayEXT (GLuint vaobj, GLenum array);
+GLAPI void APIENTRY glDisableVertexArrayEXT (GLuint vaobj, GLenum array);
+GLAPI void APIENTRY glEnableVertexArrayAttribEXT (GLuint vaobj, GLuint index);
+GLAPI void APIENTRY glDisableVertexArrayAttribEXT (GLuint vaobj, GLuint index);
+GLAPI void APIENTRY glGetVertexArrayIntegervEXT (GLuint vaobj, GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetVertexArrayPointervEXT (GLuint vaobj, GLenum pname, void **param);
+GLAPI void APIENTRY glGetVertexArrayIntegeri_vEXT (GLuint vaobj, GLuint index, GLenum pname, GLint *param);
+GLAPI void APIENTRY glGetVertexArrayPointeri_vEXT (GLuint vaobj, GLuint index, GLenum pname, void **param);
+GLAPI void *APIENTRY glMapNamedBufferRangeEXT (GLuint buffer, GLintptr offset, GLsizeiptr length, GLbitfield access);
+GLAPI void APIENTRY glFlushMappedNamedBufferRangeEXT (GLuint buffer, GLintptr offset, GLsizeiptr length);
+GLAPI void APIENTRY glNamedBufferStorageEXT (GLuint buffer, GLsizeiptr size, const void *data, GLbitfield flags);
+GLAPI void APIENTRY glClearNamedBufferDataEXT (GLuint buffer, GLenum internalformat, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glClearNamedBufferSubDataEXT (GLuint buffer, GLenum internalformat, GLsizeiptr offset, GLsizeiptr size, GLenum format, GLenum type, const void *data);
+GLAPI void APIENTRY glNamedFramebufferParameteriEXT (GLuint framebuffer, GLenum pname, GLint param);
+GLAPI void APIENTRY glGetNamedFramebufferParameterivEXT (GLuint framebuffer, GLenum pname, GLint *params);
+GLAPI void APIENTRY glProgramUniform1dEXT (GLuint program, GLint location, GLdouble x);
+GLAPI void APIENTRY glProgramUniform2dEXT (GLuint program, GLint location, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glProgramUniform3dEXT (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glProgramUniform4dEXT (GLuint program, GLint location, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glProgramUniform1dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform2dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform3dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniform4dvEXT (GLuint program, GLint location, GLsizei count, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix2dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix3dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix4dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x3dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix2x4dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x2dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix3x4dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x2dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glProgramUniformMatrix4x3dvEXT (GLuint program, GLint location, GLsizei count, GLboolean transpose, const GLdouble *value);
+GLAPI void APIENTRY glTextureBufferRangeEXT (GLuint texture, GLenum target, GLenum internalformat, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glTextureStorage1DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width);
+GLAPI void APIENTRY glTextureStorage2DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glTextureStorage3DEXT (GLuint texture, GLenum target, GLsizei levels, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth);
+GLAPI void APIENTRY glTextureStorage2DMultisampleEXT (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glTextureStorage3DMultisampleEXT (GLuint texture, GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedsamplelocations);
+GLAPI void APIENTRY glVertexArrayBindVertexBufferEXT (GLuint vaobj, GLuint bindingindex, GLuint buffer, GLintptr offset, GLsizei stride);
+GLAPI void APIENTRY glVertexArrayVertexAttribFormatEXT (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLboolean normalized, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexArrayVertexAttribIFormatEXT (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexArrayVertexAttribLFormatEXT (GLuint vaobj, GLuint attribindex, GLint size, GLenum type, GLuint relativeoffset);
+GLAPI void APIENTRY glVertexArrayVertexAttribBindingEXT (GLuint vaobj, GLuint attribindex, GLuint bindingindex);
+GLAPI void APIENTRY glVertexArrayVertexBindingDivisorEXT (GLuint vaobj, GLuint bindingindex, GLuint divisor);
+GLAPI void APIENTRY glVertexArrayVertexAttribLOffsetEXT (GLuint vaobj, GLuint buffer, GLuint index, GLint size, GLenum type, GLsizei stride, GLintptr offset);
+GLAPI void APIENTRY glTexturePageCommitmentEXT (GLuint texture, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLboolean commit);
+GLAPI void APIENTRY glVertexArrayVertexAttribDivisorEXT (GLuint vaobj, GLuint index, GLuint divisor);
+#endif
+#endif /* GL_EXT_direct_state_access */
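+
+/*
+ * Usage sketch (illustrative only, not part of the extension spec): the
+ * PFNGL...PROC typedefs above exist so that applications can resolve EXT
+ * entry points at run time instead of relying on GL_GLEXT_PROTOTYPES.
+ * Assuming a GLX loader and a previously generated texture name `tex`
+ * (both assumptions, not defined by this header):
+ *
+ *   PFNGLTEXTURESTORAGE2DEXTPROC pglTextureStorage2DEXT =
+ *       (PFNGLTEXTURESTORAGE2DEXTPROC)
+ *       glXGetProcAddress((const GLubyte *)"glTextureStorage2DEXT");
+ *   if (pglTextureStorage2DEXT)   (NULL when the driver lacks the entry point)
+ *       pglTextureStorage2DEXT(tex, GL_TEXTURE_2D, 1, GL_RGBA8, 256, 256);
+ *
+ * wglGetProcAddress or eglGetProcAddress plays the same role on other window
+ * systems; the GL_EXTENSIONS string should be checked before calling.
+ */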
+
+#ifndef GL_EXT_draw_buffers2
+#define GL_EXT_draw_buffers2 1
+typedef void (APIENTRYP PFNGLCOLORMASKINDEXEDEXTPROC) (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorMaskIndexedEXT (GLuint index, GLboolean r, GLboolean g, GLboolean b, GLboolean a);
+#endif
+#endif /* GL_EXT_draw_buffers2 */
+
+#ifndef GL_EXT_draw_instanced
+#define GL_EXT_draw_instanced 1
+typedef void (APIENTRYP PFNGLDRAWARRAYSINSTANCEDEXTPROC) (GLenum mode, GLint start, GLsizei count, GLsizei primcount);
+typedef void (APIENTRYP PFNGLDRAWELEMENTSINSTANCEDEXTPROC) (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawArraysInstancedEXT (GLenum mode, GLint start, GLsizei count, GLsizei primcount);
+GLAPI void APIENTRY glDrawElementsInstancedEXT (GLenum mode, GLsizei count, GLenum type, const void *indices, GLsizei primcount);
+#endif
+#endif /* GL_EXT_draw_instanced */
+
+#ifndef GL_EXT_draw_range_elements
+#define GL_EXT_draw_range_elements 1
+#define GL_MAX_ELEMENTS_VERTICES_EXT 0x80E8
+#define GL_MAX_ELEMENTS_INDICES_EXT 0x80E9
+typedef void (APIENTRYP PFNGLDRAWRANGEELEMENTSEXTPROC) (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawRangeElementsEXT (GLenum mode, GLuint start, GLuint end, GLsizei count, GLenum type, const void *indices);
+#endif
+#endif /* GL_EXT_draw_range_elements */
+
+#ifndef GL_EXT_external_buffer
+#define GL_EXT_external_buffer 1
+typedef void *GLeglClientBufferEXT;
+typedef void (APIENTRYP PFNGLBUFFERSTORAGEEXTERNALEXTPROC) (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERSTORAGEEXTERNALEXTPROC) (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBufferStorageExternalEXT (GLenum target, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+GLAPI void APIENTRY glNamedBufferStorageExternalEXT (GLuint buffer, GLintptr offset, GLsizeiptr size, GLeglClientBufferEXT clientBuffer, GLbitfield flags);
+#endif
+#endif /* GL_EXT_external_buffer */
+
+#ifndef GL_EXT_fog_coord
+#define GL_EXT_fog_coord 1
+#define GL_FOG_COORDINATE_SOURCE_EXT 0x8450
+#define GL_FOG_COORDINATE_EXT 0x8451
+#define GL_FRAGMENT_DEPTH_EXT 0x8452
+#define GL_CURRENT_FOG_COORDINATE_EXT 0x8453
+#define GL_FOG_COORDINATE_ARRAY_TYPE_EXT 0x8454
+#define GL_FOG_COORDINATE_ARRAY_STRIDE_EXT 0x8455
+#define GL_FOG_COORDINATE_ARRAY_POINTER_EXT 0x8456
+#define GL_FOG_COORDINATE_ARRAY_EXT 0x8457
+typedef void (APIENTRYP PFNGLFOGCOORDFEXTPROC) (GLfloat coord);
+typedef void (APIENTRYP PFNGLFOGCOORDFVEXTPROC) (const GLfloat *coord);
+typedef void (APIENTRYP PFNGLFOGCOORDDEXTPROC) (GLdouble coord);
+typedef void (APIENTRYP PFNGLFOGCOORDDVEXTPROC) (const GLdouble *coord);
+typedef void (APIENTRYP PFNGLFOGCOORDPOINTEREXTPROC) (GLenum type, GLsizei stride, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFogCoordfEXT (GLfloat coord);
+GLAPI void APIENTRY glFogCoordfvEXT (const GLfloat *coord);
+GLAPI void APIENTRY glFogCoorddEXT (GLdouble coord);
+GLAPI void APIENTRY glFogCoorddvEXT (const GLdouble *coord);
+GLAPI void APIENTRY glFogCoordPointerEXT (GLenum type, GLsizei stride, const void *pointer);
+#endif
+#endif /* GL_EXT_fog_coord */
+
+#ifndef GL_EXT_framebuffer_blit
+#define GL_EXT_framebuffer_blit 1
+#define GL_READ_FRAMEBUFFER_EXT 0x8CA8
+#define GL_DRAW_FRAMEBUFFER_EXT 0x8CA9
+#define GL_DRAW_FRAMEBUFFER_BINDING_EXT 0x8CA6
+#define GL_READ_FRAMEBUFFER_BINDING_EXT 0x8CAA
+typedef void (APIENTRYP PFNGLBLITFRAMEBUFFEREXTPROC) (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlitFramebufferEXT (GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+#endif
+#endif /* GL_EXT_framebuffer_blit */
+
+#ifndef GL_EXT_framebuffer_multisample
+#define GL_EXT_framebuffer_multisample 1
+#define GL_RENDERBUFFER_SAMPLES_EXT 0x8CAB
+#define GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE_EXT 0x8D56
+#define GL_MAX_SAMPLES_EXT 0x8D57
+typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glRenderbufferStorageMultisampleEXT (GLenum target, GLsizei samples, GLenum internalformat, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_EXT_framebuffer_multisample */
+
+#ifndef GL_EXT_framebuffer_multisample_blit_scaled
+#define GL_EXT_framebuffer_multisample_blit_scaled 1
+#define GL_SCALED_RESOLVE_FASTEST_EXT 0x90BA
+#define GL_SCALED_RESOLVE_NICEST_EXT 0x90BB
+#endif /* GL_EXT_framebuffer_multisample_blit_scaled */
+
+#ifndef GL_EXT_framebuffer_object
+#define GL_EXT_framebuffer_object 1
+#define GL_INVALID_FRAMEBUFFER_OPERATION_EXT 0x0506
+#define GL_MAX_RENDERBUFFER_SIZE_EXT 0x84E8
+#define GL_FRAMEBUFFER_BINDING_EXT 0x8CA6
+#define GL_RENDERBUFFER_BINDING_EXT 0x8CA7
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT 0x8CD0
+#define GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME_EXT 0x8CD1
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL_EXT 0x8CD2
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_CUBE_MAP_FACE_EXT 0x8CD3
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_3D_ZOFFSET_EXT 0x8CD4
+#define GL_FRAMEBUFFER_COMPLETE_EXT 0x8CD5
+#define GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT 0x8CD6
+#define GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT 0x8CD7
+#define GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT 0x8CD9
+#define GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT 0x8CDA
+#define GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT 0x8CDB
+#define GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT 0x8CDC
+#define GL_FRAMEBUFFER_UNSUPPORTED_EXT 0x8CDD
+#define GL_MAX_COLOR_ATTACHMENTS_EXT 0x8CDF
+#define GL_COLOR_ATTACHMENT0_EXT 0x8CE0
+#define GL_COLOR_ATTACHMENT1_EXT 0x8CE1
+#define GL_COLOR_ATTACHMENT2_EXT 0x8CE2
+#define GL_COLOR_ATTACHMENT3_EXT 0x8CE3
+#define GL_COLOR_ATTACHMENT4_EXT 0x8CE4
+#define GL_COLOR_ATTACHMENT5_EXT 0x8CE5
+#define GL_COLOR_ATTACHMENT6_EXT 0x8CE6
+#define GL_COLOR_ATTACHMENT7_EXT 0x8CE7
+#define GL_COLOR_ATTACHMENT8_EXT 0x8CE8
+#define GL_COLOR_ATTACHMENT9_EXT 0x8CE9
+#define GL_COLOR_ATTACHMENT10_EXT 0x8CEA
+#define GL_COLOR_ATTACHMENT11_EXT 0x8CEB
+#define GL_COLOR_ATTACHMENT12_EXT 0x8CEC
+#define GL_COLOR_ATTACHMENT13_EXT 0x8CED
+#define GL_COLOR_ATTACHMENT14_EXT 0x8CEE
+#define GL_COLOR_ATTACHMENT15_EXT 0x8CEF
+#define GL_DEPTH_ATTACHMENT_EXT 0x8D00
+#define GL_STENCIL_ATTACHMENT_EXT 0x8D20
+#define GL_FRAMEBUFFER_EXT 0x8D40
+#define GL_RENDERBUFFER_EXT 0x8D41
+#define GL_RENDERBUFFER_WIDTH_EXT 0x8D42
+#define GL_RENDERBUFFER_HEIGHT_EXT 0x8D43
+#define GL_RENDERBUFFER_INTERNAL_FORMAT_EXT 0x8D44
+#define GL_STENCIL_INDEX1_EXT 0x8D46
+#define GL_STENCIL_INDEX4_EXT 0x8D47
+#define GL_STENCIL_INDEX8_EXT 0x8D48
+#define GL_STENCIL_INDEX16_EXT 0x8D49
+#define GL_RENDERBUFFER_RED_SIZE_EXT 0x8D50
+#define GL_RENDERBUFFER_GREEN_SIZE_EXT 0x8D51
+#define GL_RENDERBUFFER_BLUE_SIZE_EXT 0x8D52
+#define GL_RENDERBUFFER_ALPHA_SIZE_EXT 0x8D53
+#define GL_RENDERBUFFER_DEPTH_SIZE_EXT 0x8D54
+#define GL_RENDERBUFFER_STENCIL_SIZE_EXT 0x8D55
+typedef GLboolean (APIENTRYP PFNGLISRENDERBUFFEREXTPROC) (GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLBINDRENDERBUFFEREXTPROC) (GLenum target, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLDELETERENDERBUFFERSEXTPROC) (GLsizei n, const GLuint *renderbuffers);
+typedef void (APIENTRYP PFNGLGENRENDERBUFFERSEXTPROC) (GLsizei n, GLuint *renderbuffers);
+typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEEXTPROC) (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLGETRENDERBUFFERPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef GLboolean (APIENTRYP PFNGLISFRAMEBUFFEREXTPROC) (GLuint framebuffer);
+typedef void (APIENTRYP PFNGLBINDFRAMEBUFFEREXTPROC) (GLenum target, GLuint framebuffer);
+typedef void (APIENTRYP PFNGLDELETEFRAMEBUFFERSEXTPROC) (GLsizei n, const GLuint *framebuffers);
+typedef void (APIENTRYP PFNGLGENFRAMEBUFFERSEXTPROC) (GLsizei n, GLuint *framebuffers);
+typedef GLenum (APIENTRYP PFNGLCHECKFRAMEBUFFERSTATUSEXTPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE1DEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE2DEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURE3DEXTPROC) (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERRENDERBUFFEREXTPROC) (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+typedef void (APIENTRYP PFNGLGETFRAMEBUFFERATTACHMENTPARAMETERIVEXTPROC) (GLenum target, GLenum attachment, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGENERATEMIPMAPEXTPROC) (GLenum target);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLboolean APIENTRY glIsRenderbufferEXT (GLuint renderbuffer);
+GLAPI void APIENTRY glBindRenderbufferEXT (GLenum target, GLuint renderbuffer);
+GLAPI void APIENTRY glDeleteRenderbuffersEXT (GLsizei n, const GLuint *renderbuffers);
+GLAPI void APIENTRY glGenRenderbuffersEXT (GLsizei n, GLuint *renderbuffers);
+GLAPI void APIENTRY glRenderbufferStorageEXT (GLenum target, GLenum internalformat, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glGetRenderbufferParameterivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI GLboolean APIENTRY glIsFramebufferEXT (GLuint framebuffer);
+GLAPI void APIENTRY glBindFramebufferEXT (GLenum target, GLuint framebuffer);
+GLAPI void APIENTRY glDeleteFramebuffersEXT (GLsizei n, const GLuint *framebuffers);
+GLAPI void APIENTRY glGenFramebuffersEXT (GLsizei n, GLuint *framebuffers);
+GLAPI GLenum APIENTRY glCheckFramebufferStatusEXT (GLenum target);
+GLAPI void APIENTRY glFramebufferTexture1DEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GLAPI void APIENTRY glFramebufferTexture2DEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level);
+GLAPI void APIENTRY glFramebufferTexture3DEXT (GLenum target, GLenum attachment, GLenum textarget, GLuint texture, GLint level, GLint zoffset);
+GLAPI void APIENTRY glFramebufferRenderbufferEXT (GLenum target, GLenum attachment, GLenum renderbuffertarget, GLuint renderbuffer);
+GLAPI void APIENTRY glGetFramebufferAttachmentParameterivEXT (GLenum target, GLenum attachment, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGenerateMipmapEXT (GLenum target);
+#endif
+#endif /* GL_EXT_framebuffer_object */
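+
+/*
+ * Minimal sketch of the EXT_framebuffer_object flow declared above
+ * (illustrative; `colorTex` is assumed to be an existing, allocated 2D
+ * texture and is not defined by this header):
+ *
+ *   GLuint fbo;
+ *   glGenFramebuffersEXT(1, &fbo);
+ *   glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
+ *   glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
+ *                             GL_TEXTURE_2D, colorTex, 0);
+ *   if (glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT) !=
+ *       GL_FRAMEBUFFER_COMPLETE_EXT) {
+ *       handle an incomplete or unsupported attachment combination here;
+ *   }
+ *
+ * Subsequent draw calls then render into `colorTex`, and
+ * glGenerateMipmapEXT(GL_TEXTURE_2D) can rebuild its mip chain afterwards.
+ */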
+
+#ifndef GL_EXT_framebuffer_sRGB
+#define GL_EXT_framebuffer_sRGB 1
+#define GL_FRAMEBUFFER_SRGB_EXT 0x8DB9
+#define GL_FRAMEBUFFER_SRGB_CAPABLE_EXT 0x8DBA
+#endif /* GL_EXT_framebuffer_sRGB */
+
+#ifndef GL_EXT_geometry_shader4
+#define GL_EXT_geometry_shader4 1
+#define GL_GEOMETRY_SHADER_EXT 0x8DD9
+#define GL_GEOMETRY_VERTICES_OUT_EXT 0x8DDA
+#define GL_GEOMETRY_INPUT_TYPE_EXT 0x8DDB
+#define GL_GEOMETRY_OUTPUT_TYPE_EXT 0x8DDC
+#define GL_MAX_GEOMETRY_TEXTURE_IMAGE_UNITS_EXT 0x8C29
+#define GL_MAX_GEOMETRY_VARYING_COMPONENTS_EXT 0x8DDD
+#define GL_MAX_VERTEX_VARYING_COMPONENTS_EXT 0x8DDE
+#define GL_MAX_VARYING_COMPONENTS_EXT 0x8B4B
+#define GL_MAX_GEOMETRY_UNIFORM_COMPONENTS_EXT 0x8DDF
+#define GL_MAX_GEOMETRY_OUTPUT_VERTICES_EXT 0x8DE0
+#define GL_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS_EXT 0x8DE1
+#define GL_LINES_ADJACENCY_EXT 0x000A
+#define GL_LINE_STRIP_ADJACENCY_EXT 0x000B
+#define GL_TRIANGLES_ADJACENCY_EXT 0x000C
+#define GL_TRIANGLE_STRIP_ADJACENCY_EXT 0x000D
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS_EXT 0x8DA8
+#define GL_FRAMEBUFFER_INCOMPLETE_LAYER_COUNT_EXT 0x8DA9
+#define GL_FRAMEBUFFER_ATTACHMENT_LAYERED_EXT 0x8DA7
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LAYER_EXT 0x8CD4
+#define GL_PROGRAM_POINT_SIZE_EXT 0x8642
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETERIEXTPROC) (GLuint program, GLenum pname, GLint value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramParameteriEXT (GLuint program, GLenum pname, GLint value);
+#endif
+#endif /* GL_EXT_geometry_shader4 */
+
+#ifndef GL_EXT_gpu_program_parameters
+#define GL_EXT_gpu_program_parameters 1
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERS4FVEXTPROC) (GLenum target, GLuint index, GLsizei count, const GLfloat *params);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERS4FVEXTPROC) (GLenum target, GLuint index, GLsizei count, const GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramEnvParameters4fvEXT (GLenum target, GLuint index, GLsizei count, const GLfloat *params);
+GLAPI void APIENTRY glProgramLocalParameters4fvEXT (GLenum target, GLuint index, GLsizei count, const GLfloat *params);
+#endif
+#endif /* GL_EXT_gpu_program_parameters */
+
+#ifndef GL_EXT_gpu_shader4
+#define GL_EXT_gpu_shader4 1
+#define GL_VERTEX_ATTRIB_ARRAY_INTEGER_EXT 0x88FD
+#define GL_SAMPLER_1D_ARRAY_EXT 0x8DC0
+#define GL_SAMPLER_2D_ARRAY_EXT 0x8DC1
+#define GL_SAMPLER_BUFFER_EXT 0x8DC2
+#define GL_SAMPLER_1D_ARRAY_SHADOW_EXT 0x8DC3
+#define GL_SAMPLER_2D_ARRAY_SHADOW_EXT 0x8DC4
+#define GL_SAMPLER_CUBE_SHADOW_EXT 0x8DC5
+#define GL_UNSIGNED_INT_VEC2_EXT 0x8DC6
+#define GL_UNSIGNED_INT_VEC3_EXT 0x8DC7
+#define GL_UNSIGNED_INT_VEC4_EXT 0x8DC8
+#define GL_INT_SAMPLER_1D_EXT 0x8DC9
+#define GL_INT_SAMPLER_2D_EXT 0x8DCA
+#define GL_INT_SAMPLER_3D_EXT 0x8DCB
+#define GL_INT_SAMPLER_CUBE_EXT 0x8DCC
+#define GL_INT_SAMPLER_2D_RECT_EXT 0x8DCD
+#define GL_INT_SAMPLER_1D_ARRAY_EXT 0x8DCE
+#define GL_INT_SAMPLER_2D_ARRAY_EXT 0x8DCF
+#define GL_INT_SAMPLER_BUFFER_EXT 0x8DD0
+#define GL_UNSIGNED_INT_SAMPLER_1D_EXT 0x8DD1
+#define GL_UNSIGNED_INT_SAMPLER_2D_EXT 0x8DD2
+#define GL_UNSIGNED_INT_SAMPLER_3D_EXT 0x8DD3
+#define GL_UNSIGNED_INT_SAMPLER_CUBE_EXT 0x8DD4
+#define GL_UNSIGNED_INT_SAMPLER_2D_RECT_EXT 0x8DD5
+#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY_EXT 0x8DD6
+#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY_EXT 0x8DD7
+#define GL_UNSIGNED_INT_SAMPLER_BUFFER_EXT 0x8DD8
+#define GL_MIN_PROGRAM_TEXEL_OFFSET_EXT 0x8904
+#define GL_MAX_PROGRAM_TEXEL_OFFSET_EXT 0x8905
+typedef void (APIENTRYP PFNGLGETUNIFORMUIVEXTPROC) (GLuint program, GLint location, GLuint *params);
+typedef void (APIENTRYP PFNGLBINDFRAGDATALOCATIONEXTPROC) (GLuint program, GLuint color, const GLchar *name);
+typedef GLint (APIENTRYP PFNGLGETFRAGDATALOCATIONEXTPROC) (GLuint program, const GLchar *name);
+typedef void (APIENTRYP PFNGLUNIFORM1UIEXTPROC) (GLint location, GLuint v0);
+typedef void (APIENTRYP PFNGLUNIFORM2UIEXTPROC) (GLint location, GLuint v0, GLuint v1);
+typedef void (APIENTRYP PFNGLUNIFORM3UIEXTPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2);
+typedef void (APIENTRYP PFNGLUNIFORM4UIEXTPROC) (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+typedef void (APIENTRYP PFNGLUNIFORM1UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLUNIFORM2UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLUNIFORM3UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value);
+typedef void (APIENTRYP PFNGLUNIFORM4UIVEXTPROC) (GLint location, GLsizei count, const GLuint *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetUniformuivEXT (GLuint program, GLint location, GLuint *params);
+GLAPI void APIENTRY glBindFragDataLocationEXT (GLuint program, GLuint color, const GLchar *name);
+GLAPI GLint APIENTRY glGetFragDataLocationEXT (GLuint program, const GLchar *name);
+GLAPI void APIENTRY glUniform1uiEXT (GLint location, GLuint v0);
+GLAPI void APIENTRY glUniform2uiEXT (GLint location, GLuint v0, GLuint v1);
+GLAPI void APIENTRY glUniform3uiEXT (GLint location, GLuint v0, GLuint v1, GLuint v2);
+GLAPI void APIENTRY glUniform4uiEXT (GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+GLAPI void APIENTRY glUniform1uivEXT (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glUniform2uivEXT (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glUniform3uivEXT (GLint location, GLsizei count, const GLuint *value);
+GLAPI void APIENTRY glUniform4uivEXT (GLint location, GLsizei count, const GLuint *value);
+#endif
+#endif /* GL_EXT_gpu_shader4 */
+
+#ifndef GL_EXT_histogram
+#define GL_EXT_histogram 1
+#define GL_HISTOGRAM_EXT 0x8024
+#define GL_PROXY_HISTOGRAM_EXT 0x8025
+#define GL_HISTOGRAM_WIDTH_EXT 0x8026
+#define GL_HISTOGRAM_FORMAT_EXT 0x8027
+#define GL_HISTOGRAM_RED_SIZE_EXT 0x8028
+#define GL_HISTOGRAM_GREEN_SIZE_EXT 0x8029
+#define GL_HISTOGRAM_BLUE_SIZE_EXT 0x802A
+#define GL_HISTOGRAM_ALPHA_SIZE_EXT 0x802B
+#define GL_HISTOGRAM_LUMINANCE_SIZE_EXT 0x802C
+#define GL_HISTOGRAM_SINK_EXT 0x802D
+#define GL_MINMAX_EXT 0x802E
+#define GL_MINMAX_FORMAT_EXT 0x802F
+#define GL_MINMAX_SINK_EXT 0x8030
+#define GL_TABLE_TOO_LARGE_EXT 0x8031
+typedef void (APIENTRYP PFNGLGETHISTOGRAMEXTPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETHISTOGRAMPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMINMAXEXTPROC) (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMINMAXPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLHISTOGRAMEXTPROC) (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink);
+typedef void (APIENTRYP PFNGLMINMAXEXTPROC) (GLenum target, GLenum internalformat, GLboolean sink);
+typedef void (APIENTRYP PFNGLRESETHISTOGRAMEXTPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLRESETMINMAXEXTPROC) (GLenum target);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetHistogramEXT (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+GLAPI void APIENTRY glGetHistogramParameterfvEXT (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetHistogramParameterivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMinmaxEXT (GLenum target, GLboolean reset, GLenum format, GLenum type, void *values);
+GLAPI void APIENTRY glGetMinmaxParameterfvEXT (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMinmaxParameterivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glHistogramEXT (GLenum target, GLsizei width, GLenum internalformat, GLboolean sink);
+GLAPI void APIENTRY glMinmaxEXT (GLenum target, GLenum internalformat, GLboolean sink);
+GLAPI void APIENTRY glResetHistogramEXT (GLenum target);
+GLAPI void APIENTRY glResetMinmaxEXT (GLenum target);
+#endif
+#endif /* GL_EXT_histogram */
+
+#ifndef GL_EXT_index_array_formats
+#define GL_EXT_index_array_formats 1
+#define GL_IUI_V2F_EXT 0x81AD
+#define GL_IUI_V3F_EXT 0x81AE
+#define GL_IUI_N3F_V2F_EXT 0x81AF
+#define GL_IUI_N3F_V3F_EXT 0x81B0
+#define GL_T2F_IUI_V2F_EXT 0x81B1
+#define GL_T2F_IUI_V3F_EXT 0x81B2
+#define GL_T2F_IUI_N3F_V2F_EXT 0x81B3
+#define GL_T2F_IUI_N3F_V3F_EXT 0x81B4
+#endif /* GL_EXT_index_array_formats */
+
+#ifndef GL_EXT_index_func
+#define GL_EXT_index_func 1
+#define GL_INDEX_TEST_EXT 0x81B5
+#define GL_INDEX_TEST_FUNC_EXT 0x81B6
+#define GL_INDEX_TEST_REF_EXT 0x81B7
+typedef void (APIENTRYP PFNGLINDEXFUNCEXTPROC) (GLenum func, GLclampf ref);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glIndexFuncEXT (GLenum func, GLclampf ref);
+#endif
+#endif /* GL_EXT_index_func */
+
+#ifndef GL_EXT_index_material
+#define GL_EXT_index_material 1
+#define GL_INDEX_MATERIAL_EXT 0x81B8
+#define GL_INDEX_MATERIAL_PARAMETER_EXT 0x81B9
+#define GL_INDEX_MATERIAL_FACE_EXT 0x81BA
+typedef void (APIENTRYP PFNGLINDEXMATERIALEXTPROC) (GLenum face, GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glIndexMaterialEXT (GLenum face, GLenum mode);
+#endif
+#endif /* GL_EXT_index_material */
+
+#ifndef GL_EXT_index_texture
+#define GL_EXT_index_texture 1
+#endif /* GL_EXT_index_texture */
+
+#ifndef GL_EXT_light_texture
+#define GL_EXT_light_texture 1
+#define GL_FRAGMENT_MATERIAL_EXT 0x8349
+#define GL_FRAGMENT_NORMAL_EXT 0x834A
+#define GL_FRAGMENT_COLOR_EXT 0x834C
+#define GL_ATTENUATION_EXT 0x834D
+#define GL_SHADOW_ATTENUATION_EXT 0x834E
+#define GL_TEXTURE_APPLICATION_MODE_EXT 0x834F
+#define GL_TEXTURE_LIGHT_EXT 0x8350
+#define GL_TEXTURE_MATERIAL_FACE_EXT 0x8351
+#define GL_TEXTURE_MATERIAL_PARAMETER_EXT 0x8352
+typedef void (APIENTRYP PFNGLAPPLYTEXTUREEXTPROC) (GLenum mode);
+typedef void (APIENTRYP PFNGLTEXTURELIGHTEXTPROC) (GLenum pname);
+typedef void (APIENTRYP PFNGLTEXTUREMATERIALEXTPROC) (GLenum face, GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glApplyTextureEXT (GLenum mode);
+GLAPI void APIENTRY glTextureLightEXT (GLenum pname);
+GLAPI void APIENTRY glTextureMaterialEXT (GLenum face, GLenum mode);
+#endif
+#endif /* GL_EXT_light_texture */
+
+#ifndef GL_EXT_memory_object
+#define GL_EXT_memory_object 1
+#define GL_TEXTURE_TILING_EXT 0x9580
+#define GL_DEDICATED_MEMORY_OBJECT_EXT 0x9581
+#define GL_PROTECTED_MEMORY_OBJECT_EXT 0x959B
+#define GL_NUM_TILING_TYPES_EXT 0x9582
+#define GL_TILING_TYPES_EXT 0x9583
+#define GL_OPTIMAL_TILING_EXT 0x9584
+#define GL_LINEAR_TILING_EXT 0x9585
+#define GL_NUM_DEVICE_UUIDS_EXT 0x9596
+#define GL_DEVICE_UUID_EXT 0x9597
+#define GL_DRIVER_UUID_EXT 0x9598
+#define GL_UUID_SIZE_EXT 16
+typedef void (APIENTRYP PFNGLGETUNSIGNEDBYTEVEXTPROC) (GLenum pname, GLubyte *data);
+typedef void (APIENTRYP PFNGLGETUNSIGNEDBYTEI_VEXTPROC) (GLenum target, GLuint index, GLubyte *data);
+typedef void (APIENTRYP PFNGLDELETEMEMORYOBJECTSEXTPROC) (GLsizei n, const GLuint *memoryObjects);
+typedef GLboolean (APIENTRYP PFNGLISMEMORYOBJECTEXTPROC) (GLuint memoryObject);
+typedef void (APIENTRYP PFNGLCREATEMEMORYOBJECTSEXTPROC) (GLsizei n, GLuint *memoryObjects);
+typedef void (APIENTRYP PFNGLMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLGETMEMORYOBJECTPARAMETERIVEXTPROC) (GLuint memoryObject, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLTEXSTORAGEMEM2DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXSTORAGEMEM2DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXSTORAGEMEM3DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXSTORAGEMEM3DMULTISAMPLEEXTPROC) (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLBUFFERSTORAGEMEMEXTPROC) (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGEMEM2DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGEMEM2DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGEMEM3DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGEMEM3DMULTISAMPLEEXTPROC) (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERSTORAGEMEMEXTPROC) (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXSTORAGEMEM1DEXTPROC) (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXTURESTORAGEMEM1DEXTPROC) (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLuint memory, GLuint64 offset);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetUnsignedBytevEXT (GLenum pname, GLubyte *data);
+GLAPI void APIENTRY glGetUnsignedBytei_vEXT (GLenum target, GLuint index, GLubyte *data);
+GLAPI void APIENTRY glDeleteMemoryObjectsEXT (GLsizei n, const GLuint *memoryObjects);
+GLAPI GLboolean APIENTRY glIsMemoryObjectEXT (GLuint memoryObject);
+GLAPI void APIENTRY glCreateMemoryObjectsEXT (GLsizei n, GLuint *memoryObjects);
+GLAPI void APIENTRY glMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glGetMemoryObjectParameterivEXT (GLuint memoryObject, GLenum pname, GLint *params);
+GLAPI void APIENTRY glTexStorageMem2DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTexStorageMem2DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTexStorageMem3DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTexStorageMem3DMultisampleEXT (GLenum target, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glBufferStorageMemEXT (GLenum target, GLsizeiptr size, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTextureStorageMem2DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTextureStorageMem2DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTextureStorageMem3DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTextureStorageMem3DMultisampleEXT (GLuint texture, GLsizei samples, GLenum internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glNamedBufferStorageMemEXT (GLuint buffer, GLsizeiptr size, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTexStorageMem1DEXT (GLenum target, GLsizei levels, GLenum internalFormat, GLsizei width, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTextureStorageMem1DEXT (GLuint texture, GLsizei levels, GLenum internalFormat, GLsizei width, GLuint memory, GLuint64 offset);
+#endif
+#endif /* GL_EXT_memory_object */
+
+#ifndef GL_EXT_memory_object_fd
+#define GL_EXT_memory_object_fd 1
+#define GL_HANDLE_TYPE_OPAQUE_FD_EXT 0x9586
+typedef void (APIENTRYP PFNGLIMPORTMEMORYFDEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, GLint fd);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glImportMemoryFdEXT (GLuint memory, GLuint64 size, GLenum handleType, GLint fd);
+#endif
+#endif /* GL_EXT_memory_object_fd */
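+
+/*
+ * Illustrative sketch (editorial): GL_EXT_memory_object and
+ * GL_EXT_memory_object_fd are typically used together to alias memory
+ * exported by another API such as Vulkan.  `fd` and `size` are assumed to
+ * come from the exporter, and `tex` from glCreateTextures (GL 4.5 DSA);
+ * note that a successful import transfers ownership of the fd to GL.
+ *
+ *   GLuint mem = 0;
+ *   glCreateMemoryObjectsEXT(1, &mem);
+ *   glImportMemoryFdEXT(mem, size, GL_HANDLE_TYPE_OPAQUE_FD_EXT, fd);
+ *   glTextureStorageMem2DEXT(tex, 1, GL_RGBA8, width, height, mem, 0);
+ */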
+
+#ifndef GL_EXT_memory_object_win32
+#define GL_EXT_memory_object_win32 1
+#define GL_HANDLE_TYPE_OPAQUE_WIN32_EXT 0x9587
+#define GL_HANDLE_TYPE_OPAQUE_WIN32_KMT_EXT 0x9588
+#define GL_DEVICE_LUID_EXT 0x9599
+#define GL_DEVICE_NODE_MASK_EXT 0x959A
+#define GL_LUID_SIZE_EXT 8
+#define GL_HANDLE_TYPE_D3D12_TILEPOOL_EXT 0x9589
+#define GL_HANDLE_TYPE_D3D12_RESOURCE_EXT 0x958A
+#define GL_HANDLE_TYPE_D3D11_IMAGE_EXT 0x958B
+#define GL_HANDLE_TYPE_D3D11_IMAGE_KMT_EXT 0x958C
+typedef void (APIENTRYP PFNGLIMPORTMEMORYWIN32HANDLEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, void *handle);
+typedef void (APIENTRYP PFNGLIMPORTMEMORYWIN32NAMEEXTPROC) (GLuint memory, GLuint64 size, GLenum handleType, const void *name);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glImportMemoryWin32HandleEXT (GLuint memory, GLuint64 size, GLenum handleType, void *handle);
+GLAPI void APIENTRY glImportMemoryWin32NameEXT (GLuint memory, GLuint64 size, GLenum handleType, const void *name);
+#endif
+#endif /* GL_EXT_memory_object_win32 */
+
+#ifndef GL_EXT_misc_attribute
+#define GL_EXT_misc_attribute 1
+#endif /* GL_EXT_misc_attribute */
+
+#ifndef GL_EXT_multi_draw_arrays
+#define GL_EXT_multi_draw_arrays 1
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSEXTPROC) (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSEXTPROC) (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiDrawArraysEXT (GLenum mode, const GLint *first, const GLsizei *count, GLsizei primcount);
+GLAPI void APIENTRY glMultiDrawElementsEXT (GLenum mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount);
+#endif
+#endif /* GL_EXT_multi_draw_arrays */
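+
+/*
+ * Illustrative sketch (editorial): glMultiDrawArraysEXT issues what would
+ * otherwise be `primcount` separate glDrawArrays calls in one command.
+ *
+ *   static const GLint   first[2] = { 0, 100 };
+ *   static const GLsizei count[2] = { 100, 50 };
+ *   glMultiDrawArraysEXT(GL_TRIANGLES, first, count, 2);
+ */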
+
+#ifndef GL_EXT_multisample
+#define GL_EXT_multisample 1
+#define GL_MULTISAMPLE_EXT 0x809D
+#define GL_SAMPLE_ALPHA_TO_MASK_EXT 0x809E
+#define GL_SAMPLE_ALPHA_TO_ONE_EXT 0x809F
+#define GL_SAMPLE_MASK_EXT 0x80A0
+#define GL_1PASS_EXT 0x80A1
+#define GL_2PASS_0_EXT 0x80A2
+#define GL_2PASS_1_EXT 0x80A3
+#define GL_4PASS_0_EXT 0x80A4
+#define GL_4PASS_1_EXT 0x80A5
+#define GL_4PASS_2_EXT 0x80A6
+#define GL_4PASS_3_EXT 0x80A7
+#define GL_SAMPLE_BUFFERS_EXT 0x80A8
+#define GL_SAMPLES_EXT 0x80A9
+#define GL_SAMPLE_MASK_VALUE_EXT 0x80AA
+#define GL_SAMPLE_MASK_INVERT_EXT 0x80AB
+#define GL_SAMPLE_PATTERN_EXT 0x80AC
+#define GL_MULTISAMPLE_BIT_EXT 0x20000000
+typedef void (APIENTRYP PFNGLSAMPLEMASKEXTPROC) (GLclampf value, GLboolean invert);
+typedef void (APIENTRYP PFNGLSAMPLEPATTERNEXTPROC) (GLenum pattern);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSampleMaskEXT (GLclampf value, GLboolean invert);
+GLAPI void APIENTRY glSamplePatternEXT (GLenum pattern);
+#endif
+#endif /* GL_EXT_multisample */
+
+#ifndef GL_EXT_multiview_tessellation_geometry_shader
+#define GL_EXT_multiview_tessellation_geometry_shader 1
+#endif /* GL_EXT_multiview_tessellation_geometry_shader */
+
+#ifndef GL_EXT_multiview_texture_multisample
+#define GL_EXT_multiview_texture_multisample 1
+#endif /* GL_EXT_multiview_texture_multisample */
+
+#ifndef GL_EXT_multiview_timer_query
+#define GL_EXT_multiview_timer_query 1
+#endif /* GL_EXT_multiview_timer_query */
+
+#ifndef GL_EXT_packed_depth_stencil
+#define GL_EXT_packed_depth_stencil 1
+#define GL_DEPTH_STENCIL_EXT 0x84F9
+#define GL_UNSIGNED_INT_24_8_EXT 0x84FA
+#define GL_DEPTH24_STENCIL8_EXT 0x88F0
+#define GL_TEXTURE_STENCIL_SIZE_EXT 0x88F1
+#endif /* GL_EXT_packed_depth_stencil */
+
+#ifndef GL_EXT_packed_float
+#define GL_EXT_packed_float 1
+#define GL_R11F_G11F_B10F_EXT 0x8C3A
+#define GL_UNSIGNED_INT_10F_11F_11F_REV_EXT 0x8C3B
+#define GL_RGBA_SIGNED_COMPONENTS_EXT 0x8C3C
+#endif /* GL_EXT_packed_float */
+
+#ifndef GL_EXT_packed_pixels
+#define GL_EXT_packed_pixels 1
+#define GL_UNSIGNED_BYTE_3_3_2_EXT 0x8032
+#define GL_UNSIGNED_SHORT_4_4_4_4_EXT 0x8033
+#define GL_UNSIGNED_SHORT_5_5_5_1_EXT 0x8034
+#define GL_UNSIGNED_INT_8_8_8_8_EXT 0x8035
+#define GL_UNSIGNED_INT_10_10_10_2_EXT 0x8036
+#endif /* GL_EXT_packed_pixels */
+
+#ifndef GL_EXT_paletted_texture
+#define GL_EXT_paletted_texture 1
+#define GL_COLOR_INDEX1_EXT 0x80E2
+#define GL_COLOR_INDEX2_EXT 0x80E3
+#define GL_COLOR_INDEX4_EXT 0x80E4
+#define GL_COLOR_INDEX8_EXT 0x80E5
+#define GL_COLOR_INDEX12_EXT 0x80E6
+#define GL_COLOR_INDEX16_EXT 0x80E7
+#define GL_TEXTURE_INDEX_SIZE_EXT 0x80ED
+typedef void (APIENTRYP PFNGLCOLORTABLEEXTPROC) (GLenum target, GLenum internalFormat, GLsizei width, GLenum format, GLenum type, const void *table);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEEXTPROC) (GLenum target, GLenum format, GLenum type, void *data);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorTableEXT (GLenum target, GLenum internalFormat, GLsizei width, GLenum format, GLenum type, const void *table);
+GLAPI void APIENTRY glGetColorTableEXT (GLenum target, GLenum format, GLenum type, void *data);
+GLAPI void APIENTRY glGetColorTableParameterivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetColorTableParameterfvEXT (GLenum target, GLenum pname, GLfloat *params);
+#endif
+#endif /* GL_EXT_paletted_texture */
+
+#ifndef GL_EXT_pixel_buffer_object
+#define GL_EXT_pixel_buffer_object 1
+#define GL_PIXEL_PACK_BUFFER_EXT 0x88EB
+#define GL_PIXEL_UNPACK_BUFFER_EXT 0x88EC
+#define GL_PIXEL_PACK_BUFFER_BINDING_EXT 0x88ED
+#define GL_PIXEL_UNPACK_BUFFER_BINDING_EXT 0x88EF
+#endif /* GL_EXT_pixel_buffer_object */
+
+#ifndef GL_EXT_pixel_transform
+#define GL_EXT_pixel_transform 1
+#define GL_PIXEL_TRANSFORM_2D_EXT 0x8330
+#define GL_PIXEL_MAG_FILTER_EXT 0x8331
+#define GL_PIXEL_MIN_FILTER_EXT 0x8332
+#define GL_PIXEL_CUBIC_WEIGHT_EXT 0x8333
+#define GL_CUBIC_EXT 0x8334
+#define GL_AVERAGE_EXT 0x8335
+#define GL_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT 0x8336
+#define GL_MAX_PIXEL_TRANSFORM_2D_STACK_DEPTH_EXT 0x8337
+#define GL_PIXEL_TRANSFORM_2D_MATRIX_EXT 0x8338
+typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERIEXTPROC) (GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERFEXTPROC) (GLenum target, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERIVEXTPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLPIXELTRANSFORMPARAMETERFVEXTPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPIXELTRANSFORMPARAMETERIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETPIXELTRANSFORMPARAMETERFVEXTPROC) (GLenum target, GLenum pname, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPixelTransformParameteriEXT (GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glPixelTransformParameterfEXT (GLenum target, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glPixelTransformParameterivEXT (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glPixelTransformParameterfvEXT (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glGetPixelTransformParameterivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetPixelTransformParameterfvEXT (GLenum target, GLenum pname, GLfloat *params);
+#endif
+#endif /* GL_EXT_pixel_transform */
+
+#ifndef GL_EXT_pixel_transform_color_table
+#define GL_EXT_pixel_transform_color_table 1
+#endif /* GL_EXT_pixel_transform_color_table */
+
+#ifndef GL_EXT_point_parameters
+#define GL_EXT_point_parameters 1
+#define GL_POINT_SIZE_MIN_EXT 0x8126
+#define GL_POINT_SIZE_MAX_EXT 0x8127
+#define GL_POINT_FADE_THRESHOLD_SIZE_EXT 0x8128
+#define GL_DISTANCE_ATTENUATION_EXT 0x8129
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFEXTPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFVEXTPROC) (GLenum pname, const GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPointParameterfEXT (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glPointParameterfvEXT (GLenum pname, const GLfloat *params);
+#endif
+#endif /* GL_EXT_point_parameters */
+
+#ifndef GL_EXT_polygon_offset
+#define GL_EXT_polygon_offset 1
+#define GL_POLYGON_OFFSET_EXT 0x8037
+#define GL_POLYGON_OFFSET_FACTOR_EXT 0x8038
+#define GL_POLYGON_OFFSET_BIAS_EXT 0x8039
+typedef void (APIENTRYP PFNGLPOLYGONOFFSETEXTPROC) (GLfloat factor, GLfloat bias);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPolygonOffsetEXT (GLfloat factor, GLfloat bias);
+#endif
+#endif /* GL_EXT_polygon_offset */
+
+#ifndef GL_EXT_polygon_offset_clamp
+#define GL_EXT_polygon_offset_clamp 1
+#define GL_POLYGON_OFFSET_CLAMP_EXT 0x8E1B
+typedef void (APIENTRYP PFNGLPOLYGONOFFSETCLAMPEXTPROC) (GLfloat factor, GLfloat units, GLfloat clamp);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPolygonOffsetClampEXT (GLfloat factor, GLfloat units, GLfloat clamp);
+#endif
+#endif /* GL_EXT_polygon_offset_clamp */
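+
+/*
+ * Illustrative sketch (editorial): unlike core glPolygonOffset, the third
+ * argument bounds the total depth bias, which helps avoid over-biasing
+ * steep triangles when rendering shadow maps.
+ *
+ *   glEnable(GL_POLYGON_OFFSET_FILL);
+ *   glPolygonOffsetClampEXT(2.0f, 4.0f, 0.01f);
+ */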
+
+#ifndef GL_EXT_post_depth_coverage
+#define GL_EXT_post_depth_coverage 1
+#endif /* GL_EXT_post_depth_coverage */
+
+#ifndef GL_EXT_provoking_vertex
+#define GL_EXT_provoking_vertex 1
+#define GL_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION_EXT 0x8E4C
+#define GL_FIRST_VERTEX_CONVENTION_EXT 0x8E4D
+#define GL_LAST_VERTEX_CONVENTION_EXT 0x8E4E
+#define GL_PROVOKING_VERTEX_EXT 0x8E4F
+typedef void (APIENTRYP PFNGLPROVOKINGVERTEXEXTPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProvokingVertexEXT (GLenum mode);
+#endif
+#endif /* GL_EXT_provoking_vertex */
+
+#ifndef GL_EXT_raster_multisample
+#define GL_EXT_raster_multisample 1
+#define GL_RASTER_MULTISAMPLE_EXT 0x9327
+#define GL_RASTER_SAMPLES_EXT 0x9328
+#define GL_MAX_RASTER_SAMPLES_EXT 0x9329
+#define GL_RASTER_FIXED_SAMPLE_LOCATIONS_EXT 0x932A
+#define GL_MULTISAMPLE_RASTERIZATION_ALLOWED_EXT 0x932B
+#define GL_EFFECTIVE_RASTER_SAMPLES_EXT 0x932C
+typedef void (APIENTRYP PFNGLRASTERSAMPLESEXTPROC) (GLuint samples, GLboolean fixedsamplelocations);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glRasterSamplesEXT (GLuint samples, GLboolean fixedsamplelocations);
+#endif
+#endif /* GL_EXT_raster_multisample */
+
+#ifndef GL_EXT_rescale_normal
+#define GL_EXT_rescale_normal 1
+#define GL_RESCALE_NORMAL_EXT 0x803A
+#endif /* GL_EXT_rescale_normal */
+
+#ifndef GL_EXT_secondary_color
+#define GL_EXT_secondary_color 1
+#define GL_COLOR_SUM_EXT 0x8458
+#define GL_CURRENT_SECONDARY_COLOR_EXT 0x8459
+#define GL_SECONDARY_COLOR_ARRAY_SIZE_EXT 0x845A
+#define GL_SECONDARY_COLOR_ARRAY_TYPE_EXT 0x845B
+#define GL_SECONDARY_COLOR_ARRAY_STRIDE_EXT 0x845C
+#define GL_SECONDARY_COLOR_ARRAY_POINTER_EXT 0x845D
+#define GL_SECONDARY_COLOR_ARRAY_EXT 0x845E
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BEXTPROC) (GLbyte red, GLbyte green, GLbyte blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3BVEXTPROC) (const GLbyte *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DEXTPROC) (GLdouble red, GLdouble green, GLdouble blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3DVEXTPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FEXTPROC) (GLfloat red, GLfloat green, GLfloat blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3FVEXTPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IEXTPROC) (GLint red, GLint green, GLint blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3IVEXTPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SEXTPROC) (GLshort red, GLshort green, GLshort blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3SVEXTPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBEXTPROC) (GLubyte red, GLubyte green, GLubyte blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UBVEXTPROC) (const GLubyte *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIEXTPROC) (GLuint red, GLuint green, GLuint blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3UIVEXTPROC) (const GLuint *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USEXTPROC) (GLushort red, GLushort green, GLushort blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3USVEXTPROC) (const GLushort *v);
+typedef void (APIENTRYP PFNGLSECONDARYCOLORPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSecondaryColor3bEXT (GLbyte red, GLbyte green, GLbyte blue);
+GLAPI void APIENTRY glSecondaryColor3bvEXT (const GLbyte *v);
+GLAPI void APIENTRY glSecondaryColor3dEXT (GLdouble red, GLdouble green, GLdouble blue);
+GLAPI void APIENTRY glSecondaryColor3dvEXT (const GLdouble *v);
+GLAPI void APIENTRY glSecondaryColor3fEXT (GLfloat red, GLfloat green, GLfloat blue);
+GLAPI void APIENTRY glSecondaryColor3fvEXT (const GLfloat *v);
+GLAPI void APIENTRY glSecondaryColor3iEXT (GLint red, GLint green, GLint blue);
+GLAPI void APIENTRY glSecondaryColor3ivEXT (const GLint *v);
+GLAPI void APIENTRY glSecondaryColor3sEXT (GLshort red, GLshort green, GLshort blue);
+GLAPI void APIENTRY glSecondaryColor3svEXT (const GLshort *v);
+GLAPI void APIENTRY glSecondaryColor3ubEXT (GLubyte red, GLubyte green, GLubyte blue);
+GLAPI void APIENTRY glSecondaryColor3ubvEXT (const GLubyte *v);
+GLAPI void APIENTRY glSecondaryColor3uiEXT (GLuint red, GLuint green, GLuint blue);
+GLAPI void APIENTRY glSecondaryColor3uivEXT (const GLuint *v);
+GLAPI void APIENTRY glSecondaryColor3usEXT (GLushort red, GLushort green, GLushort blue);
+GLAPI void APIENTRY glSecondaryColor3usvEXT (const GLushort *v);
+GLAPI void APIENTRY glSecondaryColorPointerEXT (GLint size, GLenum type, GLsizei stride, const void *pointer);
+#endif
+#endif /* GL_EXT_secondary_color */
+
+#ifndef GL_EXT_semaphore
+#define GL_EXT_semaphore 1
+#define GL_LAYOUT_GENERAL_EXT 0x958D
+#define GL_LAYOUT_COLOR_ATTACHMENT_EXT 0x958E
+#define GL_LAYOUT_DEPTH_STENCIL_ATTACHMENT_EXT 0x958F
+#define GL_LAYOUT_DEPTH_STENCIL_READ_ONLY_EXT 0x9590
+#define GL_LAYOUT_SHADER_READ_ONLY_EXT 0x9591
+#define GL_LAYOUT_TRANSFER_SRC_EXT 0x9592
+#define GL_LAYOUT_TRANSFER_DST_EXT 0x9593
+#define GL_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_EXT 0x9530
+#define GL_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_EXT 0x9531
+typedef void (APIENTRYP PFNGLGENSEMAPHORESEXTPROC) (GLsizei n, GLuint *semaphores);
+typedef void (APIENTRYP PFNGLDELETESEMAPHORESEXTPROC) (GLsizei n, const GLuint *semaphores);
+typedef GLboolean (APIENTRYP PFNGLISSEMAPHOREEXTPROC) (GLuint semaphore);
+typedef void (APIENTRYP PFNGLSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, const GLuint64 *params);
+typedef void (APIENTRYP PFNGLGETSEMAPHOREPARAMETERUI64VEXTPROC) (GLuint semaphore, GLenum pname, GLuint64 *params);
+typedef void (APIENTRYP PFNGLWAITSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts);
+typedef void (APIENTRYP PFNGLSIGNALSEMAPHOREEXTPROC) (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenSemaphoresEXT (GLsizei n, GLuint *semaphores);
+GLAPI void APIENTRY glDeleteSemaphoresEXT (GLsizei n, const GLuint *semaphores);
+GLAPI GLboolean APIENTRY glIsSemaphoreEXT (GLuint semaphore);
+GLAPI void APIENTRY glSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, const GLuint64 *params);
+GLAPI void APIENTRY glGetSemaphoreParameterui64vEXT (GLuint semaphore, GLenum pname, GLuint64 *params);
+GLAPI void APIENTRY glWaitSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *srcLayouts);
+GLAPI void APIENTRY glSignalSemaphoreEXT (GLuint semaphore, GLuint numBufferBarriers, const GLuint *buffers, GLuint numTextureBarriers, const GLuint *textures, const GLenum *dstLayouts);
+#endif
+#endif /* GL_EXT_semaphore */
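+
+/*
+ * Illustrative sketch (editorial): a typical interop pattern waits on a
+ * semaphore signalled by the exporting API before using a shared texture,
+ * then signals back.  `sem` and `tex` are assumed to have been imported via
+ * GL_EXT_semaphore_fd/_win32 and GL_EXT_memory_object respectively.
+ *
+ *   GLenum src = GL_LAYOUT_SHADER_READ_ONLY_EXT;
+ *   GLenum dst = GL_LAYOUT_COLOR_ATTACHMENT_EXT;
+ *   glWaitSemaphoreEXT(sem, 0, NULL, 1, &tex, &src);
+ *   ... draw using tex ...
+ *   glSignalSemaphoreEXT(sem, 0, NULL, 1, &tex, &dst);
+ */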
+
+#ifndef GL_EXT_semaphore_fd
+#define GL_EXT_semaphore_fd 1
+typedef void (APIENTRYP PFNGLIMPORTSEMAPHOREFDEXTPROC) (GLuint semaphore, GLenum handleType, GLint fd);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glImportSemaphoreFdEXT (GLuint semaphore, GLenum handleType, GLint fd);
+#endif
+#endif /* GL_EXT_semaphore_fd */
+
+#ifndef GL_EXT_semaphore_win32
+#define GL_EXT_semaphore_win32 1
+#define GL_HANDLE_TYPE_D3D12_FENCE_EXT 0x9594
+#define GL_D3D12_FENCE_VALUE_EXT 0x9595
+typedef void (APIENTRYP PFNGLIMPORTSEMAPHOREWIN32HANDLEEXTPROC) (GLuint semaphore, GLenum handleType, void *handle);
+typedef void (APIENTRYP PFNGLIMPORTSEMAPHOREWIN32NAMEEXTPROC) (GLuint semaphore, GLenum handleType, const void *name);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glImportSemaphoreWin32HandleEXT (GLuint semaphore, GLenum handleType, void *handle);
+GLAPI void APIENTRY glImportSemaphoreWin32NameEXT (GLuint semaphore, GLenum handleType, const void *name);
+#endif
+#endif /* GL_EXT_semaphore_win32 */
+
+#ifndef GL_EXT_separate_shader_objects
+#define GL_EXT_separate_shader_objects 1
+#define GL_ACTIVE_PROGRAM_EXT 0x8B8D
+typedef void (APIENTRYP PFNGLUSESHADERPROGRAMEXTPROC) (GLenum type, GLuint program);
+typedef void (APIENTRYP PFNGLACTIVEPROGRAMEXTPROC) (GLuint program);
+typedef GLuint (APIENTRYP PFNGLCREATESHADERPROGRAMEXTPROC) (GLenum type, const GLchar *string);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glUseShaderProgramEXT (GLenum type, GLuint program);
+GLAPI void APIENTRY glActiveProgramEXT (GLuint program);
+GLAPI GLuint APIENTRY glCreateShaderProgramEXT (GLenum type, const GLchar *string);
+#endif
+#endif /* GL_EXT_separate_shader_objects */
+
+#ifndef GL_EXT_separate_specular_color
+#define GL_EXT_separate_specular_color 1
+#define GL_LIGHT_MODEL_COLOR_CONTROL_EXT 0x81F8
+#define GL_SINGLE_COLOR_EXT 0x81F9
+#define GL_SEPARATE_SPECULAR_COLOR_EXT 0x81FA
+#endif /* GL_EXT_separate_specular_color */
+
+#ifndef GL_EXT_shader_framebuffer_fetch
+#define GL_EXT_shader_framebuffer_fetch 1
+#define GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT 0x8A52
+#endif /* GL_EXT_shader_framebuffer_fetch */
+
+#ifndef GL_EXT_shader_framebuffer_fetch_non_coherent
+#define GL_EXT_shader_framebuffer_fetch_non_coherent 1
+typedef void (APIENTRYP PFNGLFRAMEBUFFERFETCHBARRIEREXTPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferFetchBarrierEXT (void);
+#endif
+#endif /* GL_EXT_shader_framebuffer_fetch_non_coherent */
+
+#ifndef GL_EXT_shader_image_load_formatted
+#define GL_EXT_shader_image_load_formatted 1
+#endif /* GL_EXT_shader_image_load_formatted */
+
+#ifndef GL_EXT_shader_image_load_store
+#define GL_EXT_shader_image_load_store 1
+#define GL_MAX_IMAGE_UNITS_EXT 0x8F38
+#define GL_MAX_COMBINED_IMAGE_UNITS_AND_FRAGMENT_OUTPUTS_EXT 0x8F39
+#define GL_IMAGE_BINDING_NAME_EXT 0x8F3A
+#define GL_IMAGE_BINDING_LEVEL_EXT 0x8F3B
+#define GL_IMAGE_BINDING_LAYERED_EXT 0x8F3C
+#define GL_IMAGE_BINDING_LAYER_EXT 0x8F3D
+#define GL_IMAGE_BINDING_ACCESS_EXT 0x8F3E
+#define GL_IMAGE_1D_EXT 0x904C
+#define GL_IMAGE_2D_EXT 0x904D
+#define GL_IMAGE_3D_EXT 0x904E
+#define GL_IMAGE_2D_RECT_EXT 0x904F
+#define GL_IMAGE_CUBE_EXT 0x9050
+#define GL_IMAGE_BUFFER_EXT 0x9051
+#define GL_IMAGE_1D_ARRAY_EXT 0x9052
+#define GL_IMAGE_2D_ARRAY_EXT 0x9053
+#define GL_IMAGE_CUBE_MAP_ARRAY_EXT 0x9054
+#define GL_IMAGE_2D_MULTISAMPLE_EXT 0x9055
+#define GL_IMAGE_2D_MULTISAMPLE_ARRAY_EXT 0x9056
+#define GL_INT_IMAGE_1D_EXT 0x9057
+#define GL_INT_IMAGE_2D_EXT 0x9058
+#define GL_INT_IMAGE_3D_EXT 0x9059
+#define GL_INT_IMAGE_2D_RECT_EXT 0x905A
+#define GL_INT_IMAGE_CUBE_EXT 0x905B
+#define GL_INT_IMAGE_BUFFER_EXT 0x905C
+#define GL_INT_IMAGE_1D_ARRAY_EXT 0x905D
+#define GL_INT_IMAGE_2D_ARRAY_EXT 0x905E
+#define GL_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x905F
+#define GL_INT_IMAGE_2D_MULTISAMPLE_EXT 0x9060
+#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT 0x9061
+#define GL_UNSIGNED_INT_IMAGE_1D_EXT 0x9062
+#define GL_UNSIGNED_INT_IMAGE_2D_EXT 0x9063
+#define GL_UNSIGNED_INT_IMAGE_3D_EXT 0x9064
+#define GL_UNSIGNED_INT_IMAGE_2D_RECT_EXT 0x9065
+#define GL_UNSIGNED_INT_IMAGE_CUBE_EXT 0x9066
+#define GL_UNSIGNED_INT_IMAGE_BUFFER_EXT 0x9067
+#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY_EXT 0x9068
+#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY_EXT 0x9069
+#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY_EXT 0x906A
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_EXT 0x906B
+#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY_EXT 0x906C
+#define GL_MAX_IMAGE_SAMPLES_EXT 0x906D
+#define GL_IMAGE_BINDING_FORMAT_EXT 0x906E
+#define GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT_EXT 0x00000001
+#define GL_ELEMENT_ARRAY_BARRIER_BIT_EXT 0x00000002
+#define GL_UNIFORM_BARRIER_BIT_EXT 0x00000004
+#define GL_TEXTURE_FETCH_BARRIER_BIT_EXT 0x00000008
+#define GL_SHADER_IMAGE_ACCESS_BARRIER_BIT_EXT 0x00000020
+#define GL_COMMAND_BARRIER_BIT_EXT 0x00000040
+#define GL_PIXEL_BUFFER_BARRIER_BIT_EXT 0x00000080
+#define GL_TEXTURE_UPDATE_BARRIER_BIT_EXT 0x00000100
+#define GL_BUFFER_UPDATE_BARRIER_BIT_EXT 0x00000200
+#define GL_FRAMEBUFFER_BARRIER_BIT_EXT 0x00000400
+#define GL_TRANSFORM_FEEDBACK_BARRIER_BIT_EXT 0x00000800
+#define GL_ATOMIC_COUNTER_BARRIER_BIT_EXT 0x00001000
+#define GL_ALL_BARRIER_BITS_EXT 0xFFFFFFFF
+typedef void (APIENTRYP PFNGLBINDIMAGETEXTUREEXTPROC) (GLuint index, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLint format);
+typedef void (APIENTRYP PFNGLMEMORYBARRIEREXTPROC) (GLbitfield barriers);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindImageTextureEXT (GLuint index, GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum access, GLint format);
+GLAPI void APIENTRY glMemoryBarrierEXT (GLbitfield barriers);
+#endif
+#endif /* GL_EXT_shader_image_load_store */
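+
+/*
+ * Illustrative sketch (editorial): bind one level of `tex` (assumed to be an
+ * immutable RGBA8 texture) to image unit 0 for shader stores, then make the
+ * stores visible to later texture fetches with the matching barrier bit.
+ *
+ *   glBindImageTextureEXT(0, tex, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA8);
+ *   ... draw or dispatch work that writes the image ...
+ *   glMemoryBarrierEXT(GL_TEXTURE_FETCH_BARRIER_BIT_EXT);
+ */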
+
+#ifndef GL_EXT_shader_integer_mix
+#define GL_EXT_shader_integer_mix 1
+#endif /* GL_EXT_shader_integer_mix */
+
+#ifndef GL_EXT_shadow_funcs
+#define GL_EXT_shadow_funcs 1
+#endif /* GL_EXT_shadow_funcs */
+
+#ifndef GL_EXT_shared_texture_palette
+#define GL_EXT_shared_texture_palette 1
+#define GL_SHARED_TEXTURE_PALETTE_EXT 0x81FB
+#endif /* GL_EXT_shared_texture_palette */
+
+#ifndef GL_EXT_sparse_texture2
+#define GL_EXT_sparse_texture2 1
+#endif /* GL_EXT_sparse_texture2 */
+
+#ifndef GL_EXT_stencil_clear_tag
+#define GL_EXT_stencil_clear_tag 1
+#define GL_STENCIL_TAG_BITS_EXT 0x88F2
+#define GL_STENCIL_CLEAR_TAG_VALUE_EXT 0x88F3
+typedef void (APIENTRYP PFNGLSTENCILCLEARTAGEXTPROC) (GLsizei stencilTagBits, GLuint stencilClearTag);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glStencilClearTagEXT (GLsizei stencilTagBits, GLuint stencilClearTag);
+#endif
+#endif /* GL_EXT_stencil_clear_tag */
+
+#ifndef GL_EXT_stencil_two_side
+#define GL_EXT_stencil_two_side 1
+#define GL_STENCIL_TEST_TWO_SIDE_EXT 0x8910
+#define GL_ACTIVE_STENCIL_FACE_EXT 0x8911
+typedef void (APIENTRYP PFNGLACTIVESTENCILFACEEXTPROC) (GLenum face);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glActiveStencilFaceEXT (GLenum face);
+#endif
+#endif /* GL_EXT_stencil_two_side */
+
+#ifndef GL_EXT_stencil_wrap
+#define GL_EXT_stencil_wrap 1
+#define GL_INCR_WRAP_EXT 0x8507
+#define GL_DECR_WRAP_EXT 0x8508
+#endif /* GL_EXT_stencil_wrap */
+
+#ifndef GL_EXT_subtexture
+#define GL_EXT_subtexture 1
+typedef void (APIENTRYP PFNGLTEXSUBIMAGE1DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXSUBIMAGE2DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexSubImage1DEXT (GLenum target, GLint level, GLint xoffset, GLsizei width, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTexSubImage2DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLsizei width, GLsizei height, GLenum format, GLenum type, const void *pixels);
+#endif
+#endif /* GL_EXT_subtexture */
+
+#ifndef GL_EXT_texture
+#define GL_EXT_texture 1
+#define GL_ALPHA4_EXT 0x803B
+#define GL_ALPHA8_EXT 0x803C
+#define GL_ALPHA12_EXT 0x803D
+#define GL_ALPHA16_EXT 0x803E
+#define GL_LUMINANCE4_EXT 0x803F
+#define GL_LUMINANCE8_EXT 0x8040
+#define GL_LUMINANCE12_EXT 0x8041
+#define GL_LUMINANCE16_EXT 0x8042
+#define GL_LUMINANCE4_ALPHA4_EXT 0x8043
+#define GL_LUMINANCE6_ALPHA2_EXT 0x8044
+#define GL_LUMINANCE8_ALPHA8_EXT 0x8045
+#define GL_LUMINANCE12_ALPHA4_EXT 0x8046
+#define GL_LUMINANCE12_ALPHA12_EXT 0x8047
+#define GL_LUMINANCE16_ALPHA16_EXT 0x8048
+#define GL_INTENSITY_EXT 0x8049
+#define GL_INTENSITY4_EXT 0x804A
+#define GL_INTENSITY8_EXT 0x804B
+#define GL_INTENSITY12_EXT 0x804C
+#define GL_INTENSITY16_EXT 0x804D
+#define GL_RGB2_EXT 0x804E
+#define GL_RGB4_EXT 0x804F
+#define GL_RGB5_EXT 0x8050
+#define GL_RGB8_EXT 0x8051
+#define GL_RGB10_EXT 0x8052
+#define GL_RGB12_EXT 0x8053
+#define GL_RGB16_EXT 0x8054
+#define GL_RGBA2_EXT 0x8055
+#define GL_RGBA4_EXT 0x8056
+#define GL_RGB5_A1_EXT 0x8057
+#define GL_RGBA8_EXT 0x8058
+#define GL_RGB10_A2_EXT 0x8059
+#define GL_RGBA12_EXT 0x805A
+#define GL_RGBA16_EXT 0x805B
+#define GL_TEXTURE_RED_SIZE_EXT 0x805C
+#define GL_TEXTURE_GREEN_SIZE_EXT 0x805D
+#define GL_TEXTURE_BLUE_SIZE_EXT 0x805E
+#define GL_TEXTURE_ALPHA_SIZE_EXT 0x805F
+#define GL_TEXTURE_LUMINANCE_SIZE_EXT 0x8060
+#define GL_TEXTURE_INTENSITY_SIZE_EXT 0x8061
+#define GL_REPLACE_EXT 0x8062
+#define GL_PROXY_TEXTURE_1D_EXT 0x8063
+#define GL_PROXY_TEXTURE_2D_EXT 0x8064
+#define GL_TEXTURE_TOO_LARGE_EXT 0x8065
+#endif /* GL_EXT_texture */
+
+#ifndef GL_EXT_texture3D
+#define GL_EXT_texture3D 1
+#define GL_PACK_SKIP_IMAGES_EXT 0x806B
+#define GL_PACK_IMAGE_HEIGHT_EXT 0x806C
+#define GL_UNPACK_SKIP_IMAGES_EXT 0x806D
+#define GL_UNPACK_IMAGE_HEIGHT_EXT 0x806E
+#define GL_TEXTURE_3D_EXT 0x806F
+#define GL_PROXY_TEXTURE_3D_EXT 0x8070
+#define GL_TEXTURE_DEPTH_EXT 0x8071
+#define GL_TEXTURE_WRAP_R_EXT 0x8072
+#define GL_MAX_3D_TEXTURE_SIZE_EXT 0x8073
+typedef void (APIENTRYP PFNGLTEXIMAGE3DEXTPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXSUBIMAGE3DEXTPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexImage3DEXT (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTexSubImage3DEXT (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLsizei width, GLsizei height, GLsizei depth, GLenum format, GLenum type, const void *pixels);
+#endif
+#endif /* GL_EXT_texture3D */
+
+#ifndef GL_EXT_texture_array
+#define GL_EXT_texture_array 1
+#define GL_TEXTURE_1D_ARRAY_EXT 0x8C18
+#define GL_PROXY_TEXTURE_1D_ARRAY_EXT 0x8C19
+#define GL_TEXTURE_2D_ARRAY_EXT 0x8C1A
+#define GL_PROXY_TEXTURE_2D_ARRAY_EXT 0x8C1B
+#define GL_TEXTURE_BINDING_1D_ARRAY_EXT 0x8C1C
+#define GL_TEXTURE_BINDING_2D_ARRAY_EXT 0x8C1D
+#define GL_MAX_ARRAY_TEXTURE_LAYERS_EXT 0x88FF
+#define GL_COMPARE_REF_DEPTH_TO_TEXTURE_EXT 0x884E
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTURELAYEREXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferTextureLayerEXT (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint layer);
+#endif
+#endif /* GL_EXT_texture_array */
+
+#ifndef GL_EXT_texture_buffer_object
+#define GL_EXT_texture_buffer_object 1
+#define GL_TEXTURE_BUFFER_EXT 0x8C2A
+#define GL_MAX_TEXTURE_BUFFER_SIZE_EXT 0x8C2B
+#define GL_TEXTURE_BINDING_BUFFER_EXT 0x8C2C
+#define GL_TEXTURE_BUFFER_DATA_STORE_BINDING_EXT 0x8C2D
+#define GL_TEXTURE_BUFFER_FORMAT_EXT 0x8C2E
+typedef void (APIENTRYP PFNGLTEXBUFFEREXTPROC) (GLenum target, GLenum internalformat, GLuint buffer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexBufferEXT (GLenum target, GLenum internalformat, GLuint buffer);
+#endif
+#endif /* GL_EXT_texture_buffer_object */
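+
+/*
+ * Illustrative sketch (editorial): expose buffer object `buf` to shaders as
+ * a samplerBuffer.  GL_RGBA32F is assumed to be available from the core
+ * sections of this header, and `buf`/`tex` to be previously generated names.
+ *
+ *   glBindTexture(GL_TEXTURE_BUFFER_EXT, tex);
+ *   glTexBufferEXT(GL_TEXTURE_BUFFER_EXT, GL_RGBA32F, buf);
+ */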
+
+#ifndef GL_EXT_texture_compression_latc
+#define GL_EXT_texture_compression_latc 1
+#define GL_COMPRESSED_LUMINANCE_LATC1_EXT 0x8C70
+#define GL_COMPRESSED_SIGNED_LUMINANCE_LATC1_EXT 0x8C71
+#define GL_COMPRESSED_LUMINANCE_ALPHA_LATC2_EXT 0x8C72
+#define GL_COMPRESSED_SIGNED_LUMINANCE_ALPHA_LATC2_EXT 0x8C73
+#endif /* GL_EXT_texture_compression_latc */
+
+#ifndef GL_EXT_texture_compression_rgtc
+#define GL_EXT_texture_compression_rgtc 1
+#define GL_COMPRESSED_RED_RGTC1_EXT 0x8DBB
+#define GL_COMPRESSED_SIGNED_RED_RGTC1_EXT 0x8DBC
+#define GL_COMPRESSED_RED_GREEN_RGTC2_EXT 0x8DBD
+#define GL_COMPRESSED_SIGNED_RED_GREEN_RGTC2_EXT 0x8DBE
+#endif /* GL_EXT_texture_compression_rgtc */
+
+#ifndef GL_EXT_texture_compression_s3tc
+#define GL_EXT_texture_compression_s3tc 1
+#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
+#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
+#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
+#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
+#endif /* GL_EXT_texture_compression_s3tc */
+
+#ifndef GL_EXT_texture_cube_map
+#define GL_EXT_texture_cube_map 1
+#define GL_NORMAL_MAP_EXT 0x8511
+#define GL_REFLECTION_MAP_EXT 0x8512
+#define GL_TEXTURE_CUBE_MAP_EXT 0x8513
+#define GL_TEXTURE_BINDING_CUBE_MAP_EXT 0x8514
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_X_EXT 0x8515
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_X_EXT 0x8516
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Y_EXT 0x8517
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_EXT 0x8518
+#define GL_TEXTURE_CUBE_MAP_POSITIVE_Z_EXT 0x8519
+#define GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_EXT 0x851A
+#define GL_PROXY_TEXTURE_CUBE_MAP_EXT 0x851B
+#define GL_MAX_CUBE_MAP_TEXTURE_SIZE_EXT 0x851C
+#endif /* GL_EXT_texture_cube_map */
+
+#ifndef GL_EXT_texture_env_add
+#define GL_EXT_texture_env_add 1
+#endif /* GL_EXT_texture_env_add */
+
+#ifndef GL_EXT_texture_env_combine
+#define GL_EXT_texture_env_combine 1
+#define GL_COMBINE_EXT 0x8570
+#define GL_COMBINE_RGB_EXT 0x8571
+#define GL_COMBINE_ALPHA_EXT 0x8572
+#define GL_RGB_SCALE_EXT 0x8573
+#define GL_ADD_SIGNED_EXT 0x8574
+#define GL_INTERPOLATE_EXT 0x8575
+#define GL_CONSTANT_EXT 0x8576
+#define GL_PRIMARY_COLOR_EXT 0x8577
+#define GL_PREVIOUS_EXT 0x8578
+#define GL_SOURCE0_RGB_EXT 0x8580
+#define GL_SOURCE1_RGB_EXT 0x8581
+#define GL_SOURCE2_RGB_EXT 0x8582
+#define GL_SOURCE0_ALPHA_EXT 0x8588
+#define GL_SOURCE1_ALPHA_EXT 0x8589
+#define GL_SOURCE2_ALPHA_EXT 0x858A
+#define GL_OPERAND0_RGB_EXT 0x8590
+#define GL_OPERAND1_RGB_EXT 0x8591
+#define GL_OPERAND2_RGB_EXT 0x8592
+#define GL_OPERAND0_ALPHA_EXT 0x8598
+#define GL_OPERAND1_ALPHA_EXT 0x8599
+#define GL_OPERAND2_ALPHA_EXT 0x859A
+#endif /* GL_EXT_texture_env_combine */
+
+#ifndef GL_EXT_texture_env_dot3
+#define GL_EXT_texture_env_dot3 1
+#define GL_DOT3_RGB_EXT 0x8740
+#define GL_DOT3_RGBA_EXT 0x8741
+#endif /* GL_EXT_texture_env_dot3 */
+
+#ifndef GL_EXT_texture_filter_anisotropic
+#define GL_EXT_texture_filter_anisotropic 1
+#define GL_TEXTURE_MAX_ANISOTROPY_EXT 0x84FE
+#define GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT 0x84FF
+#endif /* GL_EXT_texture_filter_anisotropic */
+
+#ifndef GL_EXT_texture_filter_minmax
+#define GL_EXT_texture_filter_minmax 1
+#define GL_TEXTURE_REDUCTION_MODE_EXT 0x9366
+#define GL_WEIGHTED_AVERAGE_EXT 0x9367
+#endif /* GL_EXT_texture_filter_minmax */
+
+#ifndef GL_EXT_texture_integer
+#define GL_EXT_texture_integer 1
+#define GL_RGBA32UI_EXT 0x8D70
+#define GL_RGB32UI_EXT 0x8D71
+#define GL_ALPHA32UI_EXT 0x8D72
+#define GL_INTENSITY32UI_EXT 0x8D73
+#define GL_LUMINANCE32UI_EXT 0x8D74
+#define GL_LUMINANCE_ALPHA32UI_EXT 0x8D75
+#define GL_RGBA16UI_EXT 0x8D76
+#define GL_RGB16UI_EXT 0x8D77
+#define GL_ALPHA16UI_EXT 0x8D78
+#define GL_INTENSITY16UI_EXT 0x8D79
+#define GL_LUMINANCE16UI_EXT 0x8D7A
+#define GL_LUMINANCE_ALPHA16UI_EXT 0x8D7B
+#define GL_RGBA8UI_EXT 0x8D7C
+#define GL_RGB8UI_EXT 0x8D7D
+#define GL_ALPHA8UI_EXT 0x8D7E
+#define GL_INTENSITY8UI_EXT 0x8D7F
+#define GL_LUMINANCE8UI_EXT 0x8D80
+#define GL_LUMINANCE_ALPHA8UI_EXT 0x8D81
+#define GL_RGBA32I_EXT 0x8D82
+#define GL_RGB32I_EXT 0x8D83
+#define GL_ALPHA32I_EXT 0x8D84
+#define GL_INTENSITY32I_EXT 0x8D85
+#define GL_LUMINANCE32I_EXT 0x8D86
+#define GL_LUMINANCE_ALPHA32I_EXT 0x8D87
+#define GL_RGBA16I_EXT 0x8D88
+#define GL_RGB16I_EXT 0x8D89
+#define GL_ALPHA16I_EXT 0x8D8A
+#define GL_INTENSITY16I_EXT 0x8D8B
+#define GL_LUMINANCE16I_EXT 0x8D8C
+#define GL_LUMINANCE_ALPHA16I_EXT 0x8D8D
+#define GL_RGBA8I_EXT 0x8D8E
+#define GL_RGB8I_EXT 0x8D8F
+#define GL_ALPHA8I_EXT 0x8D90
+#define GL_INTENSITY8I_EXT 0x8D91
+#define GL_LUMINANCE8I_EXT 0x8D92
+#define GL_LUMINANCE_ALPHA8I_EXT 0x8D93
+#define GL_RED_INTEGER_EXT 0x8D94
+#define GL_GREEN_INTEGER_EXT 0x8D95
+#define GL_BLUE_INTEGER_EXT 0x8D96
+#define GL_ALPHA_INTEGER_EXT 0x8D97
+#define GL_RGB_INTEGER_EXT 0x8D98
+#define GL_RGBA_INTEGER_EXT 0x8D99
+#define GL_BGR_INTEGER_EXT 0x8D9A
+#define GL_BGRA_INTEGER_EXT 0x8D9B
+#define GL_LUMINANCE_INTEGER_EXT 0x8D9C
+#define GL_LUMINANCE_ALPHA_INTEGER_EXT 0x8D9D
+#define GL_RGBA_INTEGER_MODE_EXT 0x8D9E
+typedef void (APIENTRYP PFNGLTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETTEXPARAMETERIIVEXTPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETTEXPARAMETERIUIVEXTPROC) (GLenum target, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLCLEARCOLORIIEXTPROC) (GLint red, GLint green, GLint blue, GLint alpha);
+typedef void (APIENTRYP PFNGLCLEARCOLORIUIEXTPROC) (GLuint red, GLuint green, GLuint blue, GLuint alpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexParameterIivEXT (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glTexParameterIuivEXT (GLenum target, GLenum pname, const GLuint *params);
+GLAPI void APIENTRY glGetTexParameterIivEXT (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetTexParameterIuivEXT (GLenum target, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glClearColorIiEXT (GLint red, GLint green, GLint blue, GLint alpha);
+GLAPI void APIENTRY glClearColorIuiEXT (GLuint red, GLuint green, GLuint blue, GLuint alpha);
+#endif
+#endif /* GL_EXT_texture_integer */
+
+#ifndef GL_EXT_texture_lod_bias
+#define GL_EXT_texture_lod_bias 1
+#define GL_MAX_TEXTURE_LOD_BIAS_EXT 0x84FD
+#define GL_TEXTURE_FILTER_CONTROL_EXT 0x8500
+#define GL_TEXTURE_LOD_BIAS_EXT 0x8501
+#endif /* GL_EXT_texture_lod_bias */
+
+#ifndef GL_EXT_texture_mirror_clamp
+#define GL_EXT_texture_mirror_clamp 1
+#define GL_MIRROR_CLAMP_EXT 0x8742
+#define GL_MIRROR_CLAMP_TO_EDGE_EXT 0x8743
+#define GL_MIRROR_CLAMP_TO_BORDER_EXT 0x8912
+#endif /* GL_EXT_texture_mirror_clamp */
+
+#ifndef GL_EXT_texture_object
+#define GL_EXT_texture_object 1
+#define GL_TEXTURE_PRIORITY_EXT 0x8066
+#define GL_TEXTURE_RESIDENT_EXT 0x8067
+#define GL_TEXTURE_1D_BINDING_EXT 0x8068
+#define GL_TEXTURE_2D_BINDING_EXT 0x8069
+#define GL_TEXTURE_3D_BINDING_EXT 0x806A
+typedef GLboolean (APIENTRYP PFNGLARETEXTURESRESIDENTEXTPROC) (GLsizei n, const GLuint *textures, GLboolean *residences);
+typedef void (APIENTRYP PFNGLBINDTEXTUREEXTPROC) (GLenum target, GLuint texture);
+typedef void (APIENTRYP PFNGLDELETETEXTURESEXTPROC) (GLsizei n, const GLuint *textures);
+typedef void (APIENTRYP PFNGLGENTEXTURESEXTPROC) (GLsizei n, GLuint *textures);
+typedef GLboolean (APIENTRYP PFNGLISTEXTUREEXTPROC) (GLuint texture);
+typedef void (APIENTRYP PFNGLPRIORITIZETEXTURESEXTPROC) (GLsizei n, const GLuint *textures, const GLclampf *priorities);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLboolean APIENTRY glAreTexturesResidentEXT (GLsizei n, const GLuint *textures, GLboolean *residences);
+GLAPI void APIENTRY glBindTextureEXT (GLenum target, GLuint texture);
+GLAPI void APIENTRY glDeleteTexturesEXT (GLsizei n, const GLuint *textures);
+GLAPI void APIENTRY glGenTexturesEXT (GLsizei n, GLuint *textures);
+GLAPI GLboolean APIENTRY glIsTextureEXT (GLuint texture);
+GLAPI void APIENTRY glPrioritizeTexturesEXT (GLsizei n, const GLuint *textures, const GLclampf *priorities);
+#endif
+#endif /* GL_EXT_texture_object */
+
+#ifndef GL_EXT_texture_perturb_normal
+#define GL_EXT_texture_perturb_normal 1
+#define GL_PERTURB_EXT 0x85AE
+#define GL_TEXTURE_NORMAL_EXT 0x85AF
+typedef void (APIENTRYP PFNGLTEXTURENORMALEXTPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTextureNormalEXT (GLenum mode);
+#endif
+#endif /* GL_EXT_texture_perturb_normal */
+
+#ifndef GL_EXT_texture_sRGB
+#define GL_EXT_texture_sRGB 1
+#define GL_SRGB_EXT 0x8C40
+#define GL_SRGB8_EXT 0x8C41
+#define GL_SRGB_ALPHA_EXT 0x8C42
+#define GL_SRGB8_ALPHA8_EXT 0x8C43
+#define GL_SLUMINANCE_ALPHA_EXT 0x8C44
+#define GL_SLUMINANCE8_ALPHA8_EXT 0x8C45
+#define GL_SLUMINANCE_EXT 0x8C46
+#define GL_SLUMINANCE8_EXT 0x8C47
+#define GL_COMPRESSED_SRGB_EXT 0x8C48
+#define GL_COMPRESSED_SRGB_ALPHA_EXT 0x8C49
+#define GL_COMPRESSED_SLUMINANCE_EXT 0x8C4A
+#define GL_COMPRESSED_SLUMINANCE_ALPHA_EXT 0x8C4B
+#define GL_COMPRESSED_SRGB_S3TC_DXT1_EXT 0x8C4C
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT 0x8C4D
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT 0x8C4E
+#define GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT 0x8C4F
+#endif /* GL_EXT_texture_sRGB */
+
+#ifndef GL_EXT_texture_sRGB_R8
+#define GL_EXT_texture_sRGB_R8 1
+#define GL_SR8_EXT 0x8FBD
+#endif /* GL_EXT_texture_sRGB_R8 */
+
+#ifndef GL_EXT_texture_sRGB_decode
+#define GL_EXT_texture_sRGB_decode 1
+#define GL_TEXTURE_SRGB_DECODE_EXT 0x8A48
+#define GL_DECODE_EXT 0x8A49
+#define GL_SKIP_DECODE_EXT 0x8A4A
+#endif /* GL_EXT_texture_sRGB_decode */
+
+#ifndef GL_EXT_texture_shadow_lod
+#define GL_EXT_texture_shadow_lod 1
+#endif /* GL_EXT_texture_shadow_lod */
+
+#ifndef GL_EXT_texture_shared_exponent
+#define GL_EXT_texture_shared_exponent 1
+#define GL_RGB9_E5_EXT 0x8C3D
+#define GL_UNSIGNED_INT_5_9_9_9_REV_EXT 0x8C3E
+#define GL_TEXTURE_SHARED_SIZE_EXT 0x8C3F
+#endif /* GL_EXT_texture_shared_exponent */
+
+#ifndef GL_EXT_texture_snorm
+#define GL_EXT_texture_snorm 1
+#define GL_ALPHA_SNORM 0x9010
+#define GL_LUMINANCE_SNORM 0x9011
+#define GL_LUMINANCE_ALPHA_SNORM 0x9012
+#define GL_INTENSITY_SNORM 0x9013
+#define GL_ALPHA8_SNORM 0x9014
+#define GL_LUMINANCE8_SNORM 0x9015
+#define GL_LUMINANCE8_ALPHA8_SNORM 0x9016
+#define GL_INTENSITY8_SNORM 0x9017
+#define GL_ALPHA16_SNORM 0x9018
+#define GL_LUMINANCE16_SNORM 0x9019
+#define GL_LUMINANCE16_ALPHA16_SNORM 0x901A
+#define GL_INTENSITY16_SNORM 0x901B
+#define GL_RED_SNORM 0x8F90
+#define GL_RG_SNORM 0x8F91
+#define GL_RGB_SNORM 0x8F92
+#define GL_RGBA_SNORM 0x8F93
+#endif /* GL_EXT_texture_snorm */
+
+#ifndef GL_EXT_texture_swizzle
+#define GL_EXT_texture_swizzle 1
+#define GL_TEXTURE_SWIZZLE_R_EXT 0x8E42
+#define GL_TEXTURE_SWIZZLE_G_EXT 0x8E43
+#define GL_TEXTURE_SWIZZLE_B_EXT 0x8E44
+#define GL_TEXTURE_SWIZZLE_A_EXT 0x8E45
+#define GL_TEXTURE_SWIZZLE_RGBA_EXT 0x8E46
+#endif /* GL_EXT_texture_swizzle */
+
+#ifndef GL_EXT_timer_query
+#define GL_EXT_timer_query 1
+#define GL_TIME_ELAPSED_EXT 0x88BF
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTI64VEXTPROC) (GLuint id, GLenum pname, GLint64 *params);
+typedef void (APIENTRYP PFNGLGETQUERYOBJECTUI64VEXTPROC) (GLuint id, GLenum pname, GLuint64 *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetQueryObjecti64vEXT (GLuint id, GLenum pname, GLint64 *params);
+GLAPI void APIENTRY glGetQueryObjectui64vEXT (GLuint id, GLenum pname, GLuint64 *params);
+#endif
+#endif /* GL_EXT_timer_query */
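+
+/*
+ * Illustrative sketch (editorial): the 64-bit getters exist because elapsed
+ * times in nanoseconds overflow a 32-bit result within seconds.  `query` is
+ * assumed to have been begun/ended with glBeginQuery(GL_TIME_ELAPSED_EXT, ...).
+ *
+ *   GLuint64 ns = 0;
+ *   glGetQueryObjectui64vEXT(query, GL_QUERY_RESULT, &ns);
+ */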
+
+#ifndef GL_EXT_transform_feedback
+#define GL_EXT_transform_feedback 1
+#define GL_TRANSFORM_FEEDBACK_BUFFER_EXT 0x8C8E
+#define GL_TRANSFORM_FEEDBACK_BUFFER_START_EXT 0x8C84
+#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_EXT 0x8C85
+#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_EXT 0x8C8F
+#define GL_INTERLEAVED_ATTRIBS_EXT 0x8C8C
+#define GL_SEPARATE_ATTRIBS_EXT 0x8C8D
+#define GL_PRIMITIVES_GENERATED_EXT 0x8C87
+#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_EXT 0x8C88
+#define GL_RASTERIZER_DISCARD_EXT 0x8C89
+#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT 0x8C8A
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_EXT 0x8C8B
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT 0x8C80
+#define GL_TRANSFORM_FEEDBACK_VARYINGS_EXT 0x8C83
+#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE_EXT 0x8C7F
+#define GL_TRANSFORM_FEEDBACK_VARYING_MAX_LENGTH_EXT 0x8C76
+typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKEXTPROC) (GLenum primitiveMode);
+typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKEXTPROC) (void);
+typedef void (APIENTRYP PFNGLBINDBUFFERRANGEEXTPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLBINDBUFFEROFFSETEXTPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset);
+typedef void (APIENTRYP PFNGLBINDBUFFERBASEEXTPROC) (GLenum target, GLuint index, GLuint buffer);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSEXTPROC) (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
+typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGEXTPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginTransformFeedbackEXT (GLenum primitiveMode);
+GLAPI void APIENTRY glEndTransformFeedbackEXT (void);
+GLAPI void APIENTRY glBindBufferRangeEXT (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glBindBufferOffsetEXT (GLenum target, GLuint index, GLuint buffer, GLintptr offset);
+GLAPI void APIENTRY glBindBufferBaseEXT (GLenum target, GLuint index, GLuint buffer);
+GLAPI void APIENTRY glTransformFeedbackVaryingsEXT (GLuint program, GLsizei count, const GLchar *const*varyings, GLenum bufferMode);
+GLAPI void APIENTRY glGetTransformFeedbackVaryingEXT (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+#endif
+#endif /* GL_EXT_transform_feedback */
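+
+/*
+ * Illustrative sketch (editorial): capture one varying into buffer `tfbo`.
+ * The varyings call only takes effect at the next link, so `prog` must be
+ * relinked before drawing; `prog` and `tfbo` are assumed pre-existing names.
+ *
+ *   static const GLchar *const varyings[] = { "gl_Position" };
+ *   glTransformFeedbackVaryingsEXT(prog, 1, varyings, GL_INTERLEAVED_ATTRIBS_EXT);
+ *   glLinkProgram(prog);
+ *   glBindBufferBaseEXT(GL_TRANSFORM_FEEDBACK_BUFFER_EXT, 0, tfbo);
+ *   glBeginTransformFeedbackEXT(GL_POINTS);
+ *   ... draw ...
+ *   glEndTransformFeedbackEXT();
+ */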
+
+#ifndef GL_EXT_vertex_array
+#define GL_EXT_vertex_array 1
+#define GL_VERTEX_ARRAY_EXT 0x8074
+#define GL_NORMAL_ARRAY_EXT 0x8075
+#define GL_COLOR_ARRAY_EXT 0x8076
+#define GL_INDEX_ARRAY_EXT 0x8077
+#define GL_TEXTURE_COORD_ARRAY_EXT 0x8078
+#define GL_EDGE_FLAG_ARRAY_EXT 0x8079
+#define GL_VERTEX_ARRAY_SIZE_EXT 0x807A
+#define GL_VERTEX_ARRAY_TYPE_EXT 0x807B
+#define GL_VERTEX_ARRAY_STRIDE_EXT 0x807C
+#define GL_VERTEX_ARRAY_COUNT_EXT 0x807D
+#define GL_NORMAL_ARRAY_TYPE_EXT 0x807E
+#define GL_NORMAL_ARRAY_STRIDE_EXT 0x807F
+#define GL_NORMAL_ARRAY_COUNT_EXT 0x8080
+#define GL_COLOR_ARRAY_SIZE_EXT 0x8081
+#define GL_COLOR_ARRAY_TYPE_EXT 0x8082
+#define GL_COLOR_ARRAY_STRIDE_EXT 0x8083
+#define GL_COLOR_ARRAY_COUNT_EXT 0x8084
+#define GL_INDEX_ARRAY_TYPE_EXT 0x8085
+#define GL_INDEX_ARRAY_STRIDE_EXT 0x8086
+#define GL_INDEX_ARRAY_COUNT_EXT 0x8087
+#define GL_TEXTURE_COORD_ARRAY_SIZE_EXT 0x8088
+#define GL_TEXTURE_COORD_ARRAY_TYPE_EXT 0x8089
+#define GL_TEXTURE_COORD_ARRAY_STRIDE_EXT 0x808A
+#define GL_TEXTURE_COORD_ARRAY_COUNT_EXT 0x808B
+#define GL_EDGE_FLAG_ARRAY_STRIDE_EXT 0x808C
+#define GL_EDGE_FLAG_ARRAY_COUNT_EXT 0x808D
+#define GL_VERTEX_ARRAY_POINTER_EXT 0x808E
+#define GL_NORMAL_ARRAY_POINTER_EXT 0x808F
+#define GL_COLOR_ARRAY_POINTER_EXT 0x8090
+#define GL_INDEX_ARRAY_POINTER_EXT 0x8091
+#define GL_TEXTURE_COORD_ARRAY_POINTER_EXT 0x8092
+#define GL_EDGE_FLAG_ARRAY_POINTER_EXT 0x8093
+typedef void (APIENTRYP PFNGLARRAYELEMENTEXTPROC) (GLint i);
+typedef void (APIENTRYP PFNGLCOLORPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+typedef void (APIENTRYP PFNGLDRAWARRAYSEXTPROC) (GLenum mode, GLint first, GLsizei count);
+typedef void (APIENTRYP PFNGLEDGEFLAGPOINTEREXTPROC) (GLsizei stride, GLsizei count, const GLboolean *pointer);
+typedef void (APIENTRYP PFNGLGETPOINTERVEXTPROC) (GLenum pname, void **params);
+typedef void (APIENTRYP PFNGLINDEXPOINTEREXTPROC) (GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+typedef void (APIENTRYP PFNGLNORMALPOINTEREXTPROC) (GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+typedef void (APIENTRYP PFNGLTEXCOORDPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+typedef void (APIENTRYP PFNGLVERTEXPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glArrayElementEXT (GLint i);
+GLAPI void APIENTRY glColorPointerEXT (GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+GLAPI void APIENTRY glDrawArraysEXT (GLenum mode, GLint first, GLsizei count);
+GLAPI void APIENTRY glEdgeFlagPointerEXT (GLsizei stride, GLsizei count, const GLboolean *pointer);
+GLAPI void APIENTRY glGetPointervEXT (GLenum pname, void **params);
+GLAPI void APIENTRY glIndexPointerEXT (GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+GLAPI void APIENTRY glNormalPointerEXT (GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+GLAPI void APIENTRY glTexCoordPointerEXT (GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+GLAPI void APIENTRY glVertexPointerEXT (GLint size, GLenum type, GLsizei stride, GLsizei count, const void *pointer);
+#endif
+#endif /* GL_EXT_vertex_array */
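+
+/*
+ * Illustrative sketch (editorial): the original EXT vertex-array path.  Note
+ * the extra `count` argument (an upper bound on the indices used), which the
+ * core GL 1.1 array functions later dropped; `verts`/`nverts` are assumed.
+ *
+ *   glEnable(GL_VERTEX_ARRAY_EXT);
+ *   glVertexPointerEXT(3, GL_FLOAT, 0, nverts, verts);
+ *   glDrawArraysEXT(GL_TRIANGLES, 0, nverts);
+ */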
+
+#ifndef GL_EXT_vertex_array_bgra
+#define GL_EXT_vertex_array_bgra 1
+#endif /* GL_EXT_vertex_array_bgra */
+
+#ifndef GL_EXT_vertex_attrib_64bit
+#define GL_EXT_vertex_attrib_64bit 1
+#define GL_DOUBLE_VEC2_EXT 0x8FFC
+#define GL_DOUBLE_VEC3_EXT 0x8FFD
+#define GL_DOUBLE_VEC4_EXT 0x8FFE
+#define GL_DOUBLE_MAT2_EXT 0x8F46
+#define GL_DOUBLE_MAT3_EXT 0x8F47
+#define GL_DOUBLE_MAT4_EXT 0x8F48
+#define GL_DOUBLE_MAT2x3_EXT 0x8F49
+#define GL_DOUBLE_MAT2x4_EXT 0x8F4A
+#define GL_DOUBLE_MAT3x2_EXT 0x8F4B
+#define GL_DOUBLE_MAT3x4_EXT 0x8F4C
+#define GL_DOUBLE_MAT4x2_EXT 0x8F4D
+#define GL_DOUBLE_MAT4x3_EXT 0x8F4E
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DEXTPROC) (GLuint index, GLdouble x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DEXTPROC) (GLuint index, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DEXTPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DEXTPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1DVEXTPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2DVEXTPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3DVEXTPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4DVEXTPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBLPOINTEREXTPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLDVEXTPROC) (GLuint index, GLenum pname, GLdouble *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttribL1dEXT (GLuint index, GLdouble x);
+GLAPI void APIENTRY glVertexAttribL2dEXT (GLuint index, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glVertexAttribL3dEXT (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glVertexAttribL4dEXT (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glVertexAttribL1dvEXT (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribL2dvEXT (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribL3dvEXT (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribL4dvEXT (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribLPointerEXT (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glGetVertexAttribLdvEXT (GLuint index, GLenum pname, GLdouble *params);
+#endif
+#endif /* GL_EXT_vertex_attrib_64bit */
+
+#ifndef GL_EXT_vertex_shader
+#define GL_EXT_vertex_shader 1
+#define GL_VERTEX_SHADER_EXT 0x8780
+#define GL_VERTEX_SHADER_BINDING_EXT 0x8781
+#define GL_OP_INDEX_EXT 0x8782
+#define GL_OP_NEGATE_EXT 0x8783
+#define GL_OP_DOT3_EXT 0x8784
+#define GL_OP_DOT4_EXT 0x8785
+#define GL_OP_MUL_EXT 0x8786
+#define GL_OP_ADD_EXT 0x8787
+#define GL_OP_MADD_EXT 0x8788
+#define GL_OP_FRAC_EXT 0x8789
+#define GL_OP_MAX_EXT 0x878A
+#define GL_OP_MIN_EXT 0x878B
+#define GL_OP_SET_GE_EXT 0x878C
+#define GL_OP_SET_LT_EXT 0x878D
+#define GL_OP_CLAMP_EXT 0x878E
+#define GL_OP_FLOOR_EXT 0x878F
+#define GL_OP_ROUND_EXT 0x8790
+#define GL_OP_EXP_BASE_2_EXT 0x8791
+#define GL_OP_LOG_BASE_2_EXT 0x8792
+#define GL_OP_POWER_EXT 0x8793
+#define GL_OP_RECIP_EXT 0x8794
+#define GL_OP_RECIP_SQRT_EXT 0x8795
+#define GL_OP_SUB_EXT 0x8796
+#define GL_OP_CROSS_PRODUCT_EXT 0x8797
+#define GL_OP_MULTIPLY_MATRIX_EXT 0x8798
+#define GL_OP_MOV_EXT 0x8799
+#define GL_OUTPUT_VERTEX_EXT 0x879A
+#define GL_OUTPUT_COLOR0_EXT 0x879B
+#define GL_OUTPUT_COLOR1_EXT 0x879C
+#define GL_OUTPUT_TEXTURE_COORD0_EXT 0x879D
+#define GL_OUTPUT_TEXTURE_COORD1_EXT 0x879E
+#define GL_OUTPUT_TEXTURE_COORD2_EXT 0x879F
+#define GL_OUTPUT_TEXTURE_COORD3_EXT 0x87A0
+#define GL_OUTPUT_TEXTURE_COORD4_EXT 0x87A1
+#define GL_OUTPUT_TEXTURE_COORD5_EXT 0x87A2
+#define GL_OUTPUT_TEXTURE_COORD6_EXT 0x87A3
+#define GL_OUTPUT_TEXTURE_COORD7_EXT 0x87A4
+#define GL_OUTPUT_TEXTURE_COORD8_EXT 0x87A5
+#define GL_OUTPUT_TEXTURE_COORD9_EXT 0x87A6
+#define GL_OUTPUT_TEXTURE_COORD10_EXT 0x87A7
+#define GL_OUTPUT_TEXTURE_COORD11_EXT 0x87A8
+#define GL_OUTPUT_TEXTURE_COORD12_EXT 0x87A9
+#define GL_OUTPUT_TEXTURE_COORD13_EXT 0x87AA
+#define GL_OUTPUT_TEXTURE_COORD14_EXT 0x87AB
+#define GL_OUTPUT_TEXTURE_COORD15_EXT 0x87AC
+#define GL_OUTPUT_TEXTURE_COORD16_EXT 0x87AD
+#define GL_OUTPUT_TEXTURE_COORD17_EXT 0x87AE
+#define GL_OUTPUT_TEXTURE_COORD18_EXT 0x87AF
+#define GL_OUTPUT_TEXTURE_COORD19_EXT 0x87B0
+#define GL_OUTPUT_TEXTURE_COORD20_EXT 0x87B1
+#define GL_OUTPUT_TEXTURE_COORD21_EXT 0x87B2
+#define GL_OUTPUT_TEXTURE_COORD22_EXT 0x87B3
+#define GL_OUTPUT_TEXTURE_COORD23_EXT 0x87B4
+#define GL_OUTPUT_TEXTURE_COORD24_EXT 0x87B5
+#define GL_OUTPUT_TEXTURE_COORD25_EXT 0x87B6
+#define GL_OUTPUT_TEXTURE_COORD26_EXT 0x87B7
+#define GL_OUTPUT_TEXTURE_COORD27_EXT 0x87B8
+#define GL_OUTPUT_TEXTURE_COORD28_EXT 0x87B9
+#define GL_OUTPUT_TEXTURE_COORD29_EXT 0x87BA
+#define GL_OUTPUT_TEXTURE_COORD30_EXT 0x87BB
+#define GL_OUTPUT_TEXTURE_COORD31_EXT 0x87BC
+#define GL_OUTPUT_FOG_EXT 0x87BD
+#define GL_SCALAR_EXT 0x87BE
+#define GL_VECTOR_EXT 0x87BF
+#define GL_MATRIX_EXT 0x87C0
+#define GL_VARIANT_EXT 0x87C1
+#define GL_INVARIANT_EXT 0x87C2
+#define GL_LOCAL_CONSTANT_EXT 0x87C3
+#define GL_LOCAL_EXT 0x87C4
+#define GL_MAX_VERTEX_SHADER_INSTRUCTIONS_EXT 0x87C5
+#define GL_MAX_VERTEX_SHADER_VARIANTS_EXT 0x87C6
+#define GL_MAX_VERTEX_SHADER_INVARIANTS_EXT 0x87C7
+#define GL_MAX_VERTEX_SHADER_LOCAL_CONSTANTS_EXT 0x87C8
+#define GL_MAX_VERTEX_SHADER_LOCALS_EXT 0x87C9
+#define GL_MAX_OPTIMIZED_VERTEX_SHADER_INSTRUCTIONS_EXT 0x87CA
+#define GL_MAX_OPTIMIZED_VERTEX_SHADER_VARIANTS_EXT 0x87CB
+#define GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCAL_CONSTANTS_EXT 0x87CC
+#define GL_MAX_OPTIMIZED_VERTEX_SHADER_INVARIANTS_EXT 0x87CD
+#define GL_MAX_OPTIMIZED_VERTEX_SHADER_LOCALS_EXT 0x87CE
+#define GL_VERTEX_SHADER_INSTRUCTIONS_EXT 0x87CF
+#define GL_VERTEX_SHADER_VARIANTS_EXT 0x87D0
+#define GL_VERTEX_SHADER_INVARIANTS_EXT 0x87D1
+#define GL_VERTEX_SHADER_LOCAL_CONSTANTS_EXT 0x87D2
+#define GL_VERTEX_SHADER_LOCALS_EXT 0x87D3
+#define GL_VERTEX_SHADER_OPTIMIZED_EXT 0x87D4
+#define GL_X_EXT 0x87D5
+#define GL_Y_EXT 0x87D6
+#define GL_Z_EXT 0x87D7
+#define GL_W_EXT 0x87D8
+#define GL_NEGATIVE_X_EXT 0x87D9
+#define GL_NEGATIVE_Y_EXT 0x87DA
+#define GL_NEGATIVE_Z_EXT 0x87DB
+#define GL_NEGATIVE_W_EXT 0x87DC
+#define GL_ZERO_EXT 0x87DD
+#define GL_ONE_EXT 0x87DE
+#define GL_NEGATIVE_ONE_EXT 0x87DF
+#define GL_NORMALIZED_RANGE_EXT 0x87E0
+#define GL_FULL_RANGE_EXT 0x87E1
+#define GL_CURRENT_VERTEX_EXT 0x87E2
+#define GL_MVP_MATRIX_EXT 0x87E3
+#define GL_VARIANT_VALUE_EXT 0x87E4
+#define GL_VARIANT_DATATYPE_EXT 0x87E5
+#define GL_VARIANT_ARRAY_STRIDE_EXT 0x87E6
+#define GL_VARIANT_ARRAY_TYPE_EXT 0x87E7
+#define GL_VARIANT_ARRAY_EXT 0x87E8
+#define GL_VARIANT_ARRAY_POINTER_EXT 0x87E9
+#define GL_INVARIANT_VALUE_EXT 0x87EA
+#define GL_INVARIANT_DATATYPE_EXT 0x87EB
+#define GL_LOCAL_CONSTANT_VALUE_EXT 0x87EC
+#define GL_LOCAL_CONSTANT_DATATYPE_EXT 0x87ED
+typedef void (APIENTRYP PFNGLBEGINVERTEXSHADEREXTPROC) (void);
+typedef void (APIENTRYP PFNGLENDVERTEXSHADEREXTPROC) (void);
+typedef void (APIENTRYP PFNGLBINDVERTEXSHADEREXTPROC) (GLuint id);
+typedef GLuint (APIENTRYP PFNGLGENVERTEXSHADERSEXTPROC) (GLuint range);
+typedef void (APIENTRYP PFNGLDELETEVERTEXSHADEREXTPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLSHADEROP1EXTPROC) (GLenum op, GLuint res, GLuint arg1);
+typedef void (APIENTRYP PFNGLSHADEROP2EXTPROC) (GLenum op, GLuint res, GLuint arg1, GLuint arg2);
+typedef void (APIENTRYP PFNGLSHADEROP3EXTPROC) (GLenum op, GLuint res, GLuint arg1, GLuint arg2, GLuint arg3);
+typedef void (APIENTRYP PFNGLSWIZZLEEXTPROC) (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW);
+typedef void (APIENTRYP PFNGLWRITEMASKEXTPROC) (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW);
+typedef void (APIENTRYP PFNGLINSERTCOMPONENTEXTPROC) (GLuint res, GLuint src, GLuint num);
+typedef void (APIENTRYP PFNGLEXTRACTCOMPONENTEXTPROC) (GLuint res, GLuint src, GLuint num);
+typedef GLuint (APIENTRYP PFNGLGENSYMBOLSEXTPROC) (GLenum datatype, GLenum storagetype, GLenum range, GLuint components);
+typedef void (APIENTRYP PFNGLSETINVARIANTEXTPROC) (GLuint id, GLenum type, const void *addr);
+typedef void (APIENTRYP PFNGLSETLOCALCONSTANTEXTPROC) (GLuint id, GLenum type, const void *addr);
+typedef void (APIENTRYP PFNGLVARIANTBVEXTPROC) (GLuint id, const GLbyte *addr);
+typedef void (APIENTRYP PFNGLVARIANTSVEXTPROC) (GLuint id, const GLshort *addr);
+typedef void (APIENTRYP PFNGLVARIANTIVEXTPROC) (GLuint id, const GLint *addr);
+typedef void (APIENTRYP PFNGLVARIANTFVEXTPROC) (GLuint id, const GLfloat *addr);
+typedef void (APIENTRYP PFNGLVARIANTDVEXTPROC) (GLuint id, const GLdouble *addr);
+typedef void (APIENTRYP PFNGLVARIANTUBVEXTPROC) (GLuint id, const GLubyte *addr);
+typedef void (APIENTRYP PFNGLVARIANTUSVEXTPROC) (GLuint id, const GLushort *addr);
+typedef void (APIENTRYP PFNGLVARIANTUIVEXTPROC) (GLuint id, const GLuint *addr);
+typedef void (APIENTRYP PFNGLVARIANTPOINTEREXTPROC) (GLuint id, GLenum type, GLuint stride, const void *addr);
+typedef void (APIENTRYP PFNGLENABLEVARIANTCLIENTSTATEEXTPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLDISABLEVARIANTCLIENTSTATEEXTPROC) (GLuint id);
+typedef GLuint (APIENTRYP PFNGLBINDLIGHTPARAMETEREXTPROC) (GLenum light, GLenum value);
+typedef GLuint (APIENTRYP PFNGLBINDMATERIALPARAMETEREXTPROC) (GLenum face, GLenum value);
+typedef GLuint (APIENTRYP PFNGLBINDTEXGENPARAMETEREXTPROC) (GLenum unit, GLenum coord, GLenum value);
+typedef GLuint (APIENTRYP PFNGLBINDTEXTUREUNITPARAMETEREXTPROC) (GLenum unit, GLenum value);
+typedef GLuint (APIENTRYP PFNGLBINDPARAMETEREXTPROC) (GLenum value);
+typedef GLboolean (APIENTRYP PFNGLISVARIANTENABLEDEXTPROC) (GLuint id, GLenum cap);
+typedef void (APIENTRYP PFNGLGETVARIANTBOOLEANVEXTPROC) (GLuint id, GLenum value, GLboolean *data);
+typedef void (APIENTRYP PFNGLGETVARIANTINTEGERVEXTPROC) (GLuint id, GLenum value, GLint *data);
+typedef void (APIENTRYP PFNGLGETVARIANTFLOATVEXTPROC) (GLuint id, GLenum value, GLfloat *data);
+typedef void (APIENTRYP PFNGLGETVARIANTPOINTERVEXTPROC) (GLuint id, GLenum value, void **data);
+typedef void (APIENTRYP PFNGLGETINVARIANTBOOLEANVEXTPROC) (GLuint id, GLenum value, GLboolean *data);
+typedef void (APIENTRYP PFNGLGETINVARIANTINTEGERVEXTPROC) (GLuint id, GLenum value, GLint *data);
+typedef void (APIENTRYP PFNGLGETINVARIANTFLOATVEXTPROC) (GLuint id, GLenum value, GLfloat *data);
+typedef void (APIENTRYP PFNGLGETLOCALCONSTANTBOOLEANVEXTPROC) (GLuint id, GLenum value, GLboolean *data);
+typedef void (APIENTRYP PFNGLGETLOCALCONSTANTINTEGERVEXTPROC) (GLuint id, GLenum value, GLint *data);
+typedef void (APIENTRYP PFNGLGETLOCALCONSTANTFLOATVEXTPROC) (GLuint id, GLenum value, GLfloat *data);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginVertexShaderEXT (void);
+GLAPI void APIENTRY glEndVertexShaderEXT (void);
+GLAPI void APIENTRY glBindVertexShaderEXT (GLuint id);
+GLAPI GLuint APIENTRY glGenVertexShadersEXT (GLuint range);
+GLAPI void APIENTRY glDeleteVertexShaderEXT (GLuint id);
+GLAPI void APIENTRY glShaderOp1EXT (GLenum op, GLuint res, GLuint arg1);
+GLAPI void APIENTRY glShaderOp2EXT (GLenum op, GLuint res, GLuint arg1, GLuint arg2);
+GLAPI void APIENTRY glShaderOp3EXT (GLenum op, GLuint res, GLuint arg1, GLuint arg2, GLuint arg3);
+GLAPI void APIENTRY glSwizzleEXT (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW);
+GLAPI void APIENTRY glWriteMaskEXT (GLuint res, GLuint in, GLenum outX, GLenum outY, GLenum outZ, GLenum outW);
+GLAPI void APIENTRY glInsertComponentEXT (GLuint res, GLuint src, GLuint num);
+GLAPI void APIENTRY glExtractComponentEXT (GLuint res, GLuint src, GLuint num);
+GLAPI GLuint APIENTRY glGenSymbolsEXT (GLenum datatype, GLenum storagetype, GLenum range, GLuint components);
+GLAPI void APIENTRY glSetInvariantEXT (GLuint id, GLenum type, const void *addr);
+GLAPI void APIENTRY glSetLocalConstantEXT (GLuint id, GLenum type, const void *addr);
+GLAPI void APIENTRY glVariantbvEXT (GLuint id, const GLbyte *addr);
+GLAPI void APIENTRY glVariantsvEXT (GLuint id, const GLshort *addr);
+GLAPI void APIENTRY glVariantivEXT (GLuint id, const GLint *addr);
+GLAPI void APIENTRY glVariantfvEXT (GLuint id, const GLfloat *addr);
+GLAPI void APIENTRY glVariantdvEXT (GLuint id, const GLdouble *addr);
+GLAPI void APIENTRY glVariantubvEXT (GLuint id, const GLubyte *addr);
+GLAPI void APIENTRY glVariantusvEXT (GLuint id, const GLushort *addr);
+GLAPI void APIENTRY glVariantuivEXT (GLuint id, const GLuint *addr);
+GLAPI void APIENTRY glVariantPointerEXT (GLuint id, GLenum type, GLuint stride, const void *addr);
+GLAPI void APIENTRY glEnableVariantClientStateEXT (GLuint id);
+GLAPI void APIENTRY glDisableVariantClientStateEXT (GLuint id);
+GLAPI GLuint APIENTRY glBindLightParameterEXT (GLenum light, GLenum value);
+GLAPI GLuint APIENTRY glBindMaterialParameterEXT (GLenum face, GLenum value);
+GLAPI GLuint APIENTRY glBindTexGenParameterEXT (GLenum unit, GLenum coord, GLenum value);
+GLAPI GLuint APIENTRY glBindTextureUnitParameterEXT (GLenum unit, GLenum value);
+GLAPI GLuint APIENTRY glBindParameterEXT (GLenum value);
+GLAPI GLboolean APIENTRY glIsVariantEnabledEXT (GLuint id, GLenum cap);
+GLAPI void APIENTRY glGetVariantBooleanvEXT (GLuint id, GLenum value, GLboolean *data);
+GLAPI void APIENTRY glGetVariantIntegervEXT (GLuint id, GLenum value, GLint *data);
+GLAPI void APIENTRY glGetVariantFloatvEXT (GLuint id, GLenum value, GLfloat *data);
+GLAPI void APIENTRY glGetVariantPointervEXT (GLuint id, GLenum value, void **data);
+GLAPI void APIENTRY glGetInvariantBooleanvEXT (GLuint id, GLenum value, GLboolean *data);
+GLAPI void APIENTRY glGetInvariantIntegervEXT (GLuint id, GLenum value, GLint *data);
+GLAPI void APIENTRY glGetInvariantFloatvEXT (GLuint id, GLenum value, GLfloat *data);
+GLAPI void APIENTRY glGetLocalConstantBooleanvEXT (GLuint id, GLenum value, GLboolean *data);
+GLAPI void APIENTRY glGetLocalConstantIntegervEXT (GLuint id, GLenum value, GLint *data);
+GLAPI void APIENTRY glGetLocalConstantFloatvEXT (GLuint id, GLenum value, GLfloat *data);
+#endif
+#endif /* GL_EXT_vertex_shader */
+
+#ifndef GL_EXT_vertex_weighting
+#define GL_EXT_vertex_weighting 1
+#define GL_MODELVIEW0_STACK_DEPTH_EXT 0x0BA3
+#define GL_MODELVIEW1_STACK_DEPTH_EXT 0x8502
+#define GL_MODELVIEW0_MATRIX_EXT 0x0BA6
+#define GL_MODELVIEW1_MATRIX_EXT 0x8506
+#define GL_VERTEX_WEIGHTING_EXT 0x8509
+#define GL_MODELVIEW0_EXT 0x1700
+#define GL_MODELVIEW1_EXT 0x850A
+#define GL_CURRENT_VERTEX_WEIGHT_EXT 0x850B
+#define GL_VERTEX_WEIGHT_ARRAY_EXT 0x850C
+#define GL_VERTEX_WEIGHT_ARRAY_SIZE_EXT 0x850D
+#define GL_VERTEX_WEIGHT_ARRAY_TYPE_EXT 0x850E
+#define GL_VERTEX_WEIGHT_ARRAY_STRIDE_EXT 0x850F
+#define GL_VERTEX_WEIGHT_ARRAY_POINTER_EXT 0x8510
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTFEXTPROC) (GLfloat weight);
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTFVEXTPROC) (const GLfloat *weight);
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTPOINTEREXTPROC) (GLint size, GLenum type, GLsizei stride, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexWeightfEXT (GLfloat weight);
+GLAPI void APIENTRY glVertexWeightfvEXT (const GLfloat *weight);
+GLAPI void APIENTRY glVertexWeightPointerEXT (GLint size, GLenum type, GLsizei stride, const void *pointer);
+#endif
+#endif /* GL_EXT_vertex_weighting */
+
+#ifndef GL_EXT_win32_keyed_mutex
+#define GL_EXT_win32_keyed_mutex 1
+typedef GLboolean (APIENTRYP PFNGLACQUIREKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key, GLuint timeout);
+typedef GLboolean (APIENTRYP PFNGLRELEASEKEYEDMUTEXWIN32EXTPROC) (GLuint memory, GLuint64 key);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLboolean APIENTRY glAcquireKeyedMutexWin32EXT (GLuint memory, GLuint64 key, GLuint timeout);
+GLAPI GLboolean APIENTRY glReleaseKeyedMutexWin32EXT (GLuint memory, GLuint64 key);
+#endif
+#endif /* GL_EXT_win32_keyed_mutex */
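+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * serializing access to a resource shared with Direct3D via the keyed mutex.
+ * Assumes a current context exposing this extension; "memObj" is a
+ * hypothetical memory-object name imported beforehand (e.g. through
+ * GL_EXT_memory_object_win32).
+ *
+ *   static void draw_with_shared_resource(GLuint memObj)
+ *   {
+ *       // Acquire key 0 with a 1000 ms timeout before touching the resource.
+ *       if (glAcquireKeyedMutexWin32EXT(memObj, 0, 1000)) {
+ *           glClear(GL_COLOR_BUFFER_BIT);          // work on the shared image
+ *           glReleaseKeyedMutexWin32EXT(memObj, 0); // hand key 0 back
+ *       }
+ *   }
+ */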
+
+#ifndef GL_EXT_window_rectangles
+#define GL_EXT_window_rectangles 1
+#define GL_INCLUSIVE_EXT 0x8F10
+#define GL_EXCLUSIVE_EXT 0x8F11
+#define GL_WINDOW_RECTANGLE_EXT 0x8F12
+#define GL_WINDOW_RECTANGLE_MODE_EXT 0x8F13
+#define GL_MAX_WINDOW_RECTANGLES_EXT 0x8F14
+#define GL_NUM_WINDOW_RECTANGLES_EXT 0x8F15
+typedef void (APIENTRYP PFNGLWINDOWRECTANGLESEXTPROC) (GLenum mode, GLsizei count, const GLint *box);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glWindowRectanglesEXT (GLenum mode, GLsizei count, const GLint *box);
+#endif
+#endif /* GL_EXT_window_rectangles */
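+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * restricting rendering to two inclusive window rectangles.  Each rectangle
+ * is four GLints: x, y, width, height.
+ *
+ *   static void set_ui_clip_rects(void)
+ *   {
+ *       const GLint box[8] = { 0,   0, 256, 256,    // rectangle 0
+ *                              512, 0, 128, 128 };  // rectangle 1
+ *       glWindowRectanglesEXT(GL_INCLUSIVE_EXT, 2, box);
+ *   }
+ */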
+
+#ifndef GL_EXT_x11_sync_object
+#define GL_EXT_x11_sync_object 1
+#define GL_SYNC_X11_FENCE_EXT 0x90E1
+typedef GLsync (APIENTRYP PFNGLIMPORTSYNCEXTPROC) (GLenum external_sync_type, GLintptr external_sync, GLbitfield flags);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLsync APIENTRY glImportSyncEXT (GLenum external_sync_type, GLintptr external_sync, GLbitfield flags);
+#endif
+#endif /* GL_EXT_x11_sync_object */
+
+#ifndef GL_GREMEDY_frame_terminator
+#define GL_GREMEDY_frame_terminator 1
+typedef void (APIENTRYP PFNGLFRAMETERMINATORGREMEDYPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFrameTerminatorGREMEDY (void);
+#endif
+#endif /* GL_GREMEDY_frame_terminator */
+
+#ifndef GL_GREMEDY_string_marker
+#define GL_GREMEDY_string_marker 1
+typedef void (APIENTRYP PFNGLSTRINGMARKERGREMEDYPROC) (GLsizei len, const void *string);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glStringMarkerGREMEDY (GLsizei len, const void *string);
+#endif
+#endif /* GL_GREMEDY_string_marker */
+
+#ifndef GL_HP_convolution_border_modes
+#define GL_HP_convolution_border_modes 1
+#define GL_IGNORE_BORDER_HP 0x8150
+#define GL_CONSTANT_BORDER_HP 0x8151
+#define GL_REPLICATE_BORDER_HP 0x8153
+#define GL_CONVOLUTION_BORDER_COLOR_HP 0x8154
+#endif /* GL_HP_convolution_border_modes */
+
+#ifndef GL_HP_image_transform
+#define GL_HP_image_transform 1
+#define GL_IMAGE_SCALE_X_HP 0x8155
+#define GL_IMAGE_SCALE_Y_HP 0x8156
+#define GL_IMAGE_TRANSLATE_X_HP 0x8157
+#define GL_IMAGE_TRANSLATE_Y_HP 0x8158
+#define GL_IMAGE_ROTATE_ANGLE_HP 0x8159
+#define GL_IMAGE_ROTATE_ORIGIN_X_HP 0x815A
+#define GL_IMAGE_ROTATE_ORIGIN_Y_HP 0x815B
+#define GL_IMAGE_MAG_FILTER_HP 0x815C
+#define GL_IMAGE_MIN_FILTER_HP 0x815D
+#define GL_IMAGE_CUBIC_WEIGHT_HP 0x815E
+#define GL_CUBIC_HP 0x815F
+#define GL_AVERAGE_HP 0x8160
+#define GL_IMAGE_TRANSFORM_2D_HP 0x8161
+#define GL_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP 0x8162
+#define GL_PROXY_POST_IMAGE_TRANSFORM_COLOR_TABLE_HP 0x8163
+typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERIHPPROC) (GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERFHPPROC) (GLenum target, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERIVHPPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLIMAGETRANSFORMPARAMETERFVHPPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETIMAGETRANSFORMPARAMETERIVHPPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETIMAGETRANSFORMPARAMETERFVHPPROC) (GLenum target, GLenum pname, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glImageTransformParameteriHP (GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glImageTransformParameterfHP (GLenum target, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glImageTransformParameterivHP (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glImageTransformParameterfvHP (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glGetImageTransformParameterivHP (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetImageTransformParameterfvHP (GLenum target, GLenum pname, GLfloat *params);
+#endif
+#endif /* GL_HP_image_transform */
+
+#ifndef GL_HP_occlusion_test
+#define GL_HP_occlusion_test 1
+#define GL_OCCLUSION_TEST_HP 0x8165
+#define GL_OCCLUSION_TEST_RESULT_HP 0x8166
+#endif /* GL_HP_occlusion_test */
+
+#ifndef GL_HP_texture_lighting
+#define GL_HP_texture_lighting 1
+#define GL_TEXTURE_LIGHTING_MODE_HP 0x8167
+#define GL_TEXTURE_POST_SPECULAR_HP 0x8168
+#define GL_TEXTURE_PRE_SPECULAR_HP 0x8169
+#endif /* GL_HP_texture_lighting */
+
+#ifndef GL_IBM_cull_vertex
+#define GL_IBM_cull_vertex 1
+#define GL_CULL_VERTEX_IBM 103050
+#endif /* GL_IBM_cull_vertex */
+
+#ifndef GL_IBM_multimode_draw_arrays
+#define GL_IBM_multimode_draw_arrays 1
+typedef void (APIENTRYP PFNGLMULTIMODEDRAWARRAYSIBMPROC) (const GLenum *mode, const GLint *first, const GLsizei *count, GLsizei primcount, GLint modestride);
+typedef void (APIENTRYP PFNGLMULTIMODEDRAWELEMENTSIBMPROC) (const GLenum *mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, GLint modestride);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiModeDrawArraysIBM (const GLenum *mode, const GLint *first, const GLsizei *count, GLsizei primcount, GLint modestride);
+GLAPI void APIENTRY glMultiModeDrawElementsIBM (const GLenum *mode, const GLsizei *count, GLenum type, const void *const*indices, GLsizei primcount, GLint modestride);
+#endif
+#endif /* GL_IBM_multimode_draw_arrays */
+
+#ifndef GL_IBM_rasterpos_clip
+#define GL_IBM_rasterpos_clip 1
+#define GL_RASTER_POSITION_UNCLIPPED_IBM 0x19262
+#endif /* GL_IBM_rasterpos_clip */
+
+#ifndef GL_IBM_static_data
+#define GL_IBM_static_data 1
+#define GL_ALL_STATIC_DATA_IBM 103060
+#define GL_STATIC_VERTEX_ARRAY_IBM 103061
+typedef void (APIENTRYP PFNGLFLUSHSTATICDATAIBMPROC) (GLenum target);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFlushStaticDataIBM (GLenum target);
+#endif
+#endif /* GL_IBM_static_data */
+
+#ifndef GL_IBM_texture_mirrored_repeat
+#define GL_IBM_texture_mirrored_repeat 1
+#define GL_MIRRORED_REPEAT_IBM 0x8370
+#endif /* GL_IBM_texture_mirrored_repeat */
+
+#ifndef GL_IBM_vertex_array_lists
+#define GL_IBM_vertex_array_lists 1
+#define GL_VERTEX_ARRAY_LIST_IBM 103070
+#define GL_NORMAL_ARRAY_LIST_IBM 103071
+#define GL_COLOR_ARRAY_LIST_IBM 103072
+#define GL_INDEX_ARRAY_LIST_IBM 103073
+#define GL_TEXTURE_COORD_ARRAY_LIST_IBM 103074
+#define GL_EDGE_FLAG_ARRAY_LIST_IBM 103075
+#define GL_FOG_COORDINATE_ARRAY_LIST_IBM 103076
+#define GL_SECONDARY_COLOR_ARRAY_LIST_IBM 103077
+#define GL_VERTEX_ARRAY_LIST_STRIDE_IBM 103080
+#define GL_NORMAL_ARRAY_LIST_STRIDE_IBM 103081
+#define GL_COLOR_ARRAY_LIST_STRIDE_IBM 103082
+#define GL_INDEX_ARRAY_LIST_STRIDE_IBM 103083
+#define GL_TEXTURE_COORD_ARRAY_LIST_STRIDE_IBM 103084
+#define GL_EDGE_FLAG_ARRAY_LIST_STRIDE_IBM 103085
+#define GL_FOG_COORDINATE_ARRAY_LIST_STRIDE_IBM 103086
+#define GL_SECONDARY_COLOR_ARRAY_LIST_STRIDE_IBM 103087
+typedef void (APIENTRYP PFNGLCOLORPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLSECONDARYCOLORPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLEDGEFLAGPOINTERLISTIBMPROC) (GLint stride, const GLboolean **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLFOGCOORDPOINTERLISTIBMPROC) (GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLINDEXPOINTERLISTIBMPROC) (GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLNORMALPOINTERLISTIBMPROC) (GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLTEXCOORDPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+typedef void (APIENTRYP PFNGLVERTEXPOINTERLISTIBMPROC) (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorPointerListIBM (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+GLAPI void APIENTRY glSecondaryColorPointerListIBM (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+GLAPI void APIENTRY glEdgeFlagPointerListIBM (GLint stride, const GLboolean **pointer, GLint ptrstride);
+GLAPI void APIENTRY glFogCoordPointerListIBM (GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+GLAPI void APIENTRY glIndexPointerListIBM (GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+GLAPI void APIENTRY glNormalPointerListIBM (GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+GLAPI void APIENTRY glTexCoordPointerListIBM (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+GLAPI void APIENTRY glVertexPointerListIBM (GLint size, GLenum type, GLint stride, const void **pointer, GLint ptrstride);
+#endif
+#endif /* GL_IBM_vertex_array_lists */
+
+#ifndef GL_INGR_blend_func_separate
+#define GL_INGR_blend_func_separate 1
+typedef void (APIENTRYP PFNGLBLENDFUNCSEPARATEINGRPROC) (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendFuncSeparateINGR (GLenum sfactorRGB, GLenum dfactorRGB, GLenum sfactorAlpha, GLenum dfactorAlpha);
+#endif
+#endif /* GL_INGR_blend_func_separate */
+
+#ifndef GL_INGR_color_clamp
+#define GL_INGR_color_clamp 1
+#define GL_RED_MIN_CLAMP_INGR 0x8560
+#define GL_GREEN_MIN_CLAMP_INGR 0x8561
+#define GL_BLUE_MIN_CLAMP_INGR 0x8562
+#define GL_ALPHA_MIN_CLAMP_INGR 0x8563
+#define GL_RED_MAX_CLAMP_INGR 0x8564
+#define GL_GREEN_MAX_CLAMP_INGR 0x8565
+#define GL_BLUE_MAX_CLAMP_INGR 0x8566
+#define GL_ALPHA_MAX_CLAMP_INGR 0x8567
+#endif /* GL_INGR_color_clamp */
+
+#ifndef GL_INGR_interlace_read
+#define GL_INGR_interlace_read 1
+#define GL_INTERLACE_READ_INGR 0x8568
+#endif /* GL_INGR_interlace_read */
+
+#ifndef GL_INTEL_blackhole_render
+#define GL_INTEL_blackhole_render 1
+#define GL_BLACKHOLE_RENDER_INTEL 0x83FC
+#endif /* GL_INTEL_blackhole_render */
+
+#ifndef GL_INTEL_conservative_rasterization
+#define GL_INTEL_conservative_rasterization 1
+#define GL_CONSERVATIVE_RASTERIZATION_INTEL 0x83FE
+#endif /* GL_INTEL_conservative_rasterization */
+
+#ifndef GL_INTEL_fragment_shader_ordering
+#define GL_INTEL_fragment_shader_ordering 1
+#endif /* GL_INTEL_fragment_shader_ordering */
+
+#ifndef GL_INTEL_framebuffer_CMAA
+#define GL_INTEL_framebuffer_CMAA 1
+typedef void (APIENTRYP PFNGLAPPLYFRAMEBUFFERATTACHMENTCMAAINTELPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glApplyFramebufferAttachmentCMAAINTEL (void);
+#endif
+#endif /* GL_INTEL_framebuffer_CMAA */
+
+#ifndef GL_INTEL_map_texture
+#define GL_INTEL_map_texture 1
+#define GL_TEXTURE_MEMORY_LAYOUT_INTEL 0x83FF
+#define GL_LAYOUT_DEFAULT_INTEL 0
+#define GL_LAYOUT_LINEAR_INTEL 1
+#define GL_LAYOUT_LINEAR_CPU_CACHED_INTEL 2
+typedef void (APIENTRYP PFNGLSYNCTEXTUREINTELPROC) (GLuint texture);
+typedef void (APIENTRYP PFNGLUNMAPTEXTURE2DINTELPROC) (GLuint texture, GLint level);
+typedef void *(APIENTRYP PFNGLMAPTEXTURE2DINTELPROC) (GLuint texture, GLint level, GLbitfield access, GLint *stride, GLenum *layout);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSyncTextureINTEL (GLuint texture);
+GLAPI void APIENTRY glUnmapTexture2DINTEL (GLuint texture, GLint level);
+GLAPI void *APIENTRY glMapTexture2DINTEL (GLuint texture, GLint level, GLbitfield access, GLint *stride, GLenum *layout);
+#endif
+#endif /* GL_INTEL_map_texture */
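+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * writing directly into level 0 of a texture through a CPU mapping.  Assumes
+ * <string.h> for memset and the core GL_MAP_WRITE_BIT access flag; the
+ * stride returned by the driver is in bytes.
+ *
+ *   static void fill_level0(GLuint tex, GLsizei height)
+ *   {
+ *       GLint stride = 0;
+ *       GLenum layout = GL_LAYOUT_DEFAULT_INTEL;
+ *       void *ptr = glMapTexture2DINTEL(tex, 0, GL_MAP_WRITE_BIT,
+ *                                       &stride, &layout);
+ *       if (ptr) {
+ *           memset(ptr, 0xff, (size_t)stride * height);
+ *           glUnmapTexture2DINTEL(tex, 0);
+ *       }
+ *   }
+ */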
+
+#ifndef GL_INTEL_parallel_arrays
+#define GL_INTEL_parallel_arrays 1
+#define GL_PARALLEL_ARRAYS_INTEL 0x83F4
+#define GL_VERTEX_ARRAY_PARALLEL_POINTERS_INTEL 0x83F5
+#define GL_NORMAL_ARRAY_PARALLEL_POINTERS_INTEL 0x83F6
+#define GL_COLOR_ARRAY_PARALLEL_POINTERS_INTEL 0x83F7
+#define GL_TEXTURE_COORD_ARRAY_PARALLEL_POINTERS_INTEL 0x83F8
+typedef void (APIENTRYP PFNGLVERTEXPOINTERVINTELPROC) (GLint size, GLenum type, const void **pointer);
+typedef void (APIENTRYP PFNGLNORMALPOINTERVINTELPROC) (GLenum type, const void **pointer);
+typedef void (APIENTRYP PFNGLCOLORPOINTERVINTELPROC) (GLint size, GLenum type, const void **pointer);
+typedef void (APIENTRYP PFNGLTEXCOORDPOINTERVINTELPROC) (GLint size, GLenum type, const void **pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexPointervINTEL (GLint size, GLenum type, const void **pointer);
+GLAPI void APIENTRY glNormalPointervINTEL (GLenum type, const void **pointer);
+GLAPI void APIENTRY glColorPointervINTEL (GLint size, GLenum type, const void **pointer);
+GLAPI void APIENTRY glTexCoordPointervINTEL (GLint size, GLenum type, const void **pointer);
+#endif
+#endif /* GL_INTEL_parallel_arrays */
+
+#ifndef GL_INTEL_performance_query
+#define GL_INTEL_performance_query 1
+#define GL_PERFQUERY_SINGLE_CONTEXT_INTEL 0x00000000
+#define GL_PERFQUERY_GLOBAL_CONTEXT_INTEL 0x00000001
+#define GL_PERFQUERY_WAIT_INTEL 0x83FB
+#define GL_PERFQUERY_FLUSH_INTEL 0x83FA
+#define GL_PERFQUERY_DONOT_FLUSH_INTEL 0x83F9
+#define GL_PERFQUERY_COUNTER_EVENT_INTEL 0x94F0
+#define GL_PERFQUERY_COUNTER_DURATION_NORM_INTEL 0x94F1
+#define GL_PERFQUERY_COUNTER_DURATION_RAW_INTEL 0x94F2
+#define GL_PERFQUERY_COUNTER_THROUGHPUT_INTEL 0x94F3
+#define GL_PERFQUERY_COUNTER_RAW_INTEL 0x94F4
+#define GL_PERFQUERY_COUNTER_TIMESTAMP_INTEL 0x94F5
+#define GL_PERFQUERY_COUNTER_DATA_UINT32_INTEL 0x94F8
+#define GL_PERFQUERY_COUNTER_DATA_UINT64_INTEL 0x94F9
+#define GL_PERFQUERY_COUNTER_DATA_FLOAT_INTEL 0x94FA
+#define GL_PERFQUERY_COUNTER_DATA_DOUBLE_INTEL 0x94FB
+#define GL_PERFQUERY_COUNTER_DATA_BOOL32_INTEL 0x94FC
+#define GL_PERFQUERY_QUERY_NAME_LENGTH_MAX_INTEL 0x94FD
+#define GL_PERFQUERY_COUNTER_NAME_LENGTH_MAX_INTEL 0x94FE
+#define GL_PERFQUERY_COUNTER_DESC_LENGTH_MAX_INTEL 0x94FF
+#define GL_PERFQUERY_GPA_EXTENDED_COUNTERS_INTEL 0x9500
+typedef void (APIENTRYP PFNGLBEGINPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (APIENTRYP PFNGLCREATEPERFQUERYINTELPROC) (GLuint queryId, GLuint *queryHandle);
+typedef void (APIENTRYP PFNGLDELETEPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (APIENTRYP PFNGLENDPERFQUERYINTELPROC) (GLuint queryHandle);
+typedef void (APIENTRYP PFNGLGETFIRSTPERFQUERYIDINTELPROC) (GLuint *queryId);
+typedef void (APIENTRYP PFNGLGETNEXTPERFQUERYIDINTELPROC) (GLuint queryId, GLuint *nextQueryId);
+typedef void (APIENTRYP PFNGLGETPERFCOUNTERINFOINTELPROC) (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
+typedef void (APIENTRYP PFNGLGETPERFQUERYDATAINTELPROC) (GLuint queryHandle, GLuint flags, GLsizei dataSize, void *data, GLuint *bytesWritten);
+typedef void (APIENTRYP PFNGLGETPERFQUERYIDBYNAMEINTELPROC) (GLchar *queryName, GLuint *queryId);
+typedef void (APIENTRYP PFNGLGETPERFQUERYINFOINTELPROC) (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginPerfQueryINTEL (GLuint queryHandle);
+GLAPI void APIENTRY glCreatePerfQueryINTEL (GLuint queryId, GLuint *queryHandle);
+GLAPI void APIENTRY glDeletePerfQueryINTEL (GLuint queryHandle);
+GLAPI void APIENTRY glEndPerfQueryINTEL (GLuint queryHandle);
+GLAPI void APIENTRY glGetFirstPerfQueryIdINTEL (GLuint *queryId);
+GLAPI void APIENTRY glGetNextPerfQueryIdINTEL (GLuint queryId, GLuint *nextQueryId);
+GLAPI void APIENTRY glGetPerfCounterInfoINTEL (GLuint queryId, GLuint counterId, GLuint counterNameLength, GLchar *counterName, GLuint counterDescLength, GLchar *counterDesc, GLuint *counterOffset, GLuint *counterDataSize, GLuint *counterTypeEnum, GLuint *counterDataTypeEnum, GLuint64 *rawCounterMaxValue);
+GLAPI void APIENTRY glGetPerfQueryDataINTEL (GLuint queryHandle, GLuint flags, GLsizei dataSize, void *data, GLuint *bytesWritten);
+GLAPI void APIENTRY glGetPerfQueryIdByNameINTEL (GLchar *queryName, GLuint *queryId);
+GLAPI void APIENTRY glGetPerfQueryInfoINTEL (GLuint queryId, GLuint queryNameLength, GLchar *queryName, GLuint *dataSize, GLuint *noCounters, GLuint *noInstances, GLuint *capsMask);
+#endif
+#endif /* GL_INTEL_performance_query */
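+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * running the driver's first advertised performance query around a workload.
+ * The caller supplies a buffer at least as large as the dataSize reported by
+ * glGetPerfQueryInfoINTEL.
+ *
+ *   static void measure_clear(GLsizei dataSize, void *data)
+ *   {
+ *       GLuint queryId = 0, handle = 0, written = 0;
+ *       glGetFirstPerfQueryIdINTEL(&queryId);
+ *       if (queryId == 0)
+ *           return;                              // no queries available
+ *       glCreatePerfQueryINTEL(queryId, &handle);
+ *       glBeginPerfQueryINTEL(handle);
+ *       glClear(GL_COLOR_BUFFER_BIT);            // the measured workload
+ *       glEndPerfQueryINTEL(handle);
+ *       glGetPerfQueryDataINTEL(handle, GL_PERFQUERY_WAIT_INTEL,
+ *                               dataSize, data, &written);
+ *       glDeletePerfQueryINTEL(handle);
+ *   }
+ */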
+
+#ifndef GL_MESAX_texture_stack
+#define GL_MESAX_texture_stack 1
+#define GL_TEXTURE_1D_STACK_MESAX 0x8759
+#define GL_TEXTURE_2D_STACK_MESAX 0x875A
+#define GL_PROXY_TEXTURE_1D_STACK_MESAX 0x875B
+#define GL_PROXY_TEXTURE_2D_STACK_MESAX 0x875C
+#define GL_TEXTURE_1D_STACK_BINDING_MESAX 0x875D
+#define GL_TEXTURE_2D_STACK_BINDING_MESAX 0x875E
+#endif /* GL_MESAX_texture_stack */
+
+#ifndef GL_MESA_framebuffer_flip_y
+#define GL_MESA_framebuffer_flip_y 1
+#define GL_FRAMEBUFFER_FLIP_Y_MESA 0x8BBB
+typedef void (APIENTRYP PFNGLFRAMEBUFFERPARAMETERIMESAPROC) (GLenum target, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLGETFRAMEBUFFERPARAMETERIVMESAPROC) (GLenum target, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferParameteriMESA (GLenum target, GLenum pname, GLint param);
+GLAPI void APIENTRY glGetFramebufferParameterivMESA (GLenum target, GLenum pname, GLint *params);
+#endif
+#endif /* GL_MESA_framebuffer_flip_y */
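+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * flipping the y origin of the currently bound framebuffer object so its
+ * contents match a y-inverted consumer without a separate blit.
+ *
+ *   static void enable_flip_y(void)
+ *   {
+ *       glFramebufferParameteriMESA(GL_FRAMEBUFFER,
+ *                                   GL_FRAMEBUFFER_FLIP_Y_MESA, GL_TRUE);
+ *   }
+ */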
+
+#ifndef GL_MESA_pack_invert
+#define GL_MESA_pack_invert 1
+#define GL_PACK_INVERT_MESA 0x8758
+#endif /* GL_MESA_pack_invert */
+
+#ifndef GL_MESA_program_binary_formats
+#define GL_MESA_program_binary_formats 1
+#define GL_PROGRAM_BINARY_FORMAT_MESA 0x875F
+#endif /* GL_MESA_program_binary_formats */
+
+#ifndef GL_MESA_resize_buffers
+#define GL_MESA_resize_buffers 1
+typedef void (APIENTRYP PFNGLRESIZEBUFFERSMESAPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glResizeBuffersMESA (void);
+#endif
+#endif /* GL_MESA_resize_buffers */
+
+#ifndef GL_MESA_shader_integer_functions
+#define GL_MESA_shader_integer_functions 1
+#endif /* GL_MESA_shader_integer_functions */
+
+#ifndef GL_MESA_tile_raster_order
+#define GL_MESA_tile_raster_order 1
+#define GL_TILE_RASTER_ORDER_FIXED_MESA 0x8BB8
+#define GL_TILE_RASTER_ORDER_INCREASING_X_MESA 0x8BB9
+#define GL_TILE_RASTER_ORDER_INCREASING_Y_MESA 0x8BBA
+#endif /* GL_MESA_tile_raster_order */
+
+#ifndef GL_MESA_window_pos
+#define GL_MESA_window_pos 1
+typedef void (APIENTRYP PFNGLWINDOWPOS2DMESAPROC) (GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2DVMESAPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2FMESAPROC) (GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2FVMESAPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2IMESAPROC) (GLint x, GLint y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2IVMESAPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS2SMESAPROC) (GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLWINDOWPOS2SVMESAPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3DMESAPROC) (GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3DVMESAPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3FMESAPROC) (GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3FVMESAPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3IMESAPROC) (GLint x, GLint y, GLint z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3IVMESAPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS3SMESAPROC) (GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLWINDOWPOS3SVMESAPROC) (const GLshort *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS4DMESAPROC) (GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLWINDOWPOS4DVMESAPROC) (const GLdouble *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS4FMESAPROC) (GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLWINDOWPOS4FVMESAPROC) (const GLfloat *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS4IMESAPROC) (GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLWINDOWPOS4IVMESAPROC) (const GLint *v);
+typedef void (APIENTRYP PFNGLWINDOWPOS4SMESAPROC) (GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (APIENTRYP PFNGLWINDOWPOS4SVMESAPROC) (const GLshort *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glWindowPos2dMESA (GLdouble x, GLdouble y);
+GLAPI void APIENTRY glWindowPos2dvMESA (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos2fMESA (GLfloat x, GLfloat y);
+GLAPI void APIENTRY glWindowPos2fvMESA (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos2iMESA (GLint x, GLint y);
+GLAPI void APIENTRY glWindowPos2ivMESA (const GLint *v);
+GLAPI void APIENTRY glWindowPos2sMESA (GLshort x, GLshort y);
+GLAPI void APIENTRY glWindowPos2svMESA (const GLshort *v);
+GLAPI void APIENTRY glWindowPos3dMESA (GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glWindowPos3dvMESA (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos3fMESA (GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glWindowPos3fvMESA (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos3iMESA (GLint x, GLint y, GLint z);
+GLAPI void APIENTRY glWindowPos3ivMESA (const GLint *v);
+GLAPI void APIENTRY glWindowPos3sMESA (GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glWindowPos3svMESA (const GLshort *v);
+GLAPI void APIENTRY glWindowPos4dMESA (GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glWindowPos4dvMESA (const GLdouble *v);
+GLAPI void APIENTRY glWindowPos4fMESA (GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glWindowPos4fvMESA (const GLfloat *v);
+GLAPI void APIENTRY glWindowPos4iMESA (GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glWindowPos4ivMESA (const GLint *v);
+GLAPI void APIENTRY glWindowPos4sMESA (GLshort x, GLshort y, GLshort z, GLshort w);
+GLAPI void APIENTRY glWindowPos4svMESA (const GLshort *v);
+#endif
+#endif /* GL_MESA_window_pos */
+
+#ifndef GL_MESA_ycbcr_texture
+#define GL_MESA_ycbcr_texture 1
+#define GL_UNSIGNED_SHORT_8_8_MESA 0x85BA
+#define GL_UNSIGNED_SHORT_8_8_REV_MESA 0x85BB
+#define GL_YCBCR_MESA 0x8757
+#endif /* GL_MESA_ycbcr_texture */
+
+#ifndef GL_NVX_blend_equation_advanced_multi_draw_buffers
+#define GL_NVX_blend_equation_advanced_multi_draw_buffers 1
+#endif /* GL_NVX_blend_equation_advanced_multi_draw_buffers */
+
+#ifndef GL_NVX_conditional_render
+#define GL_NVX_conditional_render 1
+typedef void (APIENTRYP PFNGLBEGINCONDITIONALRENDERNVXPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLENDCONDITIONALRENDERNVXPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginConditionalRenderNVX (GLuint id);
+GLAPI void APIENTRY glEndConditionalRenderNVX (void);
+#endif
+#endif /* GL_NVX_conditional_render */
+
+#ifndef GL_NVX_gpu_memory_info
+#define GL_NVX_gpu_memory_info 1
+#define GL_GPU_MEMORY_INFO_DEDICATED_VIDMEM_NVX 0x9047
+#define GL_GPU_MEMORY_INFO_TOTAL_AVAILABLE_MEMORY_NVX 0x9048
+#define GL_GPU_MEMORY_INFO_CURRENT_AVAILABLE_VIDMEM_NVX 0x9049
+#define GL_GPU_MEMORY_INFO_EVICTION_COUNT_NVX 0x904A
+#define GL_GPU_MEMORY_INFO_EVICTED_MEMORY_NVX 0x904B
+#endif /* GL_NVX_gpu_memory_info */
+
+#ifndef GL_NVX_gpu_multicast2
+#define GL_NVX_gpu_multicast2 1
+#define GL_UPLOAD_GPU_MASK_NVX 0x954A
+typedef void (APIENTRYP PFNGLUPLOADGPUMASKNVXPROC) (GLbitfield mask);
+typedef void (APIENTRYP PFNGLMULTICASTVIEWPORTARRAYVNVXPROC) (GLuint gpu, GLuint first, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTICASTVIEWPORTPOSITIONWSCALENVXPROC) (GLuint gpu, GLuint index, GLfloat xcoeff, GLfloat ycoeff);
+typedef void (APIENTRYP PFNGLMULTICASTSCISSORARRAYVNVXPROC) (GLuint gpu, GLuint first, GLsizei count, const GLint *v);
+typedef GLuint (APIENTRYP PFNGLASYNCCOPYBUFFERSUBDATANVXPROC) (GLsizei waitSemaphoreCount, const GLuint *waitSemaphoreArray, const GLuint64 *fenceValueArray, GLuint readGpu, GLbitfield writeGpuMask, GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size, GLsizei signalSemaphoreCount, const GLuint *signalSemaphoreArray, const GLuint64 *signalValueArray);
+typedef GLuint (APIENTRYP PFNGLASYNCCOPYIMAGESUBDATANVXPROC) (GLsizei waitSemaphoreCount, const GLuint *waitSemaphoreArray, const GLuint64 *waitValueArray, GLuint srcGpu, GLbitfield dstGpuMask, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth, GLsizei signalSemaphoreCount, const GLuint *signalSemaphoreArray, const GLuint64 *signalValueArray);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glUploadGpuMaskNVX (GLbitfield mask);
+GLAPI void APIENTRY glMulticastViewportArrayvNVX (GLuint gpu, GLuint first, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glMulticastViewportPositionWScaleNVX (GLuint gpu, GLuint index, GLfloat xcoeff, GLfloat ycoeff);
+GLAPI void APIENTRY glMulticastScissorArrayvNVX (GLuint gpu, GLuint first, GLsizei count, const GLint *v);
+GLAPI GLuint APIENTRY glAsyncCopyBufferSubDataNVX (GLsizei waitSemaphoreCount, const GLuint *waitSemaphoreArray, const GLuint64 *fenceValueArray, GLuint readGpu, GLbitfield writeGpuMask, GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size, GLsizei signalSemaphoreCount, const GLuint *signalSemaphoreArray, const GLuint64 *signalValueArray);
+GLAPI GLuint APIENTRY glAsyncCopyImageSubDataNVX (GLsizei waitSemaphoreCount, const GLuint *waitSemaphoreArray, const GLuint64 *waitValueArray, GLuint srcGpu, GLbitfield dstGpuMask, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth, GLsizei signalSemaphoreCount, const GLuint *signalSemaphoreArray, const GLuint64 *signalValueArray);
+#endif
+#endif /* GL_NVX_gpu_multicast2 */
+
+#ifndef GL_NVX_linked_gpu_multicast
+#define GL_NVX_linked_gpu_multicast 1
+#define GL_LGPU_SEPARATE_STORAGE_BIT_NVX 0x0800
+#define GL_MAX_LGPU_GPUS_NVX 0x92BA
+typedef void (APIENTRYP PFNGLLGPUNAMEDBUFFERSUBDATANVXPROC) (GLbitfield gpuMask, GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+typedef void (APIENTRYP PFNGLLGPUCOPYIMAGESUBDATANVXPROC) (GLuint sourceGpu, GLbitfield destinationGpuMask, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
+typedef void (APIENTRYP PFNGLLGPUINTERLOCKNVXPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glLGPUNamedBufferSubDataNVX (GLbitfield gpuMask, GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+GLAPI void APIENTRY glLGPUCopyImageSubDataNVX (GLuint sourceGpu, GLbitfield destinationGpuMask, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
+GLAPI void APIENTRY glLGPUInterlockNVX (void);
+#endif
+#endif /* GL_NVX_linked_gpu_multicast */
+
+#ifndef GL_NVX_progress_fence
+#define GL_NVX_progress_fence 1
+typedef GLuint (APIENTRYP PFNGLCREATEPROGRESSFENCENVXPROC) (void);
+typedef void (APIENTRYP PFNGLSIGNALSEMAPHOREUI64NVXPROC) (GLuint signalGpu, GLsizei fenceObjectCount, const GLuint *semaphoreArray, const GLuint64 *fenceValueArray);
+typedef void (APIENTRYP PFNGLWAITSEMAPHOREUI64NVXPROC) (GLuint waitGpu, GLsizei fenceObjectCount, const GLuint *semaphoreArray, const GLuint64 *fenceValueArray);
+typedef void (APIENTRYP PFNGLCLIENTWAITSEMAPHOREUI64NVXPROC) (GLsizei fenceObjectCount, const GLuint *semaphoreArray, const GLuint64 *fenceValueArray);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLuint APIENTRY glCreateProgressFenceNVX (void);
+GLAPI void APIENTRY glSignalSemaphoreui64NVX (GLuint signalGpu, GLsizei fenceObjectCount, const GLuint *semaphoreArray, const GLuint64 *fenceValueArray);
+GLAPI void APIENTRY glWaitSemaphoreui64NVX (GLuint waitGpu, GLsizei fenceObjectCount, const GLuint *semaphoreArray, const GLuint64 *fenceValueArray);
+GLAPI void APIENTRY glClientWaitSemaphoreui64NVX (GLsizei fenceObjectCount, const GLuint *semaphoreArray, const GLuint64 *fenceValueArray);
+#endif
+#endif /* GL_NVX_progress_fence */
+
+#ifndef GL_NV_alpha_to_coverage_dither_control
+#define GL_NV_alpha_to_coverage_dither_control 1
+#define GL_ALPHA_TO_COVERAGE_DITHER_DEFAULT_NV 0x934D
+#define GL_ALPHA_TO_COVERAGE_DITHER_ENABLE_NV 0x934E
+#define GL_ALPHA_TO_COVERAGE_DITHER_DISABLE_NV 0x934F
+#define GL_ALPHA_TO_COVERAGE_DITHER_MODE_NV 0x92BF
+typedef void (APIENTRYP PFNGLALPHATOCOVERAGEDITHERCONTROLNVPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glAlphaToCoverageDitherControlNV (GLenum mode);
+#endif
+#endif /* GL_NV_alpha_to_coverage_dither_control */
+
+#ifndef GL_NV_bindless_multi_draw_indirect
+#define GL_NV_bindless_multi_draw_indirect 1
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTBINDLESSNVPROC) (GLenum mode, const void *indirect, GLsizei drawCount, GLsizei stride, GLint vertexBufferCount);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTBINDLESSNVPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawCount, GLsizei stride, GLint vertexBufferCount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiDrawArraysIndirectBindlessNV (GLenum mode, const void *indirect, GLsizei drawCount, GLsizei stride, GLint vertexBufferCount);
+GLAPI void APIENTRY glMultiDrawElementsIndirectBindlessNV (GLenum mode, GLenum type, const void *indirect, GLsizei drawCount, GLsizei stride, GLint vertexBufferCount);
+#endif
+#endif /* GL_NV_bindless_multi_draw_indirect */
+
+#ifndef GL_NV_bindless_multi_draw_indirect_count
+#define GL_NV_bindless_multi_draw_indirect_count 1
+typedef void (APIENTRYP PFNGLMULTIDRAWARRAYSINDIRECTBINDLESSCOUNTNVPROC) (GLenum mode, const void *indirect, GLsizei drawCount, GLsizei maxDrawCount, GLsizei stride, GLint vertexBufferCount);
+typedef void (APIENTRYP PFNGLMULTIDRAWELEMENTSINDIRECTBINDLESSCOUNTNVPROC) (GLenum mode, GLenum type, const void *indirect, GLsizei drawCount, GLsizei maxDrawCount, GLsizei stride, GLint vertexBufferCount);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMultiDrawArraysIndirectBindlessCountNV (GLenum mode, const void *indirect, GLsizei drawCount, GLsizei maxDrawCount, GLsizei stride, GLint vertexBufferCount);
+GLAPI void APIENTRY glMultiDrawElementsIndirectBindlessCountNV (GLenum mode, GLenum type, const void *indirect, GLsizei drawCount, GLsizei maxDrawCount, GLsizei stride, GLint vertexBufferCount);
+#endif
+#endif /* GL_NV_bindless_multi_draw_indirect_count */
+
+#ifndef GL_NV_bindless_texture
+#define GL_NV_bindless_texture 1
+typedef GLuint64 (APIENTRYP PFNGLGETTEXTUREHANDLENVPROC) (GLuint texture);
+typedef GLuint64 (APIENTRYP PFNGLGETTEXTURESAMPLERHANDLENVPROC) (GLuint texture, GLuint sampler);
+typedef void (APIENTRYP PFNGLMAKETEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
+typedef void (APIENTRYP PFNGLMAKETEXTUREHANDLENONRESIDENTNVPROC) (GLuint64 handle);
+typedef GLuint64 (APIENTRYP PFNGLGETIMAGEHANDLENVPROC) (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+typedef void (APIENTRYP PFNGLMAKEIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle, GLenum access);
+typedef void (APIENTRYP PFNGLMAKEIMAGEHANDLENONRESIDENTNVPROC) (GLuint64 handle);
+typedef void (APIENTRYP PFNGLUNIFORMHANDLEUI64NVPROC) (GLint location, GLuint64 value);
+typedef void (APIENTRYP PFNGLUNIFORMHANDLEUI64VNVPROC) (GLint location, GLsizei count, const GLuint64 *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64NVPROC) (GLuint program, GLint location, GLuint64 value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMHANDLEUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+typedef GLboolean (APIENTRYP PFNGLISTEXTUREHANDLERESIDENTNVPROC) (GLuint64 handle);
+typedef GLboolean (APIENTRYP PFNGLISIMAGEHANDLERESIDENTNVPROC) (GLuint64 handle);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLuint64 APIENTRY glGetTextureHandleNV (GLuint texture);
+GLAPI GLuint64 APIENTRY glGetTextureSamplerHandleNV (GLuint texture, GLuint sampler);
+GLAPI void APIENTRY glMakeTextureHandleResidentNV (GLuint64 handle);
+GLAPI void APIENTRY glMakeTextureHandleNonResidentNV (GLuint64 handle);
+GLAPI GLuint64 APIENTRY glGetImageHandleNV (GLuint texture, GLint level, GLboolean layered, GLint layer, GLenum format);
+GLAPI void APIENTRY glMakeImageHandleResidentNV (GLuint64 handle, GLenum access);
+GLAPI void APIENTRY glMakeImageHandleNonResidentNV (GLuint64 handle);
+GLAPI void APIENTRY glUniformHandleui64NV (GLint location, GLuint64 value);
+GLAPI void APIENTRY glUniformHandleui64vNV (GLint location, GLsizei count, const GLuint64 *value);
+GLAPI void APIENTRY glProgramUniformHandleui64NV (GLuint program, GLint location, GLuint64 value);
+GLAPI void APIENTRY glProgramUniformHandleui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64 *values);
+GLAPI GLboolean APIENTRY glIsTextureHandleResidentNV (GLuint64 handle);
+GLAPI GLboolean APIENTRY glIsImageHandleResidentNV (GLuint64 handle);
+#endif
+#endif /* GL_NV_bindless_texture */
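+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * turning a texture into a resident 64-bit handle and passing it to a shader
+ * uniform, bypassing texture-unit binding.
+ *
+ *   static void use_bindless(GLuint program, GLint location, GLuint tex)
+ *   {
+ *       GLuint64 handle = glGetTextureHandleNV(tex);
+ *       if (!glIsTextureHandleResidentNV(handle))
+ *           glMakeTextureHandleResidentNV(handle);  // must precede shader use
+ *       glProgramUniformHandleui64NV(program, location, handle);
+ *   }
+ */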
+
+#ifndef GL_NV_blend_equation_advanced
+#define GL_NV_blend_equation_advanced 1
+#define GL_BLEND_OVERLAP_NV 0x9281
+#define GL_BLEND_PREMULTIPLIED_SRC_NV 0x9280
+#define GL_BLUE_NV 0x1905
+#define GL_COLORBURN_NV 0x929A
+#define GL_COLORDODGE_NV 0x9299
+#define GL_CONJOINT_NV 0x9284
+#define GL_CONTRAST_NV 0x92A1
+#define GL_DARKEN_NV 0x9297
+#define GL_DIFFERENCE_NV 0x929E
+#define GL_DISJOINT_NV 0x9283
+#define GL_DST_ATOP_NV 0x928F
+#define GL_DST_IN_NV 0x928B
+#define GL_DST_NV 0x9287
+#define GL_DST_OUT_NV 0x928D
+#define GL_DST_OVER_NV 0x9289
+#define GL_EXCLUSION_NV 0x92A0
+#define GL_GREEN_NV 0x1904
+#define GL_HARDLIGHT_NV 0x929B
+#define GL_HARDMIX_NV 0x92A9
+#define GL_HSL_COLOR_NV 0x92AF
+#define GL_HSL_HUE_NV 0x92AD
+#define GL_HSL_LUMINOSITY_NV 0x92B0
+#define GL_HSL_SATURATION_NV 0x92AE
+#define GL_INVERT_OVG_NV 0x92B4
+#define GL_INVERT_RGB_NV 0x92A3
+#define GL_LIGHTEN_NV 0x9298
+#define GL_LINEARBURN_NV 0x92A5
+#define GL_LINEARDODGE_NV 0x92A4
+#define GL_LINEARLIGHT_NV 0x92A7
+#define GL_MINUS_CLAMPED_NV 0x92B3
+#define GL_MINUS_NV 0x929F
+#define GL_MULTIPLY_NV 0x9294
+#define GL_OVERLAY_NV 0x9296
+#define GL_PINLIGHT_NV 0x92A8
+#define GL_PLUS_CLAMPED_ALPHA_NV 0x92B2
+#define GL_PLUS_CLAMPED_NV 0x92B1
+#define GL_PLUS_DARKER_NV 0x9292
+#define GL_PLUS_NV 0x9291
+#define GL_RED_NV 0x1903
+#define GL_SCREEN_NV 0x9295
+#define GL_SOFTLIGHT_NV 0x929C
+#define GL_SRC_ATOP_NV 0x928E
+#define GL_SRC_IN_NV 0x928A
+#define GL_SRC_NV 0x9286
+#define GL_SRC_OUT_NV 0x928C
+#define GL_SRC_OVER_NV 0x9288
+#define GL_UNCORRELATED_NV 0x9282
+#define GL_VIVIDLIGHT_NV 0x92A6
+#define GL_XOR_NV 0x1506
+typedef void (APIENTRYP PFNGLBLENDPARAMETERINVPROC) (GLenum pname, GLint value);
+typedef void (APIENTRYP PFNGLBLENDBARRIERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBlendParameteriNV (GLenum pname, GLint value);
+GLAPI void APIENTRY glBlendBarrierNV (void);
+#endif
+#endif /* GL_NV_blend_equation_advanced */
+
+#ifndef GL_NV_blend_equation_advanced_coherent
+#define GL_NV_blend_equation_advanced_coherent 1
+#define GL_BLEND_ADVANCED_COHERENT_NV 0x9285
+#endif /* GL_NV_blend_equation_advanced_coherent */
+
+#ifndef GL_NV_blend_minmax_factor
+#define GL_NV_blend_minmax_factor 1
+#endif /* GL_NV_blend_minmax_factor */
+
+#ifndef GL_NV_blend_square
+#define GL_NV_blend_square 1
+#endif /* GL_NV_blend_square */
+
+#ifndef GL_NV_clip_space_w_scaling
+#define GL_NV_clip_space_w_scaling 1
+#define GL_VIEWPORT_POSITION_W_SCALE_NV 0x937C
+#define GL_VIEWPORT_POSITION_W_SCALE_X_COEFF_NV 0x937D
+#define GL_VIEWPORT_POSITION_W_SCALE_Y_COEFF_NV 0x937E
+typedef void (APIENTRYP PFNGLVIEWPORTPOSITIONWSCALENVPROC) (GLuint index, GLfloat xcoeff, GLfloat ycoeff);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glViewportPositionWScaleNV (GLuint index, GLfloat xcoeff, GLfloat ycoeff);
+#endif
+#endif /* GL_NV_clip_space_w_scaling */
+
+#ifndef GL_NV_command_list
+#define GL_NV_command_list 1
+#define GL_TERMINATE_SEQUENCE_COMMAND_NV 0x0000
+#define GL_NOP_COMMAND_NV 0x0001
+#define GL_DRAW_ELEMENTS_COMMAND_NV 0x0002
+#define GL_DRAW_ARRAYS_COMMAND_NV 0x0003
+#define GL_DRAW_ELEMENTS_STRIP_COMMAND_NV 0x0004
+#define GL_DRAW_ARRAYS_STRIP_COMMAND_NV 0x0005
+#define GL_DRAW_ELEMENTS_INSTANCED_COMMAND_NV 0x0006
+#define GL_DRAW_ARRAYS_INSTANCED_COMMAND_NV 0x0007
+#define GL_ELEMENT_ADDRESS_COMMAND_NV 0x0008
+#define GL_ATTRIBUTE_ADDRESS_COMMAND_NV 0x0009
+#define GL_UNIFORM_ADDRESS_COMMAND_NV 0x000A
+#define GL_BLEND_COLOR_COMMAND_NV 0x000B
+#define GL_STENCIL_REF_COMMAND_NV 0x000C
+#define GL_LINE_WIDTH_COMMAND_NV 0x000D
+#define GL_POLYGON_OFFSET_COMMAND_NV 0x000E
+#define GL_ALPHA_REF_COMMAND_NV 0x000F
+#define GL_VIEWPORT_COMMAND_NV 0x0010
+#define GL_SCISSOR_COMMAND_NV 0x0011
+#define GL_FRONT_FACE_COMMAND_NV 0x0012
+typedef void (APIENTRYP PFNGLCREATESTATESNVPROC) (GLsizei n, GLuint *states);
+typedef void (APIENTRYP PFNGLDELETESTATESNVPROC) (GLsizei n, const GLuint *states);
+typedef GLboolean (APIENTRYP PFNGLISSTATENVPROC) (GLuint state);
+typedef void (APIENTRYP PFNGLSTATECAPTURENVPROC) (GLuint state, GLenum mode);
+typedef GLuint (APIENTRYP PFNGLGETCOMMANDHEADERNVPROC) (GLenum tokenID, GLuint size);
+typedef GLushort (APIENTRYP PFNGLGETSTAGEINDEXNVPROC) (GLenum shadertype);
+typedef void (APIENTRYP PFNGLDRAWCOMMANDSNVPROC) (GLenum primitiveMode, GLuint buffer, const GLintptr *indirects, const GLsizei *sizes, GLuint count);
+typedef void (APIENTRYP PFNGLDRAWCOMMANDSADDRESSNVPROC) (GLenum primitiveMode, const GLuint64 *indirects, const GLsizei *sizes, GLuint count);
+typedef void (APIENTRYP PFNGLDRAWCOMMANDSSTATESNVPROC) (GLuint buffer, const GLintptr *indirects, const GLsizei *sizes, const GLuint *states, const GLuint *fbos, GLuint count);
+typedef void (APIENTRYP PFNGLDRAWCOMMANDSSTATESADDRESSNVPROC) (const GLuint64 *indirects, const GLsizei *sizes, const GLuint *states, const GLuint *fbos, GLuint count);
+typedef void (APIENTRYP PFNGLCREATECOMMANDLISTSNVPROC) (GLsizei n, GLuint *lists);
+typedef void (APIENTRYP PFNGLDELETECOMMANDLISTSNVPROC) (GLsizei n, const GLuint *lists);
+typedef GLboolean (APIENTRYP PFNGLISCOMMANDLISTNVPROC) (GLuint list);
+typedef void (APIENTRYP PFNGLLISTDRAWCOMMANDSSTATESCLIENTNVPROC) (GLuint list, GLuint segment, const void **indirects, const GLsizei *sizes, const GLuint *states, const GLuint *fbos, GLuint count);
+typedef void (APIENTRYP PFNGLCOMMANDLISTSEGMENTSNVPROC) (GLuint list, GLuint segments);
+typedef void (APIENTRYP PFNGLCOMPILECOMMANDLISTNVPROC) (GLuint list);
+typedef void (APIENTRYP PFNGLCALLCOMMANDLISTNVPROC) (GLuint list);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCreateStatesNV (GLsizei n, GLuint *states);
+GLAPI void APIENTRY glDeleteStatesNV (GLsizei n, const GLuint *states);
+GLAPI GLboolean APIENTRY glIsStateNV (GLuint state);
+GLAPI void APIENTRY glStateCaptureNV (GLuint state, GLenum mode);
+GLAPI GLuint APIENTRY glGetCommandHeaderNV (GLenum tokenID, GLuint size);
+GLAPI GLushort APIENTRY glGetStageIndexNV (GLenum shadertype);
+GLAPI void APIENTRY glDrawCommandsNV (GLenum primitiveMode, GLuint buffer, const GLintptr *indirects, const GLsizei *sizes, GLuint count);
+GLAPI void APIENTRY glDrawCommandsAddressNV (GLenum primitiveMode, const GLuint64 *indirects, const GLsizei *sizes, GLuint count);
+GLAPI void APIENTRY glDrawCommandsStatesNV (GLuint buffer, const GLintptr *indirects, const GLsizei *sizes, const GLuint *states, const GLuint *fbos, GLuint count);
+GLAPI void APIENTRY glDrawCommandsStatesAddressNV (const GLuint64 *indirects, const GLsizei *sizes, const GLuint *states, const GLuint *fbos, GLuint count);
+GLAPI void APIENTRY glCreateCommandListsNV (GLsizei n, GLuint *lists);
+GLAPI void APIENTRY glDeleteCommandListsNV (GLsizei n, const GLuint *lists);
+GLAPI GLboolean APIENTRY glIsCommandListNV (GLuint list);
+GLAPI void APIENTRY glListDrawCommandsStatesClientNV (GLuint list, GLuint segment, const void **indirects, const GLsizei *sizes, const GLuint *states, const GLuint *fbos, GLuint count);
+GLAPI void APIENTRY glCommandListSegmentsNV (GLuint list, GLuint segments);
+GLAPI void APIENTRY glCompileCommandListNV (GLuint list);
+GLAPI void APIENTRY glCallCommandListNV (GLuint list);
+#endif
+#endif /* GL_NV_command_list */
+
+#ifndef GL_NV_compute_program5
+#define GL_NV_compute_program5 1
+#define GL_COMPUTE_PROGRAM_NV 0x90FB
+#define GL_COMPUTE_PROGRAM_PARAMETER_BUFFER_NV 0x90FC
+#endif /* GL_NV_compute_program5 */
+
+#ifndef GL_NV_compute_shader_derivatives
+#define GL_NV_compute_shader_derivatives 1
+#endif /* GL_NV_compute_shader_derivatives */
+
+#ifndef GL_NV_conditional_render
+#define GL_NV_conditional_render 1
+#define GL_QUERY_WAIT_NV 0x8E13
+#define GL_QUERY_NO_WAIT_NV 0x8E14
+#define GL_QUERY_BY_REGION_WAIT_NV 0x8E15
+#define GL_QUERY_BY_REGION_NO_WAIT_NV 0x8E16
+typedef void (APIENTRYP PFNGLBEGINCONDITIONALRENDERNVPROC) (GLuint id, GLenum mode);
+typedef void (APIENTRYP PFNGLENDCONDITIONALRENDERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginConditionalRenderNV (GLuint id, GLenum mode);
+GLAPI void APIENTRY glEndConditionalRenderNV (void);
+#endif
+#endif /* GL_NV_conditional_render */
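+
+/*
+ * Usage sketch (illustrative; not part of the generated interface above):
+ * discarding a draw when an earlier occlusion query produced no samples.
+ * GL_QUERY_WAIT_NV makes the GL wait for the query result rather than guess.
+ *
+ *   static void draw_if_visible(GLuint occlusionQuery)
+ *   {
+ *       glBeginConditionalRenderNV(occlusionQuery, GL_QUERY_WAIT_NV);
+ *       glDrawArrays(GL_TRIANGLES, 0, 36);       // skipped if nothing passed
+ *       glEndConditionalRenderNV();
+ *   }
+ */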
+
+#ifndef GL_NV_conservative_raster
+#define GL_NV_conservative_raster 1
+#define GL_CONSERVATIVE_RASTERIZATION_NV 0x9346
+#define GL_SUBPIXEL_PRECISION_BIAS_X_BITS_NV 0x9347
+#define GL_SUBPIXEL_PRECISION_BIAS_Y_BITS_NV 0x9348
+#define GL_MAX_SUBPIXEL_PRECISION_BIAS_BITS_NV 0x9349
+typedef void (APIENTRYP PFNGLSUBPIXELPRECISIONBIASNVPROC) (GLuint xbits, GLuint ybits);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSubpixelPrecisionBiasNV (GLuint xbits, GLuint ybits);
+#endif
+#endif /* GL_NV_conservative_raster */
+
+#ifndef GL_NV_conservative_raster_dilate
+#define GL_NV_conservative_raster_dilate 1
+#define GL_CONSERVATIVE_RASTER_DILATE_NV 0x9379
+#define GL_CONSERVATIVE_RASTER_DILATE_RANGE_NV 0x937A
+#define GL_CONSERVATIVE_RASTER_DILATE_GRANULARITY_NV 0x937B
+typedef void (APIENTRYP PFNGLCONSERVATIVERASTERPARAMETERFNVPROC) (GLenum pname, GLfloat value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glConservativeRasterParameterfNV (GLenum pname, GLfloat value);
+#endif
+#endif /* GL_NV_conservative_raster_dilate */
+
+#ifndef GL_NV_conservative_raster_pre_snap
+#define GL_NV_conservative_raster_pre_snap 1
+#define GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_NV 0x9550
+#endif /* GL_NV_conservative_raster_pre_snap */
+
+#ifndef GL_NV_conservative_raster_pre_snap_triangles
+#define GL_NV_conservative_raster_pre_snap_triangles 1
+#define GL_CONSERVATIVE_RASTER_MODE_NV 0x954D
+#define GL_CONSERVATIVE_RASTER_MODE_POST_SNAP_NV 0x954E
+#define GL_CONSERVATIVE_RASTER_MODE_PRE_SNAP_TRIANGLES_NV 0x954F
+typedef void (APIENTRYP PFNGLCONSERVATIVERASTERPARAMETERINVPROC) (GLenum pname, GLint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glConservativeRasterParameteriNV (GLenum pname, GLint param);
+#endif
+#endif /* GL_NV_conservative_raster_pre_snap_triangles */
+
+#ifndef GL_NV_conservative_raster_underestimation
+#define GL_NV_conservative_raster_underestimation 1
+#endif /* GL_NV_conservative_raster_underestimation */
+
+#ifndef GL_NV_copy_depth_to_color
+#define GL_NV_copy_depth_to_color 1
+#define GL_DEPTH_STENCIL_TO_RGBA_NV 0x886E
+#define GL_DEPTH_STENCIL_TO_BGRA_NV 0x886F
+#endif /* GL_NV_copy_depth_to_color */
+
+#ifndef GL_NV_copy_image
+#define GL_NV_copy_image 1
+typedef void (APIENTRYP PFNGLCOPYIMAGESUBDATANVPROC) (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCopyImageSubDataNV (GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei width, GLsizei height, GLsizei depth);
+#endif
+#endif /* GL_NV_copy_image */
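+
+/* Illustrative sketch: copy a 256x256 region between the base levels of two
+ * 2D textures without a framebuffer round trip. 'srcTex' and 'dstTex' are
+ * hypothetical texture names; assumes GL_NV_copy_image is advertised.
+ *
+ *     glCopyImageSubDataNV(srcTex, GL_TEXTURE_2D, 0, 0, 0, 0,
+ *                          dstTex, GL_TEXTURE_2D, 0, 0, 0, 0,
+ *                          256, 256, 1);
+ */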
+
+#ifndef GL_NV_deep_texture3D
+#define GL_NV_deep_texture3D 1
+#define GL_MAX_DEEP_3D_TEXTURE_WIDTH_HEIGHT_NV 0x90D0
+#define GL_MAX_DEEP_3D_TEXTURE_DEPTH_NV 0x90D1
+#endif /* GL_NV_deep_texture3D */
+
+#ifndef GL_NV_depth_buffer_float
+#define GL_NV_depth_buffer_float 1
+#define GL_DEPTH_COMPONENT32F_NV 0x8DAB
+#define GL_DEPTH32F_STENCIL8_NV 0x8DAC
+#define GL_FLOAT_32_UNSIGNED_INT_24_8_REV_NV 0x8DAD
+#define GL_DEPTH_BUFFER_FLOAT_MODE_NV 0x8DAF
+typedef void (APIENTRYP PFNGLDEPTHRANGEDNVPROC) (GLdouble zNear, GLdouble zFar);
+typedef void (APIENTRYP PFNGLCLEARDEPTHDNVPROC) (GLdouble depth);
+typedef void (APIENTRYP PFNGLDEPTHBOUNDSDNVPROC) (GLdouble zmin, GLdouble zmax);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDepthRangedNV (GLdouble zNear, GLdouble zFar);
+GLAPI void APIENTRY glClearDepthdNV (GLdouble depth);
+GLAPI void APIENTRY glDepthBoundsdNV (GLdouble zmin, GLdouble zmax);
+#endif
+#endif /* GL_NV_depth_buffer_float */
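+
+/* Illustrative sketch: unlike core glDepthRange, the 'd'-suffixed entry
+ * points accept unclamped doubles, which is the point of this extension.
+ * Assumes a floating-point depth buffer such as GL_DEPTH_COMPONENT32F_NV.
+ *
+ *     glDepthRangedNV(-1.0, 1.0);   // values outside [0,1] are legal here
+ *     glClearDepthdNV(1.0);
+ */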
+
+#ifndef GL_NV_depth_clamp
+#define GL_NV_depth_clamp 1
+#define GL_DEPTH_CLAMP_NV 0x864F
+#endif /* GL_NV_depth_clamp */
+
+#ifndef GL_NV_draw_texture
+#define GL_NV_draw_texture 1
+typedef void (APIENTRYP PFNGLDRAWTEXTURENVPROC) (GLuint texture, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawTextureNV (GLuint texture, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+#endif
+#endif /* GL_NV_draw_texture */
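+
+/* Illustrative sketch: draw texture level 0 straight to a window-space
+ * rectangle with no geometry. (x0,y0)-(x1,y1) are window coordinates,
+ * (s0,t0)-(s1,t1) the texture sub-rectangle; 'tex' and 'smp' are
+ * hypothetical texture and sampler object names.
+ *
+ *     glDrawTextureNV(tex, smp, 0.0f, 0.0f, 256.0f, 256.0f, 0.5f,
+ *                     0.0f, 0.0f, 1.0f, 1.0f);
+ */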
+
+#ifndef GL_NV_draw_vulkan_image
+#define GL_NV_draw_vulkan_image 1
+typedef void (APIENTRY *GLVULKANPROCNV)(void);
+typedef void (APIENTRYP PFNGLDRAWVKIMAGENVPROC) (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+typedef GLVULKANPROCNV (APIENTRYP PFNGLGETVKPROCADDRNVPROC) (const GLchar *name);
+typedef void (APIENTRYP PFNGLWAITVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
+typedef void (APIENTRYP PFNGLSIGNALVKSEMAPHORENVPROC) (GLuint64 vkSemaphore);
+typedef void (APIENTRYP PFNGLSIGNALVKFENCENVPROC) (GLuint64 vkFence);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawVkImageNV (GLuint64 vkImage, GLuint sampler, GLfloat x0, GLfloat y0, GLfloat x1, GLfloat y1, GLfloat z, GLfloat s0, GLfloat t0, GLfloat s1, GLfloat t1);
+GLAPI GLVULKANPROCNV APIENTRY glGetVkProcAddrNV (const GLchar *name);
+GLAPI void APIENTRY glWaitVkSemaphoreNV (GLuint64 vkSemaphore);
+GLAPI void APIENTRY glSignalVkSemaphoreNV (GLuint64 vkSemaphore);
+GLAPI void APIENTRY glSignalVkFenceNV (GLuint64 vkFence);
+#endif
+#endif /* GL_NV_draw_vulkan_image */
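+
+/* Illustrative sketch of the intended interop flow: wait on a Vulkan
+ * semaphore, present a VkImage handle, then signal a semaphore back to
+ * Vulkan. All three GLuint64 handles are hypothetical values obtained from
+ * a Vulkan instance on the same device.
+ *
+ *     glWaitVkSemaphoreNV(vkAcquireSem);
+ *     glDrawVkImageNV(vkImage, 0, 0.0f, 0.0f, width, height, 0.0f,
+ *                     0.0f, 1.0f, 1.0f, 0.0f);
+ *     glSignalVkSemaphoreNV(vkReleaseSem);
+ */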
+
+#ifndef GL_NV_evaluators
+#define GL_NV_evaluators 1
+#define GL_EVAL_2D_NV 0x86C0
+#define GL_EVAL_TRIANGULAR_2D_NV 0x86C1
+#define GL_MAP_TESSELLATION_NV 0x86C2
+#define GL_MAP_ATTRIB_U_ORDER_NV 0x86C3
+#define GL_MAP_ATTRIB_V_ORDER_NV 0x86C4
+#define GL_EVAL_FRACTIONAL_TESSELLATION_NV 0x86C5
+#define GL_EVAL_VERTEX_ATTRIB0_NV 0x86C6
+#define GL_EVAL_VERTEX_ATTRIB1_NV 0x86C7
+#define GL_EVAL_VERTEX_ATTRIB2_NV 0x86C8
+#define GL_EVAL_VERTEX_ATTRIB3_NV 0x86C9
+#define GL_EVAL_VERTEX_ATTRIB4_NV 0x86CA
+#define GL_EVAL_VERTEX_ATTRIB5_NV 0x86CB
+#define GL_EVAL_VERTEX_ATTRIB6_NV 0x86CC
+#define GL_EVAL_VERTEX_ATTRIB7_NV 0x86CD
+#define GL_EVAL_VERTEX_ATTRIB8_NV 0x86CE
+#define GL_EVAL_VERTEX_ATTRIB9_NV 0x86CF
+#define GL_EVAL_VERTEX_ATTRIB10_NV 0x86D0
+#define GL_EVAL_VERTEX_ATTRIB11_NV 0x86D1
+#define GL_EVAL_VERTEX_ATTRIB12_NV 0x86D2
+#define GL_EVAL_VERTEX_ATTRIB13_NV 0x86D3
+#define GL_EVAL_VERTEX_ATTRIB14_NV 0x86D4
+#define GL_EVAL_VERTEX_ATTRIB15_NV 0x86D5
+#define GL_MAX_MAP_TESSELLATION_NV 0x86D6
+#define GL_MAX_RATIONAL_EVAL_ORDER_NV 0x86D7
+typedef void (APIENTRYP PFNGLMAPCONTROLPOINTSNVPROC) (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLint uorder, GLint vorder, GLboolean packed, const void *points);
+typedef void (APIENTRYP PFNGLMAPPARAMETERIVNVPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLMAPPARAMETERFVNVPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMAPCONTROLPOINTSNVPROC) (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLboolean packed, void *points);
+typedef void (APIENTRYP PFNGLGETMAPPARAMETERIVNVPROC) (GLenum target, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMAPPARAMETERFVNVPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETMAPATTRIBPARAMETERIVNVPROC) (GLenum target, GLuint index, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETMAPATTRIBPARAMETERFVNVPROC) (GLenum target, GLuint index, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLEVALMAPSNVPROC) (GLenum target, GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMapControlPointsNV (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLint uorder, GLint vorder, GLboolean packed, const void *points);
+GLAPI void APIENTRY glMapParameterivNV (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glMapParameterfvNV (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glGetMapControlPointsNV (GLenum target, GLuint index, GLenum type, GLsizei ustride, GLsizei vstride, GLboolean packed, void *points);
+GLAPI void APIENTRY glGetMapParameterivNV (GLenum target, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMapParameterfvNV (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetMapAttribParameterivNV (GLenum target, GLuint index, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetMapAttribParameterfvNV (GLenum target, GLuint index, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glEvalMapsNV (GLenum target, GLenum mode);
+#endif
+#endif /* GL_NV_evaluators */
+
+#ifndef GL_NV_explicit_multisample
+#define GL_NV_explicit_multisample 1
+#define GL_SAMPLE_POSITION_NV 0x8E50
+#define GL_SAMPLE_MASK_NV 0x8E51
+#define GL_SAMPLE_MASK_VALUE_NV 0x8E52
+#define GL_TEXTURE_BINDING_RENDERBUFFER_NV 0x8E53
+#define GL_TEXTURE_RENDERBUFFER_DATA_STORE_BINDING_NV 0x8E54
+#define GL_TEXTURE_RENDERBUFFER_NV 0x8E55
+#define GL_SAMPLER_RENDERBUFFER_NV 0x8E56
+#define GL_INT_SAMPLER_RENDERBUFFER_NV 0x8E57
+#define GL_UNSIGNED_INT_SAMPLER_RENDERBUFFER_NV 0x8E58
+#define GL_MAX_SAMPLE_MASK_WORDS_NV 0x8E59
+typedef void (APIENTRYP PFNGLGETMULTISAMPLEFVNVPROC) (GLenum pname, GLuint index, GLfloat *val);
+typedef void (APIENTRYP PFNGLSAMPLEMASKINDEXEDNVPROC) (GLuint index, GLbitfield mask);
+typedef void (APIENTRYP PFNGLTEXRENDERBUFFERNVPROC) (GLenum target, GLuint renderbuffer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetMultisamplefvNV (GLenum pname, GLuint index, GLfloat *val);
+GLAPI void APIENTRY glSampleMaskIndexedNV (GLuint index, GLbitfield mask);
+GLAPI void APIENTRY glTexRenderbufferNV (GLenum target, GLuint renderbuffer);
+#endif
+#endif /* GL_NV_explicit_multisample */
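+
+/* Illustrative sketch: query the sub-pixel position of sample 0, then mask
+ * off half of the coverage samples. Assumes a multisampled drawable and
+ * that GL_NV_explicit_multisample is advertised.
+ *
+ *     GLfloat pos[2];
+ *     glGetMultisamplefvNV(GL_SAMPLE_POSITION_NV, 0, pos);
+ *     glEnable(GL_SAMPLE_MASK_NV);
+ *     glSampleMaskIndexedNV(0, 0x0F);   // keep only the low four samples
+ */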
+
+#ifndef GL_NV_fence
+#define GL_NV_fence 1
+#define GL_ALL_COMPLETED_NV 0x84F2
+#define GL_FENCE_STATUS_NV 0x84F3
+#define GL_FENCE_CONDITION_NV 0x84F4
+typedef void (APIENTRYP PFNGLDELETEFENCESNVPROC) (GLsizei n, const GLuint *fences);
+typedef void (APIENTRYP PFNGLGENFENCESNVPROC) (GLsizei n, GLuint *fences);
+typedef GLboolean (APIENTRYP PFNGLISFENCENVPROC) (GLuint fence);
+typedef GLboolean (APIENTRYP PFNGLTESTFENCENVPROC) (GLuint fence);
+typedef void (APIENTRYP PFNGLGETFENCEIVNVPROC) (GLuint fence, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLFINISHFENCENVPROC) (GLuint fence);
+typedef void (APIENTRYP PFNGLSETFENCENVPROC) (GLuint fence, GLenum condition);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDeleteFencesNV (GLsizei n, const GLuint *fences);
+GLAPI void APIENTRY glGenFencesNV (GLsizei n, GLuint *fences);
+GLAPI GLboolean APIENTRY glIsFenceNV (GLuint fence);
+GLAPI GLboolean APIENTRY glTestFenceNV (GLuint fence);
+GLAPI void APIENTRY glGetFenceivNV (GLuint fence, GLenum pname, GLint *params);
+GLAPI void APIENTRY glFinishFenceNV (GLuint fence);
+GLAPI void APIENTRY glSetFenceNV (GLuint fence, GLenum condition);
+#endif
+#endif /* GL_NV_fence */
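+
+/* Illustrative sketch: insert a fence after a batch of commands, then block
+ * until everything before it has completed. Assumes GL_NV_fence is
+ * advertised; error handling omitted.
+ *
+ *     GLuint fence;
+ *     glGenFencesNV(1, &fence);
+ *     // ...issue rendering commands...
+ *     glSetFenceNV(fence, GL_ALL_COMPLETED_NV);
+ *     if (!glTestFenceNV(fence))      // poll once, then wait if unsignaled
+ *         glFinishFenceNV(fence);
+ *     glDeleteFencesNV(1, &fence);
+ */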
+
+#ifndef GL_NV_fill_rectangle
+#define GL_NV_fill_rectangle 1
+#define GL_FILL_RECTANGLE_NV 0x933C
+#endif /* GL_NV_fill_rectangle */
+
+#ifndef GL_NV_float_buffer
+#define GL_NV_float_buffer 1
+#define GL_FLOAT_R_NV 0x8880
+#define GL_FLOAT_RG_NV 0x8881
+#define GL_FLOAT_RGB_NV 0x8882
+#define GL_FLOAT_RGBA_NV 0x8883
+#define GL_FLOAT_R16_NV 0x8884
+#define GL_FLOAT_R32_NV 0x8885
+#define GL_FLOAT_RG16_NV 0x8886
+#define GL_FLOAT_RG32_NV 0x8887
+#define GL_FLOAT_RGB16_NV 0x8888
+#define GL_FLOAT_RGB32_NV 0x8889
+#define GL_FLOAT_RGBA16_NV 0x888A
+#define GL_FLOAT_RGBA32_NV 0x888B
+#define GL_TEXTURE_FLOAT_COMPONENTS_NV 0x888C
+#define GL_FLOAT_CLEAR_COLOR_VALUE_NV 0x888D
+#define GL_FLOAT_RGBA_MODE_NV 0x888E
+#endif /* GL_NV_float_buffer */
+
+#ifndef GL_NV_fog_distance
+#define GL_NV_fog_distance 1
+#define GL_FOG_DISTANCE_MODE_NV 0x855A
+#define GL_EYE_RADIAL_NV 0x855B
+#define GL_EYE_PLANE_ABSOLUTE_NV 0x855C
+#endif /* GL_NV_fog_distance */
+
+#ifndef GL_NV_fragment_coverage_to_color
+#define GL_NV_fragment_coverage_to_color 1
+#define GL_FRAGMENT_COVERAGE_TO_COLOR_NV 0x92DD
+#define GL_FRAGMENT_COVERAGE_COLOR_NV 0x92DE
+typedef void (APIENTRYP PFNGLFRAGMENTCOVERAGECOLORNVPROC) (GLuint color);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFragmentCoverageColorNV (GLuint color);
+#endif
+#endif /* GL_NV_fragment_coverage_to_color */
+
+#ifndef GL_NV_fragment_program
+#define GL_NV_fragment_program 1
+#define GL_MAX_FRAGMENT_PROGRAM_LOCAL_PARAMETERS_NV 0x8868
+#define GL_FRAGMENT_PROGRAM_NV 0x8870
+#define GL_MAX_TEXTURE_COORDS_NV 0x8871
+#define GL_MAX_TEXTURE_IMAGE_UNITS_NV 0x8872
+#define GL_FRAGMENT_PROGRAM_BINDING_NV 0x8873
+#define GL_PROGRAM_ERROR_STRING_NV 0x8874
+typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4FNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4FVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, const GLfloat *v);
+typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4DNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLPROGRAMNAMEDPARAMETER4DVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, const GLdouble *v);
+typedef void (APIENTRYP PFNGLGETPROGRAMNAMEDPARAMETERFVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMNAMEDPARAMETERDVNVPROC) (GLuint id, GLsizei len, const GLubyte *name, GLdouble *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramNamedParameter4fNV (GLuint id, GLsizei len, const GLubyte *name, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glProgramNamedParameter4fvNV (GLuint id, GLsizei len, const GLubyte *name, const GLfloat *v);
+GLAPI void APIENTRY glProgramNamedParameter4dNV (GLuint id, GLsizei len, const GLubyte *name, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glProgramNamedParameter4dvNV (GLuint id, GLsizei len, const GLubyte *name, const GLdouble *v);
+GLAPI void APIENTRY glGetProgramNamedParameterfvNV (GLuint id, GLsizei len, const GLubyte *name, GLfloat *params);
+GLAPI void APIENTRY glGetProgramNamedParameterdvNV (GLuint id, GLsizei len, const GLubyte *name, GLdouble *params);
+#endif
+#endif /* GL_NV_fragment_program */
+
+#ifndef GL_NV_fragment_program2
+#define GL_NV_fragment_program2 1
+#define GL_MAX_PROGRAM_EXEC_INSTRUCTIONS_NV 0x88F4
+#define GL_MAX_PROGRAM_CALL_DEPTH_NV 0x88F5
+#define GL_MAX_PROGRAM_IF_DEPTH_NV 0x88F6
+#define GL_MAX_PROGRAM_LOOP_DEPTH_NV 0x88F7
+#define GL_MAX_PROGRAM_LOOP_COUNT_NV 0x88F8
+#endif /* GL_NV_fragment_program2 */
+
+#ifndef GL_NV_fragment_program4
+#define GL_NV_fragment_program4 1
+#endif /* GL_NV_fragment_program4 */
+
+#ifndef GL_NV_fragment_program_option
+#define GL_NV_fragment_program_option 1
+#endif /* GL_NV_fragment_program_option */
+
+#ifndef GL_NV_fragment_shader_barycentric
+#define GL_NV_fragment_shader_barycentric 1
+#endif /* GL_NV_fragment_shader_barycentric */
+
+#ifndef GL_NV_fragment_shader_interlock
+#define GL_NV_fragment_shader_interlock 1
+#endif /* GL_NV_fragment_shader_interlock */
+
+#ifndef GL_NV_framebuffer_mixed_samples
+#define GL_NV_framebuffer_mixed_samples 1
+#define GL_COVERAGE_MODULATION_TABLE_NV 0x9331
+#define GL_COLOR_SAMPLES_NV 0x8E20
+#define GL_DEPTH_SAMPLES_NV 0x932D
+#define GL_STENCIL_SAMPLES_NV 0x932E
+#define GL_MIXED_DEPTH_SAMPLES_SUPPORTED_NV 0x932F
+#define GL_MIXED_STENCIL_SAMPLES_SUPPORTED_NV 0x9330
+#define GL_COVERAGE_MODULATION_NV 0x9332
+#define GL_COVERAGE_MODULATION_TABLE_SIZE_NV 0x9333
+typedef void (APIENTRYP PFNGLCOVERAGEMODULATIONTABLENVPROC) (GLsizei n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLGETCOVERAGEMODULATIONTABLENVPROC) (GLsizei bufsize, GLfloat *v);
+typedef void (APIENTRYP PFNGLCOVERAGEMODULATIONNVPROC) (GLenum components);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCoverageModulationTableNV (GLsizei n, const GLfloat *v);
+GLAPI void APIENTRY glGetCoverageModulationTableNV (GLsizei bufsize, GLfloat *v);
+GLAPI void APIENTRY glCoverageModulationNV (GLenum components);
+#endif
+#endif /* GL_NV_framebuffer_mixed_samples */
+
+#ifndef GL_NV_framebuffer_multisample_coverage
+#define GL_NV_framebuffer_multisample_coverage 1
+#define GL_RENDERBUFFER_COVERAGE_SAMPLES_NV 0x8CAB
+#define GL_RENDERBUFFER_COLOR_SAMPLES_NV 0x8E10
+#define GL_MAX_MULTISAMPLE_COVERAGE_MODES_NV 0x8E11
+#define GL_MULTISAMPLE_COVERAGE_MODES_NV 0x8E12
+typedef void (APIENTRYP PFNGLRENDERBUFFERSTORAGEMULTISAMPLECOVERAGENVPROC) (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glRenderbufferStorageMultisampleCoverageNV (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLenum internalformat, GLsizei width, GLsizei height);
+#endif
+#endif /* GL_NV_framebuffer_multisample_coverage */
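+
+/* Illustrative sketch: allocate a CSAA-style renderbuffer with 8 coverage
+ * samples but only 4 color samples (the coverageSamples/colorSamples split
+ * is what this extension adds over core multisample storage). Assumes the
+ * 8/4 mode appears in GL_MULTISAMPLE_COVERAGE_MODES_NV; 'rbo' is a
+ * hypothetical renderbuffer name.
+ *
+ *     glBindRenderbuffer(GL_RENDERBUFFER, rbo);
+ *     glRenderbufferStorageMultisampleCoverageNV(GL_RENDERBUFFER, 8, 4,
+ *                                                GL_RGBA8, width, height);
+ */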
+
+#ifndef GL_NV_geometry_program4
+#define GL_NV_geometry_program4 1
+#define GL_GEOMETRY_PROGRAM_NV 0x8C26
+#define GL_MAX_PROGRAM_OUTPUT_VERTICES_NV 0x8C27
+#define GL_MAX_PROGRAM_TOTAL_OUTPUT_COMPONENTS_NV 0x8C28
+typedef void (APIENTRYP PFNGLPROGRAMVERTEXLIMITNVPROC) (GLenum target, GLint limit);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level);
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREFACEEXTPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramVertexLimitNV (GLenum target, GLint limit);
+GLAPI void APIENTRY glFramebufferTextureEXT (GLenum target, GLenum attachment, GLuint texture, GLint level);
+GLAPI void APIENTRY glFramebufferTextureFaceEXT (GLenum target, GLenum attachment, GLuint texture, GLint level, GLenum face);
+#endif
+#endif /* GL_NV_geometry_program4 */
+
+#ifndef GL_NV_geometry_shader4
+#define GL_NV_geometry_shader4 1
+#endif /* GL_NV_geometry_shader4 */
+
+#ifndef GL_NV_geometry_shader_passthrough
+#define GL_NV_geometry_shader_passthrough 1
+#endif /* GL_NV_geometry_shader_passthrough */
+
+#ifndef GL_NV_gpu_multicast
+#define GL_NV_gpu_multicast 1
+#define GL_PER_GPU_STORAGE_BIT_NV 0x0800
+#define GL_MULTICAST_GPUS_NV 0x92BA
+#define GL_RENDER_GPU_MASK_NV 0x9558
+#define GL_PER_GPU_STORAGE_NV 0x9548
+#define GL_MULTICAST_PROGRAMMABLE_SAMPLE_LOCATION_NV 0x9549
+typedef void (APIENTRYP PFNGLRENDERGPUMASKNVPROC) (GLbitfield mask);
+typedef void (APIENTRYP PFNGLMULTICASTBUFFERSUBDATANVPROC) (GLbitfield gpuMask, GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+typedef void (APIENTRYP PFNGLMULTICASTCOPYBUFFERSUBDATANVPROC) (GLuint readGpu, GLbitfield writeGpuMask, GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLMULTICASTCOPYIMAGESUBDATANVPROC) (GLuint srcGpu, GLbitfield dstGpuMask, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
+typedef void (APIENTRYP PFNGLMULTICASTBLITFRAMEBUFFERNVPROC) (GLuint srcGpu, GLuint dstGpu, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+typedef void (APIENTRYP PFNGLMULTICASTFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLuint gpu, GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLMULTICASTBARRIERNVPROC) (void);
+typedef void (APIENTRYP PFNGLMULTICASTWAITSYNCNVPROC) (GLuint signalGpu, GLbitfield waitGpuMask);
+typedef void (APIENTRYP PFNGLMULTICASTGETQUERYOBJECTIVNVPROC) (GLuint gpu, GLuint id, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLMULTICASTGETQUERYOBJECTUIVNVPROC) (GLuint gpu, GLuint id, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLMULTICASTGETQUERYOBJECTI64VNVPROC) (GLuint gpu, GLuint id, GLenum pname, GLint64 *params);
+typedef void (APIENTRYP PFNGLMULTICASTGETQUERYOBJECTUI64VNVPROC) (GLuint gpu, GLuint id, GLenum pname, GLuint64 *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glRenderGpuMaskNV (GLbitfield mask);
+GLAPI void APIENTRY glMulticastBufferSubDataNV (GLbitfield gpuMask, GLuint buffer, GLintptr offset, GLsizeiptr size, const void *data);
+GLAPI void APIENTRY glMulticastCopyBufferSubDataNV (GLuint readGpu, GLbitfield writeGpuMask, GLuint readBuffer, GLuint writeBuffer, GLintptr readOffset, GLintptr writeOffset, GLsizeiptr size);
+GLAPI void APIENTRY glMulticastCopyImageSubDataNV (GLuint srcGpu, GLbitfield dstGpuMask, GLuint srcName, GLenum srcTarget, GLint srcLevel, GLint srcX, GLint srcY, GLint srcZ, GLuint dstName, GLenum dstTarget, GLint dstLevel, GLint dstX, GLint dstY, GLint dstZ, GLsizei srcWidth, GLsizei srcHeight, GLsizei srcDepth);
+GLAPI void APIENTRY glMulticastBlitFramebufferNV (GLuint srcGpu, GLuint dstGpu, GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, GLbitfield mask, GLenum filter);
+GLAPI void APIENTRY glMulticastFramebufferSampleLocationsfvNV (GLuint gpu, GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glMulticastBarrierNV (void);
+GLAPI void APIENTRY glMulticastWaitSyncNV (GLuint signalGpu, GLbitfield waitGpuMask);
+GLAPI void APIENTRY glMulticastGetQueryObjectivNV (GLuint gpu, GLuint id, GLenum pname, GLint *params);
+GLAPI void APIENTRY glMulticastGetQueryObjectuivNV (GLuint gpu, GLuint id, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glMulticastGetQueryObjecti64vNV (GLuint gpu, GLuint id, GLenum pname, GLint64 *params);
+GLAPI void APIENTRY glMulticastGetQueryObjectui64vNV (GLuint gpu, GLuint id, GLenum pname, GLuint64 *params);
+#endif
+#endif /* GL_NV_gpu_multicast */
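+
+/* Illustrative sketch: direct work to one GPU at a time, then synchronize.
+ * Bit N of the mask selects GPU N; assumes a multi-GPU context where
+ * GL_NV_gpu_multicast is advertised.
+ *
+ *     glRenderGpuMaskNV(0x1);   // subsequent commands go to GPU 0 only
+ *     // ...draw left eye...
+ *     glRenderGpuMaskNV(0x2);   // GPU 1 only
+ *     // ...draw right eye...
+ *     glMulticastBarrierNV();   // all GPUs catch up before work continues
+ */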
+
+#ifndef GL_NV_gpu_program4
+#define GL_NV_gpu_program4 1
+#define GL_MIN_PROGRAM_TEXEL_OFFSET_NV 0x8904
+#define GL_MAX_PROGRAM_TEXEL_OFFSET_NV 0x8905
+#define GL_PROGRAM_ATTRIB_COMPONENTS_NV 0x8906
+#define GL_PROGRAM_RESULT_COMPONENTS_NV 0x8907
+#define GL_MAX_PROGRAM_ATTRIB_COMPONENTS_NV 0x8908
+#define GL_MAX_PROGRAM_RESULT_COMPONENTS_NV 0x8909
+#define GL_MAX_PROGRAM_GENERIC_ATTRIBS_NV 0x8DA5
+#define GL_MAX_PROGRAM_GENERIC_RESULTS_NV 0x8DA6
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4INVPROC) (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4IVNVPROC) (GLenum target, GLuint index, const GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERSI4IVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4UINVPROC) (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERI4UIVNVPROC) (GLenum target, GLuint index, const GLuint *params);
+typedef void (APIENTRYP PFNGLPROGRAMLOCALPARAMETERSI4UIVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLuint *params);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4INVPROC) (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4IVNVPROC) (GLenum target, GLuint index, const GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERSI4IVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4UINVPROC) (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERI4UIVNVPROC) (GLenum target, GLuint index, const GLuint *params);
+typedef void (APIENTRYP PFNGLPROGRAMENVPARAMETERSI4UIVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERIIVNVPROC) (GLenum target, GLuint index, GLint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMLOCALPARAMETERIUIVNVPROC) (GLenum target, GLuint index, GLuint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERIIVNVPROC) (GLenum target, GLuint index, GLint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMENVPARAMETERIUIVNVPROC) (GLenum target, GLuint index, GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramLocalParameterI4iNV (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glProgramLocalParameterI4ivNV (GLenum target, GLuint index, const GLint *params);
+GLAPI void APIENTRY glProgramLocalParametersI4ivNV (GLenum target, GLuint index, GLsizei count, const GLint *params);
+GLAPI void APIENTRY glProgramLocalParameterI4uiNV (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+GLAPI void APIENTRY glProgramLocalParameterI4uivNV (GLenum target, GLuint index, const GLuint *params);
+GLAPI void APIENTRY glProgramLocalParametersI4uivNV (GLenum target, GLuint index, GLsizei count, const GLuint *params);
+GLAPI void APIENTRY glProgramEnvParameterI4iNV (GLenum target, GLuint index, GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glProgramEnvParameterI4ivNV (GLenum target, GLuint index, const GLint *params);
+GLAPI void APIENTRY glProgramEnvParametersI4ivNV (GLenum target, GLuint index, GLsizei count, const GLint *params);
+GLAPI void APIENTRY glProgramEnvParameterI4uiNV (GLenum target, GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+GLAPI void APIENTRY glProgramEnvParameterI4uivNV (GLenum target, GLuint index, const GLuint *params);
+GLAPI void APIENTRY glProgramEnvParametersI4uivNV (GLenum target, GLuint index, GLsizei count, const GLuint *params);
+GLAPI void APIENTRY glGetProgramLocalParameterIivNV (GLenum target, GLuint index, GLint *params);
+GLAPI void APIENTRY glGetProgramLocalParameterIuivNV (GLenum target, GLuint index, GLuint *params);
+GLAPI void APIENTRY glGetProgramEnvParameterIivNV (GLenum target, GLuint index, GLint *params);
+GLAPI void APIENTRY glGetProgramEnvParameterIuivNV (GLenum target, GLuint index, GLuint *params);
+#endif
+#endif /* GL_NV_gpu_program4 */
+
+#ifndef GL_NV_gpu_program5
+#define GL_NV_gpu_program5 1
+#define GL_MAX_GEOMETRY_PROGRAM_INVOCATIONS_NV 0x8E5A
+#define GL_MIN_FRAGMENT_INTERPOLATION_OFFSET_NV 0x8E5B
+#define GL_MAX_FRAGMENT_INTERPOLATION_OFFSET_NV 0x8E5C
+#define GL_FRAGMENT_PROGRAM_INTERPOLATION_OFFSET_BITS_NV 0x8E5D
+#define GL_MIN_PROGRAM_TEXTURE_GATHER_OFFSET_NV 0x8E5E
+#define GL_MAX_PROGRAM_TEXTURE_GATHER_OFFSET_NV 0x8E5F
+#define GL_MAX_PROGRAM_SUBROUTINE_PARAMETERS_NV 0x8F44
+#define GL_MAX_PROGRAM_SUBROUTINE_NUM_NV 0x8F45
+typedef void (APIENTRYP PFNGLPROGRAMSUBROUTINEPARAMETERSUIVNVPROC) (GLenum target, GLsizei count, const GLuint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMSUBROUTINEPARAMETERUIVNVPROC) (GLenum target, GLuint index, GLuint *param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramSubroutineParametersuivNV (GLenum target, GLsizei count, const GLuint *params);
+GLAPI void APIENTRY glGetProgramSubroutineParameteruivNV (GLenum target, GLuint index, GLuint *param);
+#endif
+#endif /* GL_NV_gpu_program5 */
+
+#ifndef GL_NV_gpu_program5_mem_extended
+#define GL_NV_gpu_program5_mem_extended 1
+#endif /* GL_NV_gpu_program5_mem_extended */
+
+#ifndef GL_NV_gpu_shader5
+#define GL_NV_gpu_shader5 1
+#endif /* GL_NV_gpu_shader5 */
+
+#ifndef GL_NV_half_float
+#define GL_NV_half_float 1
+typedef unsigned short GLhalfNV;
+#define GL_HALF_FLOAT_NV 0x140B
+typedef void (APIENTRYP PFNGLVERTEX2HNVPROC) (GLhalfNV x, GLhalfNV y);
+typedef void (APIENTRYP PFNGLVERTEX2HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEX3HNVPROC) (GLhalfNV x, GLhalfNV y, GLhalfNV z);
+typedef void (APIENTRYP PFNGLVERTEX3HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEX4HNVPROC) (GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w);
+typedef void (APIENTRYP PFNGLVERTEX4HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLNORMAL3HNVPROC) (GLhalfNV nx, GLhalfNV ny, GLhalfNV nz);
+typedef void (APIENTRYP PFNGLNORMAL3HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
+typedef void (APIENTRYP PFNGLCOLOR3HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLCOLOR4HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue, GLhalfNV alpha);
+typedef void (APIENTRYP PFNGLCOLOR4HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLTEXCOORD1HNVPROC) (GLhalfNV s);
+typedef void (APIENTRYP PFNGLTEXCOORD1HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLTEXCOORD2HNVPROC) (GLhalfNV s, GLhalfNV t);
+typedef void (APIENTRYP PFNGLTEXCOORD2HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLTEXCOORD3HNVPROC) (GLhalfNV s, GLhalfNV t, GLhalfNV r);
+typedef void (APIENTRYP PFNGLTEXCOORD3HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLTEXCOORD4HNVPROC) (GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
+typedef void (APIENTRYP PFNGLTEXCOORD4HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1HNVPROC) (GLenum target, GLhalfNV s);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD1HVNVPROC) (GLenum target, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD2HVNVPROC) (GLenum target, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD3HVNVPROC) (GLenum target, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4HNVPROC) (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
+typedef void (APIENTRYP PFNGLMULTITEXCOORD4HVNVPROC) (GLenum target, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLFOGCOORDHNVPROC) (GLhalfNV fog);
+typedef void (APIENTRYP PFNGLFOGCOORDHVNVPROC) (const GLhalfNV *fog);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HNVPROC) (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
+typedef void (APIENTRYP PFNGLSECONDARYCOLOR3HVNVPROC) (const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTHNVPROC) (GLhalfNV weight);
+typedef void (APIENTRYP PFNGLVERTEXWEIGHTHVNVPROC) (const GLhalfNV *weight);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1HNVPROC) (GLuint index, GLhalfNV x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1HVNVPROC) (GLuint index, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2HVNVPROC) (GLuint index, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3HVNVPROC) (GLuint index, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4HNVPROC) (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4HVNVPROC) (GLuint index, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS1HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS2HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS3HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS4HVNVPROC) (GLuint index, GLsizei n, const GLhalfNV *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertex2hNV (GLhalfNV x, GLhalfNV y);
+GLAPI void APIENTRY glVertex2hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glVertex3hNV (GLhalfNV x, GLhalfNV y, GLhalfNV z);
+GLAPI void APIENTRY glVertex3hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glVertex4hNV (GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w);
+GLAPI void APIENTRY glVertex4hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glNormal3hNV (GLhalfNV nx, GLhalfNV ny, GLhalfNV nz);
+GLAPI void APIENTRY glNormal3hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
+GLAPI void APIENTRY glColor3hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glColor4hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue, GLhalfNV alpha);
+GLAPI void APIENTRY glColor4hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glTexCoord1hNV (GLhalfNV s);
+GLAPI void APIENTRY glTexCoord1hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glTexCoord2hNV (GLhalfNV s, GLhalfNV t);
+GLAPI void APIENTRY glTexCoord2hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glTexCoord3hNV (GLhalfNV s, GLhalfNV t, GLhalfNV r);
+GLAPI void APIENTRY glTexCoord3hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glTexCoord4hNV (GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
+GLAPI void APIENTRY glTexCoord4hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glMultiTexCoord1hNV (GLenum target, GLhalfNV s);
+GLAPI void APIENTRY glMultiTexCoord1hvNV (GLenum target, const GLhalfNV *v);
+GLAPI void APIENTRY glMultiTexCoord2hNV (GLenum target, GLhalfNV s, GLhalfNV t);
+GLAPI void APIENTRY glMultiTexCoord2hvNV (GLenum target, const GLhalfNV *v);
+GLAPI void APIENTRY glMultiTexCoord3hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r);
+GLAPI void APIENTRY glMultiTexCoord3hvNV (GLenum target, const GLhalfNV *v);
+GLAPI void APIENTRY glMultiTexCoord4hNV (GLenum target, GLhalfNV s, GLhalfNV t, GLhalfNV r, GLhalfNV q);
+GLAPI void APIENTRY glMultiTexCoord4hvNV (GLenum target, const GLhalfNV *v);
+GLAPI void APIENTRY glFogCoordhNV (GLhalfNV fog);
+GLAPI void APIENTRY glFogCoordhvNV (const GLhalfNV *fog);
+GLAPI void APIENTRY glSecondaryColor3hNV (GLhalfNV red, GLhalfNV green, GLhalfNV blue);
+GLAPI void APIENTRY glSecondaryColor3hvNV (const GLhalfNV *v);
+GLAPI void APIENTRY glVertexWeighthNV (GLhalfNV weight);
+GLAPI void APIENTRY glVertexWeighthvNV (const GLhalfNV *weight);
+GLAPI void APIENTRY glVertexAttrib1hNV (GLuint index, GLhalfNV x);
+GLAPI void APIENTRY glVertexAttrib1hvNV (GLuint index, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttrib2hNV (GLuint index, GLhalfNV x, GLhalfNV y);
+GLAPI void APIENTRY glVertexAttrib2hvNV (GLuint index, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttrib3hNV (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z);
+GLAPI void APIENTRY glVertexAttrib3hvNV (GLuint index, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttrib4hNV (GLuint index, GLhalfNV x, GLhalfNV y, GLhalfNV z, GLhalfNV w);
+GLAPI void APIENTRY glVertexAttrib4hvNV (GLuint index, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttribs1hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttribs2hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttribs3hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
+GLAPI void APIENTRY glVertexAttribs4hvNV (GLuint index, GLsizei n, const GLhalfNV *v);
+#endif
+#endif /* GL_NV_half_float */
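+
+/* Illustrative sketch: GLhalfNV is a raw 16-bit half-float bit pattern, so
+ * in practice these entry points are fed with data already encoded as
+ * halves; the common path is the GL_HALF_FLOAT_NV vertex array type rather
+ * than per-vertex immediate-mode calls. 'positions' is a hypothetical
+ * tightly packed array of half-float xyz triples.
+ *
+ *     glVertexAttribPointer(0, 3, GL_HALF_FLOAT_NV, GL_FALSE, 0, positions);
+ */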
+
+#ifndef GL_NV_internalformat_sample_query
+#define GL_NV_internalformat_sample_query 1
+#define GL_MULTISAMPLES_NV 0x9371
+#define GL_SUPERSAMPLE_SCALE_X_NV 0x9372
+#define GL_SUPERSAMPLE_SCALE_Y_NV 0x9373
+#define GL_CONFORMANT_NV 0x9374
+typedef void (APIENTRYP PFNGLGETINTERNALFORMATSAMPLEIVNVPROC) (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetInternalformatSampleivNV (GLenum target, GLenum internalformat, GLsizei samples, GLenum pname, GLsizei bufSize, GLint *params);
+#endif
+#endif /* GL_NV_internalformat_sample_query */
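+
+/* Illustrative sketch: ask whether 8-sample RGBA8 multisample textures are
+ * conformant on this implementation.
+ *
+ *     GLint conformant = 0;
+ *     glGetInternalformatSampleivNV(GL_TEXTURE_2D_MULTISAMPLE, GL_RGBA8, 8,
+ *                                   GL_CONFORMANT_NV, 1, &conformant);
+ */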
+
+#ifndef GL_NV_light_max_exponent
+#define GL_NV_light_max_exponent 1
+#define GL_MAX_SHININESS_NV 0x8504
+#define GL_MAX_SPOT_EXPONENT_NV 0x8505
+#endif /* GL_NV_light_max_exponent */
+
+#ifndef GL_NV_memory_attachment
+#define GL_NV_memory_attachment 1
+#define GL_ATTACHED_MEMORY_OBJECT_NV 0x95A4
+#define GL_ATTACHED_MEMORY_OFFSET_NV 0x95A5
+#define GL_MEMORY_ATTACHABLE_ALIGNMENT_NV 0x95A6
+#define GL_MEMORY_ATTACHABLE_SIZE_NV 0x95A7
+#define GL_MEMORY_ATTACHABLE_NV 0x95A8
+#define GL_DETACHED_MEMORY_INCARNATION_NV 0x95A9
+#define GL_DETACHED_TEXTURES_NV 0x95AA
+#define GL_DETACHED_BUFFERS_NV 0x95AB
+#define GL_MAX_DETACHED_TEXTURES_NV 0x95AC
+#define GL_MAX_DETACHED_BUFFERS_NV 0x95AD
+typedef void (APIENTRYP PFNGLGETMEMORYOBJECTDETACHEDRESOURCESUIVNVPROC) (GLuint memory, GLenum pname, GLint first, GLsizei count, GLuint *params);
+typedef void (APIENTRYP PFNGLRESETMEMORYOBJECTPARAMETERNVPROC) (GLuint memory, GLenum pname);
+typedef void (APIENTRYP PFNGLTEXATTACHMEMORYNVPROC) (GLenum target, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLBUFFERATTACHMEMORYNVPROC) (GLenum target, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLTEXTUREATTACHMEMORYNVPROC) (GLuint texture, GLuint memory, GLuint64 offset);
+typedef void (APIENTRYP PFNGLNAMEDBUFFERATTACHMEMORYNVPROC) (GLuint buffer, GLuint memory, GLuint64 offset);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetMemoryObjectDetachedResourcesuivNV (GLuint memory, GLenum pname, GLint first, GLsizei count, GLuint *params);
+GLAPI void APIENTRY glResetMemoryObjectParameterNV (GLuint memory, GLenum pname);
+GLAPI void APIENTRY glTexAttachMemoryNV (GLenum target, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glBufferAttachMemoryNV (GLenum target, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glTextureAttachMemoryNV (GLuint texture, GLuint memory, GLuint64 offset);
+GLAPI void APIENTRY glNamedBufferAttachMemoryNV (GLuint buffer, GLuint memory, GLuint64 offset);
+#endif
+#endif /* GL_NV_memory_attachment */
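+
+/* Illustrative sketch: back a buffer object with imported memory at offset
+ * zero. The memory object itself comes from GL_EXT_memory_object; 'buffer'
+ * and 'memObj' are hypothetical object names.
+ *
+ *     glNamedBufferAttachMemoryNV(buffer, memObj, 0);   // offset 0
+ */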
+
+#ifndef GL_NV_mesh_shader
+#define GL_NV_mesh_shader 1
+#define GL_MESH_SHADER_NV 0x9559
+#define GL_TASK_SHADER_NV 0x955A
+#define GL_MAX_MESH_UNIFORM_BLOCKS_NV 0x8E60
+#define GL_MAX_MESH_TEXTURE_IMAGE_UNITS_NV 0x8E61
+#define GL_MAX_MESH_IMAGE_UNIFORMS_NV 0x8E62
+#define GL_MAX_MESH_UNIFORM_COMPONENTS_NV 0x8E63
+#define GL_MAX_MESH_ATOMIC_COUNTER_BUFFERS_NV 0x8E64
+#define GL_MAX_MESH_ATOMIC_COUNTERS_NV 0x8E65
+#define GL_MAX_MESH_SHADER_STORAGE_BLOCKS_NV 0x8E66
+#define GL_MAX_COMBINED_MESH_UNIFORM_COMPONENTS_NV 0x8E67
+#define GL_MAX_TASK_UNIFORM_BLOCKS_NV 0x8E68
+#define GL_MAX_TASK_TEXTURE_IMAGE_UNITS_NV 0x8E69
+#define GL_MAX_TASK_IMAGE_UNIFORMS_NV 0x8E6A
+#define GL_MAX_TASK_UNIFORM_COMPONENTS_NV 0x8E6B
+#define GL_MAX_TASK_ATOMIC_COUNTER_BUFFERS_NV 0x8E6C
+#define GL_MAX_TASK_ATOMIC_COUNTERS_NV 0x8E6D
+#define GL_MAX_TASK_SHADER_STORAGE_BLOCKS_NV 0x8E6E
+#define GL_MAX_COMBINED_TASK_UNIFORM_COMPONENTS_NV 0x8E6F
+#define GL_MAX_MESH_WORK_GROUP_INVOCATIONS_NV 0x95A2
+#define GL_MAX_TASK_WORK_GROUP_INVOCATIONS_NV 0x95A3
+#define GL_MAX_MESH_TOTAL_MEMORY_SIZE_NV 0x9536
+#define GL_MAX_TASK_TOTAL_MEMORY_SIZE_NV 0x9537
+#define GL_MAX_MESH_OUTPUT_VERTICES_NV 0x9538
+#define GL_MAX_MESH_OUTPUT_PRIMITIVES_NV 0x9539
+#define GL_MAX_TASK_OUTPUT_COUNT_NV 0x953A
+#define GL_MAX_DRAW_MESH_TASKS_COUNT_NV 0x953D
+#define GL_MAX_MESH_VIEWS_NV 0x9557
+#define GL_MESH_OUTPUT_PER_VERTEX_GRANULARITY_NV 0x92DF
+#define GL_MESH_OUTPUT_PER_PRIMITIVE_GRANULARITY_NV 0x9543
+#define GL_MAX_MESH_WORK_GROUP_SIZE_NV 0x953B
+#define GL_MAX_TASK_WORK_GROUP_SIZE_NV 0x953C
+#define GL_MESH_WORK_GROUP_SIZE_NV 0x953E
+#define GL_TASK_WORK_GROUP_SIZE_NV 0x953F
+#define GL_MESH_VERTICES_OUT_NV 0x9579
+#define GL_MESH_PRIMITIVES_OUT_NV 0x957A
+#define GL_MESH_OUTPUT_TYPE_NV 0x957B
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_MESH_SHADER_NV 0x959C
+#define GL_UNIFORM_BLOCK_REFERENCED_BY_TASK_SHADER_NV 0x959D
+#define GL_REFERENCED_BY_MESH_SHADER_NV 0x95A0
+#define GL_REFERENCED_BY_TASK_SHADER_NV 0x95A1
+#define GL_MESH_SHADER_BIT_NV 0x00000040
+#define GL_TASK_SHADER_BIT_NV 0x00000080
+#define GL_MESH_SUBROUTINE_NV 0x957C
+#define GL_TASK_SUBROUTINE_NV 0x957D
+#define GL_MESH_SUBROUTINE_UNIFORM_NV 0x957E
+#define GL_TASK_SUBROUTINE_UNIFORM_NV 0x957F
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_MESH_SHADER_NV 0x959E
+#define GL_ATOMIC_COUNTER_BUFFER_REFERENCED_BY_TASK_SHADER_NV 0x959F
+typedef void (APIENTRYP PFNGLDRAWMESHTASKSNVPROC) (GLuint first, GLuint count);
+typedef void (APIENTRYP PFNGLDRAWMESHTASKSINDIRECTNVPROC) (GLintptr indirect);
+typedef void (APIENTRYP PFNGLMULTIDRAWMESHTASKSINDIRECTNVPROC) (GLintptr indirect, GLsizei drawcount, GLsizei stride);
+typedef void (APIENTRYP PFNGLMULTIDRAWMESHTASKSINDIRECTCOUNTNVPROC) (GLintptr indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawMeshTasksNV (GLuint first, GLuint count);
+GLAPI void APIENTRY glDrawMeshTasksIndirectNV (GLintptr indirect);
+GLAPI void APIENTRY glMultiDrawMeshTasksIndirectNV (GLintptr indirect, GLsizei drawcount, GLsizei stride);
+GLAPI void APIENTRY glMultiDrawMeshTasksIndirectCountNV (GLintptr indirect, GLintptr drawcount, GLsizei maxdrawcount, GLsizei stride);
+#endif
+#endif /* GL_NV_mesh_shader */
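+
+/* Illustrative sketch: launch 64 mesh (or task) shader workgroups. Assumes
+ * GL_NV_mesh_shader is advertised and a program containing a mesh stage is
+ * currently bound.
+ *
+ *     glDrawMeshTasksNV(0, 64);   // first = 0, count = 64 workgroups
+ */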
+
+#ifndef GL_NV_multisample_coverage
+#define GL_NV_multisample_coverage 1
+#endif /* GL_NV_multisample_coverage */
+
+#ifndef GL_NV_multisample_filter_hint
+#define GL_NV_multisample_filter_hint 1
+#define GL_MULTISAMPLE_FILTER_HINT_NV 0x8534
+#endif /* GL_NV_multisample_filter_hint */
+
+#ifndef GL_NV_occlusion_query
+#define GL_NV_occlusion_query 1
+#define GL_PIXEL_COUNTER_BITS_NV 0x8864
+#define GL_CURRENT_OCCLUSION_QUERY_ID_NV 0x8865
+#define GL_PIXEL_COUNT_NV 0x8866
+#define GL_PIXEL_COUNT_AVAILABLE_NV 0x8867
+typedef void (APIENTRYP PFNGLGENOCCLUSIONQUERIESNVPROC) (GLsizei n, GLuint *ids);
+typedef void (APIENTRYP PFNGLDELETEOCCLUSIONQUERIESNVPROC) (GLsizei n, const GLuint *ids);
+typedef GLboolean (APIENTRYP PFNGLISOCCLUSIONQUERYNVPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLBEGINOCCLUSIONQUERYNVPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLENDOCCLUSIONQUERYNVPROC) (void);
+typedef void (APIENTRYP PFNGLGETOCCLUSIONQUERYIVNVPROC) (GLuint id, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETOCCLUSIONQUERYUIVNVPROC) (GLuint id, GLenum pname, GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenOcclusionQueriesNV (GLsizei n, GLuint *ids);
+GLAPI void APIENTRY glDeleteOcclusionQueriesNV (GLsizei n, const GLuint *ids);
+GLAPI GLboolean APIENTRY glIsOcclusionQueryNV (GLuint id);
+GLAPI void APIENTRY glBeginOcclusionQueryNV (GLuint id);
+GLAPI void APIENTRY glEndOcclusionQueryNV (void);
+GLAPI void APIENTRY glGetOcclusionQueryivNV (GLuint id, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetOcclusionQueryuivNV (GLuint id, GLenum pname, GLuint *params);
+#endif
+#endif /* GL_NV_occlusion_query */
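+
+/* Illustrative sketch: count the samples that pass the depth test for a
+ * bounding box, polling for availability instead of stalling the pipeline.
+ * Assumes GL_NV_occlusion_query is advertised.
+ *
+ *     GLuint id, available = 0, pixelCount;
+ *     glGenOcclusionQueriesNV(1, &id);
+ *     glBeginOcclusionQueryNV(id);
+ *     // ...draw bounding box...
+ *     glEndOcclusionQueryNV();
+ *     while (!available)
+ *         glGetOcclusionQueryuivNV(id, GL_PIXEL_COUNT_AVAILABLE_NV, &available);
+ *     glGetOcclusionQueryuivNV(id, GL_PIXEL_COUNT_NV, &pixelCount);
+ */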
+
+#ifndef GL_NV_packed_depth_stencil
+#define GL_NV_packed_depth_stencil 1
+#define GL_DEPTH_STENCIL_NV 0x84F9
+#define GL_UNSIGNED_INT_24_8_NV 0x84FA
+#endif /* GL_NV_packed_depth_stencil */
+
+#ifndef GL_NV_parameter_buffer_object
+#define GL_NV_parameter_buffer_object 1
+#define GL_MAX_PROGRAM_PARAMETER_BUFFER_BINDINGS_NV 0x8DA0
+#define GL_MAX_PROGRAM_PARAMETER_BUFFER_SIZE_NV 0x8DA1
+#define GL_VERTEX_PROGRAM_PARAMETER_BUFFER_NV 0x8DA2
+#define GL_GEOMETRY_PROGRAM_PARAMETER_BUFFER_NV 0x8DA3
+#define GL_FRAGMENT_PROGRAM_PARAMETER_BUFFER_NV 0x8DA4
+typedef void (APIENTRYP PFNGLPROGRAMBUFFERPARAMETERSFVNVPROC) (GLenum target, GLuint bindingIndex, GLuint wordIndex, GLsizei count, const GLfloat *params);
+typedef void (APIENTRYP PFNGLPROGRAMBUFFERPARAMETERSIIVNVPROC) (GLenum target, GLuint bindingIndex, GLuint wordIndex, GLsizei count, const GLint *params);
+typedef void (APIENTRYP PFNGLPROGRAMBUFFERPARAMETERSIUIVNVPROC) (GLenum target, GLuint bindingIndex, GLuint wordIndex, GLsizei count, const GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glProgramBufferParametersfvNV (GLenum target, GLuint bindingIndex, GLuint wordIndex, GLsizei count, const GLfloat *params);
+GLAPI void APIENTRY glProgramBufferParametersIivNV (GLenum target, GLuint bindingIndex, GLuint wordIndex, GLsizei count, const GLint *params);
+GLAPI void APIENTRY glProgramBufferParametersIuivNV (GLenum target, GLuint bindingIndex, GLuint wordIndex, GLsizei count, const GLuint *params);
+#endif
+#endif /* GL_NV_parameter_buffer_object */
+
+#ifndef GL_NV_parameter_buffer_object2
+#define GL_NV_parameter_buffer_object2 1
+#endif /* GL_NV_parameter_buffer_object2 */
+
+#ifndef GL_NV_path_rendering
+#define GL_NV_path_rendering 1
+#define GL_PATH_FORMAT_SVG_NV 0x9070
+#define GL_PATH_FORMAT_PS_NV 0x9071
+#define GL_STANDARD_FONT_NAME_NV 0x9072
+#define GL_SYSTEM_FONT_NAME_NV 0x9073
+#define GL_FILE_NAME_NV 0x9074
+#define GL_PATH_STROKE_WIDTH_NV 0x9075
+#define GL_PATH_END_CAPS_NV 0x9076
+#define GL_PATH_INITIAL_END_CAP_NV 0x9077
+#define GL_PATH_TERMINAL_END_CAP_NV 0x9078
+#define GL_PATH_JOIN_STYLE_NV 0x9079
+#define GL_PATH_MITER_LIMIT_NV 0x907A
+#define GL_PATH_DASH_CAPS_NV 0x907B
+#define GL_PATH_INITIAL_DASH_CAP_NV 0x907C
+#define GL_PATH_TERMINAL_DASH_CAP_NV 0x907D
+#define GL_PATH_DASH_OFFSET_NV 0x907E
+#define GL_PATH_CLIENT_LENGTH_NV 0x907F
+#define GL_PATH_FILL_MODE_NV 0x9080
+#define GL_PATH_FILL_MASK_NV 0x9081
+#define GL_PATH_FILL_COVER_MODE_NV 0x9082
+#define GL_PATH_STROKE_COVER_MODE_NV 0x9083
+#define GL_PATH_STROKE_MASK_NV 0x9084
+#define GL_COUNT_UP_NV 0x9088
+#define GL_COUNT_DOWN_NV 0x9089
+#define GL_PATH_OBJECT_BOUNDING_BOX_NV 0x908A
+#define GL_CONVEX_HULL_NV 0x908B
+#define GL_BOUNDING_BOX_NV 0x908D
+#define GL_TRANSLATE_X_NV 0x908E
+#define GL_TRANSLATE_Y_NV 0x908F
+#define GL_TRANSLATE_2D_NV 0x9090
+#define GL_TRANSLATE_3D_NV 0x9091
+#define GL_AFFINE_2D_NV 0x9092
+#define GL_AFFINE_3D_NV 0x9094
+#define GL_TRANSPOSE_AFFINE_2D_NV 0x9096
+#define GL_TRANSPOSE_AFFINE_3D_NV 0x9098
+#define GL_UTF8_NV 0x909A
+#define GL_UTF16_NV 0x909B
+#define GL_BOUNDING_BOX_OF_BOUNDING_BOXES_NV 0x909C
+#define GL_PATH_COMMAND_COUNT_NV 0x909D
+#define GL_PATH_COORD_COUNT_NV 0x909E
+#define GL_PATH_DASH_ARRAY_COUNT_NV 0x909F
+#define GL_PATH_COMPUTED_LENGTH_NV 0x90A0
+#define GL_PATH_FILL_BOUNDING_BOX_NV 0x90A1
+#define GL_PATH_STROKE_BOUNDING_BOX_NV 0x90A2
+#define GL_SQUARE_NV 0x90A3
+#define GL_ROUND_NV 0x90A4
+#define GL_TRIANGULAR_NV 0x90A5
+#define GL_BEVEL_NV 0x90A6
+#define GL_MITER_REVERT_NV 0x90A7
+#define GL_MITER_TRUNCATE_NV 0x90A8
+#define GL_SKIP_MISSING_GLYPH_NV 0x90A9
+#define GL_USE_MISSING_GLYPH_NV 0x90AA
+#define GL_PATH_ERROR_POSITION_NV 0x90AB
+#define GL_ACCUM_ADJACENT_PAIRS_NV 0x90AD
+#define GL_ADJACENT_PAIRS_NV 0x90AE
+#define GL_FIRST_TO_REST_NV 0x90AF
+#define GL_PATH_GEN_MODE_NV 0x90B0
+#define GL_PATH_GEN_COEFF_NV 0x90B1
+#define GL_PATH_GEN_COMPONENTS_NV 0x90B3
+#define GL_PATH_STENCIL_FUNC_NV 0x90B7
+#define GL_PATH_STENCIL_REF_NV 0x90B8
+#define GL_PATH_STENCIL_VALUE_MASK_NV 0x90B9
+#define GL_PATH_STENCIL_DEPTH_OFFSET_FACTOR_NV 0x90BD
+#define GL_PATH_STENCIL_DEPTH_OFFSET_UNITS_NV 0x90BE
+#define GL_PATH_COVER_DEPTH_FUNC_NV 0x90BF
+#define GL_PATH_DASH_OFFSET_RESET_NV 0x90B4
+#define GL_MOVE_TO_RESETS_NV 0x90B5
+#define GL_MOVE_TO_CONTINUES_NV 0x90B6
+#define GL_CLOSE_PATH_NV 0x00
+#define GL_MOVE_TO_NV 0x02
+#define GL_RELATIVE_MOVE_TO_NV 0x03
+#define GL_LINE_TO_NV 0x04
+#define GL_RELATIVE_LINE_TO_NV 0x05
+#define GL_HORIZONTAL_LINE_TO_NV 0x06
+#define GL_RELATIVE_HORIZONTAL_LINE_TO_NV 0x07
+#define GL_VERTICAL_LINE_TO_NV 0x08
+#define GL_RELATIVE_VERTICAL_LINE_TO_NV 0x09
+#define GL_QUADRATIC_CURVE_TO_NV 0x0A
+#define GL_RELATIVE_QUADRATIC_CURVE_TO_NV 0x0B
+#define GL_CUBIC_CURVE_TO_NV 0x0C
+#define GL_RELATIVE_CUBIC_CURVE_TO_NV 0x0D
+#define GL_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0E
+#define GL_RELATIVE_SMOOTH_QUADRATIC_CURVE_TO_NV 0x0F
+#define GL_SMOOTH_CUBIC_CURVE_TO_NV 0x10
+#define GL_RELATIVE_SMOOTH_CUBIC_CURVE_TO_NV 0x11
+#define GL_SMALL_CCW_ARC_TO_NV 0x12
+#define GL_RELATIVE_SMALL_CCW_ARC_TO_NV 0x13
+#define GL_SMALL_CW_ARC_TO_NV 0x14
+#define GL_RELATIVE_SMALL_CW_ARC_TO_NV 0x15
+#define GL_LARGE_CCW_ARC_TO_NV 0x16
+#define GL_RELATIVE_LARGE_CCW_ARC_TO_NV 0x17
+#define GL_LARGE_CW_ARC_TO_NV 0x18
+#define GL_RELATIVE_LARGE_CW_ARC_TO_NV 0x19
+#define GL_RESTART_PATH_NV 0xF0
+#define GL_DUP_FIRST_CUBIC_CURVE_TO_NV 0xF2
+#define GL_DUP_LAST_CUBIC_CURVE_TO_NV 0xF4
+#define GL_RECT_NV 0xF6
+#define GL_CIRCULAR_CCW_ARC_TO_NV 0xF8
+#define GL_CIRCULAR_CW_ARC_TO_NV 0xFA
+#define GL_CIRCULAR_TANGENT_ARC_TO_NV 0xFC
+#define GL_ARC_TO_NV 0xFE
+#define GL_RELATIVE_ARC_TO_NV 0xFF
+#define GL_BOLD_BIT_NV 0x01
+#define GL_ITALIC_BIT_NV 0x02
+#define GL_GLYPH_WIDTH_BIT_NV 0x01
+#define GL_GLYPH_HEIGHT_BIT_NV 0x02
+#define GL_GLYPH_HORIZONTAL_BEARING_X_BIT_NV 0x04
+#define GL_GLYPH_HORIZONTAL_BEARING_Y_BIT_NV 0x08
+#define GL_GLYPH_HORIZONTAL_BEARING_ADVANCE_BIT_NV 0x10
+#define GL_GLYPH_VERTICAL_BEARING_X_BIT_NV 0x20
+#define GL_GLYPH_VERTICAL_BEARING_Y_BIT_NV 0x40
+#define GL_GLYPH_VERTICAL_BEARING_ADVANCE_BIT_NV 0x80
+#define GL_GLYPH_HAS_KERNING_BIT_NV 0x100
+#define GL_FONT_X_MIN_BOUNDS_BIT_NV 0x00010000
+#define GL_FONT_Y_MIN_BOUNDS_BIT_NV 0x00020000
+#define GL_FONT_X_MAX_BOUNDS_BIT_NV 0x00040000
+#define GL_FONT_Y_MAX_BOUNDS_BIT_NV 0x00080000
+#define GL_FONT_UNITS_PER_EM_BIT_NV 0x00100000
+#define GL_FONT_ASCENDER_BIT_NV 0x00200000
+#define GL_FONT_DESCENDER_BIT_NV 0x00400000
+#define GL_FONT_HEIGHT_BIT_NV 0x00800000
+#define GL_FONT_MAX_ADVANCE_WIDTH_BIT_NV 0x01000000
+#define GL_FONT_MAX_ADVANCE_HEIGHT_BIT_NV 0x02000000
+#define GL_FONT_UNDERLINE_POSITION_BIT_NV 0x04000000
+#define GL_FONT_UNDERLINE_THICKNESS_BIT_NV 0x08000000
+#define GL_FONT_HAS_KERNING_BIT_NV 0x10000000
+#define GL_ROUNDED_RECT_NV 0xE8
+#define GL_RELATIVE_ROUNDED_RECT_NV 0xE9
+#define GL_ROUNDED_RECT2_NV 0xEA
+#define GL_RELATIVE_ROUNDED_RECT2_NV 0xEB
+#define GL_ROUNDED_RECT4_NV 0xEC
+#define GL_RELATIVE_ROUNDED_RECT4_NV 0xED
+#define GL_ROUNDED_RECT8_NV 0xEE
+#define GL_RELATIVE_ROUNDED_RECT8_NV 0xEF
+#define GL_RELATIVE_RECT_NV 0xF7
+#define GL_FONT_GLYPHS_AVAILABLE_NV 0x9368
+#define GL_FONT_TARGET_UNAVAILABLE_NV 0x9369
+#define GL_FONT_UNAVAILABLE_NV 0x936A
+#define GL_FONT_UNINTELLIGIBLE_NV 0x936B
+#define GL_CONIC_CURVE_TO_NV 0x1A
+#define GL_RELATIVE_CONIC_CURVE_TO_NV 0x1B
+#define GL_FONT_NUM_GLYPH_INDICES_BIT_NV 0x20000000
+#define GL_STANDARD_FONT_FORMAT_NV 0x936C
+#define GL_2_BYTES_NV 0x1407
+#define GL_3_BYTES_NV 0x1408
+#define GL_4_BYTES_NV 0x1409
+#define GL_EYE_LINEAR_NV 0x2400
+#define GL_OBJECT_LINEAR_NV 0x2401
+#define GL_CONSTANT_NV 0x8576
+#define GL_PATH_FOG_GEN_MODE_NV 0x90AC
+#define GL_PRIMARY_COLOR_NV 0x852C
+#define GL_SECONDARY_COLOR_NV 0x852D
+#define GL_PATH_GEN_COLOR_FORMAT_NV 0x90B2
+#define GL_PATH_PROJECTION_NV 0x1701
+#define GL_PATH_MODELVIEW_NV 0x1700
+#define GL_PATH_MODELVIEW_STACK_DEPTH_NV 0x0BA3
+#define GL_PATH_MODELVIEW_MATRIX_NV 0x0BA6
+#define GL_PATH_MAX_MODELVIEW_STACK_DEPTH_NV 0x0D36
+#define GL_PATH_TRANSPOSE_MODELVIEW_MATRIX_NV 0x84E3
+#define GL_PATH_PROJECTION_STACK_DEPTH_NV 0x0BA4
+#define GL_PATH_PROJECTION_MATRIX_NV 0x0BA7
+#define GL_PATH_MAX_PROJECTION_STACK_DEPTH_NV 0x0D38
+#define GL_PATH_TRANSPOSE_PROJECTION_MATRIX_NV 0x84E4
+#define GL_FRAGMENT_INPUT_NV 0x936D
+typedef GLuint (APIENTRYP PFNGLGENPATHSNVPROC) (GLsizei range);
+typedef void (APIENTRYP PFNGLDELETEPATHSNVPROC) (GLuint path, GLsizei range);
+typedef GLboolean (APIENTRYP PFNGLISPATHNVPROC) (GLuint path);
+typedef void (APIENTRYP PFNGLPATHCOMMANDSNVPROC) (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (APIENTRYP PFNGLPATHCOORDSNVPROC) (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (APIENTRYP PFNGLPATHSUBCOMMANDSNVPROC) (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (APIENTRYP PFNGLPATHSUBCOORDSNVPROC) (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
+typedef void (APIENTRYP PFNGLPATHSTRINGNVPROC) (GLuint path, GLenum format, GLsizei length, const void *pathString);
+typedef void (APIENTRYP PFNGLPATHGLYPHSNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef void (APIENTRYP PFNGLPATHGLYPHRANGENVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef void (APIENTRYP PFNGLWEIGHTPATHSNVPROC) (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
+typedef void (APIENTRYP PFNGLCOPYPATHNVPROC) (GLuint resultPath, GLuint srcPath);
+typedef void (APIENTRYP PFNGLINTERPOLATEPATHSNVPROC) (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight);
+typedef void (APIENTRYP PFNGLTRANSFORMPATHNVPROC) (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues);
+typedef void (APIENTRYP PFNGLPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, const GLint *value);
+typedef void (APIENTRYP PFNGLPATHPARAMETERINVPROC) (GLuint path, GLenum pname, GLint value);
+typedef void (APIENTRYP PFNGLPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, const GLfloat *value);
+typedef void (APIENTRYP PFNGLPATHPARAMETERFNVPROC) (GLuint path, GLenum pname, GLfloat value);
+typedef void (APIENTRYP PFNGLPATHDASHARRAYNVPROC) (GLuint path, GLsizei dashCount, const GLfloat *dashArray);
+typedef void (APIENTRYP PFNGLPATHSTENCILFUNCNVPROC) (GLenum func, GLint ref, GLuint mask);
+typedef void (APIENTRYP PFNGLPATHSTENCILDEPTHOFFSETNVPROC) (GLfloat factor, GLfloat units);
+typedef void (APIENTRYP PFNGLSTENCILFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask);
+typedef void (APIENTRYP PFNGLSTENCILSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask);
+typedef void (APIENTRYP PFNGLSTENCILFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues);
+typedef void (APIENTRYP PFNGLSTENCILSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues);
+typedef void (APIENTRYP PFNGLPATHCOVERDEPTHFUNCNVPROC) (GLenum func);
+typedef void (APIENTRYP PFNGLCOVERFILLPATHNVPROC) (GLuint path, GLenum coverMode);
+typedef void (APIENTRYP PFNGLCOVERSTROKEPATHNVPROC) (GLuint path, GLenum coverMode);
+typedef void (APIENTRYP PFNGLCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef void (APIENTRYP PFNGLCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef void (APIENTRYP PFNGLGETPATHPARAMETERIVNVPROC) (GLuint path, GLenum pname, GLint *value);
+typedef void (APIENTRYP PFNGLGETPATHPARAMETERFVNVPROC) (GLuint path, GLenum pname, GLfloat *value);
+typedef void (APIENTRYP PFNGLGETPATHCOMMANDSNVPROC) (GLuint path, GLubyte *commands);
+typedef void (APIENTRYP PFNGLGETPATHCOORDSNVPROC) (GLuint path, GLfloat *coords);
+typedef void (APIENTRYP PFNGLGETPATHDASHARRAYNVPROC) (GLuint path, GLfloat *dashArray);
+typedef void (APIENTRYP PFNGLGETPATHMETRICSNVPROC) (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics);
+typedef void (APIENTRYP PFNGLGETPATHMETRICRANGENVPROC) (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics);
+typedef void (APIENTRYP PFNGLGETPATHSPACINGNVPROC) (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing);
+typedef GLboolean (APIENTRYP PFNGLISPOINTINFILLPATHNVPROC) (GLuint path, GLuint mask, GLfloat x, GLfloat y);
+typedef GLboolean (APIENTRYP PFNGLISPOINTINSTROKEPATHNVPROC) (GLuint path, GLfloat x, GLfloat y);
+typedef GLfloat (APIENTRYP PFNGLGETPATHLENGTHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments);
+typedef GLboolean (APIENTRYP PFNGLPOINTALONGPATHNVPROC) (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY);
+typedef void (APIENTRYP PFNGLMATRIXLOAD3X2FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXLOAD3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXLOADTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXMULT3X2FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXMULT3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLMATRIXMULTTRANSPOSE3X3FNVPROC) (GLenum matrixMode, const GLfloat *m);
+typedef void (APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHNVPROC) (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode);
+typedef void (APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHNVPROC) (GLuint path, GLint reference, GLuint mask, GLenum coverMode);
+typedef void (APIENTRYP PFNGLSTENCILTHENCOVERFILLPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef void (APIENTRYP PFNGLSTENCILTHENCOVERSTROKEPATHINSTANCEDNVPROC) (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+typedef GLenum (APIENTRYP PFNGLPATHGLYPHINDEXRANGENVPROC) (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]);
+typedef GLenum (APIENTRYP PFNGLPATHGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef GLenum (APIENTRYP PFNGLPATHMEMORYGLYPHINDEXARRAYNVPROC) (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+typedef void (APIENTRYP PFNGLPROGRAMPATHFRAGMENTINPUTGENNVPROC) (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs);
+typedef void (APIENTRYP PFNGLGETPROGRAMRESOURCEFVNVPROC) (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params);
+typedef void (APIENTRYP PFNGLPATHCOLORGENNVPROC) (GLenum color, GLenum genMode, GLenum colorFormat, const GLfloat *coeffs);
+typedef void (APIENTRYP PFNGLPATHTEXGENNVPROC) (GLenum texCoordSet, GLenum genMode, GLint components, const GLfloat *coeffs);
+typedef void (APIENTRYP PFNGLPATHFOGGENNVPROC) (GLenum genMode);
+typedef void (APIENTRYP PFNGLGETPATHCOLORGENIVNVPROC) (GLenum color, GLenum pname, GLint *value);
+typedef void (APIENTRYP PFNGLGETPATHCOLORGENFVNVPROC) (GLenum color, GLenum pname, GLfloat *value);
+typedef void (APIENTRYP PFNGLGETPATHTEXGENIVNVPROC) (GLenum texCoordSet, GLenum pname, GLint *value);
+typedef void (APIENTRYP PFNGLGETPATHTEXGENFVNVPROC) (GLenum texCoordSet, GLenum pname, GLfloat *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLuint APIENTRY glGenPathsNV (GLsizei range);
+GLAPI void APIENTRY glDeletePathsNV (GLuint path, GLsizei range);
+GLAPI GLboolean APIENTRY glIsPathNV (GLuint path);
+GLAPI void APIENTRY glPathCommandsNV (GLuint path, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+GLAPI void APIENTRY glPathCoordsNV (GLuint path, GLsizei numCoords, GLenum coordType, const void *coords);
+GLAPI void APIENTRY glPathSubCommandsNV (GLuint path, GLsizei commandStart, GLsizei commandsToDelete, GLsizei numCommands, const GLubyte *commands, GLsizei numCoords, GLenum coordType, const void *coords);
+GLAPI void APIENTRY glPathSubCoordsNV (GLuint path, GLsizei coordStart, GLsizei numCoords, GLenum coordType, const void *coords);
+GLAPI void APIENTRY glPathStringNV (GLuint path, GLenum format, GLsizei length, const void *pathString);
+GLAPI void APIENTRY glPathGlyphsNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLsizei numGlyphs, GLenum type, const void *charcodes, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+GLAPI void APIENTRY glPathGlyphRangeNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyph, GLsizei numGlyphs, GLenum handleMissingGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+GLAPI void APIENTRY glWeightPathsNV (GLuint resultPath, GLsizei numPaths, const GLuint *paths, const GLfloat *weights);
+GLAPI void APIENTRY glCopyPathNV (GLuint resultPath, GLuint srcPath);
+GLAPI void APIENTRY glInterpolatePathsNV (GLuint resultPath, GLuint pathA, GLuint pathB, GLfloat weight);
+GLAPI void APIENTRY glTransformPathNV (GLuint resultPath, GLuint srcPath, GLenum transformType, const GLfloat *transformValues);
+GLAPI void APIENTRY glPathParameterivNV (GLuint path, GLenum pname, const GLint *value);
+GLAPI void APIENTRY glPathParameteriNV (GLuint path, GLenum pname, GLint value);
+GLAPI void APIENTRY glPathParameterfvNV (GLuint path, GLenum pname, const GLfloat *value);
+GLAPI void APIENTRY glPathParameterfNV (GLuint path, GLenum pname, GLfloat value);
+GLAPI void APIENTRY glPathDashArrayNV (GLuint path, GLsizei dashCount, const GLfloat *dashArray);
+GLAPI void APIENTRY glPathStencilFuncNV (GLenum func, GLint ref, GLuint mask);
+GLAPI void APIENTRY glPathStencilDepthOffsetNV (GLfloat factor, GLfloat units);
+GLAPI void APIENTRY glStencilFillPathNV (GLuint path, GLenum fillMode, GLuint mask);
+GLAPI void APIENTRY glStencilStrokePathNV (GLuint path, GLint reference, GLuint mask);
+GLAPI void APIENTRY glStencilFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum transformType, const GLfloat *transformValues);
+GLAPI void APIENTRY glStencilStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum transformType, const GLfloat *transformValues);
+GLAPI void APIENTRY glPathCoverDepthFuncNV (GLenum func);
+GLAPI void APIENTRY glCoverFillPathNV (GLuint path, GLenum coverMode);
+GLAPI void APIENTRY glCoverStrokePathNV (GLuint path, GLenum coverMode);
+GLAPI void APIENTRY glCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+GLAPI void APIENTRY glCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+GLAPI void APIENTRY glGetPathParameterivNV (GLuint path, GLenum pname, GLint *value);
+GLAPI void APIENTRY glGetPathParameterfvNV (GLuint path, GLenum pname, GLfloat *value);
+GLAPI void APIENTRY glGetPathCommandsNV (GLuint path, GLubyte *commands);
+GLAPI void APIENTRY glGetPathCoordsNV (GLuint path, GLfloat *coords);
+GLAPI void APIENTRY glGetPathDashArrayNV (GLuint path, GLfloat *dashArray);
+GLAPI void APIENTRY glGetPathMetricsNV (GLbitfield metricQueryMask, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLsizei stride, GLfloat *metrics);
+GLAPI void APIENTRY glGetPathMetricRangeNV (GLbitfield metricQueryMask, GLuint firstPathName, GLsizei numPaths, GLsizei stride, GLfloat *metrics);
+GLAPI void APIENTRY glGetPathSpacingNV (GLenum pathListMode, GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLfloat advanceScale, GLfloat kerningScale, GLenum transformType, GLfloat *returnedSpacing);
+GLAPI GLboolean APIENTRY glIsPointInFillPathNV (GLuint path, GLuint mask, GLfloat x, GLfloat y);
+GLAPI GLboolean APIENTRY glIsPointInStrokePathNV (GLuint path, GLfloat x, GLfloat y);
+GLAPI GLfloat APIENTRY glGetPathLengthNV (GLuint path, GLsizei startSegment, GLsizei numSegments);
+GLAPI GLboolean APIENTRY glPointAlongPathNV (GLuint path, GLsizei startSegment, GLsizei numSegments, GLfloat distance, GLfloat *x, GLfloat *y, GLfloat *tangentX, GLfloat *tangentY);
+GLAPI void APIENTRY glMatrixLoad3x2fNV (GLenum matrixMode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixLoad3x3fNV (GLenum matrixMode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixLoadTranspose3x3fNV (GLenum matrixMode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixMult3x2fNV (GLenum matrixMode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixMult3x3fNV (GLenum matrixMode, const GLfloat *m);
+GLAPI void APIENTRY glMatrixMultTranspose3x3fNV (GLenum matrixMode, const GLfloat *m);
+GLAPI void APIENTRY glStencilThenCoverFillPathNV (GLuint path, GLenum fillMode, GLuint mask, GLenum coverMode);
+GLAPI void APIENTRY glStencilThenCoverStrokePathNV (GLuint path, GLint reference, GLuint mask, GLenum coverMode);
+GLAPI void APIENTRY glStencilThenCoverFillPathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLenum fillMode, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+GLAPI void APIENTRY glStencilThenCoverStrokePathInstancedNV (GLsizei numPaths, GLenum pathNameType, const void *paths, GLuint pathBase, GLint reference, GLuint mask, GLenum coverMode, GLenum transformType, const GLfloat *transformValues);
+GLAPI GLenum APIENTRY glPathGlyphIndexRangeNV (GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint pathParameterTemplate, GLfloat emScale, GLuint baseAndCount[2]);
+GLAPI GLenum APIENTRY glPathGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, const void *fontName, GLbitfield fontStyle, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+GLAPI GLenum APIENTRY glPathMemoryGlyphIndexArrayNV (GLuint firstPathName, GLenum fontTarget, GLsizeiptr fontSize, const void *fontData, GLsizei faceIndex, GLuint firstGlyphIndex, GLsizei numGlyphs, GLuint pathParameterTemplate, GLfloat emScale);
+GLAPI void APIENTRY glProgramPathFragmentInputGenNV (GLuint program, GLint location, GLenum genMode, GLint components, const GLfloat *coeffs);
+GLAPI void APIENTRY glGetProgramResourcefvNV (GLuint program, GLenum programInterface, GLuint index, GLsizei propCount, const GLenum *props, GLsizei bufSize, GLsizei *length, GLfloat *params);
+GLAPI void APIENTRY glPathColorGenNV (GLenum color, GLenum genMode, GLenum colorFormat, const GLfloat *coeffs);
+GLAPI void APIENTRY glPathTexGenNV (GLenum texCoordSet, GLenum genMode, GLint components, const GLfloat *coeffs);
+GLAPI void APIENTRY glPathFogGenNV (GLenum genMode);
+GLAPI void APIENTRY glGetPathColorGenivNV (GLenum color, GLenum pname, GLint *value);
+GLAPI void APIENTRY glGetPathColorGenfvNV (GLenum color, GLenum pname, GLfloat *value);
+GLAPI void APIENTRY glGetPathTexGenivNV (GLenum texCoordSet, GLenum pname, GLint *value);
+GLAPI void APIENTRY glGetPathTexGenfvNV (GLenum texCoordSet, GLenum pname, GLfloat *value);
+#endif
+#endif /* GL_NV_path_rendering */
+
+#ifndef GL_NV_path_rendering_shared_edge
+#define GL_NV_path_rendering_shared_edge 1
+#define GL_SHARED_EDGE_NV 0xC0
+#endif /* GL_NV_path_rendering_shared_edge */
+
+#ifndef GL_NV_pixel_data_range
+#define GL_NV_pixel_data_range 1
+#define GL_WRITE_PIXEL_DATA_RANGE_NV 0x8878
+#define GL_READ_PIXEL_DATA_RANGE_NV 0x8879
+#define GL_WRITE_PIXEL_DATA_RANGE_LENGTH_NV 0x887A
+#define GL_READ_PIXEL_DATA_RANGE_LENGTH_NV 0x887B
+#define GL_WRITE_PIXEL_DATA_RANGE_POINTER_NV 0x887C
+#define GL_READ_PIXEL_DATA_RANGE_POINTER_NV 0x887D
+typedef void (APIENTRYP PFNGLPIXELDATARANGENVPROC) (GLenum target, GLsizei length, const void *pointer);
+typedef void (APIENTRYP PFNGLFLUSHPIXELDATARANGENVPROC) (GLenum target);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPixelDataRangeNV (GLenum target, GLsizei length, const void *pointer);
+GLAPI void APIENTRY glFlushPixelDataRangeNV (GLenum target);
+#endif
+#endif /* GL_NV_pixel_data_range */
+
+#ifndef GL_NV_point_sprite
+#define GL_NV_point_sprite 1
+#define GL_POINT_SPRITE_NV 0x8861
+#define GL_COORD_REPLACE_NV 0x8862
+#define GL_POINT_SPRITE_R_MODE_NV 0x8863
+typedef void (APIENTRYP PFNGLPOINTPARAMETERINVPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERIVNVPROC) (GLenum pname, const GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPointParameteriNV (GLenum pname, GLint param);
+GLAPI void APIENTRY glPointParameterivNV (GLenum pname, const GLint *params);
+#endif
+#endif /* GL_NV_point_sprite */
+
+#ifndef GL_NV_present_video
+#define GL_NV_present_video 1
+#define GL_FRAME_NV 0x8E26
+#define GL_FIELDS_NV 0x8E27
+#define GL_CURRENT_TIME_NV 0x8E28
+#define GL_NUM_FILL_STREAMS_NV 0x8E29
+#define GL_PRESENT_TIME_NV 0x8E2A
+#define GL_PRESENT_DURATION_NV 0x8E2B
+typedef void (APIENTRYP PFNGLPRESENTFRAMEKEYEDNVPROC) (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLuint key0, GLenum target1, GLuint fill1, GLuint key1);
+typedef void (APIENTRYP PFNGLPRESENTFRAMEDUALFILLNVPROC) (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLenum target1, GLuint fill1, GLenum target2, GLuint fill2, GLenum target3, GLuint fill3);
+typedef void (APIENTRYP PFNGLGETVIDEOIVNVPROC) (GLuint video_slot, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVIDEOUIVNVPROC) (GLuint video_slot, GLenum pname, GLuint *params);
+typedef void (APIENTRYP PFNGLGETVIDEOI64VNVPROC) (GLuint video_slot, GLenum pname, GLint64EXT *params);
+typedef void (APIENTRYP PFNGLGETVIDEOUI64VNVPROC) (GLuint video_slot, GLenum pname, GLuint64EXT *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPresentFrameKeyedNV (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLuint key0, GLenum target1, GLuint fill1, GLuint key1);
+GLAPI void APIENTRY glPresentFrameDualFillNV (GLuint video_slot, GLuint64EXT minPresentTime, GLuint beginPresentTimeId, GLuint presentDurationId, GLenum type, GLenum target0, GLuint fill0, GLenum target1, GLuint fill1, GLenum target2, GLuint fill2, GLenum target3, GLuint fill3);
+GLAPI void APIENTRY glGetVideoivNV (GLuint video_slot, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVideouivNV (GLuint video_slot, GLenum pname, GLuint *params);
+GLAPI void APIENTRY glGetVideoi64vNV (GLuint video_slot, GLenum pname, GLint64EXT *params);
+GLAPI void APIENTRY glGetVideoui64vNV (GLuint video_slot, GLenum pname, GLuint64EXT *params);
+#endif
+#endif /* GL_NV_present_video */
+
+#ifndef GL_NV_primitive_restart
+#define GL_NV_primitive_restart 1
+#define GL_PRIMITIVE_RESTART_NV 0x8558
+#define GL_PRIMITIVE_RESTART_INDEX_NV 0x8559
+typedef void (APIENTRYP PFNGLPRIMITIVERESTARTNVPROC) (void);
+typedef void (APIENTRYP PFNGLPRIMITIVERESTARTINDEXNVPROC) (GLuint index);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPrimitiveRestartNV (void);
+GLAPI void APIENTRY glPrimitiveRestartIndexNV (GLuint index);
+#endif
+#endif /* GL_NV_primitive_restart */
+
+#ifndef GL_NV_query_resource
+#define GL_NV_query_resource 1
+#define GL_QUERY_RESOURCE_TYPE_VIDMEM_ALLOC_NV 0x9540
+#define GL_QUERY_RESOURCE_MEMTYPE_VIDMEM_NV 0x9542
+#define GL_QUERY_RESOURCE_SYS_RESERVED_NV 0x9544
+#define GL_QUERY_RESOURCE_TEXTURE_NV 0x9545
+#define GL_QUERY_RESOURCE_RENDERBUFFER_NV 0x9546
+#define GL_QUERY_RESOURCE_BUFFEROBJECT_NV 0x9547
+typedef GLint (APIENTRYP PFNGLQUERYRESOURCENVPROC) (GLenum queryType, GLint tagId, GLuint bufSize, GLint *buffer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLint APIENTRY glQueryResourceNV (GLenum queryType, GLint tagId, GLuint bufSize, GLint *buffer);
+#endif
+#endif /* GL_NV_query_resource */
+
+#ifndef GL_NV_query_resource_tag
+#define GL_NV_query_resource_tag 1
+typedef void (APIENTRYP PFNGLGENQUERYRESOURCETAGNVPROC) (GLsizei n, GLint *tagIds);
+typedef void (APIENTRYP PFNGLDELETEQUERYRESOURCETAGNVPROC) (GLsizei n, const GLint *tagIds);
+typedef void (APIENTRYP PFNGLQUERYRESOURCETAGNVPROC) (GLint tagId, const GLchar *tagString);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGenQueryResourceTagNV (GLsizei n, GLint *tagIds);
+GLAPI void APIENTRY glDeleteQueryResourceTagNV (GLsizei n, const GLint *tagIds);
+GLAPI void APIENTRY glQueryResourceTagNV (GLint tagId, const GLchar *tagString);
+#endif
+#endif /* GL_NV_query_resource_tag */
+
+#ifndef GL_NV_register_combiners
+#define GL_NV_register_combiners 1
+#define GL_REGISTER_COMBINERS_NV 0x8522
+#define GL_VARIABLE_A_NV 0x8523
+#define GL_VARIABLE_B_NV 0x8524
+#define GL_VARIABLE_C_NV 0x8525
+#define GL_VARIABLE_D_NV 0x8526
+#define GL_VARIABLE_E_NV 0x8527
+#define GL_VARIABLE_F_NV 0x8528
+#define GL_VARIABLE_G_NV 0x8529
+#define GL_CONSTANT_COLOR0_NV 0x852A
+#define GL_CONSTANT_COLOR1_NV 0x852B
+#define GL_SPARE0_NV 0x852E
+#define GL_SPARE1_NV 0x852F
+#define GL_DISCARD_NV 0x8530
+#define GL_E_TIMES_F_NV 0x8531
+#define GL_SPARE0_PLUS_SECONDARY_COLOR_NV 0x8532
+#define GL_UNSIGNED_IDENTITY_NV 0x8536
+#define GL_UNSIGNED_INVERT_NV 0x8537
+#define GL_EXPAND_NORMAL_NV 0x8538
+#define GL_EXPAND_NEGATE_NV 0x8539
+#define GL_HALF_BIAS_NORMAL_NV 0x853A
+#define GL_HALF_BIAS_NEGATE_NV 0x853B
+#define GL_SIGNED_IDENTITY_NV 0x853C
+#define GL_SIGNED_NEGATE_NV 0x853D
+#define GL_SCALE_BY_TWO_NV 0x853E
+#define GL_SCALE_BY_FOUR_NV 0x853F
+#define GL_SCALE_BY_ONE_HALF_NV 0x8540
+#define GL_BIAS_BY_NEGATIVE_ONE_HALF_NV 0x8541
+#define GL_COMBINER_INPUT_NV 0x8542
+#define GL_COMBINER_MAPPING_NV 0x8543
+#define GL_COMBINER_COMPONENT_USAGE_NV 0x8544
+#define GL_COMBINER_AB_DOT_PRODUCT_NV 0x8545
+#define GL_COMBINER_CD_DOT_PRODUCT_NV 0x8546
+#define GL_COMBINER_MUX_SUM_NV 0x8547
+#define GL_COMBINER_SCALE_NV 0x8548
+#define GL_COMBINER_BIAS_NV 0x8549
+#define GL_COMBINER_AB_OUTPUT_NV 0x854A
+#define GL_COMBINER_CD_OUTPUT_NV 0x854B
+#define GL_COMBINER_SUM_OUTPUT_NV 0x854C
+#define GL_MAX_GENERAL_COMBINERS_NV 0x854D
+#define GL_NUM_GENERAL_COMBINERS_NV 0x854E
+#define GL_COLOR_SUM_CLAMP_NV 0x854F
+#define GL_COMBINER0_NV 0x8550
+#define GL_COMBINER1_NV 0x8551
+#define GL_COMBINER2_NV 0x8552
+#define GL_COMBINER3_NV 0x8553
+#define GL_COMBINER4_NV 0x8554
+#define GL_COMBINER5_NV 0x8555
+#define GL_COMBINER6_NV 0x8556
+#define GL_COMBINER7_NV 0x8557
+typedef void (APIENTRYP PFNGLCOMBINERPARAMETERFVNVPROC) (GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLCOMBINERPARAMETERFNVPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLCOMBINERPARAMETERIVNVPROC) (GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLCOMBINERPARAMETERINVPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLCOMBINERINPUTNVPROC) (GLenum stage, GLenum portion, GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage);
+typedef void (APIENTRYP PFNGLCOMBINEROUTPUTNVPROC) (GLenum stage, GLenum portion, GLenum abOutput, GLenum cdOutput, GLenum sumOutput, GLenum scale, GLenum bias, GLboolean abDotProduct, GLboolean cdDotProduct, GLboolean muxSum);
+typedef void (APIENTRYP PFNGLFINALCOMBINERINPUTNVPROC) (GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage);
+typedef void (APIENTRYP PFNGLGETCOMBINERINPUTPARAMETERFVNVPROC) (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCOMBINERINPUTPARAMETERIVNVPROC) (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETCOMBINEROUTPUTPARAMETERFVNVPROC) (GLenum stage, GLenum portion, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCOMBINEROUTPUTPARAMETERIVNVPROC) (GLenum stage, GLenum portion, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETFINALCOMBINERINPUTPARAMETERFVNVPROC) (GLenum variable, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETFINALCOMBINERINPUTPARAMETERIVNVPROC) (GLenum variable, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCombinerParameterfvNV (GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glCombinerParameterfNV (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glCombinerParameterivNV (GLenum pname, const GLint *params);
+GLAPI void APIENTRY glCombinerParameteriNV (GLenum pname, GLint param);
+GLAPI void APIENTRY glCombinerInputNV (GLenum stage, GLenum portion, GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage);
+GLAPI void APIENTRY glCombinerOutputNV (GLenum stage, GLenum portion, GLenum abOutput, GLenum cdOutput, GLenum sumOutput, GLenum scale, GLenum bias, GLboolean abDotProduct, GLboolean cdDotProduct, GLboolean muxSum);
+GLAPI void APIENTRY glFinalCombinerInputNV (GLenum variable, GLenum input, GLenum mapping, GLenum componentUsage);
+GLAPI void APIENTRY glGetCombinerInputParameterfvNV (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetCombinerInputParameterivNV (GLenum stage, GLenum portion, GLenum variable, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetCombinerOutputParameterfvNV (GLenum stage, GLenum portion, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetCombinerOutputParameterivNV (GLenum stage, GLenum portion, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetFinalCombinerInputParameterfvNV (GLenum variable, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetFinalCombinerInputParameterivNV (GLenum variable, GLenum pname, GLint *params);
+#endif
+#endif /* GL_NV_register_combiners */
+
+#ifndef GL_NV_register_combiners2
+#define GL_NV_register_combiners2 1
+#define GL_PER_STAGE_CONSTANTS_NV 0x8535
+typedef void (APIENTRYP PFNGLCOMBINERSTAGEPARAMETERFVNVPROC) (GLenum stage, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCOMBINERSTAGEPARAMETERFVNVPROC) (GLenum stage, GLenum pname, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glCombinerStageParameterfvNV (GLenum stage, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glGetCombinerStageParameterfvNV (GLenum stage, GLenum pname, GLfloat *params);
+#endif
+#endif /* GL_NV_register_combiners2 */
+
+#ifndef GL_NV_representative_fragment_test
+#define GL_NV_representative_fragment_test 1
+#define GL_REPRESENTATIVE_FRAGMENT_TEST_NV 0x937F
+#endif /* GL_NV_representative_fragment_test */
+
+#ifndef GL_NV_robustness_video_memory_purge
+#define GL_NV_robustness_video_memory_purge 1
+#define GL_PURGED_CONTEXT_RESET_NV 0x92BB
+#endif /* GL_NV_robustness_video_memory_purge */
+
+#ifndef GL_NV_sample_locations
+#define GL_NV_sample_locations 1
+#define GL_SAMPLE_LOCATION_SUBPIXEL_BITS_NV 0x933D
+#define GL_SAMPLE_LOCATION_PIXEL_GRID_WIDTH_NV 0x933E
+#define GL_SAMPLE_LOCATION_PIXEL_GRID_HEIGHT_NV 0x933F
+#define GL_PROGRAMMABLE_SAMPLE_LOCATION_TABLE_SIZE_NV 0x9340
+#define GL_SAMPLE_LOCATION_NV 0x8E50
+#define GL_PROGRAMMABLE_SAMPLE_LOCATION_NV 0x9341
+#define GL_FRAMEBUFFER_PROGRAMMABLE_SAMPLE_LOCATIONS_NV 0x9342
+#define GL_FRAMEBUFFER_SAMPLE_LOCATION_PIXEL_GRID_NV 0x9343
+typedef void (APIENTRYP PFNGLFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLenum target, GLuint start, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLNAMEDFRAMEBUFFERSAMPLELOCATIONSFVNVPROC) (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLRESOLVEDEPTHVALUESNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferSampleLocationsfvNV (GLenum target, GLuint start, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glNamedFramebufferSampleLocationsfvNV (GLuint framebuffer, GLuint start, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glResolveDepthValuesNV (void);
+#endif
+#endif /* GL_NV_sample_locations */
+
+#ifndef GL_NV_sample_mask_override_coverage
+#define GL_NV_sample_mask_override_coverage 1
+#endif /* GL_NV_sample_mask_override_coverage */
+
+#ifndef GL_NV_scissor_exclusive
+#define GL_NV_scissor_exclusive 1
+#define GL_SCISSOR_TEST_EXCLUSIVE_NV 0x9555
+#define GL_SCISSOR_BOX_EXCLUSIVE_NV 0x9556
+typedef void (APIENTRYP PFNGLSCISSOREXCLUSIVENVPROC) (GLint x, GLint y, GLsizei width, GLsizei height);
+typedef void (APIENTRYP PFNGLSCISSOREXCLUSIVEARRAYVNVPROC) (GLuint first, GLsizei count, const GLint *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glScissorExclusiveNV (GLint x, GLint y, GLsizei width, GLsizei height);
+GLAPI void APIENTRY glScissorExclusiveArrayvNV (GLuint first, GLsizei count, const GLint *v);
+#endif
+#endif /* GL_NV_scissor_exclusive */
+
+#ifndef GL_NV_shader_atomic_counters
+#define GL_NV_shader_atomic_counters 1
+#endif /* GL_NV_shader_atomic_counters */
+
+#ifndef GL_NV_shader_atomic_float
+#define GL_NV_shader_atomic_float 1
+#endif /* GL_NV_shader_atomic_float */
+
+#ifndef GL_NV_shader_atomic_float64
+#define GL_NV_shader_atomic_float64 1
+#endif /* GL_NV_shader_atomic_float64 */
+
+#ifndef GL_NV_shader_atomic_fp16_vector
+#define GL_NV_shader_atomic_fp16_vector 1
+#endif /* GL_NV_shader_atomic_fp16_vector */
+
+#ifndef GL_NV_shader_atomic_int64
+#define GL_NV_shader_atomic_int64 1
+#endif /* GL_NV_shader_atomic_int64 */
+
+#ifndef GL_NV_shader_buffer_load
+#define GL_NV_shader_buffer_load 1
+#define GL_BUFFER_GPU_ADDRESS_NV 0x8F1D
+#define GL_GPU_ADDRESS_NV 0x8F34
+#define GL_MAX_SHADER_BUFFER_ADDRESS_NV 0x8F35
+typedef void (APIENTRYP PFNGLMAKEBUFFERRESIDENTNVPROC) (GLenum target, GLenum access);
+typedef void (APIENTRYP PFNGLMAKEBUFFERNONRESIDENTNVPROC) (GLenum target);
+typedef GLboolean (APIENTRYP PFNGLISBUFFERRESIDENTNVPROC) (GLenum target);
+typedef void (APIENTRYP PFNGLMAKENAMEDBUFFERRESIDENTNVPROC) (GLuint buffer, GLenum access);
+typedef void (APIENTRYP PFNGLMAKENAMEDBUFFERNONRESIDENTNVPROC) (GLuint buffer);
+typedef GLboolean (APIENTRYP PFNGLISNAMEDBUFFERRESIDENTNVPROC) (GLuint buffer);
+typedef void (APIENTRYP PFNGLGETBUFFERPARAMETERUI64VNVPROC) (GLenum target, GLenum pname, GLuint64EXT *params);
+typedef void (APIENTRYP PFNGLGETNAMEDBUFFERPARAMETERUI64VNVPROC) (GLuint buffer, GLenum pname, GLuint64EXT *params);
+typedef void (APIENTRYP PFNGLGETINTEGERUI64VNVPROC) (GLenum value, GLuint64EXT *result);
+typedef void (APIENTRYP PFNGLUNIFORMUI64NVPROC) (GLint location, GLuint64EXT value);
+typedef void (APIENTRYP PFNGLUNIFORMUI64VNVPROC) (GLint location, GLsizei count, const GLuint64EXT *value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMUI64NVPROC) (GLuint program, GLint location, GLuint64EXT value);
+typedef void (APIENTRYP PFNGLPROGRAMUNIFORMUI64VNVPROC) (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glMakeBufferResidentNV (GLenum target, GLenum access);
+GLAPI void APIENTRY glMakeBufferNonResidentNV (GLenum target);
+GLAPI GLboolean APIENTRY glIsBufferResidentNV (GLenum target);
+GLAPI void APIENTRY glMakeNamedBufferResidentNV (GLuint buffer, GLenum access);
+GLAPI void APIENTRY glMakeNamedBufferNonResidentNV (GLuint buffer);
+GLAPI GLboolean APIENTRY glIsNamedBufferResidentNV (GLuint buffer);
+GLAPI void APIENTRY glGetBufferParameterui64vNV (GLenum target, GLenum pname, GLuint64EXT *params);
+GLAPI void APIENTRY glGetNamedBufferParameterui64vNV (GLuint buffer, GLenum pname, GLuint64EXT *params);
+GLAPI void APIENTRY glGetIntegerui64vNV (GLenum value, GLuint64EXT *result);
+GLAPI void APIENTRY glUniformui64NV (GLint location, GLuint64EXT value);
+GLAPI void APIENTRY glUniformui64vNV (GLint location, GLsizei count, const GLuint64EXT *value);
+GLAPI void APIENTRY glProgramUniformui64NV (GLuint program, GLint location, GLuint64EXT value);
+GLAPI void APIENTRY glProgramUniformui64vNV (GLuint program, GLint location, GLsizei count, const GLuint64EXT *value);
+#endif
+#endif /* GL_NV_shader_buffer_load */
+
+#ifndef GL_NV_shader_buffer_store
+#define GL_NV_shader_buffer_store 1
+#define GL_SHADER_GLOBAL_ACCESS_BARRIER_BIT_NV 0x00000010
+#endif /* GL_NV_shader_buffer_store */
+
+#ifndef GL_NV_shader_storage_buffer_object
+#define GL_NV_shader_storage_buffer_object 1
+#endif /* GL_NV_shader_storage_buffer_object */
+
+#ifndef GL_NV_shader_subgroup_partitioned
+#define GL_NV_shader_subgroup_partitioned 1
+#define GL_SUBGROUP_FEATURE_PARTITIONED_BIT_NV 0x00000100
+#endif /* GL_NV_shader_subgroup_partitioned */
+
+#ifndef GL_NV_shader_texture_footprint
+#define GL_NV_shader_texture_footprint 1
+#endif /* GL_NV_shader_texture_footprint */
+
+#ifndef GL_NV_shader_thread_group
+#define GL_NV_shader_thread_group 1
+#define GL_WARP_SIZE_NV 0x9339
+#define GL_WARPS_PER_SM_NV 0x933A
+#define GL_SM_COUNT_NV 0x933B
+#endif /* GL_NV_shader_thread_group */
+
+#ifndef GL_NV_shader_thread_shuffle
+#define GL_NV_shader_thread_shuffle 1
+#endif /* GL_NV_shader_thread_shuffle */
+
+#ifndef GL_NV_shading_rate_image
+#define GL_NV_shading_rate_image 1
+#define GL_SHADING_RATE_IMAGE_NV 0x9563
+#define GL_SHADING_RATE_NO_INVOCATIONS_NV 0x9564
+#define GL_SHADING_RATE_1_INVOCATION_PER_PIXEL_NV 0x9565
+#define GL_SHADING_RATE_1_INVOCATION_PER_1X2_PIXELS_NV 0x9566
+#define GL_SHADING_RATE_1_INVOCATION_PER_2X1_PIXELS_NV 0x9567
+#define GL_SHADING_RATE_1_INVOCATION_PER_2X2_PIXELS_NV 0x9568
+#define GL_SHADING_RATE_1_INVOCATION_PER_2X4_PIXELS_NV 0x9569
+#define GL_SHADING_RATE_1_INVOCATION_PER_4X2_PIXELS_NV 0x956A
+#define GL_SHADING_RATE_1_INVOCATION_PER_4X4_PIXELS_NV 0x956B
+#define GL_SHADING_RATE_2_INVOCATIONS_PER_PIXEL_NV 0x956C
+#define GL_SHADING_RATE_4_INVOCATIONS_PER_PIXEL_NV 0x956D
+#define GL_SHADING_RATE_8_INVOCATIONS_PER_PIXEL_NV 0x956E
+#define GL_SHADING_RATE_16_INVOCATIONS_PER_PIXEL_NV 0x956F
+#define GL_SHADING_RATE_IMAGE_BINDING_NV 0x955B
+#define GL_SHADING_RATE_IMAGE_TEXEL_WIDTH_NV 0x955C
+#define GL_SHADING_RATE_IMAGE_TEXEL_HEIGHT_NV 0x955D
+#define GL_SHADING_RATE_IMAGE_PALETTE_SIZE_NV 0x955E
+#define GL_MAX_COARSE_FRAGMENT_SAMPLES_NV 0x955F
+#define GL_SHADING_RATE_SAMPLE_ORDER_DEFAULT_NV 0x95AE
+#define GL_SHADING_RATE_SAMPLE_ORDER_PIXEL_MAJOR_NV 0x95AF
+#define GL_SHADING_RATE_SAMPLE_ORDER_SAMPLE_MAJOR_NV 0x95B0
+typedef void (APIENTRYP PFNGLBINDSHADINGRATEIMAGENVPROC) (GLuint texture);
+typedef void (APIENTRYP PFNGLGETSHADINGRATEIMAGEPALETTENVPROC) (GLuint viewport, GLuint entry, GLenum *rate);
+typedef void (APIENTRYP PFNGLGETSHADINGRATESAMPLELOCATIONIVNVPROC) (GLenum rate, GLuint samples, GLuint index, GLint *location);
+typedef void (APIENTRYP PFNGLSHADINGRATEIMAGEBARRIERNVPROC) (GLboolean synchronize);
+typedef void (APIENTRYP PFNGLSHADINGRATEIMAGEPALETTENVPROC) (GLuint viewport, GLuint first, GLsizei count, const GLenum *rates);
+typedef void (APIENTRYP PFNGLSHADINGRATESAMPLEORDERNVPROC) (GLenum order);
+typedef void (APIENTRYP PFNGLSHADINGRATESAMPLEORDERCUSTOMNVPROC) (GLenum rate, GLuint samples, const GLint *locations);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindShadingRateImageNV (GLuint texture);
+GLAPI void APIENTRY glGetShadingRateImagePaletteNV (GLuint viewport, GLuint entry, GLenum *rate);
+GLAPI void APIENTRY glGetShadingRateSampleLocationivNV (GLenum rate, GLuint samples, GLuint index, GLint *location);
+GLAPI void APIENTRY glShadingRateImageBarrierNV (GLboolean synchronize);
+GLAPI void APIENTRY glShadingRateImagePaletteNV (GLuint viewport, GLuint first, GLsizei count, const GLenum *rates);
+GLAPI void APIENTRY glShadingRateSampleOrderNV (GLenum order);
+GLAPI void APIENTRY glShadingRateSampleOrderCustomNV (GLenum rate, GLuint samples, const GLint *locations);
+#endif
+#endif /* GL_NV_shading_rate_image */
+
+#ifndef GL_NV_stereo_view_rendering
+#define GL_NV_stereo_view_rendering 1
+#endif /* GL_NV_stereo_view_rendering */
+
+#ifndef GL_NV_tessellation_program5
+#define GL_NV_tessellation_program5 1
+#define GL_MAX_PROGRAM_PATCH_ATTRIBS_NV 0x86D8
+#define GL_TESS_CONTROL_PROGRAM_NV 0x891E
+#define GL_TESS_EVALUATION_PROGRAM_NV 0x891F
+#define GL_TESS_CONTROL_PROGRAM_PARAMETER_BUFFER_NV 0x8C74
+#define GL_TESS_EVALUATION_PROGRAM_PARAMETER_BUFFER_NV 0x8C75
+#endif /* GL_NV_tessellation_program5 */
+
+#ifndef GL_NV_texgen_emboss
+#define GL_NV_texgen_emboss 1
+#define GL_EMBOSS_LIGHT_NV 0x855D
+#define GL_EMBOSS_CONSTANT_NV 0x855E
+#define GL_EMBOSS_MAP_NV 0x855F
+#endif /* GL_NV_texgen_emboss */
+
+#ifndef GL_NV_texgen_reflection
+#define GL_NV_texgen_reflection 1
+#define GL_NORMAL_MAP_NV 0x8511
+#define GL_REFLECTION_MAP_NV 0x8512
+#endif /* GL_NV_texgen_reflection */
+
+#ifndef GL_NV_texture_barrier
+#define GL_NV_texture_barrier 1
+typedef void (APIENTRYP PFNGLTEXTUREBARRIERNVPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTextureBarrierNV (void);
+#endif
+#endif /* GL_NV_texture_barrier */
+
+#ifndef GL_NV_texture_compression_vtc
+#define GL_NV_texture_compression_vtc 1
+#endif /* GL_NV_texture_compression_vtc */
+
+#ifndef GL_NV_texture_env_combine4
+#define GL_NV_texture_env_combine4 1
+#define GL_COMBINE4_NV 0x8503
+#define GL_SOURCE3_RGB_NV 0x8583
+#define GL_SOURCE3_ALPHA_NV 0x858B
+#define GL_OPERAND3_RGB_NV 0x8593
+#define GL_OPERAND3_ALPHA_NV 0x859B
+#endif /* GL_NV_texture_env_combine4 */
+
+#ifndef GL_NV_texture_expand_normal
+#define GL_NV_texture_expand_normal 1
+#define GL_TEXTURE_UNSIGNED_REMAP_MODE_NV 0x888F
+#endif /* GL_NV_texture_expand_normal */
+
+#ifndef GL_NV_texture_multisample
+#define GL_NV_texture_multisample 1
+#define GL_TEXTURE_COVERAGE_SAMPLES_NV 0x9045
+#define GL_TEXTURE_COLOR_SAMPLES_NV 0x9046
+typedef void (APIENTRYP PFNGLTEXIMAGE2DMULTISAMPLECOVERAGENVPROC) (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations);
+typedef void (APIENTRYP PFNGLTEXIMAGE3DMULTISAMPLECOVERAGENVPROC) (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE2DMULTISAMPLENVPROC) (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE3DMULTISAMPLENVPROC) (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE2DMULTISAMPLECOVERAGENVPROC) (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations);
+typedef void (APIENTRYP PFNGLTEXTUREIMAGE3DMULTISAMPLECOVERAGENVPROC) (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexImage2DMultisampleCoverageNV (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations);
+GLAPI void APIENTRY glTexImage3DMultisampleCoverageNV (GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
+GLAPI void APIENTRY glTextureImage2DMultisampleNV (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations);
+GLAPI void APIENTRY glTextureImage3DMultisampleNV (GLuint texture, GLenum target, GLsizei samples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
+GLAPI void APIENTRY glTextureImage2DMultisampleCoverageNV (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLboolean fixedSampleLocations);
+GLAPI void APIENTRY glTextureImage3DMultisampleCoverageNV (GLuint texture, GLenum target, GLsizei coverageSamples, GLsizei colorSamples, GLint internalFormat, GLsizei width, GLsizei height, GLsizei depth, GLboolean fixedSampleLocations);
+#endif
+#endif /* GL_NV_texture_multisample */
+
+#ifndef GL_NV_texture_rectangle
+#define GL_NV_texture_rectangle 1
+#define GL_TEXTURE_RECTANGLE_NV 0x84F5
+#define GL_TEXTURE_BINDING_RECTANGLE_NV 0x84F6
+#define GL_PROXY_TEXTURE_RECTANGLE_NV 0x84F7
+#define GL_MAX_RECTANGLE_TEXTURE_SIZE_NV 0x84F8
+#endif /* GL_NV_texture_rectangle */
+
+#ifndef GL_NV_texture_rectangle_compressed
+#define GL_NV_texture_rectangle_compressed 1
+#endif /* GL_NV_texture_rectangle_compressed */
+
+#ifndef GL_NV_texture_shader
+#define GL_NV_texture_shader 1
+#define GL_OFFSET_TEXTURE_RECTANGLE_NV 0x864C
+#define GL_OFFSET_TEXTURE_RECTANGLE_SCALE_NV 0x864D
+#define GL_DOT_PRODUCT_TEXTURE_RECTANGLE_NV 0x864E
+#define GL_RGBA_UNSIGNED_DOT_PRODUCT_MAPPING_NV 0x86D9
+#define GL_UNSIGNED_INT_S8_S8_8_8_NV 0x86DA
+#define GL_UNSIGNED_INT_8_8_S8_S8_REV_NV 0x86DB
+#define GL_DSDT_MAG_INTENSITY_NV 0x86DC
+#define GL_SHADER_CONSISTENT_NV 0x86DD
+#define GL_TEXTURE_SHADER_NV 0x86DE
+#define GL_SHADER_OPERATION_NV 0x86DF
+#define GL_CULL_MODES_NV 0x86E0
+#define GL_OFFSET_TEXTURE_MATRIX_NV 0x86E1
+#define GL_OFFSET_TEXTURE_SCALE_NV 0x86E2
+#define GL_OFFSET_TEXTURE_BIAS_NV 0x86E3
+#define GL_OFFSET_TEXTURE_2D_MATRIX_NV 0x86E1
+#define GL_OFFSET_TEXTURE_2D_SCALE_NV 0x86E2
+#define GL_OFFSET_TEXTURE_2D_BIAS_NV 0x86E3
+#define GL_PREVIOUS_TEXTURE_INPUT_NV 0x86E4
+#define GL_CONST_EYE_NV 0x86E5
+#define GL_PASS_THROUGH_NV 0x86E6
+#define GL_CULL_FRAGMENT_NV 0x86E7
+#define GL_OFFSET_TEXTURE_2D_NV 0x86E8
+#define GL_DEPENDENT_AR_TEXTURE_2D_NV 0x86E9
+#define GL_DEPENDENT_GB_TEXTURE_2D_NV 0x86EA
+#define GL_DOT_PRODUCT_NV 0x86EC
+#define GL_DOT_PRODUCT_DEPTH_REPLACE_NV 0x86ED
+#define GL_DOT_PRODUCT_TEXTURE_2D_NV 0x86EE
+#define GL_DOT_PRODUCT_TEXTURE_CUBE_MAP_NV 0x86F0
+#define GL_DOT_PRODUCT_DIFFUSE_CUBE_MAP_NV 0x86F1
+#define GL_DOT_PRODUCT_REFLECT_CUBE_MAP_NV 0x86F2
+#define GL_DOT_PRODUCT_CONST_EYE_REFLECT_CUBE_MAP_NV 0x86F3
+#define GL_HILO_NV 0x86F4
+#define GL_DSDT_NV 0x86F5
+#define GL_DSDT_MAG_NV 0x86F6
+#define GL_DSDT_MAG_VIB_NV 0x86F7
+#define GL_HILO16_NV 0x86F8
+#define GL_SIGNED_HILO_NV 0x86F9
+#define GL_SIGNED_HILO16_NV 0x86FA
+#define GL_SIGNED_RGBA_NV 0x86FB
+#define GL_SIGNED_RGBA8_NV 0x86FC
+#define GL_SIGNED_RGB_NV 0x86FE
+#define GL_SIGNED_RGB8_NV 0x86FF
+#define GL_SIGNED_LUMINANCE_NV 0x8701
+#define GL_SIGNED_LUMINANCE8_NV 0x8702
+#define GL_SIGNED_LUMINANCE_ALPHA_NV 0x8703
+#define GL_SIGNED_LUMINANCE8_ALPHA8_NV 0x8704
+#define GL_SIGNED_ALPHA_NV 0x8705
+#define GL_SIGNED_ALPHA8_NV 0x8706
+#define GL_SIGNED_INTENSITY_NV 0x8707
+#define GL_SIGNED_INTENSITY8_NV 0x8708
+#define GL_DSDT8_NV 0x8709
+#define GL_DSDT8_MAG8_NV 0x870A
+#define GL_DSDT8_MAG8_INTENSITY8_NV 0x870B
+#define GL_SIGNED_RGB_UNSIGNED_ALPHA_NV 0x870C
+#define GL_SIGNED_RGB8_UNSIGNED_ALPHA8_NV 0x870D
+#define GL_HI_SCALE_NV 0x870E
+#define GL_LO_SCALE_NV 0x870F
+#define GL_DS_SCALE_NV 0x8710
+#define GL_DT_SCALE_NV 0x8711
+#define GL_MAGNITUDE_SCALE_NV 0x8712
+#define GL_VIBRANCE_SCALE_NV 0x8713
+#define GL_HI_BIAS_NV 0x8714
+#define GL_LO_BIAS_NV 0x8715
+#define GL_DS_BIAS_NV 0x8716
+#define GL_DT_BIAS_NV 0x8717
+#define GL_MAGNITUDE_BIAS_NV 0x8718
+#define GL_VIBRANCE_BIAS_NV 0x8719
+#define GL_TEXTURE_BORDER_VALUES_NV 0x871A
+#define GL_TEXTURE_HI_SIZE_NV 0x871B
+#define GL_TEXTURE_LO_SIZE_NV 0x871C
+#define GL_TEXTURE_DS_SIZE_NV 0x871D
+#define GL_TEXTURE_DT_SIZE_NV 0x871E
+#define GL_TEXTURE_MAG_SIZE_NV 0x871F
+#endif /* GL_NV_texture_shader */
+
+#ifndef GL_NV_texture_shader2
+#define GL_NV_texture_shader2 1
+#define GL_DOT_PRODUCT_TEXTURE_3D_NV 0x86EF
+#endif /* GL_NV_texture_shader2 */
+
+#ifndef GL_NV_texture_shader3
+#define GL_NV_texture_shader3 1
+#define GL_OFFSET_PROJECTIVE_TEXTURE_2D_NV 0x8850
+#define GL_OFFSET_PROJECTIVE_TEXTURE_2D_SCALE_NV 0x8851
+#define GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_NV 0x8852
+#define GL_OFFSET_PROJECTIVE_TEXTURE_RECTANGLE_SCALE_NV 0x8853
+#define GL_OFFSET_HILO_TEXTURE_2D_NV 0x8854
+#define GL_OFFSET_HILO_TEXTURE_RECTANGLE_NV 0x8855
+#define GL_OFFSET_HILO_PROJECTIVE_TEXTURE_2D_NV 0x8856
+#define GL_OFFSET_HILO_PROJECTIVE_TEXTURE_RECTANGLE_NV 0x8857
+#define GL_DEPENDENT_HILO_TEXTURE_2D_NV 0x8858
+#define GL_DEPENDENT_RGB_TEXTURE_3D_NV 0x8859
+#define GL_DEPENDENT_RGB_TEXTURE_CUBE_MAP_NV 0x885A
+#define GL_DOT_PRODUCT_PASS_THROUGH_NV 0x885B
+#define GL_DOT_PRODUCT_TEXTURE_1D_NV 0x885C
+#define GL_DOT_PRODUCT_AFFINE_DEPTH_REPLACE_NV 0x885D
+#define GL_HILO8_NV 0x885E
+#define GL_SIGNED_HILO8_NV 0x885F
+#define GL_FORCE_BLUE_TO_ONE_NV 0x8860
+#endif /* GL_NV_texture_shader3 */
+
+#ifndef GL_NV_transform_feedback
+#define GL_NV_transform_feedback 1
+#define GL_BACK_PRIMARY_COLOR_NV 0x8C77
+#define GL_BACK_SECONDARY_COLOR_NV 0x8C78
+#define GL_TEXTURE_COORD_NV 0x8C79
+#define GL_CLIP_DISTANCE_NV 0x8C7A
+#define GL_VERTEX_ID_NV 0x8C7B
+#define GL_PRIMITIVE_ID_NV 0x8C7C
+#define GL_GENERIC_ATTRIB_NV 0x8C7D
+#define GL_TRANSFORM_FEEDBACK_ATTRIBS_NV 0x8C7E
+#define GL_TRANSFORM_FEEDBACK_BUFFER_MODE_NV 0x8C7F
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_NV 0x8C80
+#define GL_ACTIVE_VARYINGS_NV 0x8C81
+#define GL_ACTIVE_VARYING_MAX_LENGTH_NV 0x8C82
+#define GL_TRANSFORM_FEEDBACK_VARYINGS_NV 0x8C83
+#define GL_TRANSFORM_FEEDBACK_BUFFER_START_NV 0x8C84
+#define GL_TRANSFORM_FEEDBACK_BUFFER_SIZE_NV 0x8C85
+#define GL_TRANSFORM_FEEDBACK_RECORD_NV 0x8C86
+#define GL_PRIMITIVES_GENERATED_NV 0x8C87
+#define GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN_NV 0x8C88
+#define GL_RASTERIZER_DISCARD_NV 0x8C89
+#define GL_MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_NV 0x8C8A
+#define GL_MAX_TRANSFORM_FEEDBACK_SEPARATE_ATTRIBS_NV 0x8C8B
+#define GL_INTERLEAVED_ATTRIBS_NV 0x8C8C
+#define GL_SEPARATE_ATTRIBS_NV 0x8C8D
+#define GL_TRANSFORM_FEEDBACK_BUFFER_NV 0x8C8E
+#define GL_TRANSFORM_FEEDBACK_BUFFER_BINDING_NV 0x8C8F
+#define GL_LAYER_NV 0x8DAA
+#define GL_NEXT_BUFFER_NV -2
+#define GL_SKIP_COMPONENTS4_NV -3
+#define GL_SKIP_COMPONENTS3_NV -4
+#define GL_SKIP_COMPONENTS2_NV -5
+#define GL_SKIP_COMPONENTS1_NV -6
+typedef void (APIENTRYP PFNGLBEGINTRANSFORMFEEDBACKNVPROC) (GLenum primitiveMode);
+typedef void (APIENTRYP PFNGLENDTRANSFORMFEEDBACKNVPROC) (void);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKATTRIBSNVPROC) (GLsizei count, const GLint *attribs, GLenum bufferMode);
+typedef void (APIENTRYP PFNGLBINDBUFFERRANGENVPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+typedef void (APIENTRYP PFNGLBINDBUFFEROFFSETNVPROC) (GLenum target, GLuint index, GLuint buffer, GLintptr offset);
+typedef void (APIENTRYP PFNGLBINDBUFFERBASENVPROC) (GLenum target, GLuint index, GLuint buffer);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKVARYINGSNVPROC) (GLuint program, GLsizei count, const GLint *locations, GLenum bufferMode);
+typedef void (APIENTRYP PFNGLACTIVEVARYINGNVPROC) (GLuint program, const GLchar *name);
+typedef GLint (APIENTRYP PFNGLGETVARYINGLOCATIONNVPROC) (GLuint program, const GLchar *name);
+typedef void (APIENTRYP PFNGLGETACTIVEVARYINGNVPROC) (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+typedef void (APIENTRYP PFNGLGETTRANSFORMFEEDBACKVARYINGNVPROC) (GLuint program, GLuint index, GLint *location);
+typedef void (APIENTRYP PFNGLTRANSFORMFEEDBACKSTREAMATTRIBSNVPROC) (GLsizei count, const GLint *attribs, GLsizei nbuffers, const GLint *bufstreams, GLenum bufferMode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginTransformFeedbackNV (GLenum primitiveMode);
+GLAPI void APIENTRY glEndTransformFeedbackNV (void);
+GLAPI void APIENTRY glTransformFeedbackAttribsNV (GLsizei count, const GLint *attribs, GLenum bufferMode);
+GLAPI void APIENTRY glBindBufferRangeNV (GLenum target, GLuint index, GLuint buffer, GLintptr offset, GLsizeiptr size);
+GLAPI void APIENTRY glBindBufferOffsetNV (GLenum target, GLuint index, GLuint buffer, GLintptr offset);
+GLAPI void APIENTRY glBindBufferBaseNV (GLenum target, GLuint index, GLuint buffer);
+GLAPI void APIENTRY glTransformFeedbackVaryingsNV (GLuint program, GLsizei count, const GLint *locations, GLenum bufferMode);
+GLAPI void APIENTRY glActiveVaryingNV (GLuint program, const GLchar *name);
+GLAPI GLint APIENTRY glGetVaryingLocationNV (GLuint program, const GLchar *name);
+GLAPI void APIENTRY glGetActiveVaryingNV (GLuint program, GLuint index, GLsizei bufSize, GLsizei *length, GLsizei *size, GLenum *type, GLchar *name);
+GLAPI void APIENTRY glGetTransformFeedbackVaryingNV (GLuint program, GLuint index, GLint *location);
+GLAPI void APIENTRY glTransformFeedbackStreamAttribsNV (GLsizei count, const GLint *attribs, GLsizei nbuffers, const GLint *bufstreams, GLenum bufferMode);
+#endif
+#endif /* GL_NV_transform_feedback */
+
+#ifndef GL_NV_transform_feedback2
+#define GL_NV_transform_feedback2 1
+#define GL_TRANSFORM_FEEDBACK_NV 0x8E22
+#define GL_TRANSFORM_FEEDBACK_BUFFER_PAUSED_NV 0x8E23
+#define GL_TRANSFORM_FEEDBACK_BUFFER_ACTIVE_NV 0x8E24
+#define GL_TRANSFORM_FEEDBACK_BINDING_NV 0x8E25
+typedef void (APIENTRYP PFNGLBINDTRANSFORMFEEDBACKNVPROC) (GLenum target, GLuint id);
+typedef void (APIENTRYP PFNGLDELETETRANSFORMFEEDBACKSNVPROC) (GLsizei n, const GLuint *ids);
+typedef void (APIENTRYP PFNGLGENTRANSFORMFEEDBACKSNVPROC) (GLsizei n, GLuint *ids);
+typedef GLboolean (APIENTRYP PFNGLISTRANSFORMFEEDBACKNVPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLPAUSETRANSFORMFEEDBACKNVPROC) (void);
+typedef void (APIENTRYP PFNGLRESUMETRANSFORMFEEDBACKNVPROC) (void);
+typedef void (APIENTRYP PFNGLDRAWTRANSFORMFEEDBACKNVPROC) (GLenum mode, GLuint id);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBindTransformFeedbackNV (GLenum target, GLuint id);
+GLAPI void APIENTRY glDeleteTransformFeedbacksNV (GLsizei n, const GLuint *ids);
+GLAPI void APIENTRY glGenTransformFeedbacksNV (GLsizei n, GLuint *ids);
+GLAPI GLboolean APIENTRY glIsTransformFeedbackNV (GLuint id);
+GLAPI void APIENTRY glPauseTransformFeedbackNV (void);
+GLAPI void APIENTRY glResumeTransformFeedbackNV (void);
+GLAPI void APIENTRY glDrawTransformFeedbackNV (GLenum mode, GLuint id);
+#endif
+#endif /* GL_NV_transform_feedback2 */
+
+#ifndef GL_NV_uniform_buffer_unified_memory
+#define GL_NV_uniform_buffer_unified_memory 1
+#define GL_UNIFORM_BUFFER_UNIFIED_NV 0x936E
+#define GL_UNIFORM_BUFFER_ADDRESS_NV 0x936F
+#define GL_UNIFORM_BUFFER_LENGTH_NV 0x9370
+#endif /* GL_NV_uniform_buffer_unified_memory */
+
+#ifndef GL_NV_vdpau_interop
+#define GL_NV_vdpau_interop 1
+typedef GLintptr GLvdpauSurfaceNV;
+#define GL_SURFACE_STATE_NV 0x86EB
+#define GL_SURFACE_REGISTERED_NV 0x86FD
+#define GL_SURFACE_MAPPED_NV 0x8700
+#define GL_WRITE_DISCARD_NV 0x88BE
+typedef void (APIENTRYP PFNGLVDPAUINITNVPROC) (const void *vdpDevice, const void *getProcAddress);
+typedef void (APIENTRYP PFNGLVDPAUFININVPROC) (void);
+typedef GLvdpauSurfaceNV (APIENTRYP PFNGLVDPAUREGISTERVIDEOSURFACENVPROC) (const void *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames);
+typedef GLvdpauSurfaceNV (APIENTRYP PFNGLVDPAUREGISTEROUTPUTSURFACENVPROC) (const void *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames);
+typedef GLboolean (APIENTRYP PFNGLVDPAUISSURFACENVPROC) (GLvdpauSurfaceNV surface);
+typedef void (APIENTRYP PFNGLVDPAUUNREGISTERSURFACENVPROC) (GLvdpauSurfaceNV surface);
+typedef void (APIENTRYP PFNGLVDPAUGETSURFACEIVNVPROC) (GLvdpauSurfaceNV surface, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
+typedef void (APIENTRYP PFNGLVDPAUSURFACEACCESSNVPROC) (GLvdpauSurfaceNV surface, GLenum access);
+typedef void (APIENTRYP PFNGLVDPAUMAPSURFACESNVPROC) (GLsizei numSurfaces, const GLvdpauSurfaceNV *surfaces);
+typedef void (APIENTRYP PFNGLVDPAUUNMAPSURFACESNVPROC) (GLsizei numSurface, const GLvdpauSurfaceNV *surfaces);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVDPAUInitNV (const void *vdpDevice, const void *getProcAddress);
+GLAPI void APIENTRY glVDPAUFiniNV (void);
+GLAPI GLvdpauSurfaceNV APIENTRY glVDPAURegisterVideoSurfaceNV (const void *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames);
+GLAPI GLvdpauSurfaceNV APIENTRY glVDPAURegisterOutputSurfaceNV (const void *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames);
+GLAPI GLboolean APIENTRY glVDPAUIsSurfaceNV (GLvdpauSurfaceNV surface);
+GLAPI void APIENTRY glVDPAUUnregisterSurfaceNV (GLvdpauSurfaceNV surface);
+GLAPI void APIENTRY glVDPAUGetSurfaceivNV (GLvdpauSurfaceNV surface, GLenum pname, GLsizei bufSize, GLsizei *length, GLint *values);
+GLAPI void APIENTRY glVDPAUSurfaceAccessNV (GLvdpauSurfaceNV surface, GLenum access);
+GLAPI void APIENTRY glVDPAUMapSurfacesNV (GLsizei numSurfaces, const GLvdpauSurfaceNV *surfaces);
+GLAPI void APIENTRY glVDPAUUnmapSurfacesNV (GLsizei numSurface, const GLvdpauSurfaceNV *surfaces);
+#endif
+#endif /* GL_NV_vdpau_interop */
+
+#ifndef GL_NV_vdpau_interop2
+#define GL_NV_vdpau_interop2 1
+typedef GLvdpauSurfaceNV (APIENTRYP PFNGLVDPAUREGISTERVIDEOSURFACEWITHPICTURESTRUCTURENVPROC) (const void *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames, GLboolean isFrameStructure);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLvdpauSurfaceNV APIENTRY glVDPAURegisterVideoSurfaceWithPictureStructureNV (const void *vdpSurface, GLenum target, GLsizei numTextureNames, const GLuint *textureNames, GLboolean isFrameStructure);
+#endif
+#endif /* GL_NV_vdpau_interop2 */
+
+#ifndef GL_NV_vertex_array_range
+#define GL_NV_vertex_array_range 1
+#define GL_VERTEX_ARRAY_RANGE_NV 0x851D
+#define GL_VERTEX_ARRAY_RANGE_LENGTH_NV 0x851E
+#define GL_VERTEX_ARRAY_RANGE_VALID_NV 0x851F
+#define GL_MAX_VERTEX_ARRAY_RANGE_ELEMENT_NV 0x8520
+#define GL_VERTEX_ARRAY_RANGE_POINTER_NV 0x8521
+typedef void (APIENTRYP PFNGLFLUSHVERTEXARRAYRANGENVPROC) (void);
+typedef void (APIENTRYP PFNGLVERTEXARRAYRANGENVPROC) (GLsizei length, const void *pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFlushVertexArrayRangeNV (void);
+GLAPI void APIENTRY glVertexArrayRangeNV (GLsizei length, const void *pointer);
+#endif
+#endif /* GL_NV_vertex_array_range */
+
+#ifndef GL_NV_vertex_array_range2
+#define GL_NV_vertex_array_range2 1
+#define GL_VERTEX_ARRAY_RANGE_WITHOUT_FLUSH_NV 0x8533
+#endif /* GL_NV_vertex_array_range2 */
+
+#ifndef GL_NV_vertex_attrib_integer_64bit
+#define GL_NV_vertex_attrib_integer_64bit 1
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1I64NVPROC) (GLuint index, GLint64EXT x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2I64NVPROC) (GLuint index, GLint64EXT x, GLint64EXT y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3I64NVPROC) (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4I64NVPROC) (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1I64VNVPROC) (GLuint index, const GLint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2I64VNVPROC) (GLuint index, const GLint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3I64VNVPROC) (GLuint index, const GLint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4I64VNVPROC) (GLuint index, const GLint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1UI64NVPROC) (GLuint index, GLuint64EXT x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2UI64NVPROC) (GLuint index, GLuint64EXT x, GLuint64EXT y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3UI64NVPROC) (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4UI64NVPROC) (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL1UI64VNVPROC) (GLuint index, const GLuint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL2UI64VNVPROC) (GLuint index, const GLuint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL3UI64VNVPROC) (GLuint index, const GLuint64EXT *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBL4UI64VNVPROC) (GLuint index, const GLuint64EXT *v);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLI64VNVPROC) (GLuint index, GLenum pname, GLint64EXT *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBLUI64VNVPROC) (GLuint index, GLenum pname, GLuint64EXT *params);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBLFORMATNVPROC) (GLuint index, GLint size, GLenum type, GLsizei stride);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttribL1i64NV (GLuint index, GLint64EXT x);
+GLAPI void APIENTRY glVertexAttribL2i64NV (GLuint index, GLint64EXT x, GLint64EXT y);
+GLAPI void APIENTRY glVertexAttribL3i64NV (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z);
+GLAPI void APIENTRY glVertexAttribL4i64NV (GLuint index, GLint64EXT x, GLint64EXT y, GLint64EXT z, GLint64EXT w);
+GLAPI void APIENTRY glVertexAttribL1i64vNV (GLuint index, const GLint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL2i64vNV (GLuint index, const GLint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL3i64vNV (GLuint index, const GLint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL4i64vNV (GLuint index, const GLint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL1ui64NV (GLuint index, GLuint64EXT x);
+GLAPI void APIENTRY glVertexAttribL2ui64NV (GLuint index, GLuint64EXT x, GLuint64EXT y);
+GLAPI void APIENTRY glVertexAttribL3ui64NV (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z);
+GLAPI void APIENTRY glVertexAttribL4ui64NV (GLuint index, GLuint64EXT x, GLuint64EXT y, GLuint64EXT z, GLuint64EXT w);
+GLAPI void APIENTRY glVertexAttribL1ui64vNV (GLuint index, const GLuint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL2ui64vNV (GLuint index, const GLuint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL3ui64vNV (GLuint index, const GLuint64EXT *v);
+GLAPI void APIENTRY glVertexAttribL4ui64vNV (GLuint index, const GLuint64EXT *v);
+GLAPI void APIENTRY glGetVertexAttribLi64vNV (GLuint index, GLenum pname, GLint64EXT *params);
+GLAPI void APIENTRY glGetVertexAttribLui64vNV (GLuint index, GLenum pname, GLuint64EXT *params);
+GLAPI void APIENTRY glVertexAttribLFormatNV (GLuint index, GLint size, GLenum type, GLsizei stride);
+#endif
+#endif /* GL_NV_vertex_attrib_integer_64bit */
+
+#ifndef GL_NV_vertex_buffer_unified_memory
+#define GL_NV_vertex_buffer_unified_memory 1
+#define GL_VERTEX_ATTRIB_ARRAY_UNIFIED_NV 0x8F1E
+#define GL_ELEMENT_ARRAY_UNIFIED_NV 0x8F1F
+#define GL_VERTEX_ATTRIB_ARRAY_ADDRESS_NV 0x8F20
+#define GL_VERTEX_ARRAY_ADDRESS_NV 0x8F21
+#define GL_NORMAL_ARRAY_ADDRESS_NV 0x8F22
+#define GL_COLOR_ARRAY_ADDRESS_NV 0x8F23
+#define GL_INDEX_ARRAY_ADDRESS_NV 0x8F24
+#define GL_TEXTURE_COORD_ARRAY_ADDRESS_NV 0x8F25
+#define GL_EDGE_FLAG_ARRAY_ADDRESS_NV 0x8F26
+#define GL_SECONDARY_COLOR_ARRAY_ADDRESS_NV 0x8F27
+#define GL_FOG_COORD_ARRAY_ADDRESS_NV 0x8F28
+#define GL_ELEMENT_ARRAY_ADDRESS_NV 0x8F29
+#define GL_VERTEX_ATTRIB_ARRAY_LENGTH_NV 0x8F2A
+#define GL_VERTEX_ARRAY_LENGTH_NV 0x8F2B
+#define GL_NORMAL_ARRAY_LENGTH_NV 0x8F2C
+#define GL_COLOR_ARRAY_LENGTH_NV 0x8F2D
+#define GL_INDEX_ARRAY_LENGTH_NV 0x8F2E
+#define GL_TEXTURE_COORD_ARRAY_LENGTH_NV 0x8F2F
+#define GL_EDGE_FLAG_ARRAY_LENGTH_NV 0x8F30
+#define GL_SECONDARY_COLOR_ARRAY_LENGTH_NV 0x8F31
+#define GL_FOG_COORD_ARRAY_LENGTH_NV 0x8F32
+#define GL_ELEMENT_ARRAY_LENGTH_NV 0x8F33
+#define GL_DRAW_INDIRECT_UNIFIED_NV 0x8F40
+#define GL_DRAW_INDIRECT_ADDRESS_NV 0x8F41
+#define GL_DRAW_INDIRECT_LENGTH_NV 0x8F42
+typedef void (APIENTRYP PFNGLBUFFERADDRESSRANGENVPROC) (GLenum pname, GLuint index, GLuint64EXT address, GLsizeiptr length);
+typedef void (APIENTRYP PFNGLVERTEXFORMATNVPROC) (GLint size, GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLNORMALFORMATNVPROC) (GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLCOLORFORMATNVPROC) (GLint size, GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLINDEXFORMATNVPROC) (GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLTEXCOORDFORMATNVPROC) (GLint size, GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLEDGEFLAGFORMATNVPROC) (GLsizei stride);
+typedef void (APIENTRYP PFNGLSECONDARYCOLORFORMATNVPROC) (GLint size, GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLFOGCOORDFORMATNVPROC) (GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBFORMATNVPROC) (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBIFORMATNVPROC) (GLuint index, GLint size, GLenum type, GLsizei stride);
+typedef void (APIENTRYP PFNGLGETINTEGERUI64I_VNVPROC) (GLenum value, GLuint index, GLuint64EXT *result);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBufferAddressRangeNV (GLenum pname, GLuint index, GLuint64EXT address, GLsizeiptr length);
+GLAPI void APIENTRY glVertexFormatNV (GLint size, GLenum type, GLsizei stride);
+GLAPI void APIENTRY glNormalFormatNV (GLenum type, GLsizei stride);
+GLAPI void APIENTRY glColorFormatNV (GLint size, GLenum type, GLsizei stride);
+GLAPI void APIENTRY glIndexFormatNV (GLenum type, GLsizei stride);
+GLAPI void APIENTRY glTexCoordFormatNV (GLint size, GLenum type, GLsizei stride);
+GLAPI void APIENTRY glEdgeFlagFormatNV (GLsizei stride);
+GLAPI void APIENTRY glSecondaryColorFormatNV (GLint size, GLenum type, GLsizei stride);
+GLAPI void APIENTRY glFogCoordFormatNV (GLenum type, GLsizei stride);
+GLAPI void APIENTRY glVertexAttribFormatNV (GLuint index, GLint size, GLenum type, GLboolean normalized, GLsizei stride);
+GLAPI void APIENTRY glVertexAttribIFormatNV (GLuint index, GLint size, GLenum type, GLsizei stride);
+GLAPI void APIENTRY glGetIntegerui64i_vNV (GLenum value, GLuint index, GLuint64EXT *result);
+#endif
+#endif /* GL_NV_vertex_buffer_unified_memory */
+
+#ifndef GL_NV_vertex_program
+#define GL_NV_vertex_program 1
+#define GL_VERTEX_PROGRAM_NV 0x8620
+#define GL_VERTEX_STATE_PROGRAM_NV 0x8621
+#define GL_ATTRIB_ARRAY_SIZE_NV 0x8623
+#define GL_ATTRIB_ARRAY_STRIDE_NV 0x8624
+#define GL_ATTRIB_ARRAY_TYPE_NV 0x8625
+#define GL_CURRENT_ATTRIB_NV 0x8626
+#define GL_PROGRAM_LENGTH_NV 0x8627
+#define GL_PROGRAM_STRING_NV 0x8628
+#define GL_MODELVIEW_PROJECTION_NV 0x8629
+#define GL_IDENTITY_NV 0x862A
+#define GL_INVERSE_NV 0x862B
+#define GL_TRANSPOSE_NV 0x862C
+#define GL_INVERSE_TRANSPOSE_NV 0x862D
+#define GL_MAX_TRACK_MATRIX_STACK_DEPTH_NV 0x862E
+#define GL_MAX_TRACK_MATRICES_NV 0x862F
+#define GL_MATRIX0_NV 0x8630
+#define GL_MATRIX1_NV 0x8631
+#define GL_MATRIX2_NV 0x8632
+#define GL_MATRIX3_NV 0x8633
+#define GL_MATRIX4_NV 0x8634
+#define GL_MATRIX5_NV 0x8635
+#define GL_MATRIX6_NV 0x8636
+#define GL_MATRIX7_NV 0x8637
+#define GL_CURRENT_MATRIX_STACK_DEPTH_NV 0x8640
+#define GL_CURRENT_MATRIX_NV 0x8641
+#define GL_VERTEX_PROGRAM_POINT_SIZE_NV 0x8642
+#define GL_VERTEX_PROGRAM_TWO_SIDE_NV 0x8643
+#define GL_PROGRAM_PARAMETER_NV 0x8644
+#define GL_ATTRIB_ARRAY_POINTER_NV 0x8645
+#define GL_PROGRAM_TARGET_NV 0x8646
+#define GL_PROGRAM_RESIDENT_NV 0x8647
+#define GL_TRACK_MATRIX_NV 0x8648
+#define GL_TRACK_MATRIX_TRANSFORM_NV 0x8649
+#define GL_VERTEX_PROGRAM_BINDING_NV 0x864A
+#define GL_PROGRAM_ERROR_POSITION_NV 0x864B
+#define GL_VERTEX_ATTRIB_ARRAY0_NV 0x8650
+#define GL_VERTEX_ATTRIB_ARRAY1_NV 0x8651
+#define GL_VERTEX_ATTRIB_ARRAY2_NV 0x8652
+#define GL_VERTEX_ATTRIB_ARRAY3_NV 0x8653
+#define GL_VERTEX_ATTRIB_ARRAY4_NV 0x8654
+#define GL_VERTEX_ATTRIB_ARRAY5_NV 0x8655
+#define GL_VERTEX_ATTRIB_ARRAY6_NV 0x8656
+#define GL_VERTEX_ATTRIB_ARRAY7_NV 0x8657
+#define GL_VERTEX_ATTRIB_ARRAY8_NV 0x8658
+#define GL_VERTEX_ATTRIB_ARRAY9_NV 0x8659
+#define GL_VERTEX_ATTRIB_ARRAY10_NV 0x865A
+#define GL_VERTEX_ATTRIB_ARRAY11_NV 0x865B
+#define GL_VERTEX_ATTRIB_ARRAY12_NV 0x865C
+#define GL_VERTEX_ATTRIB_ARRAY13_NV 0x865D
+#define GL_VERTEX_ATTRIB_ARRAY14_NV 0x865E
+#define GL_VERTEX_ATTRIB_ARRAY15_NV 0x865F
+#define GL_MAP1_VERTEX_ATTRIB0_4_NV 0x8660
+#define GL_MAP1_VERTEX_ATTRIB1_4_NV 0x8661
+#define GL_MAP1_VERTEX_ATTRIB2_4_NV 0x8662
+#define GL_MAP1_VERTEX_ATTRIB3_4_NV 0x8663
+#define GL_MAP1_VERTEX_ATTRIB4_4_NV 0x8664
+#define GL_MAP1_VERTEX_ATTRIB5_4_NV 0x8665
+#define GL_MAP1_VERTEX_ATTRIB6_4_NV 0x8666
+#define GL_MAP1_VERTEX_ATTRIB7_4_NV 0x8667
+#define GL_MAP1_VERTEX_ATTRIB8_4_NV 0x8668
+#define GL_MAP1_VERTEX_ATTRIB9_4_NV 0x8669
+#define GL_MAP1_VERTEX_ATTRIB10_4_NV 0x866A
+#define GL_MAP1_VERTEX_ATTRIB11_4_NV 0x866B
+#define GL_MAP1_VERTEX_ATTRIB12_4_NV 0x866C
+#define GL_MAP1_VERTEX_ATTRIB13_4_NV 0x866D
+#define GL_MAP1_VERTEX_ATTRIB14_4_NV 0x866E
+#define GL_MAP1_VERTEX_ATTRIB15_4_NV 0x866F
+#define GL_MAP2_VERTEX_ATTRIB0_4_NV 0x8670
+#define GL_MAP2_VERTEX_ATTRIB1_4_NV 0x8671
+#define GL_MAP2_VERTEX_ATTRIB2_4_NV 0x8672
+#define GL_MAP2_VERTEX_ATTRIB3_4_NV 0x8673
+#define GL_MAP2_VERTEX_ATTRIB4_4_NV 0x8674
+#define GL_MAP2_VERTEX_ATTRIB5_4_NV 0x8675
+#define GL_MAP2_VERTEX_ATTRIB6_4_NV 0x8676
+#define GL_MAP2_VERTEX_ATTRIB7_4_NV 0x8677
+#define GL_MAP2_VERTEX_ATTRIB8_4_NV 0x8678
+#define GL_MAP2_VERTEX_ATTRIB9_4_NV 0x8679
+#define GL_MAP2_VERTEX_ATTRIB10_4_NV 0x867A
+#define GL_MAP2_VERTEX_ATTRIB11_4_NV 0x867B
+#define GL_MAP2_VERTEX_ATTRIB12_4_NV 0x867C
+#define GL_MAP2_VERTEX_ATTRIB13_4_NV 0x867D
+#define GL_MAP2_VERTEX_ATTRIB14_4_NV 0x867E
+#define GL_MAP2_VERTEX_ATTRIB15_4_NV 0x867F
+typedef GLboolean (APIENTRYP PFNGLAREPROGRAMSRESIDENTNVPROC) (GLsizei n, const GLuint *programs, GLboolean *residences);
+typedef void (APIENTRYP PFNGLBINDPROGRAMNVPROC) (GLenum target, GLuint id);
+typedef void (APIENTRYP PFNGLDELETEPROGRAMSNVPROC) (GLsizei n, const GLuint *programs);
+typedef void (APIENTRYP PFNGLEXECUTEPROGRAMNVPROC) (GLenum target, GLuint id, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGENPROGRAMSNVPROC) (GLsizei n, GLuint *programs);
+typedef void (APIENTRYP PFNGLGETPROGRAMPARAMETERDVNVPROC) (GLenum target, GLuint index, GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMPARAMETERFVNVPROC) (GLenum target, GLuint index, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMIVNVPROC) (GLuint id, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETPROGRAMSTRINGNVPROC) (GLuint id, GLenum pname, GLubyte *program);
+typedef void (APIENTRYP PFNGLGETTRACKMATRIXIVNVPROC) (GLenum target, GLuint address, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBDVNVPROC) (GLuint index, GLenum pname, GLdouble *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBFVNVPROC) (GLuint index, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIVNVPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBPOINTERVNVPROC) (GLuint index, GLenum pname, void **pointer);
+typedef GLboolean (APIENTRYP PFNGLISPROGRAMNVPROC) (GLuint id);
+typedef void (APIENTRYP PFNGLLOADPROGRAMNVPROC) (GLenum target, GLuint id, GLsizei len, const GLubyte *program);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4DNVPROC) (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4DVNVPROC) (GLenum target, GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4FNVPROC) (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETER4FVNVPROC) (GLenum target, GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETERS4DVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLdouble *v);
+typedef void (APIENTRYP PFNGLPROGRAMPARAMETERS4FVNVPROC) (GLenum target, GLuint index, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREQUESTRESIDENTPROGRAMSNVPROC) (GLsizei n, const GLuint *programs);
+typedef void (APIENTRYP PFNGLTRACKMATRIXNVPROC) (GLenum target, GLuint address, GLenum matrix, GLenum transform);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBPOINTERNVPROC) (GLuint index, GLint fsize, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1DNVPROC) (GLuint index, GLdouble x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1DVNVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1FNVPROC) (GLuint index, GLfloat x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1FVNVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1SNVPROC) (GLuint index, GLshort x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB1SVNVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2DNVPROC) (GLuint index, GLdouble x, GLdouble y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2DVNVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2FNVPROC) (GLuint index, GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2FVNVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2SNVPROC) (GLuint index, GLshort x, GLshort y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB2SVNVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3DNVPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3DVNVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3FNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3FVNVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3SNVPROC) (GLuint index, GLshort x, GLshort y, GLshort z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB3SVNVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4DNVPROC) (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4DVNVPROC) (GLuint index, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4FNVPROC) (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4FVNVPROC) (GLuint index, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SNVPROC) (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4SVNVPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBNVPROC) (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIB4UBVNVPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS1DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS1FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS1SVNVPROC) (GLuint index, GLsizei count, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS2DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS2FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS2SVNVPROC) (GLuint index, GLsizei count, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS3DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS3FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS3SVNVPROC) (GLuint index, GLsizei count, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS4DVNVPROC) (GLuint index, GLsizei count, const GLdouble *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS4FVNVPROC) (GLuint index, GLsizei count, const GLfloat *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS4SVNVPROC) (GLuint index, GLsizei count, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBS4UBVNVPROC) (GLuint index, GLsizei count, const GLubyte *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLboolean APIENTRY glAreProgramsResidentNV (GLsizei n, const GLuint *programs, GLboolean *residences);
+GLAPI void APIENTRY glBindProgramNV (GLenum target, GLuint id);
+GLAPI void APIENTRY glDeleteProgramsNV (GLsizei n, const GLuint *programs);
+GLAPI void APIENTRY glExecuteProgramNV (GLenum target, GLuint id, const GLfloat *params);
+GLAPI void APIENTRY glGenProgramsNV (GLsizei n, GLuint *programs);
+GLAPI void APIENTRY glGetProgramParameterdvNV (GLenum target, GLuint index, GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glGetProgramParameterfvNV (GLenum target, GLuint index, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetProgramivNV (GLuint id, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetProgramStringNV (GLuint id, GLenum pname, GLubyte *program);
+GLAPI void APIENTRY glGetTrackMatrixivNV (GLenum target, GLuint address, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribdvNV (GLuint index, GLenum pname, GLdouble *params);
+GLAPI void APIENTRY glGetVertexAttribfvNV (GLuint index, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetVertexAttribivNV (GLuint index, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribPointervNV (GLuint index, GLenum pname, void **pointer);
+GLAPI GLboolean APIENTRY glIsProgramNV (GLuint id);
+GLAPI void APIENTRY glLoadProgramNV (GLenum target, GLuint id, GLsizei len, const GLubyte *program);
+GLAPI void APIENTRY glProgramParameter4dNV (GLenum target, GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glProgramParameter4dvNV (GLenum target, GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glProgramParameter4fNV (GLenum target, GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glProgramParameter4fvNV (GLenum target, GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glProgramParameters4dvNV (GLenum target, GLuint index, GLsizei count, const GLdouble *v);
+GLAPI void APIENTRY glProgramParameters4fvNV (GLenum target, GLuint index, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glRequestResidentProgramsNV (GLsizei n, const GLuint *programs);
+GLAPI void APIENTRY glTrackMatrixNV (GLenum target, GLuint address, GLenum matrix, GLenum transform);
+GLAPI void APIENTRY glVertexAttribPointerNV (GLuint index, GLint fsize, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glVertexAttrib1dNV (GLuint index, GLdouble x);
+GLAPI void APIENTRY glVertexAttrib1dvNV (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib1fNV (GLuint index, GLfloat x);
+GLAPI void APIENTRY glVertexAttrib1fvNV (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib1sNV (GLuint index, GLshort x);
+GLAPI void APIENTRY glVertexAttrib1svNV (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib2dNV (GLuint index, GLdouble x, GLdouble y);
+GLAPI void APIENTRY glVertexAttrib2dvNV (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib2fNV (GLuint index, GLfloat x, GLfloat y);
+GLAPI void APIENTRY glVertexAttrib2fvNV (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib2sNV (GLuint index, GLshort x, GLshort y);
+GLAPI void APIENTRY glVertexAttrib2svNV (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib3dNV (GLuint index, GLdouble x, GLdouble y, GLdouble z);
+GLAPI void APIENTRY glVertexAttrib3dvNV (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib3fNV (GLuint index, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glVertexAttrib3fvNV (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib3sNV (GLuint index, GLshort x, GLshort y, GLshort z);
+GLAPI void APIENTRY glVertexAttrib3svNV (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4dNV (GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+GLAPI void APIENTRY glVertexAttrib4dvNV (GLuint index, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttrib4fNV (GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glVertexAttrib4fvNV (GLuint index, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttrib4sNV (GLuint index, GLshort x, GLshort y, GLshort z, GLshort w);
+GLAPI void APIENTRY glVertexAttrib4svNV (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttrib4ubNV (GLuint index, GLubyte x, GLubyte y, GLubyte z, GLubyte w);
+GLAPI void APIENTRY glVertexAttrib4ubvNV (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttribs1dvNV (GLuint index, GLsizei count, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribs1fvNV (GLuint index, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttribs1svNV (GLuint index, GLsizei count, const GLshort *v);
+GLAPI void APIENTRY glVertexAttribs2dvNV (GLuint index, GLsizei count, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribs2fvNV (GLuint index, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttribs2svNV (GLuint index, GLsizei count, const GLshort *v);
+GLAPI void APIENTRY glVertexAttribs3dvNV (GLuint index, GLsizei count, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribs3fvNV (GLuint index, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttribs3svNV (GLuint index, GLsizei count, const GLshort *v);
+GLAPI void APIENTRY glVertexAttribs4dvNV (GLuint index, GLsizei count, const GLdouble *v);
+GLAPI void APIENTRY glVertexAttribs4fvNV (GLuint index, GLsizei count, const GLfloat *v);
+GLAPI void APIENTRY glVertexAttribs4svNV (GLuint index, GLsizei count, const GLshort *v);
+GLAPI void APIENTRY glVertexAttribs4ubvNV (GLuint index, GLsizei count, const GLubyte *v);
+#endif
+#endif /* GL_NV_vertex_program */
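+
+/*
+ * Usage note (illustrative; not part of the Khronos-generated text): every
+ * extension block in this file follows the same pattern -- enum tokens,
+ * PFNGL... function-pointer typedefs, and direct prototypes that are
+ * compiled in only when the application defines GL_GLEXT_PROTOTYPES before
+ * including the header, e.g.:
+ *
+ *   #define GL_GLEXT_PROTOTYPES
+ *   #include <GL/glext.h>
+ *
+ * Portable code usually leaves GL_GLEXT_PROTOTYPES undefined and resolves
+ * the entry points at run time through the typedefs instead; a sketch of
+ * that follows the GL_NV_viewport_swizzle block below.
+ */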
+
+#ifndef GL_NV_vertex_program1_1
+#define GL_NV_vertex_program1_1 1
+#endif /* GL_NV_vertex_program1_1 */
+
+#ifndef GL_NV_vertex_program2
+#define GL_NV_vertex_program2 1
+#endif /* GL_NV_vertex_program2 */
+
+#ifndef GL_NV_vertex_program2_option
+#define GL_NV_vertex_program2_option 1
+#endif /* GL_NV_vertex_program2_option */
+
+#ifndef GL_NV_vertex_program3
+#define GL_NV_vertex_program3 1
+#endif /* GL_NV_vertex_program3 */
+
+#ifndef GL_NV_vertex_program4
+#define GL_NV_vertex_program4 1
+#define GL_VERTEX_ATTRIB_ARRAY_INTEGER_NV 0x88FD
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IEXTPROC) (GLuint index, GLint x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IEXTPROC) (GLuint index, GLint x, GLint y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IEXTPROC) (GLuint index, GLint x, GLint y, GLint z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IEXTPROC) (GLuint index, GLint x, GLint y, GLint z, GLint w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIEXTPROC) (GLuint index, GLuint x);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIEXTPROC) (GLuint index, GLuint x, GLuint y);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIEXTPROC) (GLuint index, GLuint x, GLuint y, GLuint z);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIEXTPROC) (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1IVEXTPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2IVEXTPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3IVEXTPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4IVEXTPROC) (GLuint index, const GLint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI1UIVEXTPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI2UIVEXTPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI3UIVEXTPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UIVEXTPROC) (GLuint index, const GLuint *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4BVEXTPROC) (GLuint index, const GLbyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4SVEXTPROC) (GLuint index, const GLshort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4UBVEXTPROC) (GLuint index, const GLubyte *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBI4USVEXTPROC) (GLuint index, const GLushort *v);
+typedef void (APIENTRYP PFNGLVERTEXATTRIBIPOINTEREXTPROC) (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIIVEXTPROC) (GLuint index, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVERTEXATTRIBIUIVEXTPROC) (GLuint index, GLenum pname, GLuint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glVertexAttribI1iEXT (GLuint index, GLint x);
+GLAPI void APIENTRY glVertexAttribI2iEXT (GLuint index, GLint x, GLint y);
+GLAPI void APIENTRY glVertexAttribI3iEXT (GLuint index, GLint x, GLint y, GLint z);
+GLAPI void APIENTRY glVertexAttribI4iEXT (GLuint index, GLint x, GLint y, GLint z, GLint w);
+GLAPI void APIENTRY glVertexAttribI1uiEXT (GLuint index, GLuint x);
+GLAPI void APIENTRY glVertexAttribI2uiEXT (GLuint index, GLuint x, GLuint y);
+GLAPI void APIENTRY glVertexAttribI3uiEXT (GLuint index, GLuint x, GLuint y, GLuint z);
+GLAPI void APIENTRY glVertexAttribI4uiEXT (GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+GLAPI void APIENTRY glVertexAttribI1ivEXT (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI2ivEXT (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI3ivEXT (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI4ivEXT (GLuint index, const GLint *v);
+GLAPI void APIENTRY glVertexAttribI1uivEXT (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI2uivEXT (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI3uivEXT (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI4uivEXT (GLuint index, const GLuint *v);
+GLAPI void APIENTRY glVertexAttribI4bvEXT (GLuint index, const GLbyte *v);
+GLAPI void APIENTRY glVertexAttribI4svEXT (GLuint index, const GLshort *v);
+GLAPI void APIENTRY glVertexAttribI4ubvEXT (GLuint index, const GLubyte *v);
+GLAPI void APIENTRY glVertexAttribI4usvEXT (GLuint index, const GLushort *v);
+GLAPI void APIENTRY glVertexAttribIPointerEXT (GLuint index, GLint size, GLenum type, GLsizei stride, const void *pointer);
+GLAPI void APIENTRY glGetVertexAttribIivEXT (GLuint index, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVertexAttribIuivEXT (GLuint index, GLenum pname, GLuint *params);
+#endif
+#endif /* GL_NV_vertex_program4 */
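+
+/*
+ * Note (illustrative; not part of the generated header): despite the NV
+ * extension name, the entry points above carry the EXT suffix because
+ * GL_NV_vertex_program4 shares its integer vertex attribute commands
+ * (glVertexAttribI*EXT, glVertexAttribIPointerEXT) with GL_EXT_gpu_shader4.
+ */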
+
+#ifndef GL_NV_video_capture
+#define GL_NV_video_capture 1
+#define GL_VIDEO_BUFFER_NV 0x9020
+#define GL_VIDEO_BUFFER_BINDING_NV 0x9021
+#define GL_FIELD_UPPER_NV 0x9022
+#define GL_FIELD_LOWER_NV 0x9023
+#define GL_NUM_VIDEO_CAPTURE_STREAMS_NV 0x9024
+#define GL_NEXT_VIDEO_CAPTURE_BUFFER_STATUS_NV 0x9025
+#define GL_VIDEO_CAPTURE_TO_422_SUPPORTED_NV 0x9026
+#define GL_LAST_VIDEO_CAPTURE_STATUS_NV 0x9027
+#define GL_VIDEO_BUFFER_PITCH_NV 0x9028
+#define GL_VIDEO_COLOR_CONVERSION_MATRIX_NV 0x9029
+#define GL_VIDEO_COLOR_CONVERSION_MAX_NV 0x902A
+#define GL_VIDEO_COLOR_CONVERSION_MIN_NV 0x902B
+#define GL_VIDEO_COLOR_CONVERSION_OFFSET_NV 0x902C
+#define GL_VIDEO_BUFFER_INTERNAL_FORMAT_NV 0x902D
+#define GL_PARTIAL_SUCCESS_NV 0x902E
+#define GL_SUCCESS_NV 0x902F
+#define GL_FAILURE_NV 0x9030
+#define GL_YCBYCR8_422_NV 0x9031
+#define GL_YCBAYCR8A_4224_NV 0x9032
+#define GL_Z6Y10Z6CB10Z6Y10Z6CR10_422_NV 0x9033
+#define GL_Z6Y10Z6CB10Z6A10Z6Y10Z6CR10Z6A10_4224_NV 0x9034
+#define GL_Z4Y12Z4CB12Z4Y12Z4CR12_422_NV 0x9035
+#define GL_Z4Y12Z4CB12Z4A12Z4Y12Z4CR12Z4A12_4224_NV 0x9036
+#define GL_Z4Y12Z4CB12Z4CR12_444_NV 0x9037
+#define GL_VIDEO_CAPTURE_FRAME_WIDTH_NV 0x9038
+#define GL_VIDEO_CAPTURE_FRAME_HEIGHT_NV 0x9039
+#define GL_VIDEO_CAPTURE_FIELD_UPPER_HEIGHT_NV 0x903A
+#define GL_VIDEO_CAPTURE_FIELD_LOWER_HEIGHT_NV 0x903B
+#define GL_VIDEO_CAPTURE_SURFACE_ORIGIN_NV 0x903C
+typedef void (APIENTRYP PFNGLBEGINVIDEOCAPTURENVPROC) (GLuint video_capture_slot);
+typedef void (APIENTRYP PFNGLBINDVIDEOCAPTURESTREAMBUFFERNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLintptrARB offset);
+typedef void (APIENTRYP PFNGLBINDVIDEOCAPTURESTREAMTEXTURENVPROC) (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLenum target, GLuint texture);
+typedef void (APIENTRYP PFNGLENDVIDEOCAPTURENVPROC) (GLuint video_capture_slot);
+typedef void (APIENTRYP PFNGLGETVIDEOCAPTUREIVNVPROC) (GLuint video_capture_slot, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVIDEOCAPTURESTREAMIVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETVIDEOCAPTURESTREAMFVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETVIDEOCAPTURESTREAMDVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, GLdouble *params);
+typedef GLenum (APIENTRYP PFNGLVIDEOCAPTURENVPROC) (GLuint video_capture_slot, GLuint *sequence_num, GLuint64EXT *capture_time);
+typedef void (APIENTRYP PFNGLVIDEOCAPTURESTREAMPARAMETERIVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLVIDEOCAPTURESTREAMPARAMETERFVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLVIDEOCAPTURESTREAMPARAMETERDVNVPROC) (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLdouble *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glBeginVideoCaptureNV (GLuint video_capture_slot);
+GLAPI void APIENTRY glBindVideoCaptureStreamBufferNV (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLintptrARB offset);
+GLAPI void APIENTRY glBindVideoCaptureStreamTextureNV (GLuint video_capture_slot, GLuint stream, GLenum frame_region, GLenum target, GLuint texture);
+GLAPI void APIENTRY glEndVideoCaptureNV (GLuint video_capture_slot);
+GLAPI void APIENTRY glGetVideoCaptureivNV (GLuint video_capture_slot, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVideoCaptureStreamivNV (GLuint video_capture_slot, GLuint stream, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetVideoCaptureStreamfvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetVideoCaptureStreamdvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, GLdouble *params);
+GLAPI GLenum APIENTRY glVideoCaptureNV (GLuint video_capture_slot, GLuint *sequence_num, GLuint64EXT *capture_time);
+GLAPI void APIENTRY glVideoCaptureStreamParameterivNV (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glVideoCaptureStreamParameterfvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glVideoCaptureStreamParameterdvNV (GLuint video_capture_slot, GLuint stream, GLenum pname, const GLdouble *params);
+#endif
+#endif /* GL_NV_video_capture */
+
+#ifndef GL_NV_viewport_array2
+#define GL_NV_viewport_array2 1
+#endif /* GL_NV_viewport_array2 */
+
+#ifndef GL_NV_viewport_swizzle
+#define GL_NV_viewport_swizzle 1
+#define GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV 0x9350
+#define GL_VIEWPORT_SWIZZLE_NEGATIVE_X_NV 0x9351
+#define GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV 0x9352
+#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Y_NV 0x9353
+#define GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV 0x9354
+#define GL_VIEWPORT_SWIZZLE_NEGATIVE_Z_NV 0x9355
+#define GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV 0x9356
+#define GL_VIEWPORT_SWIZZLE_NEGATIVE_W_NV 0x9357
+#define GL_VIEWPORT_SWIZZLE_X_NV 0x9358
+#define GL_VIEWPORT_SWIZZLE_Y_NV 0x9359
+#define GL_VIEWPORT_SWIZZLE_Z_NV 0x935A
+#define GL_VIEWPORT_SWIZZLE_W_NV 0x935B
+typedef void (APIENTRYP PFNGLVIEWPORTSWIZZLENVPROC) (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glViewportSwizzleNV (GLuint index, GLenum swizzlex, GLenum swizzley, GLenum swizzlez, GLenum swizzlew);
+#endif
+#endif /* GL_NV_viewport_swizzle */
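+
+/*
+ * Illustrative sketch of how the PFNGL... typedefs above are used; this is
+ * not part of the generated header. Extension entry points are not exported
+ * by the GL library itself, so they are resolved at run time. Here
+ * "GetGLProcAddress" is a hypothetical stand-in for the platform loader
+ * (wglGetProcAddress, glXGetProcAddress, or eglGetProcAddress), and the
+ * pointer is checked before use since the driver may not expose the
+ * extension:
+ *
+ *   static PFNGLVIEWPORTSWIZZLENVPROC pglViewportSwizzleNV;
+ *
+ *   void load_viewport_swizzle(void)
+ *   {
+ *       pglViewportSwizzleNV = (PFNGLVIEWPORTSWIZZLENVPROC)
+ *           GetGLProcAddress("glViewportSwizzleNV");
+ *       if (pglViewportSwizzleNV)
+ *           pglViewportSwizzleNV(0, GL_VIEWPORT_SWIZZLE_POSITIVE_X_NV,
+ *                                   GL_VIEWPORT_SWIZZLE_POSITIVE_Y_NV,
+ *                                   GL_VIEWPORT_SWIZZLE_POSITIVE_Z_NV,
+ *                                   GL_VIEWPORT_SWIZZLE_POSITIVE_W_NV);
+ *   }
+ */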
+
+#ifndef GL_OML_interlace
+#define GL_OML_interlace 1
+#define GL_INTERLACE_OML 0x8980
+#define GL_INTERLACE_READ_OML 0x8981
+#endif /* GL_OML_interlace */
+
+#ifndef GL_OML_resample
+#define GL_OML_resample 1
+#define GL_PACK_RESAMPLE_OML 0x8984
+#define GL_UNPACK_RESAMPLE_OML 0x8985
+#define GL_RESAMPLE_REPLICATE_OML 0x8986
+#define GL_RESAMPLE_ZERO_FILL_OML 0x8987
+#define GL_RESAMPLE_AVERAGE_OML 0x8988
+#define GL_RESAMPLE_DECIMATE_OML 0x8989
+#endif /* GL_OML_resample */
+
+#ifndef GL_OML_subsample
+#define GL_OML_subsample 1
+#define GL_FORMAT_SUBSAMPLE_24_24_OML 0x8982
+#define GL_FORMAT_SUBSAMPLE_244_244_OML 0x8983
+#endif /* GL_OML_subsample */
+
+#ifndef GL_OVR_multiview
+#define GL_OVR_multiview 1
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_NUM_VIEWS_OVR 0x9630
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_BASE_VIEW_INDEX_OVR 0x9632
+#define GL_MAX_VIEWS_OVR 0x9631
+#define GL_FRAMEBUFFER_INCOMPLETE_VIEW_TARGETS_OVR 0x9633
+typedef void (APIENTRYP PFNGLFRAMEBUFFERTEXTUREMULTIVIEWOVRPROC) (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFramebufferTextureMultiviewOVR (GLenum target, GLenum attachment, GLuint texture, GLint level, GLint baseViewIndex, GLsizei numViews);
+#endif
+#endif /* GL_OVR_multiview */
+
+#ifndef GL_OVR_multiview2
+#define GL_OVR_multiview2 1
+#endif /* GL_OVR_multiview2 */
+
+#ifndef GL_PGI_misc_hints
+#define GL_PGI_misc_hints 1
+#define GL_PREFER_DOUBLEBUFFER_HINT_PGI 0x1A1F8
+#define GL_CONSERVE_MEMORY_HINT_PGI 0x1A1FD
+#define GL_RECLAIM_MEMORY_HINT_PGI 0x1A1FE
+#define GL_NATIVE_GRAPHICS_HANDLE_PGI 0x1A202
+#define GL_NATIVE_GRAPHICS_BEGIN_HINT_PGI 0x1A203
+#define GL_NATIVE_GRAPHICS_END_HINT_PGI 0x1A204
+#define GL_ALWAYS_FAST_HINT_PGI 0x1A20C
+#define GL_ALWAYS_SOFT_HINT_PGI 0x1A20D
+#define GL_ALLOW_DRAW_OBJ_HINT_PGI 0x1A20E
+#define GL_ALLOW_DRAW_WIN_HINT_PGI 0x1A20F
+#define GL_ALLOW_DRAW_FRG_HINT_PGI 0x1A210
+#define GL_ALLOW_DRAW_MEM_HINT_PGI 0x1A211
+#define GL_STRICT_DEPTHFUNC_HINT_PGI 0x1A216
+#define GL_STRICT_LIGHTING_HINT_PGI 0x1A217
+#define GL_STRICT_SCISSOR_HINT_PGI 0x1A218
+#define GL_FULL_STIPPLE_HINT_PGI 0x1A219
+#define GL_CLIP_NEAR_HINT_PGI 0x1A220
+#define GL_CLIP_FAR_HINT_PGI 0x1A221
+#define GL_WIDE_LINE_HINT_PGI 0x1A222
+#define GL_BACK_NORMALS_HINT_PGI 0x1A223
+typedef void (APIENTRYP PFNGLHINTPGIPROC) (GLenum target, GLint mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glHintPGI (GLenum target, GLint mode);
+#endif
+#endif /* GL_PGI_misc_hints */
+
+#ifndef GL_PGI_vertex_hints
+#define GL_PGI_vertex_hints 1
+#define GL_VERTEX_DATA_HINT_PGI 0x1A22A
+#define GL_VERTEX_CONSISTENT_HINT_PGI 0x1A22B
+#define GL_MATERIAL_SIDE_HINT_PGI 0x1A22C
+#define GL_MAX_VERTEX_HINT_PGI 0x1A22D
+#define GL_COLOR3_BIT_PGI 0x00010000
+#define GL_COLOR4_BIT_PGI 0x00020000
+#define GL_EDGEFLAG_BIT_PGI 0x00040000
+#define GL_INDEX_BIT_PGI 0x00080000
+#define GL_MAT_AMBIENT_BIT_PGI 0x00100000
+#define GL_MAT_AMBIENT_AND_DIFFUSE_BIT_PGI 0x00200000
+#define GL_MAT_DIFFUSE_BIT_PGI 0x00400000
+#define GL_MAT_EMISSION_BIT_PGI 0x00800000
+#define GL_MAT_COLOR_INDEXES_BIT_PGI 0x01000000
+#define GL_MAT_SHININESS_BIT_PGI 0x02000000
+#define GL_MAT_SPECULAR_BIT_PGI 0x04000000
+#define GL_NORMAL_BIT_PGI 0x08000000
+#define GL_TEXCOORD1_BIT_PGI 0x10000000
+#define GL_TEXCOORD2_BIT_PGI 0x20000000
+#define GL_TEXCOORD3_BIT_PGI 0x40000000
+#define GL_TEXCOORD4_BIT_PGI 0x80000000
+#define GL_VERTEX23_BIT_PGI 0x00000004
+#define GL_VERTEX4_BIT_PGI 0x00000008
+#endif /* GL_PGI_vertex_hints */
+
+#ifndef GL_REND_screen_coordinates
+#define GL_REND_screen_coordinates 1
+#define GL_SCREEN_COORDINATES_REND 0x8490
+#define GL_INVERTED_SCREEN_W_REND 0x8491
+#endif /* GL_REND_screen_coordinates */
+
+#ifndef GL_S3_s3tc
+#define GL_S3_s3tc 1
+#define GL_RGB_S3TC 0x83A0
+#define GL_RGB4_S3TC 0x83A1
+#define GL_RGBA_S3TC 0x83A2
+#define GL_RGBA4_S3TC 0x83A3
+#define GL_RGBA_DXT5_S3TC 0x83A4
+#define GL_RGBA4_DXT5_S3TC 0x83A5
+#endif /* GL_S3_s3tc */
+
+#ifndef GL_SGIS_detail_texture
+#define GL_SGIS_detail_texture 1
+#define GL_DETAIL_TEXTURE_2D_SGIS 0x8095
+#define GL_DETAIL_TEXTURE_2D_BINDING_SGIS 0x8096
+#define GL_LINEAR_DETAIL_SGIS 0x8097
+#define GL_LINEAR_DETAIL_ALPHA_SGIS 0x8098
+#define GL_LINEAR_DETAIL_COLOR_SGIS 0x8099
+#define GL_DETAIL_TEXTURE_LEVEL_SGIS 0x809A
+#define GL_DETAIL_TEXTURE_MODE_SGIS 0x809B
+#define GL_DETAIL_TEXTURE_FUNC_POINTS_SGIS 0x809C
+typedef void (APIENTRYP PFNGLDETAILTEXFUNCSGISPROC) (GLenum target, GLsizei n, const GLfloat *points);
+typedef void (APIENTRYP PFNGLGETDETAILTEXFUNCSGISPROC) (GLenum target, GLfloat *points);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDetailTexFuncSGIS (GLenum target, GLsizei n, const GLfloat *points);
+GLAPI void APIENTRY glGetDetailTexFuncSGIS (GLenum target, GLfloat *points);
+#endif
+#endif /* GL_SGIS_detail_texture */
+
+#ifndef GL_SGIS_fog_function
+#define GL_SGIS_fog_function 1
+#define GL_FOG_FUNC_SGIS 0x812A
+#define GL_FOG_FUNC_POINTS_SGIS 0x812B
+#define GL_MAX_FOG_FUNC_POINTS_SGIS 0x812C
+typedef void (APIENTRYP PFNGLFOGFUNCSGISPROC) (GLsizei n, const GLfloat *points);
+typedef void (APIENTRYP PFNGLGETFOGFUNCSGISPROC) (GLfloat *points);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFogFuncSGIS (GLsizei n, const GLfloat *points);
+GLAPI void APIENTRY glGetFogFuncSGIS (GLfloat *points);
+#endif
+#endif /* GL_SGIS_fog_function */
+
+#ifndef GL_SGIS_generate_mipmap
+#define GL_SGIS_generate_mipmap 1
+#define GL_GENERATE_MIPMAP_SGIS 0x8191
+#define GL_GENERATE_MIPMAP_HINT_SGIS 0x8192
+#endif /* GL_SGIS_generate_mipmap */
+
+#ifndef GL_SGIS_multisample
+#define GL_SGIS_multisample 1
+#define GL_MULTISAMPLE_SGIS 0x809D
+#define GL_SAMPLE_ALPHA_TO_MASK_SGIS 0x809E
+#define GL_SAMPLE_ALPHA_TO_ONE_SGIS 0x809F
+#define GL_SAMPLE_MASK_SGIS 0x80A0
+#define GL_1PASS_SGIS 0x80A1
+#define GL_2PASS_0_SGIS 0x80A2
+#define GL_2PASS_1_SGIS 0x80A3
+#define GL_4PASS_0_SGIS 0x80A4
+#define GL_4PASS_1_SGIS 0x80A5
+#define GL_4PASS_2_SGIS 0x80A6
+#define GL_4PASS_3_SGIS 0x80A7
+#define GL_SAMPLE_BUFFERS_SGIS 0x80A8
+#define GL_SAMPLES_SGIS 0x80A9
+#define GL_SAMPLE_MASK_VALUE_SGIS 0x80AA
+#define GL_SAMPLE_MASK_INVERT_SGIS 0x80AB
+#define GL_SAMPLE_PATTERN_SGIS 0x80AC
+typedef void (APIENTRYP PFNGLSAMPLEMASKSGISPROC) (GLclampf value, GLboolean invert);
+typedef void (APIENTRYP PFNGLSAMPLEPATTERNSGISPROC) (GLenum pattern);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSampleMaskSGIS (GLclampf value, GLboolean invert);
+GLAPI void APIENTRY glSamplePatternSGIS (GLenum pattern);
+#endif
+#endif /* GL_SGIS_multisample */
+
+#ifndef GL_SGIS_pixel_texture
+#define GL_SGIS_pixel_texture 1
+#define GL_PIXEL_TEXTURE_SGIS 0x8353
+#define GL_PIXEL_FRAGMENT_RGB_SOURCE_SGIS 0x8354
+#define GL_PIXEL_FRAGMENT_ALPHA_SOURCE_SGIS 0x8355
+#define GL_PIXEL_GROUP_COLOR_SGIS 0x8356
+typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERISGISPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERIVSGISPROC) (GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERFSGISPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLPIXELTEXGENPARAMETERFVSGISPROC) (GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLGETPIXELTEXGENPARAMETERIVSGISPROC) (GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETPIXELTEXGENPARAMETERFVSGISPROC) (GLenum pname, GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPixelTexGenParameteriSGIS (GLenum pname, GLint param);
+GLAPI void APIENTRY glPixelTexGenParameterivSGIS (GLenum pname, const GLint *params);
+GLAPI void APIENTRY glPixelTexGenParameterfSGIS (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glPixelTexGenParameterfvSGIS (GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glGetPixelTexGenParameterivSGIS (GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetPixelTexGenParameterfvSGIS (GLenum pname, GLfloat *params);
+#endif
+#endif /* GL_SGIS_pixel_texture */
+
+#ifndef GL_SGIS_point_line_texgen
+#define GL_SGIS_point_line_texgen 1
+#define GL_EYE_DISTANCE_TO_POINT_SGIS 0x81F0
+#define GL_OBJECT_DISTANCE_TO_POINT_SGIS 0x81F1
+#define GL_EYE_DISTANCE_TO_LINE_SGIS 0x81F2
+#define GL_OBJECT_DISTANCE_TO_LINE_SGIS 0x81F3
+#define GL_EYE_POINT_SGIS 0x81F4
+#define GL_OBJECT_POINT_SGIS 0x81F5
+#define GL_EYE_LINE_SGIS 0x81F6
+#define GL_OBJECT_LINE_SGIS 0x81F7
+#endif /* GL_SGIS_point_line_texgen */
+
+#ifndef GL_SGIS_point_parameters
+#define GL_SGIS_point_parameters 1
+#define GL_POINT_SIZE_MIN_SGIS 0x8126
+#define GL_POINT_SIZE_MAX_SGIS 0x8127
+#define GL_POINT_FADE_THRESHOLD_SIZE_SGIS 0x8128
+#define GL_DISTANCE_ATTENUATION_SGIS 0x8129
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFSGISPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLPOINTPARAMETERFVSGISPROC) (GLenum pname, const GLfloat *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPointParameterfSGIS (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glPointParameterfvSGIS (GLenum pname, const GLfloat *params);
+#endif
+#endif /* GL_SGIS_point_parameters */
+
+#ifndef GL_SGIS_sharpen_texture
+#define GL_SGIS_sharpen_texture 1
+#define GL_LINEAR_SHARPEN_SGIS 0x80AD
+#define GL_LINEAR_SHARPEN_ALPHA_SGIS 0x80AE
+#define GL_LINEAR_SHARPEN_COLOR_SGIS 0x80AF
+#define GL_SHARPEN_TEXTURE_FUNC_POINTS_SGIS 0x80B0
+typedef void (APIENTRYP PFNGLSHARPENTEXFUNCSGISPROC) (GLenum target, GLsizei n, const GLfloat *points);
+typedef void (APIENTRYP PFNGLGETSHARPENTEXFUNCSGISPROC) (GLenum target, GLfloat *points);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSharpenTexFuncSGIS (GLenum target, GLsizei n, const GLfloat *points);
+GLAPI void APIENTRY glGetSharpenTexFuncSGIS (GLenum target, GLfloat *points);
+#endif
+#endif /* GL_SGIS_sharpen_texture */
+
+#ifndef GL_SGIS_texture4D
+#define GL_SGIS_texture4D 1
+#define GL_PACK_SKIP_VOLUMES_SGIS 0x8130
+#define GL_PACK_IMAGE_DEPTH_SGIS 0x8131
+#define GL_UNPACK_SKIP_VOLUMES_SGIS 0x8132
+#define GL_UNPACK_IMAGE_DEPTH_SGIS 0x8133
+#define GL_TEXTURE_4D_SGIS 0x8134
+#define GL_PROXY_TEXTURE_4D_SGIS 0x8135
+#define GL_TEXTURE_4DSIZE_SGIS 0x8136
+#define GL_TEXTURE_WRAP_Q_SGIS 0x8137
+#define GL_MAX_4D_TEXTURE_SIZE_SGIS 0x8138
+#define GL_TEXTURE_4D_BINDING_SGIS 0x814F
+typedef void (APIENTRYP PFNGLTEXIMAGE4DSGISPROC) (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLint border, GLenum format, GLenum type, const void *pixels);
+typedef void (APIENTRYP PFNGLTEXSUBIMAGE4DSGISPROC) (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint woffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLenum format, GLenum type, const void *pixels);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTexImage4DSGIS (GLenum target, GLint level, GLenum internalformat, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLint border, GLenum format, GLenum type, const void *pixels);
+GLAPI void APIENTRY glTexSubImage4DSGIS (GLenum target, GLint level, GLint xoffset, GLint yoffset, GLint zoffset, GLint woffset, GLsizei width, GLsizei height, GLsizei depth, GLsizei size4d, GLenum format, GLenum type, const void *pixels);
+#endif
+#endif /* GL_SGIS_texture4D */
+
+#ifndef GL_SGIS_texture_border_clamp
+#define GL_SGIS_texture_border_clamp 1
+#define GL_CLAMP_TO_BORDER_SGIS 0x812D
+#endif /* GL_SGIS_texture_border_clamp */
+
+#ifndef GL_SGIS_texture_color_mask
+#define GL_SGIS_texture_color_mask 1
+#define GL_TEXTURE_COLOR_WRITEMASK_SGIS 0x81EF
+typedef void (APIENTRYP PFNGLTEXTURECOLORMASKSGISPROC) (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTextureColorMaskSGIS (GLboolean red, GLboolean green, GLboolean blue, GLboolean alpha);
+#endif
+#endif /* GL_SGIS_texture_color_mask */
+
+#ifndef GL_SGIS_texture_edge_clamp
+#define GL_SGIS_texture_edge_clamp 1
+#define GL_CLAMP_TO_EDGE_SGIS 0x812F
+#endif /* GL_SGIS_texture_edge_clamp */
+
+#ifndef GL_SGIS_texture_filter4
+#define GL_SGIS_texture_filter4 1
+#define GL_FILTER4_SGIS 0x8146
+#define GL_TEXTURE_FILTER4_SIZE_SGIS 0x8147
+typedef void (APIENTRYP PFNGLGETTEXFILTERFUNCSGISPROC) (GLenum target, GLenum filter, GLfloat *weights);
+typedef void (APIENTRYP PFNGLTEXFILTERFUNCSGISPROC) (GLenum target, GLenum filter, GLsizei n, const GLfloat *weights);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetTexFilterFuncSGIS (GLenum target, GLenum filter, GLfloat *weights);
+GLAPI void APIENTRY glTexFilterFuncSGIS (GLenum target, GLenum filter, GLsizei n, const GLfloat *weights);
+#endif
+#endif /* GL_SGIS_texture_filter4 */
+
+#ifndef GL_SGIS_texture_lod
+#define GL_SGIS_texture_lod 1
+#define GL_TEXTURE_MIN_LOD_SGIS 0x813A
+#define GL_TEXTURE_MAX_LOD_SGIS 0x813B
+#define GL_TEXTURE_BASE_LEVEL_SGIS 0x813C
+#define GL_TEXTURE_MAX_LEVEL_SGIS 0x813D
+#endif /* GL_SGIS_texture_lod */
+
+#ifndef GL_SGIS_texture_select
+#define GL_SGIS_texture_select 1
+#define GL_DUAL_ALPHA4_SGIS 0x8110
+#define GL_DUAL_ALPHA8_SGIS 0x8111
+#define GL_DUAL_ALPHA12_SGIS 0x8112
+#define GL_DUAL_ALPHA16_SGIS 0x8113
+#define GL_DUAL_LUMINANCE4_SGIS 0x8114
+#define GL_DUAL_LUMINANCE8_SGIS 0x8115
+#define GL_DUAL_LUMINANCE12_SGIS 0x8116
+#define GL_DUAL_LUMINANCE16_SGIS 0x8117
+#define GL_DUAL_INTENSITY4_SGIS 0x8118
+#define GL_DUAL_INTENSITY8_SGIS 0x8119
+#define GL_DUAL_INTENSITY12_SGIS 0x811A
+#define GL_DUAL_INTENSITY16_SGIS 0x811B
+#define GL_DUAL_LUMINANCE_ALPHA4_SGIS 0x811C
+#define GL_DUAL_LUMINANCE_ALPHA8_SGIS 0x811D
+#define GL_QUAD_ALPHA4_SGIS 0x811E
+#define GL_QUAD_ALPHA8_SGIS 0x811F
+#define GL_QUAD_LUMINANCE4_SGIS 0x8120
+#define GL_QUAD_LUMINANCE8_SGIS 0x8121
+#define GL_QUAD_INTENSITY4_SGIS 0x8122
+#define GL_QUAD_INTENSITY8_SGIS 0x8123
+#define GL_DUAL_TEXTURE_SELECT_SGIS 0x8124
+#define GL_QUAD_TEXTURE_SELECT_SGIS 0x8125
+#endif /* GL_SGIS_texture_select */
+
+#ifndef GL_SGIX_async
+#define GL_SGIX_async 1
+#define GL_ASYNC_MARKER_SGIX 0x8329
+typedef void (APIENTRYP PFNGLASYNCMARKERSGIXPROC) (GLuint marker);
+typedef GLint (APIENTRYP PFNGLFINISHASYNCSGIXPROC) (GLuint *markerp);
+typedef GLint (APIENTRYP PFNGLPOLLASYNCSGIXPROC) (GLuint *markerp);
+typedef GLuint (APIENTRYP PFNGLGENASYNCMARKERSSGIXPROC) (GLsizei range);
+typedef void (APIENTRYP PFNGLDELETEASYNCMARKERSSGIXPROC) (GLuint marker, GLsizei range);
+typedef GLboolean (APIENTRYP PFNGLISASYNCMARKERSGIXPROC) (GLuint marker);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glAsyncMarkerSGIX (GLuint marker);
+GLAPI GLint APIENTRY glFinishAsyncSGIX (GLuint *markerp);
+GLAPI GLint APIENTRY glPollAsyncSGIX (GLuint *markerp);
+GLAPI GLuint APIENTRY glGenAsyncMarkersSGIX (GLsizei range);
+GLAPI void APIENTRY glDeleteAsyncMarkersSGIX (GLuint marker, GLsizei range);
+GLAPI GLboolean APIENTRY glIsAsyncMarkerSGIX (GLuint marker);
+#endif
+#endif /* GL_SGIX_async */
+
+#ifndef GL_SGIX_async_histogram
+#define GL_SGIX_async_histogram 1
+#define GL_ASYNC_HISTOGRAM_SGIX 0x832C
+#define GL_MAX_ASYNC_HISTOGRAM_SGIX 0x832D
+#endif /* GL_SGIX_async_histogram */
+
+#ifndef GL_SGIX_async_pixel
+#define GL_SGIX_async_pixel 1
+#define GL_ASYNC_TEX_IMAGE_SGIX 0x835C
+#define GL_ASYNC_DRAW_PIXELS_SGIX 0x835D
+#define GL_ASYNC_READ_PIXELS_SGIX 0x835E
+#define GL_MAX_ASYNC_TEX_IMAGE_SGIX 0x835F
+#define GL_MAX_ASYNC_DRAW_PIXELS_SGIX 0x8360
+#define GL_MAX_ASYNC_READ_PIXELS_SGIX 0x8361
+#endif /* GL_SGIX_async_pixel */
+
+#ifndef GL_SGIX_blend_alpha_minmax
+#define GL_SGIX_blend_alpha_minmax 1
+#define GL_ALPHA_MIN_SGIX 0x8320
+#define GL_ALPHA_MAX_SGIX 0x8321
+#endif /* GL_SGIX_blend_alpha_minmax */
+
+#ifndef GL_SGIX_calligraphic_fragment
+#define GL_SGIX_calligraphic_fragment 1
+#define GL_CALLIGRAPHIC_FRAGMENT_SGIX 0x8183
+#endif /* GL_SGIX_calligraphic_fragment */
+
+#ifndef GL_SGIX_clipmap
+#define GL_SGIX_clipmap 1
+#define GL_LINEAR_CLIPMAP_LINEAR_SGIX 0x8170
+#define GL_TEXTURE_CLIPMAP_CENTER_SGIX 0x8171
+#define GL_TEXTURE_CLIPMAP_FRAME_SGIX 0x8172
+#define GL_TEXTURE_CLIPMAP_OFFSET_SGIX 0x8173
+#define GL_TEXTURE_CLIPMAP_VIRTUAL_DEPTH_SGIX 0x8174
+#define GL_TEXTURE_CLIPMAP_LOD_OFFSET_SGIX 0x8175
+#define GL_TEXTURE_CLIPMAP_DEPTH_SGIX 0x8176
+#define GL_MAX_CLIPMAP_DEPTH_SGIX 0x8177
+#define GL_MAX_CLIPMAP_VIRTUAL_DEPTH_SGIX 0x8178
+#define GL_NEAREST_CLIPMAP_NEAREST_SGIX 0x844D
+#define GL_NEAREST_CLIPMAP_LINEAR_SGIX 0x844E
+#define GL_LINEAR_CLIPMAP_NEAREST_SGIX 0x844F
+#endif /* GL_SGIX_clipmap */
+
+#ifndef GL_SGIX_convolution_accuracy
+#define GL_SGIX_convolution_accuracy 1
+#define GL_CONVOLUTION_HINT_SGIX 0x8316
+#endif /* GL_SGIX_convolution_accuracy */
+
+#ifndef GL_SGIX_depth_pass_instrument
+#define GL_SGIX_depth_pass_instrument 1
+#endif /* GL_SGIX_depth_pass_instrument */
+
+#ifndef GL_SGIX_depth_texture
+#define GL_SGIX_depth_texture 1
+#define GL_DEPTH_COMPONENT16_SGIX 0x81A5
+#define GL_DEPTH_COMPONENT24_SGIX 0x81A6
+#define GL_DEPTH_COMPONENT32_SGIX 0x81A7
+#endif /* GL_SGIX_depth_texture */
+
+#ifndef GL_SGIX_flush_raster
+#define GL_SGIX_flush_raster 1
+typedef void (APIENTRYP PFNGLFLUSHRASTERSGIXPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFlushRasterSGIX (void);
+#endif
+#endif /* GL_SGIX_flush_raster */
+
+#ifndef GL_SGIX_fog_offset
+#define GL_SGIX_fog_offset 1
+#define GL_FOG_OFFSET_SGIX 0x8198
+#define GL_FOG_OFFSET_VALUE_SGIX 0x8199
+#endif /* GL_SGIX_fog_offset */
+
+#ifndef GL_SGIX_fragment_lighting
+#define GL_SGIX_fragment_lighting 1
+#define GL_FRAGMENT_LIGHTING_SGIX 0x8400
+#define GL_FRAGMENT_COLOR_MATERIAL_SGIX 0x8401
+#define GL_FRAGMENT_COLOR_MATERIAL_FACE_SGIX 0x8402
+#define GL_FRAGMENT_COLOR_MATERIAL_PARAMETER_SGIX 0x8403
+#define GL_MAX_FRAGMENT_LIGHTS_SGIX 0x8404
+#define GL_MAX_ACTIVE_LIGHTS_SGIX 0x8405
+#define GL_CURRENT_RASTER_NORMAL_SGIX 0x8406
+#define GL_LIGHT_ENV_MODE_SGIX 0x8407
+#define GL_FRAGMENT_LIGHT_MODEL_LOCAL_VIEWER_SGIX 0x8408
+#define GL_FRAGMENT_LIGHT_MODEL_TWO_SIDE_SGIX 0x8409
+#define GL_FRAGMENT_LIGHT_MODEL_AMBIENT_SGIX 0x840A
+#define GL_FRAGMENT_LIGHT_MODEL_NORMAL_INTERPOLATION_SGIX 0x840B
+#define GL_FRAGMENT_LIGHT0_SGIX 0x840C
+#define GL_FRAGMENT_LIGHT1_SGIX 0x840D
+#define GL_FRAGMENT_LIGHT2_SGIX 0x840E
+#define GL_FRAGMENT_LIGHT3_SGIX 0x840F
+#define GL_FRAGMENT_LIGHT4_SGIX 0x8410
+#define GL_FRAGMENT_LIGHT5_SGIX 0x8411
+#define GL_FRAGMENT_LIGHT6_SGIX 0x8412
+#define GL_FRAGMENT_LIGHT7_SGIX 0x8413
+typedef void (APIENTRYP PFNGLFRAGMENTCOLORMATERIALSGIXPROC) (GLenum face, GLenum mode);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTFSGIXPROC) (GLenum light, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTFVSGIXPROC) (GLenum light, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTISGIXPROC) (GLenum light, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTIVSGIXPROC) (GLenum light, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELFSGIXPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELFVSGIXPROC) (GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELISGIXPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLFRAGMENTLIGHTMODELIVSGIXPROC) (GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLFRAGMENTMATERIALFSGIXPROC) (GLenum face, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLFRAGMENTMATERIALFVSGIXPROC) (GLenum face, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLFRAGMENTMATERIALISGIXPROC) (GLenum face, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLFRAGMENTMATERIALIVSGIXPROC) (GLenum face, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLGETFRAGMENTLIGHTFVSGIXPROC) (GLenum light, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETFRAGMENTLIGHTIVSGIXPROC) (GLenum light, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLGETFRAGMENTMATERIALFVSGIXPROC) (GLenum face, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETFRAGMENTMATERIALIVSGIXPROC) (GLenum face, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLLIGHTENVISGIXPROC) (GLenum pname, GLint param);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFragmentColorMaterialSGIX (GLenum face, GLenum mode);
+GLAPI void APIENTRY glFragmentLightfSGIX (GLenum light, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glFragmentLightfvSGIX (GLenum light, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glFragmentLightiSGIX (GLenum light, GLenum pname, GLint param);
+GLAPI void APIENTRY glFragmentLightivSGIX (GLenum light, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glFragmentLightModelfSGIX (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glFragmentLightModelfvSGIX (GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glFragmentLightModeliSGIX (GLenum pname, GLint param);
+GLAPI void APIENTRY glFragmentLightModelivSGIX (GLenum pname, const GLint *params);
+GLAPI void APIENTRY glFragmentMaterialfSGIX (GLenum face, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glFragmentMaterialfvSGIX (GLenum face, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glFragmentMaterialiSGIX (GLenum face, GLenum pname, GLint param);
+GLAPI void APIENTRY glFragmentMaterialivSGIX (GLenum face, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glGetFragmentLightfvSGIX (GLenum light, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetFragmentLightivSGIX (GLenum light, GLenum pname, GLint *params);
+GLAPI void APIENTRY glGetFragmentMaterialfvSGIX (GLenum face, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetFragmentMaterialivSGIX (GLenum face, GLenum pname, GLint *params);
+GLAPI void APIENTRY glLightEnviSGIX (GLenum pname, GLint param);
+#endif
+#endif /* GL_SGIX_fragment_lighting */
+
+#ifndef GL_SGIX_framezoom
+#define GL_SGIX_framezoom 1
+#define GL_FRAMEZOOM_SGIX 0x818B
+#define GL_FRAMEZOOM_FACTOR_SGIX 0x818C
+#define GL_MAX_FRAMEZOOM_FACTOR_SGIX 0x818D
+typedef void (APIENTRYP PFNGLFRAMEZOOMSGIXPROC) (GLint factor);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFrameZoomSGIX (GLint factor);
+#endif
+#endif /* GL_SGIX_framezoom */
+
+#ifndef GL_SGIX_igloo_interface
+#define GL_SGIX_igloo_interface 1
+typedef void (APIENTRYP PFNGLIGLOOINTERFACESGIXPROC) (GLenum pname, const void *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glIglooInterfaceSGIX (GLenum pname, const void *params);
+#endif
+#endif /* GL_SGIX_igloo_interface */
+
+#ifndef GL_SGIX_instruments
+#define GL_SGIX_instruments 1
+#define GL_INSTRUMENT_BUFFER_POINTER_SGIX 0x8180
+#define GL_INSTRUMENT_MEASUREMENTS_SGIX 0x8181
+typedef GLint (APIENTRYP PFNGLGETINSTRUMENTSSGIXPROC) (void);
+typedef void (APIENTRYP PFNGLINSTRUMENTSBUFFERSGIXPROC) (GLsizei size, GLint *buffer);
+typedef GLint (APIENTRYP PFNGLPOLLINSTRUMENTSSGIXPROC) (GLint *marker_p);
+typedef void (APIENTRYP PFNGLREADINSTRUMENTSSGIXPROC) (GLint marker);
+typedef void (APIENTRYP PFNGLSTARTINSTRUMENTSSGIXPROC) (void);
+typedef void (APIENTRYP PFNGLSTOPINSTRUMENTSSGIXPROC) (GLint marker);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI GLint APIENTRY glGetInstrumentsSGIX (void);
+GLAPI void APIENTRY glInstrumentsBufferSGIX (GLsizei size, GLint *buffer);
+GLAPI GLint APIENTRY glPollInstrumentsSGIX (GLint *marker_p);
+GLAPI void APIENTRY glReadInstrumentsSGIX (GLint marker);
+GLAPI void APIENTRY glStartInstrumentsSGIX (void);
+GLAPI void APIENTRY glStopInstrumentsSGIX (GLint marker);
+#endif
+#endif /* GL_SGIX_instruments */
+
+#ifndef GL_SGIX_interlace
+#define GL_SGIX_interlace 1
+#define GL_INTERLACE_SGIX 0x8094
+#endif /* GL_SGIX_interlace */
+
+#ifndef GL_SGIX_ir_instrument1
+#define GL_SGIX_ir_instrument1 1
+#define GL_IR_INSTRUMENT1_SGIX 0x817F
+#endif /* GL_SGIX_ir_instrument1 */
+
+#ifndef GL_SGIX_list_priority
+#define GL_SGIX_list_priority 1
+#define GL_LIST_PRIORITY_SGIX 0x8182
+typedef void (APIENTRYP PFNGLGETLISTPARAMETERFVSGIXPROC) (GLuint list, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETLISTPARAMETERIVSGIXPROC) (GLuint list, GLenum pname, GLint *params);
+typedef void (APIENTRYP PFNGLLISTPARAMETERFSGIXPROC) (GLuint list, GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLLISTPARAMETERFVSGIXPROC) (GLuint list, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLLISTPARAMETERISGIXPROC) (GLuint list, GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLLISTPARAMETERIVSGIXPROC) (GLuint list, GLenum pname, const GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGetListParameterfvSGIX (GLuint list, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetListParameterivSGIX (GLuint list, GLenum pname, GLint *params);
+GLAPI void APIENTRY glListParameterfSGIX (GLuint list, GLenum pname, GLfloat param);
+GLAPI void APIENTRY glListParameterfvSGIX (GLuint list, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glListParameteriSGIX (GLuint list, GLenum pname, GLint param);
+GLAPI void APIENTRY glListParameterivSGIX (GLuint list, GLenum pname, const GLint *params);
+#endif
+#endif /* GL_SGIX_list_priority */
+
+#ifndef GL_SGIX_pixel_texture
+#define GL_SGIX_pixel_texture 1
+#define GL_PIXEL_TEX_GEN_SGIX 0x8139
+#define GL_PIXEL_TEX_GEN_MODE_SGIX 0x832B
+typedef void (APIENTRYP PFNGLPIXELTEXGENSGIXPROC) (GLenum mode);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glPixelTexGenSGIX (GLenum mode);
+#endif
+#endif /* GL_SGIX_pixel_texture */
+
+#ifndef GL_SGIX_pixel_tiles
+#define GL_SGIX_pixel_tiles 1
+#define GL_PIXEL_TILE_BEST_ALIGNMENT_SGIX 0x813E
+#define GL_PIXEL_TILE_CACHE_INCREMENT_SGIX 0x813F
+#define GL_PIXEL_TILE_WIDTH_SGIX 0x8140
+#define GL_PIXEL_TILE_HEIGHT_SGIX 0x8141
+#define GL_PIXEL_TILE_GRID_WIDTH_SGIX 0x8142
+#define GL_PIXEL_TILE_GRID_HEIGHT_SGIX 0x8143
+#define GL_PIXEL_TILE_GRID_DEPTH_SGIX 0x8144
+#define GL_PIXEL_TILE_CACHE_SIZE_SGIX 0x8145
+#endif /* GL_SGIX_pixel_tiles */
+
+#ifndef GL_SGIX_polynomial_ffd
+#define GL_SGIX_polynomial_ffd 1
+#define GL_TEXTURE_DEFORMATION_BIT_SGIX 0x00000001
+#define GL_GEOMETRY_DEFORMATION_BIT_SGIX 0x00000002
+#define GL_GEOMETRY_DEFORMATION_SGIX 0x8194
+#define GL_TEXTURE_DEFORMATION_SGIX 0x8195
+#define GL_DEFORMATIONS_MASK_SGIX 0x8196
+#define GL_MAX_DEFORMATION_ORDER_SGIX 0x8197
+typedef void (APIENTRYP PFNGLDEFORMATIONMAP3DSGIXPROC) (GLenum target, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, GLdouble w1, GLdouble w2, GLint wstride, GLint worder, const GLdouble *points);
+typedef void (APIENTRYP PFNGLDEFORMATIONMAP3FSGIXPROC) (GLenum target, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, GLfloat w1, GLfloat w2, GLint wstride, GLint worder, const GLfloat *points);
+typedef void (APIENTRYP PFNGLDEFORMSGIXPROC) (GLbitfield mask);
+typedef void (APIENTRYP PFNGLLOADIDENTITYDEFORMATIONMAPSGIXPROC) (GLbitfield mask);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDeformationMap3dSGIX (GLenum target, GLdouble u1, GLdouble u2, GLint ustride, GLint uorder, GLdouble v1, GLdouble v2, GLint vstride, GLint vorder, GLdouble w1, GLdouble w2, GLint wstride, GLint worder, const GLdouble *points);
+GLAPI void APIENTRY glDeformationMap3fSGIX (GLenum target, GLfloat u1, GLfloat u2, GLint ustride, GLint uorder, GLfloat v1, GLfloat v2, GLint vstride, GLint vorder, GLfloat w1, GLfloat w2, GLint wstride, GLint worder, const GLfloat *points);
+GLAPI void APIENTRY glDeformSGIX (GLbitfield mask);
+GLAPI void APIENTRY glLoadIdentityDeformationMapSGIX (GLbitfield mask);
+#endif
+#endif /* GL_SGIX_polynomial_ffd */
+
+#ifndef GL_SGIX_reference_plane
+#define GL_SGIX_reference_plane 1
+#define GL_REFERENCE_PLANE_SGIX 0x817D
+#define GL_REFERENCE_PLANE_EQUATION_SGIX 0x817E
+typedef void (APIENTRYP PFNGLREFERENCEPLANESGIXPROC) (const GLdouble *equation);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glReferencePlaneSGIX (const GLdouble *equation);
+#endif
+#endif /* GL_SGIX_reference_plane */
+
+#ifndef GL_SGIX_resample
+#define GL_SGIX_resample 1
+#define GL_PACK_RESAMPLE_SGIX 0x842E
+#define GL_UNPACK_RESAMPLE_SGIX 0x842F
+#define GL_RESAMPLE_REPLICATE_SGIX 0x8433
+#define GL_RESAMPLE_ZERO_FILL_SGIX 0x8434
+#define GL_RESAMPLE_DECIMATE_SGIX 0x8430
+#endif /* GL_SGIX_resample */
+
+#ifndef GL_SGIX_scalebias_hint
+#define GL_SGIX_scalebias_hint 1
+#define GL_SCALEBIAS_HINT_SGIX 0x8322
+#endif /* GL_SGIX_scalebias_hint */
+
+#ifndef GL_SGIX_shadow
+#define GL_SGIX_shadow 1
+#define GL_TEXTURE_COMPARE_SGIX 0x819A
+#define GL_TEXTURE_COMPARE_OPERATOR_SGIX 0x819B
+#define GL_TEXTURE_LEQUAL_R_SGIX 0x819C
+#define GL_TEXTURE_GEQUAL_R_SGIX 0x819D
+#endif /* GL_SGIX_shadow */
+
+#ifndef GL_SGIX_shadow_ambient
+#define GL_SGIX_shadow_ambient 1
+#define GL_SHADOW_AMBIENT_SGIX 0x80BF
+#endif /* GL_SGIX_shadow_ambient */
+
+#ifndef GL_SGIX_sprite
+#define GL_SGIX_sprite 1
+#define GL_SPRITE_SGIX 0x8148
+#define GL_SPRITE_MODE_SGIX 0x8149
+#define GL_SPRITE_AXIS_SGIX 0x814A
+#define GL_SPRITE_TRANSLATION_SGIX 0x814B
+#define GL_SPRITE_AXIAL_SGIX 0x814C
+#define GL_SPRITE_OBJECT_ALIGNED_SGIX 0x814D
+#define GL_SPRITE_EYE_ALIGNED_SGIX 0x814E
+typedef void (APIENTRYP PFNGLSPRITEPARAMETERFSGIXPROC) (GLenum pname, GLfloat param);
+typedef void (APIENTRYP PFNGLSPRITEPARAMETERFVSGIXPROC) (GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLSPRITEPARAMETERISGIXPROC) (GLenum pname, GLint param);
+typedef void (APIENTRYP PFNGLSPRITEPARAMETERIVSGIXPROC) (GLenum pname, const GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glSpriteParameterfSGIX (GLenum pname, GLfloat param);
+GLAPI void APIENTRY glSpriteParameterfvSGIX (GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glSpriteParameteriSGIX (GLenum pname, GLint param);
+GLAPI void APIENTRY glSpriteParameterivSGIX (GLenum pname, const GLint *params);
+#endif
+#endif /* GL_SGIX_sprite */
+
+#ifndef GL_SGIX_subsample
+#define GL_SGIX_subsample 1
+#define GL_PACK_SUBSAMPLE_RATE_SGIX 0x85A0
+#define GL_UNPACK_SUBSAMPLE_RATE_SGIX 0x85A1
+#define GL_PIXEL_SUBSAMPLE_4444_SGIX 0x85A2
+#define GL_PIXEL_SUBSAMPLE_2424_SGIX 0x85A3
+#define GL_PIXEL_SUBSAMPLE_4242_SGIX 0x85A4
+#endif /* GL_SGIX_subsample */
+
+#ifndef GL_SGIX_tag_sample_buffer
+#define GL_SGIX_tag_sample_buffer 1
+typedef void (APIENTRYP PFNGLTAGSAMPLEBUFFERSGIXPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glTagSampleBufferSGIX (void);
+#endif
+#endif /* GL_SGIX_tag_sample_buffer */
+
+#ifndef GL_SGIX_texture_add_env
+#define GL_SGIX_texture_add_env 1
+#define GL_TEXTURE_ENV_BIAS_SGIX 0x80BE
+#endif /* GL_SGIX_texture_add_env */
+
+#ifndef GL_SGIX_texture_coordinate_clamp
+#define GL_SGIX_texture_coordinate_clamp 1
+#define GL_TEXTURE_MAX_CLAMP_S_SGIX 0x8369
+#define GL_TEXTURE_MAX_CLAMP_T_SGIX 0x836A
+#define GL_TEXTURE_MAX_CLAMP_R_SGIX 0x836B
+#endif /* GL_SGIX_texture_coordinate_clamp */
+
+#ifndef GL_SGIX_texture_lod_bias
+#define GL_SGIX_texture_lod_bias 1
+#define GL_TEXTURE_LOD_BIAS_S_SGIX 0x818E
+#define GL_TEXTURE_LOD_BIAS_T_SGIX 0x818F
+#define GL_TEXTURE_LOD_BIAS_R_SGIX 0x8190
+#endif /* GL_SGIX_texture_lod_bias */
+
+#ifndef GL_SGIX_texture_multi_buffer
+#define GL_SGIX_texture_multi_buffer 1
+#define GL_TEXTURE_MULTI_BUFFER_HINT_SGIX 0x812E
+#endif /* GL_SGIX_texture_multi_buffer */
+
+#ifndef GL_SGIX_texture_scale_bias
+#define GL_SGIX_texture_scale_bias 1
+#define GL_POST_TEXTURE_FILTER_BIAS_SGIX 0x8179
+#define GL_POST_TEXTURE_FILTER_SCALE_SGIX 0x817A
+#define GL_POST_TEXTURE_FILTER_BIAS_RANGE_SGIX 0x817B
+#define GL_POST_TEXTURE_FILTER_SCALE_RANGE_SGIX 0x817C
+#endif /* GL_SGIX_texture_scale_bias */
+
+#ifndef GL_SGIX_vertex_preclip
+#define GL_SGIX_vertex_preclip 1
+#define GL_VERTEX_PRECLIP_SGIX 0x83EE
+#define GL_VERTEX_PRECLIP_HINT_SGIX 0x83EF
+#endif /* GL_SGIX_vertex_preclip */
+
+#ifndef GL_SGIX_ycrcb
+#define GL_SGIX_ycrcb 1
+#define GL_YCRCB_422_SGIX 0x81BB
+#define GL_YCRCB_444_SGIX 0x81BC
+#endif /* GL_SGIX_ycrcb */
+
+#ifndef GL_SGIX_ycrcb_subsample
+#define GL_SGIX_ycrcb_subsample 1
+#endif /* GL_SGIX_ycrcb_subsample */
+
+#ifndef GL_SGIX_ycrcba
+#define GL_SGIX_ycrcba 1
+#define GL_YCRCB_SGIX 0x8318
+#define GL_YCRCBA_SGIX 0x8319
+#endif /* GL_SGIX_ycrcba */
+
+#ifndef GL_SGI_color_matrix
+#define GL_SGI_color_matrix 1
+#define GL_COLOR_MATRIX_SGI 0x80B1
+#define GL_COLOR_MATRIX_STACK_DEPTH_SGI 0x80B2
+#define GL_MAX_COLOR_MATRIX_STACK_DEPTH_SGI 0x80B3
+#define GL_POST_COLOR_MATRIX_RED_SCALE_SGI 0x80B4
+#define GL_POST_COLOR_MATRIX_GREEN_SCALE_SGI 0x80B5
+#define GL_POST_COLOR_MATRIX_BLUE_SCALE_SGI 0x80B6
+#define GL_POST_COLOR_MATRIX_ALPHA_SCALE_SGI 0x80B7
+#define GL_POST_COLOR_MATRIX_RED_BIAS_SGI 0x80B8
+#define GL_POST_COLOR_MATRIX_GREEN_BIAS_SGI 0x80B9
+#define GL_POST_COLOR_MATRIX_BLUE_BIAS_SGI 0x80BA
+#define GL_POST_COLOR_MATRIX_ALPHA_BIAS_SGI 0x80BB
+#endif /* GL_SGI_color_matrix */
+
+#ifndef GL_SGI_color_table
+#define GL_SGI_color_table 1
+#define GL_COLOR_TABLE_SGI 0x80D0
+#define GL_POST_CONVOLUTION_COLOR_TABLE_SGI 0x80D1
+#define GL_POST_COLOR_MATRIX_COLOR_TABLE_SGI 0x80D2
+#define GL_PROXY_COLOR_TABLE_SGI 0x80D3
+#define GL_PROXY_POST_CONVOLUTION_COLOR_TABLE_SGI 0x80D4
+#define GL_PROXY_POST_COLOR_MATRIX_COLOR_TABLE_SGI 0x80D5
+#define GL_COLOR_TABLE_SCALE_SGI 0x80D6
+#define GL_COLOR_TABLE_BIAS_SGI 0x80D7
+#define GL_COLOR_TABLE_FORMAT_SGI 0x80D8
+#define GL_COLOR_TABLE_WIDTH_SGI 0x80D9
+#define GL_COLOR_TABLE_RED_SIZE_SGI 0x80DA
+#define GL_COLOR_TABLE_GREEN_SIZE_SGI 0x80DB
+#define GL_COLOR_TABLE_BLUE_SIZE_SGI 0x80DC
+#define GL_COLOR_TABLE_ALPHA_SIZE_SGI 0x80DD
+#define GL_COLOR_TABLE_LUMINANCE_SIZE_SGI 0x80DE
+#define GL_COLOR_TABLE_INTENSITY_SIZE_SGI 0x80DF
+typedef void (APIENTRYP PFNGLCOLORTABLESGIPROC) (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *table);
+typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERFVSGIPROC) (GLenum target, GLenum pname, const GLfloat *params);
+typedef void (APIENTRYP PFNGLCOLORTABLEPARAMETERIVSGIPROC) (GLenum target, GLenum pname, const GLint *params);
+typedef void (APIENTRYP PFNGLCOPYCOLORTABLESGIPROC) (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+typedef void (APIENTRYP PFNGLGETCOLORTABLESGIPROC) (GLenum target, GLenum format, GLenum type, void *table);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERFVSGIPROC) (GLenum target, GLenum pname, GLfloat *params);
+typedef void (APIENTRYP PFNGLGETCOLORTABLEPARAMETERIVSGIPROC) (GLenum target, GLenum pname, GLint *params);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColorTableSGI (GLenum target, GLenum internalformat, GLsizei width, GLenum format, GLenum type, const void *table);
+GLAPI void APIENTRY glColorTableParameterfvSGI (GLenum target, GLenum pname, const GLfloat *params);
+GLAPI void APIENTRY glColorTableParameterivSGI (GLenum target, GLenum pname, const GLint *params);
+GLAPI void APIENTRY glCopyColorTableSGI (GLenum target, GLenum internalformat, GLint x, GLint y, GLsizei width);
+GLAPI void APIENTRY glGetColorTableSGI (GLenum target, GLenum format, GLenum type, void *table);
+GLAPI void APIENTRY glGetColorTableParameterfvSGI (GLenum target, GLenum pname, GLfloat *params);
+GLAPI void APIENTRY glGetColorTableParameterivSGI (GLenum target, GLenum pname, GLint *params);
+#endif
+#endif /* GL_SGI_color_table */
+
+#ifndef GL_SGI_texture_color_table
+#define GL_SGI_texture_color_table 1
+#define GL_TEXTURE_COLOR_TABLE_SGI 0x80BC
+#define GL_PROXY_TEXTURE_COLOR_TABLE_SGI 0x80BD
+#endif /* GL_SGI_texture_color_table */
+
+#ifndef GL_SUNX_constant_data
+#define GL_SUNX_constant_data 1
+#define GL_UNPACK_CONSTANT_DATA_SUNX 0x81D5
+#define GL_TEXTURE_CONSTANT_DATA_SUNX 0x81D6
+typedef void (APIENTRYP PFNGLFINISHTEXTURESUNXPROC) (void);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glFinishTextureSUNX (void);
+#endif
+#endif /* GL_SUNX_constant_data */
+
+#ifndef GL_SUN_convolution_border_modes
+#define GL_SUN_convolution_border_modes 1
+#define GL_WRAP_BORDER_SUN 0x81D4
+#endif /* GL_SUN_convolution_border_modes */
+
+#ifndef GL_SUN_global_alpha
+#define GL_SUN_global_alpha 1
+#define GL_GLOBAL_ALPHA_SUN 0x81D9
+#define GL_GLOBAL_ALPHA_FACTOR_SUN 0x81DA
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORBSUNPROC) (GLbyte factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORSSUNPROC) (GLshort factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORISUNPROC) (GLint factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORFSUNPROC) (GLfloat factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORDSUNPROC) (GLdouble factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORUBSUNPROC) (GLubyte factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORUSSUNPROC) (GLushort factor);
+typedef void (APIENTRYP PFNGLGLOBALALPHAFACTORUISUNPROC) (GLuint factor);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glGlobalAlphaFactorbSUN (GLbyte factor);
+GLAPI void APIENTRY glGlobalAlphaFactorsSUN (GLshort factor);
+GLAPI void APIENTRY glGlobalAlphaFactoriSUN (GLint factor);
+GLAPI void APIENTRY glGlobalAlphaFactorfSUN (GLfloat factor);
+GLAPI void APIENTRY glGlobalAlphaFactordSUN (GLdouble factor);
+GLAPI void APIENTRY glGlobalAlphaFactorubSUN (GLubyte factor);
+GLAPI void APIENTRY glGlobalAlphaFactorusSUN (GLushort factor);
+GLAPI void APIENTRY glGlobalAlphaFactoruiSUN (GLuint factor);
+#endif
+#endif /* GL_SUN_global_alpha */
+
+#ifndef GL_SUN_mesh_array
+#define GL_SUN_mesh_array 1
+#define GL_QUAD_MESH_SUN 0x8614
+#define GL_TRIANGLE_MESH_SUN 0x8615
+typedef void (APIENTRYP PFNGLDRAWMESHARRAYSSUNPROC) (GLenum mode, GLint first, GLsizei count, GLsizei width);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glDrawMeshArraysSUN (GLenum mode, GLint first, GLsizei count, GLsizei width);
+#endif
+#endif /* GL_SUN_mesh_array */
+
+#ifndef GL_SUN_slice_accum
+#define GL_SUN_slice_accum 1
+#define GL_SLICE_ACCUM_SUN 0x85CC
+#endif /* GL_SUN_slice_accum */
+
+#ifndef GL_SUN_triangle_list
+#define GL_SUN_triangle_list 1
+#define GL_RESTART_SUN 0x0001
+#define GL_REPLACE_MIDDLE_SUN 0x0002
+#define GL_REPLACE_OLDEST_SUN 0x0003
+#define GL_TRIANGLE_LIST_SUN 0x81D7
+#define GL_REPLACEMENT_CODE_SUN 0x81D8
+#define GL_REPLACEMENT_CODE_ARRAY_SUN 0x85C0
+#define GL_REPLACEMENT_CODE_ARRAY_TYPE_SUN 0x85C1
+#define GL_REPLACEMENT_CODE_ARRAY_STRIDE_SUN 0x85C2
+#define GL_REPLACEMENT_CODE_ARRAY_POINTER_SUN 0x85C3
+#define GL_R1UI_V3F_SUN 0x85C4
+#define GL_R1UI_C4UB_V3F_SUN 0x85C5
+#define GL_R1UI_C3F_V3F_SUN 0x85C6
+#define GL_R1UI_N3F_V3F_SUN 0x85C7
+#define GL_R1UI_C4F_N3F_V3F_SUN 0x85C8
+#define GL_R1UI_T2F_V3F_SUN 0x85C9
+#define GL_R1UI_T2F_N3F_V3F_SUN 0x85CA
+#define GL_R1UI_T2F_C4F_N3F_V3F_SUN 0x85CB
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUISUNPROC) (GLuint code);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUSSUNPROC) (GLushort code);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUBSUNPROC) (GLubyte code);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUIVSUNPROC) (const GLuint *code);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUSVSUNPROC) (const GLushort *code);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUBVSUNPROC) (const GLubyte *code);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEPOINTERSUNPROC) (GLenum type, GLsizei stride, const void **pointer);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glReplacementCodeuiSUN (GLuint code);
+GLAPI void APIENTRY glReplacementCodeusSUN (GLushort code);
+GLAPI void APIENTRY glReplacementCodeubSUN (GLubyte code);
+GLAPI void APIENTRY glReplacementCodeuivSUN (const GLuint *code);
+GLAPI void APIENTRY glReplacementCodeusvSUN (const GLushort *code);
+GLAPI void APIENTRY glReplacementCodeubvSUN (const GLubyte *code);
+GLAPI void APIENTRY glReplacementCodePointerSUN (GLenum type, GLsizei stride, const void **pointer);
+#endif
+#endif /* GL_SUN_triangle_list */
+
+#ifndef GL_SUN_vertex
+#define GL_SUN_vertex 1
+typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX2FSUNPROC) (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y);
+typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX2FVSUNPROC) (const GLubyte *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX3FSUNPROC) (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLCOLOR4UBVERTEX3FVSUNPROC) (const GLubyte *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLCOLOR3FVERTEX3FSUNPROC) (GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLCOLOR3FVERTEX3FVSUNPROC) (const GLfloat *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLNORMAL3FVERTEX3FSUNPROC) (GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLCOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLCOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *c, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD2FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLTEXCOORD2FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD4FVERTEX4FSUNPROC) (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLTEXCOORD4FVERTEX4FVSUNPROC) (const GLfloat *tc, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4UBVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4UBVERTEX3FVSUNPROC) (const GLfloat *tc, const GLubyte *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR3FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR3FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD2FNORMAL3FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLTEXCOORD2FNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLTEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FSUNPROC) (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+typedef void (APIENTRYP PFNGLTEXCOORD4FCOLOR4FNORMAL3FVERTEX4FVSUNPROC) (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUIVERTEX3FSUNPROC) (GLuint rc, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUIVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FSUNPROC) (GLuint rc, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4UBVERTEX3FVSUNPROC) (const GLuint *rc, const GLubyte *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FSUNPROC) (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *c, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUINORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUICOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FSUNPROC) (GLuint rc, GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *tc, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FNORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *tc, const GLfloat *n, const GLfloat *v);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FSUNPROC) (GLuint rc, GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+typedef void (APIENTRYP PFNGLREPLACEMENTCODEUITEXCOORD2FCOLOR4FNORMAL3FVERTEX3FVSUNPROC) (const GLuint *rc, const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+#ifdef GL_GLEXT_PROTOTYPES
+GLAPI void APIENTRY glColor4ubVertex2fSUN (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y);
+GLAPI void APIENTRY glColor4ubVertex2fvSUN (const GLubyte *c, const GLfloat *v);
+GLAPI void APIENTRY glColor4ubVertex3fSUN (GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glColor4ubVertex3fvSUN (const GLubyte *c, const GLfloat *v);
+GLAPI void APIENTRY glColor3fVertex3fSUN (GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glColor3fVertex3fvSUN (const GLfloat *c, const GLfloat *v);
+GLAPI void APIENTRY glNormal3fVertex3fSUN (GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glNormal3fVertex3fvSUN (const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glColor4fNormal3fVertex3fSUN (GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glColor4fNormal3fVertex3fvSUN (const GLfloat *c, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord2fVertex3fSUN (GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glTexCoord2fVertex3fvSUN (const GLfloat *tc, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord4fVertex4fSUN (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glTexCoord4fVertex4fvSUN (const GLfloat *tc, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord2fColor4ubVertex3fSUN (GLfloat s, GLfloat t, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glTexCoord2fColor4ubVertex3fvSUN (const GLfloat *tc, const GLubyte *c, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord2fColor3fVertex3fSUN (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glTexCoord2fColor3fVertex3fvSUN (const GLfloat *tc, const GLfloat *c, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord2fNormal3fVertex3fSUN (GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glTexCoord2fNormal3fVertex3fvSUN (const GLfloat *tc, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord2fColor4fNormal3fVertex3fSUN (GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glTexCoord2fColor4fNormal3fVertex3fvSUN (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glTexCoord4fColor4fNormal3fVertex4fSUN (GLfloat s, GLfloat t, GLfloat p, GLfloat q, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+GLAPI void APIENTRY glTexCoord4fColor4fNormal3fVertex4fvSUN (const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiVertex3fSUN (GLuint rc, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiVertex3fvSUN (const GLuint *rc, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiColor4ubVertex3fSUN (GLuint rc, GLubyte r, GLubyte g, GLubyte b, GLubyte a, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiColor4ubVertex3fvSUN (const GLuint *rc, const GLubyte *c, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiColor3fVertex3fSUN (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiColor3fVertex3fvSUN (const GLuint *rc, const GLfloat *c, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiNormal3fVertex3fSUN (GLuint rc, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiColor4fNormal3fVertex3fSUN (GLuint rc, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiColor4fNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiTexCoord2fVertex3fSUN (GLuint rc, GLfloat s, GLfloat t, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiTexCoord2fVertex3fvSUN (const GLuint *rc, const GLfloat *tc, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiTexCoord2fNormal3fVertex3fSUN (GLuint rc, GLfloat s, GLfloat t, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiTexCoord2fNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *tc, const GLfloat *n, const GLfloat *v);
+GLAPI void APIENTRY glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fSUN (GLuint rc, GLfloat s, GLfloat t, GLfloat r, GLfloat g, GLfloat b, GLfloat a, GLfloat nx, GLfloat ny, GLfloat nz, GLfloat x, GLfloat y, GLfloat z);
+GLAPI void APIENTRY glReplacementCodeuiTexCoord2fColor4fNormal3fVertex3fvSUN (const GLuint *rc, const GLfloat *tc, const GLfloat *c, const GLfloat *n, const GLfloat *v);
+#endif
+#endif /* GL_SUN_vertex */
+
+#ifndef GL_WIN_phong_shading
+#define GL_WIN_phong_shading 1
+#define GL_PHONG_WIN 0x80EA
+#define GL_PHONG_HINT_WIN 0x80EB
+#endif /* GL_WIN_phong_shading */
+
+#ifndef GL_WIN_specular_fog
+#define GL_WIN_specular_fog 1
+#define GL_FOG_SPECULAR_TEXTURE_WIN 0x80EC
+#endif /* GL_WIN_specular_fog */
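+
+/*
+ * Illustrative sketch, not part of the upstream header: entry points for the
+ * extensions above are resolved at run time through the PFNGL*PROC typedefs.
+ * The loader call is platform-specific; glXGetProcAddressARB is assumed here
+ * purely as an example (Windows would use wglGetProcAddress instead).
+ */
+#if 0
+#include <GL/glx.h>
+
+static PFNGLTAGSAMPLEBUFFERSGIXPROC pfn_glTagSampleBufferSGIX;
+
+static void load_sgix_tag_sample_buffer(void)
+{
+   /* A NULL result means the extension entry point is unavailable. */
+   pfn_glTagSampleBufferSGIX = (PFNGLTAGSAMPLEBUFFERSGIXPROC)
+      glXGetProcAddressARB((const GLubyte *)"glTagSampleBufferSGIX");
+}
+#endif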
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/KHR/khrplatform.h b/third_party/rust/glslopt/glsl-optimizer/include/KHR/khrplatform.h
new file mode 100644
index 0000000000..ba47c26319
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/KHR/khrplatform.h
@@ -0,0 +1,292 @@
+#ifndef __khrplatform_h_
+#define __khrplatform_h_
+
+/*
+** Copyright (c) 2008-2018 The Khronos Group Inc.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a
+** copy of this software and/or associated documentation files (the
+** "Materials"), to deal in the Materials without restriction, including
+** without limitation the rights to use, copy, modify, merge, publish,
+** distribute, sublicense, and/or sell copies of the Materials, and to
+** permit persons to whom the Materials are furnished to do so, subject to
+** the following conditions:
+**
+** The above copyright notice and this permission notice shall be included
+** in all copies or substantial portions of the Materials.
+**
+** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+*/
+
+/* Khronos platform-specific types and definitions.
+ *
+ * The master copy of khrplatform.h is maintained in the Khronos EGL
+ * Registry repository at https://github.com/KhronosGroup/EGL-Registry
+ * The last semantic modification to khrplatform.h was at commit ID:
+ * 67a3e0864c2d75ea5287b9f3d2eb74a745936692
+ *
+ * Adopters may modify this file to suit their platform. Adopters are
+ * encouraged to submit platform specific modifications to the Khronos
+ * group so that they can be included in future versions of this file.
+ * Please submit changes by filing pull requests or issues on
+ * the EGL Registry repository linked above.
+ *
+ *
+ * See the Implementer's Guidelines for information about where this file
+ * should be located on your system and for more details of its use:
+ * http://www.khronos.org/registry/implementers_guide.pdf
+ *
+ * This file should be included as
+ * #include <KHR/khrplatform.h>
+ * by Khronos client API header files that use its types and defines.
+ *
+ * The types in khrplatform.h should only be used to define API-specific types.
+ *
+ * Types defined in khrplatform.h:
+ * khronos_int8_t signed 8 bit
+ * khronos_uint8_t unsigned 8 bit
+ * khronos_int16_t signed 16 bit
+ * khronos_uint16_t unsigned 16 bit
+ * khronos_int32_t signed 32 bit
+ * khronos_uint32_t unsigned 32 bit
+ * khronos_int64_t signed 64 bit
+ * khronos_uint64_t unsigned 64 bit
+ * khronos_intptr_t signed same number of bits as a pointer
+ * khronos_uintptr_t unsigned same number of bits as a pointer
+ * khronos_ssize_t signed size
+ * khronos_usize_t unsigned size
+ * khronos_float_t signed 32 bit floating point
+ * khronos_time_ns_t unsigned 64 bit time in nanoseconds
+ * khronos_utime_nanoseconds_t unsigned time interval or absolute time in
+ * nanoseconds
+ * khronos_stime_nanoseconds_t signed time interval in nanoseconds
+ * khronos_boolean_enum_t enumerated boolean type. This should
+ * only be used as a base type when a client API's boolean type is
+ * an enum. Client APIs which use an integer or other type for
+ * booleans cannot use this as the base type for their boolean.
+ *
+ * Tokens defined in khrplatform.h:
+ *
+ * KHRONOS_FALSE, KHRONOS_TRUE Enumerated boolean false/true values.
+ *
+ * KHRONOS_SUPPORT_INT64 is 1 if 64 bit integers are supported; otherwise 0.
+ * KHRONOS_SUPPORT_FLOAT is 1 if floats are supported; otherwise 0.
+ *
+ * Calling convention macros defined in this file:
+ * KHRONOS_APICALL
+ * KHRONOS_APIENTRY
+ * KHRONOS_APIATTRIBUTES
+ *
+ * These may be used in function prototypes as:
+ *
+ * KHRONOS_APICALL void KHRONOS_APIENTRY funcname(
+ * int arg1,
+ * int arg2) KHRONOS_APIATTRIBUTES;
+ */
+
+#if defined(__SCITECH_SNAP__) && !defined(KHRONOS_STATIC)
+# define KHRONOS_STATIC 1
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APICALL
+ *-------------------------------------------------------------------------
+ * This precedes the return type of the function in the function prototype.
+ */
+#if defined(KHRONOS_STATIC)
+ /* If the preprocessor constant KHRONOS_STATIC is defined, make the
+ * header compatible with static linking. */
+# define KHRONOS_APICALL
+#elif defined(_WIN32)
+# define KHRONOS_APICALL __declspec(dllimport)
+#elif defined (__SYMBIAN32__)
+# define KHRONOS_APICALL IMPORT_C
+#elif (defined(__GNUC__) && (__GNUC__ * 100 + __GNUC_MINOR__) >= 303) \
+ || (defined(__SUNPRO_C) && (__SUNPRO_C >= 0x590))
+/* KHRONOS_APIATTRIBUTES is not used by the client API headers yet */
+# define KHRONOS_APICALL __attribute__((visibility("default")))
+#else
+# define KHRONOS_APICALL
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIENTRY
+ *-------------------------------------------------------------------------
+ * This follows the return type of the function and precedes the function
+ * name in the function prototype.
+ */
+#if defined(_WIN32) && !defined(_WIN32_WCE) && !defined(KHRONOS_STATIC)
+ /* Win32 but not WinCE */
+# define KHRONOS_APIENTRY __stdcall
+#else
+# define KHRONOS_APIENTRY
+#endif
+
+/*-------------------------------------------------------------------------
+ * Definition of KHRONOS_APIATTRIBUTES
+ *-------------------------------------------------------------------------
+ * This follows the closing parenthesis of the function prototype arguments.
+ */
+#if defined (__ARMCC_2__)
+#define KHRONOS_APIATTRIBUTES __softfp
+#else
+#define KHRONOS_APIATTRIBUTES
+#endif
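+
+/*
+ * Illustrative sketch, not part of the upstream header: a hypothetical
+ * client API prototype using all three calling-convention macros in the
+ * order documented above.  The function name and signature are invented
+ * purely for illustration.
+ */
+#if 0
+KHRONOS_APICALL khronos_int32_t KHRONOS_APIENTRY
+khrExampleQuery(khronos_uint32_t param) KHRONOS_APIATTRIBUTES;
+#endif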
+
+/*-------------------------------------------------------------------------
+ * basic type definitions
+ *-----------------------------------------------------------------------*/
+#if (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) || defined(__GNUC__) || defined(__SCO__) || defined(__USLC__)
+
+
+/*
+ * Using <stdint.h>
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__VMS ) || defined(__sgi)
+
+/*
+ * Using <inttypes.h>
+ */
+#include <inttypes.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(_WIN32) && !defined(__SCITECH_SNAP__)
+
+/*
+ * Win32
+ */
+typedef __int32 khronos_int32_t;
+typedef unsigned __int32 khronos_uint32_t;
+typedef __int64 khronos_int64_t;
+typedef unsigned __int64 khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif defined(__sun__) || defined(__digital__)
+
+/*
+ * Sun or Digital
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#if defined(__arch64__) || defined(_LP64)
+typedef long int khronos_int64_t;
+typedef unsigned long int khronos_uint64_t;
+#else
+typedef long long int khronos_int64_t;
+typedef unsigned long long int khronos_uint64_t;
+#endif /* __arch64__ */
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#elif 0
+
+/*
+ * Hypothetical platform with no float or int64 support
+ */
+typedef int khronos_int32_t;
+typedef unsigned int khronos_uint32_t;
+#define KHRONOS_SUPPORT_INT64 0
+#define KHRONOS_SUPPORT_FLOAT 0
+
+#else
+
+/*
+ * Generic fallback
+ */
+#include <stdint.h>
+typedef int32_t khronos_int32_t;
+typedef uint32_t khronos_uint32_t;
+typedef int64_t khronos_int64_t;
+typedef uint64_t khronos_uint64_t;
+#define KHRONOS_SUPPORT_INT64 1
+#define KHRONOS_SUPPORT_FLOAT 1
+
+#endif
+
+
+/*
+ * Types that are (so far) the same on all platforms
+ */
+typedef signed char khronos_int8_t;
+typedef unsigned char khronos_uint8_t;
+typedef signed short int khronos_int16_t;
+typedef unsigned short int khronos_uint16_t;
+
+/*
+ * Types that differ between LLP64 and LP64 architectures - in LLP64,
+ * pointers are 64 bits, but 'long' is still 32 bits. Win64 appears
+ * to be the only LLP64 architecture in current use.
+ */
+#ifdef _WIN64
+typedef signed long long int khronos_intptr_t;
+typedef unsigned long long int khronos_uintptr_t;
+typedef signed long long int khronos_ssize_t;
+typedef unsigned long long int khronos_usize_t;
+#else
+typedef signed long int khronos_intptr_t;
+typedef unsigned long int khronos_uintptr_t;
+typedef signed long int khronos_ssize_t;
+typedef unsigned long int khronos_usize_t;
+#endif
+
+#if KHRONOS_SUPPORT_FLOAT
+/*
+ * Float type
+ */
+typedef float khronos_float_t;
+#endif
+
+#if KHRONOS_SUPPORT_INT64
+/* Time types
+ *
+ * These types can be used to represent a time interval in nanoseconds or
+ * an absolute Unadjusted System Time. Unadjusted System Time is the number
+ * of nanoseconds since some arbitrary system event (e.g. since the last
+ * time the system booted). The Unadjusted System Time is an unsigned
+ * 64 bit value that wraps back to 0 every 584 years. Time intervals
+ * may be either signed or unsigned.
+ */
+typedef khronos_uint64_t khronos_utime_nanoseconds_t;
+typedef khronos_int64_t khronos_stime_nanoseconds_t;
+#endif
+
+/*
+ * Dummy value used to pad enum types to 32 bits.
+ */
+#ifndef KHRONOS_MAX_ENUM
+#define KHRONOS_MAX_ENUM 0x7FFFFFFF
+#endif
+
+/*
+ * Enumerated boolean type
+ *
+ * Values other than zero should be considered to be true. Therefore
+ * comparisons should not be made against KHRONOS_TRUE.
+ */
+typedef enum {
+ KHRONOS_FALSE = 0,
+ KHRONOS_TRUE = 1,
+ KHRONOS_BOOLEAN_ENUM_FORCE_SIZE = KHRONOS_MAX_ENUM
+} khronos_boolean_enum_t;
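+
+/*
+ * Illustrative sketch, not part of the upstream header: per the note above,
+ * truth tests should compare against KHRONOS_FALSE, never KHRONOS_TRUE,
+ * because any non-zero value counts as true.
+ */
+#if 0
+static int khr_is_true(khronos_boolean_enum_t b)
+{
+   return b != KHRONOS_FALSE;   /* correct */
+   /* return b == KHRONOS_TRUE; -- wrong: other non-zero values are true */
+}
+#endif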
+
+#endif /* __khrplatform_h_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c11/threads.h b/third_party/rust/glslopt/glsl-optimizer/include/c11/threads.h
new file mode 100644
index 0000000000..3c3f23a8ab
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c11/threads.h
@@ -0,0 +1,73 @@
+/*
+ * C11 <threads.h> emulation library
+ *
+ * (C) Copyright yohhoy 2012.
+ * Distributed under the Boost Software License, Version 1.0.
+ *
+ * Permission is hereby granted, free of charge, to any person or organization
+ * obtaining a copy of the software and accompanying documentation covered by
+ * this license (the "Software") to use, reproduce, display, distribute,
+ * execute, and transmit the Software, and to prepare derivative works of the
+ * Software, and to permit third-parties to whom the Software is furnished to
+ * do so, all subject to the following:
+ *
+ * The copyright notices in the Software and this entire statement, including
+ * the above license grant, this restriction and the following disclaimer,
+ * must be included in all copies of the Software, in whole or in part, and
+ * all derivative works of the Software, unless such copies or derivative
+ * works are solely in the form of machine-executable object code generated by
+ * a source language processor.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef EMULATED_THREADS_H_INCLUDED_
+#define EMULATED_THREADS_H_INCLUDED_
+
+#include <time.h>
+
+#ifndef TIME_UTC
+#define TIME_UTC 1
+#endif
+
+#include "c99_compat.h" /* for `inline` */
+
+/*---------------------------- types ----------------------------*/
+typedef void (*tss_dtor_t)(void*);
+typedef int (*thrd_start_t)(void*);
+
+
+/*-------------------- enumeration constants --------------------*/
+enum {
+ mtx_plain = 0,
+ mtx_try = 1,
+ mtx_timed = 2,
+ mtx_recursive = 4
+};
+
+enum {
+ thrd_success = 0, // succeeded
+ thrd_timeout, // timeout
+ thrd_error, // failed
+ thrd_busy, // resource busy
+ thrd_nomem // out of memory
+};
+
+/*-------------------------- functions --------------------------*/
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+#include "threads_win32.h"
+#elif defined(HAVE_PTHREAD)
+#include "threads_posix.h"
+#else
+#error Not supported on this platform.
+#endif
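+
+/*
+ * Illustrative sketch, not part of the upstream header: minimal use of the
+ * emulated C11 API declared here, with error handling reduced to asserts.
+ */
+#if 0
+#include <assert.h>
+
+static mtx_t example_mtx;
+
+static int example_worker(void *arg)
+{
+   (void)arg;
+   mtx_lock(&example_mtx);
+   /* ... critical section ... */
+   mtx_unlock(&example_mtx);
+   return 0;
+}
+
+static void example_run(void)
+{
+   thrd_t t;
+   int res;
+   assert(mtx_init(&example_mtx, mtx_plain) == thrd_success);
+   assert(thrd_create(&t, example_worker, NULL) == thrd_success);
+   assert(thrd_join(t, &res) == thrd_success && res == 0);
+   mtx_destroy(&example_mtx);
+}
+#endif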
+
+
+
+#endif /* EMULATED_THREADS_H_INCLUDED_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c11/threads_posix.h b/third_party/rust/glslopt/glsl-optimizer/include/c11/threads_posix.h
new file mode 100644
index 0000000000..45cb6075e6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c11/threads_posix.h
@@ -0,0 +1,396 @@
+/*
+ * C11 <threads.h> emulation library
+ *
+ * (C) Copyright yohhoy 2012.
+ * Distributed under the Boost Software License, Version 1.0.
+ *
+ * Permission is hereby granted, free of charge, to any person or organization
+ * obtaining a copy of the software and accompanying documentation covered by
+ * this license (the "Software") to use, reproduce, display, distribute,
+ * execute, and transmit the Software, and to prepare derivative works of the
+ * Software, and to permit third-parties to whom the Software is furnished to
+ * do so, all subject to the following:
+ *
+ * The copyright notices in the Software and this entire statement, including
+ * the above license grant, this restriction and the following disclaimer,
+ * must be included in all copies of the Software, in whole or in part, and
+ * all derivative works of the Software, unless such copies or derivative
+ * works are solely in the form of machine-executable object code generated by
+ * a source language processor.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <stdlib.h>
+#ifndef assert
+#include <assert.h>
+#endif
+#include <limits.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sched.h>
+#include <stdint.h> /* for intptr_t */
+
+/*
+Configuration macro:
+
+ EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
+ Use pthread_mutex_timedlock() for `mtx_timedlock()'
+ Otherwise use mtx_trylock() + *busy loop* emulation.
+*/
+#if !defined(__CYGWIN__) && !defined(__APPLE__) && !defined(__NetBSD__)
+#define EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
+#endif
+
+
+#include <pthread.h>
+
+/*---------------------------- macros ----------------------------*/
+#define ONCE_FLAG_INIT PTHREAD_ONCE_INIT
+#ifdef PTHREAD_DESTRUCTOR_ITERATIONS
+#define TSS_DTOR_ITERATIONS PTHREAD_DESTRUCTOR_ITERATIONS
+#else
+#define TSS_DTOR_ITERATIONS 1 // assume TSS dtor MAY be called at least once.
+#endif
+
+// FIXME: temporary non-standard hack to ease transition
+#define _MTX_INITIALIZER_NP PTHREAD_MUTEX_INITIALIZER
+
+/*---------------------------- types ----------------------------*/
+typedef pthread_cond_t cnd_t;
+typedef pthread_t thrd_t;
+typedef pthread_key_t tss_t;
+typedef pthread_mutex_t mtx_t;
+typedef pthread_once_t once_flag;
+
+
+/*
+Implementation limits:
+ - Conditional emulation for "mutex with timeout"
+ (see EMULATED_THREADS_USE_NATIVE_TIMEDLOCK macro)
+*/
+struct impl_thrd_param {
+ thrd_start_t func;
+ void *arg;
+};
+
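+/*
+ * Trampoline handed to pthread_create(): copies the heap-allocated
+ * parameter pack, frees it, then invokes the user function, returning
+ * its int result through the thread's void* exit value.
+ */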
+static inline void *
+impl_thrd_routine(void *p)
+{
+ struct impl_thrd_param pack = *((struct impl_thrd_param *)p);
+ free(p);
+ return (void*)(intptr_t)pack.func(pack.arg);
+}
+
+
+/*--------------- 7.25.2 Initialization functions ---------------*/
+// 7.25.2.1
+static inline void
+call_once(once_flag *flag, void (*func)(void))
+{
+ pthread_once(flag, func);
+}
+
+
+/*------------- 7.25.3 Condition variable functions -------------*/
+// 7.25.3.1
+static inline int
+cnd_broadcast(cnd_t *cond)
+{
+ assert(cond != NULL);
+ return (pthread_cond_broadcast(cond) == 0) ? thrd_success : thrd_error;
+}
+
+// 7.25.3.2
+static inline void
+cnd_destroy(cnd_t *cond)
+{
+ assert(cond);
+ pthread_cond_destroy(cond);
+}
+
+// 7.25.3.3
+static inline int
+cnd_init(cnd_t *cond)
+{
+ assert(cond != NULL);
+ return (pthread_cond_init(cond, NULL) == 0) ? thrd_success : thrd_error;
+}
+
+// 7.25.3.4
+static inline int
+cnd_signal(cnd_t *cond)
+{
+ assert(cond != NULL);
+ return (pthread_cond_signal(cond) == 0) ? thrd_success : thrd_error;
+}
+
+// 7.25.3.5
+static inline int
+cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *abs_time)
+{
+ int rt;
+
+ assert(mtx != NULL);
+ assert(cond != NULL);
+ assert(abs_time != NULL);
+
+ rt = pthread_cond_timedwait(cond, mtx, abs_time);
+ if (rt == ETIMEDOUT)
+ return thrd_busy;
+ return (rt == 0) ? thrd_success : thrd_error;
+}
+
+// 7.25.3.6
+static inline int
+cnd_wait(cnd_t *cond, mtx_t *mtx)
+{
+ assert(mtx != NULL);
+ assert(cond != NULL);
+ return (pthread_cond_wait(cond, mtx) == 0) ? thrd_success : thrd_error;
+}
+
+
+/*-------------------- 7.25.4 Mutex functions --------------------*/
+// 7.25.4.1
+static inline void
+mtx_destroy(mtx_t *mtx)
+{
+ assert(mtx != NULL);
+ pthread_mutex_destroy(mtx);
+}
+
+/*
+ * XXX: Workaround when building with -O0 and without pthreads link.
+ *
+ * In such cases constant folding and dead code elimination won't be
+ * available, thus the compiler will always add the pthread_mutexattr*
+ * functions into the binary. As we try to link, we'll fail as the
+ * symbols are unresolved.
+ *
+ * Ideally we'd enable the optimisations locally, but that does not
+ * seem to work.
+ *
+ * So the alternative workaround is to annotate the symbols as weak.
+ * Thus the linker will be happy and things don't clash when building
+ * with -O1 or greater.
+ */
+#if defined(HAVE_FUNC_ATTRIBUTE_WEAK) && !defined(__CYGWIN__)
+__attribute__((weak))
+int pthread_mutexattr_init(pthread_mutexattr_t *attr);
+
+__attribute__((weak))
+int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type);
+
+__attribute__((weak))
+int pthread_mutexattr_destroy(pthread_mutexattr_t *attr);
+#endif
+
+// 7.25.4.2
+static inline int
+mtx_init(mtx_t *mtx, int type)
+{
+ pthread_mutexattr_t attr;
+ assert(mtx != NULL);
+ if (type != mtx_plain && type != mtx_timed && type != mtx_try
+ && type != (mtx_plain|mtx_recursive)
+ && type != (mtx_timed|mtx_recursive)
+ && type != (mtx_try|mtx_recursive))
+ return thrd_error;
+
+ if ((type & mtx_recursive) == 0) {
+ pthread_mutex_init(mtx, NULL);
+ return thrd_success;
+ }
+
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ pthread_mutex_init(mtx, &attr);
+ pthread_mutexattr_destroy(&attr);
+ return thrd_success;
+}
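+
+/*
+ * Illustrative sketch, not upstream code: a recursive mutex is requested
+ * by OR-ing mtx_recursive into the type, as validated above.
+ */
+#if 0
+static void example_recursive_mutex(void)
+{
+   mtx_t m;
+   mtx_init(&m, mtx_plain | mtx_recursive);
+   mtx_lock(&m);
+   mtx_lock(&m);      /* legal: same thread may relock a recursive mutex */
+   mtx_unlock(&m);
+   mtx_unlock(&m);
+   mtx_destroy(&m);
+}
+#endif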
+
+// 7.25.4.3
+static inline int
+mtx_lock(mtx_t *mtx)
+{
+ assert(mtx != NULL);
+ return (pthread_mutex_lock(mtx) == 0) ? thrd_success : thrd_error;
+}
+
+static inline int
+mtx_trylock(mtx_t *mtx);
+
+static inline void
+thrd_yield(void);
+
+// 7.25.4.4
+static inline int
+mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
+{
+ assert(mtx != NULL);
+ assert(ts != NULL);
+
+ {
+#ifdef EMULATED_THREADS_USE_NATIVE_TIMEDLOCK
+ int rt;
+ rt = pthread_mutex_timedlock(mtx, ts);
+ if (rt == 0)
+ return thrd_success;
+ return (rt == ETIMEDOUT) ? thrd_busy : thrd_error;
+#else
+ time_t expire = time(NULL);
+ expire += ts->tv_sec;
+ while (mtx_trylock(mtx) != thrd_success) {
+ time_t now = time(NULL);
+ if (expire < now)
+ return thrd_busy;
+ // busy loop!
+ thrd_yield();
+ }
+ return thrd_success;
+#endif
+ }
+}
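+
+/*
+ * Illustrative sketch, not upstream code: mtx_timedlock() expects an
+ * *absolute* TIME_UTC deadline, typically built with timespec_get()
+ * (defined at the end of this header when the platform lacks it).
+ */
+#if 0
+static int example_lock_with_timeout(mtx_t *m)
+{
+   struct timespec deadline;
+   timespec_get(&deadline, TIME_UTC);
+   deadline.tv_sec += 2;                 /* give up after roughly two seconds */
+   return mtx_timedlock(m, &deadline);   /* thrd_busy on timeout */
+}
+#endif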
+
+// 7.25.4.5
+static inline int
+mtx_trylock(mtx_t *mtx)
+{
+ assert(mtx != NULL);
+ return (pthread_mutex_trylock(mtx) == 0) ? thrd_success : thrd_busy;
+}
+
+// 7.25.4.6
+static inline int
+mtx_unlock(mtx_t *mtx)
+{
+ assert(mtx != NULL);
+ return (pthread_mutex_unlock(mtx) == 0) ? thrd_success : thrd_error;
+}
+
+
+/*------------------- 7.25.5 Thread functions -------------------*/
+// 7.25.5.1
+static inline int
+thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
+{
+ struct impl_thrd_param *pack;
+ assert(thr != NULL);
+ pack = (struct impl_thrd_param *)malloc(sizeof(struct impl_thrd_param));
+ if (!pack) return thrd_nomem;
+ pack->func = func;
+ pack->arg = arg;
+ if (pthread_create(thr, NULL, impl_thrd_routine, pack) != 0) {
+ free(pack);
+ return thrd_error;
+ }
+ return thrd_success;
+}
+
+// 7.25.5.2
+static inline thrd_t
+thrd_current(void)
+{
+ return pthread_self();
+}
+
+// 7.25.5.3
+static inline int
+thrd_detach(thrd_t thr)
+{
+ return (pthread_detach(thr) == 0) ? thrd_success : thrd_error;
+}
+
+// 7.25.5.4
+static inline int
+thrd_equal(thrd_t thr0, thrd_t thr1)
+{
+ return pthread_equal(thr0, thr1);
+}
+
+// 7.25.5.5
+static inline void
+thrd_exit(int res)
+{
+ pthread_exit((void*)(intptr_t)res);
+}
+
+// 7.25.5.6
+static inline int
+thrd_join(thrd_t thr, int *res)
+{
+ void *code;
+ if (pthread_join(thr, &code) != 0)
+ return thrd_error;
+ if (res)
+ *res = (int)(intptr_t)code;
+ return thrd_success;
+}
+
+// 7.25.5.7
+static inline void
+thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
+{
+ assert(time_point != NULL);
+ nanosleep(time_point, remaining);
+}
+
+// 7.25.5.8
+static inline void
+thrd_yield(void)
+{
+ sched_yield();
+}
+
+
+/*----------- 7.25.6 Thread-specific storage functions -----------*/
+// 7.25.6.1
+static inline int
+tss_create(tss_t *key, tss_dtor_t dtor)
+{
+ assert(key != NULL);
+ return (pthread_key_create(key, dtor) == 0) ? thrd_success : thrd_error;
+}
+
+// 7.25.6.2
+static inline void
+tss_delete(tss_t key)
+{
+ pthread_key_delete(key);
+}
+
+// 7.25.6.3
+static inline void *
+tss_get(tss_t key)
+{
+ return pthread_getspecific(key);
+}
+
+// 7.25.6.4
+static inline int
+tss_set(tss_t key, void *val)
+{
+ return (pthread_setspecific(key, val) == 0) ? thrd_success : thrd_error;
+}
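+
+/*
+ * Illustrative sketch, not upstream code: thread-specific storage with a
+ * destructor; free() runs against each thread's stored value when that
+ * thread exits.
+ */
+#if 0
+static tss_t example_key;
+
+static void example_tss_setup(void)
+{
+   if (tss_create(&example_key, free) == thrd_success)
+      tss_set(example_key, malloc(64));
+}
+#endif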
+
+
+/*-------------------- 7.25.7 Time functions --------------------*/
+// 7.25.7.1
+#ifndef HAVE_TIMESPEC_GET
+static inline int
+timespec_get(struct timespec *ts, int base)
+{
+ if (!ts) return 0;
+ if (base == TIME_UTC) {
+ clock_gettime(CLOCK_REALTIME, ts);
+ return base;
+ }
+ return 0;
+}
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c11/threads_win32.h b/third_party/rust/glslopt/glsl-optimizer/include/c11/threads_win32.h
new file mode 100644
index 0000000000..326cfc4677
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c11/threads_win32.h
@@ -0,0 +1,653 @@
+/*
+ * C11 <threads.h> emulation library
+ *
+ * (C) Copyright yohhoy 2012.
+ * Distributed under the Boost Software License, Version 1.0.
+ *
+ * Permission is hereby granted, free of charge, to any person or organization
+ * obtaining a copy of the software and accompanying documentation covered by
+ * this license (the "Software") to use, reproduce, display, distribute,
+ * execute, and transmit the Software, and to prepare derivative works of the
+ * Software, and to permit third-parties to whom the Software is furnished to
+ * do so, all subject to the following:
+ *
+ * The copyright notices in the Software and this entire statement, including
+ * the above license grant, this restriction and the following disclaimer,
+ * must be included in all copies of the Software, in whole or in part, and
+ * all derivative works of the Software, unless such copies or derivative
+ * works are solely in the form of machine-executable object code generated by
+ * a source language processor.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
+ * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
+ * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef assert
+#include <assert.h>
+#endif
+#include <limits.h>
+#include <errno.h>
+#include <process.h> // MSVCRT
+#include <stdlib.h>
+
+/*
+Configuration macro:
+
+ EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+ Use native WindowsAPI one-time initialization function.
+ (requires WinVista or later)
+ Otherwise emulate by mtx_trylock() + *busy loop* for WinXP.
+
+ EMULATED_THREADS_USE_NATIVE_CV
+ Use native WindowsAPI condition variable object.
+ (requires WinVista or later)
+ Otherwise use emulated implementation for WinXP.
+
+ EMULATED_THREADS_TSS_DTOR_SLOTNUM
+ Maximum number of TSS destructors that can be registered.
+*/
+
+// XXX: Retain XP compatibility
+#if 0
+#if _WIN32_WINNT >= 0x0600
+// Prefer native WindowsAPI on newer environment.
+#if !defined(__MINGW32__)
+#define EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+#endif
+#define EMULATED_THREADS_USE_NATIVE_CV
+#endif
+#endif
+#define EMULATED_THREADS_TSS_DTOR_SLOTNUM 64 // see TLS_MINIMUM_AVAILABLE
+
+
+#include <windows.h>
+
+// check configuration
+#if defined(EMULATED_THREADS_USE_NATIVE_CALL_ONCE) && (_WIN32_WINNT < 0x0600)
+#error EMULATED_THREADS_USE_NATIVE_CALL_ONCE requires _WIN32_WINNT>=0x0600
+#endif
+
+#if defined(EMULATED_THREADS_USE_NATIVE_CV) && (_WIN32_WINNT < 0x0600)
+#error EMULATED_THREADS_USE_NATIVE_CV requires _WIN32_WINNT>=0x0600
+#endif
+
+/* Visual Studio 2015 and later */
+#ifdef _MSC_VER
+#define HAVE_TIMESPEC_GET
+#endif
+
+/*---------------------------- macros ----------------------------*/
+#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+#define ONCE_FLAG_INIT INIT_ONCE_STATIC_INIT
+#else
+#define ONCE_FLAG_INIT {0}
+#endif
+#define TSS_DTOR_ITERATIONS 1
+
+// FIXME: temporary non-standard hack to ease transition
+#define _MTX_INITIALIZER_NP {(PCRITICAL_SECTION_DEBUG)-1, -1, 0, 0, 0, 0}
+
+/*---------------------------- types ----------------------------*/
+typedef struct cnd_t {
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ CONDITION_VARIABLE condvar;
+#else
+ int blocked;
+ int gone;
+ int to_unblock;
+ HANDLE sem_queue;
+ HANDLE sem_gate;
+ CRITICAL_SECTION monitor;
+#endif
+} cnd_t;
+
+typedef HANDLE thrd_t;
+
+typedef DWORD tss_t;
+
+typedef CRITICAL_SECTION mtx_t;
+
+#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+typedef INIT_ONCE once_flag;
+#else
+typedef struct once_flag_t {
+ volatile LONG status;
+} once_flag;
+#endif
+
+
+static inline void * tss_get(tss_t key);
+static inline void thrd_yield(void);
+static inline int mtx_trylock(mtx_t *mtx);
+static inline int mtx_lock(mtx_t *mtx);
+static inline int mtx_unlock(mtx_t *mtx);
+
+/*
+Implementation limits:
+ - Conditional emulation for "Initialization functions"
+ (see EMULATED_THREADS_USE_NATIVE_CALL_ONCE macro)
+ - Emulated `mtx_timedlock()' with mtx_trylock() + *busy loop*
+*/
+static void impl_tss_dtor_invoke(void); // forward decl.
+
+struct impl_thrd_param {
+ thrd_start_t func;
+ void *arg;
+};
+
+static unsigned __stdcall impl_thrd_routine(void *p)
+{
+ struct impl_thrd_param pack;
+ int code;
+ memcpy(&pack, p, sizeof(struct impl_thrd_param));
+ free(p);
+ code = pack.func(pack.arg);
+ impl_tss_dtor_invoke();
+ return (unsigned)code;
+}
+
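+/*
+ * Converts a timespec into a millisecond count for the Win32 wait APIs.
+ * The result is treated as a plain duration by the callers below.
+ */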
+static DWORD impl_timespec2msec(const struct timespec *ts)
+{
+ return (DWORD)((ts->tv_sec * 1000U) + (ts->tv_nsec / 1000000L));
+}
+
+#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+struct impl_call_once_param { void (*func)(void); };
+static BOOL CALLBACK impl_call_once_callback(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context)
+{
+ struct impl_call_once_param *param = (struct impl_call_once_param*)Parameter;
+ (param->func)();
+ ((void)InitOnce); ((void)Context); // suppress warning
+ return TRUE;
+}
+#endif // ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+
+#ifndef EMULATED_THREADS_USE_NATIVE_CV
+/*
+Note:
+ The implementation of condition variable is ported from Boost.Interprocess
+ See http://www.boost.org/boost/interprocess/sync/windows/condition.hpp
+*/
+static void impl_cond_do_signal(cnd_t *cond, int broadcast)
+{
+ int nsignal = 0;
+
+ EnterCriticalSection(&cond->monitor);
+ if (cond->to_unblock != 0) {
+ if (cond->blocked == 0) {
+ LeaveCriticalSection(&cond->monitor);
+ return;
+ }
+ if (broadcast) {
+ cond->to_unblock += nsignal = cond->blocked;
+ cond->blocked = 0;
+ } else {
+ nsignal = 1;
+ cond->to_unblock++;
+ cond->blocked--;
+ }
+ } else if (cond->blocked > cond->gone) {
+ WaitForSingleObject(cond->sem_gate, INFINITE);
+ if (cond->gone != 0) {
+ cond->blocked -= cond->gone;
+ cond->gone = 0;
+ }
+ if (broadcast) {
+ nsignal = cond->to_unblock = cond->blocked;
+ cond->blocked = 0;
+ } else {
+ nsignal = cond->to_unblock = 1;
+ cond->blocked--;
+ }
+ }
+ LeaveCriticalSection(&cond->monitor);
+
+ if (0 < nsignal)
+ ReleaseSemaphore(cond->sem_queue, nsignal, NULL);
+}
+
+static int impl_cond_do_wait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
+{
+ int nleft = 0;
+ int ngone = 0;
+ int timeout = 0;
+ DWORD w;
+
+ WaitForSingleObject(cond->sem_gate, INFINITE);
+ cond->blocked++;
+ ReleaseSemaphore(cond->sem_gate, 1, NULL);
+
+ mtx_unlock(mtx);
+
+ w = WaitForSingleObject(cond->sem_queue, ts ? impl_timespec2msec(ts) : INFINITE);
+ timeout = (w == WAIT_TIMEOUT);
+
+ EnterCriticalSection(&cond->monitor);
+ if ((nleft = cond->to_unblock) != 0) {
+ if (timeout) {
+ if (cond->blocked != 0) {
+ cond->blocked--;
+ } else {
+ cond->gone++;
+ }
+ }
+ if (--cond->to_unblock == 0) {
+ if (cond->blocked != 0) {
+ ReleaseSemaphore(cond->sem_gate, 1, NULL);
+ nleft = 0;
+ }
+ else if ((ngone = cond->gone) != 0) {
+ cond->gone = 0;
+ }
+ }
+ } else if (++cond->gone == INT_MAX/2) {
+ WaitForSingleObject(cond->sem_gate, INFINITE);
+ cond->blocked -= cond->gone;
+ ReleaseSemaphore(cond->sem_gate, 1, NULL);
+ cond->gone = 0;
+ }
+ LeaveCriticalSection(&cond->monitor);
+
+ if (nleft == 1) {
+ while (ngone--)
+ WaitForSingleObject(cond->sem_queue, INFINITE);
+ ReleaseSemaphore(cond->sem_gate, 1, NULL);
+ }
+
+ mtx_lock(mtx);
+ return timeout ? thrd_busy : thrd_success;
+}
+#endif // ifndef EMULATED_THREADS_USE_NATIVE_CV
+
+static struct impl_tss_dtor_entry {
+ tss_t key;
+ tss_dtor_t dtor;
+} impl_tss_dtor_tbl[EMULATED_THREADS_TSS_DTOR_SLOTNUM];
+
+static int impl_tss_dtor_register(tss_t key, tss_dtor_t dtor)
+{
+ int i;
+ for (i = 0; i < EMULATED_THREADS_TSS_DTOR_SLOTNUM; i++) {
+ if (!impl_tss_dtor_tbl[i].dtor)
+ break;
+ }
+ if (i == EMULATED_THREADS_TSS_DTOR_SLOTNUM)
+ return 1;
+ impl_tss_dtor_tbl[i].key = key;
+ impl_tss_dtor_tbl[i].dtor = dtor;
+ return 0;
+}
+
+static void impl_tss_dtor_invoke()
+{
+ int i;
+ for (i = 0; i < EMULATED_THREADS_TSS_DTOR_SLOTNUM; i++) {
+ if (impl_tss_dtor_tbl[i].dtor) {
+ void* val = tss_get(impl_tss_dtor_tbl[i].key);
+ if (val)
+ (impl_tss_dtor_tbl[i].dtor)(val);
+ }
+ }
+}
+
+
+/*--------------- 7.25.2 Initialization functions ---------------*/
+// 7.25.2.1
+static inline void
+call_once(once_flag *flag, void (*func)(void))
+{
+ assert(flag && func);
+#ifdef EMULATED_THREADS_USE_NATIVE_CALL_ONCE
+ {
+ struct impl_call_once_param param;
+ param.func = func;
+ InitOnceExecuteOnce(flag, impl_call_once_callback, (PVOID)&param, NULL);
+ }
+#else
+ if (InterlockedCompareExchange(&flag->status, 1, 0) == 0) {
+ (func)();
+ InterlockedExchange(&flag->status, 2);
+ } else {
+ while (flag->status == 1) {
+ // busy loop!
+ thrd_yield();
+ }
+ }
+#endif
+}
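+
+/*
+ * Illustrative sketch, not upstream code: one-time initialization with the
+ * call_once() defined above.  The flag must be statically initialized with
+ * ONCE_FLAG_INIT.
+ */
+#if 0
+static once_flag example_once = ONCE_FLAG_INIT;
+
+static void example_init(void)
+{
+   /* runs exactly once, no matter how many threads race here */
+}
+
+static void example_entry(void)
+{
+   call_once(&example_once, example_init);
+}
+#endif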
+
+
+/*------------- 7.25.3 Condition variable functions -------------*/
+// 7.25.3.1
+static inline int
+cnd_broadcast(cnd_t *cond)
+{
+ if (!cond) return thrd_error;
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ WakeAllConditionVariable(&cond->condvar);
+#else
+ impl_cond_do_signal(cond, 1);
+#endif
+ return thrd_success;
+}
+
+// 7.25.3.2
+static inline void
+cnd_destroy(cnd_t *cond)
+{
+ assert(cond);
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ // do nothing
+#else
+ CloseHandle(cond->sem_queue);
+ CloseHandle(cond->sem_gate);
+ DeleteCriticalSection(&cond->monitor);
+#endif
+}
+
+// 7.25.3.3
+static inline int
+cnd_init(cnd_t *cond)
+{
+ if (!cond) return thrd_error;
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ InitializeConditionVariable(&cond->condvar);
+#else
+ cond->blocked = 0;
+ cond->gone = 0;
+ cond->to_unblock = 0;
+ cond->sem_queue = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
+ cond->sem_gate = CreateSemaphore(NULL, 1, 1, NULL);
+ InitializeCriticalSection(&cond->monitor);
+#endif
+ return thrd_success;
+}
+
+// 7.25.3.4
+static inline int
+cnd_signal(cnd_t *cond)
+{
+ if (!cond) return thrd_error;
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ WakeConditionVariable(&cond->condvar);
+#else
+ impl_cond_do_signal(cond, 0);
+#endif
+ return thrd_success;
+}
+
+// 7.25.3.5
+static inline int
+cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *abs_time)
+{
+ if (!cond || !mtx || !abs_time) return thrd_error;
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ if (SleepConditionVariableCS(&cond->condvar, mtx, impl_timespec2msec(abs_time)))
+ return thrd_success;
+ return (GetLastError() == ERROR_TIMEOUT) ? thrd_busy : thrd_error;
+#else
+ return impl_cond_do_wait(cond, mtx, abs_time);
+#endif
+}
+
+// 7.25.3.6
+static inline int
+cnd_wait(cnd_t *cond, mtx_t *mtx)
+{
+ if (!cond || !mtx) return thrd_error;
+#ifdef EMULATED_THREADS_USE_NATIVE_CV
+ SleepConditionVariableCS(&cond->condvar, mtx, INFINITE);
+#else
+ impl_cond_do_wait(cond, mtx, NULL);
+#endif
+ return thrd_success;
+}
+
+
+/*-------------------- 7.25.4 Mutex functions --------------------*/
+// 7.25.4.1
+static inline void
+mtx_destroy(mtx_t *mtx)
+{
+ assert(mtx);
+ DeleteCriticalSection(mtx);
+}
+
+// 7.25.4.2
+static inline int
+mtx_init(mtx_t *mtx, int type)
+{
+ if (!mtx) return thrd_error;
+ if (type != mtx_plain && type != mtx_timed && type != mtx_try
+ && type != (mtx_plain|mtx_recursive)
+ && type != (mtx_timed|mtx_recursive)
+ && type != (mtx_try|mtx_recursive))
+ return thrd_error;
+ InitializeCriticalSection(mtx);
+ return thrd_success;
+}
+
+// 7.25.4.3
+static inline int
+mtx_lock(mtx_t *mtx)
+{
+ if (!mtx) return thrd_error;
+ EnterCriticalSection(mtx);
+ return thrd_success;
+}
+
+// 7.25.4.4
+static inline int
+mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
+{
+ time_t expire, now;
+ if (!mtx || !ts) return thrd_error;
+ expire = time(NULL);
+ expire += ts->tv_sec;
+ while (mtx_trylock(mtx) != thrd_success) {
+ now = time(NULL);
+ if (expire < now)
+ return thrd_busy;
+ // busy loop!
+ thrd_yield();
+ }
+ return thrd_success;
+}
+
+// 7.25.4.5
+static inline int
+mtx_trylock(mtx_t *mtx)
+{
+ if (!mtx) return thrd_error;
+ return TryEnterCriticalSection(mtx) ? thrd_success : thrd_busy;
+}
+
+// 7.25.4.6
+static inline int
+mtx_unlock(mtx_t *mtx)
+{
+ if (!mtx) return thrd_error;
+ LeaveCriticalSection(mtx);
+ return thrd_success;
+}
+
+
+/*------------------- 7.25.5 Thread functions -------------------*/
+// 7.25.5.1
+static inline int
+thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
+{
+ struct impl_thrd_param *pack;
+ uintptr_t handle;
+ if (!thr) return thrd_error;
+ pack = (struct impl_thrd_param *)malloc(sizeof(struct impl_thrd_param));
+ if (!pack) return thrd_nomem;
+ pack->func = func;
+ pack->arg = arg;
+ handle = _beginthreadex(NULL, 0, impl_thrd_routine, pack, 0, NULL);
+ if (handle == 0) {
+ if (errno == EAGAIN || errno == EACCES)
+ return thrd_nomem;
+ return thrd_error;
+ }
+ *thr = (thrd_t)handle;
+ return thrd_success;
+}
+
+#if 0
+// 7.25.5.2
+static inline thrd_t
+thrd_current(void)
+{
+ HANDLE hCurrentThread;
+ BOOL bRet;
+
+ /* GetCurrentThread() returns a pseudo-handle, which we need
+ * to pass to DuplicateHandle(). Only the resulting handle can be used
+ * from other threads.
+ *
+ * Note that neither handle can be compared to the one returned by thrd_create.
+ * Only the thread IDs - as returned by GetThreadId() and GetCurrentThreadId()
+ * can be compared directly.
+ *
+ * Other potential solutions would be:
+ * - define thrd_t as a thread ID, but this would mean we'd need to call OpenThread for many operations
+ * - use malloc'ed memory for thrd_t. This would imply using TLS for the current thread.
+ *
+ * Neither is particularly nice.
+ *
+ * Life would be much easier if C11 threads had different abstractions for
+ * threads and thread IDs, just as C++11 threads do...
+ */
+
+ bRet = DuplicateHandle(GetCurrentProcess(), // source process (pseudo) handle
+ GetCurrentThread(), // source (pseudo) handle
+ GetCurrentProcess(), // target process
+ &hCurrentThread, // target handle
+ 0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS);
+ assert(bRet);
+ if (!bRet) {
+ hCurrentThread = GetCurrentThread();
+ }
+ return hCurrentThread;
+}
+#endif
+
+// 7.25.5.3
+static inline int
+thrd_detach(thrd_t thr)
+{
+ CloseHandle(thr);
+ return thrd_success;
+}
+
+// 7.25.5.4
+static inline int
+thrd_equal(thrd_t thr0, thrd_t thr1)
+{
+ return GetThreadId(thr0) == GetThreadId(thr1);
+}
+
+// 7.25.5.5
+static inline void
+thrd_exit(int res)
+{
+ impl_tss_dtor_invoke();
+ _endthreadex((unsigned)res);
+}
+
+// 7.25.5.6
+static inline int
+thrd_join(thrd_t thr, int *res)
+{
+ DWORD w, code;
+ w = WaitForSingleObject(thr, INFINITE);
+ if (w != WAIT_OBJECT_0)
+ return thrd_error;
+ if (res) {
+ if (!GetExitCodeThread(thr, &code)) {
+ CloseHandle(thr);
+ return thrd_error;
+ }
+ *res = (int)code;
+ }
+ CloseHandle(thr);
+ return thrd_success;
+}
+
+// 7.25.5.7
+static inline void
+thrd_sleep(const struct timespec *time_point, struct timespec *remaining)
+{
+ assert(time_point);
+ assert(!remaining); /* not implemented */
+ Sleep(impl_timespec2msec(time_point));
+}
+
+// 7.25.5.8
+static inline void
+thrd_yield(void)
+{
+ SwitchToThread();
+}
+
+
+/*----------- 7.25.6 Thread-specific storage functions -----------*/
+// 7.25.6.1
+static inline int
+tss_create(tss_t *key, tss_dtor_t dtor)
+{
+ if (!key) return thrd_error;
+ *key = TlsAlloc();
+ if (dtor) {
+ if (impl_tss_dtor_register(*key, dtor)) {
+ TlsFree(*key);
+ return thrd_error;
+ }
+ }
+ return (*key != 0xFFFFFFFF) ? thrd_success : thrd_error;
+}
+
+// 7.25.6.2
+static inline void
+tss_delete(tss_t key)
+{
+ TlsFree(key);
+}
+
+// 7.25.6.3
+static inline void *
+tss_get(tss_t key)
+{
+ return TlsGetValue(key);
+}
+
+// 7.25.6.4
+static inline int
+tss_set(tss_t key, void *val)
+{
+ return TlsSetValue(key, val) ? thrd_success : thrd_error;
+}
+
+
+/*-------------------- 7.25.7 Time functions --------------------*/
+// 7.25.7.1
+#ifndef HAVE_TIMESPEC_GET
+static inline int
+timespec_get(struct timespec *ts, int base)
+{
+ if (!ts) return 0;
+ if (base == TIME_UTC) {
+ ts->tv_sec = time(NULL);
+ ts->tv_nsec = 0;
+ return base;
+ }
+ return 0;
+}
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c11_compat.h b/third_party/rust/glslopt/glsl-optimizer/include/c11_compat.h
new file mode 100644
index 0000000000..d35740f47a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c11_compat.h
@@ -0,0 +1,27 @@
+/* Copyright 2019 Intel Corporation */
+/* SPDX-License-Identifier: MIT */
+
+#include "no_extern_c.h"
+
+#ifndef _C11_COMPAT_H_
+#define _C11_COMPAT_H_
+
+#if defined(__cplusplus)
+ /* This is C++ code, not C */
+#elif (__STDC_VERSION__ >= 201112L)
+ /* Already C11 */
+#else
+
+
+/*
+ * C11 static_assert() macro
+ * assert.h only defines that name for C11 and above
+ */
+#ifndef static_assert
+#define static_assert _Static_assert
+#endif
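+
+/* Usage sketch: with the fallback above, pre-C11 code can still write
+ * C11-style compile-time checks, e.g.
+ *
+ * static_assert(sizeof(void *) >= sizeof(int), "pointer smaller than int");
+ */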
+
+
+#endif /* !C++ && !C11 */
+
+#endif /* _C11_COMPAT_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h b/third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h
new file mode 100644
index 0000000000..5a3b8c19ab
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h
@@ -0,0 +1,49 @@
+/**************************************************************************
+ *
+ * Copyright 2015 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef _C99_ALLOCA_H_
+#define _C99_ALLOCA_H_
+
+
+#if defined(_MSC_VER)
+
+# include <malloc.h>
+
+# define alloca _alloca
+
+#elif defined(__sun) || defined(__CYGWIN__)
+
+# include <alloca.h>
+
+#else /* !defined(_MSC_VER) */
+
+# include <stdlib.h>
+
+#endif /* !defined(_MSC_VER) */
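+
+/* Usage sketch: after including this header, alloca() is available on
+ * all supported platforms for small, scope-local scratch buffers, e.g.
+ *
+ * float *tmp = (float *) alloca(n * sizeof(float));
+ *
+ * The memory is released automatically when the calling function returns.
+ */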
+
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c99_compat.h b/third_party/rust/glslopt/glsl-optimizer/include/c99_compat.h
new file mode 100644
index 0000000000..729b5b7986
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c99_compat.h
@@ -0,0 +1,183 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2013 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "no_extern_c.h"
+
+#ifndef _C99_COMPAT_H_
+#define _C99_COMPAT_H_
+
+
+/*
+ * MSVC hacks.
+ */
+#if defined(_MSC_VER)
+
+# if _MSC_VER < 1900
+# error "Microsoft Visual Studio 2015 or higher required"
+# endif
+
+ /*
+ * Visual Studio will complain if we define the `inline` keyword, but
+ * actually it only supports the keyword in C++.
+ *
+ * To avoid this, _ALLOW_KEYWORD_MACROS must be set.
+ */
+# if !defined(_ALLOW_KEYWORD_MACROS)
+# define _ALLOW_KEYWORD_MACROS
+# endif
+
+ /*
+ * XXX: MSVC has a `__restrict` keyword, but it also has a
+ * `__declspec(restrict)` modifier, so it is impossible to define a
+ * `restrict` macro without interfering with the latter. Furthermore, the
+ * MSVC standard library uses __declspec(restrict) under the _CRTRESTRICT
+ * macro. For now we resolve this issue by redefining _CRTRESTRICT, but
+ * going forward we should probably stop using restrict, especially
+ * considering that our code does not obey strict aliasing rules anyway.
+ */
+# include <crtdefs.h>
+# undef _CRTRESTRICT
+# define _CRTRESTRICT
+#endif
+
+
+/*
+ * C99 inline keyword
+ */
+#ifndef inline
+# ifdef __cplusplus
+ /* C++ supports inline keyword */
+# elif defined(__GNUC__)
+# define inline __inline__
+# elif defined(_MSC_VER)
+# define inline __inline
+# elif defined(__ICL)
+# define inline __inline
+# elif defined(__INTEL_COMPILER)
+ /* Intel compiler supports inline keyword */
+# elif defined(__WATCOMC__) && (__WATCOMC__ >= 1100)
+# define inline __inline
+# elif (__STDC_VERSION__ >= 199901L)
+ /* C99 supports inline keyword */
+# else
+# define inline
+# endif
+#endif
+
+
+/*
+ * C99 restrict keyword
+ *
+ * See also:
+ * - http://cellperformance.beyond3d.com/articles/2006/05/demystifying-the-restrict-keyword.html
+ */
+#ifndef restrict
+# if (__STDC_VERSION__ >= 199901L) && !defined(__cplusplus)
+ /* C99 */
+# elif defined(__GNUC__)
+# define restrict __restrict__
+# elif defined(_MSC_VER)
+# define restrict __restrict
+# else
+# define restrict /* */
+# endif
+#endif
+
+
+/*
+ * C99 __func__ macro
+ */
+#ifndef __func__
+# if (__STDC_VERSION__ >= 199901L)
+ /* C99 */
+# elif defined(__GNUC__)
+# define __func__ __FUNCTION__
+# elif defined(_MSC_VER)
+# define __func__ __FUNCTION__
+# else
+# define __func__ "<unknown>"
+# endif
+#endif
+
+
+/* Simple test case for debugging */
+#if 0
+static inline const char *
+test_c99_compat_h(const void * restrict a,
+ const void * restrict b)
+{
+ return __func__;
+}
+#endif
+
+
+/* Fallback definitions, for scons which doesn't auto-detect these things. */
+#ifdef HAVE_SCONS
+
+# ifndef _WIN32
+# define HAVE_PTHREAD
+# define HAVE_POSIX_MEMALIGN
+# endif
+
+# ifdef __GNUC__
+# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
+# error "GCC version 4.2 or higher required"
+# endif
+
+ /* https://gcc.gnu.org/onlinedocs/gcc-4.2.4/gcc/Other-Builtins.html */
+# define HAVE___BUILTIN_CLZ 1
+# define HAVE___BUILTIN_CLZLL 1
+# define HAVE___BUILTIN_CTZ 1
+# define HAVE___BUILTIN_EXPECT 1
+# define HAVE___BUILTIN_FFS 1
+# define HAVE___BUILTIN_FFSLL 1
+# define HAVE___BUILTIN_POPCOUNT 1
+# define HAVE___BUILTIN_POPCOUNTLL 1
+ /* https://gcc.gnu.org/onlinedocs/gcc-4.2.4/gcc/Function-Attributes.html */
+# define HAVE_FUNC_ATTRIBUTE_FLATTEN 1
+# define HAVE_FUNC_ATTRIBUTE_UNUSED 1
+# define HAVE_FUNC_ATTRIBUTE_FORMAT 1
+# define HAVE_FUNC_ATTRIBUTE_PACKED 1
+# define HAVE_FUNC_ATTRIBUTE_ALIAS 1
+# define HAVE_FUNC_ATTRIBUTE_NORETURN 1
+
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)
+ /* https://gcc.gnu.org/onlinedocs/gcc-4.3.6/gcc/Other-Builtins.html */
+# define HAVE___BUILTIN_BSWAP32 1
+# define HAVE___BUILTIN_BSWAP64 1
+# endif
+
+# if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)
+# define HAVE___BUILTIN_UNREACHABLE 1
+# endif
+
+# endif /* __GNUC__ */
+
+#endif /* HAVE_SCONS */
+
+
+#endif /* _C99_COMPAT_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c99_math.h b/third_party/rust/glslopt/glsl-optimizer/include/c99_math.h
new file mode 100644
index 0000000000..e906c26aa5
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/c99_math.h
@@ -0,0 +1,211 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2015 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * Wrapper for math.h which makes sure we have definitions of all the c99
+ * functions.
+ */
+
+
+#ifndef _C99_MATH_H_
+#define _C99_MATH_H_
+
+#include <math.h>
+#include "c99_compat.h"
+
+
+/* This is to ensure that we get M_PI, etc. definitions */
+#if defined(_MSC_VER) && !defined(_USE_MATH_DEFINES)
+#error _USE_MATH_DEFINES define required when building with MSVC
+#endif
+
+
+#if !defined(_MSC_VER) && \
+ __STDC_VERSION__ < 199901L && \
+ (!defined(_XOPEN_SOURCE) || _XOPEN_SOURCE < 600) && \
+ !defined(__cplusplus)
+
+static inline long int
+lrint(double d)
+{
+ long int rounded = (long int)(d + 0.5);
+
+ if (d - floor(d) == 0.5) {
+ if (rounded % 2 != 0)
+ rounded += (d > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline long int
+lrintf(float f)
+{
+ long int rounded = (long int)(f + 0.5f);
+
+ if (f - floorf(f) == 0.5f) {
+ if (rounded % 2 != 0)
+ rounded += (f > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline long long int
+llrint(double d)
+{
+ long long int rounded = (long long int)(d + 0.5);
+
+ if (d - floor(d) == 0.5) {
+ if (rounded % 2 != 0)
+ rounded += (d > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
+
+static inline long long int
+llrintf(float f)
+{
+ long long int rounded = (long long int)(f + 0.5f);
+
+ if (f - floorf(f) == 0.5f) {
+ if (rounded % 2 != 0)
+ rounded += (f > 0) ? -1 : 1;
+ }
+
+ return rounded;
+}
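+
+/* The fallbacks above round halfway cases to the nearest even integer,
+ * matching C99 lrint()/llrint() under the default rounding mode, e.g.
+ * lrint(0.5) == 0, lrint(1.5) == 2 and lrint(-0.5) == 0.
+ */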
+
+static inline float
+exp2f(float f)
+{
+ return powf(2.0f, f);
+}
+
+static inline double
+exp2(double d)
+{
+ return pow(2.0, d);
+}
+
+#endif /* C99 */
+
+
+/*
+ * signbit() is a macro on Linux. Not available on Windows.
+ */
+#ifndef signbit
+#define signbit(x) ((x) < 0.0f)
+#endif
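+/* Note: the fallback above mis-classifies negative zero (and NaNs),
+ * since (-0.0f < 0.0f) is false; it is a best-effort approximation only.
+ */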
+
+
+#ifndef M_PI
+#define M_PI (3.14159265358979323846)
+#endif
+
+#ifndef M_E
+#define M_E (2.7182818284590452354)
+#endif
+
+#ifndef M_LOG2E
+#define M_LOG2E (1.4426950408889634074)
+#endif
+
+#ifndef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+#endif
+
+
+#if defined(fpclassify)
+/* ISO C99 says that fpclassify is a macro. Assume that any implementation
+ * of fpclassify, whether it's in a C99 compiler or not, will be a macro.
+ */
+#elif defined(__cplusplus)
+/* For C++, fpclassify() should be defined in <cmath> */
+#elif defined(_MSC_VER)
+/* Not required on VS2013 and above. Oddly, the fpclassify() function
+ * doesn't exist in such a form on MSVC. This is an implementation using
+ * slightly different lower-level Windows functions.
+ */
+#include <float.h>
+
+static inline enum {FP_NAN, FP_INFINITE, FP_ZERO, FP_SUBNORMAL, FP_NORMAL}
+fpclassify(double x)
+{
+ switch(_fpclass(x)) {
+ case _FPCLASS_SNAN: /* signaling NaN */
+ case _FPCLASS_QNAN: /* quiet NaN */
+ return FP_NAN;
+ case _FPCLASS_NINF: /* negative infinity */
+ case _FPCLASS_PINF: /* positive infinity */
+ return FP_INFINITE;
+ case _FPCLASS_NN: /* negative normal */
+ case _FPCLASS_PN: /* positive normal */
+ return FP_NORMAL;
+ case _FPCLASS_ND: /* negative denormalized */
+ case _FPCLASS_PD: /* positive denormalized */
+ return FP_SUBNORMAL;
+ case _FPCLASS_NZ: /* negative zero */
+ case _FPCLASS_PZ: /* positive zero */
+ return FP_ZERO;
+ default:
+ /* Should never get here; but if we do, this will guarantee
+ * that the pattern is not treated like a number.
+ */
+ return FP_NAN;
+ }
+}
+#else
+#error "Need to include or define an fpclassify function"
+#endif
+
+
+/* Since C++11, the following functions are part of the std namespace. Their C
+ * counterparts should still exist in the global namespace; however, cmath
+ * undefines those functions, which in glibc 2.23 are defined as macros
+ * rather than as functions (as they were in glibc 2.22).
+ */
+#if __cplusplus >= 201103L && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 23))
+#include <cmath>
+
+using std::fpclassify;
+using std::isfinite;
+using std::isinf;
+using std::isnan;
+using std::isnormal;
+using std::signbit;
+using std::isgreater;
+using std::isgreaterequal;
+using std::isless;
+using std::islessequal;
+using std::islessgreater;
+using std::isunordered;
+#endif
+
+
+#endif /* #define _C99_MATH_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/no_extern_c.h b/third_party/rust/glslopt/glsl-optimizer/include/no_extern_c.h
new file mode 100644
index 0000000000..f79602c030
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/include/no_extern_c.h
@@ -0,0 +1,48 @@
+/**************************************************************************
+ *
+ * Copyright 2014 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * Including system headers inside `extern "C" { ... }` is not safe, as system
+ * headers may have C++ code in them, and C++ code inside extern "C"
+ * leads to syntactically incorrect code.
+ *
+ * This is because putting code inside extern "C" won't make the __cplusplus
+ * define go away; that is, the system header being included thinks it is free
+ * to use C++ as it sees fit.
+ *
+ * Including non-system headers inside extern "C" is not safe either, because
+ * non-system headers end up including system headers, and hence fall into the
+ * above case too.
+ *
+ * In conclusion, includes inside extern "C" are simply not portable.
+ *
+ *
+ * This header helps surface these issues.
+ */
+
+#ifdef __cplusplus
+template<class T> class _IncludeInsideExternCNotPortable;
+#endif
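+
+/* Illustrative failure mode (hypothetical, do not compile): this header
+ * exists to catch patterns such as
+ *
+ * extern "C" {
+ * #include <math.h> // may drag in C++ declarations
+ * }
+ *
+ * Because templates cannot be given C linkage, the template declaration
+ * above triggers a compile error whenever a header that includes this
+ * file is itself included inside an extern "C" block, surfacing the
+ * mistake.
+ */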
diff --git a/third_party/rust/glslopt/glsl-optimizer/license.txt b/third_party/rust/glslopt/glsl-optimizer/license.txt
new file mode 100644
index 0000000000..11a04bb6fc
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/license.txt
@@ -0,0 +1,21 @@
+GLSL Optimizer is licensed according to the terms of the MIT license:
+
+Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+Copyright (C) 2010-2013 Unity Technologies All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/builtin_type_macros.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/builtin_type_macros.h
new file mode 100644
index 0000000000..55ad2b8955
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/builtin_type_macros.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file builtin_type_macros.h
+ *
+ * This contains definitions for all GLSL built-in types, regardless of what
+ * language version or extension might provide them.
+ */
+
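+/* This header is an "X macro" list: it has no include guard and expects
+ * the includer to define DECL_TYPE() and STRUCT_TYPE() before including
+ * it. A hypothetical consumer that only wants the type names could write:
+ *
+ * #define DECL_TYPE(NAME, ...) #NAME,
+ * #define STRUCT_TYPE(NAME) #NAME,
+ * static const char *type_names[] = {
+ * #include "builtin_type_macros.h"
+ * };
+ * #undef DECL_TYPE
+ * #undef STRUCT_TYPE
+ */
+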
+DECL_TYPE(error, GL_INVALID_ENUM, GLSL_TYPE_ERROR, 0, 0)
+DECL_TYPE(void, GL_INVALID_ENUM, GLSL_TYPE_VOID, 0, 0)
+
+#define DECL_VEC_TYPE(stype, vtype, btype, etype, ...) \
+ DECL_TYPE(stype, etype ##__VA_ARGS__, btype, 1, 1) \
+ DECL_TYPE(vtype ## 2, etype ##_VEC2 ##__VA_ARGS__, btype, 2, 1) \
+ DECL_TYPE(vtype ## 3, etype ##_VEC3 ##__VA_ARGS__, btype, 3, 1) \
+ DECL_TYPE(vtype ## 4, etype ##_VEC4 ##__VA_ARGS__, btype, 4, 1) \
+ DECL_TYPE(vtype ## 8, 0, btype, 8, 1) \
+ DECL_TYPE(vtype ## 16, 0, btype, 16, 1)
+
+DECL_VEC_TYPE(bool, bvec, GLSL_TYPE_BOOL, GL_BOOL)
+DECL_VEC_TYPE(int, ivec, GLSL_TYPE_INT, GL_INT)
+DECL_VEC_TYPE(uint, uvec, GLSL_TYPE_UINT, GL_UNSIGNED_INT)
+DECL_VEC_TYPE(float, vec, GLSL_TYPE_FLOAT, GL_FLOAT)
+DECL_VEC_TYPE(float16_t, f16vec, GLSL_TYPE_FLOAT16, GL_FLOAT16, _NV)
+DECL_VEC_TYPE(double, dvec, GLSL_TYPE_DOUBLE, GL_DOUBLE)
+DECL_VEC_TYPE(int64_t, i64vec, GLSL_TYPE_INT64, GL_INT64, _ARB)
+DECL_VEC_TYPE(uint64_t, u64vec, GLSL_TYPE_UINT64, GL_UNSIGNED_INT64, _ARB)
+DECL_VEC_TYPE(int16_t, i16vec, GLSL_TYPE_INT16, GL_INT16, _NV)
+DECL_VEC_TYPE(uint16_t, u16vec, GLSL_TYPE_UINT16, GL_UNSIGNED_INT16, _NV)
+DECL_VEC_TYPE(int8_t, i8vec, GLSL_TYPE_INT8, GL_INT8, _NV)
+DECL_VEC_TYPE(uint8_t, u8vec, GLSL_TYPE_UINT8, GL_UNSIGNED_INT8, _NV)
+
+DECL_TYPE(mat2, GL_FLOAT_MAT2, GLSL_TYPE_FLOAT, 2, 2)
+DECL_TYPE(mat3, GL_FLOAT_MAT3, GLSL_TYPE_FLOAT, 3, 3)
+DECL_TYPE(mat4, GL_FLOAT_MAT4, GLSL_TYPE_FLOAT, 4, 4)
+
+DECL_TYPE(mat2x3, GL_FLOAT_MAT2x3, GLSL_TYPE_FLOAT, 3, 2)
+DECL_TYPE(mat2x4, GL_FLOAT_MAT2x4, GLSL_TYPE_FLOAT, 4, 2)
+DECL_TYPE(mat3x2, GL_FLOAT_MAT3x2, GLSL_TYPE_FLOAT, 2, 3)
+DECL_TYPE(mat3x4, GL_FLOAT_MAT3x4, GLSL_TYPE_FLOAT, 4, 3)
+DECL_TYPE(mat4x2, GL_FLOAT_MAT4x2, GLSL_TYPE_FLOAT, 2, 4)
+DECL_TYPE(mat4x3, GL_FLOAT_MAT4x3, GLSL_TYPE_FLOAT, 3, 4)
+
+DECL_TYPE(f16mat2, GL_FLOAT16_MAT2_AMD, GLSL_TYPE_FLOAT16, 2, 2)
+DECL_TYPE(f16mat3, GL_FLOAT16_MAT3_AMD, GLSL_TYPE_FLOAT16, 3, 3)
+DECL_TYPE(f16mat4, GL_FLOAT16_MAT4_AMD, GLSL_TYPE_FLOAT16, 4, 4)
+
+DECL_TYPE(f16mat2x3, GL_FLOAT16_MAT2x3_AMD, GLSL_TYPE_FLOAT16, 3, 2)
+DECL_TYPE(f16mat2x4, GL_FLOAT16_MAT2x4_AMD, GLSL_TYPE_FLOAT16, 4, 2)
+DECL_TYPE(f16mat3x2, GL_FLOAT16_MAT3x2_AMD, GLSL_TYPE_FLOAT16, 2, 3)
+DECL_TYPE(f16mat3x4, GL_FLOAT16_MAT3x4_AMD, GLSL_TYPE_FLOAT16, 4, 3)
+DECL_TYPE(f16mat4x2, GL_FLOAT16_MAT4x2_AMD, GLSL_TYPE_FLOAT16, 2, 4)
+DECL_TYPE(f16mat4x3, GL_FLOAT16_MAT4x3_AMD, GLSL_TYPE_FLOAT16, 3, 4)
+
+DECL_TYPE(dmat2, GL_DOUBLE_MAT2, GLSL_TYPE_DOUBLE, 2, 2)
+DECL_TYPE(dmat3, GL_DOUBLE_MAT3, GLSL_TYPE_DOUBLE, 3, 3)
+DECL_TYPE(dmat4, GL_DOUBLE_MAT4, GLSL_TYPE_DOUBLE, 4, 4)
+
+DECL_TYPE(dmat2x3, GL_DOUBLE_MAT2x3, GLSL_TYPE_DOUBLE, 3, 2)
+DECL_TYPE(dmat2x4, GL_DOUBLE_MAT2x4, GLSL_TYPE_DOUBLE, 4, 2)
+DECL_TYPE(dmat3x2, GL_DOUBLE_MAT3x2, GLSL_TYPE_DOUBLE, 2, 3)
+DECL_TYPE(dmat3x4, GL_DOUBLE_MAT3x4, GLSL_TYPE_DOUBLE, 4, 3)
+DECL_TYPE(dmat4x2, GL_DOUBLE_MAT4x2, GLSL_TYPE_DOUBLE, 2, 4)
+DECL_TYPE(dmat4x3, GL_DOUBLE_MAT4x3, GLSL_TYPE_DOUBLE, 3, 4)
+
+DECL_TYPE(sampler, GL_SAMPLER_1D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_VOID)
+DECL_TYPE(sampler1D, GL_SAMPLER_1D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2D, GL_SAMPLER_2D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler3D, GL_SAMPLER_3D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_3D, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(samplerCube, GL_SAMPLER_CUBE, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler1DArray, GL_SAMPLER_1D_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DArray, GL_SAMPLER_2D_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(samplerCubeArray, GL_SAMPLER_CUBE_MAP_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DRect, GL_SAMPLER_2D_RECT, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_RECT, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(samplerBuffer, GL_SAMPLER_BUFFER, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_BUF, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DMS, GL_SAMPLER_2D_MULTISAMPLE, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_MS, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DMSArray, GL_SAMPLER_2D_MULTISAMPLE_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_MS, 0, 1, GLSL_TYPE_FLOAT)
+
+DECL_TYPE(isampler1D, GL_INT_SAMPLER_1D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isampler2D, GL_INT_SAMPLER_2D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isampler3D, GL_INT_SAMPLER_3D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_3D, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isamplerCube, GL_INT_SAMPLER_CUBE, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isampler1DArray, GL_INT_SAMPLER_1D_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(isampler2DArray, GL_INT_SAMPLER_2D_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(isamplerCubeArray, GL_INT_SAMPLER_CUBE_MAP_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(isampler2DRect, GL_INT_SAMPLER_2D_RECT, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_RECT, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isamplerBuffer, GL_INT_SAMPLER_BUFFER, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_BUF, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isampler2DMS, GL_INT_SAMPLER_2D_MULTISAMPLE, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_MS, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isampler2DMSArray, GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_MS, 0, 1, GLSL_TYPE_INT)
+
+DECL_TYPE(usampler1D, GL_UNSIGNED_INT_SAMPLER_1D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usampler2D, GL_UNSIGNED_INT_SAMPLER_2D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usampler3D, GL_UNSIGNED_INT_SAMPLER_3D, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_3D, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usamplerCube, GL_UNSIGNED_INT_SAMPLER_CUBE, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usampler1DArray, GL_UNSIGNED_INT_SAMPLER_1D_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 0, 1, GLSL_TYPE_UINT)
+DECL_TYPE(usampler2DArray, GL_UNSIGNED_INT_SAMPLER_2D_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 0, 1, GLSL_TYPE_UINT)
+DECL_TYPE(usamplerCubeArray, GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 0, 1, GLSL_TYPE_UINT)
+DECL_TYPE(usampler2DRect, GL_UNSIGNED_INT_SAMPLER_2D_RECT, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_RECT, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usamplerBuffer, GL_UNSIGNED_INT_SAMPLER_BUFFER, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_BUF, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usampler2DMS, GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_MS, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usampler2DMSArray, GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_MS, 0, 1, GLSL_TYPE_UINT)
+
+DECL_TYPE(sampler1DShadow, GL_SAMPLER_1D_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 1, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DShadow, GL_SAMPLER_2D_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 1, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(samplerCubeShadow, GL_SAMPLER_CUBE_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 1, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler1DArrayShadow, GL_SAMPLER_1D_ARRAY_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_1D, 1, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DArrayShadow, GL_SAMPLER_2D_ARRAY_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_2D, 1, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(samplerCubeArrayShadow, GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_CUBE, 1, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(sampler2DRectShadow, GL_SAMPLER_2D_RECT_SHADOW, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_RECT, 1, 0, GLSL_TYPE_FLOAT)
+
+DECL_TYPE(samplerExternalOES, GL_SAMPLER_EXTERNAL_OES, GLSL_TYPE_SAMPLER, GLSL_SAMPLER_DIM_EXTERNAL, 0, 0, GLSL_TYPE_FLOAT)
+
+DECL_TYPE(image1D, GL_IMAGE_1D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(image2D, GL_IMAGE_2D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_2D, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(image3D, GL_IMAGE_3D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_3D, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(image2DRect, GL_IMAGE_2D_RECT, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_RECT, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(imageCube, GL_IMAGE_CUBE, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_CUBE, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(imageBuffer, GL_IMAGE_BUFFER, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_BUF, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(image1DArray, GL_IMAGE_1D_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_1D, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(image2DArray, GL_IMAGE_2D_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_2D, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(imageCubeArray, GL_IMAGE_CUBE_MAP_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_CUBE, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(image2DMS, GL_IMAGE_2D_MULTISAMPLE, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_MS, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(image2DMSArray, GL_IMAGE_2D_MULTISAMPLE_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_MS, 0, 1, GLSL_TYPE_FLOAT)
+DECL_TYPE(iimage1D, GL_INT_IMAGE_1D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimage2D, GL_INT_IMAGE_2D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_2D, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimage3D, GL_INT_IMAGE_3D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_3D, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimage2DRect, GL_INT_IMAGE_2D_RECT, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_RECT, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimageCube, GL_INT_IMAGE_CUBE, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_CUBE, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimageBuffer, GL_INT_IMAGE_BUFFER, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_BUF, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimage1DArray, GL_INT_IMAGE_1D_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_1D, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(iimage2DArray, GL_INT_IMAGE_2D_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_2D, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(iimageCubeArray, GL_INT_IMAGE_CUBE_MAP_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_CUBE, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(iimage2DMS, GL_INT_IMAGE_2D_MULTISAMPLE, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_MS, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(iimage2DMSArray, GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_MS, 0, 1, GLSL_TYPE_INT)
+DECL_TYPE(uimage1D, GL_UNSIGNED_INT_IMAGE_1D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_1D, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimage2D, GL_UNSIGNED_INT_IMAGE_2D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_2D, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimage3D, GL_UNSIGNED_INT_IMAGE_3D, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_3D, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimage2DRect, GL_UNSIGNED_INT_IMAGE_2D_RECT, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_RECT, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimageCube, GL_UNSIGNED_INT_IMAGE_CUBE, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_CUBE, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimageBuffer, GL_UNSIGNED_INT_IMAGE_BUFFER, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_BUF, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimage1DArray, GL_UNSIGNED_INT_IMAGE_1D_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_1D, 0, 1, GLSL_TYPE_UINT)
+DECL_TYPE(uimage2DArray, GL_UNSIGNED_INT_IMAGE_2D_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_2D, 0, 1, GLSL_TYPE_UINT)
+DECL_TYPE(uimageCubeArray, GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_CUBE, 0, 1, GLSL_TYPE_UINT)
+DECL_TYPE(uimage2DMS, GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_MS, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(uimage2DMSArray, GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_MS, 0, 1, GLSL_TYPE_UINT)
+
+DECL_TYPE(subpassInput, 0, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_SUBPASS, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(subpassInputMS, 0, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_SUBPASS_MS, 0, 0, GLSL_TYPE_FLOAT)
+DECL_TYPE(isubpassInput, 0, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_SUBPASS, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(isubpassInputMS, 0, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_SUBPASS_MS, 0, 0, GLSL_TYPE_INT)
+DECL_TYPE(usubpassInput, 0, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_SUBPASS, 0, 0, GLSL_TYPE_UINT)
+DECL_TYPE(usubpassInputMS, 0, GLSL_TYPE_IMAGE, GLSL_SAMPLER_DIM_SUBPASS_MS, 0, 0, GLSL_TYPE_UINT)
+
+DECL_TYPE(atomic_uint, GL_UNSIGNED_INT_ATOMIC_COUNTER, GLSL_TYPE_ATOMIC_UINT, 1, 1)
+
+STRUCT_TYPE(gl_DepthRangeParameters)
+STRUCT_TYPE(gl_PointParameters)
+STRUCT_TYPE(gl_MaterialParameters)
+STRUCT_TYPE(gl_LightSourceParameters)
+STRUCT_TYPE(gl_LightModelParameters)
+STRUCT_TYPE(gl_LightModelProducts)
+STRUCT_TYPE(gl_LightProducts)
+STRUCT_TYPE(gl_FogParameters)
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/README b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/README
new file mode 100644
index 0000000000..9d2d10c04a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/README
@@ -0,0 +1,228 @@
+Welcome to Mesa's GLSL compiler. A brief overview of how things flow:
+
+1) lex and yacc-based preprocessor takes the incoming shader string
+and produces a new string containing the preprocessed shader. This
+takes care of things like #if, #ifdef, #define, and preprocessor macro
+invocations. Note that #version, #extension, and some others are
+passed straight through. See glcpp/*
+
+2) lex and yacc-based parser takes the preprocessed string and
+generates the AST (abstract syntax tree). Almost no checking is
+performed in this stage. See glsl_lexer.ll and glsl_parser.yy.
+
+3) The AST is converted to "HIR". This is the intermediate
+representation of the compiler. Constructors are generated, function
+calls are resolved to particular function signatures, and all the
+semantic checking is performed. See ast_*.cpp for the conversion, and
+ir.h for the IR structures.
+
+4) The driver (Mesa, or main.cpp for the standalone binary) performs
+optimizations. These include copy propagation, dead code elimination,
+constant folding, and others. Generally the driver will call
+optimizations in a loop, as each may open up opportunities for other
+optimizations to do additional work. See most files called ir_*.cpp
+
+5) linking is performed. This does checking to ensure that the
+outputs of the vertex shader match the inputs of the fragment shader,
+and assigns locations to uniforms, attributes, and varyings. See
+linker.cpp.
+
+6) The driver may perform additional optimization at this point, as
+for example dead code elimination previously couldn't remove functions
+or global variable usage when we didn't know what other code would be
+linked in.
+
+7) The driver performs code generation out of the IR, taking a linked
+shader program and producing a compiled program for each stage. See
+../mesa/program/ir_to_mesa.cpp for Mesa IR code generation.
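+
+As a small, hypothetical end-to-end illustration: given a fragment such as
+
+#define SCALE 2.0
+void main() { gl_FragColor = vec4(SCALE * 0.5); }
+
+stage 1 expands the macro, leaving (2.0 * 0.5) in the source string, and
+the constant folding performed in stage 4 can then reduce that expression
+to a single constant 1.0 before code generation.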
+
+FAQ:
+
+Q: What is HIR versus IR versus LIR?
+
+A: The idea behind the naming was that ast_to_hir would produce a
+high-level IR ("HIR"), with things like matrix operations, structure
+assignments, etc., present. A series of lowering passes would occur
+that do things like break matrix multiplication into a series of dot
+products/MADs, make structure assignment be a series of assignment of
+components, flatten if statements into conditional moves, and such,
+producing a low level IR ("LIR").
+
+However, it now appears that each driver will have different
+requirements from a LIR. A 915-generation chipset wants all functions
+inlined, all loops unrolled, all ifs flattened, no variable array
+accesses, and matrix multiplication broken down. The Mesa IR backend
+for swrast would like matrices and structure assignment broken down,
+but it can support function calls and dynamic branching. A 965 vertex
+shader IR backend could potentially even handle some matrix operations
+without breaking them down, but the 965 fragment shader IR backend
+would want to have (almost) all operations broken down channel-wise
+and perform optimization on that. As a result, there's no single
+low-level IR that will make everyone happy. So that usage has fallen
+out of favor, and each driver will perform a series of lowering passes
+to take the HIR down to whatever restrictions it wants to impose
+before doing codegen.
+
+Q: How is the IR structured?
+
+A: The best way to get started seeing it would be to run the
+standalone compiler against a shader:
+
+./glsl_compiler --dump-lir \
+ ~/src/piglit/tests/shaders/glsl-orangebook-ch06-bump.frag
+
+So for example one of the ir_instructions in main() contains:
+
+(assign (constant bool (1)) (var_ref litColor)
+ (expression vec3 * (var_ref SurfaceColor) (var_ref __retval) ) )
+
+Or more visually:
+ (assign)
+ / | \
+ (var_ref) (expression *) (constant bool 1)
+ / / \
+(litColor) (var_ref) (var_ref)
+ / \
+ (SurfaceColor) (__retval)
+
+which came from:
+
+litColor = SurfaceColor * max(dot(normDelta, LightDir), 0.0);
+
+(the max call is not represented in this expression tree, as it was a
+function call that got inlined but not brought into this expression
+tree)
+
+Each of those nodes is a subclass of ir_instruction. A particular
+ir_instruction instance may only appear once in the whole IR tree with
+the exception of ir_variables, which appear once as variable
+declarations:
+
+(declare () vec3 normDelta)
+
+and multiple times as the targets of variable dereferences:
+...
+(assign (constant bool (1)) (var_ref __retval) (expression float dot
+ (var_ref normDelta) (var_ref LightDir) ) )
+...
+(assign (constant bool (1)) (var_ref __retval) (expression vec3 -
+ (var_ref LightDir) (expression vec3 * (constant float (2.000000))
+ (expression vec3 * (expression float dot (var_ref normDelta) (var_ref
+ LightDir) ) (var_ref normDelta) ) ) ) )
+...
+
+Each node has a type. Expressions may involve several different types:
+(declare (uniform ) mat4 gl_ModelViewMatrix)
+(assign (constant bool (1)) (var_ref constructor_tmp) (expression
+ vec4 * (var_ref gl_ModelViewMatrix) (var_ref gl_Vertex) ) )
+
+An expression tree can be arbitrarily deep, and the compiler tries to
+keep them structured like that so that things like algebraic
+optimizations ((color * 1.0 == color) and ((mat1 * mat2) * vec == mat1
+* (mat2 * vec))) or recognizing operation patterns for code generation
+(vec1 * vec2 + vec3 == mad(vec1, vec2, vec3)) are easier. This comes
+at the expense of additional trickery in implementing some
+optimizations like CSE where one must navigate an expression tree.
+
+Q: Why no SSA representation?
+
+A: Converting an IR tree to SSA form makes dead code elimination,
+common subexpression elimination, and many other optimizations much
+easier. However, in our primarily vector-based language, there's some
+major questions as to how it would work. Do we do SSA on the scalar
+or vector level? If we do it at the vector level, we're going to end
+up with many different versions of the variable when encountering code
+like:
+
+(assign (constant bool (1)) (swiz x (var_ref __retval) ) (var_ref a) )
+(assign (constant bool (1)) (swiz y (var_ref __retval) ) (var_ref b) )
+(assign (constant bool (1)) (swiz z (var_ref __retval) ) (var_ref c) )
+
+If every masked update of a component relies on the previous value of
+the variable, then we're probably going to be quite limited in our
+dead code elimination wins, and recognizing common expressions may
+just not happen. On the other hand, if we operate channel-wise, then
+we'll be prone to optimizing the operation on one of the channels at
+the expense of making its instruction flow different from the other
+channels, and a vector-based GPU would end up with worse code than if
+we didn't optimize operations on that channel!
+
+Once again, it appears that our optimization requirements are driven
+significantly by the target architecture. For now, targeting the Mesa
+IR backend, SSA does not appear to be that important to producing
+excellent code, but we do expect to do some SSA-based optimizations
+for the 965 fragment shader backend when that is developed.
+
+Q: How should I expand instructions that take multiple backend instructions?
+
+A: Sometimes you'll have to do the expansion in your code generation --
+see, for example, ir_to_mesa.cpp's handling of ir_unop_sqrt. However,
+in many cases you'll want to do a pass over the IR to convert
+non-native instructions to a series of native instructions. For
+example, for the Mesa backend we have ir_div_to_mul_rcp.cpp because
+Mesa IR (and many hardware backends) only have a reciprocal
+instruction, not a divide. Implementing non-native instructions this
+way gives the chance for constant folding to occur, so (a / 2.0)
+becomes (a * 0.5) after codegen instead of (a * (1.0 / 2.0)).
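+
+As a hypothetical before/after in the IR notation used earlier, such a
+pass would turn
+
+(expression float / (var_ref a) (constant float (2.000000)))
+
+into a multiply by a reciprocal, which constant folding then reduces to
+
+(expression float * (var_ref a) (constant float (0.500000)))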
+
+Q: How should I handle my special hardware instructions with respect to IR?
+
+A: Our current theory is that if multiple targets have an instruction for
+some operation, then we should probably be able to represent that in
+the IR. Generally this is in the form of an ir_{bin,un}op expression
+type. For example, we initially implemented fract() using (a -
+floor(a)), but both 945 and 965 have instructions to give that result,
+and it would also simplify the implementation of mod(), so
+ir_unop_fract was added. The following areas need updating to add a
+new expression type:
+
+ir.h (new enum)
+ir.cpp:operator_strs (used for ir_reader)
+ir_constant_expression.cpp (you probably want to be able to constant fold)
+ir_validate.cpp (check users have the right types)
+
+You may also need to update the backends if they will see the new expr type:
+
+../mesa/program/ir_to_mesa.cpp
+
+You can then use the new expression from builtins (if all backends
+would rather see it), or scan the IR and convert to use your new
+expression type (see ir_mod_to_floor, for example).
+
+Q: How is memory management handled in the compiler?
+
+A: The hierarchical memory allocator "talloc" developed for the Samba
+project is used, so that things like optimization passes don't have to
+worry about their garbage collection so much. It has a few nice
+features, including low performance overhead and good debugging
+support that's trivially available.
+
+Generally, each stage of the compile creates a talloc context and
+allocates its memory out of that or children of it. At the end of the
+stage, the pieces that are still live are stolen to a new context and the
+old one freed, or the whole context is kept for use by the next stage.
+
+For IR transformations, a temporary context is used, then at the end
+of all transformations, reparent_ir reparents all live nodes under the
+shader's IR list, and the old context full of dead nodes is freed.
+When developing a single IR transformation pass, this means that you
+want to allocate instruction nodes out of the temporary context, so if
+it becomes dead it doesn't live on as the child of a live node. At
+the moment, optimization passes aren't passed that temporary context,
+so they find it by calling talloc_parent() on a nearby IR node. The
+talloc_parent() call is expensive, so many passes will cache the
+result of the first talloc_parent(). Cleaning up all the optimization
+passes to take a context argument and not call talloc_parent() is left
+as an exercise.
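+
+A minimal sketch of that pattern (names abbreviated, assuming the talloc
+API described above):
+
+void *mem_ctx = talloc_new(NULL); /* temporary context */
+/* ... run transformation passes, allocating new nodes from mem_ctx ... */
+reparent_ir(shader->ir, shader); /* steal the still-live nodes */
+talloc_free(mem_ctx); /* everything left behind is freed */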
+
+Q: What is the file naming convention in this directory?
+
+A: Initially, there really wasn't one. We have since adopted one:
+
+ - Files that implement code lowering passes should be named lower_*
+ (e.g., lower_builtins.cpp).
+ - Files that implement optimization passes should be named opt_*.
+ - Files that implement a class that is used throughout the code should
+ take the name of that class (e.g., ir_hierarchical_visitor.cpp).
+ - Files that contain code not fitting in one of the previous
+ categories should have a sensible name (e.g., glsl_parser.yy).
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/TODO b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/TODO
new file mode 100644
index 0000000000..bd077a8567
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/TODO
@@ -0,0 +1,12 @@
+- Detect code paths in non-void functions that don't reach a return statement
+
+- Improve handling of constants and their initializers. Constant initializers
+ should never generate any code. This is trivial for scalar constants. It is
+ also trivial for arrays, matrices, and vectors that are accessed with
+ constant index values. For others it is more complicated. Perhaps these
+ cases should be silently converted to uniforms?
+
+- Track source locations throughout the IR. There are currently several
+ places where we cannot emit line numbers for errors (and currently emit 0:0)
+ because we've "lost" the line number information. This is particularly
+ noticeable at link time.
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast.h
new file mode 100644
index 0000000000..3a960c2ff3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast.h
@@ -0,0 +1,1401 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef AST_H
+#define AST_H
+
+#include "list.h"
+#include "glsl_parser_extras.h"
+#include "compiler/glsl_types.h"
+#include "util/bitset.h"
+
+struct _mesa_glsl_parse_state;
+
+struct YYLTYPE;
+
+/**
+ * \defgroup AST Abstract syntax tree node definitions
+ *
+ * An abstract syntax tree is generated by the parser. This is a fairly
+ * direct representation of the grammar derivation for the source program.
+ * No semantic checking is done during the generation of the AST. Only
+ * syntactic checking is done. Semantic checking is performed by a later
+ * stage that converts the AST to a more generic intermediate representation.
+ *
+ *@{
+ */
+/**
+ * Base class of all abstract syntax tree nodes
+ */
+class ast_node {
+public:
+ DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(ast_node);
+
+ /**
+ * Print an AST node in something approximating the original GLSL code
+ */
+ virtual void print(void) const;
+
+ /**
+ * Convert the AST node to the high-level intermediate representation
+ */
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ virtual bool has_sequence_subexpression() const;
+
+ /**
+ * Retrieve the source location of an AST node
+ *
+ * This function is primarily used to get the source position of an AST node
+ * into a form that can be passed to \c _mesa_glsl_error.
+ *
+ * \sa _mesa_glsl_error, ast_node::set_location
+ */
+ struct YYLTYPE get_location(void) const
+ {
+ struct YYLTYPE locp;
+
+ locp.path = this->location.path;
+ locp.source = this->location.source;
+ locp.first_line = this->location.first_line;
+ locp.first_column = this->location.first_column;
+ locp.last_line = this->location.last_line;
+ locp.last_column = this->location.last_column;
+
+ return locp;
+ }
+
+ /**
+ * Set the source location of an AST node from a parser location
+ *
+ * \sa ast_node::get_location
+ */
+ void set_location(const struct YYLTYPE &locp)
+ {
+ this->location.path = locp.path;
+ this->location.source = locp.source;
+ this->location.first_line = locp.first_line;
+ this->location.first_column = locp.first_column;
+ this->location.last_line = locp.last_line;
+ this->location.last_column = locp.last_column;
+ }
+
+ /**
+ * Set the source location range of an AST node using two location nodes
+ *
+ * \sa ast_node::set_location
+ */
+ void set_location_range(const struct YYLTYPE &begin, const struct YYLTYPE &end)
+ {
+ this->location.path = begin.path;
+ this->location.source = begin.source;
+ this->location.first_line = begin.first_line;
+ this->location.last_line = end.last_line;
+ this->location.first_column = begin.first_column;
+ this->location.last_column = end.last_column;
+ }
+
+ /**
+ * Source location of the AST node.
+ */
+ struct {
+ char *path; /**< GLSL shader include path. */
+ unsigned source; /**< GLSL source number. */
+ unsigned first_line; /**< First line number within the source string. */
+ unsigned first_column; /**< First column in the first line. */
+ unsigned last_line; /**< Last line number within the source string. */
+ unsigned last_column; /**< Last column in the last line. */
+ } location;
+
+ exec_node link;
+
+ virtual void set_is_lhs(bool);
+
+protected:
+ /**
+ * The only constructor is protected so that only derived class objects can
+ * be created.
+ */
+ ast_node(void);
+};
+
+
+/**
+ * Operators for AST expression nodes.
+ */
+enum ast_operators {
+ ast_assign,
+ ast_plus, /**< Unary + operator. */
+ ast_neg,
+ ast_add,
+ ast_sub,
+ ast_mul,
+ ast_div,
+ ast_mod,
+ ast_lshift,
+ ast_rshift,
+ ast_less,
+ ast_greater,
+ ast_lequal,
+ ast_gequal,
+ ast_equal,
+ ast_nequal,
+ ast_bit_and,
+ ast_bit_xor,
+ ast_bit_or,
+ ast_bit_not,
+ ast_logic_and,
+ ast_logic_xor,
+ ast_logic_or,
+ ast_logic_not,
+
+ ast_mul_assign,
+ ast_div_assign,
+ ast_mod_assign,
+ ast_add_assign,
+ ast_sub_assign,
+ ast_ls_assign,
+ ast_rs_assign,
+ ast_and_assign,
+ ast_xor_assign,
+ ast_or_assign,
+
+ ast_conditional,
+
+ ast_pre_inc,
+ ast_pre_dec,
+ ast_post_inc,
+ ast_post_dec,
+ ast_field_selection,
+ ast_array_index,
+ ast_unsized_array_dim,
+
+ ast_function_call,
+
+ ast_identifier,
+ ast_int_constant,
+ ast_uint_constant,
+ ast_float_constant,
+ ast_bool_constant,
+ ast_double_constant,
+ ast_int64_constant,
+ ast_uint64_constant,
+
+ ast_sequence,
+ ast_aggregate
+
+ /**
+ * Number of possible operators for an ast_expression
+ *
+ * This is done as a define instead of as an additional value in the enum so
+ * that the compiler won't generate spurious messages like "warning:
+ * enumeration value ‘ast_num_operators’ not handled in switch"
+ */
+ #define AST_NUM_OPERATORS (ast_aggregate + 1)
+};
+
+/**
+ * Representation of any sort of expression.
+ */
+class ast_expression : public ast_node {
+public:
+ ast_expression(int oper, ast_expression *,
+ ast_expression *, ast_expression *);
+
+ ast_expression(const char *identifier) :
+ oper(ast_identifier)
+ {
+ subexpressions[0] = NULL;
+ subexpressions[1] = NULL;
+ subexpressions[2] = NULL;
+ primary_expression.identifier = identifier;
+ this->non_lvalue_description = NULL;
+ this->is_lhs = false;
+ }
+
+ static const char *operator_string(enum ast_operators op);
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ virtual void hir_no_rvalue(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ virtual bool has_sequence_subexpression() const;
+
+ ir_rvalue *do_hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state,
+ bool needs_rvalue);
+
+ virtual void print(void) const;
+
+ enum ast_operators oper;
+
+ ast_expression *subexpressions[3];
+
+ union {
+ const char *identifier;
+ int int_constant;
+ float float_constant;
+ unsigned uint_constant;
+ int bool_constant;
+ double double_constant;
+ uint64_t uint64_constant;
+ int64_t int64_constant;
+ } primary_expression;
+
+
+ /**
+ * List of expressions for an \c ast_sequence or parameters for an
+ * \c ast_function_call
+ */
+ exec_list expressions;
+
+ /**
+ * For things that can't be l-values, this describes what it is.
+ *
+ * This text is used by the code that generates IR for assignments to
+ * detect and emit useful messages for assignments to some things that
+ * can't be l-values; for example, pre- or post-increment expressions.
+ *
+ * \note
+ * This pointer may be \c NULL.
+ */
+ const char *non_lvalue_description;
+
+ void set_is_lhs(bool new_value);
+
+private:
+ bool is_lhs;
+};
+
+class ast_expression_bin : public ast_expression {
+public:
+ ast_expression_bin(int oper, ast_expression *, ast_expression *);
+
+ virtual void print(void) const;
+};
+
+/**
+ * Subclass of expressions for function calls
+ */
+class ast_function_expression : public ast_expression {
+public:
+ ast_function_expression(ast_expression *callee)
+ : ast_expression(ast_function_call, callee,
+ NULL, NULL),
+ cons(false)
+ {
+ /* empty */
+ }
+
+ ast_function_expression(class ast_type_specifier *type)
+ : ast_expression(ast_function_call, (ast_expression *) type,
+ NULL, NULL),
+ cons(true)
+ {
+ /* empty */
+ }
+
+ bool is_constructor() const
+ {
+ return cons;
+ }
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ virtual void hir_no_rvalue(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ virtual bool has_sequence_subexpression() const;
+
+private:
+ /**
+ * Is this function call actually a constructor?
+ */
+ bool cons;
+ ir_rvalue *
+ handle_method(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+};
+
+class ast_subroutine_list : public ast_node
+{
+public:
+ virtual void print(void) const;
+ exec_list declarations;
+};
+
+class ast_array_specifier : public ast_node {
+public:
+ ast_array_specifier(const struct YYLTYPE &locp, ast_expression *dim)
+ {
+ set_location(locp);
+ array_dimensions.push_tail(&dim->link);
+ }
+
+ void add_dimension(ast_expression *dim)
+ {
+ array_dimensions.push_tail(&dim->link);
+ }
+
+ bool is_single_dimension() const
+ {
+ return this->array_dimensions.get_tail_raw()->prev != NULL &&
+ this->array_dimensions.get_tail_raw()->prev->is_head_sentinel();
+ }
+
+ virtual void print(void) const;
+
+ /* This list contains objects of type ast_node containing the
+ * array dimensions in outermost-to-innermost order.
+ */
+ exec_list array_dimensions;
+};
+
+class ast_layout_expression : public ast_node {
+public:
+ ast_layout_expression(const struct YYLTYPE &locp, ast_expression *expr)
+ {
+ set_location(locp);
+ layout_const_expressions.push_tail(&expr->link);
+ }
+
+ bool process_qualifier_constant(struct _mesa_glsl_parse_state *state,
+ const char *qual_identifier,
+ unsigned *value, bool can_be_zero);
+
+ void merge_qualifier(ast_layout_expression *l_expr)
+ {
+ layout_const_expressions.append_list(&l_expr->layout_const_expressions);
+ }
+
+ exec_list layout_const_expressions;
+};
+
+/**
+ * C-style aggregate initialization class
+ *
+ * Represents C-style initializers of vectors, matrices, arrays, and
+ * structures. E.g., vec3 pos = {1.0, 0.0, -1.0} is equivalent to
+ * vec3 pos = vec3(1.0, 0.0, -1.0).
+ *
+ * Specified in GLSL 4.20 and GL_ARB_shading_language_420pack.
+ *
+ * \sa _mesa_ast_set_aggregate_type
+ */
+class ast_aggregate_initializer : public ast_expression {
+public:
+ ast_aggregate_initializer()
+ : ast_expression(ast_aggregate, NULL, NULL, NULL),
+ constructor_type(NULL)
+ {
+ /* empty */
+ }
+
+ /**
+ * glsl_type of the aggregate, which is inferred from the LHS of whatever
+ * the aggregate is being used to initialize. This can't be inferred at
+ * parse time (since the parser deals with ast_type_specifiers, not
+ * glsl_types), so the parser leaves it NULL. However, the ast-to-hir
+ * conversion code makes sure to fill it in with the appropriate type
+ * before hir() is called.
+ */
+ const glsl_type *constructor_type;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ virtual void hir_no_rvalue(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+};
+
+
+class ast_compound_statement : public ast_node {
+public:
+ ast_compound_statement(int new_scope, ast_node *statements);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ int new_scope;
+ exec_list statements;
+};
+
+class ast_declaration : public ast_node {
+public:
+ ast_declaration(const char *identifier,
+ ast_array_specifier *array_specifier,
+ ast_expression *initializer);
+ virtual void print(void) const;
+
+ const char *identifier;
+
+ ast_array_specifier *array_specifier;
+
+ ast_expression *initializer;
+};
+
+
+enum {
+ ast_precision_none = 0, /**< Absence of precision qualifier. */
+ ast_precision_high,
+ ast_precision_medium,
+ ast_precision_low
+};
+
+enum {
+ ast_depth_none = 0, /**< Absence of depth qualifier. */
+ ast_depth_any,
+ ast_depth_greater,
+ ast_depth_less,
+ ast_depth_unchanged
+};
+
+struct ast_type_qualifier {
+ DECLARE_RALLOC_CXX_OPERATORS(ast_type_qualifier);
+ /* Note: this bitset needs to have at least as many bits as the 'q'
+ * struct has flags, below. Previously, the size was 128 instead of 96.
+ * But an apparent bug in GCC 5.4.0 causes bad SSE code generation
+ * elsewhere, leading to a crash. 96 bits works around the issue.
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=105497
+ */
+ DECLARE_BITSET_T(bitset_t, 96);
+
+ union flags {
+ struct {
+ unsigned invariant:1;
+ unsigned precise:1;
+ unsigned constant:1;
+ unsigned attribute:1;
+ unsigned varying:1;
+ unsigned in:1;
+ unsigned out:1;
+ unsigned centroid:1;
+ unsigned sample:1;
+ unsigned patch:1;
+ unsigned uniform:1;
+ unsigned buffer:1;
+ unsigned shared_storage:1;
+ unsigned smooth:1;
+ unsigned flat:1;
+ unsigned noperspective:1;
+
+ /** \name Layout qualifiers for GL_ARB_fragment_coord_conventions */
+ /*@{*/
+ unsigned origin_upper_left:1;
+ unsigned pixel_center_integer:1;
+ /*@}*/
+
+ /**
+ * Flag set if GL_ARB_enhanced_layouts "align" layout qualifier is
+ * used.
+ */
+ unsigned explicit_align:1;
+
+ /**
+ * Flag set if GL_ARB_explicit_attrib_location "location" layout
+ * qualifier is used.
+ */
+ unsigned explicit_location:1;
+ /**
+ * Flag set if GL_ARB_explicit_attrib_location "index" layout
+ * qualifier is used.
+ */
+ unsigned explicit_index:1;
+
+ /**
+ * Flag set if GL_ARB_enhanced_layouts "component" layout
+ * qualifier is used.
+ */
+ unsigned explicit_component:1;
+
+ /**
+ * Flag set if GL_ARB_shading_language_420pack "binding" layout
+ * qualifier is used.
+ */
+ unsigned explicit_binding:1;
+
+ /**
+       * Flag set if the GL_ARB_shader_atomic_counters "offset" layout
+ * qualifier is used.
+ */
+ unsigned explicit_offset:1;
+
+ /** \name Layout qualifiers for GL_AMD_conservative_depth */
+ /** \{ */
+ unsigned depth_type:1;
+ /** \} */
+
+ /** \name Layout qualifiers for GL_ARB_uniform_buffer_object */
+ /** \{ */
+ unsigned std140:1;
+ unsigned std430:1;
+ unsigned shared:1;
+ unsigned packed:1;
+ unsigned column_major:1;
+ unsigned row_major:1;
+ /** \} */
+
+ /** \name Layout qualifiers for GLSL 1.50 geometry shaders */
+ /** \{ */
+ unsigned prim_type:1;
+ unsigned max_vertices:1;
+ /** \} */
+
+ /**
+ * local_size_{x,y,z} flags for compute shaders. Bit 0 represents
+ * local_size_x, and so on.
+ */
+ unsigned local_size:3;
+
+ /** \name Layout qualifiers for ARB_compute_variable_group_size. */
+ /** \{ */
+ unsigned local_size_variable:1;
+ /** \} */
+
+ /** \name Layout and memory qualifiers for ARB_shader_image_load_store. */
+ /** \{ */
+ unsigned early_fragment_tests:1;
+ unsigned explicit_image_format:1;
+ unsigned coherent:1;
+ unsigned _volatile:1;
+ unsigned restrict_flag:1;
+ unsigned read_only:1; /**< "readonly" qualifier. */
+ unsigned write_only:1; /**< "writeonly" qualifier. */
+ /** \} */
+
+ /** \name Layout qualifiers for GL_ARB_gpu_shader5 */
+ /** \{ */
+ unsigned invocations:1;
+ unsigned stream:1; /**< Has stream value assigned */
+ unsigned explicit_stream:1; /**< stream value assigned explicitly by shader code */
+ /** \} */
+
+ /** \name Layout qualifiers for GL_ARB_enhanced_layouts */
+ /** \{ */
+ unsigned explicit_xfb_offset:1; /**< xfb_offset value assigned explicitly by shader code */
+ unsigned xfb_buffer:1; /**< Has xfb_buffer value assigned */
+ unsigned explicit_xfb_buffer:1; /**< xfb_buffer value assigned explicitly by shader code */
+ unsigned xfb_stride:1; /**< Is xfb_stride value yet to be merged with global values */
+ unsigned explicit_xfb_stride:1; /**< xfb_stride value assigned explicitly by shader code */
+ /** \} */
+
+ /** \name Layout qualifiers for GL_ARB_tessellation_shader */
+ /** \{ */
+ /* tess eval input layout */
+ /* gs prim_type reused for primitive mode */
+ unsigned vertex_spacing:1;
+ unsigned ordering:1;
+ unsigned point_mode:1;
+ /* tess control output layout */
+ unsigned vertices:1;
+ /** \} */
+
+ /** \name Qualifiers for GL_ARB_shader_subroutine */
+ /** \{ */
+ unsigned subroutine:1; /**< Is this marked 'subroutine' */
+ /** \} */
+
+ /** \name Qualifiers for GL_KHR_blend_equation_advanced */
+ /** \{ */
+ unsigned blend_support:1; /**< Are there any blend_support_ qualifiers */
+ /** \} */
+
+ /**
+ * Flag set if GL_ARB_post_depth_coverage layout qualifier is used.
+ */
+ unsigned post_depth_coverage:1;
+
+ /**
+       * Flags for the layout qualifiers added by ARB_fragment_shader_interlock.
+ */
+
+ unsigned pixel_interlock_ordered:1;
+ unsigned pixel_interlock_unordered:1;
+ unsigned sample_interlock_ordered:1;
+ unsigned sample_interlock_unordered:1;
+
+ /**
+       * Flag set if the GL_INTEL_conservative_rasterization layout qualifier
+ * is used.
+ */
+ unsigned inner_coverage:1;
+
+ /** \name Layout qualifiers for GL_ARB_bindless_texture */
+ /** \{ */
+ unsigned bindless_sampler:1;
+ unsigned bindless_image:1;
+ unsigned bound_sampler:1;
+ unsigned bound_image:1;
+ /** \} */
+
+ /** \name Layout qualifiers for GL_EXT_shader_framebuffer_fetch_non_coherent */
+ /** \{ */
+ unsigned non_coherent:1;
+ /** \} */
+
+ /** \name Layout qualifiers for NV_compute_shader_derivatives */
+ /** \{ */
+ unsigned derivative_group:1;
+ /** \} */
+
+ /**
+ * Flag set if GL_NV_viewport_array2 viewport_relative layout
+ * qualifier is used.
+ */
+ unsigned viewport_relative:1;
+ }
+ /** \brief Set of flags, accessed by name. */
+ q;
+
+ /** \brief Set of flags, accessed as a bitmask. */
+ bitset_t i;
+ } flags;
+
+   /** Precision of the type (highp/mediump/lowp). */
+ unsigned precision:2;
+
+ /** Type of layout qualifiers for GL_AMD_conservative_depth. */
+ unsigned depth_type:3;
+
+ /**
+ * Alignment specified via GL_ARB_enhanced_layouts "align" layout qualifier
+ */
+ ast_expression *align;
+
+ /** Geometry shader invocations for GL_ARB_gpu_shader5. */
+ ast_layout_expression *invocations;
+
+ /**
+ * Location specified via GL_ARB_explicit_attrib_location layout
+ *
+ * \note
+ * This field is only valid if \c explicit_location is set.
+ */
+ ast_expression *location;
+ /**
+ * Index specified via GL_ARB_explicit_attrib_location layout
+ *
+ * \note
+ * This field is only valid if \c explicit_index is set.
+ */
+ ast_expression *index;
+
+ /**
+    * Component specified via GL_ARB_enhanced_layouts
+ *
+ * \note
+ * This field is only valid if \c explicit_component is set.
+ */
+ ast_expression *component;
+
+ /** Maximum output vertices in GLSL 1.50 geometry shaders. */
+ ast_layout_expression *max_vertices;
+
+ /** Stream in GLSL 1.50 geometry shaders. */
+ ast_expression *stream;
+
+ /** xfb_buffer specified via the GL_ARB_enhanced_layouts keyword. */
+ ast_expression *xfb_buffer;
+
+ /** xfb_stride specified via the GL_ARB_enhanced_layouts keyword. */
+ ast_expression *xfb_stride;
+
+ /** global xfb_stride values for each buffer */
+ ast_layout_expression *out_xfb_stride[MAX_FEEDBACK_BUFFERS];
+
+ /**
+ * Input or output primitive type in GLSL 1.50 geometry shaders
+ * and tessellation shaders.
+ */
+ GLenum prim_type;
+
+ /**
+ * Binding specified via GL_ARB_shading_language_420pack's "binding" keyword.
+ *
+ * \note
+ * This field is only valid if \c explicit_binding is set.
+ */
+ ast_expression *binding;
+
+ /**
+    * Offset specified via the GL_ARB_shader_atomic_counters "offset" keyword,
+    * the GL_ARB_enhanced_layouts "offset" keyword, or the
+    * GL_ARB_enhanced_layouts "xfb_offset" keyword.
+ *
+ * \note
+ * This field is only valid if \c explicit_offset is set.
+ */
+ ast_expression *offset;
+
+ /**
+ * Local size specified via GL_ARB_compute_shader's "local_size_{x,y,z}"
+ * layout qualifier. Element i of this array is only valid if
+ * flags.q.local_size & (1 << i) is set.
+ */
+ ast_layout_expression *local_size[3];
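+
+   /* For example, a compute shader declaring
+    *
+    *    layout(local_size_x = 8, local_size_y = 4) in;
+    *
+    * sets bits 0 and 1 of flags.q.local_size and fills in local_size[0]
+    * and local_size[1]; local_size[2] remains invalid because bit 2 is
+    * unset.
+    */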
+
+ /** Tessellation evaluation shader: vertex spacing (equal, fractional even/odd) */
+ enum gl_tess_spacing vertex_spacing;
+
+ /** Tessellation evaluation shader: vertex ordering (CW or CCW) */
+ GLenum ordering;
+
+ /** Tessellation evaluation shader: point mode */
+ bool point_mode;
+
+ /** Tessellation control shader: number of output vertices */
+ ast_layout_expression *vertices;
+
+ /**
+ * Image format specified with an ARB_shader_image_load_store
+ * layout qualifier.
+ *
+ * \note
+ * This field is only valid if \c explicit_image_format is set.
+ */
+ enum pipe_format image_format;
+
+ /**
+ * Arrangement of invocations used to calculate derivatives in a compute
+ * shader. From NV_compute_shader_derivatives.
+ */
+ enum gl_derivative_group derivative_group;
+
+ /**
+ * Base type of the data read from or written to this image. Only
+ * the following enumerants are allowed: GLSL_TYPE_UINT,
+ * GLSL_TYPE_INT, GLSL_TYPE_FLOAT.
+ *
+ * \note
+ * This field is only valid if \c explicit_image_format is set.
+ */
+ glsl_base_type image_base_type;
+
+ /**
+ * Return true if and only if an interpolation qualifier is present.
+ */
+ bool has_interpolation() const;
+
+ /**
+ * Return whether a layout qualifier is present.
+ */
+ bool has_layout() const;
+
+ /**
+ * Return whether a storage qualifier is present.
+ */
+ bool has_storage() const;
+
+ /**
+ * Return whether an auxiliary storage qualifier is present.
+ */
+ bool has_auxiliary_storage() const;
+
+ /**
+ * Return true if and only if a memory qualifier is present.
+ */
+ bool has_memory() const;
+
+ /**
+ * Return true if the qualifier is a subroutine declaration.
+ */
+ bool is_subroutine_decl() const;
+
+ bool merge_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &q,
+ bool is_single_layout_merge,
+ bool is_multiple_layouts_merge = false);
+
+ /**
+ * Validate current qualifier against the global out one.
+ */
+ bool validate_out_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state);
+
+ /**
+ * Merge current qualifier into the global out one.
+ */
+ bool merge_into_out_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ ast_node* &node);
+
+ /**
+ * Validate current qualifier against the global in one.
+ */
+ bool validate_in_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state);
+
+ /**
+ * Merge current qualifier into the global in one.
+ */
+ bool merge_into_in_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ ast_node* &node);
+
+ /**
+ * Push pending layout qualifiers to the global values.
+ */
+ bool push_to_global(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state);
+
+ bool validate_flags(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &allowed_flags,
+ const char *message, const char *name);
+
+ ast_subroutine_list *subroutine_list;
+};
+
+class ast_declarator_list;
+
+class ast_struct_specifier : public ast_node {
+public:
+ ast_struct_specifier(const char *identifier,
+ ast_declarator_list *declarator_list);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ const char *name;
+ ast_type_qualifier *layout;
+ /* List of ast_declarator_list * */
+ exec_list declarations;
+ bool is_declaration;
+ const glsl_type *type;
+};
+
+
+
+class ast_type_specifier : public ast_node {
+public:
+ /** Construct a type specifier from a type name */
+ ast_type_specifier(const char *name)
+ : type(NULL), type_name(name), structure(NULL), array_specifier(NULL),
+ default_precision(ast_precision_none)
+ {
+ /* empty */
+ }
+
+ /** Construct a type specifier from a structure definition */
+ ast_type_specifier(ast_struct_specifier *s)
+ : type(NULL), type_name(s->name), structure(s), array_specifier(NULL),
+ default_precision(ast_precision_none)
+ {
+ /* empty */
+ }
+
+ ast_type_specifier(const glsl_type *t)
+ : type(t), type_name(t->name), structure(NULL), array_specifier(NULL),
+ default_precision(ast_precision_none)
+ {
+ /* empty */
+ }
+
+ const struct glsl_type *glsl_type(const char **name,
+ struct _mesa_glsl_parse_state *state)
+ const;
+
+ virtual void print(void) const;
+
+ ir_rvalue *hir(exec_list *, struct _mesa_glsl_parse_state *);
+
+ const struct glsl_type *type;
+ const char *type_name;
+ ast_struct_specifier *structure;
+
+ ast_array_specifier *array_specifier;
+
+ /** For precision statements, this is the given precision; otherwise none. */
+ unsigned default_precision:2;
+};
+
+
+class ast_fully_specified_type : public ast_node {
+public:
+ virtual void print(void) const;
+ bool has_qualifiers(_mesa_glsl_parse_state *state) const;
+
+ ast_fully_specified_type() : qualifier(), specifier(NULL)
+ {
+ }
+
+ const struct glsl_type *glsl_type(const char **name,
+ struct _mesa_glsl_parse_state *state)
+ const;
+
+ ast_type_qualifier qualifier;
+ ast_type_specifier *specifier;
+};
+
+
+class ast_declarator_list : public ast_node {
+public:
+ ast_declarator_list(ast_fully_specified_type *);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_fully_specified_type *type;
+ /** List of 'ast_declaration *' */
+ exec_list declarations;
+
+ /**
+    * Flags for redeclarations. In these cases, no type is specified, so
+    * `type` is allowed to be NULL. In all other cases, this would be an error.
+    */
+   int invariant;   /**< `invariant` redeclaration */
+   int precise;     /**< `precise` redeclaration */
+};
+
+
+class ast_parameter_declarator : public ast_node {
+public:
+ ast_parameter_declarator() :
+ type(NULL),
+ identifier(NULL),
+ array_specifier(NULL),
+ formal_parameter(false),
+ is_void(false)
+ {
+ /* empty */
+ }
+
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_fully_specified_type *type;
+ const char *identifier;
+ ast_array_specifier *array_specifier;
+
+ static void parameters_to_hir(exec_list *ast_parameters,
+ bool formal, exec_list *ir_parameters,
+ struct _mesa_glsl_parse_state *state);
+
+private:
+ /** Is this parameter declaration part of a formal parameter list? */
+ bool formal_parameter;
+
+ /**
+ * Is this parameter 'void' type?
+ *
+ * This field is set by \c ::hir.
+ */
+ bool is_void;
+};
+
+
+class ast_function : public ast_node {
+public:
+ ast_function(void);
+
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_fully_specified_type *return_type;
+ const char *identifier;
+
+ exec_list parameters;
+
+private:
+ /**
+ * Is this prototype part of the function definition?
+ *
+ * Used by ast_function_definition::hir to process the parameters, etc.
+ * of the function.
+ *
+ * \sa ::hir
+ */
+ bool is_definition;
+
+ /**
+ * Function signature corresponding to this function prototype instance
+ *
+ * Used by ast_function_definition::hir to process the parameters, etc.
+ * of the function.
+ *
+ * \sa ::hir
+ */
+ class ir_function_signature *signature;
+
+ friend class ast_function_definition;
+};
+
+
+class ast_expression_statement : public ast_node {
+public:
+ ast_expression_statement(ast_expression *);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_expression *expression;
+};
+
+
+class ast_case_label : public ast_node {
+public:
+ ast_case_label(ast_expression *test_value);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ /**
+    * A test value of NULL means 'default'.
+ */
+ ast_expression *test_value;
+};
+
+
+class ast_case_label_list : public ast_node {
+public:
+ ast_case_label_list(void);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ /**
+ * A list of case labels.
+ */
+ exec_list labels;
+};
+
+
+class ast_case_statement : public ast_node {
+public:
+ ast_case_statement(ast_case_label_list *labels);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_case_label_list *labels;
+
+ /**
+ * A list of statements.
+ */
+ exec_list stmts;
+};
+
+
+class ast_case_statement_list : public ast_node {
+public:
+ ast_case_statement_list(void);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ /**
+ * A list of cases.
+ */
+ exec_list cases;
+};
+
+
+class ast_switch_body : public ast_node {
+public:
+ ast_switch_body(ast_case_statement_list *stmts);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_case_statement_list *stmts;
+};
+
+
+class ast_selection_statement : public ast_node {
+public:
+ ast_selection_statement(ast_expression *condition,
+ ast_node *then_statement,
+ ast_node *else_statement);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_expression *condition;
+ ast_node *then_statement;
+ ast_node *else_statement;
+};
+
+
+class ast_switch_statement : public ast_node {
+public:
+ ast_switch_statement(ast_expression *test_expression,
+ ast_node *body);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_expression *test_expression;
+ ast_node *body;
+
+protected:
+ void test_to_hir(exec_list *, struct _mesa_glsl_parse_state *);
+};
+
+class ast_iteration_statement : public ast_node {
+public:
+ ast_iteration_statement(int mode, ast_node *init, ast_node *condition,
+ ast_expression *rest_expression, ast_node *body);
+
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *, struct _mesa_glsl_parse_state *);
+
+ enum ast_iteration_modes {
+ ast_for,
+ ast_while,
+ ast_do_while
+ } mode;
+
+
+ ast_node *init_statement;
+ ast_node *condition;
+ ast_expression *rest_expression;
+
+ ast_node *body;
+
+ /**
+ * Generate IR from the condition of a loop
+ *
+ * This is factored out of ::hir because some loops have the condition
+ * test at the top (for and while), and others have it at the end (do-while).
+ */
+ void condition_to_hir(exec_list *, struct _mesa_glsl_parse_state *);
+};
+
+
+class ast_jump_statement : public ast_node {
+public:
+ ast_jump_statement(int mode, ast_expression *return_value);
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ enum ast_jump_modes {
+ ast_continue,
+ ast_break,
+ ast_return,
+ ast_discard
+ } mode;
+
+ ast_expression *opt_return_value;
+};
+
+
+class ast_demote_statement : public ast_node {
+public:
+ ast_demote_statement(void) {}
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+};
+
+
+class ast_function_definition : public ast_node {
+public:
+ ast_function_definition() : prototype(NULL), body(NULL)
+ {
+ }
+
+ virtual void print(void) const;
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_function *prototype;
+ ast_compound_statement *body;
+};
+
+class ast_interface_block : public ast_node {
+public:
+ ast_interface_block(const char *instance_name,
+ ast_array_specifier *array_specifier)
+ : block_name(NULL), instance_name(instance_name),
+ array_specifier(array_specifier)
+ {
+ }
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+ ast_type_qualifier default_layout;
+ ast_type_qualifier layout;
+ const char *block_name;
+
+ /**
+ * Declared name of the block instance, if specified.
+ *
+ * If the block does not have an instance name, this field will be
+ * \c NULL.
+ */
+ const char *instance_name;
+
+ /** List of ast_declarator_list * */
+ exec_list declarations;
+
+ /**
+ * Declared array size of the block instance
+ *
+ * If the block is not declared as an array or if the block instance array
+ * is unsized, this field will be \c NULL.
+ */
+ ast_array_specifier *array_specifier;
+};
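+
+/* As a sketch of how the fields map onto source, for a block declared as
+ *
+ *    uniform Transforms { mat4 mvp; } xf[2];
+ *
+ * block_name is "Transforms", instance_name is "xf", the declaration of
+ * "mvp" lands in declarations, and array_specifier describes the [2]
+ * dimension.
+ */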
+
+
+/**
+ * AST node representing a declaration of the output layout for tessellation
+ * control shaders.
+ */
+class ast_tcs_output_layout : public ast_node
+{
+public:
+ ast_tcs_output_layout(const struct YYLTYPE &locp)
+ {
+ set_location(locp);
+ }
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+};
+
+
+/**
+ * AST node representing a declaration of the input layout for geometry
+ * shaders.
+ */
+class ast_gs_input_layout : public ast_node
+{
+public:
+ ast_gs_input_layout(const struct YYLTYPE &locp, GLenum prim_type)
+ : prim_type(prim_type)
+ {
+ set_location(locp);
+ }
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+private:
+ const GLenum prim_type;
+};
+
+
+/**
+ * AST node representing a declaration of the input layout for compute
+ * shaders.
+ */
+class ast_cs_input_layout : public ast_node
+{
+public:
+ ast_cs_input_layout(const struct YYLTYPE &locp,
+ ast_layout_expression *const *local_size)
+ {
+ for (int i = 0; i < 3; i++) {
+ this->local_size[i] = local_size[i];
+ }
+ set_location(locp);
+ }
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+private:
+ ast_layout_expression *local_size[3];
+};
+
+class ast_warnings_toggle : public ast_node {
+public:
+ ast_warnings_toggle(bool _enable)
+ : enable(_enable)
+ {
+ /* empty */
+ }
+
+ virtual ir_rvalue *hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+private:
+ bool enable;
+};
+/*@}*/
+
+extern void
+_mesa_ast_to_hir(exec_list *instructions, struct _mesa_glsl_parse_state *state);
+
+extern ir_rvalue *
+_mesa_ast_field_selection_to_hir(const ast_expression *expr,
+ exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+extern ir_rvalue *
+_mesa_ast_array_index_to_hir(void *mem_ctx,
+ struct _mesa_glsl_parse_state *state,
+ ir_rvalue *array, ir_rvalue *idx,
+ YYLTYPE &loc, YYLTYPE &idx_loc);
+
+extern void
+_mesa_ast_set_aggregate_type(const glsl_type *type,
+ ast_expression *expr);
+
+void
+emit_function(_mesa_glsl_parse_state *state, ir_function *f);
+
+extern void
+check_builtin_array_max_size(const char *name, unsigned size,
+ YYLTYPE loc, struct _mesa_glsl_parse_state *state);
+
+extern void _mesa_ast_process_interface_block(YYLTYPE *locp,
+ _mesa_glsl_parse_state *state,
+ ast_interface_block *const block,
+ const struct ast_type_qualifier &q);
+
+extern bool
+process_qualifier_constant(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const char *qual_indentifier,
+ ast_expression *const_expression,
+ unsigned *value);
+#endif /* AST_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_array_index.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_array_index.cpp
new file mode 100644
index 0000000000..ea305b5470
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_array_index.cpp
@@ -0,0 +1,364 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ast.h"
+#include "compiler/glsl_types.h"
+#include "ir.h"
+
+void
+ast_array_specifier::print(void) const
+{
+ foreach_list_typed (ast_node, array_dimension, link, &this->array_dimensions) {
+ printf("[ ");
+ if (((ast_expression*)array_dimension)->oper != ast_unsized_array_dim)
+ array_dimension->print();
+ printf("] ");
+ }
+}
+
+/**
+ * If \c ir is a reference to an array for which we are tracking the max array
+ * element accessed, track that the given element has been accessed.
+ * Otherwise do nothing.
+ *
+ * This function also checks whether the array is a built-in array whose
+ * maximum size is too small to accommodate the given index, and if so uses
+ * loc and state to report the error.
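+ *
+ * For instance, a constant-indexed access such as gl_TexCoord[3] raises
+ * the variable's max_array_access to 3; check_builtin_array_max_size()
+ * then reports an error if the built-in array cannot hold at least 4
+ * elements.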
+ */
+static void
+update_max_array_access(ir_rvalue *ir, int idx, YYLTYPE *loc,
+ struct _mesa_glsl_parse_state *state)
+{
+ if (ir_dereference_variable *deref_var = ir->as_dereference_variable()) {
+ ir_variable *var = deref_var->var;
+ if (idx > (int)var->data.max_array_access) {
+ var->data.max_array_access = idx;
+
+ /* Check whether this access will, as a side effect, implicitly cause
+ * the size of a built-in array to be too large.
+ */
+ check_builtin_array_max_size(var->name, idx+1, *loc, state);
+ }
+ } else if (ir_dereference_record *deref_record =
+ ir->as_dereference_record()) {
+ /* There are three possibilities we need to consider:
+ *
+ * - Accessing an element of an array that is a member of a named
+ * interface block (e.g. ifc.foo[i])
+ *
+ * - Accessing an element of an array that is a member of a named
+ * interface block array (e.g. ifc[j].foo[i]).
+ *
+ * - Accessing an element of an array that is a member of a named
+ * interface block array of arrays (e.g. ifc[j][k].foo[i]).
+ */
+ ir_dereference_variable *deref_var =
+ deref_record->record->as_dereference_variable();
+ if (deref_var == NULL) {
+ ir_dereference_array *deref_array =
+ deref_record->record->as_dereference_array();
+ ir_dereference_array *deref_array_prev = NULL;
+ while (deref_array != NULL) {
+ deref_array_prev = deref_array;
+ deref_array = deref_array->array->as_dereference_array();
+ }
+ if (deref_array_prev != NULL)
+ deref_var = deref_array_prev->array->as_dereference_variable();
+ }
+
+ if (deref_var != NULL) {
+ if (deref_var->var->is_interface_instance()) {
+ unsigned field_idx = deref_record->field_idx;
+ assert(field_idx < deref_var->var->get_interface_type()->length);
+
+ int *const max_ifc_array_access =
+ deref_var->var->get_max_ifc_array_access();
+
+ assert(max_ifc_array_access != NULL);
+
+ if (idx > max_ifc_array_access[field_idx]) {
+ max_ifc_array_access[field_idx] = idx;
+
+ /* Check whether this access will, as a side effect, implicitly
+ * cause the size of a built-in array to be too large.
+ */
+ const char *field_name =
+ deref_record->record->type->fields.structure[field_idx].name;
+ check_builtin_array_max_size(field_name, idx+1, *loc, state);
+ }
+ }
+ }
+ }
+}
+
+
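+/**
+ * Return the implicit size of \c array, or 0 if there is none.
+ *
+ * For example, a tessellation control shader input declared as
+ * "in vec4 position[];" is implicitly sized to the maximum patch size,
+ * so state->Const.MaxPatchVertices is returned for it.
+ */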
+static int
+get_implicit_array_size(struct _mesa_glsl_parse_state *state,
+ ir_rvalue *array)
+{
+ ir_variable *var = array->variable_referenced();
+
+   /* Inputs in a tessellation control shader are implicitly sized
+    * to the maximum patch size.
+ */
+ if (state->stage == MESA_SHADER_TESS_CTRL &&
+ var->data.mode == ir_var_shader_in) {
+ return state->Const.MaxPatchVertices;
+ }
+
+   /* Non-patch inputs in a tessellation evaluation shader are implicitly
+    * sized to the maximum patch size.
+ */
+ if (state->stage == MESA_SHADER_TESS_EVAL &&
+ var->data.mode == ir_var_shader_in &&
+ !var->data.patch) {
+ return state->Const.MaxPatchVertices;
+ }
+
+ return 0;
+}
+
+
+ir_rvalue *
+_mesa_ast_array_index_to_hir(void *mem_ctx,
+ struct _mesa_glsl_parse_state *state,
+ ir_rvalue *array, ir_rvalue *idx,
+ YYLTYPE &loc, YYLTYPE &idx_loc)
+{
+ if (!array->type->is_error()
+ && !array->type->is_array()
+ && !array->type->is_matrix()
+ && !array->type->is_vector()) {
+ _mesa_glsl_error(& idx_loc, state,
+ "cannot dereference non-array / non-matrix / "
+ "non-vector");
+ }
+
+ if (!idx->type->is_error()) {
+ if (!idx->type->is_integer_32()) {
+ _mesa_glsl_error(& idx_loc, state, "array index must be integer type");
+ } else if (!idx->type->is_scalar()) {
+ _mesa_glsl_error(& idx_loc, state, "array index must be scalar");
+ }
+ }
+
+ /* If the array index is a constant expression and the array has a
+ * declared size, ensure that the access is in-bounds. If the array
+ * index is not a constant expression, ensure that the array has a
+ * declared size.
+ */
+ ir_constant *const const_index = idx->constant_expression_value(mem_ctx);
+ if (const_index != NULL && idx->type->is_integer_32()) {
+ const int idx = const_index->value.i[0];
+ const char *type_name = "error";
+ unsigned bound = 0;
+
+ /* From page 24 (page 30 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "It is illegal to declare an array with a size, and then
+ * later (in the same shader) index the same array with an
+ * integral constant expression greater than or equal to the
+ * declared size. It is also illegal to index an array with a
+ * negative constant expression."
+ */
+ if (array->type->is_matrix()) {
+ if (array->type->row_type()->vector_elements <= idx) {
+ type_name = "matrix";
+ bound = array->type->row_type()->vector_elements;
+ }
+ } else if (array->type->is_vector()) {
+ if (array->type->vector_elements <= idx) {
+ type_name = "vector";
+ bound = array->type->vector_elements;
+ }
+ } else {
+ /* glsl_type::array_size() returns -1 for non-array types. This means
+ * that we don't need to verify that the type is an array before
+ * doing the bounds checking.
+ */
+ if ((array->type->array_size() > 0)
+ && (array->type->array_size() <= idx)) {
+ type_name = "array";
+ bound = array->type->array_size();
+ }
+ }
+
+ if (bound > 0) {
+ _mesa_glsl_error(& loc, state, "%s index must be < %u",
+ type_name, bound);
+ } else if (idx < 0) {
+ _mesa_glsl_error(& loc, state, "%s index must be >= 0", type_name);
+ }
+
+ if (array->type->is_array())
+ update_max_array_access(array, idx, &loc, state);
+ } else if (const_index == NULL && array->type->is_array()) {
+ if (array->type->is_unsized_array()) {
+ int implicit_size = get_implicit_array_size(state, array);
+ if (implicit_size) {
+ ir_variable *v = array->whole_variable_referenced();
+ if (v != NULL)
+ v->data.max_array_access = implicit_size - 1;
+ }
+ else if (state->stage == MESA_SHADER_TESS_CTRL &&
+ array->variable_referenced()->data.mode == ir_var_shader_out &&
+ !array->variable_referenced()->data.patch) {
+ /* Tessellation control shader output non-patch arrays are
+ * initially unsized. Despite that, they are allowed to be
+ * indexed with a non-constant expression (typically
+ * "gl_InvocationID"). The array size will be determined
+ * by the linker.
+ */
+ }
+ else if (array->variable_referenced()->data.mode !=
+ ir_var_shader_storage) {
+ _mesa_glsl_error(&loc, state, "unsized array index must be constant");
+ } else {
+         /* Non-constant indexing of an unsized array is allowed only on the
+          * last member of an SSBO definition.
+ */
+ ir_variable *var = array->variable_referenced();
+ const glsl_type *iface_type = var->get_interface_type();
+ int field_index = iface_type->field_index(var->name);
+ /* Field index can be < 0 for instance arrays */
+ if (field_index >= 0 &&
+ field_index != (int) iface_type->length - 1) {
+ _mesa_glsl_error(&loc, state, "Indirect access on unsized "
+ "array is limited to the last member of "
+ "SSBO.");
+ }
+ }
+ } else if (array->type->without_array()->is_interface()
+ && ((array->variable_referenced()->data.mode == ir_var_uniform
+ && !state->is_version(400, 320)
+ && !state->ARB_gpu_shader5_enable
+ && !state->EXT_gpu_shader5_enable
+ && !state->OES_gpu_shader5_enable) ||
+ (array->variable_referenced()->data.mode == ir_var_shader_storage
+ && !state->is_version(400, 0)
+ && !state->ARB_gpu_shader5_enable))) {
+ /* Page 50 in section 4.3.9 of the OpenGL ES 3.10 spec says:
+ *
+ * "All indices used to index a uniform or shader storage block
+ * array must be constant integral expressions."
+ *
+       * But OES_gpu_shader5 (and ESSL 3.20) relaxes this to allow indexing
+       * on uniform blocks, but not on shader storage blocks.
+ */
+ _mesa_glsl_error(&loc, state, "%s block array index must be constant",
+ array->variable_referenced()->data.mode
+ == ir_var_uniform ? "uniform" : "shader storage");
+ } else {
+ /* whole_variable_referenced can return NULL if the array is a
+ * member of a structure. In this case it is safe to not update
+ * the max_array_access field because it is never used for fields
+ * of structures.
+ */
+ ir_variable *v = array->whole_variable_referenced();
+ if (v != NULL)
+ v->data.max_array_access = array->type->array_size() - 1;
+ }
+
+ /* From page 23 (29 of the PDF) of the GLSL 1.30 spec:
+ *
+ * "Samplers aggregated into arrays within a shader (using square
+ * brackets [ ]) can only be indexed with integral constant
+ * expressions [...]."
+ *
+ * This restriction was added in GLSL 1.30. Shaders using earlier
+    * versions of the language should not be rejected by the compiler
+ * front-end for using this construct. This allows useful things such
+ * as using a loop counter as the index to an array of samplers. If the
+    * loop is unrolled, the code should compile correctly. Instead, emit a
+ * warning.
+ *
+ * In GLSL 4.00 / ARB_gpu_shader5, this requirement is relaxed again to allow
+    * indexing with dynamically uniform expressions. Note that these need not
+    * be uniforms or expressions based on them; the values must merely not
+    * diverge between shader invocations run together. If the
+ * values *do* diverge, then the behavior of the operation requiring a
+ * dynamically uniform expression is undefined.
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers aggregated into arrays within a shader (using square
+ * brackets []) can be indexed with arbitrary integer expressions."
+ */
+ if (array->type->without_array()->is_sampler()) {
+ if (!state->is_version(400, 320) &&
+ !state->ARB_gpu_shader5_enable &&
+ !state->EXT_gpu_shader5_enable &&
+ !state->OES_gpu_shader5_enable &&
+ !state->has_bindless()) {
+ if (state->is_version(130, 300))
+ _mesa_glsl_error(&loc, state,
+ "sampler arrays indexed with non-constant "
+ "expressions are forbidden in GLSL %s "
+ "and later",
+ state->es_shader ? "ES 3.00" : "1.30");
+ else if (state->es_shader)
+ _mesa_glsl_warning(&loc, state,
+ "sampler arrays indexed with non-constant "
+ "expressions will be forbidden in GLSL "
+ "3.00 and later");
+ else
+ _mesa_glsl_warning(&loc, state,
+ "sampler arrays indexed with non-constant "
+ "expressions will be forbidden in GLSL "
+ "1.30 and later");
+ }
+ }
+
+ /* From page 27 of the GLSL ES 3.1 specification:
+ *
+ * "When aggregated into arrays within a shader, images can only be
+ * indexed with a constant integral expression."
+ *
+    * On the other hand, the desktop GL specification's extensions allow
+ * non-constant indexing of image arrays, but behavior is left undefined
+ * in cases where the indexing expression is not dynamically uniform.
+ */
+ if (state->es_shader && array->type->without_array()->is_image()) {
+ _mesa_glsl_error(&loc, state,
+ "image arrays indexed with non-constant "
+ "expressions are forbidden in GLSL ES.");
+ }
+ }
+
+ /* After performing all of the error checking, generate the IR for the
+ * expression.
+ */
+ if (array->type->is_array()
+ || array->type->is_matrix()
+ || array->type->is_vector()) {
+ return new(mem_ctx) ir_dereference_array(array, idx);
+ } else if (array->type->is_error()) {
+ return array;
+ } else {
+ ir_rvalue *result = new(mem_ctx) ir_dereference_array(array, idx);
+ result->type = glsl_type::error_type;
+
+ return result;
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_expr.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_expr.cpp
new file mode 100644
index 0000000000..1fd5b6e642
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_expr.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include "ast.h"
+
+const char *
+ast_expression::operator_string(enum ast_operators op)
+{
+ static const char *const operators[] = {
+ "=",
+ "+",
+ "-",
+ "+",
+ "-",
+ "*",
+ "/",
+ "%",
+ "<<",
+ ">>",
+ "<",
+ ">",
+ "<=",
+ ">=",
+ "==",
+ "!=",
+ "&",
+ "^",
+ "|",
+ "~",
+ "&&",
+ "^^",
+ "||",
+ "!",
+
+ "*=",
+ "/=",
+ "%=",
+ "+=",
+ "-=",
+ "<<=",
+ ">>=",
+ "&=",
+ "^=",
+ "|=",
+
+ "?:",
+
+ "++",
+ "--",
+ "++",
+ "--",
+ ".",
+ };
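+
+   /* Note that "+", "-", "++", and "--" each appear twice above, mirroring
+    * the ast_operators enumeration: once for the unary plus/negate forms
+    * and once for binary add/subtract, and once each for the pre- and
+    * post-increment/decrement forms.
+    */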
+
+ assert((unsigned int)op < sizeof(operators) / sizeof(operators[0]));
+
+ return operators[op];
+}
+
+
+ast_expression_bin::ast_expression_bin(int oper, ast_expression *ex0,
+ ast_expression *ex1) :
+ ast_expression(oper, ex0, ex1, NULL)
+{
+ assert((oper >= ast_plus) && (oper <= ast_logic_not));
+}
+
+
+void
+ast_expression_bin::print(void) const
+{
+ subexpressions[0]->print();
+ printf("%s ", operator_string(oper));
+ subexpressions[1]->print();
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_function.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_function.cpp
new file mode 100644
index 0000000000..b6b81bf1e1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_function.cpp
@@ -0,0 +1,2512 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "glsl_symbol_table.h"
+#include "ast.h"
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "main/mtypes.h"
+#include "main/shaderobj.h"
+#include "builtin_functions.h"
+
+static ir_rvalue *
+convert_component(ir_rvalue *src, const glsl_type *desired_type);
+
+static unsigned
+process_parameters(exec_list *instructions, exec_list *actual_parameters,
+ exec_list *parameters,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *mem_ctx = state;
+ unsigned count = 0;
+
+ foreach_list_typed(ast_node, ast, link, parameters) {
+      /* We need to process the parameters first in order to know whether we
+       * should raise an uninitialized-variable warning. Calling set_is_lhs
+       * silences the warning for now; whether it is actually raised is
+       * decided later, in verify_parameter_modes.
+ */
+ ast->set_is_lhs(true);
+ ir_rvalue *result = ast->hir(instructions, state);
+
+ /* Error happened processing function parameter */
+ if (!result) {
+ actual_parameters->push_tail(ir_rvalue::error_value(mem_ctx));
+ count++;
+ continue;
+ }
+
+ ir_constant *const constant =
+ result->constant_expression_value(mem_ctx);
+
+ if (constant != NULL)
+ result = constant;
+
+ actual_parameters->push_tail(result);
+ count++;
+ }
+
+ return count;
+}
+
+
+/**
+ * Generate a source prototype for a function signature
+ *
+ * \param return_type Return type of the function. May be \c NULL.
+ * \param name Name of the function.
+ * \param parameters List of \c ir_instruction nodes representing the
+ * parameter list for the function. This may be either a
+ * formal (\c ir_variable) or actual (\c ir_rvalue)
+ * parameter list. Only the type is used.
+ *
+ * \return
+ * A ralloced string representing the prototype of the function.
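+ *
+ * For example, given the float type, the name "foo", and two float
+ * parameters, the result would be "float foo(float, float)". When
+ * \c return_type is \c NULL, the leading return type is omitted.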
+ */
+char *
+prototype_string(const glsl_type *return_type, const char *name,
+ exec_list *parameters)
+{
+ char *str = NULL;
+
+ if (return_type != NULL)
+ str = ralloc_asprintf(NULL, "%s ", return_type->name);
+
+ ralloc_asprintf_append(&str, "%s(", name);
+
+ const char *comma = "";
+ foreach_in_list(const ir_variable, param, parameters) {
+ ralloc_asprintf_append(&str, "%s%s", comma, param->type->name);
+ comma = ", ";
+ }
+
+ ralloc_strcat(&str, ")");
+ return str;
+}
+
+static bool
+verify_image_parameter(YYLTYPE *loc, _mesa_glsl_parse_state *state,
+ const ir_variable *formal, const ir_variable *actual)
+{
+ /**
+ * From the ARB_shader_image_load_store specification:
+ *
+ * "The values of image variables qualified with coherent,
+ * volatile, restrict, readonly, or writeonly may not be passed
+ * to functions whose formal parameters lack such
+ * qualifiers. [...] It is legal to have additional qualifiers
+ * on a formal parameter, but not to have fewer."
+ */
+ if (actual->data.memory_coherent && !formal->data.memory_coherent) {
+ _mesa_glsl_error(loc, state,
+ "function call parameter `%s' drops "
+ "`coherent' qualifier", formal->name);
+ return false;
+ }
+
+ if (actual->data.memory_volatile && !formal->data.memory_volatile) {
+ _mesa_glsl_error(loc, state,
+ "function call parameter `%s' drops "
+ "`volatile' qualifier", formal->name);
+ return false;
+ }
+
+ if (actual->data.memory_restrict && !formal->data.memory_restrict) {
+ _mesa_glsl_error(loc, state,
+ "function call parameter `%s' drops "
+ "`restrict' qualifier", formal->name);
+ return false;
+ }
+
+ if (actual->data.memory_read_only && !formal->data.memory_read_only) {
+ _mesa_glsl_error(loc, state,
+ "function call parameter `%s' drops "
+ "`readonly' qualifier", formal->name);
+ return false;
+ }
+
+ if (actual->data.memory_write_only && !formal->data.memory_write_only) {
+ _mesa_glsl_error(loc, state,
+ "function call parameter `%s' drops "
+ "`writeonly' qualifier", formal->name);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+verify_first_atomic_parameter(YYLTYPE *loc, _mesa_glsl_parse_state *state,
+ ir_variable *var)
+{
+ if (!var ||
+ (!var->is_in_shader_storage_block() &&
+ var->data.mode != ir_var_shader_shared)) {
+ _mesa_glsl_error(loc, state, "First argument to atomic function "
+ "must be a buffer or shared variable");
+ return false;
+ }
+ return true;
+}
+
+static bool
+is_atomic_function(const char *func_name)
+{
+ return !strcmp(func_name, "atomicAdd") ||
+ !strcmp(func_name, "atomicMin") ||
+ !strcmp(func_name, "atomicMax") ||
+ !strcmp(func_name, "atomicAnd") ||
+ !strcmp(func_name, "atomicOr") ||
+ !strcmp(func_name, "atomicXor") ||
+ !strcmp(func_name, "atomicExchange") ||
+ !strcmp(func_name, "atomicCompSwap");
+}
+
+/**
+ * Verify that 'out' and 'inout' actual parameters are lvalues. Also, verify
+ * that 'const_in' formal parameters (an extension in our IR) correspond to
+ * ir_constant actual parameters.
+ */
+static bool
+verify_parameter_modes(_mesa_glsl_parse_state *state,
+ ir_function_signature *sig,
+ exec_list &actual_ir_parameters,
+ exec_list &actual_ast_parameters)
+{
+ exec_node *actual_ir_node = actual_ir_parameters.get_head_raw();
+ exec_node *actual_ast_node = actual_ast_parameters.get_head_raw();
+
+ foreach_in_list(const ir_variable, formal, &sig->parameters) {
+ /* The lists must be the same length. */
+ assert(!actual_ir_node->is_tail_sentinel());
+ assert(!actual_ast_node->is_tail_sentinel());
+
+ const ir_rvalue *const actual = (ir_rvalue *) actual_ir_node;
+ const ast_expression *const actual_ast =
+ exec_node_data(ast_expression, actual_ast_node, link);
+
+      /* FIXME: 'loc' is incorrect (as of 2011-01-21). It is always
+       * 0:0(0).
+ */
+ YYLTYPE loc = actual_ast->get_location();
+
+ /* Verify that 'const_in' parameters are ir_constants. */
+ if (formal->data.mode == ir_var_const_in &&
+ actual->ir_type != ir_type_constant) {
+ _mesa_glsl_error(&loc, state,
+ "parameter `in %s' must be a constant expression",
+ formal->name);
+ return false;
+ }
+
+ /* Verify that shader_in parameters are shader inputs */
+ if (formal->data.must_be_shader_input) {
+ const ir_rvalue *val = actual;
+
+ /* GLSL 4.40 allows swizzles, while earlier GLSL versions do not. */
+ if (val->ir_type == ir_type_swizzle) {
+ if (!state->is_version(440, 0)) {
+ _mesa_glsl_error(&loc, state,
+ "parameter `%s` must not be swizzled",
+ formal->name);
+ return false;
+ }
+ val = ((ir_swizzle *)val)->val;
+ }
+
+ for (;;) {
+ if (val->ir_type == ir_type_dereference_array) {
+ val = ((ir_dereference_array *)val)->array;
+ } else if (val->ir_type == ir_type_dereference_record &&
+ !state->es_shader) {
+ val = ((ir_dereference_record *)val)->record;
+ } else
+ break;
+ }
+
+ ir_variable *var = NULL;
+ if (const ir_dereference_variable *deref_var = val->as_dereference_variable())
+ var = deref_var->variable_referenced();
+
+ if (!var || var->data.mode != ir_var_shader_in) {
+ _mesa_glsl_error(&loc, state,
+ "parameter `%s` must be a shader input",
+ formal->name);
+ return false;
+ }
+
+ var->data.must_be_shader_input = 1;
+ }
+
+ /* Verify that 'out' and 'inout' actual parameters are lvalues. */
+ if (formal->data.mode == ir_var_function_out
+ || formal->data.mode == ir_var_function_inout) {
+ const char *mode = NULL;
+ switch (formal->data.mode) {
+ case ir_var_function_out: mode = "out"; break;
+ case ir_var_function_inout: mode = "inout"; break;
+ default: assert(false); break;
+ }
+
+ /* This AST-based check catches errors like f(i++). The IR-based
+ * is_lvalue() is insufficient because the actual parameter at the
+ * IR-level is just a temporary value, which is an l-value.
+ */
+ if (actual_ast->non_lvalue_description != NULL) {
+ _mesa_glsl_error(&loc, state,
+ "function parameter '%s %s' references a %s",
+ mode, formal->name,
+ actual_ast->non_lvalue_description);
+ return false;
+ }
+
+ ir_variable *var = actual->variable_referenced();
+
+ if (var && formal->data.mode == ir_var_function_inout) {
+ if ((var->data.mode == ir_var_auto ||
+ var->data.mode == ir_var_shader_out) &&
+ !var->data.assigned &&
+ !is_gl_identifier(var->name)) {
+ _mesa_glsl_warning(&loc, state, "`%s' used uninitialized",
+ var->name);
+ }
+ }
+
+ if (var)
+ var->data.assigned = true;
+
+ if (var && var->data.read_only) {
+ _mesa_glsl_error(&loc, state,
+ "function parameter '%s %s' references the "
+ "read-only variable '%s'",
+ mode, formal->name,
+ actual->variable_referenced()->name);
+ return false;
+ } else if (!actual->is_lvalue(state)) {
+ _mesa_glsl_error(&loc, state,
+ "function parameter '%s %s' is not an lvalue",
+ mode, formal->name);
+ return false;
+ }
+ } else {
+ assert(formal->data.mode == ir_var_function_in ||
+ formal->data.mode == ir_var_const_in);
+ ir_variable *var = actual->variable_referenced();
+ if (var) {
+ if ((var->data.mode == ir_var_auto ||
+ var->data.mode == ir_var_shader_out) &&
+ !var->data.assigned &&
+ !is_gl_identifier(var->name)) {
+ _mesa_glsl_warning(&loc, state, "`%s' used uninitialized",
+ var->name);
+ }
+ }
+ }
+
+ if (formal->type->is_image() &&
+ actual->variable_referenced()) {
+ if (!verify_image_parameter(&loc, state, formal,
+ actual->variable_referenced()))
+ return false;
+ }
+
+ actual_ir_node = actual_ir_node->next;
+ actual_ast_node = actual_ast_node->next;
+ }
+
+ /* The first parameter of atomic functions must be a buffer variable */
+ const char *func_name = sig->function_name();
+ bool is_atomic = is_atomic_function(func_name);
+ if (is_atomic) {
+ const ir_rvalue *const actual =
+ (ir_rvalue *) actual_ir_parameters.get_head_raw();
+
+ const ast_expression *const actual_ast =
+ exec_node_data(ast_expression,
+ actual_ast_parameters.get_head_raw(), link);
+ YYLTYPE loc = actual_ast->get_location();
+
+ if (!verify_first_atomic_parameter(&loc, state,
+ actual->variable_referenced())) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+struct copy_index_deref_data {
+ void *mem_ctx;
+ exec_list *before_instructions;
+};
+
+static void
+copy_index_derefs_to_temps(ir_instruction *ir, void *data)
+{
+ struct copy_index_deref_data *d = (struct copy_index_deref_data *)data;
+
+ if (ir->ir_type == ir_type_dereference_array) {
+ ir_dereference_array *a = (ir_dereference_array *) ir;
+ ir = a->array->as_dereference();
+
+ ir_rvalue *idx = a->array_index;
+ ir_variable *var = idx->variable_referenced();
+
+      /* If the index is read-only, it cannot change, so there is no need
+       * to copy it.
+ */
+ if (!var || var->data.read_only || var->data.memory_read_only)
+ return;
+
+ ir_variable *tmp = new(d->mem_ctx) ir_variable(idx->type, "idx_tmp",
+ ir_var_temporary);
+ d->before_instructions->push_tail(tmp);
+
+ ir_dereference_variable *const deref_tmp_1 =
+ new(d->mem_ctx) ir_dereference_variable(tmp);
+ ir_assignment *const assignment =
+ new(d->mem_ctx) ir_assignment(deref_tmp_1,
+ idx->clone(d->mem_ctx, NULL));
+ d->before_instructions->push_tail(assignment);
+
+ /* Replace the array index with a dereference of the new temporary */
+ ir_dereference_variable *const deref_tmp_2 =
+ new(d->mem_ctx) ir_dereference_variable(tmp);
+ a->array_index = deref_tmp_2;
+ }
+}
+
+static void
+fix_parameter(void *mem_ctx, ir_rvalue *actual, const glsl_type *formal_type,
+ exec_list *before_instructions, exec_list *after_instructions,
+ bool parameter_is_inout)
+{
+ ir_expression *const expr = actual->as_expression();
+
+ /* If the types match exactly and the parameter is not a vector-extract,
+ * nothing needs to be done to fix the parameter.
+ */
+ if (formal_type == actual->type
+ && (expr == NULL || expr->operation != ir_binop_vector_extract)
+ && actual->as_dereference_variable())
+ return;
+
+   /* An array index could also be an 'out' variable, so we need to make a
+    * copy of it before the function is called.
+ */
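+   /* For example, with "void f(out int i, out float x)" called as
+    * "f(i, a[i])", the write-back to a[...] after the call must use the
+    * value that i had before the call; visit_tree() below snapshots such
+    * indices into temporaries via copy_index_derefs_to_temps().
+    */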
+ if (!actual->as_dereference_variable()) {
+ struct copy_index_deref_data data;
+ data.mem_ctx = mem_ctx;
+ data.before_instructions = before_instructions;
+
+ visit_tree(actual, copy_index_derefs_to_temps, &data);
+ }
+
+ /* To convert an out parameter, we need to create a temporary variable to
+ * hold the value before conversion, and then perform the conversion after
+ * the function call returns.
+ *
+ * This has the effect of transforming code like this:
+ *
+ * void f(out int x);
+ * float value;
+ * f(value);
+ *
+ * Into IR that's equivalent to this:
+ *
+ * void f(out int x);
+ * float value;
+ * int out_parameter_conversion;
+ * f(out_parameter_conversion);
+ * value = float(out_parameter_conversion);
+ *
+ * If the parameter is an ir_expression of ir_binop_vector_extract,
+ * additional conversion is needed in the post-call re-write.
+ */
+ ir_variable *tmp =
+ new(mem_ctx) ir_variable(formal_type, "inout_tmp", ir_var_temporary);
+
+ before_instructions->push_tail(tmp);
+
+ /* If the parameter is an inout parameter, copy the value of the actual
+ * parameter to the new temporary. Note that no type conversion is allowed
+ * here because inout parameters must match types exactly.
+ */
+ if (parameter_is_inout) {
+ /* Inout parameters should never require conversion, since that would
+ * require an implicit conversion to exist both to and from the formal
+ * parameter type, and there are no bidirectional implicit conversions.
+ */
+ assert (actual->type == formal_type);
+
+ ir_dereference_variable *const deref_tmp_1 =
+ new(mem_ctx) ir_dereference_variable(tmp);
+ ir_assignment *const assignment =
+ new(mem_ctx) ir_assignment(deref_tmp_1, actual->clone(mem_ctx, NULL));
+ before_instructions->push_tail(assignment);
+ }
+
+ /* Replace the parameter in the call with a dereference of the new
+ * temporary.
+ */
+ ir_dereference_variable *const deref_tmp_2 =
+ new(mem_ctx) ir_dereference_variable(tmp);
+ actual->replace_with(deref_tmp_2);
+
+
+ /* Copy the temporary variable to the actual parameter with optional
+ * type conversion applied.
+ */
+ ir_rvalue *rhs = new(mem_ctx) ir_dereference_variable(tmp);
+ if (actual->type != formal_type)
+ rhs = convert_component(rhs, actual->type);
+
+ ir_rvalue *lhs = actual;
+ if (expr != NULL && expr->operation == ir_binop_vector_extract) {
+ lhs = new(mem_ctx) ir_dereference_array(expr->operands[0]->clone(mem_ctx,
+ NULL),
+ expr->operands[1]->clone(mem_ctx,
+ NULL));
+ }
+
+ ir_assignment *const assignment_2 = new(mem_ctx) ir_assignment(lhs, rhs);
+ after_instructions->push_tail(assignment_2);
+}
+
+/**
+ * Generate a function call.
+ *
+ * For non-void functions, this returns a dereference of the temporary
+ * variable which stores the return value for the call. For void functions,
+ * this returns NULL.
+ */
+static ir_rvalue *
+generate_call(exec_list *instructions, ir_function_signature *sig,
+ exec_list *actual_parameters,
+ ir_variable *sub_var,
+ ir_rvalue *array_idx,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ exec_list post_call_conversions;
+
+ /* Perform implicit conversion of arguments. For out parameters, we need
+ * to place them in a temporary variable and do the conversion after the
+ * call takes place. Since we haven't emitted the call yet, we'll place
+ * the post-call conversions in a temporary exec_list, and emit them later.
+ */
+ foreach_two_lists(formal_node, &sig->parameters,
+ actual_node, actual_parameters) {
+ ir_rvalue *actual = (ir_rvalue *) actual_node;
+ ir_variable *formal = (ir_variable *) formal_node;
+
+ if (formal->type->is_numeric() || formal->type->is_boolean()) {
+ switch (formal->data.mode) {
+ case ir_var_const_in:
+ case ir_var_function_in: {
+ ir_rvalue *converted
+ = convert_component(actual, formal->type);
+ actual->replace_with(converted);
+ break;
+ }
+ case ir_var_function_out:
+ case ir_var_function_inout:
+ fix_parameter(ctx, actual, formal->type,
+ instructions, &post_call_conversions,
+ formal->data.mode == ir_var_function_inout);
+ break;
+ default:
+ assert (!"Illegal formal parameter mode");
+ break;
+ }
+ }
+ }
+
+ /* Section 4.3.2 (Const) of the GLSL 1.10.59 spec says:
+ *
+ * "Initializers for const declarations must be formed from literal
+ * values, other const variables (not including function call
+ * paramaters), or expressions of these.
+ *
+ * Constructors may be used in such expressions, but function calls may
+ * not."
+ *
+ * Section 4.3.3 (Constant Expressions) of the GLSL 1.20.8 spec says:
+ *
+ * "A constant expression is one of
+ *
+ * ...
+ *
+ * - a built-in function call whose arguments are all constant
+ * expressions, with the exception of the texture lookup
+ * functions, the noise functions, and ftransform. The built-in
+ * functions dFdx, dFdy, and fwidth must return 0 when evaluated
+ * inside an initializer with an argument that is a constant
+ * expression."
+ *
+ * Section 5.10 (Constant Expressions) of the GLSL ES 1.00.17 spec says:
+ *
+ * "A constant expression is one of
+ *
+ * ...
+ *
+ * - a built-in function call whose arguments are all constant
+ * expressions, with the exception of the texture lookup
+ * functions."
+ *
+ * Section 4.3.3 (Constant Expressions) of the GLSL ES 3.00.4 spec says:
+ *
+ * "A constant expression is one of
+ *
+ * ...
+ *
+ * - a built-in function call whose arguments are all constant
+ * expressions, with the exception of the texture lookup
+ * functions. The built-in functions dFdx, dFdy, and fwidth must
+ * return 0 when evaluated inside an initializer with an argument
+ * that is a constant expression."
+ *
+ * If the function call is a constant expression, don't generate any
+ * instructions; just generate an ir_constant.
+ */
+ if (state->is_version(120, 100) ||
+ state->ctx->Const.AllowGLSLBuiltinConstantExpression) {
+ ir_constant *value = sig->constant_expression_value(ctx,
+ actual_parameters,
+ NULL);
+ if (value != NULL) {
+ return value;
+ }
+ }
+
+ ir_dereference_variable *deref = NULL;
+ if (!sig->return_type->is_void()) {
+ /* Create a new temporary to hold the return value. */
+ char *const name = ir_variable::temporaries_allocate_names
+ ? ralloc_asprintf(ctx, "%s_retval", sig->function_name())
+ : NULL;
+
+ ir_variable *var;
+
+ var = new(ctx) ir_variable(sig->return_type, name, ir_var_temporary);
+ instructions->push_tail(var);
+
+ ralloc_free(name);
+
+ deref = new(ctx) ir_dereference_variable(var);
+ }
+
+ ir_call *call = new(ctx) ir_call(sig, deref,
+ actual_parameters, sub_var, array_idx);
+ instructions->push_tail(call);
+
+ /* Also emit any necessary out-parameter conversions. */
+ instructions->append_list(&post_call_conversions);
+
+ return deref ? deref->clone(ctx, NULL) : NULL;
+}
+
+/**
+ * Given a function name and parameter list, find the matching signature.
+ */
+static ir_function_signature *
+match_function_by_name(const char *name,
+ exec_list *actual_parameters,
+ struct _mesa_glsl_parse_state *state)
+{
+ ir_function *f = state->symbols->get_function(name);
+ ir_function_signature *local_sig = NULL;
+ ir_function_signature *sig = NULL;
+
+ /* Is the function hidden by a record type constructor? */
+ if (state->symbols->get_type(name))
+ return sig; /* no match */
+
+ /* Is the function hidden by a variable (impossible in 1.10)? */
+ if (!state->symbols->separate_function_namespace
+ && state->symbols->get_variable(name))
+ return sig; /* no match */
+
+ if (f != NULL) {
+ /* In desktop GL, the presence of a user-defined signature hides any
+ * built-in signatures, so we must ignore them. In contrast, in ES2
+ * user-defined signatures add new overloads, so we must consider them.
+ */
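+      /* For example (illustrative GLSL), a shader declaring the overload
+       * 'float max(float a, float b, float c);' hides every built-in max()
+       * signature on desktop GL, while in ES the built-in two-argument
+       * max() remains visible alongside the new overload.
+       */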
+ bool allow_builtins = state->es_shader || !f->has_user_signature();
+
+ /* Look for a match in the local shader. If exact, we're done. */
+ bool is_exact = false;
+ sig = local_sig = f->matching_signature(state, actual_parameters,
+ allow_builtins, &is_exact);
+ if (is_exact)
+ return sig;
+
+ if (!allow_builtins)
+ return sig;
+ }
+
+ /* Local shader has no exact candidates; check the built-ins. */
+ sig = _mesa_glsl_find_builtin_function(state, name, actual_parameters);
+
+   /* If _mesa_glsl_find_builtin_function() failed, fall back to the
+    * inexact local match found above (the result of
+    * choose_best_inexact_overload()). This should only affect GLES.
+ */
+ return sig ? sig : local_sig;
+}
+
+static ir_function_signature *
+match_subroutine_by_name(const char *name,
+ exec_list *actual_parameters,
+ struct _mesa_glsl_parse_state *state,
+ ir_variable **var_r)
+{
+ void *ctx = state;
+ ir_function_signature *sig = NULL;
+ ir_function *f, *found = NULL;
+ const char *new_name;
+ ir_variable *var;
+ bool is_exact = false;
+
+ new_name =
+ ralloc_asprintf(ctx, "%s_%s",
+ _mesa_shader_stage_to_subroutine_prefix(state->stage),
+ name);
+ var = state->symbols->get_variable(new_name);
+ if (!var)
+ return NULL;
+
+ for (int i = 0; i < state->num_subroutine_types; i++) {
+ f = state->subroutine_types[i];
+ if (strcmp(f->name, var->type->without_array()->name))
+ continue;
+ found = f;
+ break;
+ }
+
+ if (!found)
+ return NULL;
+ *var_r = var;
+ sig = found->matching_signature(state, actual_parameters,
+ false, &is_exact);
+ return sig;
+}
+
+static ir_rvalue *
+generate_array_index(void *mem_ctx, exec_list *instructions,
+ struct _mesa_glsl_parse_state *state, YYLTYPE loc,
+ const ast_expression *array, ast_expression *idx,
+ const char **function_name, exec_list *actual_parameters)
+{
+ if (array->oper == ast_array_index) {
+ /* This handles arrays of arrays */
+ ir_rvalue *outer_array = generate_array_index(mem_ctx, instructions,
+ state, loc,
+ array->subexpressions[0],
+ array->subexpressions[1],
+ function_name,
+ actual_parameters);
+ ir_rvalue *outer_array_idx = idx->hir(instructions, state);
+
+ YYLTYPE index_loc = idx->get_location();
+ return _mesa_ast_array_index_to_hir(mem_ctx, state, outer_array,
+ outer_array_idx, loc,
+ index_loc);
+ } else {
+ ir_variable *sub_var = NULL;
+ *function_name = array->primary_expression.identifier;
+
+ if (!match_subroutine_by_name(*function_name, actual_parameters,
+ state, &sub_var)) {
+ _mesa_glsl_error(&loc, state, "Unknown subroutine `%s'",
+ *function_name);
+ *function_name = NULL; /* indicate error condition to caller */
+ return NULL;
+ }
+
+ ir_rvalue *outer_array_idx = idx->hir(instructions, state);
+ return new(mem_ctx) ir_dereference_array(sub_var, outer_array_idx);
+ }
+}
+
+static bool
+function_exists(_mesa_glsl_parse_state *state,
+ struct glsl_symbol_table *symbols, const char *name)
+{
+ ir_function *f = symbols->get_function(name);
+ if (f != NULL) {
+ foreach_in_list(ir_function_signature, sig, &f->signatures) {
+ if (sig->is_builtin() && !sig->is_builtin_available(state))
+ continue;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+print_function_prototypes(_mesa_glsl_parse_state *state, YYLTYPE *loc,
+ ir_function *f)
+{
+ if (f == NULL)
+ return;
+
+ foreach_in_list(ir_function_signature, sig, &f->signatures) {
+ if (sig->is_builtin() && !sig->is_builtin_available(state))
+ continue;
+
+ char *str = prototype_string(sig->return_type, f->name,
+ &sig->parameters);
+ _mesa_glsl_error(loc, state, " %s", str);
+ ralloc_free(str);
+ }
+}
+
+/**
+ * Raise a "no matching function" error, listing all possible overloads the
+ * compiler considered so developers can figure out what went wrong.
+ */
+static void
+no_matching_function_error(const char *name,
+ YYLTYPE *loc,
+ exec_list *actual_parameters,
+ _mesa_glsl_parse_state *state)
+{
+ gl_shader *sh = _mesa_glsl_get_builtin_function_shader();
+
+ if (!function_exists(state, state->symbols, name)
+ && (!state->uses_builtin_functions
+ || !function_exists(state, sh->symbols, name))) {
+ _mesa_glsl_error(loc, state, "no function with name '%s'", name);
+ } else {
+ char *str = prototype_string(NULL, name, actual_parameters);
+ _mesa_glsl_error(loc, state,
+ "no matching function for call to `%s';"
+ " candidates are:",
+ str);
+ ralloc_free(str);
+
+ print_function_prototypes(state, loc,
+ state->symbols->get_function(name));
+
+ if (state->uses_builtin_functions) {
+ print_function_prototypes(state, loc,
+ sh->symbols->get_function(name));
+ }
+ }
+}
+
+/**
+ * Perform automatic type conversion of constructor parameters
+ *
+ * This implements the rules in the "Conversion and Scalar Constructors"
+ * section (GLSL 1.10 section 5.4.1), not the "Implicit Conversions" rules.
+ */
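+/* For example (illustrative GLSL), 'int(2.7)' drops the fractional part and
+ * yields 2, and 'bool(0.0)' yields false, per the scalar constructor rules.
+ */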
+static ir_rvalue *
+convert_component(ir_rvalue *src, const glsl_type *desired_type)
+{
+ void *ctx = ralloc_parent(src);
+ const unsigned a = desired_type->base_type;
+ const unsigned b = src->type->base_type;
+ ir_expression *result = NULL;
+
+ if (src->type->is_error())
+ return src;
+
+ assert(a <= GLSL_TYPE_IMAGE);
+ assert(b <= GLSL_TYPE_IMAGE);
+
+ if (a == b)
+ return src;
+
+ switch (a) {
+ case GLSL_TYPE_UINT:
+ switch (b) {
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2u, src);
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2u, src);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_i2u,
+ new(ctx) ir_expression(ir_unop_b2i,
+ src));
+ break;
+ case GLSL_TYPE_DOUBLE:
+ result = new(ctx) ir_expression(ir_unop_d2u, src);
+ break;
+ case GLSL_TYPE_UINT64:
+ result = new(ctx) ir_expression(ir_unop_u642u, src);
+ break;
+ case GLSL_TYPE_INT64:
+ result = new(ctx) ir_expression(ir_unop_i642u, src);
+ break;
+ case GLSL_TYPE_SAMPLER:
+ result = new(ctx) ir_expression(ir_unop_unpack_sampler_2x32, src);
+ break;
+ case GLSL_TYPE_IMAGE:
+ result = new(ctx) ir_expression(ir_unop_unpack_image_2x32, src);
+ break;
+ }
+ break;
+ case GLSL_TYPE_INT:
+ switch (b) {
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_u2i, src);
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2i, src);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_b2i, src);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ result = new(ctx) ir_expression(ir_unop_d2i, src);
+ break;
+ case GLSL_TYPE_UINT64:
+ result = new(ctx) ir_expression(ir_unop_u642i, src);
+ break;
+ case GLSL_TYPE_INT64:
+ result = new(ctx) ir_expression(ir_unop_i642i, src);
+ break;
+ }
+ break;
+ case GLSL_TYPE_FLOAT:
+ switch (b) {
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_u2f, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2f, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_b2f, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ result = new(ctx) ir_expression(ir_unop_d2f, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_UINT64:
+ result = new(ctx) ir_expression(ir_unop_u642f, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_INT64:
+ result = new(ctx) ir_expression(ir_unop_i642f, desired_type, src, NULL);
+ break;
+ }
+ break;
+ case GLSL_TYPE_BOOL:
+ switch (b) {
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_i2b,
+ new(ctx) ir_expression(ir_unop_u2i,
+ src));
+ break;
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2b, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2b, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ result = new(ctx) ir_expression(ir_unop_d2b, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_UINT64:
+ result = new(ctx) ir_expression(ir_unop_i642b,
+ new(ctx) ir_expression(ir_unop_u642i64,
+ src));
+ break;
+ case GLSL_TYPE_INT64:
+ result = new(ctx) ir_expression(ir_unop_i642b, desired_type, src, NULL);
+ break;
+ }
+ break;
+ case GLSL_TYPE_DOUBLE:
+ switch (b) {
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2d, src);
+ break;
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_u2d, src);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_f2d,
+ new(ctx) ir_expression(ir_unop_b2f,
+ src));
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2d, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_UINT64:
+ result = new(ctx) ir_expression(ir_unop_u642d, desired_type, src, NULL);
+ break;
+ case GLSL_TYPE_INT64:
+ result = new(ctx) ir_expression(ir_unop_i642d, desired_type, src, NULL);
+ break;
+ }
+ break;
+ case GLSL_TYPE_UINT64:
+ switch (b) {
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2u64, src);
+ break;
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_u2u64, src);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_i642u64,
+ new(ctx) ir_expression(ir_unop_b2i64,
+ src));
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2u64, src);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ result = new(ctx) ir_expression(ir_unop_d2u64, src);
+ break;
+ case GLSL_TYPE_INT64:
+ result = new(ctx) ir_expression(ir_unop_i642u64, src);
+ break;
+ }
+ break;
+ case GLSL_TYPE_INT64:
+ switch (b) {
+ case GLSL_TYPE_INT:
+ result = new(ctx) ir_expression(ir_unop_i2i64, src);
+ break;
+ case GLSL_TYPE_UINT:
+ result = new(ctx) ir_expression(ir_unop_u2i64, src);
+ break;
+ case GLSL_TYPE_BOOL:
+ result = new(ctx) ir_expression(ir_unop_b2i64, src);
+ break;
+ case GLSL_TYPE_FLOAT:
+ result = new(ctx) ir_expression(ir_unop_f2i64, src);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ result = new(ctx) ir_expression(ir_unop_d2i64, src);
+ break;
+ case GLSL_TYPE_UINT64:
+ result = new(ctx) ir_expression(ir_unop_u642i64, src);
+ break;
+ }
+ break;
+ case GLSL_TYPE_SAMPLER:
+ switch (b) {
+ case GLSL_TYPE_UINT:
+ result = new(ctx)
+ ir_expression(ir_unop_pack_sampler_2x32, desired_type, src);
+ break;
+ }
+ break;
+ case GLSL_TYPE_IMAGE:
+ switch (b) {
+ case GLSL_TYPE_UINT:
+ result = new(ctx)
+ ir_expression(ir_unop_pack_image_2x32, desired_type, src);
+ break;
+ }
+ break;
+ }
+
+ assert(result != NULL);
+ assert(result->type == desired_type);
+
+ /* Try constant folding; it may fold in the conversion we just added. */
+ ir_constant *const constant = result->constant_expression_value(ctx);
+ return (constant != NULL) ? (ir_rvalue *) constant : (ir_rvalue *) result;
+}
+
+
+/**
+ * Perform automatic type and constant conversion of constructor parameters
+ *
+ * This implements the "Implicit Conversions" rules, not the "Conversion
+ * and Scalar Constructors" rules.
+ *
+ * After attempting the implicit conversion, an attempt to convert into a
+ * constant valued expression is also done.
+ *
+ * The \c from \c ir_rvalue is converted "in place".
+ *
+ * \param from Operand that is being converted
+ * \param to Base type the operand will be converted to
+ * \param state GLSL compiler state
+ *
+ * \return
+ * If the attempt to convert into a constant expression succeeds, \c true is
+ * returned. Otherwise \c false is returned.
+ */
+static bool
+implicitly_convert_component(ir_rvalue * &from, const glsl_base_type to,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *mem_ctx = state;
+ ir_rvalue *result = from;
+
+ if (to != from->type->base_type) {
+ const glsl_type *desired_type =
+ glsl_type::get_instance(to,
+ from->type->vector_elements,
+ from->type->matrix_columns);
+
+ if (from->type->can_implicitly_convert_to(desired_type, state)) {
+ /* Even though convert_component() implements the constructor
+ * conversion rules (not the implicit conversion rules), its safe
+ * to use it here because we already checked that the implicit
+ * conversion is legal.
+ */
+ result = convert_component(from, desired_type);
+ }
+ }
+
+ ir_rvalue *const constant = result->constant_expression_value(mem_ctx);
+
+ if (constant != NULL)
+ result = constant;
+
+ if (from != result) {
+ from->replace_with(result);
+ from = result;
+ }
+
+ return constant != NULL;
+}
+
+
+/**
+ * Dereference a specific component from a scalar, vector, or matrix
+ */
+static ir_rvalue *
+dereference_component(ir_rvalue *src, unsigned component)
+{
+ void *ctx = ralloc_parent(src);
+ assert(component < src->type->components());
+
+ /* If the source is a constant, just create a new constant instead of a
+ * dereference of the existing constant.
+ */
+ ir_constant *constant = src->as_constant();
+ if (constant)
+ return new(ctx) ir_constant(constant, component);
+
+ if (src->type->is_scalar()) {
+ return src;
+ } else if (src->type->is_vector()) {
+ return new(ctx) ir_swizzle(src, component, 0, 0, 0, 1);
+ } else {
+ assert(src->type->is_matrix());
+
+      /* Dereference a column of the matrix, then call this function again
+       * to get a specific element from that column.
+ */
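+      /* For example, component 5 of a mat3 (three rows per column) lives in
+       * column c = 5 / 3 = 1 at row r = 5 % 3 = 2.
+       */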
+ const int c = component / src->type->column_type()->vector_elements;
+ const int r = component % src->type->column_type()->vector_elements;
+ ir_constant *const col_index = new(ctx) ir_constant(c);
+ ir_dereference *const col = new(ctx) ir_dereference_array(src,
+ col_index);
+
+ col->type = src->type->column_type();
+
+ return dereference_component(col, r);
+ }
+
+ assert(!"Should not get here.");
+ return NULL;
+}
+
+
+static ir_rvalue *
+process_vec_mat_constructor(exec_list *instructions,
+ const glsl_type *constructor_type,
+ YYLTYPE *loc, exec_list *parameters,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ /* The ARB_shading_language_420pack spec says:
+ *
+ * "If an initializer is a list of initializers enclosed in curly braces,
+ * the variable being declared must be a vector, a matrix, an array, or a
+ * structure.
+ *
+ * int i = { 1 }; // illegal, i is not an aggregate"
+ */
+ if (constructor_type->vector_elements <= 1) {
+ _mesa_glsl_error(loc, state, "aggregates can only initialize vectors, "
+ "matrices, arrays, and structs");
+ return ir_rvalue::error_value(ctx);
+ }
+
+ exec_list actual_parameters;
+ const unsigned parameter_count =
+ process_parameters(instructions, &actual_parameters, parameters, state);
+
+ if (parameter_count == 0
+ || (constructor_type->is_vector() &&
+ constructor_type->vector_elements != parameter_count)
+ || (constructor_type->is_matrix() &&
+ constructor_type->matrix_columns != parameter_count)) {
+ _mesa_glsl_error(loc, state, "%s constructor must have %u parameters",
+ constructor_type->is_vector() ? "vector" : "matrix",
+ constructor_type->vector_elements);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ bool all_parameters_are_constant = true;
+
+ /* Type cast each parameter and, if possible, fold constants. */
+ foreach_in_list_safe(ir_rvalue, ir, &actual_parameters) {
+ /* Apply implicit conversions (not the scalar constructor rules, see the
+ * spec quote above!) and attempt to convert the parameter to a constant
+ * valued expression. After doing so, track whether or not all the
+ * parameters to the constructor are trivially constant valued
+ * expressions.
+ */
+ all_parameters_are_constant &=
+ implicitly_convert_component(ir, constructor_type->base_type, state);
+
+ if (constructor_type->is_matrix()) {
+ if (ir->type != constructor_type->column_type()) {
+ _mesa_glsl_error(loc, state, "type error in matrix constructor: "
+ "expected: %s, found %s",
+ constructor_type->column_type()->name,
+ ir->type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+ } else if (ir->type != constructor_type->get_scalar_type()) {
+ _mesa_glsl_error(loc, state, "type error in vector constructor: "
+ "expected: %s, found %s",
+ constructor_type->get_scalar_type()->name,
+ ir->type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+ }
+
+ if (all_parameters_are_constant)
+ return new(ctx) ir_constant(constructor_type, &actual_parameters);
+
+ ir_variable *var = new(ctx) ir_variable(constructor_type, "vec_mat_ctor",
+ ir_var_temporary);
+ instructions->push_tail(var);
+
+ int i = 0;
+
+ foreach_in_list(ir_rvalue, rhs, &actual_parameters) {
+ ir_instruction *assignment = NULL;
+
+ if (var->type->is_matrix()) {
+ ir_rvalue *lhs =
+ new(ctx) ir_dereference_array(var, new(ctx) ir_constant(i));
+ assignment = new(ctx) ir_assignment(lhs, rhs);
+ } else {
+ /* use writemask rather than index for vector */
+ assert(var->type->is_vector());
+ assert(i < 4);
+ ir_dereference *lhs = new(ctx) ir_dereference_variable(var);
+ assignment = new(ctx) ir_assignment(lhs, rhs, NULL,
+ (unsigned)(1 << i));
+ }
+
+ instructions->push_tail(assignment);
+
+ i++;
+ }
+
+ return new(ctx) ir_dereference_variable(var);
+}
+
+
+static ir_rvalue *
+process_array_constructor(exec_list *instructions,
+ const glsl_type *constructor_type,
+ YYLTYPE *loc, exec_list *parameters,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ /* Array constructors come in two forms: sized and unsized. Sized array
+ * constructors look like 'vec4[2](a, b)', where 'a' and 'b' are vec4
+ * variables. In this case the number of parameters must exactly match the
+ * specified size of the array.
+ *
+ * Unsized array constructors look like 'vec4[](a, b)', where 'a' and 'b'
+ * are vec4 variables. In this case the size of the array being constructed
+ * is determined by the number of parameters.
+ *
+ * From page 52 (page 58 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "There must be exactly the same number of arguments as the size of
+ * the array being constructed. If no size is present in the
+ * constructor, then the array is explicitly sized to the number of
+ * arguments provided. The arguments are assigned in order, starting at
+ * element 0, to the elements of the constructed array. Each argument
+ * must be the same type as the element type of the array, or be a type
+ * that can be converted to the element type of the array according to
+ * Section 4.1.10 "Implicit Conversions.""
+ */
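+   /* For example (illustrative GLSL), 'float[](1.0, 2.0, 3.0)' constructs a
+    * float[3], while 'float[2](1.0, 2.0, 3.0)' is an error because the
+    * argument count does not match the declared size.
+    */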
+ exec_list actual_parameters;
+ const unsigned parameter_count =
+ process_parameters(instructions, &actual_parameters, parameters, state);
+ bool is_unsized_array = constructor_type->is_unsized_array();
+
+ if ((parameter_count == 0) ||
+ (!is_unsized_array && (constructor_type->length != parameter_count))) {
+ const unsigned min_param = is_unsized_array
+ ? 1 : constructor_type->length;
+
+ _mesa_glsl_error(loc, state, "array constructor must have %s %u "
+ "parameter%s",
+ is_unsized_array ? "at least" : "exactly",
+ min_param, (min_param <= 1) ? "" : "s");
+ return ir_rvalue::error_value(ctx);
+ }
+
+ if (is_unsized_array) {
+ constructor_type =
+ glsl_type::get_array_instance(constructor_type->fields.array,
+ parameter_count);
+ assert(constructor_type != NULL);
+ assert(constructor_type->length == parameter_count);
+ }
+
+ bool all_parameters_are_constant = true;
+ const glsl_type *element_type = constructor_type->fields.array;
+
+ /* Type cast each parameter and, if possible, fold constants. */
+ foreach_in_list_safe(ir_rvalue, ir, &actual_parameters) {
+ /* Apply implicit conversions (not the scalar constructor rules, see the
+ * spec quote above!) and attempt to convert the parameter to a constant
+ * valued expression. After doing so, track whether or not all the
+ * parameters to the constructor are trivially constant valued
+ * expressions.
+ */
+ all_parameters_are_constant &=
+ implicitly_convert_component(ir, element_type->base_type, state);
+
+ if (constructor_type->fields.array->is_unsized_array()) {
+ /* As the inner parameters of the constructor are created without
+ * knowledge of each other we need to check to make sure unsized
+ * parameters of unsized constructors all end up with the same size.
+ *
+ * e.g we make sure to fail for a constructor like this:
+ * vec4[][] a = vec4[][](vec4[](vec4(0.0), vec4(1.0)),
+ * vec4[](vec4(0.0), vec4(1.0), vec4(1.0)),
+ * vec4[](vec4(0.0), vec4(1.0)));
+ */
+ if (element_type->is_unsized_array()) {
+ /* This is the first parameter so just get the type */
+ element_type = ir->type;
+ } else if (element_type != ir->type) {
+ _mesa_glsl_error(loc, state, "type error in array constructor: "
+ "expected: %s, found %s",
+ element_type->name,
+ ir->type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+ } else if (ir->type != constructor_type->fields.array) {
+ _mesa_glsl_error(loc, state, "type error in array constructor: "
+ "expected: %s, found %s",
+ constructor_type->fields.array->name,
+ ir->type->name);
+ return ir_rvalue::error_value(ctx);
+ } else {
+ element_type = ir->type;
+ }
+ }
+
+ if (constructor_type->fields.array->is_unsized_array()) {
+ constructor_type =
+ glsl_type::get_array_instance(element_type,
+ parameter_count);
+ assert(constructor_type != NULL);
+ assert(constructor_type->length == parameter_count);
+ }
+
+ if (all_parameters_are_constant)
+ return new(ctx) ir_constant(constructor_type, &actual_parameters);
+
+ ir_variable *var = new(ctx) ir_variable(constructor_type, "array_ctor",
+ ir_var_temporary);
+ instructions->push_tail(var);
+
+ int i = 0;
+ foreach_in_list(ir_rvalue, rhs, &actual_parameters) {
+ ir_rvalue *lhs = new(ctx) ir_dereference_array(var,
+ new(ctx) ir_constant(i));
+
+ ir_instruction *assignment = new(ctx) ir_assignment(lhs, rhs);
+ instructions->push_tail(assignment);
+
+ i++;
+ }
+
+ return new(ctx) ir_dereference_variable(var);
+}
+
+
+/**
+ * Determine if a list consists of a single scalar r-value
+ */
+static bool
+single_scalar_parameter(exec_list *parameters)
+{
+ const ir_rvalue *const p = (ir_rvalue *) parameters->get_head_raw();
+ assert(((ir_rvalue *)p)->as_rvalue() != NULL);
+
+ return (p->type->is_scalar() && p->next->is_tail_sentinel());
+}
+
+
+/**
+ * Generate inline code for a vector constructor
+ *
+ * The generated constructor code will consist of a temporary variable
+ * declaration of the same type as the constructor. A sequence of assignments
+ * from constructor parameters to the temporary will follow.
+ *
+ * \return
+ * An \c ir_dereference_variable of the temporary generated in the constructor
+ * body.
+ */
+static ir_rvalue *
+emit_inline_vector_constructor(const glsl_type *type,
+ exec_list *instructions,
+ exec_list *parameters,
+ void *ctx)
+{
+ assert(!parameters->is_empty());
+
+ ir_variable *var = new(ctx) ir_variable(type, "vec_ctor", ir_var_temporary);
+ instructions->push_tail(var);
+
+ /* There are three kinds of vector constructors.
+ *
+ * - Construct a vector from a single scalar by replicating that scalar to
+ * all components of the vector.
+ *
+    * - Construct a vector from a parameter list containing at least one
+    *   matrix. This case should already have been taken care of in
+    *   ast_function_expression::hir by breaking down the matrix into a
+    *   series of column vectors.
+    *
+    * - Construct a vector from an arbitrary combination of vectors and
+    *   scalars. The components of the constructor parameters are assigned
+    *   to the vector in order until the vector is full.
+ */
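+   /* Illustrative GLSL for these cases: 'vec3(1.0)' replicates the scalar,
+    * while 'vec4(v.xy, 1.0, 0.0)' fills components in order; a matrix
+    * argument has already been split into column vectors before we get
+    * here.
+    */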
+ const unsigned lhs_components = type->components();
+ if (single_scalar_parameter(parameters)) {
+ ir_rvalue *first_param = (ir_rvalue *)parameters->get_head_raw();
+ ir_rvalue *rhs = new(ctx) ir_swizzle(first_param, 0, 0, 0, 0,
+ lhs_components);
+ ir_dereference_variable *lhs = new(ctx) ir_dereference_variable(var);
+ const unsigned mask = (1U << lhs_components) - 1;
+
+ assert(rhs->type == lhs->type);
+
+ ir_instruction *inst = new(ctx) ir_assignment(lhs, rhs, NULL, mask);
+ instructions->push_tail(inst);
+ } else {
+ unsigned base_component = 0;
+ unsigned base_lhs_component = 0;
+ ir_constant_data data;
+ unsigned constant_mask = 0, constant_components = 0;
+
+ memset(&data, 0, sizeof(data));
+
+ foreach_in_list(ir_rvalue, param, parameters) {
+ unsigned rhs_components = param->type->components();
+
+ /* Do not try to assign more components to the vector than it has! */
+ if ((rhs_components + base_lhs_component) > lhs_components) {
+ rhs_components = lhs_components - base_lhs_component;
+ }
+
+ const ir_constant *const c = param->as_constant();
+ if (c != NULL) {
+ for (unsigned i = 0; i < rhs_components; i++) {
+ switch (c->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[i + base_component] = c->get_uint_component(i);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[i + base_component] = c->get_int_component(i);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[i + base_component] = c->get_float_component(i);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[i + base_component] = c->get_double_component(i);
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[i + base_component] = c->get_bool_component(i);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[i + base_component] = c->get_uint64_component(i);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[i + base_component] = c->get_int64_component(i);
+ break;
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+ }
+
+ /* Mask of fields to be written in the assignment. */
+ constant_mask |= ((1U << rhs_components) - 1) << base_lhs_component;
+ constant_components += rhs_components;
+
+ base_component += rhs_components;
+ }
+ /* Advance the component index by the number of components
+ * that were just assigned.
+ */
+ base_lhs_component += rhs_components;
+ }
+
+ if (constant_mask != 0) {
+ ir_dereference *lhs = new(ctx) ir_dereference_variable(var);
+ const glsl_type *rhs_type =
+ glsl_type::get_instance(var->type->base_type,
+ constant_components,
+ 1);
+ ir_rvalue *rhs = new(ctx) ir_constant(rhs_type, &data);
+
+ ir_instruction *inst =
+ new(ctx) ir_assignment(lhs, rhs, NULL, constant_mask);
+ instructions->push_tail(inst);
+ }
+
+ base_component = 0;
+ foreach_in_list(ir_rvalue, param, parameters) {
+ unsigned rhs_components = param->type->components();
+
+ /* Do not try to assign more components to the vector than it has! */
+ if ((rhs_components + base_component) > lhs_components) {
+ rhs_components = lhs_components - base_component;
+ }
+
+ /* If we do not have any components left to copy, break out of the
+ * loop. This can happen when initializing a vec4 with a mat3 as the
+ * mat3 would have been broken into a series of column vectors.
+ */
+ if (rhs_components == 0) {
+ break;
+ }
+
+ const ir_constant *const c = param->as_constant();
+ if (c == NULL) {
+ /* Mask of fields to be written in the assignment. */
+ const unsigned write_mask = ((1U << rhs_components) - 1)
+ << base_component;
+
+ ir_dereference *lhs = new(ctx) ir_dereference_variable(var);
+
+ /* Generate a swizzle so that LHS and RHS sizes match. */
+ ir_rvalue *rhs =
+ new(ctx) ir_swizzle(param, 0, 1, 2, 3, rhs_components);
+
+ ir_instruction *inst =
+ new(ctx) ir_assignment(lhs, rhs, NULL, write_mask);
+ instructions->push_tail(inst);
+ }
+
+ /* Advance the component index by the number of components that were
+ * just assigned.
+ */
+ base_component += rhs_components;
+ }
+ }
+ return new(ctx) ir_dereference_variable(var);
+}
+
+
+/**
+ * Generate assignment of a portion of a vector to a portion of a matrix column
+ *
+ * \param src_base First component of the source to be used in assignment
+ * \param column Column of destination to be assigned
+ * \param row_base First component of the destination column to be assigned
+ * \param count Number of components to be assigned
+ *
+ * \note
+ * \c src_base + \c count must be less than or equal to the number of
+ * components in the source vector.
+ */
+static ir_instruction *
+assign_to_matrix_column(ir_variable *var, unsigned column, unsigned row_base,
+ ir_rvalue *src, unsigned src_base, unsigned count,
+ void *mem_ctx)
+{
+ ir_constant *col_idx = new(mem_ctx) ir_constant(column);
+ ir_dereference *column_ref = new(mem_ctx) ir_dereference_array(var,
+ col_idx);
+
+ assert(column_ref->type->components() >= (row_base + count));
+ assert(src->type->components() >= (src_base + count));
+
+ /* Generate a swizzle that extracts the number of components from the source
+ * that are to be assigned to the column of the matrix.
+ */
+ if (count < src->type->vector_elements) {
+ src = new(mem_ctx) ir_swizzle(src,
+ src_base + 0, src_base + 1,
+ src_base + 2, src_base + 3,
+ count);
+ }
+
+ /* Mask of fields to be written in the assignment. */
+ const unsigned write_mask = ((1U << count) - 1) << row_base;
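+   /* For example, count = 2 and row_base = 1 give write_mask = 0b0110, so
+    * only the y and z components of the column are written.
+    */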
+
+ return new(mem_ctx) ir_assignment(column_ref, src, NULL, write_mask);
+}
+
+
+/**
+ * Generate inline code for a matrix constructor
+ *
+ * The generated constructor code will consist of a temporary variable
+ * declaration of the same type as the constructor. A sequence of assignments
+ * from constructor parameters to the temporary will follow.
+ *
+ * \return
+ * An \c ir_dereference_variable of the temporary generated in the constructor
+ * body.
+ */
+static ir_rvalue *
+emit_inline_matrix_constructor(const glsl_type *type,
+ exec_list *instructions,
+ exec_list *parameters,
+ void *ctx)
+{
+ assert(!parameters->is_empty());
+
+ ir_variable *var = new(ctx) ir_variable(type, "mat_ctor", ir_var_temporary);
+ instructions->push_tail(var);
+
+ /* There are three kinds of matrix constructors.
+ *
+    * - Construct a matrix from a single scalar by replicating that scalar
+    *   along the diagonal of the matrix and setting all other components to
+    *   zero.
+    *
+    * - Construct a matrix from an arbitrary combination of vectors and
+ * scalars. The components of the constructor parameters are assigned
+ * to the matrix in column-major order until the matrix is full.
+ *
+ * - Construct a matrix from a single matrix. The source matrix is copied
+ * to the upper left portion of the constructed matrix, and the remaining
+ * elements take values from the identity matrix.
+ */
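+   /* Illustrative GLSL for the three cases: 'mat3(2.0)' builds a scaled
+    * identity, 'mat2(v4)' fills both columns from a single vec4, and
+    * 'mat4(m3)' copies m3 into the upper left and takes the rest from the
+    * identity matrix.
+    */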
+ ir_rvalue *const first_param = (ir_rvalue *) parameters->get_head_raw();
+ if (single_scalar_parameter(parameters)) {
+ /* Assign the scalar to the X component of a vec4, and fill the remaining
+ * components with zero.
+ */
+ glsl_base_type param_base_type = first_param->type->base_type;
+ assert(first_param->type->is_float() || first_param->type->is_double());
+ ir_variable *rhs_var =
+ new(ctx) ir_variable(glsl_type::get_instance(param_base_type, 4, 1),
+ "mat_ctor_vec",
+ ir_var_temporary);
+ instructions->push_tail(rhs_var);
+
+ ir_constant_data zero;
+ for (unsigned i = 0; i < 4; i++)
+ if (first_param->type->is_float())
+ zero.f[i] = 0.0;
+ else
+ zero.d[i] = 0.0;
+
+ ir_instruction *inst =
+ new(ctx) ir_assignment(new(ctx) ir_dereference_variable(rhs_var),
+ new(ctx) ir_constant(rhs_var->type, &zero));
+ instructions->push_tail(inst);
+
+ ir_dereference *const rhs_ref =
+ new(ctx) ir_dereference_variable(rhs_var);
+
+ inst = new(ctx) ir_assignment(rhs_ref, first_param, NULL, 0x01);
+ instructions->push_tail(inst);
+
+ /* Assign the temporary vector to each column of the destination matrix
+ * with a swizzle that puts the X component on the diagonal of the
+ * matrix. In some cases this may mean that the X component does not
+ * get assigned into the column at all (i.e., when the matrix has more
+ * columns than rows).
+ */
+ static const unsigned rhs_swiz[4][4] = {
+ { 0, 1, 1, 1 },
+ { 1, 0, 1, 1 },
+ { 1, 1, 0, 1 },
+ { 1, 1, 1, 0 }
+ };
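+      /* For example, for mat3(s) the temporary holds (s, 0, 0, 0): column 0
+       * is swizzled to (s, 0, 0), column 1 to (0, s, 0), and column 2 to
+       * (0, 0, s).
+       */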
+
+ const unsigned cols_to_init = MIN2(type->matrix_columns,
+ type->vector_elements);
+ for (unsigned i = 0; i < cols_to_init; i++) {
+ ir_constant *const col_idx = new(ctx) ir_constant(i);
+ ir_rvalue *const col_ref = new(ctx) ir_dereference_array(var,
+ col_idx);
+
+ ir_rvalue *const rhs_ref = new(ctx) ir_dereference_variable(rhs_var);
+ ir_rvalue *const rhs = new(ctx) ir_swizzle(rhs_ref, rhs_swiz[i],
+ type->vector_elements);
+
+ inst = new(ctx) ir_assignment(col_ref, rhs);
+ instructions->push_tail(inst);
+ }
+
+ for (unsigned i = cols_to_init; i < type->matrix_columns; i++) {
+ ir_constant *const col_idx = new(ctx) ir_constant(i);
+ ir_rvalue *const col_ref = new(ctx) ir_dereference_array(var,
+ col_idx);
+
+ ir_rvalue *const rhs_ref = new(ctx) ir_dereference_variable(rhs_var);
+ ir_rvalue *const rhs = new(ctx) ir_swizzle(rhs_ref, 1, 1, 1, 1,
+ type->vector_elements);
+
+ inst = new(ctx) ir_assignment(col_ref, rhs);
+ instructions->push_tail(inst);
+ }
+ } else if (first_param->type->is_matrix()) {
+ /* From page 50 (56 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "If a matrix is constructed from a matrix, then each component
+ * (column i, row j) in the result that has a corresponding
+ * component (column i, row j) in the argument will be initialized
+ * from there. All other components will be initialized to the
+ * identity matrix. If a matrix argument is given to a matrix
+ * constructor, it is an error to have any other arguments."
+ */
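+      /* For example (illustrative GLSL), 'mat4(mat2(2.0))' places the mat2
+       * in the upper-left 2x2 block and fills the remaining components from
+       * the identity matrix.
+       */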
+ assert(first_param->next->is_tail_sentinel());
+ ir_rvalue *const src_matrix = first_param;
+
+      /* If the source matrix is smaller, pre-initialize the relevant parts of
+ * the destination matrix to the identity matrix.
+ */
+ if ((src_matrix->type->matrix_columns < var->type->matrix_columns) ||
+ (src_matrix->type->vector_elements < var->type->vector_elements)) {
+
+ /* If the source matrix has fewer rows, every column of the
+ * destination must be initialized. Otherwise only the columns in
+ * the destination that do not exist in the source must be
+ * initialized.
+ */
+ unsigned col =
+ (src_matrix->type->vector_elements < var->type->vector_elements)
+ ? 0 : src_matrix->type->matrix_columns;
+
+ const glsl_type *const col_type = var->type->column_type();
+ for (/* empty */; col < var->type->matrix_columns; col++) {
+ ir_constant_data ident;
+
+ if (!col_type->is_double()) {
+ ident.f[0] = 0.0f;
+ ident.f[1] = 0.0f;
+ ident.f[2] = 0.0f;
+ ident.f[3] = 0.0f;
+ ident.f[col] = 1.0f;
+ } else {
+ ident.d[0] = 0.0;
+ ident.d[1] = 0.0;
+ ident.d[2] = 0.0;
+ ident.d[3] = 0.0;
+ ident.d[col] = 1.0;
+ }
+
+ ir_rvalue *const rhs = new(ctx) ir_constant(col_type, &ident);
+
+ ir_rvalue *const lhs =
+ new(ctx) ir_dereference_array(var, new(ctx) ir_constant(col));
+
+ ir_instruction *inst = new(ctx) ir_assignment(lhs, rhs);
+ instructions->push_tail(inst);
+ }
+ }
+
+ /* Assign columns from the source matrix to the destination matrix.
+ *
+ * Since the parameter will be used in the RHS of multiple assignments,
+       * generate a temporary and copy the parameter there.
+ */
+ ir_variable *const rhs_var =
+ new(ctx) ir_variable(first_param->type, "mat_ctor_mat",
+ ir_var_temporary);
+ instructions->push_tail(rhs_var);
+
+ ir_dereference *const rhs_var_ref =
+ new(ctx) ir_dereference_variable(rhs_var);
+ ir_instruction *const inst =
+ new(ctx) ir_assignment(rhs_var_ref, first_param);
+ instructions->push_tail(inst);
+
+ const unsigned last_row = MIN2(src_matrix->type->vector_elements,
+ var->type->vector_elements);
+ const unsigned last_col = MIN2(src_matrix->type->matrix_columns,
+ var->type->matrix_columns);
+
+ unsigned swiz[4] = { 0, 0, 0, 0 };
+ for (unsigned i = 1; i < last_row; i++)
+ swiz[i] = i;
+
+ const unsigned write_mask = (1U << last_row) - 1;
+
+ for (unsigned i = 0; i < last_col; i++) {
+ ir_dereference *const lhs =
+ new(ctx) ir_dereference_array(var, new(ctx) ir_constant(i));
+ ir_rvalue *const rhs_col =
+ new(ctx) ir_dereference_array(rhs_var, new(ctx) ir_constant(i));
+
+ /* If one matrix has columns that are smaller than the columns of the
+ * other matrix, wrap the column access of the larger with a swizzle
+ * so that the LHS and RHS of the assignment have the same size (and
+ * therefore have the same type).
+ *
+          * It would be perfectly valid to unconditionally generate the
+          * swizzles, but generating them only when the sizes differ
+          * typically results in a more compact IR tree.
+ */
+ ir_rvalue *rhs;
+ if (lhs->type->vector_elements != rhs_col->type->vector_elements) {
+ rhs = new(ctx) ir_swizzle(rhs_col, swiz, last_row);
+ } else {
+ rhs = rhs_col;
+ }
+
+ ir_instruction *inst =
+ new(ctx) ir_assignment(lhs, rhs, NULL, write_mask);
+ instructions->push_tail(inst);
+ }
+ } else {
+ const unsigned cols = type->matrix_columns;
+ const unsigned rows = type->vector_elements;
+ unsigned remaining_slots = rows * cols;
+ unsigned col_idx = 0;
+ unsigned row_idx = 0;
+
+ foreach_in_list(ir_rvalue, rhs, parameters) {
+ unsigned rhs_components = rhs->type->components();
+ unsigned rhs_base = 0;
+
+ if (remaining_slots == 0)
+ break;
+
+ /* Since the parameter might be used in the RHS of two assignments,
+          * generate a temporary and copy the parameter there.
+ */
+ ir_variable *rhs_var =
+ new(ctx) ir_variable(rhs->type, "mat_ctor_vec", ir_var_temporary);
+ instructions->push_tail(rhs_var);
+
+ ir_dereference *rhs_var_ref =
+ new(ctx) ir_dereference_variable(rhs_var);
+ ir_instruction *inst = new(ctx) ir_assignment(rhs_var_ref, rhs);
+ instructions->push_tail(inst);
+
+ do {
+ /* Assign the current parameter to as many components of the matrix
+ * as it will fill.
+ *
+ * NOTE: A single vector parameter can span two matrix columns. A
+ * single vec4, for example, can completely fill a mat2.
+ */
+ unsigned count = MIN2(rows - row_idx,
+ rhs_components - rhs_base);
+
+ rhs_var_ref = new(ctx) ir_dereference_variable(rhs_var);
+ ir_instruction *inst = assign_to_matrix_column(var, col_idx,
+ row_idx,
+ rhs_var_ref,
+ rhs_base,
+ count, ctx);
+ instructions->push_tail(inst);
+ rhs_base += count;
+ row_idx += count;
+ remaining_slots -= count;
+
+            /* Sometimes there is still data left in the parameter and
+             * components left to be set in the destination, but in the
+             * next column.
+ */
+ if (row_idx >= rows) {
+ row_idx = 0;
+ col_idx++;
+ }
+ } while(remaining_slots > 0 && rhs_base < rhs_components);
+ }
+ }
+
+ return new(ctx) ir_dereference_variable(var);
+}
+
+
+static ir_rvalue *
+emit_inline_record_constructor(const glsl_type *type,
+ exec_list *instructions,
+ exec_list *parameters,
+ void *mem_ctx)
+{
+ ir_variable *const var =
+ new(mem_ctx) ir_variable(type, "record_ctor", ir_var_temporary);
+ ir_dereference_variable *const d =
+ new(mem_ctx) ir_dereference_variable(var);
+
+ instructions->push_tail(var);
+
+ exec_node *node = parameters->get_head_raw();
+ for (unsigned i = 0; i < type->length; i++) {
+ assert(!node->is_tail_sentinel());
+
+ ir_dereference *const lhs =
+ new(mem_ctx) ir_dereference_record(d->clone(mem_ctx, NULL),
+ type->fields.structure[i].name);
+
+ ir_rvalue *const rhs = ((ir_instruction *) node)->as_rvalue();
+ assert(rhs != NULL);
+
+ ir_instruction *const assign = new(mem_ctx) ir_assignment(lhs, rhs);
+
+ instructions->push_tail(assign);
+ node = node->next;
+ }
+
+ return d;
+}
+
+
+static ir_rvalue *
+process_record_constructor(exec_list *instructions,
+ const glsl_type *constructor_type,
+ YYLTYPE *loc, exec_list *parameters,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ /* From page 32 (page 38 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "The arguments to the constructor will be used to set the structure's
+ * fields, in order, using one argument per field. Each argument must
+ * be the same type as the field it sets, or be a type that can be
+ * converted to the field's type according to Section 4.1.10 “Implicit
+ * Conversions.”"
+ *
+ * From page 35 (page 41 of the PDF) of the GLSL 4.20 spec:
+ *
+ * "In all cases, the innermost initializer (i.e., not a list of
+ * initializers enclosed in curly braces) applied to an object must
+ * have the same type as the object being initialized or be a type that
+ * can be converted to the object's type according to section 4.1.10
+ * "Implicit Conversions". In the latter case, an implicit conversion
+ * will be done on the initializer before the assignment is done."
+ */
+ exec_list actual_parameters;
+
+ const unsigned parameter_count =
+ process_parameters(instructions, &actual_parameters, parameters,
+ state);
+
+ if (parameter_count != constructor_type->length) {
+ _mesa_glsl_error(loc, state,
+ "%s parameters in constructor for `%s'",
+ parameter_count > constructor_type->length
+ ? "too many": "insufficient",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ bool all_parameters_are_constant = true;
+
+ int i = 0;
+ /* Type cast each parameter and, if possible, fold constants. */
+ foreach_in_list_safe(ir_rvalue, ir, &actual_parameters) {
+
+ const glsl_struct_field *struct_field =
+ &constructor_type->fields.structure[i];
+
+ /* Apply implicit conversions (not the scalar constructor rules, see the
+ * spec quote above!) and attempt to convert the parameter to a constant
+ * valued expression. After doing so, track whether or not all the
+ * parameters to the constructor are trivially constant valued
+ * expressions.
+ */
+ all_parameters_are_constant &=
+ implicitly_convert_component(ir, struct_field->type->base_type,
+ state);
+
+ if (ir->type != struct_field->type) {
+ _mesa_glsl_error(loc, state,
+ "parameter type mismatch in constructor for `%s.%s' "
+ "(%s vs %s)",
+ constructor_type->name,
+ struct_field->name,
+ ir->type->name,
+ struct_field->type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ i++;
+ }
+
+ if (all_parameters_are_constant) {
+ return new(ctx) ir_constant(constructor_type, &actual_parameters);
+ } else {
+ return emit_inline_record_constructor(constructor_type, instructions,
+ &actual_parameters, state);
+ }
+}
+
+ir_rvalue *
+ast_function_expression::handle_method(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ const ast_expression *field = subexpressions[0];
+ ir_rvalue *op;
+ ir_rvalue *result;
+ void *ctx = state;
+ /* Handle "method calls" in GLSL 1.20 - namely, array.length() */
+ YYLTYPE loc = get_location();
+ state->check_version(120, 300, &loc, "methods not supported");
+
+ const char *method;
+ method = field->primary_expression.identifier;
+
+   /* This prevents spurious "uninitialized variable" warnings when calling
+    * array.length().
+ */
+ field->subexpressions[0]->set_is_lhs(true);
+ op = field->subexpressions[0]->hir(instructions, state);
+ if (strcmp(method, "length") == 0) {
+ if (!this->expressions.is_empty()) {
+ _mesa_glsl_error(&loc, state, "length method takes no arguments");
+ goto fail;
+ }
+
+ if (op->type->is_array()) {
+ if (op->type->is_unsized_array()) {
+ if (!state->has_shader_storage_buffer_objects()) {
+ _mesa_glsl_error(&loc, state,
+ "length called on unsized array"
+ " only available with"
+ " ARB_shader_storage_buffer_object");
+ }
+            /* Calculate the length of an unsized array at run time. */
+ result = new(ctx) ir_expression(ir_unop_ssbo_unsized_array_length,
+ op);
+ } else {
+ result = new(ctx) ir_constant(op->type->array_size());
+ }
+ } else if (op->type->is_vector()) {
+ if (state->has_420pack()) {
+ /* .length() returns int. */
+ result = new(ctx) ir_constant((int) op->type->vector_elements);
+ } else {
+ _mesa_glsl_error(&loc, state, "length method on matrix only"
+ " available with ARB_shading_language_420pack");
+ goto fail;
+ }
+ } else if (op->type->is_matrix()) {
+ if (state->has_420pack()) {
+ /* .length() returns int. */
+ result = new(ctx) ir_constant((int) op->type->matrix_columns);
+ } else {
+ _mesa_glsl_error(&loc, state, "length method on matrix only"
+ " available with ARB_shading_language_420pack");
+ goto fail;
+ }
+ } else {
+ _mesa_glsl_error(&loc, state, "length called on scalar.");
+ goto fail;
+ }
+ } else {
+ _mesa_glsl_error(&loc, state, "unknown method: `%s'", method);
+ goto fail;
+ }
+ return result;
+ fail:
+ return ir_rvalue::error_value(ctx);
+}
+
+static inline bool is_valid_constructor(const glsl_type *type,
+ struct _mesa_glsl_parse_state *state)
+{
+ return type->is_numeric() || type->is_boolean() ||
+ (state->has_bindless() && (type->is_sampler() || type->is_image()));
+}
+
+ir_rvalue *
+ast_function_expression::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ /* There are three sorts of function calls.
+ *
+ * 1. constructors - The first subexpression is an ast_type_specifier.
+    * 2. methods - Only the .length() method of arrays (and, with
+    *    ARB_shading_language_420pack, of vectors and matrices).
+ * 3. functions - Calls to regular old functions.
+ *
+ */
+ if (is_constructor()) {
+ const ast_type_specifier *type =
+ (ast_type_specifier *) subexpressions[0];
+ YYLTYPE loc = type->get_location();
+ const char *name;
+
+ const glsl_type *const constructor_type = type->glsl_type(& name, state);
+
+ /* constructor_type can be NULL if a variable with the same name as the
+ * structure has come into scope.
+ */
+ if (constructor_type == NULL) {
+ _mesa_glsl_error(& loc, state, "unknown type `%s' (structure name "
+ "may be shadowed by a variable with the same name)",
+ type->type_name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+
+ /* Constructors for opaque types are illegal.
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers are represented using 64-bit integer handles, and may be "
+ * converted to and from 64-bit integers using constructors."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images are represented using 64-bit integer handles, and may be
+ * converted to and from 64-bit integers using constructors."
+ */
+ if (constructor_type->contains_atomic() ||
+ (!state->has_bindless() && constructor_type->contains_opaque())) {
+ _mesa_glsl_error(& loc, state, "cannot construct %s type `%s'",
+ state->has_bindless() ? "atomic" : "opaque",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ if (constructor_type->is_subroutine()) {
+ _mesa_glsl_error(& loc, state,
+ "subroutine name cannot be a constructor `%s'",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ if (constructor_type->is_array()) {
+ if (!state->check_version(120, 300, &loc,
+ "array constructors forbidden")) {
+ return ir_rvalue::error_value(ctx);
+ }
+
+ return process_array_constructor(instructions, constructor_type,
+ & loc, &this->expressions, state);
+ }
+
+
+ /* There are two kinds of constructor calls. Constructors for arrays and
+ * structures must have the exact number of arguments with matching types
+ * in the correct order. These constructors follow essentially the same
+ * type matching rules as functions.
+ *
+ * Constructors for built-in language types, such as mat4 and vec2, are
+ * free form. The only requirements are that the parameters must provide
+ * enough values of the correct scalar type and that no arguments are
+ * given past the last used argument.
+ *
+ * When using the C-style initializer syntax from GLSL 4.20, constructors
+ * must have the exact number of arguments with matching types in the
+ * correct order.
+ */
+ if (constructor_type->is_struct()) {
+ return process_record_constructor(instructions, constructor_type,
+ &loc, &this->expressions,
+ state);
+ }
+
+ if (!is_valid_constructor(constructor_type, state))
+ return ir_rvalue::error_value(ctx);
+
+ /* Total number of components of the type being constructed. */
+ const unsigned type_components = constructor_type->components();
+
+ /* Number of components from parameters that have actually been
+ * consumed. This is used to perform several kinds of error checking.
+ */
+ unsigned components_used = 0;
+
+ unsigned matrix_parameters = 0;
+ unsigned nonmatrix_parameters = 0;
+ exec_list actual_parameters;
+
+ foreach_list_typed(ast_node, ast, link, &this->expressions) {
+ ir_rvalue *result = ast->hir(instructions, state);
+
+ /* From page 50 (page 56 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "It is an error to provide extra arguments beyond this
+ * last used argument."
+ */
+ if (components_used >= type_components) {
+ _mesa_glsl_error(& loc, state, "too many parameters to `%s' "
+ "constructor",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ if (!is_valid_constructor(result->type, state)) {
+ _mesa_glsl_error(& loc, state, "cannot construct `%s' from a "
+ "non-numeric data type",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ /* Count the number of matrix and nonmatrix parameters. This
+ * is used below to enforce some of the constructor rules.
+ */
+ if (result->type->is_matrix())
+ matrix_parameters++;
+ else
+ nonmatrix_parameters++;
+
+ actual_parameters.push_tail(result);
+ components_used += result->type->components();
+ }
+
+ /* From page 28 (page 34 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "It is an error to construct matrices from other matrices. This
+ * is reserved for future use."
+ */
+ if (matrix_parameters > 0
+ && constructor_type->is_matrix()
+ && !state->check_version(120, 100, &loc,
+ "cannot construct `%s' from a matrix",
+ constructor_type->name)) {
+ return ir_rvalue::error_value(ctx);
+ }
+
+ /* From page 50 (page 56 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "If a matrix argument is given to a matrix constructor, it is
+ * an error to have any other arguments."
+ */
+ if ((matrix_parameters > 0)
+ && ((matrix_parameters + nonmatrix_parameters) > 1)
+ && constructor_type->is_matrix()) {
+ _mesa_glsl_error(& loc, state, "for matrix `%s' constructor, "
+ "matrix must be only parameter",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ /* From page 28 (page 34 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "In these cases, there must be enough components provided in the
+ * arguments to provide an initializer for every component in the
+ * constructed value."
+ */
+ if (components_used < type_components && components_used != 1
+ && matrix_parameters == 0) {
+ _mesa_glsl_error(& loc, state, "too few components to construct "
+ "`%s'",
+ constructor_type->name);
+ return ir_rvalue::error_value(ctx);
+ }
+
+ /* Matrices can never be consumed as is by any constructor but matrix
+ * constructors. If the constructor type is not matrix, always break the
+ * matrix up into a series of column vectors.
+ */
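+      /* For example (illustrative GLSL), 'vec4(m2)' for a mat2 m2 is
+       * rewritten below as if it had been written 'vec4(m2[0], m2[1])'.
+       */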
+ if (!constructor_type->is_matrix()) {
+ foreach_in_list_safe(ir_rvalue, matrix, &actual_parameters) {
+ if (!matrix->type->is_matrix())
+ continue;
+
+ /* Create a temporary containing the matrix. */
+ ir_variable *var = new(ctx) ir_variable(matrix->type, "matrix_tmp",
+ ir_var_temporary);
+ instructions->push_tail(var);
+ instructions->push_tail(
+ new(ctx) ir_assignment(new(ctx) ir_dereference_variable(var),
+ matrix));
+ var->constant_value = matrix->constant_expression_value(ctx);
+
+ /* Replace the matrix with dereferences of its columns. */
+ for (int i = 0; i < matrix->type->matrix_columns; i++) {
+ matrix->insert_before(
+ new (ctx) ir_dereference_array(var,
+ new(ctx) ir_constant(i)));
+ }
+ matrix->remove();
+ }
+ }
+
+ bool all_parameters_are_constant = true;
+
+ /* Type cast each parameter and, if possible, fold constants.*/
+ foreach_in_list_safe(ir_rvalue, ir, &actual_parameters) {
+ const glsl_type *desired_type;
+
+ /* From section 5.4.1 of the ARB_bindless_texture spec:
+ *
+ * "In the following four constructors, the low 32 bits of the sampler
+ * type correspond to the .x component of the uvec2 and the high 32
+ * bits correspond to the .y component."
+ *
+ * uvec2(any sampler type) // Converts a sampler type to a
+ * // pair of 32-bit unsigned integers
+ * any sampler type(uvec2) // Converts a pair of 32-bit unsigned integers to
+ * // a sampler type
+ * uvec2(any image type) // Converts an image type to a
+ * // pair of 32-bit unsigned integers
+ * any image type(uvec2) // Converts a pair of 32-bit unsigned integers to
+ * // an image type
+ */
+ if (ir->type->is_sampler() || ir->type->is_image()) {
+ /* Convert a sampler/image type to a pair of 32-bit unsigned
+ * integers as defined by ARB_bindless_texture.
+ */
+ if (constructor_type != glsl_type::uvec2_type) {
+ _mesa_glsl_error(&loc, state, "sampler and image types can only "
+ "be converted to a pair of 32-bit unsigned "
+ "integers");
+ }
+ desired_type = glsl_type::uvec2_type;
+ } else if (constructor_type->is_sampler() ||
+ constructor_type->is_image()) {
+ /* Convert a pair of 32-bit unsigned integers to a sampler or image
+ * type as defined by ARB_bindless_texture.
+ */
+ if (ir->type != glsl_type::uvec2_type) {
+ _mesa_glsl_error(&loc, state, "sampler and image types can only "
+ "be converted from a pair of 32-bit unsigned "
+ "integers");
+ }
+ desired_type = constructor_type;
+ } else {
+ desired_type =
+ glsl_type::get_instance(constructor_type->base_type,
+ ir->type->vector_elements,
+ ir->type->matrix_columns);
+ }
+
+ ir_rvalue *result = convert_component(ir, desired_type);
+
+ /* Attempt to convert the parameter to a constant valued expression.
+ * After doing so, track whether or not all the parameters to the
+ * constructor are trivially constant valued expressions.
+ */
+ ir_rvalue *const constant = result->constant_expression_value(ctx);
+
+ if (constant != NULL)
+ result = constant;
+ else
+ all_parameters_are_constant = false;
+
+ if (result != ir) {
+ ir->replace_with(result);
+ }
+ }
+
+ /* If all of the parameters are trivially constant, create a
+ * constant representing the complete collection of parameters.
+ */
+ if (all_parameters_are_constant) {
+ return new(ctx) ir_constant(constructor_type, &actual_parameters);
+ } else if (constructor_type->is_scalar()) {
+ return dereference_component((ir_rvalue *)
+ actual_parameters.get_head_raw(),
+ 0);
+ } else if (constructor_type->is_vector()) {
+ return emit_inline_vector_constructor(constructor_type,
+ instructions,
+ &actual_parameters,
+ ctx);
+ } else {
+ assert(constructor_type->is_matrix());
+ return emit_inline_matrix_constructor(constructor_type,
+ instructions,
+ &actual_parameters,
+ ctx);
+ }
+ } else if (subexpressions[0]->oper == ast_field_selection) {
+ return handle_method(instructions, state);
+ } else {
+ const ast_expression *id = subexpressions[0];
+ const char *func_name = NULL;
+ YYLTYPE loc = get_location();
+ exec_list actual_parameters;
+ ir_variable *sub_var = NULL;
+ ir_rvalue *array_idx = NULL;
+
+ process_parameters(instructions, &actual_parameters, &this->expressions,
+ state);
+
+ if (id->oper == ast_array_index) {
+ array_idx = generate_array_index(ctx, instructions, state, loc,
+ id->subexpressions[0],
+ id->subexpressions[1], &func_name,
+ &actual_parameters);
+ } else if (id->oper == ast_identifier) {
+ func_name = id->primary_expression.identifier;
+ } else {
+ _mesa_glsl_error(&loc, state, "function name is not an identifier");
+ }
+
+ /* an error was emitted earlier */
+ if (!func_name)
+ return ir_rvalue::error_value(ctx);
+
+ ir_function_signature *sig =
+ match_function_by_name(func_name, &actual_parameters, state);
+
+ ir_rvalue *value = NULL;
+ if (sig == NULL) {
+ sig = match_subroutine_by_name(func_name, &actual_parameters,
+ state, &sub_var);
+ }
+
+ if (sig == NULL) {
+ no_matching_function_error(func_name, &loc,
+ &actual_parameters, state);
+ value = ir_rvalue::error_value(ctx);
+ } else if (!verify_parameter_modes(state, sig,
+ actual_parameters,
+ this->expressions)) {
+ /* an error has already been emitted */
+ value = ir_rvalue::error_value(ctx);
+ } else if (sig->is_builtin() && strcmp(func_name, "ftransform") == 0) {
+ /* ftransform refers to global variables, and we don't have any code
+ * for remapping the variable references in the built-in shader.
+ */
+ ir_variable *mvp =
+ state->symbols->get_variable("gl_ModelViewProjectionMatrix");
+ ir_variable *vtx = state->symbols->get_variable("gl_Vertex");
+ value = new(ctx) ir_expression(ir_binop_mul, glsl_type::vec4_type,
+ new(ctx) ir_dereference_variable(mvp),
+ new(ctx) ir_dereference_variable(vtx));
+ } else {
+ bool is_begin_interlock = false;
+ bool is_end_interlock = false;
+ if (sig->is_builtin() &&
+ state->stage == MESA_SHADER_FRAGMENT &&
+ state->ARB_fragment_shader_interlock_enable) {
+ is_begin_interlock = strcmp(func_name, "beginInvocationInterlockARB") == 0;
+ is_end_interlock = strcmp(func_name, "endInvocationInterlockARB") == 0;
+ }
+
+ if (sig->is_builtin() &&
+ ((state->stage == MESA_SHADER_TESS_CTRL &&
+ strcmp(func_name, "barrier") == 0) ||
+ is_begin_interlock || is_end_interlock)) {
+ if (state->current_function == NULL ||
+ strcmp(state->current_function->function_name(), "main") != 0) {
+ _mesa_glsl_error(&loc, state,
+ "%s() may only be used in main()", func_name);
+ }
+
+ if (state->found_return) {
+ _mesa_glsl_error(&loc, state,
+ "%s() may not be used after return", func_name);
+ }
+
+ if (instructions != &state->current_function->body) {
+ _mesa_glsl_error(&loc, state,
+ "%s() may not be used in control flow", func_name);
+ }
+ }
+
+ /* There can be only one begin/end interlock pair in the function. */
+ if (is_begin_interlock) {
+ if (state->found_begin_interlock)
+ _mesa_glsl_error(&loc, state,
+ "beginInvocationInterlockARB may not be used twice");
+ state->found_begin_interlock = true;
+ } else if (is_end_interlock) {
+ if (!state->found_begin_interlock)
+ _mesa_glsl_error(&loc, state,
+ "endInvocationInterlockARB may not be used "
+ "before beginInvocationInterlockARB");
+ if (state->found_end_interlock)
+ _mesa_glsl_error(&loc, state,
+ "endInvocationInterlockARB may not be used twice");
+ state->found_end_interlock = true;
+ }
+
+ value = generate_call(instructions, sig, &actual_parameters, sub_var,
+ array_idx, state);
+ if (!value) {
+ ir_variable *const tmp = new(ctx) ir_variable(glsl_type::void_type,
+ "void_var",
+ ir_var_temporary);
+ instructions->push_tail(tmp);
+ value = new(ctx) ir_dereference_variable(tmp);
+ }
+ }
+
+ return value;
+ }
+
+ unreachable("not reached");
+}
+
+bool
+ast_function_expression::has_sequence_subexpression() const
+{
+ foreach_list_typed(const ast_node, ast, link, &this->expressions) {
+ if (ast->has_sequence_subexpression())
+ return true;
+ }
+
+ return false;
+}
+
+ir_rvalue *
+ast_aggregate_initializer::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ YYLTYPE loc = this->get_location();
+
+ if (!this->constructor_type) {
+ _mesa_glsl_error(&loc, state, "type of C-style initializer unknown");
+ return ir_rvalue::error_value(ctx);
+ }
+ const glsl_type *const constructor_type = this->constructor_type;
+
+ if (!state->has_420pack()) {
+ _mesa_glsl_error(&loc, state, "C-style initialization requires the "
+ "GL_ARB_shading_language_420pack extension");
+ return ir_rvalue::error_value(ctx);
+ }
+
+ if (constructor_type->is_array()) {
+ return process_array_constructor(instructions, constructor_type, &loc,
+ &this->expressions, state);
+ }
+
+ if (constructor_type->is_struct()) {
+ return process_record_constructor(instructions, constructor_type, &loc,
+ &this->expressions, state);
+ }
+
+ return process_vec_mat_constructor(instructions, constructor_type, &loc,
+ &this->expressions, state);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp
new file mode 100644
index 0000000000..c00298a160
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp
@@ -0,0 +1,8997 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ast_to_hir.cpp
+ * Convert abstract syntax to high-level intermediate representation (HIR).
+ *
+ * During the conversion to HIR, the majority of the semantic checking is
+ * performed on the program.  This includes:
+ *
+ * * Symbol table management
+ * * Type checking
+ * * Function binding
+ *
+ * The majority of this work could be done during parsing, and the parser could
+ * probably generate HIR directly. However, this results in frequent changes
+ * to the parser code.  Since we do not assume that every system this compiler
+ * is built on will have Flex and Bison installed, we have to store the code
+ * generated by these tools in our version control system. In other parts of
+ * the system we've seen problems where a parser was changed but the generated
+ * code was not committed, merge conflicts were created because two developers
+ * had slightly different versions of Bison installed, etc.
+ *
+ * I have also noticed that running Bison generated parsers in GDB is very
+ * irritating. When you get a segfault on '$$ = $1->foo', you can't very
+ * well 'print $1' in GDB.
+ *
+ * As a result, my preference is to put as little C code as possible in the
+ * parser (and lexer) sources.
+ */
+
+#include "glsl_symbol_table.h"
+#include "glsl_parser_extras.h"
+#include "ast.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+#include "main/mtypes.h"
+#include "main/macros.h"
+#include "main/shaderobj.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "builtin_functions.h"
+
+using namespace ir_builder;
+
+static void
+detect_conflicting_assignments(struct _mesa_glsl_parse_state *state,
+ exec_list *instructions);
+static void
+verify_subroutine_associated_funcs(struct _mesa_glsl_parse_state *state);
+
+static void
+remove_per_vertex_blocks(exec_list *instructions,
+ _mesa_glsl_parse_state *state, ir_variable_mode mode);
+
+/**
+ * Visitor class that finds the first instance of any write-only variable that
+ * is ever read, if any
+ */
+class read_from_write_only_variable_visitor : public ir_hierarchical_visitor
+{
+public:
+ read_from_write_only_variable_visitor() : found(NULL)
+ {
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (this->in_assignee)
+ return visit_continue;
+
+ ir_variable *var = ir->variable_referenced();
+ /* We can have memory_write_only set on both images and buffer variables,
+ * but in the former there is a distinction between reads from
+ * the variable itself (write_only) and from the memory they point to
+ * (memory_write_only), while in the case of buffer variables there is
+ * no such distinction, that is why this check here is limited to
+ * buffer variables alone.
+ */
+ if (!var || var->data.mode != ir_var_shader_storage)
+ return visit_continue;
+
+ if (var->data.memory_write_only) {
+ found = var;
+ return visit_stop;
+ }
+
+ return visit_continue;
+ }
+
+ ir_variable *get_variable() {
+ return found;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_expression *ir)
+ {
+ /* .length() doesn't actually read anything */
+ if (ir->operation == ir_unop_ssbo_unsized_array_length)
+ return visit_continue_with_parent;
+
+ return visit_continue;
+ }
+
+private:
+ ir_variable *found;
+};
+
+void
+_mesa_ast_to_hir(exec_list *instructions, struct _mesa_glsl_parse_state *state)
+{
+ _mesa_glsl_initialize_variables(instructions, state);
+
+ state->symbols->separate_function_namespace = state->language_version == 110;
+
+ state->current_function = NULL;
+
+ state->toplevel_ir = instructions;
+
+ state->gs_input_prim_type_specified = false;
+ state->tcs_output_vertices_specified = false;
+ state->cs_input_local_size_specified = false;
+
+ /* Section 4.2 of the GLSL 1.20 specification states:
+ * "The built-in functions are scoped in a scope outside the global scope
+ * users declare global variables in. That is, a shader's global scope,
+ * available for user-defined functions and global variables, is nested
+ * inside the scope containing the built-in functions."
+ *
+ * Since built-in functions like ftransform() access built-in variables,
+ * it follows that those must be in the outer scope as well.
+ *
+ * We push scope here to create this nesting effect...but don't pop.
+ * This way, a shader's globals are still in the symbol table for use
+ * by the linker.
+ */
+ state->symbols->push_scope();
+
+ foreach_list_typed (ast_node, ast, link, & state->translation_unit)
+ ast->hir(instructions, state);
+
+ verify_subroutine_associated_funcs(state);
+ detect_recursion_unlinked(state, instructions);
+ detect_conflicting_assignments(state, instructions);
+
+ state->toplevel_ir = NULL;
+
+ /* Move all of the variable declarations to the front of the IR list, and
+ * reverse the order. This has the (intended!) side effect that vertex
+ * shader inputs and fragment shader outputs will appear in the IR in the
+ * same order that they appeared in the shader code. This results in the
+ * locations being assigned in the declared order. Many (arguably buggy)
+ * applications depend on this behavior, and it matches what nearly all
+ * other drivers do.
+ * However, do not push the declarations before struct decls or precision
+ * statements.
+ */
+ ir_instruction* before_node = (ir_instruction*)instructions->get_head();
+ ir_instruction* after_node = NULL;
+ while (before_node && (before_node->ir_type == ir_type_precision || before_node->ir_type == ir_type_typedecl))
+ {
+ after_node = before_node;
+ before_node = (ir_instruction*)before_node->next;
+ }
+
+ foreach_in_list_safe(ir_instruction, node, instructions) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL)
+ continue;
+
+ var->remove();
+ if (after_node)
+ after_node->insert_after(var);
+ else
+ instructions->push_head(var);
+ }
+
+ /* Figure out if gl_FragCoord is actually used in fragment shader */
+ ir_variable *const var = state->symbols->get_variable("gl_FragCoord");
+ if (var != NULL)
+ state->fs_uses_gl_fragcoord = var->data.used;
+
+ /* From section 7.1 (Built-In Language Variables) of the GLSL 4.10 spec:
+ *
+ * If multiple shaders using members of a built-in block belonging to
+ * the same interface are linked together in the same program, they
+ * must all redeclare the built-in block in the same way, as described
+ * in section 4.3.7 "Interface Blocks" for interface block matching, or
+ * a link error will result.
+ *
+ * The phrase "using members of a built-in block" implies that if two
+ * shaders are linked together and one of them *does not use* any members
+ * of the built-in block, then that shader does not need to have a matching
+ * redeclaration of the built-in block.
+ *
+ * This appears to be a clarification to the behaviour established for
+ * gl_PerVertex by GLSL 1.50, therefore implement it regardless of GLSL
+ * version.
+ *
+ * The definition of "interface" in section 4.3.7 that applies here is as
+ * follows:
+ *
+ * The boundary between adjacent programmable pipeline stages: This
+ * spans all the outputs in all compilation units of the first stage
+ * and all the inputs in all compilation units of the second stage.
+ *
+ * Therefore this rule applies to both inter- and intra-stage linking.
+ *
+ * The easiest way to implement this is to check whether the shader uses
+ * gl_PerVertex right after ast-to-ir conversion, and if it doesn't, simply
+ * remove all the relevant variable declaration from the IR, so that the
+ * linker won't see them and complain about mismatches.
+ */
+ remove_per_vertex_blocks(instructions, state, ir_var_shader_in);
+ remove_per_vertex_blocks(instructions, state, ir_var_shader_out);
+
+ /* Check that we don't have reads from write-only variables */
+ read_from_write_only_variable_visitor v;
+ v.run(instructions);
+ ir_variable *error_var = v.get_variable();
+ if (error_var) {
+ /* It would be nice to have proper location information, but for that
+ * we would need to check this as we process each kind of AST node
+ */
+ YYLTYPE loc;
+ memset(&loc, 0, sizeof(loc));
+ _mesa_glsl_error(&loc, state, "Read from write-only variable `%s'",
+ error_var->name);
+ }
+}
+
+
+static ir_expression_operation
+get_implicit_conversion_operation(const glsl_type *to, const glsl_type *from,
+ struct _mesa_glsl_parse_state *state)
+{
+ switch (to->base_type) {
+ case GLSL_TYPE_FLOAT:
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2f;
+ case GLSL_TYPE_UINT: return ir_unop_u2f;
+ default: return (ir_expression_operation)0;
+ }
+
+ case GLSL_TYPE_UINT:
+ if (!state->has_implicit_uint_to_int_conversion())
+ return (ir_expression_operation)0;
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2u;
+ default: return (ir_expression_operation)0;
+ }
+
+ case GLSL_TYPE_DOUBLE:
+ if (!state->has_double())
+ return (ir_expression_operation)0;
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2d;
+ case GLSL_TYPE_UINT: return ir_unop_u2d;
+ case GLSL_TYPE_FLOAT: return ir_unop_f2d;
+ case GLSL_TYPE_INT64: return ir_unop_i642d;
+ case GLSL_TYPE_UINT64: return ir_unop_u642d;
+ default: return (ir_expression_operation)0;
+ }
+
+ case GLSL_TYPE_UINT64:
+ if (!state->has_int64())
+ return (ir_expression_operation)0;
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2u64;
+ case GLSL_TYPE_UINT: return ir_unop_u2u64;
+ case GLSL_TYPE_INT64: return ir_unop_i642u64;
+ default: return (ir_expression_operation)0;
+ }
+
+ case GLSL_TYPE_INT64:
+ if (!state->has_int64())
+ return (ir_expression_operation)0;
+ switch (from->base_type) {
+ case GLSL_TYPE_INT: return ir_unop_i2i64;
+ default: return (ir_expression_operation)0;
+ }
+
+ default: return (ir_expression_operation)0;
+ }
+}
+
+
+/**
+ * If a conversion is available, convert one operand to a different type
+ *
+ * The \c from \c ir_rvalue is converted "in place".
+ *
+ * \param to     Type that the operand is to be converted to
+ * \param from Operand that is being converted
+ * \param state GLSL compiler state
+ *
+ * \return
+ * If a conversion is possible (or unnecessary), \c true is returned.
+ * Otherwise \c false is returned.
+ */
+static bool
+apply_implicit_conversion(const glsl_type *to, ir_rvalue * &from,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ if (to->base_type == from->type->base_type)
+ return true;
+
+ /* Prior to GLSL 1.20, there are no implicit conversions */
+ if (!state->has_implicit_conversions())
+ return false;
+
+ /* From page 27 (page 33 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "There are no implicit array or structure conversions. For
+ * example, an array of int cannot be implicitly converted to an
+ *    array of float."
+ */
+ if (!to->is_numeric() || !from->type->is_numeric())
+ return false;
+
+ /* We don't actually want the specific type `to`, we want a type
+ * with the same base type as `to`, but the same vector width as
+ * `from`.
+ */
+ to = glsl_type::get_instance(to->base_type, from->type->vector_elements,
+ from->type->matrix_columns);
+
+ ir_expression_operation op = get_implicit_conversion_operation(to, from->type, state);
+ if (op) {
+ from = new(ctx) ir_expression(op, to, from, NULL);
+ return true;
+ } else {
+ return false;
+ }
+}
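+
+/* As an illustration of the two helpers above (not part of the upstream
+ * comments): with `to` = float and a `from` of type int,
+ * get_implicit_conversion_operation() yields ir_unop_i2f and `from` is
+ * rewritten in place as
+ *
+ *    (expression float i2f (original operand))
+ *
+ * which is how a declaration such as `float f = 1;` type-checks under the
+ * GLSL 1.20+ implicit-conversion rules.
+ */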
+
+
+static const struct glsl_type *
+arithmetic_result_type(ir_rvalue * &value_a, ir_rvalue * &value_b,
+ bool multiply,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ const glsl_type *type_a = value_a->type;
+ const glsl_type *type_b = value_b->type;
+
+ /* From GLSL 1.50 spec, page 56:
+ *
+ * "The arithmetic binary operators add (+), subtract (-),
+ * multiply (*), and divide (/) operate on integer and
+ * floating-point scalars, vectors, and matrices."
+ */
+ if (!type_a->is_numeric() || !type_b->is_numeric()) {
+ _mesa_glsl_error(loc, state,
+ "operands to arithmetic operators must be numeric");
+ return glsl_type::error_type;
+ }
+
+
+ /* "If one operand is floating-point based and the other is
+ * not, then the conversions from Section 4.1.10 "Implicit
+ * Conversions" are applied to the non-floating-point-based operand."
+ */
+ if (!apply_implicit_conversion(type_a, value_b, state)
+ && !apply_implicit_conversion(type_b, value_a, state)) {
+ _mesa_glsl_error(loc, state,
+ "could not implicitly convert operands to "
+ "arithmetic operator");
+ return glsl_type::error_type;
+ }
+ type_a = value_a->type;
+ type_b = value_b->type;
+
+ /* "If the operands are integer types, they must both be signed or
+ * both be unsigned."
+ *
+    * From this rule and the preceding conversion it can be inferred that
+ * both types must be GLSL_TYPE_FLOAT, or GLSL_TYPE_UINT, or GLSL_TYPE_INT.
+ * The is_numeric check above already filtered out the case where either
+ * type is not one of these, so now the base types need only be tested for
+ * equality.
+ */
+ if (type_a->base_type != type_b->base_type) {
+ _mesa_glsl_error(loc, state,
+ "base type mismatch for arithmetic operator");
+ return glsl_type::error_type;
+ }
+
+ /* "All arithmetic binary operators result in the same fundamental type
+ * (signed integer, unsigned integer, or floating-point) as the
+ * operands they operate on, after operand type conversion. After
+ * conversion, the following cases are valid
+ *
+ * * The two operands are scalars. In this case the operation is
+ * applied, resulting in a scalar."
+ */
+ if (type_a->is_scalar() && type_b->is_scalar())
+ return type_a;
+
+ /* "* One operand is a scalar, and the other is a vector or matrix.
+ * In this case, the scalar operation is applied independently to each
+ * component of the vector or matrix, resulting in the same size
+ * vector or matrix."
+ */
+ if (type_a->is_scalar()) {
+ if (!type_b->is_scalar())
+ return type_b;
+ } else if (type_b->is_scalar()) {
+ return type_a;
+ }
+
+ /* All of the combinations of <scalar, scalar>, <vector, scalar>,
+ * <scalar, vector>, <scalar, matrix>, and <matrix, scalar> have been
+ * handled.
+ */
+ assert(!type_a->is_scalar());
+ assert(!type_b->is_scalar());
+
+ /* "* The two operands are vectors of the same size. In this case, the
+ * operation is done component-wise resulting in the same size
+ * vector."
+ */
+ if (type_a->is_vector() && type_b->is_vector()) {
+ if (type_a == type_b) {
+ return type_a;
+ } else {
+ _mesa_glsl_error(loc, state,
+ "vector size mismatch for arithmetic operator");
+ return glsl_type::error_type;
+ }
+ }
+
+ /* All of the combinations of <scalar, scalar>, <vector, scalar>,
+ * <scalar, vector>, <scalar, matrix>, <matrix, scalar>, and
+ * <vector, vector> have been handled. At least one of the operands must
+ * be matrix. Further, since there are no integer matrix types, the base
+ * type of both operands must be float.
+ */
+ assert(type_a->is_matrix() || type_b->is_matrix());
+ assert(type_a->is_float() || type_a->is_double());
+ assert(type_b->is_float() || type_b->is_double());
+
+ /* "* The operator is add (+), subtract (-), or divide (/), and the
+ * operands are matrices with the same number of rows and the same
+ * number of columns. In this case, the operation is done component-
+    *      wise resulting in the same size matrix.
+ * * The operator is multiply (*), where both operands are matrices or
+ * one operand is a vector and the other a matrix. A right vector
+ * operand is treated as a column vector and a left vector operand as a
+ * row vector. In all these cases, it is required that the number of
+ * columns of the left operand is equal to the number of rows of the
+ * right operand. Then, the multiply (*) operation does a linear
+ * algebraic multiply, yielding an object that has the same number of
+ * rows as the left operand and the same number of columns as the right
+ * operand. Section 5.10 "Vector and Matrix Operations" explains in
+ * more detail how vectors and matrices are operated on."
+ */
+ if (! multiply) {
+ if (type_a == type_b)
+ return type_a;
+ } else {
+ const glsl_type *type = glsl_type::get_mul_type(type_a, type_b);
+
+ if (type == glsl_type::error_type) {
+ _mesa_glsl_error(loc, state,
+ "size mismatch for matrix multiplication");
+ }
+
+ return type;
+ }
+
+
+ /* "All other cases are illegal."
+ */
+ _mesa_glsl_error(loc, state, "type mismatch");
+ return glsl_type::error_type;
+}
+
+
+static const struct glsl_type *
+unary_arithmetic_result_type(const struct glsl_type *type,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ /* From GLSL 1.50 spec, page 57:
+ *
+ * "The arithmetic unary operators negate (-), post- and pre-increment
+ * and decrement (-- and ++) operate on integer or floating-point
+ * values (including vectors and matrices). All unary operators work
+ * component-wise on their operands. These result with the same type
+ * they operated on."
+ */
+ if (!type->is_numeric()) {
+ _mesa_glsl_error(loc, state,
+ "operands to arithmetic operators must be numeric");
+ return glsl_type::error_type;
+ }
+
+ return type;
+}
+
+/**
+ * \brief Return the result type of a bit-logic operation.
+ *
+ * If the given types to the bit-logic operator are invalid, return
+ * glsl_type::error_type.
+ *
+ * \param value_a LHS of bit-logic op
+ * \param value_b RHS of bit-logic op
+ */
+static const struct glsl_type *
+bit_logic_result_type(ir_rvalue * &value_a, ir_rvalue * &value_b,
+ ast_operators op,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ const glsl_type *type_a = value_a->type;
+ const glsl_type *type_b = value_b->type;
+
+ if (!state->check_bitwise_operations_allowed(loc)) {
+ return glsl_type::error_type;
+ }
+
+ /* From page 50 (page 56 of PDF) of GLSL 1.30 spec:
+ *
+ * "The bitwise operators and (&), exclusive-or (^), and inclusive-or
+ * (|). The operands must be of type signed or unsigned integers or
+ * integer vectors."
+ */
+ if (!type_a->is_integer_32_64()) {
+ _mesa_glsl_error(loc, state, "LHS of `%s' must be an integer",
+ ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+ if (!type_b->is_integer_32_64()) {
+ _mesa_glsl_error(loc, state, "RHS of `%s' must be an integer",
+ ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+
+ /* Prior to GLSL 4.0 / GL_ARB_gpu_shader5, implicit conversions didn't
+ * make sense for bitwise operations, as they don't operate on floats.
+ *
+ * GLSL 4.0 added implicit int -> uint conversions, which are relevant
+ * here. It wasn't clear whether or not we should apply them to bitwise
+ * operations. However, Khronos has decided that they should in future
+ * language revisions. Applications also rely on this behavior. We opt
+ * to apply them in general, but issue a portability warning.
+ *
+ * See https://www.khronos.org/bugzilla/show_bug.cgi?id=1405
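+    *
+    * For example, `1u | 2` is accepted under these rules: the int operand
+    * is converted to uint by the code below, and a portability warning is
+    * issued.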
+ */
+ if (type_a->base_type != type_b->base_type) {
+ if (!apply_implicit_conversion(type_a, value_b, state)
+ && !apply_implicit_conversion(type_b, value_a, state)) {
+ _mesa_glsl_error(loc, state,
+ "could not implicitly convert operands to "
+ "`%s` operator",
+ ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ } else {
+ _mesa_glsl_warning(loc, state,
+ "some implementations may not support implicit "
+ "int -> uint conversions for `%s' operators; "
+ "consider casting explicitly for portability",
+ ast_expression::operator_string(op));
+ }
+ type_a = value_a->type;
+ type_b = value_b->type;
+ }
+
+ /* "The fundamental types of the operands (signed or unsigned) must
+ * match,"
+ */
+ if (type_a->base_type != type_b->base_type) {
+ _mesa_glsl_error(loc, state, "operands of `%s' must have the same "
+ "base type", ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+
+ /* "The operands cannot be vectors of differing size." */
+ if (type_a->is_vector() &&
+ type_b->is_vector() &&
+ type_a->vector_elements != type_b->vector_elements) {
+ _mesa_glsl_error(loc, state, "operands of `%s' cannot be vectors of "
+ "different sizes", ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+
+ /* "If one operand is a scalar and the other a vector, the scalar is
+ * applied component-wise to the vector, resulting in the same type as
+ * the vector. The fundamental types of the operands [...] will be the
+ * resulting fundamental type."
+ */
+ if (type_a->is_scalar())
+ return type_b;
+ else
+ return type_a;
+}
+
+static const struct glsl_type *
+modulus_result_type(ir_rvalue * &value_a, ir_rvalue * &value_b,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ const glsl_type *type_a = value_a->type;
+ const glsl_type *type_b = value_b->type;
+
+ if (!state->EXT_gpu_shader4_enable &&
+ !state->check_version(130, 300, loc, "operator '%%' is reserved")) {
+ return glsl_type::error_type;
+ }
+
+ /* Section 5.9 (Expressions) of the GLSL 4.00 specification says:
+ *
+ * "The operator modulus (%) operates on signed or unsigned integers or
+ * integer vectors."
+ */
+ if (!type_a->is_integer_32_64()) {
+ _mesa_glsl_error(loc, state, "LHS of operator %% must be an integer");
+ return glsl_type::error_type;
+ }
+ if (!type_b->is_integer_32_64()) {
+ _mesa_glsl_error(loc, state, "RHS of operator %% must be an integer");
+ return glsl_type::error_type;
+ }
+
+ /* "If the fundamental types in the operands do not match, then the
+ * conversions from section 4.1.10 "Implicit Conversions" are applied
+ * to create matching types."
+ *
+ * Note that GLSL 4.00 (and GL_ARB_gpu_shader5) introduced implicit
+ * int -> uint conversion rules. Prior to that, there were no implicit
+ * conversions. So it's harmless to apply them universally - no implicit
+ * conversions will exist. If the types don't match, we'll receive false,
+ * and raise an error, satisfying the GLSL 1.50 spec, page 56:
+ *
+ * "The operand types must both be signed or unsigned."
+ */
+ if (!apply_implicit_conversion(type_a, value_b, state) &&
+ !apply_implicit_conversion(type_b, value_a, state)) {
+ _mesa_glsl_error(loc, state,
+ "could not implicitly convert operands to "
+ "modulus (%%) operator");
+ return glsl_type::error_type;
+ }
+ type_a = value_a->type;
+ type_b = value_b->type;
+
+ /* "The operands cannot be vectors of differing size. If one operand is
+ * a scalar and the other vector, then the scalar is applied component-
+ * wise to the vector, resulting in the same type as the vector. If both
+ * are vectors of the same size, the result is computed component-wise."
+ */
+ if (type_a->is_vector()) {
+ if (!type_b->is_vector()
+ || (type_a->vector_elements == type_b->vector_elements))
+ return type_a;
+ } else
+ return type_b;
+
+ /* "The operator modulus (%) is not defined for any other data types
+ * (non-integer types)."
+ */
+ _mesa_glsl_error(loc, state, "type mismatch");
+ return glsl_type::error_type;
+}
+
+
+static const struct glsl_type *
+relational_result_type(ir_rvalue * &value_a, ir_rvalue * &value_b,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ const glsl_type *type_a = value_a->type;
+ const glsl_type *type_b = value_b->type;
+
+ /* From GLSL 1.50 spec, page 56:
+ * "The relational operators greater than (>), less than (<), greater
+ * than or equal (>=), and less than or equal (<=) operate only on
+ * scalar integer and scalar floating-point expressions."
+ */
+ if (!type_a->is_numeric()
+ || !type_b->is_numeric()
+ || !type_a->is_scalar()
+ || !type_b->is_scalar()) {
+ _mesa_glsl_error(loc, state,
+ "operands to relational operators must be scalar and "
+ "numeric");
+ return glsl_type::error_type;
+ }
+
+ /* "Either the operands' types must match, or the conversions from
+ * Section 4.1.10 "Implicit Conversions" will be applied to the integer
+ * operand, after which the types must match."
+ */
+ if (!apply_implicit_conversion(type_a, value_b, state)
+ && !apply_implicit_conversion(type_b, value_a, state)) {
+ _mesa_glsl_error(loc, state,
+ "could not implicitly convert operands to "
+ "relational operator");
+ return glsl_type::error_type;
+ }
+ type_a = value_a->type;
+ type_b = value_b->type;
+
+ if (type_a->base_type != type_b->base_type) {
+ _mesa_glsl_error(loc, state, "base type mismatch");
+ return glsl_type::error_type;
+ }
+
+ /* "The result is scalar Boolean."
+ */
+ return glsl_type::bool_type;
+}
+
+/**
+ * \brief Return the result type of a bit-shift operation.
+ *
+ * If the given types to the bit-shift operator are invalid, return
+ * glsl_type::error_type.
+ *
+ * \param type_a Type of LHS of bit-shift op
+ * \param type_b Type of RHS of bit-shift op
+ */
+static const struct glsl_type *
+shift_result_type(const struct glsl_type *type_a,
+ const struct glsl_type *type_b,
+ ast_operators op,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ if (!state->check_bitwise_operations_allowed(loc)) {
+ return glsl_type::error_type;
+ }
+
+ /* From page 50 (page 56 of the PDF) of the GLSL 1.30 spec:
+ *
+ * "The shift operators (<<) and (>>). For both operators, the operands
+ * must be signed or unsigned integers or integer vectors. One operand
+ * can be signed while the other is unsigned."
+ */
+ if (!type_a->is_integer_32_64()) {
+ _mesa_glsl_error(loc, state, "LHS of operator %s must be an integer or "
+ "integer vector", ast_expression::operator_string(op));
+      return glsl_type::error_type;
+   }
+ if (!type_b->is_integer_32()) {
+ _mesa_glsl_error(loc, state, "RHS of operator %s must be an integer or "
+ "integer vector", ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+
+ /* "If the first operand is a scalar, the second operand has to be
+ * a scalar as well."
+ */
+ if (type_a->is_scalar() && !type_b->is_scalar()) {
+ _mesa_glsl_error(loc, state, "if the first operand of %s is scalar, the "
+ "second must be scalar as well",
+ ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+
+ /* If both operands are vectors, check that they have same number of
+ * elements.
+ */
+ if (type_a->is_vector() &&
+ type_b->is_vector() &&
+ type_a->vector_elements != type_b->vector_elements) {
+ _mesa_glsl_error(loc, state, "vector operands to operator %s must "
+ "have same number of elements",
+ ast_expression::operator_string(op));
+ return glsl_type::error_type;
+ }
+
+ /* "In all cases, the resulting type will be the same type as the left
+ * operand."
+ */
+ return type_a;
+}
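+
+/* Illustrative shifts under the rules above (standard GLSL 1.30 semantics):
+ *
+ *    ivec2(4) >> 1         // ok: scalar RHS, result type is ivec2 (the LHS)
+ *    1 << ivec2(2)         // error: scalar LHS requires a scalar RHS
+ *    ivec2(1) << ivec3(1)  // error: vector operands of differing size
+ */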
+
+/**
+ * Returns the innermost array index expression in an rvalue tree.
+ * This is the largest indexing level -- if an array of blocks, then
+ * it is the block index rather than an indexing expression for an
+ * array-typed member of an array of blocks.
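+ *
+ * For example, for the l-value blocks[i].member[j] this returns the block
+ * index expression i, not the inner index j.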
+ */
+static ir_rvalue *
+find_innermost_array_index(ir_rvalue *rv)
+{
+ ir_dereference_array *last = NULL;
+ while (rv) {
+ if (rv->as_dereference_array()) {
+ last = rv->as_dereference_array();
+ rv = last->array;
+ } else if (rv->as_dereference_record())
+ rv = rv->as_dereference_record()->record;
+ else if (rv->as_swizzle())
+ rv = rv->as_swizzle()->val;
+ else
+ rv = NULL;
+ }
+
+ if (last)
+ return last->array_index;
+
+ return NULL;
+}
+
+/**
+ * Validates that a value can be assigned to a location with a specified type
+ *
+ * Validates that \c rhs can be assigned to some location. If the types are
+ * not an exact match but an automatic conversion is possible, \c rhs will be
+ * converted.
+ *
+ * \return
+ * \c NULL if \c rhs cannot be assigned to a location with type \c lhs_type.
+ * Otherwise the actual RHS to be assigned will be returned. This may be
+ * \c rhs, or it may be \c rhs after some type conversion.
+ *
+ * \note
+ * In addition to being used for assignments, this function is used to
+ * type-check return values.
+ */
+static ir_rvalue *
+validate_assignment(struct _mesa_glsl_parse_state *state,
+ YYLTYPE loc, ir_rvalue *lhs,
+ ir_rvalue *rhs, bool is_initializer)
+{
+ /* If there is already some error in the RHS, just return it. Anything
+ * else will lead to an avalanche of error message back to the user.
+ */
+ if (rhs->type->is_error())
+ return rhs;
+
+ /* In the Tessellation Control Shader:
+ * If a per-vertex output variable is used as an l-value, it is an error
+ * if the expression indicating the vertex number is not the identifier
+ * `gl_InvocationID`.
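+    *
+    * For example, `gl_out[gl_InvocationID].gl_Position = p;` is allowed,
+    * while `gl_out[0].gl_Position = p;` is rejected by the check below.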
+ */
+ if (state->stage == MESA_SHADER_TESS_CTRL && !lhs->type->is_error()) {
+ ir_variable *var = lhs->variable_referenced();
+ if (var && var->data.mode == ir_var_shader_out && !var->data.patch) {
+ ir_rvalue *index = find_innermost_array_index(lhs);
+ ir_variable *index_var = index ? index->variable_referenced() : NULL;
+ if (!index_var || strcmp(index_var->name, "gl_InvocationID") != 0) {
+ _mesa_glsl_error(&loc, state,
+ "Tessellation control shader outputs can only "
+ "be indexed by gl_InvocationID");
+ return NULL;
+ }
+ }
+ }
+
+ /* If the types are identical, the assignment can trivially proceed.
+ */
+ if (rhs->type == lhs->type)
+ return rhs;
+
+ /* If the array element types are the same and the LHS is unsized,
+ * the assignment is okay for initializers embedded in variable
+ * declarations.
+ *
+ * Note: Whole-array assignments are not permitted in GLSL 1.10, but this
+ * is handled by ir_dereference::is_lvalue.
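+    *
+    * For example, the declaration `float a[] = float[](1.0, 2.0, 3.0);`
+    * reaches this point with an unsized LHS and is accepted; the variable
+    * is then sized as float[3] by the caller.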
+ */
+ const glsl_type *lhs_t = lhs->type;
+ const glsl_type *rhs_t = rhs->type;
+ bool unsized_array = false;
+ while(lhs_t->is_array()) {
+ if (rhs_t == lhs_t)
+ break; /* the rest of the inner arrays match so break out early */
+ if (!rhs_t->is_array()) {
+ unsized_array = false;
+ break; /* number of dimensions mismatch */
+ }
+ if (lhs_t->length == rhs_t->length) {
+ lhs_t = lhs_t->fields.array;
+ rhs_t = rhs_t->fields.array;
+ continue;
+ } else if (lhs_t->is_unsized_array()) {
+ unsized_array = true;
+ } else {
+ unsized_array = false;
+ break; /* sized array mismatch */
+ }
+ lhs_t = lhs_t->fields.array;
+ rhs_t = rhs_t->fields.array;
+ }
+ if (unsized_array) {
+ if (is_initializer) {
+ if (rhs->type->get_scalar_type() == lhs->type->get_scalar_type())
+ return rhs;
+ } else {
+ _mesa_glsl_error(&loc, state,
+ "implicitly sized arrays cannot be assigned");
+ return NULL;
+ }
+ }
+
+ /* Check for implicit conversion in GLSL 1.20 */
+ if (apply_implicit_conversion(lhs->type, rhs, state)) {
+ if (rhs->type == lhs->type)
+ return rhs;
+ }
+
+ _mesa_glsl_error(&loc, state,
+ "%s of type %s cannot be assigned to "
+ "variable of type %s",
+ is_initializer ? "initializer" : "value",
+ rhs->type->name, lhs->type->name);
+
+ return NULL;
+}
+
+static void
+mark_whole_array_access(ir_rvalue *access)
+{
+ ir_dereference_variable *deref = access->as_dereference_variable();
+
+ if (deref && deref->var) {
+ deref->var->data.max_array_access = deref->type->length - 1;
+ }
+}
+
+static bool
+do_assignment(exec_list *instructions, struct _mesa_glsl_parse_state *state,
+ const char *non_lvalue_description,
+ ir_rvalue *lhs, ir_rvalue *rhs,
+ ir_rvalue **out_rvalue, bool needs_rvalue,
+ bool is_initializer,
+ YYLTYPE lhs_loc)
+{
+ void *ctx = state;
+ bool error_emitted = (lhs->type->is_error() || rhs->type->is_error());
+
+ ir_variable *lhs_var = lhs->variable_referenced();
+ if (lhs_var)
+ lhs_var->data.assigned = true;
+
+ if (!error_emitted) {
+ if (non_lvalue_description != NULL) {
+ _mesa_glsl_error(&lhs_loc, state,
+ "assignment to %s",
+ non_lvalue_description);
+ error_emitted = true;
+ } else if (lhs_var != NULL && (lhs_var->data.read_only ||
+ (lhs_var->data.mode == ir_var_shader_storage &&
+ lhs_var->data.memory_read_only))) {
+ /* We can have memory_read_only set on both images and buffer variables,
+ * but in the former there is a distinction between assignments to
+ * the variable itself (read_only) and to the memory they point to
+ * (memory_read_only), while in the case of buffer variables there is
+ * no such distinction, that is why this check here is limited to
+ * buffer variables alone.
+ */
+ _mesa_glsl_error(&lhs_loc, state,
+ "assignment to read-only variable '%s'",
+ lhs_var->name);
+ error_emitted = true;
+ } else if (lhs->type->is_array() &&
+ !state->check_version(120, 300, &lhs_loc,
+ "whole array assignment forbidden")) {
+ /* From page 32 (page 38 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "Other binary or unary expressions, non-dereferenced
+ * arrays, function names, swizzles with repeated fields,
+ * and constants cannot be l-values."
+ *
+ * The restriction on arrays is lifted in GLSL 1.20 and GLSL ES 3.00.
+ */
+ error_emitted = true;
+ } else if (!lhs->is_lvalue(state)) {
+ _mesa_glsl_error(& lhs_loc, state, "non-lvalue in assignment");
+ error_emitted = true;
+ }
+ }
+
+ ir_rvalue *new_rhs =
+ validate_assignment(state, lhs_loc, lhs, rhs, is_initializer);
+ if (new_rhs != NULL) {
+ rhs = new_rhs;
+
+ /* If the LHS array was not declared with a size, it takes it size from
+ * the RHS. If the LHS is an l-value and a whole array, it must be a
+ * dereference of a variable. Any other case would require that the LHS
+ * is either not an l-value or not a whole array.
+ */
+ if (lhs->type->is_unsized_array()) {
+ ir_dereference *const d = lhs->as_dereference();
+
+ assert(d != NULL);
+
+ ir_variable *const var = d->variable_referenced();
+
+ assert(var != NULL);
+
+ if (var->data.max_array_access >= rhs->type->array_size()) {
+ /* FINISHME: This should actually log the location of the RHS. */
+ _mesa_glsl_error(& lhs_loc, state, "array size must be > %u due to "
+ "previous access",
+ var->data.max_array_access);
+ }
+
+ var->type = glsl_type::get_array_instance(lhs->type->fields.array,
+ rhs->type->array_size());
+ d->type = var->type;
+ }
+ if (lhs->type->is_array()) {
+ mark_whole_array_access(rhs);
+ mark_whole_array_access(lhs);
+ }
+ } else {
+ error_emitted = true;
+ }
+
+ /* Most callers of do_assignment (assign, add_assign, pre_inc/dec,
+ * but not post_inc) need the converted assigned value as an rvalue
+ * to handle things like:
+ *
+ * i = j += 1;
+ */
+ if (needs_rvalue) {
+ ir_rvalue *rvalue;
+ if (!error_emitted) {
+ ir_variable *var = new(ctx) ir_variable(rhs->type, "assignment_tmp",
+ ir_var_temporary);
+ instructions->push_tail(var);
+ instructions->push_tail(assign(var, rhs));
+
+ ir_dereference_variable *deref_var =
+ new(ctx) ir_dereference_variable(var);
+ instructions->push_tail(new(ctx) ir_assignment(lhs, deref_var));
+ rvalue = new(ctx) ir_dereference_variable(var);
+ } else {
+ rvalue = ir_rvalue::error_value(ctx);
+ }
+ *out_rvalue = rvalue;
+ } else {
+ if (!error_emitted)
+ instructions->push_tail(new(ctx) ir_assignment(lhs, rhs));
+ *out_rvalue = NULL;
+ }
+
+ return error_emitted;
+}
+
+static ir_rvalue *
+get_lvalue_copy(exec_list *instructions, ir_rvalue *lvalue)
+{
+ void *ctx = ralloc_parent(lvalue);
+ ir_variable *var;
+
+ var = new(ctx) ir_variable(lvalue->type, "_post_incdec_tmp",
+ ir_var_temporary);
+ instructions->push_tail(var);
+
+ instructions->push_tail(new(ctx) ir_assignment(new(ctx) ir_dereference_variable(var),
+ lvalue));
+
+ return new(ctx) ir_dereference_variable(var);
+}
+
+
+ir_rvalue *
+ast_node::hir(exec_list *instructions, struct _mesa_glsl_parse_state *state)
+{
+ (void) instructions;
+ (void) state;
+
+ return NULL;
+}
+
+bool
+ast_node::has_sequence_subexpression() const
+{
+ return false;
+}
+
+void
+ast_node::set_is_lhs(bool /* new_value */)
+{
+}
+
+void
+ast_function_expression::hir_no_rvalue(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ (void)hir(instructions, state);
+}
+
+void
+ast_aggregate_initializer::hir_no_rvalue(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ (void)hir(instructions, state);
+}
+
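+/* Build the IR for an aggregate comparison.  Scalars and vectors become a
+ * single ir_expression; arrays and structs are compared element by element,
+ * with the per-element results joined by logical-and for ir_binop_all_equal
+ * and logical-or for ir_binop_any_nequal.  For example, `a == b` for two
+ * float[2] values lowers roughly to:
+ *
+ *    (a[0] == b[0]) && (a[1] == b[1])
+ */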
+static ir_rvalue *
+do_comparison(void *mem_ctx, int operation, ir_rvalue *op0, ir_rvalue *op1)
+{
+ int join_op;
+ ir_rvalue *cmp = NULL;
+
+ if (operation == ir_binop_all_equal)
+ join_op = ir_binop_logic_and;
+ else
+ join_op = ir_binop_logic_or;
+
+ switch (op0->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ return new(mem_ctx) ir_expression(operation, op0, op1);
+
+ case GLSL_TYPE_ARRAY: {
+ for (unsigned int i = 0; i < op0->type->length; i++) {
+ ir_rvalue *e0, *e1, *result;
+
+ e0 = new(mem_ctx) ir_dereference_array(op0->clone(mem_ctx, NULL),
+ new(mem_ctx) ir_constant(i));
+ e1 = new(mem_ctx) ir_dereference_array(op1->clone(mem_ctx, NULL),
+ new(mem_ctx) ir_constant(i));
+ result = do_comparison(mem_ctx, operation, e0, e1);
+
+ if (cmp) {
+ cmp = new(mem_ctx) ir_expression(join_op, cmp, result);
+ } else {
+ cmp = result;
+ }
+ }
+
+ mark_whole_array_access(op0);
+ mark_whole_array_access(op1);
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT: {
+ for (unsigned int i = 0; i < op0->type->length; i++) {
+ ir_rvalue *e0, *e1, *result;
+ const char *field_name = op0->type->fields.structure[i].name;
+
+ e0 = new(mem_ctx) ir_dereference_record(op0->clone(mem_ctx, NULL),
+ field_name);
+ e1 = new(mem_ctx) ir_dereference_record(op1->clone(mem_ctx, NULL),
+ field_name);
+ result = do_comparison(mem_ctx, operation, e0, e1);
+
+ if (cmp) {
+ cmp = new(mem_ctx) ir_expression(join_op, cmp, result);
+ } else {
+ cmp = result;
+ }
+ }
+ break;
+ }
+
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_SUBROUTINE:
+ case GLSL_TYPE_FUNCTION:
+ /* I assume a comparison of a struct containing a sampler just
+ * ignores the sampler present in the type.
+ */
+ break;
+ }
+
+ if (cmp == NULL)
+ cmp = new(mem_ctx) ir_constant(true);
+
+ return cmp;
+}
+
+/* For logical operations, we want to ensure that the operands are
+ * scalar booleans. If one isn't, emit an error and return a constant
+ * boolean to avoid triggering cascading error messages.
+ */
+static ir_rvalue *
+get_scalar_boolean_operand(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state,
+ ast_expression *parent_expr,
+ int operand,
+ const char *operand_name,
+ bool *error_emitted)
+{
+ ast_expression *expr = parent_expr->subexpressions[operand];
+ void *ctx = state;
+ ir_rvalue *val = expr->hir(instructions, state);
+
+ if (val->type->is_boolean() && val->type->is_scalar())
+ return val;
+
+ if (!*error_emitted) {
+ YYLTYPE loc = expr->get_location();
+ _mesa_glsl_error(&loc, state, "%s of `%s' must be scalar boolean",
+ operand_name,
+ parent_expr->operator_string(parent_expr->oper));
+ *error_emitted = true;
+ }
+
+ return new(ctx) ir_constant(true);
+}
+
+/**
+ * If name refers to a builtin array whose maximum allowed size is less than
+ * size, report an error.
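+ *
+ * For example, redeclaring `varying vec4 gl_TexCoord[N];` with N greater
+ * than gl_MaxTextureCoords triggers the first error below.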
+ */
+void
+check_builtin_array_max_size(const char *name, unsigned size,
+ YYLTYPE loc, struct _mesa_glsl_parse_state *state)
+{
+ if ((strcmp("gl_TexCoord", name) == 0)
+ && (size > state->Const.MaxTextureCoords)) {
+ /* From page 54 (page 60 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "The size [of gl_TexCoord] can be at most
+ * gl_MaxTextureCoords."
+ */
+ _mesa_glsl_error(&loc, state, "`gl_TexCoord' array size cannot "
+ "be larger than gl_MaxTextureCoords (%u)",
+ state->Const.MaxTextureCoords);
+ } else if (strcmp("gl_ClipDistance", name) == 0) {
+ state->clip_dist_size = size;
+ if (size + state->cull_dist_size > state->Const.MaxClipPlanes) {
+ /* From section 7.1 (Vertex Shader Special Variables) of the
+ * GLSL 1.30 spec:
+ *
+ * "The gl_ClipDistance array is predeclared as unsized and
+ * must be sized by the shader either redeclaring it with a
+ * size or indexing it only with integral constant
+ * expressions. ... The size can be at most
+ * gl_MaxClipDistances."
+ */
+ _mesa_glsl_error(&loc, state, "`gl_ClipDistance' array size cannot "
+ "be larger than gl_MaxClipDistances (%u)",
+ state->Const.MaxClipPlanes);
+ }
+ } else if (strcmp("gl_CullDistance", name) == 0) {
+ state->cull_dist_size = size;
+ if (size + state->clip_dist_size > state->Const.MaxClipPlanes) {
+ /* From the ARB_cull_distance spec:
+ *
+ * "The gl_CullDistance array is predeclared as unsized and
+ * must be sized by the shader either redeclaring it with
+ * a size or indexing it only with integral constant
+ * expressions. The size determines the number and set of
+ * enabled cull distances and can be at most
+ * gl_MaxCullDistances."
+ */
+ _mesa_glsl_error(&loc, state, "`gl_CullDistance' array size cannot "
+ "be larger than gl_MaxCullDistances (%u)",
+ state->Const.MaxClipPlanes);
+ }
+ }
+}
+
+/**
+ * Create the constant 1, of a type which is appropriate for incrementing and
+ * decrementing values of the given GLSL type. For example, if type is vec4,
+ * this creates a constant value of 1.0 having type float.
+ *
+ * If the given type is invalid for increment and decrement operators, return
+ * a floating point 1--the error will be detected later.
+ */
+static ir_rvalue *
+constant_one_for_inc_dec(void *ctx, const glsl_type *type)
+{
+ switch (type->base_type) {
+ case GLSL_TYPE_UINT:
+ return new(ctx) ir_constant((unsigned) 1);
+ case GLSL_TYPE_INT:
+ return new(ctx) ir_constant(1);
+ case GLSL_TYPE_UINT64:
+ return new(ctx) ir_constant((uint64_t) 1);
+ case GLSL_TYPE_INT64:
+ return new(ctx) ir_constant((int64_t) 1);
+ default:
+ case GLSL_TYPE_FLOAT:
+ return new(ctx) ir_constant(1.0f);
+ }
+}
+
+ir_rvalue *
+ast_expression::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ return do_hir(instructions, state, true);
+}
+
+void
+ast_expression::hir_no_rvalue(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ do_hir(instructions, state, false);
+}
+
+void
+ast_expression::set_is_lhs(bool new_value)
+{
+   /* is_lhs is tracked only to print "variable used uninitialized" warnings;
+    * if we lack an identifier we can just skip it.
+ */
+ if (this->primary_expression.identifier == NULL)
+ return;
+
+ this->is_lhs = new_value;
+
+ /* We need to go through the subexpressions tree to cover cases like
+ * ast_field_selection
+ */
+ if (this->subexpressions[0] != NULL)
+ this->subexpressions[0]->set_is_lhs(new_value);
+}
+
+ir_rvalue *
+ast_expression::do_hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state,
+ bool needs_rvalue)
+{
+ void *ctx = state;
+ static const int operations[AST_NUM_OPERATORS] = {
+ -1, /* ast_assign doesn't convert to ir_expression. */
+ -1, /* ast_plus doesn't convert to ir_expression. */
+ ir_unop_neg,
+ ir_binop_add,
+ ir_binop_sub,
+ ir_binop_mul,
+ ir_binop_div,
+ ir_binop_mod,
+ ir_binop_lshift,
+ ir_binop_rshift,
+ ir_binop_less,
+ ir_binop_less, /* This is correct. See the ast_greater case below. */
+ ir_binop_gequal, /* This is correct. See the ast_lequal case below. */
+ ir_binop_gequal,
+ ir_binop_all_equal,
+ ir_binop_any_nequal,
+ ir_binop_bit_and,
+ ir_binop_bit_xor,
+ ir_binop_bit_or,
+ ir_unop_bit_not,
+ ir_binop_logic_and,
+ ir_binop_logic_xor,
+ ir_binop_logic_or,
+ ir_unop_logic_not,
+
+ /* Note: The following block of expression types actually convert
+ * to multiple IR instructions.
+ */
+ ir_binop_mul, /* ast_mul_assign */
+ ir_binop_div, /* ast_div_assign */
+ ir_binop_mod, /* ast_mod_assign */
+ ir_binop_add, /* ast_add_assign */
+ ir_binop_sub, /* ast_sub_assign */
+ ir_binop_lshift, /* ast_ls_assign */
+ ir_binop_rshift, /* ast_rs_assign */
+ ir_binop_bit_and, /* ast_and_assign */
+ ir_binop_bit_xor, /* ast_xor_assign */
+ ir_binop_bit_or, /* ast_or_assign */
+
+ -1, /* ast_conditional doesn't convert to ir_expression. */
+ ir_binop_add, /* ast_pre_inc. */
+ ir_binop_sub, /* ast_pre_dec. */
+ ir_binop_add, /* ast_post_inc. */
+ ir_binop_sub, /* ast_post_dec. */
+ -1, /* ast_field_selection doesn't conv to ir_expression. */
+ -1, /* ast_array_index doesn't convert to ir_expression. */
+ -1, /* ast_function_call doesn't conv to ir_expression. */
+ -1, /* ast_identifier doesn't convert to ir_expression. */
+ -1, /* ast_int_constant doesn't convert to ir_expression. */
+ -1, /* ast_uint_constant doesn't conv to ir_expression. */
+ -1, /* ast_float_constant doesn't conv to ir_expression. */
+ -1, /* ast_bool_constant doesn't conv to ir_expression. */
+ -1, /* ast_sequence doesn't convert to ir_expression. */
+ -1, /* ast_aggregate shouldn't ever even get here. */
+ };
+ ir_rvalue *result = NULL;
+ ir_rvalue *op[3];
+ const struct glsl_type *type, *orig_type;
+ bool error_emitted = false;
+ YYLTYPE loc;
+
+ loc = this->get_location();
+
+ switch (this->oper) {
+ case ast_aggregate:
+ unreachable("ast_aggregate: Should never get here.");
+
+ case ast_assign: {
+ this->subexpressions[0]->set_is_lhs(true);
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0], op[1], &result, needs_rvalue, false,
+ this->subexpressions[0]->get_location());
+ break;
+ }
+
+ case ast_plus:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+
+ type = unary_arithmetic_result_type(op[0]->type, state, & loc);
+
+ error_emitted = type->is_error();
+
+ result = op[0];
+ break;
+
+ case ast_neg:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+
+ type = unary_arithmetic_result_type(op[0]->type, state, & loc);
+
+ error_emitted = type->is_error();
+
+ result = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], NULL);
+ break;
+
+ case ast_add:
+ case ast_sub:
+ case ast_mul:
+ case ast_div:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ type = arithmetic_result_type(op[0], op[1],
+ (this->oper == ast_mul),
+ state, & loc);
+ error_emitted = type->is_error();
+
+ result = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+ break;
+
+ case ast_mod:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ type = modulus_result_type(op[0], op[1], state, &loc);
+
+ assert(operations[this->oper] == ir_binop_mod);
+
+ result = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+ error_emitted = type->is_error();
+ break;
+
+ case ast_lshift:
+ case ast_rshift:
+ if (!state->check_bitwise_operations_allowed(&loc)) {
+ error_emitted = true;
+ }
+
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+ type = shift_result_type(op[0]->type, op[1]->type, this->oper, state,
+ &loc);
+ result = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+ error_emitted = op[0]->type->is_error() || op[1]->type->is_error();
+ break;
+
+ case ast_less:
+ case ast_greater:
+ case ast_lequal:
+ case ast_gequal:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ type = relational_result_type(op[0], op[1], state, & loc);
+
+ /* The relational operators must either generate an error or result
+ * in a scalar boolean. See page 57 of the GLSL 1.50 spec.
+ */
+ assert(type->is_error()
+ || (type->is_boolean() && type->is_scalar()));
+
+ /* Like NIR, GLSL IR does not have opcodes for > or <=. Instead, swap
+ * the arguments and use < or >=.
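+       *
+       * For example, `a > b` is emitted as (b < a) and `a <= b` as (b >= a).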
+ */
+ if (this->oper == ast_greater || this->oper == ast_lequal) {
+ ir_rvalue *const tmp = op[0];
+ op[0] = op[1];
+ op[1] = tmp;
+ }
+
+ result = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+ error_emitted = type->is_error();
+ break;
+
+ case ast_nequal:
+ case ast_equal:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ /* From page 58 (page 64 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "The equality operators equal (==), and not equal (!=)
+ * operate on all types. They result in a scalar Boolean. If
+ * the operand types do not match, then there must be a
+ * conversion from Section 4.1.10 "Implicit Conversions"
+ * applied to one operand that can make them match, in which
+ * case this conversion is done."
+ */
+
+ if (op[0]->type == glsl_type::void_type || op[1]->type == glsl_type::void_type) {
+ _mesa_glsl_error(& loc, state, "`%s': wrong operand types: "
+ "no operation `%1$s' exists that takes a left-hand "
+ "operand of type 'void' or a right operand of type "
+ "'void'", (this->oper == ast_equal) ? "==" : "!=");
+ error_emitted = true;
+ } else if ((!apply_implicit_conversion(op[0]->type, op[1], state)
+ && !apply_implicit_conversion(op[1]->type, op[0], state))
+ || (op[0]->type != op[1]->type)) {
+ _mesa_glsl_error(& loc, state, "operands of `%s' must have the same "
+ "type", (this->oper == ast_equal) ? "==" : "!=");
+ error_emitted = true;
+ } else if ((op[0]->type->is_array() || op[1]->type->is_array()) &&
+ !state->check_version(120, 300, &loc,
+ "array comparisons forbidden")) {
+ error_emitted = true;
+ } else if ((op[0]->type->contains_subroutine() ||
+ op[1]->type->contains_subroutine())) {
+ _mesa_glsl_error(&loc, state, "subroutine comparisons forbidden");
+ error_emitted = true;
+ } else if ((op[0]->type->contains_opaque() ||
+ op[1]->type->contains_opaque())) {
+ _mesa_glsl_error(&loc, state, "opaque type comparisons forbidden");
+ error_emitted = true;
+ }
+
+ if (error_emitted) {
+ result = new(ctx) ir_constant(false);
+ } else {
+ result = do_comparison(ctx, operations[this->oper], op[0], op[1]);
+ assert(result->type == glsl_type::bool_type);
+ }
+ break;
+
+ case ast_bit_and:
+ case ast_bit_xor:
+ case ast_bit_or:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+ type = bit_logic_result_type(op[0], op[1], this->oper, state, &loc);
+ result = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+ error_emitted = op[0]->type->is_error() || op[1]->type->is_error();
+ break;
+
+ case ast_bit_not:
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+
+ if (!state->check_bitwise_operations_allowed(&loc)) {
+ error_emitted = true;
+ }
+
+ if (!op[0]->type->is_integer_32_64()) {
+ _mesa_glsl_error(&loc, state, "operand of `~' must be an integer");
+ error_emitted = true;
+ }
+
+ type = error_emitted ? glsl_type::error_type : op[0]->type;
+ result = new(ctx) ir_expression(ir_unop_bit_not, type, op[0], NULL);
+ break;
+
+ case ast_logic_and: {
+ exec_list rhs_instructions;
+ op[0] = get_scalar_boolean_operand(instructions, state, this, 0,
+ "LHS", &error_emitted);
+ op[1] = get_scalar_boolean_operand(&rhs_instructions, state, this, 1,
+ "RHS", &error_emitted);
+
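+      /* When the RHS has instructions of its own (side effects), `a && b`
+       * must short-circuit.  The lowering below is equivalent to:
+       *
+       *    bool and_tmp;
+       *    if (a) and_tmp = b; else and_tmp = false;
+       */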
+ if (rhs_instructions.is_empty()) {
+ result = new(ctx) ir_expression(ir_binop_logic_and, op[0], op[1]);
+ } else {
+ ir_variable *const tmp = new(ctx) ir_variable(glsl_type::bool_type,
+ "and_tmp",
+ ir_var_temporary);
+ instructions->push_tail(tmp);
+
+ ir_if *const stmt = new(ctx) ir_if(op[0]);
+ instructions->push_tail(stmt);
+
+ stmt->then_instructions.append_list(&rhs_instructions);
+ ir_dereference *const then_deref = new(ctx) ir_dereference_variable(tmp);
+ ir_assignment *const then_assign =
+ new(ctx) ir_assignment(then_deref, op[1]);
+ stmt->then_instructions.push_tail(then_assign);
+
+ ir_dereference *const else_deref = new(ctx) ir_dereference_variable(tmp);
+ ir_assignment *const else_assign =
+ new(ctx) ir_assignment(else_deref, new(ctx) ir_constant(false));
+ stmt->else_instructions.push_tail(else_assign);
+
+ result = new(ctx) ir_dereference_variable(tmp);
+ }
+ break;
+ }
+
+ case ast_logic_or: {
+ exec_list rhs_instructions;
+ op[0] = get_scalar_boolean_operand(instructions, state, this, 0,
+ "LHS", &error_emitted);
+ op[1] = get_scalar_boolean_operand(&rhs_instructions, state, this, 1,
+ "RHS", &error_emitted);
+
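+      /* Mirror image of the `&&` case: `a || b` lowers to
+       *
+       *    bool or_tmp;
+       *    if (a) or_tmp = true; else or_tmp = b;
+       */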
+ if (rhs_instructions.is_empty()) {
+ result = new(ctx) ir_expression(ir_binop_logic_or, op[0], op[1]);
+ } else {
+ ir_variable *const tmp = new(ctx) ir_variable(glsl_type::bool_type,
+ "or_tmp",
+ ir_var_temporary);
+ instructions->push_tail(tmp);
+
+ ir_if *const stmt = new(ctx) ir_if(op[0]);
+ instructions->push_tail(stmt);
+
+ ir_dereference *const then_deref = new(ctx) ir_dereference_variable(tmp);
+ ir_assignment *const then_assign =
+ new(ctx) ir_assignment(then_deref, new(ctx) ir_constant(true));
+ stmt->then_instructions.push_tail(then_assign);
+
+ stmt->else_instructions.append_list(&rhs_instructions);
+ ir_dereference *const else_deref = new(ctx) ir_dereference_variable(tmp);
+ ir_assignment *const else_assign =
+ new(ctx) ir_assignment(else_deref, op[1]);
+ stmt->else_instructions.push_tail(else_assign);
+
+ result = new(ctx) ir_dereference_variable(tmp);
+ }
+ break;
+ }
+
+ case ast_logic_xor:
+ /* From page 33 (page 39 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "The logical binary operators and (&&), or ( | | ), and
+ * exclusive or (^^). They operate only on two Boolean
+ * expressions and result in a Boolean expression."
+ */
+ op[0] = get_scalar_boolean_operand(instructions, state, this, 0, "LHS",
+ &error_emitted);
+ op[1] = get_scalar_boolean_operand(instructions, state, this, 1, "RHS",
+ &error_emitted);
+
+ result = new(ctx) ir_expression(operations[this->oper], glsl_type::bool_type,
+ op[0], op[1]);
+ break;
+
+ case ast_logic_not:
+ op[0] = get_scalar_boolean_operand(instructions, state, this, 0,
+ "operand", &error_emitted);
+
+ result = new(ctx) ir_expression(operations[this->oper], glsl_type::bool_type,
+ op[0], NULL);
+ break;
+
+ case ast_mul_assign:
+ case ast_div_assign:
+ case ast_add_assign:
+ case ast_sub_assign: {
+ this->subexpressions[0]->set_is_lhs(true);
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ orig_type = op[0]->type;
+
+ /* Break out if operand types were not parsed successfully. */
+ if ((op[0]->type == glsl_type::error_type ||
+ op[1]->type == glsl_type::error_type)) {
+ error_emitted = true;
+ break;
+ }
+
+ type = arithmetic_result_type(op[0], op[1],
+ (this->oper == ast_mul_assign),
+ state, & loc);
+
+ if (type != orig_type) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert "
+ "%s to %s", type->name, orig_type->name);
+ type = glsl_type::error_type;
+ }
+
+ ir_rvalue *temp_rhs = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0]->clone(ctx, NULL), temp_rhs,
+ &result, needs_rvalue, false,
+ this->subexpressions[0]->get_location());
+
+ /* GLSL 1.10 does not allow array assignment. However, we don't have to
+ * explicitly test for this because none of the binary expression
+ * operators allow array operands either.
+ */
+
+ break;
+ }
+
+ case ast_mod_assign: {
+ this->subexpressions[0]->set_is_lhs(true);
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ orig_type = op[0]->type;
+ type = modulus_result_type(op[0], op[1], state, &loc);
+
+ if (type != orig_type) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert "
+ "%s to %s", type->name, orig_type->name);
+ type = glsl_type::error_type;
+ }
+
+ assert(operations[this->oper] == ir_binop_mod);
+
+ ir_rvalue *temp_rhs;
+ temp_rhs = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0]->clone(ctx, NULL), temp_rhs,
+ &result, needs_rvalue, false,
+ this->subexpressions[0]->get_location());
+ break;
+ }
+
+ case ast_ls_assign:
+ case ast_rs_assign: {
+ this->subexpressions[0]->set_is_lhs(true);
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+ type = shift_result_type(op[0]->type, op[1]->type, this->oper, state,
+ &loc);
+ ir_rvalue *temp_rhs = new(ctx) ir_expression(operations[this->oper],
+ type, op[0], op[1]);
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0]->clone(ctx, NULL), temp_rhs,
+ &result, needs_rvalue, false,
+ this->subexpressions[0]->get_location());
+ break;
+ }
+
+ case ast_and_assign:
+ case ast_xor_assign:
+ case ast_or_assign: {
+ this->subexpressions[0]->set_is_lhs(true);
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = this->subexpressions[1]->hir(instructions, state);
+
+ orig_type = op[0]->type;
+ type = bit_logic_result_type(op[0], op[1], this->oper, state, &loc);
+
+ if (type != orig_type) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert "
+ "%s to %s", type->name, orig_type->name);
+ type = glsl_type::error_type;
+ }
+
+ ir_rvalue *temp_rhs = new(ctx) ir_expression(operations[this->oper],
+ type, op[0], op[1]);
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0]->clone(ctx, NULL), temp_rhs,
+ &result, needs_rvalue, false,
+ this->subexpressions[0]->get_location());
+ break;
+ }
+
+ case ast_conditional: {
+ /* From page 59 (page 65 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "The ternary selection operator (?:). It operates on three
+ * expressions (exp1 ? exp2 : exp3). This operator evaluates the
+ * first expression, which must result in a scalar Boolean."
+ */
+ op[0] = get_scalar_boolean_operand(instructions, state, this, 0,
+ "condition", &error_emitted);
+
+      /* The ?: operator is implemented by generating an anonymous temporary
+ * followed by an if-statement. The last instruction in each branch of
+ * the if-statement assigns a value to the anonymous temporary. This
+ * temporary is the r-value of the expression.
+ */
+ exec_list then_instructions;
+ exec_list else_instructions;
+
+ op[1] = this->subexpressions[1]->hir(&then_instructions, state);
+ op[2] = this->subexpressions[2]->hir(&else_instructions, state);
+
+ /* From page 59 (page 65 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "The second and third expressions can be any type, as
+ * long their types match, or there is a conversion in
+ * Section 4.1.10 "Implicit Conversions" that can be applied
+ * to one of the expressions to make their types match. This
+ * resulting matching type is the type of the entire
+ * expression."
+ */
+ if ((!apply_implicit_conversion(op[1]->type, op[2], state)
+ && !apply_implicit_conversion(op[2]->type, op[1], state))
+ || (op[1]->type != op[2]->type)) {
+ YYLTYPE loc = this->subexpressions[1]->get_location();
+
+ _mesa_glsl_error(& loc, state, "second and third operands of ?: "
+ "operator must have matching types");
+ error_emitted = true;
+ type = glsl_type::error_type;
+ } else {
+ type = op[1]->type;
+ }
+
+ /* From page 33 (page 39 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "The second and third expressions must be the same type, but can
+ * be of any type other than an array."
+ */
+ if (type->is_array() &&
+ !state->check_version(120, 300, &loc,
+ "second and third operands of ?: operator "
+ "cannot be arrays")) {
+ error_emitted = true;
+ }
+
+ /* From section 4.1.7 of the GLSL 4.50 spec (Opaque Types):
+ *
+ * "Except for array indexing, structure member selection, and
+ * parentheses, opaque variables are not allowed to be operands in
+ * expressions; such use results in a compile-time error."
+ */
+ if (type->contains_opaque()) {
+ if (!(state->has_bindless() && (type->is_image() || type->is_sampler()))) {
+ _mesa_glsl_error(&loc, state, "variables of type %s cannot be "
+ "operands of the ?: operator", type->name);
+ error_emitted = true;
+ }
+ }
+
+ ir_constant *cond_val = op[0]->constant_expression_value(ctx);
+
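+      /* If the condition folds to a constant and neither branch emitted any
+       * instructions, the whole ?: can be resolved at compile time by
+       * selecting the matching operand, e.g. `true ? a : b` becomes `a`.
+       */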
+ if (then_instructions.is_empty()
+ && else_instructions.is_empty()
+ && cond_val != NULL) {
+ result = cond_val->value.b[0] ? op[1] : op[2];
+ } else {
+ /* The copy to conditional_tmp reads the whole array. */
+ if (type->is_array()) {
+ mark_whole_array_access(op[1]);
+ mark_whole_array_access(op[2]);
+ }
+
+ ir_variable *const tmp =
+ new(ctx) ir_variable(type, "conditional_tmp", ir_var_temporary);
+ instructions->push_tail(tmp);
+
+ ir_if *const stmt = new(ctx) ir_if(op[0]);
+ instructions->push_tail(stmt);
+
+ then_instructions.move_nodes_to(& stmt->then_instructions);
+ ir_dereference *const then_deref =
+ new(ctx) ir_dereference_variable(tmp);
+ ir_assignment *const then_assign =
+ new(ctx) ir_assignment(then_deref, op[1]);
+ stmt->then_instructions.push_tail(then_assign);
+
+ else_instructions.move_nodes_to(& stmt->else_instructions);
+ ir_dereference *const else_deref =
+ new(ctx) ir_dereference_variable(tmp);
+ ir_assignment *const else_assign =
+ new(ctx) ir_assignment(else_deref, op[2]);
+ stmt->else_instructions.push_tail(else_assign);
+
+ result = new(ctx) ir_dereference_variable(tmp);
+ }
+ break;
+ }
+
+ case ast_pre_inc:
+ case ast_pre_dec: {
+ this->non_lvalue_description = (this->oper == ast_pre_inc)
+ ? "pre-increment operation" : "pre-decrement operation";
+
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = constant_one_for_inc_dec(ctx, op[0]->type);
+
+ type = arithmetic_result_type(op[0], op[1], false, state, & loc);
+
+ ir_rvalue *temp_rhs;
+ temp_rhs = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0]->clone(ctx, NULL), temp_rhs,
+ &result, needs_rvalue, false,
+ this->subexpressions[0]->get_location());
+ break;
+ }
+
+ case ast_post_inc:
+ case ast_post_dec: {
+ this->non_lvalue_description = (this->oper == ast_post_inc)
+ ? "post-increment operation" : "post-decrement operation";
+ op[0] = this->subexpressions[0]->hir(instructions, state);
+ op[1] = constant_one_for_inc_dec(ctx, op[0]->type);
+
+ error_emitted = op[0]->type->is_error() || op[1]->type->is_error();
+
+ if (error_emitted) {
+ result = ir_rvalue::error_value(ctx);
+ break;
+ }
+
+ type = arithmetic_result_type(op[0], op[1], false, state, & loc);
+
+ ir_rvalue *temp_rhs;
+ temp_rhs = new(ctx) ir_expression(operations[this->oper], type,
+ op[0], op[1]);
+
+      /* Take a temporary copy of the lvalue before it is modified.
+ * This may get thrown away later.
+ */
+ result = get_lvalue_copy(instructions, op[0]->clone(ctx, NULL));
+
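+      /* The increment/decrement is still stored back into the lvalue below,
+       * but its r-value is discarded: for `i++` the value of the expression
+       * is the pre-modification copy captured in `result` above.
+       */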
+ ir_rvalue *junk_rvalue;
+ error_emitted =
+ do_assignment(instructions, state,
+ this->subexpressions[0]->non_lvalue_description,
+ op[0]->clone(ctx, NULL), temp_rhs,
+ &junk_rvalue, false, false,
+ this->subexpressions[0]->get_location());
+
+ break;
+ }
+
+ case ast_field_selection:
+ result = _mesa_ast_field_selection_to_hir(this, instructions, state);
+ break;
+
+ case ast_array_index: {
+ YYLTYPE index_loc = subexpressions[1]->get_location();
+
+      /* Detecting whether an array is used uninitialized is beyond what
+       * ir_variable::data.assigned can tell us.  Setting is_lhs to true
+       * keeps a spurious "used uninitialized" warning from being raised
+       * when an array is indexed.
+       */
+ subexpressions[0]->set_is_lhs(true);
+ op[0] = subexpressions[0]->hir(instructions, state);
+ op[1] = subexpressions[1]->hir(instructions, state);
+
+ result = _mesa_ast_array_index_to_hir(ctx, state, op[0], op[1],
+ loc, index_loc);
+
+ if (result->type->is_error())
+ error_emitted = true;
+
+ break;
+ }
+
+ case ast_unsized_array_dim:
+ unreachable("ast_unsized_array_dim: Should never get here.");
+
+ case ast_function_call:
+ /* Should *NEVER* get here. ast_function_call should always be handled
+ * by ast_function_expression::hir.
+ */
+      unreachable("ast_function_call: handled elsewhere");
+
+ case ast_identifier: {
+      /* ast_identifier can appear in several places in a full abstract
+       * syntax tree.  This particular use must be at the location specified
+       * in the grammar as 'variable_identifier'.
+ */
+ ir_variable *var =
+ state->symbols->get_variable(this->primary_expression.identifier);
+
+ if (var == NULL) {
+ /* the identifier might be a subroutine name */
+ char *sub_name;
+ sub_name = ralloc_asprintf(ctx, "%s_%s", _mesa_shader_stage_to_subroutine_prefix(state->stage), this->primary_expression.identifier);
+ var = state->symbols->get_variable(sub_name);
+ ralloc_free(sub_name);
+ }
+
+ if (var != NULL) {
+ var->data.used = true;
+ result = new(ctx) ir_dereference_variable(var);
+
+ if ((var->data.mode == ir_var_auto || var->data.mode == ir_var_shader_out)
+ && !this->is_lhs
+ && result->variable_referenced()->data.assigned != true
+ && !is_gl_identifier(var->name)) {
+ _mesa_glsl_warning(&loc, state, "`%s' used uninitialized",
+ this->primary_expression.identifier);
+ }
+
+ /* From the EXT_shader_framebuffer_fetch spec:
+ *
+ * "Unless the GL_EXT_shader_framebuffer_fetch extension has been
+ * enabled in addition, it's an error to use gl_LastFragData if it
+ * hasn't been explicitly redeclared with layout(noncoherent)."
+ */
+ if (var->data.fb_fetch_output && var->data.memory_coherent &&
+ !state->EXT_shader_framebuffer_fetch_enable) {
+ _mesa_glsl_error(&loc, state,
+ "invalid use of framebuffer fetch output not "
+ "qualified with layout(noncoherent)");
+ }
+
+ } else {
+ _mesa_glsl_error(& loc, state, "`%s' undeclared",
+ this->primary_expression.identifier);
+
+ result = ir_rvalue::error_value(ctx);
+ error_emitted = true;
+ }
+ break;
+ }
+
+ case ast_int_constant:
+ result = new(ctx) ir_constant(this->primary_expression.int_constant);
+ break;
+
+ case ast_uint_constant:
+ result = new(ctx) ir_constant(this->primary_expression.uint_constant);
+ break;
+
+ case ast_float_constant:
+ result = new(ctx) ir_constant(this->primary_expression.float_constant);
+ break;
+
+ case ast_bool_constant:
+ result = new(ctx) ir_constant(bool(this->primary_expression.bool_constant));
+ break;
+
+ case ast_double_constant:
+ result = new(ctx) ir_constant(this->primary_expression.double_constant);
+ break;
+
+ case ast_uint64_constant:
+ result = new(ctx) ir_constant(this->primary_expression.uint64_constant);
+ break;
+
+ case ast_int64_constant:
+ result = new(ctx) ir_constant(this->primary_expression.int64_constant);
+ break;
+
+ case ast_sequence: {
+ /* It should not be possible to generate a sequence in the AST without
+ * any expressions in it.
+ */
+ assert(!this->expressions.is_empty());
+
+ /* The r-value of a sequence is the last expression in the sequence. If
+ * the other expressions in the sequence do not have side-effects (and
+ * therefore add instructions to the instruction list), they get dropped
+ * on the floor.
+ */
+ exec_node *previous_tail = NULL;
+ YYLTYPE previous_operand_loc = loc;
+
+ foreach_list_typed (ast_node, ast, link, &this->expressions) {
+ /* If one of the operands of comma operator does not generate any
+ * code, we want to emit a warning. At each pass through the loop
+ * previous_tail will point to the last instruction in the stream
+ * *before* processing the previous operand. Naturally,
+ * instructions->get_tail_raw() will point to the last instruction in
+ * the stream *after* processing the previous operand. If the two
+ * pointers match, then the previous operand had no effect.
+ *
+          * The warning behavior here differs slightly from GCC.  GCC will
+          * only emit a warning if none of the left-hand operands have an
+          * effect; when it does warn, it warns for each of them.  I believe that
+ * there are some cases in C (especially with GCC extensions) where
+ * it is useful to have an intermediate step in a sequence have no
+ * effect, but I don't think these cases exist in GLSL. Either way,
+ * it would be a giant hassle to replicate that behavior.
+ */
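+         /* For example, in `x = (3, y);` the literal 3 adds nothing to the
+          * instruction stream, so the two tail pointers match and the
+          * warning below fires for it.
+          */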
+ if (previous_tail == instructions->get_tail_raw()) {
+ _mesa_glsl_warning(&previous_operand_loc, state,
+ "left-hand operand of comma expression has "
+ "no effect");
+ }
+
+ /* The tail is directly accessed instead of using the get_tail()
+ * method for performance reasons. get_tail() has extra code to
+ * return NULL when the list is empty. We don't care about that
+ * here, so using get_tail_raw() is fine.
+ */
+ previous_tail = instructions->get_tail_raw();
+ previous_operand_loc = ast->get_location();
+
+ result = ast->hir(instructions, state);
+ }
+
+ /* Any errors should have already been emitted in the loop above.
+ */
+ error_emitted = true;
+ break;
+ }
+ }
+ type = NULL; /* use result->type, not type. */
+ assert(error_emitted || (result != NULL || !needs_rvalue));
+
+ if (result && result->type->is_error() && !error_emitted)
+ _mesa_glsl_error(& loc, state, "type mismatch");
+
+ return result;
+}
+
+bool
+ast_expression::has_sequence_subexpression() const
+{
+ switch (this->oper) {
+ case ast_plus:
+ case ast_neg:
+ case ast_bit_not:
+ case ast_logic_not:
+ case ast_pre_inc:
+ case ast_pre_dec:
+ case ast_post_inc:
+ case ast_post_dec:
+ return this->subexpressions[0]->has_sequence_subexpression();
+
+ case ast_assign:
+ case ast_add:
+ case ast_sub:
+ case ast_mul:
+ case ast_div:
+ case ast_mod:
+ case ast_lshift:
+ case ast_rshift:
+ case ast_less:
+ case ast_greater:
+ case ast_lequal:
+ case ast_gequal:
+ case ast_nequal:
+ case ast_equal:
+ case ast_bit_and:
+ case ast_bit_xor:
+ case ast_bit_or:
+ case ast_logic_and:
+ case ast_logic_or:
+ case ast_logic_xor:
+ case ast_array_index:
+ case ast_mul_assign:
+ case ast_div_assign:
+ case ast_add_assign:
+ case ast_sub_assign:
+ case ast_mod_assign:
+ case ast_ls_assign:
+ case ast_rs_assign:
+ case ast_and_assign:
+ case ast_xor_assign:
+ case ast_or_assign:
+ return this->subexpressions[0]->has_sequence_subexpression() ||
+ this->subexpressions[1]->has_sequence_subexpression();
+
+ case ast_conditional:
+ return this->subexpressions[0]->has_sequence_subexpression() ||
+ this->subexpressions[1]->has_sequence_subexpression() ||
+ this->subexpressions[2]->has_sequence_subexpression();
+
+ case ast_sequence:
+ return true;
+
+ case ast_field_selection:
+ case ast_identifier:
+ case ast_int_constant:
+ case ast_uint_constant:
+ case ast_float_constant:
+ case ast_bool_constant:
+ case ast_double_constant:
+ case ast_int64_constant:
+ case ast_uint64_constant:
+ return false;
+
+ case ast_aggregate:
+ return false;
+
+ case ast_function_call:
+ unreachable("should be handled by ast_function_expression::hir");
+
+ case ast_unsized_array_dim:
+ unreachable("ast_unsized_array_dim: Should never get here.");
+ }
+
+ return false;
+}
+
+ir_rvalue *
+ast_expression_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ /* It is possible to have expression statements that don't have an
+ * expression. This is the solitary semicolon:
+ *
+ * for (i = 0; i < 5; i++)
+ * ;
+ *
+ * In this case the expression will be NULL. Test for NULL and don't do
+ * anything in that case.
+ */
+ if (expression != NULL)
+ expression->hir_no_rvalue(instructions, state);
+
+ /* Statements do not have r-values.
+ */
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_compound_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ if (new_scope)
+ state->symbols->push_scope();
+
+ foreach_list_typed (ast_node, ast, link, &this->statements)
+ ast->hir(instructions, state);
+
+ if (new_scope)
+ state->symbols->pop_scope();
+
+ /* Compound statements do not have r-values.
+ */
+ return NULL;
+}
+
+/**
+ * Evaluate the given exec_node (which should be an ast_node representing
+ * a single array dimension) and return its integer value.
+ */
+static unsigned
+process_array_size(exec_node *node,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *mem_ctx = state;
+
+ exec_list dummy_instructions;
+
+ ast_node *array_size = exec_node_data(ast_node, node, link);
+
+   /* Dimensions other than the outermost dimension can be unsized if they
+ * are immediately sized by a constructor or initializer.
+ */
+ if (((ast_expression*)array_size)->oper == ast_unsized_array_dim)
+ return 0;
+
+ ir_rvalue *const ir = array_size->hir(& dummy_instructions, state);
+ YYLTYPE loc = array_size->get_location();
+
+ if (ir == NULL) {
+ _mesa_glsl_error(& loc, state,
+ "array size could not be resolved");
+ return 0;
+ }
+
+ if (!ir->type->is_integer_32()) {
+ _mesa_glsl_error(& loc, state,
+ "array size must be integer type");
+ return 0;
+ }
+
+ if (!ir->type->is_scalar()) {
+ _mesa_glsl_error(& loc, state,
+ "array size must be scalar type");
+ return 0;
+ }
+
+ ir_constant *const size = ir->constant_expression_value(mem_ctx);
+ if (size == NULL ||
+ (state->is_version(120, 300) &&
+ array_size->has_sequence_subexpression())) {
+ _mesa_glsl_error(& loc, state, "array size must be a "
+ "constant valued expression");
+ return 0;
+ }
+
+ if (size->value.i[0] <= 0) {
+ _mesa_glsl_error(& loc, state, "array size must be > 0");
+ return 0;
+ }
+
+ assert(size->type == ir->type);
+
+ /* If the array size is const (and we've verified that
+ * it is) then no instructions should have been emitted
+ * when we converted it to HIR. If they were emitted,
+ * then either the array size isn't const after all, or
+ * we are emitting unnecessary instructions.
+ */
+ assert(dummy_instructions.is_empty());
+
+ return size->value.u[0];
+}
+
+static const glsl_type *
+process_array_type(YYLTYPE *loc, const glsl_type *base,
+ ast_array_specifier *array_specifier,
+ struct _mesa_glsl_parse_state *state)
+{
+ const glsl_type *array_type = base;
+
+ if (array_specifier != NULL) {
+ if (base->is_array()) {
+
+         /* From page 19 (page 25 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "Only one-dimensional arrays may be declared."
+ */
+ if (!state->check_arrays_of_arrays_allowed(loc)) {
+ return glsl_type::error_type;
+ }
+ }
+
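+      /* Wrap from the innermost (rightmost) dimension outward, so that e.g.
+       * `int a[2][3]` builds an array of 2 arrays of 3 ints.
+       */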
+ for (exec_node *node = array_specifier->array_dimensions.get_tail_raw();
+ !node->is_head_sentinel(); node = node->prev) {
+ unsigned array_size = process_array_size(node, state);
+ array_type = glsl_type::get_array_instance(array_type, array_size);
+ }
+ }
+
+ return array_type;
+}
+
+static bool
+precision_qualifier_allowed(const glsl_type *type)
+{
+ /* Precision qualifiers apply to floating point, integer and opaque
+ * types.
+ *
+ * Section 4.5.2 (Precision Qualifiers) of the GLSL 1.30 spec says:
+ * "Any floating point or any integer declaration can have the type
+ * preceded by one of these precision qualifiers [...] Literal
+ * constants do not have precision qualifiers. Neither do Boolean
+    *     variables."
+ *
+ * Section 4.5 (Precision and Precision Qualifiers) of the GLSL 1.30
+ * spec also says:
+ *
+ * "Precision qualifiers are added for code portability with OpenGL
+ * ES, not for functionality. They have the same syntax as in OpenGL
+ * ES."
+ *
+ * Section 8 (Built-In Functions) of the GLSL ES 1.00 spec says:
+ *
+ * "uniform lowp sampler2D sampler;
+ * highp vec2 coord;
+ * ...
+ * lowp vec4 col = texture2D (sampler, coord);
+ * // texture2D returns lowp"
+ *
+ * From this, we infer that GLSL 1.30 (and later) should allow precision
+ * qualifiers on sampler types just like float and integer types.
+ */
+ const glsl_type *const t = type->without_array();
+
+ return (t->is_float() || t->is_integer_32() || t->contains_opaque()) &&
+ !t->is_struct();
+}
+
+const glsl_type *
+ast_type_specifier::glsl_type(const char **name,
+ struct _mesa_glsl_parse_state *state) const
+{
+ const struct glsl_type *type;
+
+ if (this->type != NULL)
+ type = this->type;
+ else if (structure)
+ type = structure->type;
+ else
+ type = state->symbols->get_type(this->type_name);
+ *name = this->type_name;
+
+ YYLTYPE loc = this->get_location();
+ type = process_array_type(&loc, type, this->array_specifier, state);
+
+ return type;
+}
+
+/**
+ * From the OpenGL ES 3.0 spec, 4.5.4 Default Precision Qualifiers:
+ *
+ * "The precision statement
+ *
+ * precision precision-qualifier type;
+ *
+ * can be used to establish a default precision qualifier. The type field can
+ * be either int or float or any of the sampler types, (...) If type is float,
+ * the directive applies to non-precision-qualified floating point type
+ * (scalar, vector, and matrix) declarations. If type is int, the directive
+ * applies to all non-precision-qualified integer type (scalar, vector, signed,
+ * and unsigned) declarations."
+ *
+ * We use the symbol table to keep the values of the default precisions for
+ * each 'type' in each scope and we use the 'type' string from the precision
+ * statement as key in the symbol table. When we want to retrieve the default
+ * precision associated with a given glsl_type we need to know the type string
+ * associated with it. This is what this function returns.
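+ *
+ * For example, both int and uint map to the key "int" (matching the spec
+ * language above), while every float-based type maps to "float".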
+ */
+static const char *
+get_type_name_for_precision_qualifier(const glsl_type *type)
+{
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ return "float";
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ return "int";
+ case GLSL_TYPE_ATOMIC_UINT:
+ return "atomic_uint";
+ case GLSL_TYPE_IMAGE:
+ /* fallthrough */
+ case GLSL_TYPE_SAMPLER: {
+ const unsigned type_idx =
+ type->sampler_array + 2 * type->sampler_shadow;
+ const unsigned offset = type->is_sampler() ? 0 : 4;
+ assert(type_idx < 4);
+ switch (type->sampled_type) {
+ case GLSL_TYPE_FLOAT:
+ switch (type->sampler_dimensionality) {
+ case GLSL_SAMPLER_DIM_1D: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "sampler1D", "sampler1DArray",
+ "sampler1DShadow", "sampler1DArrayShadow"
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_2D: {
+ static const char *const names[8] = {
+ "sampler2D", "sampler2DArray",
+ "sampler2DShadow", "sampler2DArrayShadow",
+ "image2D", "image2DArray", NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_3D: {
+ static const char *const names[8] = {
+ "sampler3D", NULL, NULL, NULL,
+ "image3D", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_CUBE: {
+ static const char *const names[8] = {
+ "samplerCube", "samplerCubeArray",
+ "samplerCubeShadow", "samplerCubeArrayShadow",
+ "imageCube", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_MS: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "sampler2DMS", "sampler2DMSArray", NULL, NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_RECT: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "samplerRect", NULL, "samplerRectShadow", NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_BUF: {
+ static const char *const names[8] = {
+ "samplerBuffer", NULL, NULL, NULL,
+ "imageBuffer", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_EXTERNAL: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "samplerExternalOES", NULL, NULL, NULL
+ };
+ return names[type_idx];
+ }
+ default:
+ unreachable("Unsupported sampler/image dimensionality");
+ } /* sampler/image float dimensionality */
+ break;
+ case GLSL_TYPE_INT:
+ switch (type->sampler_dimensionality) {
+ case GLSL_SAMPLER_DIM_1D: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "isampler1D", "isampler1DArray", NULL, NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_2D: {
+ static const char *const names[8] = {
+ "isampler2D", "isampler2DArray", NULL, NULL,
+ "iimage2D", "iimage2DArray", NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_3D: {
+ static const char *const names[8] = {
+ "isampler3D", NULL, NULL, NULL,
+ "iimage3D", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_CUBE: {
+ static const char *const names[8] = {
+ "isamplerCube", "isamplerCubeArray", NULL, NULL,
+ "iimageCube", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_MS: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "isampler2DMS", "isampler2DMSArray", NULL, NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_RECT: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "isamplerRect", NULL, "isamplerRectShadow", NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_BUF: {
+ static const char *const names[8] = {
+ "isamplerBuffer", NULL, NULL, NULL,
+ "iimageBuffer", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ default:
+ unreachable("Unsupported isampler/iimage dimensionality");
+ } /* sampler/image int dimensionality */
+ break;
+ case GLSL_TYPE_UINT:
+ switch (type->sampler_dimensionality) {
+ case GLSL_SAMPLER_DIM_1D: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "usampler1D", "usampler1DArray", NULL, NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_2D: {
+ static const char *const names[8] = {
+ "usampler2D", "usampler2DArray", NULL, NULL,
+ "uimage2D", "uimage2DArray", NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_3D: {
+ static const char *const names[8] = {
+ "usampler3D", NULL, NULL, NULL,
+ "uimage3D", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_CUBE: {
+ static const char *const names[8] = {
+ "usamplerCube", "usamplerCubeArray", NULL, NULL,
+ "uimageCube", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ case GLSL_SAMPLER_DIM_MS: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "usampler2DMS", "usampler2DMSArray", NULL, NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_RECT: {
+ assert(type->is_sampler());
+ static const char *const names[4] = {
+ "usamplerRect", NULL, "usamplerRectShadow", NULL
+ };
+ return names[type_idx];
+ }
+ case GLSL_SAMPLER_DIM_BUF: {
+ static const char *const names[8] = {
+ "usamplerBuffer", NULL, NULL, NULL,
+ "uimageBuffer", NULL, NULL, NULL
+ };
+ return names[offset + type_idx];
+ }
+ default:
+ unreachable("Unsupported usampler/uimage dimensionality");
+ } /* sampler/image uint dimensionality */
+ break;
+ default:
+ unreachable("Unsupported sampler/image type");
+ } /* sampler/image type */
+ break;
+ } /* GLSL_TYPE_SAMPLER/GLSL_TYPE_IMAGE */
+ break;
+ default:
+ unreachable("Unsupported type");
+ } /* base type */
+}
+
+static unsigned
+select_gles_precision(unsigned qual_precision,
+ const glsl_type *type,
+ struct _mesa_glsl_parse_state *state, YYLTYPE *loc)
+{
+ /* Precision qualifiers do not have any meaning in Desktop GLSL.
+ * In GLES we take the precision from the type qualifier if present,
+ * otherwise, if the type of the variable allows precision qualifiers at
+ * all, we look for the default precision qualifier for that type in the
+ * current scope.
+ */
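+   /* For example, after `precision mediump float;` an unqualified
+    * `vec2 v;` picks up mediump from the default-precision entry stored
+    * under the key "float".
+    */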
+ assert(state->es_shader);
+
+ unsigned precision = GLSL_PRECISION_NONE;
+ if (qual_precision) {
+ precision = qual_precision;
+ } else if (precision_qualifier_allowed(type)) {
+ const char *type_name =
+ get_type_name_for_precision_qualifier(type->without_array());
+ assert(type_name != NULL);
+
+ precision =
+ state->symbols->get_default_precision_qualifier(type_name);
+ if (precision == ast_precision_none) {
+         _mesa_glsl_error(loc, state,
+                          "no precision specified in this scope for type `%s'",
+ type->name);
+ }
+ }
+
+
+ /* Section 4.1.7.3 (Atomic Counters) of the GLSL ES 3.10 spec says:
+ *
+ * "The default precision of all atomic types is highp. It is an error to
+ * declare an atomic type with a different precision or to specify the
+ * default precision for an atomic type to be lowp or mediump."
+ */
+ if (type->is_atomic_uint() && precision != ast_precision_high) {
+ _mesa_glsl_error(loc, state,
+ "atomic_uint can only have highp precision qualifier");
+ }
+
+ return precision;
+}
+
+const glsl_type *
+ast_fully_specified_type::glsl_type(const char **name,
+ struct _mesa_glsl_parse_state *state) const
+{
+ return this->specifier->glsl_type(name, state);
+}
+
+/**
+ * Determine whether a toplevel variable declaration declares a varying. This
+ * function operates by examining the variable's mode and the shader target,
+ * so it correctly identifies linkage variables regardless of whether they are
+ * declared using the deprecated "varying" syntax or the new "in/out" syntax.
+ *
+ * Passing a non-toplevel variable declaration (e.g. a function parameter) to
+ * this function will produce undefined results.
+ */
+static bool
+is_varying_var(ir_variable *var, gl_shader_stage target)
+{
+ switch (target) {
+ case MESA_SHADER_VERTEX:
+ return var->data.mode == ir_var_shader_out;
+ case MESA_SHADER_FRAGMENT:
+ return var->data.mode == ir_var_shader_in ||
+ (var->data.mode == ir_var_system_value &&
+ var->data.location == SYSTEM_VALUE_FRAG_COORD);
+ default:
+ return var->data.mode == ir_var_shader_out || var->data.mode == ir_var_shader_in;
+ }
+}
+
+static bool
+is_allowed_invariant(ir_variable *var, struct _mesa_glsl_parse_state *state)
+{
+ if (is_varying_var(var, state->stage))
+ return true;
+
+ /* From Section 4.6.1 ("The Invariant Qualifier") GLSL 1.20 spec:
+ * "Only variables output from a vertex shader can be candidates
+ * for invariance".
+ */
+ if (!state->is_version(130, 100))
+ return false;
+
+ /*
+    * Later specs remove this language, so invariant is also allowed
+    * on fragment shader outputs.
+ */
+ if (state->stage == MESA_SHADER_FRAGMENT &&
+ var->data.mode == ir_var_shader_out)
+ return true;
+ return false;
+}
+
+/**
+ * Matrix layout qualifiers are only allowed on certain types
+ */
+static void
+validate_matrix_layout_for_type(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const glsl_type *type,
+ ir_variable *var)
+{
+ if (var && !var->is_in_buffer_block()) {
+ /* Layout qualifiers may only apply to interface blocks and fields in
+ * them.
+ */
+ _mesa_glsl_error(loc, state,
+ "uniform block layout qualifiers row_major and "
+ "column_major may not be applied to variables "
+ "outside of uniform blocks");
+ } else if (!type->without_array()->is_matrix()) {
+ /* The OpenGL ES 3.0 conformance tests did not originally allow
+ * matrix layout qualifiers on non-matrices. However, the OpenGL
+ * 4.4 and OpenGL ES 3.0 (revision TBD) specifications were
+ * amended to specifically allow these layouts on all types. Emit
+ * a warning so that people know their code may not be portable.
+ */
+ _mesa_glsl_warning(loc, state,
+ "uniform block layout qualifiers row_major and "
+ "column_major applied to non-matrix types may "
+ "be rejected by older compilers");
+ }
+}
+
+static bool
+validate_xfb_buffer_qualifier(YYLTYPE *loc,
+ struct _mesa_glsl_parse_state *state,
+ unsigned xfb_buffer) {
+ if (xfb_buffer >= state->Const.MaxTransformFeedbackBuffers) {
+      _mesa_glsl_error(loc, state,
+                       "invalid xfb_buffer specified: %d is larger than "
+ "MAX_TRANSFORM_FEEDBACK_BUFFERS - 1 (%d).",
+ xfb_buffer,
+ state->Const.MaxTransformFeedbackBuffers - 1);
+ return false;
+ }
+
+ return true;
+}
+
+/* From the ARB_enhanced_layouts spec:
+ *
+ * "Variables and block members qualified with *xfb_offset* can be
+ * scalars, vectors, matrices, structures, and (sized) arrays of these.
+ * The offset must be a multiple of the size of the first component of
+ * the first qualified variable or block member, or a compile-time error
+ * results. Further, if applied to an aggregate containing a double,
+ * the offset must also be a multiple of 8, and the space taken in the
+ *    buffer will be a multiple of 8."
+ */
+static bool
+validate_xfb_offset_qualifier(YYLTYPE *loc,
+ struct _mesa_glsl_parse_state *state,
+ int xfb_offset, const glsl_type *type,
+ unsigned component_size) {
+ const glsl_type *t_without_array = type->without_array();
+
+ if (xfb_offset != -1 && type->is_unsized_array()) {
+ _mesa_glsl_error(loc, state,
+ "xfb_offset can't be used with unsized arrays.");
+ return false;
+ }
+
+ /* Make sure nested structs don't contain unsized arrays, and validate
+ * any xfb_offsets on interface members.
+ */
+ if (t_without_array->is_struct() || t_without_array->is_interface())
+ for (unsigned int i = 0; i < t_without_array->length; i++) {
+ const glsl_type *member_t = t_without_array->fields.structure[i].type;
+
+ /* When the interface block doesn't have an xfb_offset qualifier then
+ * we apply the component size rules at the member level.
+ */
+ if (xfb_offset == -1)
+ component_size = member_t->contains_double() ? 8 : 4;
+
+ int xfb_offset = t_without_array->fields.structure[i].offset;
+ validate_xfb_offset_qualifier(loc, state, xfb_offset, member_t,
+ component_size);
+ }
+
+   /* Nested structs or interface blocks without an offset may not have
+    * had an offset applied yet, so return.
+ */
+ if (xfb_offset == -1) {
+ return true;
+ }
+
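+   /* For example, with a first component of type float (size 4) any
+    * xfb_offset of 4, 8, 12, ... passes, while an aggregate containing a
+    * double makes component_size 8, so only multiples of 8 are accepted.
+    */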
+ if (xfb_offset % component_size) {
+      _mesa_glsl_error(loc, state,
+                       "invalid qualifier xfb_offset=%d: must be a multiple "
+                       "of the size of the first component of the first "
+                       "qualified variable or block member, or a multiple "
+                       "of 8 if the aggregate contains a double (%d).",
+                       xfb_offset, component_size);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+validate_stream_qualifier(YYLTYPE *loc, struct _mesa_glsl_parse_state *state,
+ unsigned stream)
+{
+ if (stream >= state->ctx->Const.MaxVertexStreams) {
+      _mesa_glsl_error(loc, state,
+                       "invalid stream specified: %d is larger than "
+ "MAX_VERTEX_STREAMS - 1 (%d).",
+ stream, state->ctx->Const.MaxVertexStreams - 1);
+ return false;
+ }
+
+ return true;
+}
+
+static void
+apply_explicit_binding(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ ir_variable *var,
+ const glsl_type *type,
+ const ast_type_qualifier *qual)
+{
+ if (!qual->flags.q.uniform && !qual->flags.q.buffer) {
+ _mesa_glsl_error(loc, state,
+ "the \"binding\" qualifier only applies to uniforms and "
+ "shader storage buffer objects");
+ return;
+ }
+
+ unsigned qual_binding;
+ if (!process_qualifier_constant(state, loc, "binding", qual->binding,
+ &qual_binding)) {
+ return;
+ }
+
+ const struct gl_context *const ctx = state->ctx;
+ unsigned elements = type->is_array() ? type->arrays_of_arrays_size() : 1;
+ unsigned max_index = qual_binding + elements - 1;
+ const glsl_type *base_type = type->without_array();
+
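+   /* For example, `layout(binding = 2) uniform sampler2D s[4];` has
+    * elements == 4 and occupies units 2 through 5, so max_index is 5 and
+    * the whole range is checked against the per-resource limit below.
+    */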
+ if (base_type->is_interface()) {
+ /* UBOs. From page 60 of the GLSL 4.20 specification:
+ * "If the binding point for any uniform block instance is less than zero,
+ * or greater than or equal to the implementation-dependent maximum
+ * number of uniform buffer bindings, a compilation error will occur.
+ * When the binding identifier is used with a uniform block instanced as
+ * an array of size N, all elements of the array from binding through
+ * binding + N – 1 must be within this range."
+ *
+ * The implementation-dependent maximum is GL_MAX_UNIFORM_BUFFER_BINDINGS.
+ */
+ if (qual->flags.q.uniform &&
+ max_index >= ctx->Const.MaxUniformBufferBindings) {
+ _mesa_glsl_error(loc, state, "layout(binding = %u) for %d UBOs exceeds "
+ "the maximum number of UBO binding points (%d)",
+ qual_binding, elements,
+ ctx->Const.MaxUniformBufferBindings);
+ return;
+ }
+
+ /* SSBOs. From page 67 of the GLSL 4.30 specification:
+ * "If the binding point for any uniform or shader storage block instance
+ * is less than zero, or greater than or equal to the
+ * implementation-dependent maximum number of uniform buffer bindings, a
+ * compile-time error will occur. When the binding identifier is used
+ * with a uniform or shader storage block instanced as an array of size
+ * N, all elements of the array from binding through binding + N – 1 must
+ * be within this range."
+ */
+ if (qual->flags.q.buffer &&
+ max_index >= ctx->Const.MaxShaderStorageBufferBindings) {
+ _mesa_glsl_error(loc, state, "layout(binding = %u) for %d SSBOs exceeds "
+ "the maximum number of SSBO binding points (%d)",
+ qual_binding, elements,
+ ctx->Const.MaxShaderStorageBufferBindings);
+ return;
+ }
+ } else if (base_type->is_sampler()) {
+ /* Samplers. From page 63 of the GLSL 4.20 specification:
+ * "If the binding is less than zero, or greater than or equal to the
+ * implementation-dependent maximum supported number of units, a
+ * compilation error will occur. When the binding identifier is used
+ * with an array of size N, all elements of the array from binding
+ * through binding + N - 1 must be within this range."
+ */
+ unsigned limit = ctx->Const.MaxCombinedTextureImageUnits;
+
+ if (max_index >= limit) {
+ _mesa_glsl_error(loc, state, "layout(binding = %d) for %d samplers "
+ "exceeds the maximum number of texture image units "
+ "(%u)", qual_binding, elements, limit);
+
+ return;
+ }
+ } else if (base_type->contains_atomic()) {
+ assert(ctx->Const.MaxAtomicBufferBindings <= MAX_COMBINED_ATOMIC_BUFFERS);
+ if (qual_binding >= ctx->Const.MaxAtomicBufferBindings) {
+ _mesa_glsl_error(loc, state, "layout(binding = %d) exceeds the "
+ "maximum number of atomic counter buffer bindings "
+ "(%u)", qual_binding,
+ ctx->Const.MaxAtomicBufferBindings);
+
+ return;
+ }
+ } else if ((state->is_version(420, 310) ||
+ state->ARB_shading_language_420pack_enable) &&
+ base_type->is_image()) {
+ assert(ctx->Const.MaxImageUnits <= MAX_IMAGE_UNITS);
+ if (max_index >= ctx->Const.MaxImageUnits) {
+ _mesa_glsl_error(loc, state, "Image binding %d exceeds the "
+ "maximum number of image units (%d)", max_index,
+ ctx->Const.MaxImageUnits);
+ return;
+ }
+
+ } else {
+ _mesa_glsl_error(loc, state,
+ "the \"binding\" qualifier only applies to uniform "
+ "blocks, storage blocks, opaque variables, or arrays "
+ "thereof");
+ return;
+ }
+
+ var->data.explicit_binding = true;
+ var->data.binding = qual_binding;
+
+ return;
+}
+
+static void
+validate_fragment_flat_interpolation_input(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const glsl_interp_mode interpolation,
+ const struct glsl_type *var_type,
+ ir_variable_mode mode)
+{
+ if (state->stage != MESA_SHADER_FRAGMENT ||
+ interpolation == INTERP_MODE_FLAT ||
+ mode != ir_var_shader_in)
+ return;
+
+ /* Integer fragment inputs must be qualified with 'flat'. In GLSL ES,
+ * so must integer vertex outputs.
+ *
+ * From section 4.3.4 ("Inputs") of the GLSL 1.50 spec:
+ * "Fragment shader inputs that are signed or unsigned integers or
+ * integer vectors must be qualified with the interpolation qualifier
+ * flat."
+ *
+ * From section 4.3.4 ("Input Variables") of the GLSL 3.00 ES spec:
+ * "Fragment shader inputs that are, or contain, signed or unsigned
+ * integers or integer vectors must be qualified with the
+ * interpolation qualifier flat."
+ *
+ * From section 4.3.6 ("Output Variables") of the GLSL 3.00 ES spec:
+ * "Vertex shader outputs that are, or contain, signed or unsigned
+ * integers or integer vectors must be qualified with the
+ * interpolation qualifier flat."
+ *
+ * Note that prior to GLSL 1.50, this requirement applied to vertex
+ * outputs rather than fragment inputs. That creates problems in the
+ * presence of geometry shaders, so we adopt the GLSL 1.50 rule for all
+ * desktop GL shaders. For GLSL ES shaders, we follow the spec and
+ * apply the restriction to both vertex outputs and fragment inputs.
+ *
+ * Note also that the desktop GLSL specs are missing the text "or
+ * contain"; this is presumably an oversight, since there is no
+ * reasonable way to interpolate a fragment shader input that contains
+ * an integer. See Khronos bug #15671.
+ */
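+   /* For example, `in ivec2 v;` in a fragment shader is rejected here,
+    * while `flat in ivec2 v;` is accepted.
+    */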
+ if ((state->is_version(130, 300) || state->EXT_gpu_shader4_enable)
+ && var_type->contains_integer()) {
+ _mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
+ "an integer, then it must be qualified with 'flat'");
+ }
+
+ /* Double fragment inputs must be qualified with 'flat'.
+ *
+ * From the "Overview" of the ARB_gpu_shader_fp64 extension spec:
+ * "This extension does not support interpolation of double-precision
+ * values; doubles used as fragment shader inputs must be qualified as
+ * "flat"."
+ *
+ * From section 4.3.4 ("Inputs") of the GLSL 4.00 spec:
+ * "Fragment shader inputs that are signed or unsigned integers, integer
+ * vectors, or any double-precision floating-point type must be
+ * qualified with the interpolation qualifier flat."
+ *
+ * Note that the GLSL specs are missing the text "or contain"; this is
+ * presumably an oversight. See Khronos bug #15671.
+ *
+ * The 'double' type does not exist in GLSL ES so far.
+ */
+ if (state->has_double()
+ && var_type->contains_double()) {
+ _mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
+ "a double, then it must be qualified with 'flat'");
+ }
+
+ /* Bindless sampler/image fragment inputs must be qualified with 'flat'.
+ *
+ * From section 4.3.4 of the ARB_bindless_texture spec:
+ *
+ * "(modify last paragraph, p. 35, allowing samplers and images as
+ * fragment shader inputs) ... Fragment inputs can only be signed and
+ * unsigned integers and integer vectors, floating point scalars,
+ * floating-point vectors, matrices, sampler and image types, or arrays
+ * or structures of these. Fragment shader inputs that are signed or
+ * unsigned integers, integer vectors, or any double-precision floating-
+ * point type, or any sampler or image type must be qualified with the
+ * interpolation qualifier "flat"."
+ */
+ if (state->has_bindless()
+ && (var_type->contains_sampler() || var_type->contains_image())) {
+ _mesa_glsl_error(loc, state, "if a fragment input is (or contains) "
+ "a bindless sampler (or image), then it must be "
+ "qualified with 'flat'");
+ }
+}
+
+static void
+validate_interpolation_qualifier(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const glsl_interp_mode interpolation,
+ const struct ast_type_qualifier *qual,
+ const struct glsl_type *var_type,
+ ir_variable_mode mode)
+{
+ /* Interpolation qualifiers can only apply to shader inputs or outputs, but
+ * not to vertex shader inputs nor fragment shader outputs.
+ *
+ * From section 4.3 ("Storage Qualifiers") of the GLSL 1.30 spec:
+ * "Outputs from a vertex shader (out) and inputs to a fragment
+ * shader (in) can be further qualified with one or more of these
+ * interpolation qualifiers"
+ * ...
+ * "These interpolation qualifiers may only precede the qualifiers in,
+ * centroid in, out, or centroid out in a declaration. They do not apply
+ * to the deprecated storage qualifiers varying or centroid
+ * varying. They also do not apply to inputs into a vertex shader or
+ * outputs from a fragment shader."
+ *
+ * From section 4.3 ("Storage Qualifiers") of the GLSL ES 3.00 spec:
+ * "Outputs from a shader (out) and inputs to a shader (in) can be
+ * further qualified with one of these interpolation qualifiers."
+ * ...
+ * "These interpolation qualifiers may only precede the qualifiers
+ * in, centroid in, out, or centroid out in a declaration. They do
+ * not apply to inputs into a vertex shader or outputs from a
+ * fragment shader."
+ */
+ if ((state->is_version(130, 300) || state->EXT_gpu_shader4_enable)
+ && interpolation != INTERP_MODE_NONE) {
+ const char *i = interpolation_string(interpolation);
+ if (mode != ir_var_shader_in && mode != ir_var_shader_out)
+ _mesa_glsl_error(loc, state,
+ "interpolation qualifier `%s' can only be applied to "
+ "shader inputs or outputs.", i);
+
+ switch (state->stage) {
+ case MESA_SHADER_VERTEX:
+ if (mode == ir_var_shader_in) {
+ _mesa_glsl_error(loc, state,
+ "interpolation qualifier '%s' cannot be applied to "
+ "vertex shader inputs", i);
+ }
+ break;
+ case MESA_SHADER_FRAGMENT:
+ if (mode == ir_var_shader_out) {
+ _mesa_glsl_error(loc, state,
+ "interpolation qualifier '%s' cannot be applied to "
+ "fragment shader outputs", i);
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* Interpolation qualifiers cannot be applied to 'centroid' and
+ * 'centroid varying'.
+ *
+ * From section 4.3 ("Storage Qualifiers") of the GLSL 1.30 spec:
+ * "interpolation qualifiers may only precede the qualifiers in,
+ * centroid in, out, or centroid out in a declaration. They do not apply
+ * to the deprecated storage qualifiers varying or centroid varying."
+ *
+ * These deprecated storage qualifiers do not exist in GLSL ES 3.00.
+ *
+ * GL_EXT_gpu_shader4 allows this.
+ */
+ if (state->is_version(130, 0) && !state->EXT_gpu_shader4_enable
+ && interpolation != INTERP_MODE_NONE
+ && qual->flags.q.varying) {
+
+ const char *i = interpolation_string(interpolation);
+ const char *s;
+ if (qual->flags.q.centroid)
+ s = "centroid varying";
+ else
+ s = "varying";
+
+ _mesa_glsl_error(loc, state,
+ "qualifier '%s' cannot be applied to the "
+ "deprecated storage qualifier '%s'", i, s);
+ }
+
+ validate_fragment_flat_interpolation_input(state, loc, interpolation,
+ var_type, mode);
+}
+
+static glsl_interp_mode
+interpret_interpolation_qualifier(const struct ast_type_qualifier *qual,
+ const struct glsl_type *var_type,
+ ir_variable_mode mode,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc)
+{
+ glsl_interp_mode interpolation;
+ if (qual->flags.q.flat)
+ interpolation = INTERP_MODE_FLAT;
+ else if (qual->flags.q.noperspective)
+ interpolation = INTERP_MODE_NOPERSPECTIVE;
+ else if (qual->flags.q.smooth)
+ interpolation = INTERP_MODE_SMOOTH;
+ else
+ interpolation = INTERP_MODE_NONE;
+
+ validate_interpolation_qualifier(state, loc,
+ interpolation,
+ qual, var_type, mode);
+
+ return interpolation;
+}
+
+
+static void
+apply_explicit_location(const struct ast_type_qualifier *qual,
+ ir_variable *var,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc)
+{
+ bool fail = false;
+
+ unsigned qual_location;
+ if (!process_qualifier_constant(state, loc, "location", qual->location,
+ &qual_location)) {
+ return;
+ }
+
+ /* Checks for GL_ARB_explicit_uniform_location. */
+ if (qual->flags.q.uniform) {
+ if (!state->check_explicit_uniform_location_allowed(loc, var))
+ return;
+
+ const struct gl_context *const ctx = state->ctx;
+ unsigned max_loc = qual_location + var->type->uniform_locations() - 1;
+
+ if (max_loc >= ctx->Const.MaxUserAssignableUniformLocations) {
+ _mesa_glsl_error(loc, state, "location(s) consumed by uniform %s "
+ ">= MAX_UNIFORM_LOCATIONS (%u)", var->name,
+ ctx->Const.MaxUserAssignableUniformLocations);
+ return;
+ }
+
+ var->data.explicit_location = true;
+ var->data.location = qual_location;
+ return;
+ }
+
+   /* Between GL_ARB_explicit_attrib_location and
+ * GL_ARB_separate_shader_objects, the inputs and outputs of any shader
+ * stage can be assigned explicit locations. The checking here associates
+ * the correct extension with the correct stage's input / output:
+ *
+ * input output
+ * ----- ------
+ * vertex explicit_loc sso
+ * tess control sso sso
+ * tess eval sso sso
+ * geometry sso sso
+ * fragment sso explicit_loc
+ */
+ switch (state->stage) {
+ case MESA_SHADER_VERTEX:
+ if (var->data.mode == ir_var_shader_in) {
+ if (!state->check_explicit_attrib_location_allowed(loc, var))
+ return;
+
+ break;
+ }
+
+ if (var->data.mode == ir_var_shader_out) {
+ if (!state->check_separate_shader_objects_allowed(loc, var))
+ return;
+
+ break;
+ }
+
+ fail = true;
+ break;
+
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_GEOMETRY:
+ if (var->data.mode == ir_var_shader_in || var->data.mode == ir_var_shader_out) {
+ if (!state->check_separate_shader_objects_allowed(loc, var))
+ return;
+
+ break;
+ }
+
+ fail = true;
+ break;
+
+ case MESA_SHADER_FRAGMENT:
+ if (var->data.mode == ir_var_shader_in) {
+ if (!state->check_separate_shader_objects_allowed(loc, var))
+ return;
+
+ break;
+ }
+
+ if (var->data.mode == ir_var_shader_out) {
+ if (!state->check_explicit_attrib_location_allowed(loc, var))
+ return;
+
+ break;
+ }
+
+ fail = true;
+ break;
+
+ case MESA_SHADER_COMPUTE:
+ _mesa_glsl_error(loc, state,
+ "compute shader variables cannot be given "
+ "explicit locations");
+ return;
+ default:
+ fail = true;
+ break;
+   }
+
+ if (fail) {
+ _mesa_glsl_error(loc, state,
+ "%s cannot be given an explicit location in %s shader",
+ mode_string(var),
+ _mesa_shader_stage_to_string(state->stage));
+ } else {
+ var->data.explicit_location = true;
+
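+      /* Translate the user-facing location to the internal slot space,
+       * e.g. `layout(location = 0) in vec4 p;` in a vertex shader lands at
+       * VERT_ATTRIB_GENERIC0, and `layout(location = 0) out vec4 c;` in a
+       * fragment shader lands at FRAG_RESULT_DATA0.
+       */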
+ switch (state->stage) {
+ case MESA_SHADER_VERTEX:
+ var->data.location = (var->data.mode == ir_var_shader_in)
+ ? (qual_location + VERT_ATTRIB_GENERIC0)
+ : (qual_location + VARYING_SLOT_VAR0);
+ break;
+
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_GEOMETRY:
+ if (var->data.patch)
+ var->data.location = qual_location + VARYING_SLOT_PATCH0;
+ else
+ var->data.location = qual_location + VARYING_SLOT_VAR0;
+ break;
+
+ case MESA_SHADER_FRAGMENT:
+ var->data.location = (var->data.mode == ir_var_shader_out)
+ ? (qual_location + FRAG_RESULT_DATA0)
+ : (qual_location + VARYING_SLOT_VAR0);
+ break;
+ default:
+ assert(!"Unexpected shader type");
+ break;
+ }
+
+ /* Check if index was set for the uniform instead of the function */
+ if (qual->flags.q.explicit_index && qual->is_subroutine_decl()) {
+ _mesa_glsl_error(loc, state, "an index qualifier can only be "
+ "used with subroutine functions");
+ return;
+ }
+
+ unsigned qual_index;
+ if (qual->flags.q.explicit_index &&
+ process_qualifier_constant(state, loc, "index", qual->index,
+ &qual_index)) {
+ /* From the GLSL 4.30 specification, section 4.4.2 (Output
+ * Layout Qualifiers):
+ *
+ * "It is also a compile-time error if a fragment shader
+ * sets a layout index to less than 0 or greater than 1."
+ *
+ * Older specifications don't mandate a behavior; we take
+ * this as a clarification and always generate the error.
+ */
+ if (qual_index > 1) {
+ _mesa_glsl_error(loc, state,
+ "explicit index may only be 0 or 1");
+ } else {
+ var->data.explicit_index = true;
+ var->data.index = qual_index;
+ }
+ }
+ }
+}
+
+static bool
+validate_storage_for_sampler_image_types(ir_variable *var,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc)
+{
+ /* From section 4.1.7 of the GLSL 4.40 spec:
+ *
+ * "[Opaque types] can only be declared as function
+ * parameters or uniform-qualified variables."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ */
+ if (state->has_bindless()) {
+ if (var->data.mode != ir_var_auto &&
+ var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_in &&
+ var->data.mode != ir_var_shader_out &&
+ var->data.mode != ir_var_function_in &&
+ var->data.mode != ir_var_function_out &&
+ var->data.mode != ir_var_function_inout) {
+ _mesa_glsl_error(loc, state, "bindless image/sampler variables may "
+ "only be declared as shader inputs and outputs, as "
+ "uniform variables, as temporary variables and as "
+ "function parameters");
+ return false;
+ }
+ } else {
+ if (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_function_in) {
+ _mesa_glsl_error(loc, state, "image/sampler variables may only be "
+ "declared as function parameters or "
+ "uniform-qualified global variables");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+validate_memory_qualifier_for_type(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const struct ast_type_qualifier *qual,
+ const glsl_type *type)
+{
+ /* From Section 4.10 (Memory Qualifiers) of the GLSL 4.50 spec:
+ *
+ * "Memory qualifiers are only supported in the declarations of image
+ * variables, buffer variables, and shader storage blocks; it is an error
+    * to use such qualifiers in any other declarations."
+ */
+ if (!type->is_image() && !qual->flags.q.buffer) {
+ if (qual->flags.q.read_only ||
+ qual->flags.q.write_only ||
+ qual->flags.q.coherent ||
+ qual->flags.q._volatile ||
+ qual->flags.q.restrict_flag) {
+ _mesa_glsl_error(loc, state, "memory qualifiers may only be applied "
+ "in the declarations of image variables, buffer "
+ "variables, and shader storage blocks");
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool
+validate_image_format_qualifier_for_type(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const struct ast_type_qualifier *qual,
+ const glsl_type *type)
+{
+ /* From section 4.4.6.2 (Format Layout Qualifiers) of the GLSL 4.50 spec:
+ *
+ * "Format layout qualifiers can be used on image variable declarations
+    * (those declared with a basic type having "image" in its keyword)."
+ */
+ if (!type->is_image() && qual->flags.q.explicit_image_format) {
+ _mesa_glsl_error(loc, state, "format layout qualifiers may only be "
+ "applied to images");
+ return false;
+ }
+ return true;
+}
+
+static void
+apply_image_qualifier_to_variable(const struct ast_type_qualifier *qual,
+ ir_variable *var,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc)
+{
+ const glsl_type *base_type = var->type->without_array();
+
+ if (!validate_image_format_qualifier_for_type(state, loc, qual, base_type) ||
+ !validate_memory_qualifier_for_type(state, loc, qual, base_type))
+ return;
+
+ if (!base_type->is_image())
+ return;
+
+ if (!validate_storage_for_sampler_image_types(var, state, loc))
+ return;
+
+ var->data.memory_read_only |= qual->flags.q.read_only;
+ var->data.memory_write_only |= qual->flags.q.write_only;
+ var->data.memory_coherent |= qual->flags.q.coherent;
+ var->data.memory_volatile |= qual->flags.q._volatile;
+ var->data.memory_restrict |= qual->flags.q.restrict_flag;
+
+ if (qual->flags.q.explicit_image_format) {
+ if (var->data.mode == ir_var_function_in) {
+ _mesa_glsl_error(loc, state, "format qualifiers cannot be used on "
+ "image function parameters");
+ }
+
+ if (qual->image_base_type != base_type->sampled_type) {
+ _mesa_glsl_error(loc, state, "format qualifier doesn't match the base "
+ "data type of the image");
+ }
+
+ var->data.image_format = qual->image_format;
+ } else if (state->has_image_load_formatted()) {
+ if (var->data.mode == ir_var_uniform &&
+ state->EXT_shader_image_load_formatted_warn) {
+         _mesa_glsl_warning(loc, state, "GL_EXT_shader_image_load_formatted used");
+ }
+ } else {
+ if (var->data.mode == ir_var_uniform) {
+ if (state->es_shader ||
+ !(state->is_version(420, 310) || state->ARB_shader_image_load_store_enable)) {
+ _mesa_glsl_error(loc, state, "all image uniforms must have a "
+ "format layout qualifier");
+ } else if (!qual->flags.q.write_only) {
+ _mesa_glsl_error(loc, state, "image uniforms not qualified with "
+ "`writeonly' must have a format layout qualifier");
+ }
+ }
+ var->data.image_format = PIPE_FORMAT_NONE;
+ }
+
+ /* From page 70 of the GLSL ES 3.1 specification:
+ *
+ * "Except for image variables qualified with the format qualifiers r32f,
+ * r32i, and r32ui, image variables must specify either memory qualifier
+ * readonly or the memory qualifier writeonly."
+ */
+ if (state->es_shader &&
+ var->data.image_format != PIPE_FORMAT_R32_FLOAT &&
+ var->data.image_format != PIPE_FORMAT_R32_SINT &&
+ var->data.image_format != PIPE_FORMAT_R32_UINT &&
+ !var->data.memory_read_only &&
+ !var->data.memory_write_only) {
+ _mesa_glsl_error(loc, state, "image variables of format other than r32f, "
+ "r32i or r32ui must be qualified `readonly' or "
+ "`writeonly'");
+ }
+}
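+
+/* Editor's sketch (not upstream) of the GLSL ES 3.1 rule enforced above;
+ * precision qualifiers are omitted for brevity:
+ *
+ *    layout(r32f) uniform image2D a;            // legal: r32f is exempt
+ *    layout(rgba8) readonly uniform image2D b;  // legal: readonly present
+ *    layout(rgba8) uniform image2D c;           // error in ES: needs
+ *                                               // readonly or writeonly
+ */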
+
+static inline const char*
+get_layout_qualifier_string(bool origin_upper_left, bool pixel_center_integer)
+{
+ if (origin_upper_left && pixel_center_integer)
+ return "origin_upper_left, pixel_center_integer";
+ else if (origin_upper_left)
+ return "origin_upper_left";
+ else if (pixel_center_integer)
+ return "pixel_center_integer";
+ else
+ return " ";
+}
+
+static inline bool
+is_conflicting_fragcoord_redeclaration(struct _mesa_glsl_parse_state *state,
+ const struct ast_type_qualifier *qual)
+{
+ /* If gl_FragCoord was previously declared, and the qualifiers were
+ * different in any way, return true.
+ */
+ if (state->fs_redeclares_gl_fragcoord) {
+ return (state->fs_pixel_center_integer != qual->flags.q.pixel_center_integer
+ || state->fs_origin_upper_left != qual->flags.q.origin_upper_left);
+ }
+
+ return false;
+}
+
+static inline bool
+is_conflicting_layer_redeclaration(struct _mesa_glsl_parse_state *state,
+ const struct ast_type_qualifier *qual)
+{
+ if (state->redeclares_gl_layer) {
+ return state->layer_viewport_relative != qual->flags.q.viewport_relative;
+ }
+ return false;
+}
+
+static inline void
+validate_array_dimensions(const glsl_type *t,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc) {
+ if (t->is_array()) {
+ t = t->fields.array;
+ while (t->is_array()) {
+ if (t->is_unsized_array()) {
+ _mesa_glsl_error(loc, state,
+ "only the outermost array dimension can "
+ "be unsized");
+ break;
+ }
+ t = t->fields.array;
+ }
+ }
+}
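+
+/* Editor's example (not upstream): only the outermost dimension may be
+ * left unsized,
+ *
+ *    float a[][4];  // legal: outer dimension unsized
+ *    float b[4][];  // error: inner dimension unsized
+ */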
+
+static void
+apply_bindless_qualifier_to_variable(const struct ast_type_qualifier *qual,
+ ir_variable *var,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc)
+{
+ bool has_local_qualifiers = qual->flags.q.bindless_sampler ||
+ qual->flags.q.bindless_image ||
+ qual->flags.q.bound_sampler ||
+ qual->flags.q.bound_image;
+
+ /* The ARB_bindless_texture spec says:
+ *
+ * "Modify Section 4.4.6 Opaque-Uniform Layout Qualifiers of the GLSL 4.30
+ * spec"
+ *
+ * "If these layout qualifiers are applied to other types of default block
+ * uniforms, or variables with non-uniform storage, a compile-time error
+ * will be generated."
+ */
+ if (has_local_qualifiers && !qual->flags.q.uniform) {
+ _mesa_glsl_error(loc, state, "ARB_bindless_texture layout qualifiers "
+ "can only be applied to default block uniforms or "
+ "variables with uniform storage");
+ return;
+ }
+
+ /* The ARB_bindless_texture spec doesn't state anything about this situation,
+ * but it makes sense to only allow bindless_sampler/bound_sampler for
+ * sampler types, and respectively bindless_image/bound_image for image
+ * types.
+ */
+ if ((qual->flags.q.bindless_sampler || qual->flags.q.bound_sampler) &&
+ !var->type->contains_sampler()) {
+ _mesa_glsl_error(loc, state, "bindless_sampler or bound_sampler can only "
+ "be applied to sampler types");
+ return;
+ }
+
+ if ((qual->flags.q.bindless_image || qual->flags.q.bound_image) &&
+ !var->type->contains_image()) {
+ _mesa_glsl_error(loc, state, "bindless_image or bound_image can only be "
+ "applied to image types");
+ return;
+ }
+
+ /* The bindless_sampler/bindless_image (and respectively
+ * bound_sampler/bound_image) layout qualifiers can be set at global and at
+ * local scope.
+ */
+ if (var->type->contains_sampler() || var->type->contains_image()) {
+ var->data.bindless = qual->flags.q.bindless_sampler ||
+ qual->flags.q.bindless_image ||
+ state->bindless_sampler_specified ||
+ state->bindless_image_specified;
+
+ var->data.bound = qual->flags.q.bound_sampler ||
+ qual->flags.q.bound_image ||
+ state->bound_sampler_specified ||
+ state->bound_image_specified;
+ }
+}
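+
+/* Editor's sketch (not upstream), assuming ARB_bindless_texture is enabled:
+ *
+ *    layout(bindless_sampler) uniform sampler2D s;  // legal
+ *    layout(bindless_sampler) uniform image2D i;    // error: sampler
+ *                                                   // qualifier on image
+ *    layout(bindless_image) in image2D j;           // error: non-uniform
+ *                                                   // storage
+ */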
+
+static void
+apply_layout_qualifier_to_variable(const struct ast_type_qualifier *qual,
+ ir_variable *var,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc)
+{
+ if (var->name != NULL && strcmp(var->name, "gl_FragCoord") == 0) {
+
+ /* Section 4.3.8.1, page 39 of GLSL 1.50 spec says:
+ *
+ * "Within any shader, the first redeclarations of gl_FragCoord
+ * must appear before any use of gl_FragCoord."
+ *
+ * Generate a compiler error if above condition is not met by the
+ * fragment shader.
+ */
+ ir_variable *earlier = state->symbols->get_variable("gl_FragCoord");
+ if (earlier != NULL &&
+ earlier->data.used &&
+ !state->fs_redeclares_gl_fragcoord) {
+ _mesa_glsl_error(loc, state,
+ "gl_FragCoord used before its first redeclaration "
+ "in fragment shader");
+ }
+
+ /* Make sure all gl_FragCoord redeclarations specify the same layout
+ * qualifiers.
+ */
+ if (is_conflicting_fragcoord_redeclaration(state, qual)) {
+ const char *const qual_string =
+ get_layout_qualifier_string(qual->flags.q.origin_upper_left,
+ qual->flags.q.pixel_center_integer);
+
+ const char *const state_string =
+ get_layout_qualifier_string(state->fs_origin_upper_left,
+ state->fs_pixel_center_integer);
+
+ _mesa_glsl_error(loc, state,
+ "gl_FragCoord redeclared with different layout "
+ "qualifiers (%s) and (%s) ",
+ state_string,
+ qual_string);
+ }
+ state->fs_origin_upper_left = qual->flags.q.origin_upper_left;
+ state->fs_pixel_center_integer = qual->flags.q.pixel_center_integer;
+ state->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers =
+ !qual->flags.q.origin_upper_left && !qual->flags.q.pixel_center_integer;
+ state->fs_redeclares_gl_fragcoord =
+ state->fs_origin_upper_left ||
+ state->fs_pixel_center_integer ||
+ state->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers;
+ }
+
+ if ((qual->flags.q.origin_upper_left || qual->flags.q.pixel_center_integer)
+ && (strcmp(var->name, "gl_FragCoord") != 0)) {
+ const char *const qual_string = (qual->flags.q.origin_upper_left)
+ ? "origin_upper_left" : "pixel_center_integer";
+
+ _mesa_glsl_error(loc, state,
+ "layout qualifier `%s' can only be applied to "
+ "fragment shader input `gl_FragCoord'",
+ qual_string);
+ }
+
+ if (qual->flags.q.explicit_location) {
+ apply_explicit_location(qual, var, state, loc);
+
+ if (qual->flags.q.explicit_component) {
+ unsigned qual_component;
+ if (process_qualifier_constant(state, loc, "component",
+ qual->component, &qual_component)) {
+ const glsl_type *type = var->type->without_array();
+ unsigned components = type->component_slots();
+
+ if (type->is_matrix() || type->is_struct()) {
+ _mesa_glsl_error(loc, state, "component layout qualifier "
+ "cannot be applied to a matrix, a structure, "
+ "a block, or an array containing any of "
+ "these.");
+ } else if (components > 4 && type->is_64bit()) {
+ _mesa_glsl_error(loc, state, "component layout qualifier "
+ "cannot be applied to dvec%u.",
+ components / 2);
+ } else if (qual_component != 0 &&
+ (qual_component + components - 1) > 3) {
+ _mesa_glsl_error(loc, state, "component overflow (%u > 3)",
+ (qual_component + components - 1));
+ } else if (qual_component == 1 && type->is_64bit()) {
+ /* We don't bother checking for 3 as it should be caught by the
+ * overflow check above.
+ */
+ _mesa_glsl_error(loc, state, "doubles cannot begin at "
+ "component 1 or 3");
+ } else {
+ var->data.explicit_component = true;
+ var->data.location_frac = qual_component;
+ }
+ }
+ }
+ } else if (qual->flags.q.explicit_index) {
+ if (!qual->subroutine_list)
+ _mesa_glsl_error(loc, state,
+ "explicit index requires explicit location");
+ } else if (qual->flags.q.explicit_component) {
+ _mesa_glsl_error(loc, state,
+ "explicit component requires explicit location");
+ }
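+
+ /* Editor's note (not upstream): an illustration of the component checks
+ * above, under ARB_enhanced_layouts:
+ *
+ *    layout(location = 0, component = 2) in float z;  // legal
+ *    layout(location = 0, component = 3) in vec2 v;   // error: overflow,
+ *                                                     // (3 + 2 - 1) > 3
+ */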
+
+ if (qual->flags.q.explicit_binding) {
+ apply_explicit_binding(state, loc, var, var->type, qual);
+ }
+
+ if (state->stage == MESA_SHADER_GEOMETRY &&
+ qual->flags.q.out && qual->flags.q.stream) {
+ unsigned qual_stream;
+ if (process_qualifier_constant(state, loc, "stream", qual->stream,
+ &qual_stream) &&
+ validate_stream_qualifier(loc, state, qual_stream)) {
+ var->data.stream = qual_stream;
+ }
+ }
+
+ if (qual->flags.q.out && qual->flags.q.xfb_buffer) {
+ unsigned qual_xfb_buffer;
+ if (process_qualifier_constant(state, loc, "xfb_buffer",
+ qual->xfb_buffer, &qual_xfb_buffer) &&
+ validate_xfb_buffer_qualifier(loc, state, qual_xfb_buffer)) {
+ var->data.xfb_buffer = qual_xfb_buffer;
+ if (qual->flags.q.explicit_xfb_buffer)
+ var->data.explicit_xfb_buffer = true;
+ }
+ }
+
+ if (qual->flags.q.explicit_xfb_offset) {
+ unsigned qual_xfb_offset;
+ unsigned component_size = var->type->contains_double() ? 8 : 4;
+
+ if (process_qualifier_constant(state, loc, "xfb_offset",
+ qual->offset, &qual_xfb_offset) &&
+ validate_xfb_offset_qualifier(loc, state, (int) qual_xfb_offset,
+ var->type, component_size)) {
+ var->data.offset = qual_xfb_offset;
+ var->data.explicit_xfb_offset = true;
+ }
+ }
+
+ if (qual->flags.q.explicit_xfb_stride) {
+ unsigned qual_xfb_stride;
+ if (process_qualifier_constant(state, loc, "xfb_stride",
+ qual->xfb_stride, &qual_xfb_stride)) {
+ var->data.xfb_stride = qual_xfb_stride;
+ var->data.explicit_xfb_stride = true;
+ }
+ }
+
+ if (var->type->contains_atomic()) {
+ if (var->data.mode == ir_var_uniform) {
+ if (var->data.explicit_binding) {
+ unsigned *offset =
+ &state->atomic_counter_offsets[var->data.binding];
+
+ if (*offset % ATOMIC_COUNTER_SIZE)
+ _mesa_glsl_error(loc, state,
+ "misaligned atomic counter offset");
+
+ var->data.offset = *offset;
+ *offset += var->type->atomic_size();
+
+ } else {
+ _mesa_glsl_error(loc, state,
+ "atomic counters require explicit binding point");
+ }
+ } else if (var->data.mode != ir_var_function_in) {
+ _mesa_glsl_error(loc, state, "atomic counters may only be declared as "
+ "function parameters or uniform-qualified "
+ "global variables");
+ }
+ }
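+
+ /* Editor's example (not upstream): atomic counter offsets within one
+ * binding advance automatically after an explicit offset, e.g.
+ *
+ *    layout(binding = 0, offset = 4) uniform atomic_uint c;  // offset 4
+ *    layout(binding = 0) uniform atomic_uint d;              // offset 8
+ */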
+
+ if (var->type->contains_sampler() &&
+ !validate_storage_for_sampler_image_types(var, state, loc))
+ return;
+
+ /* Check whether the 'layout' keyword is used with parameters that allow
+ * relaxed checking. Many implementations of GL_ARB_fragment_coord_conventions
+ * and some implementations (only Mesa?) of GL_ARB_explicit_attrib_location
+ * allowed the layout qualifier to be used with 'varying' and 'attribute'.
+ * These extensions and all following extensions that add the 'layout'
+ * keyword have been modified to require the use of 'in' or 'out'.
+ *
+ * The following extensions do not allow the deprecated keywords:
+ *
+ * GL_AMD_conservative_depth
+ * GL_ARB_conservative_depth
+ * GL_ARB_gpu_shader5
+ * GL_ARB_separate_shader_objects
+ * GL_ARB_tessellation_shader
+ * GL_ARB_transform_feedback3
+ * GL_ARB_uniform_buffer_object
+ *
+ * It is unknown whether GL_EXT_shader_image_load_store or GL_NV_gpu_shader5
+ * allow layout with the deprecated keywords.
+ */
+ const bool relaxed_layout_qualifier_checking =
+ state->ARB_fragment_coord_conventions_enable;
+
+ const bool uses_deprecated_qualifier = qual->flags.q.attribute
+ || qual->flags.q.varying;
+ if (qual->has_layout() && uses_deprecated_qualifier) {
+ if (relaxed_layout_qualifier_checking) {
+ _mesa_glsl_warning(loc, state,
+ "`layout' qualifier may not be used with "
+ "`attribute' or `varying'");
+ } else {
+ _mesa_glsl_error(loc, state,
+ "`layout' qualifier may not be used with "
+ "`attribute' or `varying'");
+ }
+ }
+
+ /* Layout qualifiers for gl_FragDepth, which are enabled by extension
+ * AMD_conservative_depth.
+ */
+ if (qual->flags.q.depth_type
+ && !state->is_version(420, 0)
+ && !state->AMD_conservative_depth_enable
+ && !state->ARB_conservative_depth_enable) {
+ _mesa_glsl_error(loc, state,
+ "extension GL_AMD_conservative_depth or "
+ "GL_ARB_conservative_depth must be enabled "
+ "to use depth layout qualifiers");
+ } else if (qual->flags.q.depth_type
+ && strcmp(var->name, "gl_FragDepth") != 0) {
+ _mesa_glsl_error(loc, state,
+ "depth layout qualifiers can be applied only to "
+ "gl_FragDepth");
+ }
+
+ switch (qual->depth_type) {
+ case ast_depth_any:
+ var->data.depth_layout = ir_depth_layout_any;
+ break;
+ case ast_depth_greater:
+ var->data.depth_layout = ir_depth_layout_greater;
+ break;
+ case ast_depth_less:
+ var->data.depth_layout = ir_depth_layout_less;
+ break;
+ case ast_depth_unchanged:
+ var->data.depth_layout = ir_depth_layout_unchanged;
+ break;
+ default:
+ var->data.depth_layout = ir_depth_layout_none;
+ break;
+ }
+
+ if (qual->flags.q.std140 ||
+ qual->flags.q.std430 ||
+ qual->flags.q.packed ||
+ qual->flags.q.shared) {
+ _mesa_glsl_error(loc, state,
+ "uniform and shader storage block layout qualifiers "
+ "std140, std430, packed, and shared can only be "
+ "applied to uniform or shader storage blocks, not "
+ "members");
+ }
+
+ if (qual->flags.q.row_major || qual->flags.q.column_major) {
+ validate_matrix_layout_for_type(state, loc, var->type, var);
+ }
+
+ /* From section 4.4.1.3 of the GLSL 4.50 specification (Fragment Shader
+ * Inputs):
+ *
+ * "Fragment shaders also allow the following layout qualifier on in only
+ * (not with variable declarations)
+ * layout-qualifier-id
+ * early_fragment_tests
+ * [...]"
+ */
+ if (qual->flags.q.early_fragment_tests) {
+ _mesa_glsl_error(loc, state, "early_fragment_tests layout qualifier only "
+ "valid in fragment shader input layout declaration.");
+ }
+
+ if (qual->flags.q.inner_coverage) {
+ _mesa_glsl_error(loc, state, "inner_coverage layout qualifier only "
+ "valid in fragment shader input layout declaration.");
+ }
+
+ if (qual->flags.q.post_depth_coverage) {
+ _mesa_glsl_error(loc, state, "post_depth_coverage layout qualifier only "
+ "valid in fragment shader input layout declaration.");
+ }
+
+ if (state->has_bindless())
+ apply_bindless_qualifier_to_variable(qual, var, state, loc);
+
+ if (qual->flags.q.pixel_interlock_ordered ||
+ qual->flags.q.pixel_interlock_unordered ||
+ qual->flags.q.sample_interlock_ordered ||
+ qual->flags.q.sample_interlock_unordered) {
+ _mesa_glsl_error(loc, state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ }
+
+ if (var->name != NULL && strcmp(var->name, "gl_Layer") == 0) {
+ if (is_conflicting_layer_redeclaration(state, qual)) {
+ _mesa_glsl_error(loc, state, "gl_Layer redeclaration with "
+ "different viewport_relative setting than earlier");
+ }
+ state->redeclares_gl_layer = 1;
+ if (qual->flags.q.viewport_relative) {
+ state->layer_viewport_relative = 1;
+ }
+ } else if (qual->flags.q.viewport_relative) {
+ _mesa_glsl_error(loc, state,
+ "viewport_relative qualifier "
+ "can only be applied to gl_Layer.");
+ }
+}
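+
+/* Editor's sketch (not upstream) of a gl_FragCoord redeclaration accepted
+ * by the checks above, assuming ARB_fragment_coord_conventions:
+ *
+ *    layout(origin_upper_left) in vec4 gl_FragCoord;
+ *
+ * Every redeclaration in the shader must repeat the same qualifiers, and
+ * the first one must precede any use of gl_FragCoord.
+ */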
+
+static void
+apply_type_qualifier_to_variable(const struct ast_type_qualifier *qual,
+ ir_variable *var,
+ struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ bool is_parameter)
+{
+ STATIC_ASSERT(sizeof(qual->flags.q) <= sizeof(qual->flags.i));
+
+ if (qual->flags.q.invariant) {
+ if (var->data.used) {
+ _mesa_glsl_error(loc, state,
+ "variable `%s' may not be redeclared "
+ "`invariant' after being used",
+ var->name);
+ } else {
+ var->data.explicit_invariant = true;
+ var->data.invariant = true;
+ }
+ }
+
+ if (qual->flags.q.precise) {
+ if (var->data.used) {
+ _mesa_glsl_error(loc, state,
+ "variable `%s' may not be redeclared "
+ "`precise' after being used",
+ var->name);
+ } else {
+ var->data.precise = 1;
+ }
+ }
+
+ if (qual->is_subroutine_decl() && !qual->flags.q.uniform) {
+ _mesa_glsl_error(loc, state,
+ "`subroutine' may only be applied to uniforms, "
+ "subroutine type declarations, or function definitions");
+ }
+
+ if (qual->flags.q.constant || qual->flags.q.attribute
+ || qual->flags.q.uniform
+ || (qual->flags.q.varying && (state->stage == MESA_SHADER_FRAGMENT)))
+ var->data.read_only = 1;
+
+ if (qual->flags.q.centroid)
+ var->data.centroid = 1;
+
+ if (qual->flags.q.sample)
+ var->data.sample = 1;
+
+ /* Precision qualifiers do not hold any meaning in Desktop GLSL */
+ if (state->es_shader) {
+ var->data.precision =
+ select_gles_precision(qual->precision, var->type, state, loc);
+ }
+
+ if (qual->flags.q.patch)
+ var->data.patch = 1;
+
+ if (qual->flags.q.attribute && state->stage != MESA_SHADER_VERTEX) {
+ var->type = glsl_type::error_type;
+ _mesa_glsl_error(loc, state,
+ "`attribute' variables may not be declared in the "
+ "%s shader",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+
+ /* Disallow layout qualifiers which may only appear on layout declarations. */
+ if (qual->flags.q.prim_type) {
+ _mesa_glsl_error(loc, state,
+ "Primitive type may only be specified on GS input or output "
+ "layout declaration, not on variables.");
+ }
+
+ /* Section 6.1.1 (Function Calling Conventions) of the GLSL 1.10 spec says:
+ *
+ * "However, the const qualifier cannot be used with out or inout."
+ *
+ * The same section of the GLSL 4.40 spec further clarifies this saying:
+ *
+ * "The const qualifier cannot be used with out or inout, or a
+ * compile-time error results."
+ */
+ if (is_parameter && qual->flags.q.constant && qual->flags.q.out) {
+ _mesa_glsl_error(loc, state,
+ "`const' may not be applied to `out' or `inout' "
+ "function parameters");
+ }
+
+ /* If there is no qualifier that changes the mode of the variable, leave
+ * the setting alone.
+ */
+ assert(var->data.mode != ir_var_temporary);
+ if (qual->flags.q.in && qual->flags.q.out)
+ var->data.mode = is_parameter ? ir_var_function_inout : ir_var_shader_out;
+ else if (qual->flags.q.in)
+ var->data.mode = is_parameter ? ir_var_function_in : ir_var_shader_in;
+ else if (qual->flags.q.attribute
+ || (qual->flags.q.varying && (state->stage == MESA_SHADER_FRAGMENT)))
+ var->data.mode = ir_var_shader_in;
+ else if (qual->flags.q.out)
+ var->data.mode = is_parameter ? ir_var_function_out : ir_var_shader_out;
+ else if (qual->flags.q.varying && (state->stage == MESA_SHADER_VERTEX))
+ var->data.mode = ir_var_shader_out;
+ else if (qual->flags.q.uniform)
+ var->data.mode = ir_var_uniform;
+ else if (qual->flags.q.buffer)
+ var->data.mode = ir_var_shader_storage;
+ else if (qual->flags.q.shared_storage)
+ var->data.mode = ir_var_shader_shared;
+
+ if (!is_parameter && state->has_framebuffer_fetch() &&
+ state->stage == MESA_SHADER_FRAGMENT) {
+ if (state->is_version(130, 300))
+ var->data.fb_fetch_output = qual->flags.q.in && qual->flags.q.out;
+ else
+ var->data.fb_fetch_output = (strcmp(var->name, "gl_LastFragData") == 0);
+ }
+
+ if (var->data.fb_fetch_output) {
+ var->data.assigned = true;
+ var->data.memory_coherent = !qual->flags.q.non_coherent;
+
+ /* From the EXT_shader_framebuffer_fetch spec:
+ *
+ * "It is an error to declare an inout fragment output not qualified
+ * with layout(noncoherent) if the GL_EXT_shader_framebuffer_fetch
+ * extension hasn't been enabled."
+ */
+ if (var->data.memory_coherent &&
+ !state->EXT_shader_framebuffer_fetch_enable)
+ _mesa_glsl_error(loc, state,
+ "invalid declaration of framebuffer fetch output not "
+ "qualified with layout(noncoherent)");
+
+ } else {
+ /* From the EXT_shader_framebuffer_fetch spec:
+ *
+ * "Fragment outputs declared inout may specify the following layout
+ * qualifier: [...] noncoherent"
+ */
+ if (qual->flags.q.non_coherent)
+ _mesa_glsl_error(loc, state,
+ "invalid layout(noncoherent) qualifier not part of "
+ "framebuffer fetch output declaration");
+ }
+
+ if (!is_parameter && is_varying_var(var, state->stage)) {
+ /* User-defined ins/outs are not permitted in compute shaders. */
+ if (state->stage == MESA_SHADER_COMPUTE) {
+ _mesa_glsl_error(loc, state,
+ "user-defined input and output variables are not "
+ "permitted in compute shaders");
+ }
+
+ /* This variable is being used to link data between shader stages (in
+ * pre-glsl-1.30 parlance, it's a "varying"). Check that it has a type
+ * that is allowed for such purposes.
+ *
+ * From page 25 (page 31 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "The varying qualifier can be used only with the data types
+ * float, vec2, vec3, vec4, mat2, mat3, and mat4, or arrays of
+ * these."
+ *
+ * This was relaxed in GLSL version 1.30 and GLSL ES version 3.00. From
+ * page 31 (page 37 of the PDF) of the GLSL 1.30 spec:
+ *
+ * "Fragment inputs can only be signed and unsigned integers and
+ * integer vectors, float, floating-point vectors, matrices, or
+ * arrays of these. Structures cannot be input."
+ *
+ * Similar text exists in the section on vertex shader outputs.
+ *
+ * Similar text exists in the GLSL ES 3.00 spec, except that the GLSL ES
+ * 3.00 spec allows structs as well. Varying structs are also allowed
+ * in GLSL 1.50.
+ *
+ * From section 4.3.4 of the ARB_bindless_texture spec:
+ *
+ * "(modify third paragraph of the section to allow sampler and image
+ * types) ... Vertex shader inputs can only be float,
+ * single-precision floating-point scalars, single-precision
+ * floating-point vectors, matrices, signed and unsigned integers
+ * and integer vectors, sampler and image types."
+ *
+ * From section 4.3.6 of the ARB_bindless_texture spec:
+ *
+ * "Output variables can only be floating-point scalars,
+ * floating-point vectors, matrices, signed or unsigned integers or
+ * integer vectors, sampler or image types, or arrays or structures
+ * of any these."
+ */
+ switch (var->type->without_array()->base_type) {
+ case GLSL_TYPE_FLOAT:
+ /* Ok in all GLSL versions */
+ break;
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ if (state->is_version(130, 300) || state->EXT_gpu_shader4_enable)
+ break;
+ _mesa_glsl_error(loc, state,
+ "varying variables must be of base type float in %s",
+ state->get_version_string());
+ break;
+ case GLSL_TYPE_STRUCT:
+ if (state->is_version(150, 300))
+ break;
+ _mesa_glsl_error(loc, state,
+ "varying variables may not be of type struct");
+ break;
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ if (state->has_bindless())
+ break;
+ /* fallthrough */
+ default:
+ _mesa_glsl_error(loc, state, "illegal type for a varying variable");
+ break;
+ }
+ }
+
+ if (state->all_invariant && var->data.mode == ir_var_shader_out) {
+ var->data.explicit_invariant = true;
+ var->data.invariant = true;
+ }
+
+ var->data.interpolation =
+ interpret_interpolation_qualifier(qual, var->type,
+ (ir_variable_mode) var->data.mode,
+ state, loc);
+
+ /* Does the declaration use the deprecated 'attribute' or 'varying'
+ * keywords?
+ */
+ const bool uses_deprecated_qualifier = qual->flags.q.attribute
+ || qual->flags.q.varying;
+
+ /* Validate auxiliary storage qualifiers */
+
+ /* From section 4.3.4 of the GLSL 1.30 spec:
+ * "It is an error to use centroid in in a vertex shader."
+ *
+ * From section 4.3.4 of the GLSL ES 3.00 spec:
+ * "It is an error to use centroid in or interpolation qualifiers in
+ * a vertex shader input."
+ */
+
+ /* Section 4.3.6 of the GLSL 1.30 specification states:
+ * "It is an error to use centroid out in a fragment shader."
+ *
+ * The GL_ARB_shading_language_420pack extension specification states:
+ * "It is an error to use auxiliary storage qualifiers or interpolation
+ * qualifiers on an output in a fragment shader."
+ */
+ if (qual->flags.q.sample && (!is_varying_var(var, state->stage) || uses_deprecated_qualifier)) {
+ _mesa_glsl_error(loc, state,
+ "sample qualifier may only be used on `in` or `out` "
+ "variables between shader stages");
+ }
+ if (qual->flags.q.centroid && !is_varying_var(var, state->stage)) {
+ _mesa_glsl_error(loc, state,
+ "centroid qualifier may only be used with `in', "
+ "`out' or `varying' variables between shader stages");
+ }
+
+ if (qual->flags.q.shared_storage && state->stage != MESA_SHADER_COMPUTE) {
+ _mesa_glsl_error(loc, state,
+ "the shared storage qualifiers can only be used with "
+ "compute shaders");
+ }
+
+ apply_image_qualifier_to_variable(qual, var, state, loc);
+}
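+
+/* Editor's examples (not upstream) of the varying type rules checked
+ * above, as they apply to a vertex shader:
+ *
+ *    varying vec3 n;             // legal in GLSL 1.10
+ *    flat out int i;             // needs GLSL 1.30 / ES 3.00
+ *    struct S { float f; };
+ *    out S s;                    // needs GLSL 1.50 / ES 3.00
+ */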
+
+/**
+ * Get the variable that is being redeclared by this declaration, or, if no
+ * such variable exists, the currently declared variable.
+ *
+ * Semantic checks to verify the validity of the redeclaration are also
+ * performed. If the semantic checks fail, a compilation error will be
+ * emitted via \c _mesa_glsl_error, but a non-\c NULL pointer will still be
+ * returned.
+ *
+ * \returns
+ * A pointer to an existing variable in the current scope if the declaration
+ * is a redeclaration, the current variable otherwise. The \c is_redeclaration
+ * out-parameter is set to \c true if the declaration is a redeclaration,
+ * \c false otherwise.
+ */
+static ir_variable *
+get_variable_being_redeclared(ir_variable **var_ptr, YYLTYPE loc,
+ struct _mesa_glsl_parse_state *state,
+ bool allow_all_redeclarations,
+ bool *is_redeclaration)
+{
+ ir_variable *var = *var_ptr;
+
+ /* Check if this declaration is actually a re-declaration, either to
+ * resize an array or add qualifiers to an existing variable.
+ *
+ * This is allowed for variables in the current scope, or when at
+ * global scope (for built-ins in the implicit outer scope).
+ */
+ ir_variable *earlier = state->symbols->get_variable(var->name);
+ if (earlier == NULL ||
+ (state->current_function != NULL &&
+ !state->symbols->name_declared_this_scope(var->name))) {
+ *is_redeclaration = false;
+ return var;
+ }
+
+ *is_redeclaration = true;
+
+ if (earlier->data.how_declared == ir_var_declared_implicitly) {
+ /* Verify that the redeclaration of a built-in does not change the
+ * storage qualifier. There are a couple special cases.
+ *
+ * 1. Some built-in variables that are defined as 'in' in the
+ * specification are implemented as system values. Allow
+ * ir_var_system_value -> ir_var_shader_in.
+ *
+ * 2. gl_LastFragData is implemented as a ir_var_shader_out, but the
+ * specification requires that redeclarations omit any qualifier.
+ * Allow ir_var_shader_out -> ir_var_auto for this one variable.
+ */
+ if (earlier->data.mode != var->data.mode &&
+ !(earlier->data.mode == ir_var_system_value &&
+ var->data.mode == ir_var_shader_in) &&
+ !(strcmp(var->name, "gl_LastFragData") == 0 &&
+ var->data.mode == ir_var_auto)) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration cannot change qualification of `%s'",
+ var->name);
+ }
+ }
+
+ /* From page 24 (page 30 of the PDF) of the GLSL 1.50 spec,
+ *
+ * "It is legal to declare an array without a size and then
+ * later re-declare the same name as an array of the same
+ * type and specify a size."
+ */
+ if (earlier->type->is_unsized_array() && var->type->is_array()
+ && (var->type->fields.array == earlier->type->fields.array)) {
+ const int size = var->type->array_size();
+ check_builtin_array_max_size(var->name, size, loc, state);
+ if ((size > 0) && (size <= earlier->data.max_array_access)) {
+ _mesa_glsl_error(& loc, state, "array size must be > %u due to "
+ "previous access",
+ earlier->data.max_array_access);
+ }
+
+ earlier->type = var->type;
+ delete var;
+ var = NULL;
+ *var_ptr = NULL;
+ } else if (earlier->type != var->type) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of `%s' has incorrect type",
+ var->name);
+ } else if ((state->ARB_fragment_coord_conventions_enable ||
+ state->is_version(150, 0))
+ && strcmp(var->name, "gl_FragCoord") == 0) {
+ /* Allow redeclaration of gl_FragCoord for ARB_fcc layout
+ * qualifiers.
+ *
+ * We don't really need to do anything here, just allow the
+ * redeclaration. Any error on the gl_FragCoord is handled on the ast
+ * level at apply_layout_qualifier_to_variable using the
+ * ast_type_qualifier and _mesa_glsl_parse_state, or later at
+ * linker.cpp.
+ */
+ /* According to section 4.3.7 of the GLSL 1.30 spec,
+ * the following built-in variables can be redeclared with an
+ * interpolation qualifier:
+ * * gl_FrontColor
+ * * gl_BackColor
+ * * gl_FrontSecondaryColor
+ * * gl_BackSecondaryColor
+ * * gl_Color
+ * * gl_SecondaryColor
+ */
+ } else if (state->is_version(130, 0)
+ && (strcmp(var->name, "gl_FrontColor") == 0
+ || strcmp(var->name, "gl_BackColor") == 0
+ || strcmp(var->name, "gl_FrontSecondaryColor") == 0
+ || strcmp(var->name, "gl_BackSecondaryColor") == 0
+ || strcmp(var->name, "gl_Color") == 0
+ || strcmp(var->name, "gl_SecondaryColor") == 0)) {
+ earlier->data.interpolation = var->data.interpolation;
+
+ /* Layout qualifiers for gl_FragDepth. */
+ } else if ((state->is_version(420, 0) ||
+ state->AMD_conservative_depth_enable ||
+ state->ARB_conservative_depth_enable)
+ && strcmp(var->name, "gl_FragDepth") == 0) {
+
+ /* From the AMD_conservative_depth spec:
+ *
+ * "Within any shader, the first redeclarations of gl_FragDepth
+ * must appear before any use of gl_FragDepth."
+ */
+ if (earlier->data.used) {
+ _mesa_glsl_error(&loc, state,
+ "the first redeclaration of gl_FragDepth "
+ "must appear before any use of gl_FragDepth");
+ }
+
+ /* Prevent inconsistent redeclaration of depth layout qualifier. */
+ if (earlier->data.depth_layout != ir_depth_layout_none
+ && earlier->data.depth_layout != var->data.depth_layout) {
+ _mesa_glsl_error(&loc, state,
+ "gl_FragDepth: depth layout is declared here "
+ "as '%s, but it was previously declared as "
+ "'%s'",
+ depth_layout_string(var->data.depth_layout),
+ depth_layout_string(earlier->data.depth_layout));
+ }
+
+ earlier->data.depth_layout = var->data.depth_layout;
+
+ } else if (state->has_framebuffer_fetch() &&
+ strcmp(var->name, "gl_LastFragData") == 0 &&
+ var->data.mode == ir_var_auto) {
+ /* According to the EXT_shader_framebuffer_fetch spec:
+ *
+ * "By default, gl_LastFragData is declared with the mediump precision
+ * qualifier. This can be changed by redeclaring the corresponding
+ * variables with the desired precision qualifier."
+ *
+ * "Fragment shaders may specify the following layout qualifier only for
+ * redeclaring the built-in gl_LastFragData array [...]: noncoherent"
+ */
+ earlier->data.precision = var->data.precision;
+ earlier->data.memory_coherent = var->data.memory_coherent;
+
+ } else if (state->NV_viewport_array2_enable &&
+ strcmp(var->name, "gl_Layer") == 0 &&
+ earlier->data.how_declared == ir_var_declared_implicitly) {
+ /* No need to do anything, just allow it. Qualifier is stored in state */
+
+ } else if ((earlier->data.how_declared == ir_var_declared_implicitly &&
+ state->allow_builtin_variable_redeclaration) ||
+ allow_all_redeclarations) {
+ /* Allow verbatim redeclarations of built-in variables. Not explicitly
+ * valid, but some applications do it.
+ */
+ } else {
+ _mesa_glsl_error(&loc, state, "`%s' redeclared", var->name);
+ }
+
+ return earlier;
+}
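+
+/* Editor's example (not upstream) of the legal unsized-array redeclaration
+ * handled above:
+ *
+ *    vec4 colors[];   // declared without a size
+ *    vec4 colors[4];  // redeclared with a size: legal, provided 4 exceeds
+ *                     // the highest index already used to access colors
+ */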
+
+/**
+ * Generate the IR for an initializer in a variable declaration
+ */
+static ir_rvalue *
+process_initializer(ir_variable *var, ast_declaration *decl,
+ ast_fully_specified_type *type,
+ exec_list *initializer_instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *mem_ctx = state;
+ ir_rvalue *result = NULL;
+
+ YYLTYPE initializer_loc = decl->initializer->get_location();
+
+ /* From page 24 (page 30 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "All uniform variables are read-only and are initialized either
+ * directly by an application via API commands, or indirectly by
+ * OpenGL."
+ */
+ if (var->data.mode == ir_var_uniform) {
+ state->check_version(120, 0, &initializer_loc,
+ "cannot initialize uniform %s",
+ var->name);
+ }
+
+ /* Section 4.3.7 "Buffer Variables" of the GLSL 4.30 spec:
+ *
+ * "Buffer variables cannot have initializers."
+ */
+ if (var->data.mode == ir_var_shader_storage) {
+ _mesa_glsl_error(&initializer_loc, state,
+ "cannot initialize buffer variable %s",
+ var->name);
+ }
+
+ /* From section 4.1.7 of the GLSL 4.40 spec:
+ *
+ * "Opaque variables [...] are initialized only through the
+ * OpenGL API; they cannot be declared with an initializer in a
+ * shader."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ */
+ if (var->type->contains_atomic() ||
+ (!state->has_bindless() && var->type->contains_opaque())) {
+ _mesa_glsl_error(&initializer_loc, state,
+ "cannot initialize %s variable %s",
+ state->has_bindless() ? "atomic" : "opaque", var->name);
+ }
+
+ if ((var->data.mode == ir_var_shader_in) && (state->current_function == NULL)) {
+ _mesa_glsl_error(&initializer_loc, state,
+ "cannot initialize %s shader input / %s %s",
+ _mesa_shader_stage_to_string(state->stage),
+ (state->stage == MESA_SHADER_VERTEX)
+ ? "attribute" : "varying",
+ var->name);
+ }
+
+ if (var->data.mode == ir_var_shader_out && state->current_function == NULL) {
+ _mesa_glsl_error(&initializer_loc, state,
+ "cannot initialize %s shader output %s",
+ _mesa_shader_stage_to_string(state->stage),
+ var->name);
+ }
+
+ /* If the initializer is an ast_aggregate_initializer, recursively store
+ * type information from the LHS into it, so that its hir() function can do
+ * type checking.
+ */
+ if (decl->initializer->oper == ast_aggregate)
+ _mesa_ast_set_aggregate_type(var->type, decl->initializer);
+
+ ir_dereference *const lhs = new(state) ir_dereference_variable(var);
+ ir_rvalue *rhs = decl->initializer->hir(initializer_instructions, state);
+
+ /* Calculate the constant value if this is a const or uniform
+ * declaration.
+ *
+ * Section 4.3 (Storage Qualifiers) of the GLSL ES 1.00.17 spec says:
+ *
+ * "Declarations of globals without a storage qualifier, or with
+ * just the const qualifier, may include initializers, in which case
+ * they will be initialized before the first line of main() is
+ * executed. Such initializers must be a constant expression."
+ *
+ * The same section of the GLSL ES 3.00.4 spec has similar language.
+ */
+ if (type->qualifier.flags.q.constant
+ || type->qualifier.flags.q.uniform
+ || (state->es_shader && state->current_function == NULL)) {
+ ir_rvalue *new_rhs = validate_assignment(state, initializer_loc,
+ lhs, rhs, true);
+ if (new_rhs != NULL) {
+ rhs = new_rhs;
+
+ /* Section 4.3.3 (Constant Expressions) of the GLSL ES 3.00.4 spec
+ * says:
+ *
+ * "A constant expression is one of
+ *
+ * ...
+ *
+ * - an expression formed by an operator on operands that are
+ * all constant expressions, including getting an element of
+ * a constant array, or a field of a constant structure, or
+ * components of a constant vector. However, the sequence
+ * operator ( , ) and the assignment operators ( =, +=, ...)
+ * are not included in the operators that can create a
+ * constant expression."
+ *
+ * Section 12.43 (Sequence operator and constant expressions) says:
+ *
+ * "Should the following construct be allowed?
+ *
+ * float a[2,3];
+ *
+ * The expression within the brackets uses the sequence operator
+ * (',') and returns the integer 3 so the construct is declaring
+ * a single-dimensional array of size 3. In some languages, the
+ * construct declares a two-dimensional array. It would be
+ * preferable to make this construct illegal to avoid confusion.
+ *
+ * One possibility is to change the definition of the sequence
+ * operator so that it does not return a constant-expression and
+ * hence cannot be used to declare an array size.
+ *
+ * RESOLUTION: The result of a sequence operator is not a
+ * constant-expression."
+ *
+ * Section 4.3.3 (Constant Expressions) of the GLSL 4.30.9 spec
+ * contains language almost identical to the section 4.3.3 in the
+ * GLSL ES 3.00.4 spec. This is a new limitation for these GLSL
+ * versions.
+ */
+ ir_constant *constant_value =
+ rhs->constant_expression_value(mem_ctx);
+
+ if (!constant_value ||
+ (state->is_version(430, 300) &&
+ decl->initializer->has_sequence_subexpression())) {
+ const char *const variable_mode =
+ (type->qualifier.flags.q.constant)
+ ? "const"
+ : ((type->qualifier.flags.q.uniform) ? "uniform" : "global");
+
+ /* If ARB_shading_language_420pack is enabled, initializers of
+ * const-qualified local variables do not have to be constant
+ * expressions. Const-qualified global variables must still be
+ * initialized with constant expressions.
+ */
+ if (!state->has_420pack()
+ || state->current_function == NULL) {
+ _mesa_glsl_error(& initializer_loc, state,
+ "initializer of %s variable `%s' must be a "
+ "constant expression",
+ variable_mode,
+ decl->identifier);
+ if (var->type->is_numeric()) {
+ /* Reduce cascading errors. */
+ var->constant_value = type->qualifier.flags.q.constant
+ ? ir_constant::zero(state, var->type) : NULL;
+ }
+ }
+ } else {
+ rhs = constant_value;
+ var->constant_value = type->qualifier.flags.q.constant
+ ? constant_value : NULL;
+ }
+ } else {
+ if (var->type->is_numeric()) {
+ /* Reduce cascading errors. */
+ rhs = var->constant_value = type->qualifier.flags.q.constant
+ ? ir_constant::zero(state, var->type) : NULL;
+ }
+ }
+ }
+
+ if (rhs && !rhs->type->is_error()) {
+ bool temp = var->data.read_only;
+ if (type->qualifier.flags.q.constant)
+ var->data.read_only = false;
+
+ /* Never emit code to initialize a uniform.
+ */
+ const glsl_type *initializer_type;
+ bool error_emitted = false;
+ if (!type->qualifier.flags.q.uniform) {
+ error_emitted =
+ do_assignment(initializer_instructions, state,
+ NULL, lhs, rhs,
+ &result, true, true,
+ type->get_location());
+ initializer_type = result->type;
+ } else
+ initializer_type = rhs->type;
+
+ if (!error_emitted) {
+ var->constant_initializer = rhs->constant_expression_value(mem_ctx);
+ var->data.has_initializer = true;
+
+ /* If the declared variable is an unsized array, it must inherit
+ * its full type from the initializer. A declaration such as
+ *
+ * uniform float a[] = float[](1.0, 2.0, 3.0, 3.0);
+ *
+ * becomes
+ *
+ * uniform float a[4] = float[](1.0, 2.0, 3.0, 3.0);
+ *
+ * The assignment generated in the if-statement (below) will also
+ * automatically handle this case for non-uniforms.
+ *
+ * If the declared variable is not an array, the types must
+ * already match exactly. As a result, the type assignment
+ * here can be done unconditionally. For non-uniforms the call
+ * to do_assignment can change the type of the initializer (via
+ * the implicit conversion rules). For uniforms the initializer
+ * must be a constant expression, and the type of that expression
+ * was validated above.
+ */
+ var->type = initializer_type;
+ }
+
+ var->data.read_only = temp;
+ }
+
+ return result;
+}
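+
+/* Editor's examples (not upstream) of the initializer rules above:
+ *
+ *    uniform float u = 1.0;                  // needs GLSL 1.20+
+ *    const float c = 2.0 * 0.5;              // constant expression: legal
+ *    uniform float a[] = float[](1.0, 2.0);  // a is sized to float[2]
+ *    buffer B { float b = 1.0; };            // error: buffer variables
+ *                                            // cannot have initializers
+ */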
+
+static void
+validate_layout_qualifier_vertex_count(struct _mesa_glsl_parse_state *state,
+ YYLTYPE loc, ir_variable *var,
+ unsigned num_vertices,
+ unsigned *size,
+ const char *var_category)
+{
+ if (var->type->is_unsized_array()) {
+ /* Section 4.3.8.1 (Input Layout Qualifiers) of the GLSL 1.50 spec says:
+ *
+ * All geometry shader input unsized array declarations will be
+ * sized by an earlier input layout qualifier, when present, as per
+ * the following table.
+ *
+ * Followed by a table mapping each allowed input layout qualifier to
+ * the corresponding input length.
+ *
+ * Similarly for tessellation control shader outputs.
+ */
+ if (num_vertices != 0)
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ num_vertices);
+ } else {
+ /* Section 4.3.8.1 (Input Layout Qualifiers) of the GLSL 1.50 spec
+ * includes the following examples of compile-time errors:
+ *
+ * // code sequence within one shader...
+ * in vec4 Color1[]; // size unknown
+ * ...Color1.length()...// illegal, length() unknown
+ * in vec4 Color2[2]; // size is 2
+ * ...Color1.length()...// illegal, Color1 still has no size
+ * in vec4 Color3[3]; // illegal, input sizes are inconsistent
+ * layout(lines) in; // legal, input size is 2, matching
+ * in vec4 Color4[3]; // illegal, contradicts layout
+ * ...
+ *
+ * To detect the case illustrated by Color3, we verify that the size of
+ * an explicitly-sized array matches the size of any previously declared
+ * explicitly-sized array. To detect the case illustrated by Color4, we
+ * verify that the size of an explicitly-sized array is consistent with
+ * any previously declared input layout.
+ */
+ if (num_vertices != 0 && var->type->length != num_vertices) {
+ _mesa_glsl_error(&loc, state,
+ "%s size contradicts previously declared layout "
+ "(size is %u, but layout requires a size of %u)",
+ var_category, var->type->length, num_vertices);
+ } else if (*size != 0 && var->type->length != *size) {
+ _mesa_glsl_error(&loc, state,
+ "%s sizes are inconsistent (size is %u, but a "
+ "previous declaration has size %u)",
+ var_category, var->type->length, *size);
+ } else {
+ *size = var->type->length;
+ }
+ }
+}
+
+static void
+handle_tess_ctrl_shader_output_decl(struct _mesa_glsl_parse_state *state,
+ YYLTYPE loc, ir_variable *var)
+{
+ unsigned num_vertices = 0;
+
+ if (state->tcs_output_vertices_specified) {
+ if (!state->out_qualifier->vertices->
+ process_qualifier_constant(state, "vertices",
+ &num_vertices, false)) {
+ return;
+ }
+
+ if (num_vertices > state->Const.MaxPatchVertices) {
+ _mesa_glsl_error(&loc, state, "vertices (%d) exceeds "
+ "GL_MAX_PATCH_VERTICES", num_vertices);
+ return;
+ }
+ }
+
+ if (!var->type->is_array() && !var->data.patch) {
+ _mesa_glsl_error(&loc, state,
+ "tessellation control shader outputs must be arrays");
+
+ /* To avoid cascading failures, short circuit the checks below. */
+ return;
+ }
+
+ if (var->data.patch)
+ return;
+
+ validate_layout_qualifier_vertex_count(state, loc, var, num_vertices,
+ &state->tcs_output_size,
+ "tessellation control shader output");
+}
+
+/**
+ * Do additional processing necessary for tessellation control/evaluation shader
+ * input declarations. This covers both interface block arrays and bare input
+ * variables.
+ */
+static void
+handle_tess_shader_input_decl(struct _mesa_glsl_parse_state *state,
+ YYLTYPE loc, ir_variable *var)
+{
+ if (!var->type->is_array() && !var->data.patch) {
+ _mesa_glsl_error(&loc, state,
+ "per-vertex tessellation shader inputs must be arrays");
+ /* Avoid cascading failures. */
+ return;
+ }
+
+ if (var->data.patch)
+ return;
+
+ /* The ARB_tessellation_shader spec says:
+ *
+ * "Declaring an array size is optional. If no size is specified, it
+ * will be taken from the implementation-dependent maximum patch size
+ * (gl_MaxPatchVertices). If a size is specified, it must match the
+ * maximum patch size; otherwise, a compile or link error will occur."
+ *
+ * This text appears twice, once for TCS inputs, and again for TES inputs.
+ */
+ if (var->type->is_unsized_array()) {
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ state->Const.MaxPatchVertices);
+ } else if (var->type->length != state->Const.MaxPatchVertices) {
+ _mesa_glsl_error(&loc, state,
+ "per-vertex tessellation shader input arrays must be "
+ "sized to gl_MaxPatchVertices (%d).",
+ state->Const.MaxPatchVertices);
+ }
+}
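+
+/* Editor's sketch (not upstream) of the rule quoted above, in a
+ * tessellation evaluation shader:
+ *
+ *    in vec4 pos[];                      // sized to gl_MaxPatchVertices
+ *    in vec4 clr[gl_MaxPatchVertices];   // legal: explicit size matches
+ *    in vec4 bad[3];                     // error unless 3 equals
+ *                                        // gl_MaxPatchVertices
+ */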
+
+
+/**
+ * Do additional processing necessary for geometry shader input declarations
+ * (this covers both interface block arrays and bare input variables).
+ */
+static void
+handle_geometry_shader_input_decl(struct _mesa_glsl_parse_state *state,
+ YYLTYPE loc, ir_variable *var)
+{
+ unsigned num_vertices = 0;
+
+ if (state->gs_input_prim_type_specified) {
+ num_vertices = vertices_per_prim(state->in_qualifier->prim_type);
+ }
+
+ /* Geometry shader input variables must be arrays. Caller should have
+ * reported an error for this.
+ */
+ if (!var->type->is_array()) {
+ assert(state->error);
+
+ /* To avoid cascading failures, short circuit the checks below. */
+ return;
+ }
+
+ validate_layout_qualifier_vertex_count(state, loc, var, num_vertices,
+ &state->gs_input_size,
+ "geometry shader input");
+}
+
+static void
+validate_identifier(const char *identifier, YYLTYPE loc,
+ struct _mesa_glsl_parse_state *state)
+{
+ /* From page 15 (page 21 of the PDF) of the GLSL 1.10 spec,
+ *
+ * "Identifiers starting with "gl_" are reserved for use by
+ * OpenGL, and may not be declared in a shader as either a
+ * variable or a function."
+ */
+ if (is_gl_identifier(identifier)) {
+ _mesa_glsl_error(&loc, state,
+ "identifier `%s' uses reserved `gl_' prefix",
+ identifier);
+ } else if (strstr(identifier, "__")) {
+ /* From page 14 (page 20 of the PDF) of the GLSL 1.10
+ * spec:
+ *
+ * "In addition, all identifiers containing two
+ * consecutive underscores (__) are reserved as
+ * possible future keywords."
+ *
+ * The intention is that names containing __ are reserved for internal
+ * use by the implementation, and names prefixed with GL_ are reserved
+ * for use by Khronos. Names simply containing __ are dangerous to use,
+ * but should be allowed.
+ *
+ * A future version of the GLSL specification will clarify this.
+ */
+ _mesa_glsl_warning(&loc, state,
+ "identifier `%s' uses reserved `__' string",
+ identifier);
+ }
+}
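+
+/* Editor's examples (not upstream) of the identifier rules above:
+ *
+ *    float gl_temp;    // error: reserved "gl_" prefix
+ *    float my__value;  // warning: "__" is reserved, but tolerated
+ */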
+
+ir_rvalue *
+ast_declarator_list::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ const struct glsl_type *decl_type;
+ const char *type_name = NULL;
+ ir_rvalue *result = NULL;
+ YYLTYPE loc = this->get_location();
+
+ /* From page 46 (page 52 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "To ensure that a particular output variable is invariant, it is
+ * necessary to use the invariant qualifier. It can either be used to
+ * qualify a previously declared variable as being invariant
+ *
+ * invariant gl_Position; // make existing gl_Position be invariant"
+ *
+ * In these cases the parser will set the 'invariant' flag in the declarator
+ * list, and the type will be NULL.
+ */
+ if (this->invariant) {
+ assert(this->type == NULL);
+
+ if (state->current_function != NULL) {
+ _mesa_glsl_error(& loc, state,
+ "all uses of `invariant' keyword must be at global "
+ "scope");
+ }
+
+ foreach_list_typed (ast_declaration, decl, link, &this->declarations) {
+ assert(decl->array_specifier == NULL);
+ assert(decl->initializer == NULL);
+
+ ir_variable *const earlier =
+ state->symbols->get_variable(decl->identifier);
+ if (earlier == NULL) {
+ _mesa_glsl_error(& loc, state,
+ "undeclared variable `%s' cannot be marked "
+ "invariant", decl->identifier);
+ } else if (!is_allowed_invariant(earlier, state)) {
+ _mesa_glsl_error(&loc, state,
+ "`%s' cannot be marked invariant; interfaces between "
+ "shader stages only.", decl->identifier);
+ } else if (earlier->data.used) {
+ _mesa_glsl_error(& loc, state,
+ "variable `%s' may not be redeclared "
+ "`invariant' after being used",
+ earlier->name);
+ } else {
+ earlier->data.explicit_invariant = true;
+ earlier->data.invariant = true;
+ }
+ }
+
+ /* Invariant redeclarations do not have r-values.
+ */
+ return NULL;
+ }
+
+ if (this->precise) {
+ assert(this->type == NULL);
+
+ foreach_list_typed (ast_declaration, decl, link, &this->declarations) {
+ assert(decl->array_specifier == NULL);
+ assert(decl->initializer == NULL);
+
+ ir_variable *const earlier =
+ state->symbols->get_variable(decl->identifier);
+ if (earlier == NULL) {
+ _mesa_glsl_error(& loc, state,
+ "undeclared variable `%s' cannot be marked "
+ "precise", decl->identifier);
+ } else if (state->current_function != NULL &&
+ !state->symbols->name_declared_this_scope(decl->identifier)) {
+ /* Note: we have to check if we're in a function, since
+ * builtins are treated as having come from another scope.
+ */
+ _mesa_glsl_error(& loc, state,
+ "variable `%s' from an outer scope may not be "
+ "redeclared `precise' in this scope",
+ earlier->name);
+ } else if (earlier->data.used) {
+ _mesa_glsl_error(& loc, state,
+ "variable `%s' may not be redeclared "
+ "`precise' after being used",
+ earlier->name);
+ } else {
+ earlier->data.precise = true;
+ }
+ }
+
+ /* Precise redeclarations do not have r-values either. */
+ return NULL;
+ }
+
+ assert(this->type != NULL);
+ assert(!this->invariant);
+ assert(!this->precise);
+
+ /* GL_EXT_shader_image_load_store base type uses GLSL_TYPE_VOID as a special value to
+ * indicate that it needs to be updated later (see glsl_parser.yy).
+ * This is done here, based on the layout qualifier and the type of the image var
+ */
+ if (this->type->qualifier.flags.q.explicit_image_format &&
+ this->type->specifier->type->is_image() &&
+ this->type->qualifier.image_base_type == GLSL_TYPE_VOID) {
+ /* "The ARB_shader_image_load_store says:
+ * If both extensions are enabled in the shading language, the "size*" layout
+ * qualifiers are treated as format qualifiers, and are mapped to equivalent
+ * format qualifiers in the table below, according to the type of image
+ * variable.
+ * image* iimage* uimage*
+ * -------- -------- --------
+ * size1x8 n/a r8i r8ui
+ * size1x16 r16f r16i r16ui
+ * size1x32 r32f r32i r32ui
+ * size2x32 rg32f rg32i rg32ui
+ * size4x32 rgba32f rgba32i rgba32ui"
+ */
+ if (strncmp(this->type->specifier->type_name, "image", strlen("image")) == 0) {
+ switch (this->type->qualifier.image_format) {
+ case PIPE_FORMAT_R8_SINT:
+ /* No valid qualifier in this case, driver will need to look at
+ * the underlying image's format (just like no qualifier being
+ * present).
+ */
+ this->type->qualifier.image_format = PIPE_FORMAT_NONE;
+ break;
+ case PIPE_FORMAT_R16_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R16_FLOAT;
+ break;
+ case PIPE_FORMAT_R32_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R32_FLOAT;
+ break;
+ case PIPE_FORMAT_R32G32_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R32G32_FLOAT;
+ break;
+ case PIPE_FORMAT_R32G32B32A32_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R32G32B32A32_FLOAT;
+ break;
+ default:
+ unreachable("Unknown image format");
+ }
+ this->type->qualifier.image_base_type = GLSL_TYPE_FLOAT;
+ } else if (strncmp(this->type->specifier->type_name, "uimage", strlen("uimage")) == 0) {
+ switch (this->type->qualifier.image_format) {
+ case PIPE_FORMAT_R8_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R8_UINT;
+ break;
+ case PIPE_FORMAT_R16_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R16_UINT;
+ break;
+ case PIPE_FORMAT_R32_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R32_UINT;
+ break;
+ case PIPE_FORMAT_R32G32_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R32G32_UINT;
+ break;
+ case PIPE_FORMAT_R32G32B32A32_SINT:
+ this->type->qualifier.image_format = PIPE_FORMAT_R32G32B32A32_UINT;
+ break;
+ default:
+ unreachable("Unknown image format");
+ }
+ this->type->qualifier.image_base_type = GLSL_TYPE_UINT;
+ } else if (strncmp(this->type->specifier->type_name, "iimage", strlen("iimage")) == 0) {
+ this->type->qualifier.image_base_type = GLSL_TYPE_INT;
+ } else {
+ assert(false);
+ }
+ }
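+
+ /* Editor's example (not upstream) of the remapping above, assuming both
+ * image load/store extensions are enabled:
+ *
+ *    layout(size1x32) uniform image2D a;   // treated as layout(r32f)
+ *    layout(size1x32) uniform uimage2D b;  // treated as layout(r32ui)
+ */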
+
+ /* The type specifier may contain a structure definition. Process that
+ * before any of the variable declarations.
+ */
+ (void) this->type->specifier->hir(instructions, state);
+
+ decl_type = this->type->glsl_type(& type_name, state);
+
+ /* Section 4.3.7 "Buffer Variables" of the GLSL 4.30 spec:
+ * "Buffer variables may only be declared inside interface blocks
+ * (section 4.3.9 “Interface Blocks”), which are then referred to as
+ * shader storage blocks. It is a compile-time error to declare buffer
+ * variables at global scope (outside a block)."
+ */
+ if (type->qualifier.flags.q.buffer && !decl_type->is_interface()) {
+ _mesa_glsl_error(&loc, state,
+ "buffer variables cannot be declared outside "
+ "interface blocks");
+ }
+
+ /* An offset-qualified atomic counter declaration sets the default
+ * offset for the next declaration within the same atomic counter
+ * buffer.
+ */
+ if (decl_type && decl_type->contains_atomic()) {
+ if (type->qualifier.flags.q.explicit_binding &&
+ type->qualifier.flags.q.explicit_offset) {
+ unsigned qual_binding;
+ unsigned qual_offset;
+ if (process_qualifier_constant(state, &loc, "binding",
+ type->qualifier.binding,
+ &qual_binding)
+ && process_qualifier_constant(state, &loc, "offset",
+ type->qualifier.offset,
+ &qual_offset)) {
+ if (qual_binding < ARRAY_SIZE(state->atomic_counter_offsets))
+ state->atomic_counter_offsets[qual_binding] = qual_offset;
+ }
+ }
+
+ ast_type_qualifier allowed_atomic_qual_mask;
+ allowed_atomic_qual_mask.flags.i = 0;
+ allowed_atomic_qual_mask.flags.q.explicit_binding = 1;
+ allowed_atomic_qual_mask.flags.q.explicit_offset = 1;
+ allowed_atomic_qual_mask.flags.q.uniform = 1;
+
+ type->qualifier.validate_flags(&loc, state, allowed_atomic_qual_mask,
+ "invalid layout qualifier for",
+ "atomic_uint");
+ }
+
+ if (this->declarations.is_empty()) {
+ /* If there is no structure involved in the program text, there are three
+ * possible scenarios:
+ *
+ * - The program text contained something like 'vec4;'. This is an
+ * empty declaration. It is valid but weird. Emit a warning.
+ *
+ * - The program text contained something like 'S;' and 'S' is not the
+ * name of a known structure type. This is both invalid and weird.
+ * Emit an error.
+ *
+ * - The program text contained something like 'mediump float;'
+ * when the programmer probably meant 'precision mediump
+ * float;' Emit a warning with a description of what they
+ * probably meant to do.
+ *
+ * Note that if decl_type is NULL and there is a structure involved,
+ * there must have been some sort of error with the structure. In this
+ * case we assume that an error was already generated on this line of
+ * code for the structure. There is no need to generate an additional,
+ * confusing error.
+ */
+ assert(this->type->specifier->structure == NULL || decl_type != NULL
+ || state->error);
+
+ if (decl_type == NULL) {
+ _mesa_glsl_error(&loc, state,
+ "invalid type `%s' in empty declaration",
+ type_name);
+ } else {
+ if (decl_type->is_array()) {
+ /* From Section 13.22 (Array Declarations) of the GLSL ES 3.2
+ * spec:
+ *
+ * "... any declaration that leaves the size undefined is
+ * disallowed as this would add complexity and there are no
+ * use-cases."
+ */
+ if (state->es_shader && decl_type->is_unsized_array()) {
+ _mesa_glsl_error(&loc, state, "array size must be explicitly "
+ "or implicitly defined");
+ }
+
+ /* From Section 4.12 (Empty Declarations) of the GLSL 4.5 spec:
+ *
+ * "The combinations of types and qualifiers that cause
+ * compile-time or link-time errors are the same whether or not
+ * the declaration is empty."
+ */
+ validate_array_dimensions(decl_type, state, &loc);
+ }
+
+ if (decl_type->is_atomic_uint()) {
+ /* Empty atomic counter declarations are allowed and useful
+ * to set the default offset qualifier.
+ */
+ return NULL;
+ } else if (this->type->qualifier.precision != ast_precision_none) {
+ if (this->type->specifier->structure != NULL) {
+ _mesa_glsl_error(&loc, state,
+ "precision qualifiers can't be applied "
+ "to structures");
+ } else {
+ static const char *const precision_names[] = {
+ "highp",
+ "highp",
+ "mediump",
+ "lowp"
+ };
+
+ _mesa_glsl_warning(&loc, state,
+ "empty declaration with precision "
+ "qualifier, to set the default precision, "
+ "use `precision %s %s;'",
+ precision_names[this->type->
+ qualifier.precision],
+ type_name);
+ }
+ } else if (this->type->specifier->structure == NULL) {
+ _mesa_glsl_warning(&loc, state, "empty declaration");
+ }
+ }
+ }
+
+ foreach_list_typed (ast_declaration, decl, link, &this->declarations) {
+ const struct glsl_type *var_type;
+ ir_variable *var;
+ const char *identifier = decl->identifier;
+ /* FINISHME: Emit a warning if a variable declaration shadows a
+ * FINISHME: declaration at a higher scope.
+ */
+
+ if ((decl_type == NULL) || decl_type->is_void()) {
+ if (type_name != NULL) {
+ _mesa_glsl_error(& loc, state,
+ "invalid type `%s' in declaration of `%s'",
+ type_name, decl->identifier);
+ } else {
+ _mesa_glsl_error(& loc, state,
+ "invalid type in declaration of `%s'",
+ decl->identifier);
+ }
+ continue;
+ }
+
+ if (this->type->qualifier.is_subroutine_decl()) {
+ const glsl_type *t;
+ const char *name;
+
+ t = state->symbols->get_type(this->type->specifier->type_name);
+ if (!t)
+ _mesa_glsl_error(& loc, state,
+ "invalid type in declaration of `%s'",
+ decl->identifier);
+ name = ralloc_asprintf(ctx, "%s_%s", _mesa_shader_stage_to_subroutine_prefix(state->stage), decl->identifier);
+
+ identifier = name;
+
+ }
+ var_type = process_array_type(&loc, decl_type, decl->array_specifier,
+ state);
+
+ var = new(ctx) ir_variable(var_type, identifier, ir_var_auto);
+
+ /* The 'varying in' and 'varying out' qualifiers can only be used with
+ * ARB_geometry_shader4 and EXT_geometry_shader4, which we don't support
+ * yet.
+ */
+ if (this->type->qualifier.flags.q.varying) {
+ if (this->type->qualifier.flags.q.in) {
+ _mesa_glsl_error(& loc, state,
+ "`varying in' qualifier in declaration of "
+ "`%s' only valid for geometry shaders using "
+ "ARB_geometry_shader4 or EXT_geometry_shader4",
+ decl->identifier);
+ } else if (this->type->qualifier.flags.q.out) {
+ _mesa_glsl_error(& loc, state,
+ "`varying out' qualifier in declaration of "
+ "`%s' only valid for geometry shaders using "
+ "ARB_geometry_shader4 or EXT_geometry_shader4",
+ decl->identifier);
+ }
+ }
+
+ /* From page 22 (page 28 of the PDF) of the GLSL 1.10 specification;
+ *
+ * "Global variables can only use the qualifiers const,
+ * attribute, uniform, or varying. Only one may be
+ * specified.
+ *
+ * Local variables can only use the qualifier const."
+ *
+ * This is relaxed in GLSL 1.30 and GLSL ES 3.00. It is also relaxed by
+ * any extension that adds the 'layout' keyword.
+ */
+ if (!state->is_version(130, 300)
+ && !state->has_explicit_attrib_location()
+ && !state->has_separate_shader_objects()
+ && !state->ARB_fragment_coord_conventions_enable) {
+      /* GL_EXT_gpu_shader4 only allows "varying out" on fragment shader
+       * outputs (the varying flag is not set by the parser).
+       */
+ if (this->type->qualifier.flags.q.out &&
+ (!state->EXT_gpu_shader4_enable ||
+ state->stage != MESA_SHADER_FRAGMENT)) {
+ _mesa_glsl_error(& loc, state,
+ "`out' qualifier in declaration of `%s' "
+ "only valid for function parameters in %s",
+ decl->identifier, state->get_version_string());
+ }
+ if (this->type->qualifier.flags.q.in) {
+ _mesa_glsl_error(& loc, state,
+ "`in' qualifier in declaration of `%s' "
+ "only valid for function parameters in %s",
+ decl->identifier, state->get_version_string());
+ }
+ /* FINISHME: Test for other invalid qualifiers. */
+ }
+
+ apply_type_qualifier_to_variable(& this->type->qualifier, var, state,
+ & loc, false);
+ apply_layout_qualifier_to_variable(&this->type->qualifier, var, state,
+ &loc);
+
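+      /* Optionally zero-initialize plain numeric and boolean variables.
+       * zero_init is an opt-in flag on the parse state (set, e.g., by a
+       * driver or debug option); the GLSL spec does not require it.
+       */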
+ if ((var->data.mode == ir_var_auto || var->data.mode == ir_var_temporary
+ || var->data.mode == ir_var_shader_out)
+ && (var->type->is_numeric() || var->type->is_boolean())
+ && state->zero_init) {
+ const ir_constant_data data = { { 0 } };
+ var->data.has_initializer = true;
+ var->constant_initializer = new(var) ir_constant(var->type, &data);
+ }
+
+ if (this->type->qualifier.flags.q.invariant) {
+ if (!is_allowed_invariant(var, state)) {
+ _mesa_glsl_error(&loc, state,
+ "`%s' cannot be marked invariant; interfaces between "
+ "shader stages only", var->name);
+ }
+ }
+
+ if (state->current_function != NULL) {
+ const char *mode = NULL;
+ const char *extra = "";
+
+ /* There is no need to check for 'inout' here because the parser will
+ * only allow that in function parameter lists.
+ */
+ if (this->type->qualifier.flags.q.attribute) {
+ mode = "attribute";
+ } else if (this->type->qualifier.is_subroutine_decl()) {
+ mode = "subroutine uniform";
+ } else if (this->type->qualifier.flags.q.uniform) {
+ mode = "uniform";
+ } else if (this->type->qualifier.flags.q.varying) {
+ mode = "varying";
+ } else if (this->type->qualifier.flags.q.in) {
+ mode = "in";
+ extra = " or in function parameter list";
+ } else if (this->type->qualifier.flags.q.out) {
+ mode = "out";
+ extra = " or in function parameter list";
+ }
+
+ if (mode) {
+ _mesa_glsl_error(& loc, state,
+ "%s variable `%s' must be declared at "
+ "global scope%s",
+ mode, var->name, extra);
+ }
+ } else if (var->data.mode == ir_var_shader_in) {
+ var->data.read_only = true;
+
+ if (state->stage == MESA_SHADER_VERTEX) {
+ bool error_emitted = false;
+
+ /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "Vertex shader inputs can only be float, floating-point
+ * vectors, matrices, signed and unsigned integers and integer
+ * vectors. Vertex shader inputs can also form arrays of these
+ * types, but not structures."
+ *
+ * From page 31 (page 27 of the PDF) of the GLSL 1.30 spec:
+ *
+ * "Vertex shader inputs can only be float, floating-point
+ * vectors, matrices, signed and unsigned integers and integer
+ * vectors. They cannot be arrays or structures."
+ *
+ * From page 23 (page 29 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "The attribute qualifier can be used only with float,
+ * floating-point vectors, and matrices. Attribute variables
+ * cannot be declared as arrays or structures."
+ *
+ * From page 33 (page 39 of the PDF) of the GLSL ES 3.00 spec:
+ *
+ * "Vertex shader inputs can only be float, floating-point
+ * vectors, matrices, signed and unsigned integers and integer
+ * vectors. Vertex shader inputs cannot be arrays or
+ * structures."
+ *
+ * From section 4.3.4 of the ARB_bindless_texture spec:
+ *
+ * "(modify third paragraph of the section to allow sampler and
+ * image types) ... Vertex shader inputs can only be float,
+ * single-precision floating-point scalars, single-precision
+ * floating-point vectors, matrices, signed and unsigned
+ * integers and integer vectors, sampler and image types."
+ */
+ const glsl_type *check_type = var->type->without_array();
+
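+            /* Cases guarded by a version or extension check below fall
+             * through to the error in the default label when the check
+             * fails.
+             */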
+ switch (check_type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ break;
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ break;
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ if (state->is_version(120, 300) || state->EXT_gpu_shader4_enable)
+ break;
+ case GLSL_TYPE_DOUBLE:
+ if (check_type->is_double() && (state->is_version(410, 0) || state->ARB_vertex_attrib_64bit_enable))
+ break;
+ case GLSL_TYPE_SAMPLER:
+ if (check_type->is_sampler() && state->has_bindless())
+ break;
+ case GLSL_TYPE_IMAGE:
+ if (check_type->is_image() && state->has_bindless())
+ break;
+ /* FALLTHROUGH */
+ default:
+ _mesa_glsl_error(& loc, state,
+ "vertex shader input / attribute cannot have "
+ "type %s`%s'",
+ var->type->is_array() ? "array of " : "",
+ check_type->name);
+ error_emitted = true;
+ }
+
+ if (!error_emitted && var->type->is_array() &&
+ !state->check_version(150, 0, &loc,
+ "vertex shader input / attribute "
+ "cannot have array type")) {
+ error_emitted = true;
+ }
+ } else if (state->stage == MESA_SHADER_GEOMETRY) {
+ /* From section 4.3.4 (Inputs) of the GLSL 1.50 spec:
+ *
+ * Geometry shader input variables get the per-vertex values
+ * written out by vertex shader output variables of the same
+ * names. Since a geometry shader operates on a set of
+ * vertices, each input varying variable (or input block, see
+ * interface blocks below) needs to be declared as an array.
+ */
+ if (!var->type->is_array()) {
+ _mesa_glsl_error(&loc, state,
+ "geometry shader inputs must be arrays");
+ }
+
+ handle_geometry_shader_input_decl(state, loc, var);
+ } else if (state->stage == MESA_SHADER_FRAGMENT) {
+ /* From section 4.3.4 (Input Variables) of the GLSL ES 3.10 spec:
+ *
+ * It is a compile-time error to declare a fragment shader
+ * input with, or that contains, any of the following types:
+ *
+ * * A boolean type
+ * * An opaque type
+ * * An array of arrays
+ * * An array of structures
+ * * A structure containing an array
+ * * A structure containing a structure
+ */
+ if (state->es_shader) {
+ const glsl_type *check_type = var->type->without_array();
+ if (check_type->is_boolean() ||
+ check_type->contains_opaque()) {
+ _mesa_glsl_error(&loc, state,
+ "fragment shader input cannot have type %s",
+ check_type->name);
+ }
+ if (var->type->is_array() &&
+ var->type->fields.array->is_array()) {
+               _mesa_glsl_error(&loc, state,
+                                "fragment shader input "
+                                "cannot have an array of arrays");
+ }
+ if (var->type->is_array() &&
+ var->type->fields.array->is_struct()) {
+ _mesa_glsl_error(&loc, state,
+ "fragment shader input "
+ "cannot have an array of structs");
+ }
+ if (var->type->is_struct()) {
+ for (unsigned i = 0; i < var->type->length; i++) {
+ if (var->type->fields.structure[i].type->is_array() ||
+ var->type->fields.structure[i].type->is_struct())
+ _mesa_glsl_error(&loc, state,
+ "fragment shader input cannot have "
+ "a struct that contains an "
+ "array or struct");
+ }
+ }
+ }
+ } else if (state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL) {
+ handle_tess_shader_input_decl(state, loc, var);
+ }
+ } else if (var->data.mode == ir_var_shader_out) {
+ const glsl_type *check_type = var->type->without_array();
+
+ /* From section 4.3.6 (Output variables) of the GLSL 4.40 spec:
+ *
+ * It is a compile-time error to declare a fragment shader output
+ * that contains any of the following:
+ *
+ * * A Boolean type (bool, bvec2 ...)
+ * * A double-precision scalar or vector (double, dvec2 ...)
+ * * An opaque type
+ * * Any matrix type
+ * * A structure
+ */
+ if (state->stage == MESA_SHADER_FRAGMENT) {
+ if (check_type->is_struct() || check_type->is_matrix())
+ _mesa_glsl_error(&loc, state,
+ "fragment shader output "
+ "cannot have struct or matrix type");
+ switch (check_type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ break;
+ default:
+ _mesa_glsl_error(&loc, state,
+ "fragment shader output cannot have "
+ "type %s", check_type->name);
+ }
+ }
+
+ /* From section 4.3.6 (Output Variables) of the GLSL ES 3.10 spec:
+ *
+ * It is a compile-time error to declare a vertex shader output
+ * with, or that contains, any of the following types:
+ *
+ * * A boolean type
+ * * An opaque type
+ * * An array of arrays
+ * * An array of structures
+ * * A structure containing an array
+ * * A structure containing a structure
+ *
+ * It is a compile-time error to declare a fragment shader output
+ * with, or that contains, any of the following types:
+ *
+ * * A boolean type
+ * * An opaque type
+ * * A matrix
+ * * A structure
+ * * An array of array
+ *
+ * ES 3.20 updates this to apply to tessellation and geometry shaders
+ * as well. Because there are per-vertex arrays in the new stages,
+ * it strikes the "array of..." rules and replaces them with these:
+ *
+ * * For per-vertex-arrayed variables (applies to tessellation
+ * control, tessellation evaluation and geometry shaders):
+ *
+ * * Per-vertex-arrayed arrays of arrays
+ * * Per-vertex-arrayed arrays of structures
+ *
+ * * For non-per-vertex-arrayed variables:
+ *
+ * * An array of arrays
+ * * An array of structures
+ *
+ * which basically says to unwrap the per-vertex aspect and apply
+ * the old rules.
+ */
+ if (state->es_shader) {
+ if (var->type->is_array() &&
+ var->type->fields.array->is_array()) {
+ _mesa_glsl_error(&loc, state,
+ "%s shader output "
+ "cannot have an array of arrays",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ if (state->stage <= MESA_SHADER_GEOMETRY) {
+ const glsl_type *type = var->type;
+
+ if (state->stage == MESA_SHADER_TESS_CTRL &&
+ !var->data.patch && var->type->is_array()) {
+ type = var->type->fields.array;
+ }
+
+ if (type->is_array() && type->fields.array->is_struct()) {
+ _mesa_glsl_error(&loc, state,
+ "%s shader output cannot have "
+ "an array of structs",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ if (type->is_struct()) {
+ for (unsigned i = 0; i < type->length; i++) {
+ if (type->fields.structure[i].type->is_array() ||
+ type->fields.structure[i].type->is_struct())
+ _mesa_glsl_error(&loc, state,
+ "%s shader output cannot have a "
+ "struct that contains an "
+ "array or struct",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ }
+ }
+ }
+
+ if (state->stage == MESA_SHADER_TESS_CTRL) {
+ handle_tess_ctrl_shader_output_decl(state, loc, var);
+ }
+ } else if (var->type->contains_subroutine()) {
+ /* declare subroutine uniforms as hidden */
+ var->data.how_declared = ir_var_hidden;
+ }
+
+ /* From section 4.3.4 of the GLSL 4.00 spec:
+ * "Input variables may not be declared using the patch in qualifier
+ * in tessellation control or geometry shaders."
+ *
+ * From section 4.3.6 of the GLSL 4.00 spec:
+ * "It is an error to use patch out in a vertex, tessellation
+ * evaluation, or geometry shader."
+ *
+ * This doesn't explicitly forbid using them in a fragment shader, but
+ * that's probably just an oversight.
+ */
+ if (state->stage != MESA_SHADER_TESS_EVAL
+ && this->type->qualifier.flags.q.patch
+ && this->type->qualifier.flags.q.in) {
+
+ _mesa_glsl_error(&loc, state, "'patch in' can only be used in a "
+ "tessellation evaluation shader");
+ }
+
+ if (state->stage != MESA_SHADER_TESS_CTRL
+ && this->type->qualifier.flags.q.patch
+ && this->type->qualifier.flags.q.out) {
+
+ _mesa_glsl_error(&loc, state, "'patch out' can only be used in a "
+ "tessellation control shader");
+ }
+
+      /* Precision qualifiers exist only in GLSL ES 1.00 and in GLSL 1.30
+       * and later.
+       */
+ if (this->type->qualifier.precision != ast_precision_none) {
+ state->check_precision_qualifiers_allowed(&loc);
+ }
+
+ if (this->type->qualifier.precision != ast_precision_none &&
+ !precision_qualifier_allowed(var->type)) {
+ _mesa_glsl_error(&loc, state,
+ "precision qualifiers apply only to floating point"
+ ", integer and opaque types");
+ }
+
+ /* From section 4.1.7 of the GLSL 4.40 spec:
+ *
+ * "[Opaque types] can only be declared as function
+ * parameters or uniform-qualified variables."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images may be declared as shader inputs and outputs, as uniform
+ * variables, as temporary variables, and as function parameters."
+ */
+ if (!this->type->qualifier.flags.q.uniform &&
+ (var_type->contains_atomic() ||
+ (!state->has_bindless() && var_type->contains_opaque()))) {
+ _mesa_glsl_error(&loc, state,
+ "%s variables must be declared uniform",
+ state->has_bindless() ? "atomic" : "opaque");
+ }
+
+ /* Process the initializer and add its instructions to a temporary
+ * list. This list will be added to the instruction stream (below) after
+ * the declaration is added. This is done because in some cases (such as
+ * redeclarations) the declaration may not actually be added to the
+ * instruction stream.
+ */
+ exec_list initializer_instructions;
+
+ /* Examine var name here since var may get deleted in the next call */
+ bool var_is_gl_id = is_gl_identifier(var->name);
+
+ bool is_redeclaration;
+ var = get_variable_being_redeclared(&var, decl->get_location(), state,
+ false /* allow_all_redeclarations */,
+ &is_redeclaration);
+ if (is_redeclaration) {
+ if (var_is_gl_id &&
+ var->data.how_declared == ir_var_declared_in_block) {
+ _mesa_glsl_error(&loc, state,
+ "`%s' has already been redeclared using "
+ "gl_PerVertex", var->name);
+ }
+ var->data.how_declared = ir_var_declared_normally;
+ }
+
+ if (decl->initializer != NULL) {
+ result = process_initializer(var,
+ decl, this->type,
+ &initializer_instructions, state);
+ } else {
+ validate_array_dimensions(var_type, state, &loc);
+ }
+
+ /* From page 23 (page 29 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "It is an error to write to a const variable outside of
+ * its declaration, so they must be initialized when
+ * declared."
+ */
+ if (this->type->qualifier.flags.q.constant && decl->initializer == NULL) {
+ _mesa_glsl_error(& loc, state,
+ "const declaration of `%s' must be initialized",
+ decl->identifier);
+ }
+
+ if (state->es_shader) {
+ const glsl_type *const t = var->type;
+
+ /* Skip the unsized array check for TCS/TES/GS inputs & TCS outputs.
+ *
+ * The GL_OES_tessellation_shader spec says about inputs:
+ *
+ * "Declaring an array size is optional. If no size is specified,
+ * it will be taken from the implementation-dependent maximum
+ * patch size (gl_MaxPatchVertices)."
+ *
+ * and about TCS outputs:
+ *
+ * "If no size is specified, it will be taken from output patch
+ * size declared in the shader."
+ *
+ * The GL_OES_geometry_shader spec says:
+ *
+ * "All geometry shader input unsized array declarations will be
+ * sized by an earlier input primitive layout qualifier, when
+ * present, as per the following table."
+ */
+ const bool implicitly_sized =
+ (var->data.mode == ir_var_shader_in &&
+ state->stage >= MESA_SHADER_TESS_CTRL &&
+ state->stage <= MESA_SHADER_GEOMETRY) ||
+ (var->data.mode == ir_var_shader_out &&
+ state->stage == MESA_SHADER_TESS_CTRL);
+
+ if (t->is_unsized_array() && !implicitly_sized)
+ /* Section 10.17 of the GLSL ES 1.00 specification states that
+ * unsized array declarations have been removed from the language.
+ * Arrays that are sized using an initializer are still explicitly
+          * sized. However, GLSL ES 1.00 does not allow array initializers;
+          * those are only allowed in GLSL ES 3.00.
+ *
+ * Section 4.1.9 (Arrays) of the GLSL ES 3.00 spec says:
+ *
+ * "An array type can also be formed without specifying a size
+ * if the definition includes an initializer:
+ *
+ * float x[] = float[2] (1.0, 2.0); // declares an array of size 2
+ * float y[] = float[] (1.0, 2.0, 3.0); // declares an array of size 3
+ *
+ * float a[5];
+ * float b[] = a;"
+ */
+ _mesa_glsl_error(& loc, state,
+ "unsized array declarations are not allowed in "
+ "GLSL ES");
+ }
+
+ /* Section 4.4.6.1 Atomic Counter Layout Qualifiers of the GLSL 4.60 spec:
+ *
+ * "It is a compile-time error to declare an unsized array of
+ * atomic_uint"
+ */
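+      /* e.g. `layout(binding = 0) uniform atomic_uint counters[];' must be
+       * rejected (illustrative declaration).
+       */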
+ if (var->type->is_unsized_array() &&
+ var->type->without_array()->base_type == GLSL_TYPE_ATOMIC_UINT) {
+         _mesa_glsl_error(& loc, state,
+                          "unsized array of atomic_uint is not allowed");
+ }
+
+ /* If the declaration is not a redeclaration, there are a few additional
+    * semantic checks that must be applied. In addition, the variable that
+    * was created for the declaration should be added to the IR stream.
+ */
+ if (!is_redeclaration) {
+ validate_identifier(decl->identifier, loc, state);
+
+ /* Add the variable to the symbol table. Note that the initializer's
+ * IR was already processed earlier (though it hasn't been emitted
+ * yet), without the variable in scope.
+ *
+ * This differs from most C-like languages, but it follows the GLSL
+ * specification. From page 28 (page 34 of the PDF) of the GLSL 1.50
+ * spec:
+ *
+ * "Within a declaration, the scope of a name starts immediately
+ * after the initializer if present or immediately after the name
+ * being declared if not."
+ */
+ if (!state->symbols->add_variable(var)) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state, "name `%s' already taken in the "
+ "current scope", decl->identifier);
+ continue;
+ }
+
+ /* Push the variable declaration to the top. It means that all the
+ * variable declarations will appear in a funny last-to-first order,
+          * but otherwise we run into trouble if a function is prototyped, a
+          * global var is declared, then the function is defined with usage of
+          * the global var. See glslparsertest's CorrectModule.frag.
+          * However, do not insert declarations before default precision
+          * statements or type declarations.
+ */
+ ir_instruction* before_node = (ir_instruction*)instructions->get_head();
+         while (before_node &&
+                (before_node->ir_type == ir_type_precision ||
+                 before_node->ir_type == ir_type_typedecl))
+ before_node = (ir_instruction*)before_node->next;
+ if (before_node)
+ before_node->insert_before(var);
+ else
+ instructions->push_head(var);
+ }
+
+ instructions->append_list(&initializer_instructions);
+ }
+
+
+ /* Generally, variable declarations do not have r-values. However,
+ * one is used for the declaration in
+ *
+ * while (bool b = some_condition()) {
+ * ...
+ * }
+ *
+ * so we return the rvalue from the last seen declaration here.
+ */
+ return result;
+}
+
+
+ir_rvalue *
+ast_parameter_declarator::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ const struct glsl_type *type;
+ const char *name = NULL;
+ YYLTYPE loc = this->get_location();
+
+ type = this->type->glsl_type(& name, state);
+
+ if (type == NULL) {
+ if (name != NULL) {
+ _mesa_glsl_error(& loc, state,
+ "invalid type `%s' in declaration of `%s'",
+ name, this->identifier);
+ } else {
+ _mesa_glsl_error(& loc, state,
+ "invalid type in declaration of `%s'",
+ this->identifier);
+ }
+
+ type = glsl_type::error_type;
+ }
+
+ /* From page 62 (page 68 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "Functions that accept no input arguments need not use void in the
+ * argument list because prototypes (or definitions) are required and
+ * therefore there is no ambiguity when an empty argument list "( )" is
+ * declared. The idiom "(void)" as a parameter list is provided for
+ * convenience."
+ *
+ * Placing this check here prevents a void parameter being set up
+ * for a function, which avoids tripping up checks for main taking
+ * parameters and lookups of an unnamed symbol.
+ */
+ if (type->is_void()) {
+ if (this->identifier != NULL)
+ _mesa_glsl_error(& loc, state,
+ "named parameter cannot have type `void'");
+
+ is_void = true;
+ return NULL;
+ }
+
+ if (formal_parameter && (this->identifier == NULL)) {
+ _mesa_glsl_error(& loc, state, "formal parameter lacks a name");
+ return NULL;
+ }
+
+ /* This only handles "vec4 foo[..]". The earlier specifier->glsl_type(...)
+ * call already handled the "vec4[..] foo" case.
+ */
+ type = process_array_type(&loc, type, this->array_specifier, state);
+
+ if (!type->is_error() && type->is_unsized_array()) {
+ _mesa_glsl_error(&loc, state, "arrays passed as parameters must have "
+ "a declared size");
+ type = glsl_type::error_type;
+ }
+
+ is_void = false;
+ ir_variable *var = new(ctx)
+ ir_variable(type, this->identifier, ir_var_function_in);
+
+ /* Apply any specified qualifiers to the parameter declaration. Note that
+ * for function parameters the default mode is 'in'.
+ */
+ apply_type_qualifier_to_variable(& this->type->qualifier, var, state, & loc,
+ true);
+
+ /* From section 4.1.7 of the GLSL 4.40 spec:
+ *
+ * "Opaque variables cannot be treated as l-values; hence cannot
+ * be used as out or inout function parameters, nor can they be
+ * assigned into."
+ *
+ * From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers can be used as l-values, so can be assigned into and used
+ * as "out" and "inout" function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images can be used as l-values, so can be assigned into and used as
+ * "out" and "inout" function parameters."
+ */
+ if ((var->data.mode == ir_var_function_inout || var->data.mode == ir_var_function_out)
+ && (type->contains_atomic() ||
+ (!state->has_bindless() && type->contains_opaque()))) {
+ _mesa_glsl_error(&loc, state, "out and inout parameters cannot "
+ "contain %s variables",
+ state->has_bindless() ? "atomic" : "opaque");
+ type = glsl_type::error_type;
+ }
+
+ /* From page 39 (page 45 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "When calling a function, expressions that do not evaluate to
+ * l-values cannot be passed to parameters declared as out or inout."
+ *
+ * From page 32 (page 38 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "Other binary or unary expressions, non-dereferenced arrays,
+ * function names, swizzles with repeated fields, and constants
+ * cannot be l-values."
+ *
+ * So for GLSL 1.10, passing an array as an out or inout parameter is not
+ * allowed. This restriction is removed in GLSL 1.20, and in GLSL ES.
+ */
+ if ((var->data.mode == ir_var_function_inout || var->data.mode == ir_var_function_out)
+ && type->is_array()
+ && !state->check_version(120, 100, &loc,
+ "arrays cannot be out or inout parameters")) {
+ type = glsl_type::error_type;
+ }
+
+ instructions->push_tail(var);
+
+ /* Parameter declarations do not have r-values.
+ */
+ return NULL;
+}
+
+
+void
+ast_parameter_declarator::parameters_to_hir(exec_list *ast_parameters,
+ bool formal,
+ exec_list *ir_parameters,
+ _mesa_glsl_parse_state *state)
+{
+ ast_parameter_declarator *void_param = NULL;
+ unsigned count = 0;
+
+ foreach_list_typed (ast_parameter_declarator, param, link, ast_parameters) {
+ param->formal_parameter = formal;
+ param->hir(ir_parameters, state);
+
+ if (param->is_void)
+ void_param = param;
+
+ count++;
+ }
+
+ if ((void_param != NULL) && (count > 1)) {
+ YYLTYPE loc = void_param->get_location();
+
+      _mesa_glsl_error(& loc, state,
+                       "`void' parameter must be the only parameter");
+ }
+}
+
+
+void
+emit_function(_mesa_glsl_parse_state *state, ir_function *f)
+{
+ /* IR invariants disallow function declarations or definitions
+ * nested within other function definitions. But there is no
+ * requirement about the relative order of function declarations
+ * and definitions with respect to one another. So simply insert
+ * the new ir_function block at the end of the toplevel instruction
+ * list.
+ */
+ state->toplevel_ir->push_tail(f);
+}
+
+
+ir_rvalue *
+ast_function::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ ir_function *f = NULL;
+ ir_function_signature *sig = NULL;
+ exec_list hir_parameters;
+ YYLTYPE loc = this->get_location();
+
+ const char *const name = identifier;
+
+ /* New functions are always added to the top-level IR instruction stream,
+ * so this instruction list pointer is ignored. See also emit_function
+ * (called below).
+ */
+ (void) instructions;
+
+ /* From page 21 (page 27 of the PDF) of the GLSL 1.20 spec,
+ *
+ * "Function declarations (prototypes) cannot occur inside of functions;
+ * they must be at global scope, or for the built-in functions, outside
+ * the global scope."
+ *
+ * From page 27 (page 33 of the PDF) of the GLSL ES 1.00.16 spec,
+ *
+ * "User defined functions may only be defined within the global scope."
+ *
+ * Note that this language does not appear in GLSL 1.10.
+ */
+ if ((state->current_function != NULL) &&
+ state->is_version(120, 100)) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state,
+ "declaration of function `%s' not allowed within "
+ "function body", name);
+ }
+
+ validate_identifier(name, this->get_location(), state);
+
+ /* Convert the list of function parameters to HIR now so that they can be
+ * used below to compare this function's signature with previously seen
+ * signatures for functions with the same name.
+ */
+ ast_parameter_declarator::parameters_to_hir(& this->parameters,
+ is_definition,
+ & hir_parameters, state);
+
+ const char *return_type_name;
+ const glsl_type *return_type =
+ this->return_type->glsl_type(& return_type_name, state);
+
+ if (!return_type) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state,
+ "function `%s' has undeclared return type `%s'",
+ name, return_type_name);
+ return_type = glsl_type::error_type;
+ }
+
+ /* ARB_shader_subroutine states:
+ * "Subroutine declarations cannot be prototyped. It is an error to prepend
+ * subroutine(...) to a function declaration."
+ */
+ if (this->return_type->qualifier.subroutine_list && !is_definition) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state,
+ "function declaration `%s' cannot have subroutine prepended",
+ name);
+ }
+
+ /* From page 56 (page 62 of the PDF) of the GLSL 1.30 spec:
+ * "No qualifier is allowed on the return type of a function."
+ */
+ if (this->return_type->has_qualifiers(state)) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state,
+ "function `%s' return type has qualifiers", name);
+ }
+
+ /* Section 6.1 (Function Definitions) of the GLSL 1.20 spec says:
+ *
+ * "Arrays are allowed as arguments and as the return type. In both
+ * cases, the array must be explicitly sized."
+ */
+ if (return_type->is_unsized_array()) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state,
+ "function `%s' return type array must be explicitly "
+ "sized", name);
+ }
+
+ /* From Section 6.1 (Function Definitions) of the GLSL 1.00 spec:
+ *
+ * "Arrays are allowed as arguments, but not as the return type. [...]
+ * The return type can also be a structure if the structure does not
+ * contain an array."
+ */
+ if (state->language_version == 100 && return_type->contains_array()) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state,
+ "function `%s' return type contains an array", name);
+ }
+
+ /* From section 4.1.7 of the GLSL 4.40 spec:
+ *
+ * "[Opaque types] can only be declared as function parameters
+ * or uniform-qualified variables."
+ *
+ * The ARB_bindless_texture spec doesn't clearly state this, but as it says
+    * "Replace Section 4.1.7 (Samplers), p. 25" and "Replace Section 4.1.X,
+    * (Images)", this should be allowed.
+ */
+ if (return_type->contains_atomic() ||
+ (!state->has_bindless() && return_type->contains_opaque())) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state,
+ "function `%s' return type can't contain an %s type",
+ name, state->has_bindless() ? "atomic" : "opaque");
+ }
+
+   /* A function may not return a subroutine type. */
+ if (return_type->is_subroutine()) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state,
+ "function `%s' return type can't be a subroutine type",
+ name);
+ }
+
+   /* Get the precision for the return type. Precision qualifiers carry
+    * semantic meaning only in GLSL ES; for desktop GLSL no precision is
+    * recorded.
+    */
+ unsigned return_precision;
+
+ if (state->es_shader) {
+ YYLTYPE loc = this->get_location();
+ return_precision =
+ select_gles_precision(this->return_type->qualifier.precision,
+ return_type,
+ state,
+ &loc);
+ } else {
+ return_precision = GLSL_PRECISION_NONE;
+ }
+
+ /* Create an ir_function if one doesn't already exist. */
+ f = state->symbols->get_function(name);
+ if (f == NULL) {
+ f = new(ctx) ir_function(name);
+ if (!this->return_type->qualifier.is_subroutine_decl()) {
+ if (!state->symbols->add_function(f)) {
+ /* This function name shadows a non-function use of the same name. */
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state, "function name `%s' conflicts with "
+ "non-function", name);
+ return NULL;
+ }
+ }
+ emit_function(state, f);
+ }
+
+ /* From GLSL ES 3.0 spec, chapter 6.1 "Function Definitions", page 71:
+ *
+ * "A shader cannot redefine or overload built-in functions."
+ *
+ * While in GLSL ES 1.0 specification, chapter 8 "Built-in Functions":
+ *
+ * "User code can overload the built-in functions but cannot redefine
+ * them."
+ */
+ if (state->es_shader) {
+ /* Local shader has no exact candidates; check the built-ins. */
+ if (state->language_version >= 300 &&
+ _mesa_glsl_has_builtin_function(state, name)) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state,
+ "A shader cannot redefine or overload built-in "
+ "function `%s' in GLSL ES 3.00", name);
+ return NULL;
+ }
+
+ if (state->language_version == 100) {
+ ir_function_signature *sig =
+ _mesa_glsl_find_builtin_function(state, name, &hir_parameters);
+ if (sig && sig->is_builtin()) {
+ _mesa_glsl_error(& loc, state,
+ "A shader cannot redefine built-in "
+ "function `%s' in GLSL ES 1.00", name);
+ }
+ }
+ }
+
+ /* Verify that this function's signature either doesn't match a previously
+ * seen signature for a function with the same name, or, if a match is found,
+ * that the previously seen signature does not have an associated definition.
+ */
+ if (state->es_shader || f->has_user_signature()) {
+ sig = f->exact_matching_signature(state, &hir_parameters);
+ if (sig != NULL) {
+ const char *badvar = sig->qualifiers_match(&hir_parameters);
+ if (badvar != NULL) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(&loc, state, "function `%s' parameter `%s' "
+ "qualifiers don't match prototype", name, badvar);
+ }
+
+ if (sig->return_type != return_type) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(&loc, state, "function `%s' return type doesn't "
+ "match prototype", name);
+ }
+
+ if (sig->return_precision != return_precision) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(&loc, state, "function `%s' return type precision "
+ "doesn't match prototype", name);
+ }
+
+ if (sig->is_defined) {
+ if (is_definition) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state, "function `%s' redefined", name);
+ } else {
+ /* We just encountered a prototype that exactly matches a
+ * function that's already been defined. This is redundant,
+ * and we should ignore it.
+ */
+ return NULL;
+ }
+ } else if (state->language_version == 100 && !is_definition) {
+ /* From the GLSL 1.00 spec, section 4.2.7:
+ *
+ * "A particular variable, structure or function declaration
+ * may occur at most once within a scope with the exception
+ * that a single function prototype plus the corresponding
+ * function definition are allowed."
+ */
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state, "function `%s' redeclared", name);
+ }
+ }
+ }
+
+ /* Verify the return type of main() */
+ if (strcmp(name, "main") == 0) {
+ if (! return_type->is_void()) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state, "main() must return void");
+ }
+
+ if (!hir_parameters.is_empty()) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state, "main() must not take any parameters");
+ }
+ }
+
+ /* Finish storing the information about this new function in its signature.
+ */
+ if (sig == NULL) {
+ sig = new(ctx) ir_function_signature(return_type);
+ sig->return_precision = return_precision;
+ f->add_signature(sig);
+ }
+
+ sig->replace_parameters(&hir_parameters);
+ signature = sig;
+
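+   /* Associate the function with each subroutine type named in its
+    * qualifier. Illustrative GLSL:
+    *
+    *    subroutine vec4 ShadeFn(vec3 n);             // subroutine type
+    *    subroutine(ShadeFn) vec4 flat_shade(vec3 n); // this declaration
+    */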
+ if (this->return_type->qualifier.subroutine_list) {
+ int idx;
+
+ if (this->return_type->qualifier.flags.q.explicit_index) {
+ unsigned qual_index;
+ if (process_qualifier_constant(state, &loc, "index",
+ this->return_type->qualifier.index,
+ &qual_index)) {
+ if (!state->has_explicit_uniform_location()) {
+ _mesa_glsl_error(&loc, state, "subroutine index requires "
+ "GL_ARB_explicit_uniform_location or "
+ "GLSL 4.30");
+ } else if (qual_index >= MAX_SUBROUTINES) {
+ _mesa_glsl_error(&loc, state,
+ "invalid subroutine index (%d) index must "
+ "be a number between 0 and "
+ "GL_MAX_SUBROUTINES - 1 (%d)", qual_index,
+ MAX_SUBROUTINES - 1);
+ } else {
+ f->subroutine_index = qual_index;
+ }
+ }
+ }
+
+      f->num_subroutine_types =
+         this->return_type->qualifier.subroutine_list->declarations.length();
+ f->subroutine_types = ralloc_array(state, const struct glsl_type *,
+ f->num_subroutine_types);
+ idx = 0;
+ foreach_list_typed(ast_declaration, decl, link, &this->return_type->qualifier.subroutine_list->declarations) {
+ const struct glsl_type *type;
+ /* the subroutine type must be already declared */
+ type = state->symbols->get_type(decl->identifier);
+ if (!type) {
+            _mesa_glsl_error(& loc, state,
+                             "unknown type '%s' in subroutine function "
+                             "definition", decl->identifier);
+ }
+
+ for (int i = 0; i < state->num_subroutine_types; i++) {
+ ir_function *fn = state->subroutine_types[i];
+ ir_function_signature *tsig = NULL;
+
+ if (strcmp(fn->name, decl->identifier))
+ continue;
+
+ tsig = fn->matching_signature(state, &sig->parameters,
+ false);
+ if (!tsig) {
+               _mesa_glsl_error(& loc, state,
+                                "subroutine type mismatch '%s' - signatures "
+                                "do not match", decl->identifier);
+ } else {
+ if (tsig->return_type != sig->return_type) {
+                  _mesa_glsl_error(& loc, state,
+                                   "subroutine type mismatch '%s' - return "
+                                   "types do not match", decl->identifier);
+ }
+ }
+ }
+ f->subroutine_types[idx++] = type;
+ }
+ state->subroutines = (ir_function **)reralloc(state, state->subroutines,
+ ir_function *,
+ state->num_subroutines + 1);
+ state->subroutines[state->num_subroutines] = f;
+ state->num_subroutines++;
+
+ }
+
+ if (this->return_type->qualifier.is_subroutine_decl()) {
+ if (!state->symbols->add_type(this->identifier, glsl_type::get_subroutine_instance(this->identifier))) {
+ _mesa_glsl_error(& loc, state, "type '%s' previously defined", this->identifier);
+ return NULL;
+ }
+ state->subroutine_types = (ir_function **)reralloc(state, state->subroutine_types,
+ ir_function *,
+ state->num_subroutine_types + 1);
+ state->subroutine_types[state->num_subroutine_types] = f;
+ state->num_subroutine_types++;
+
+ f->is_subroutine = true;
+ }
+
+ /* Function declarations (prototypes) do not have r-values.
+ */
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_function_definition::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ prototype->is_definition = true;
+ prototype->hir(instructions, state);
+
+ ir_function_signature *signature = prototype->signature;
+ if (signature == NULL)
+ return NULL;
+
+ assert(state->current_function == NULL);
+ state->current_function = signature;
+ state->found_return = false;
+ state->found_begin_interlock = false;
+ state->found_end_interlock = false;
+
+ /* Duplicate parameters declared in the prototype as concrete variables.
+ * Add these to the symbol table.
+ */
+ state->symbols->push_scope();
+ foreach_in_list(ir_variable, var, &signature->parameters) {
+ assert(var->as_variable() != NULL);
+
+ /* The only way a parameter would "exist" is if two parameters have
+ * the same name.
+ */
+ if (state->symbols->name_declared_this_scope(var->name)) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state, "parameter `%s' redeclared", var->name);
+ } else {
+ state->symbols->add_variable(var);
+ }
+ }
+
+ /* Convert the body of the function to HIR. */
+ this->body->hir(&signature->body, state);
+ signature->is_defined = true;
+
+ state->symbols->pop_scope();
+
+ assert(state->current_function == signature);
+ state->current_function = NULL;
+
+ if (!signature->return_type->is_void() && !state->found_return) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state, "function `%s' has non-void return type "
+ "%s, but no return statement",
+ signature->function_name(),
+ signature->return_type->name);
+ }
+
+ /* Function definitions do not have r-values.
+ */
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_jump_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ switch (mode) {
+ case ast_return: {
+ ir_return *inst;
+ assert(state->current_function);
+
+ if (opt_return_value) {
+ ir_rvalue *ret = opt_return_value->hir(instructions, state);
+
+ /* The value of the return type can be NULL if the shader says
+ * 'return foo();' and foo() is a function that returns void.
+ *
+ * NOTE: The GLSL spec doesn't say that this is an error. The type
+ * of the return value is void. If the return type of the function is
+ * also void, then this should compile without error. Seriously.
+ */
+ const glsl_type *const ret_type =
+ (ret == NULL) ? glsl_type::void_type : ret->type;
+
+ /* Implicit conversions are not allowed for return values prior to
+ * ARB_shading_language_420pack.
+ */
+ if (state->current_function->return_type != ret_type) {
+ YYLTYPE loc = this->get_location();
+
+ if (state->has_420pack()) {
+ if (!apply_implicit_conversion(state->current_function->return_type,
+ ret, state)
+ || (ret->type != state->current_function->return_type)) {
+ _mesa_glsl_error(& loc, state,
+ "could not implicitly convert return value "
+ "to %s, in function `%s'",
+ state->current_function->return_type->name,
+ state->current_function->function_name());
+ }
+ } else {
+ _mesa_glsl_error(& loc, state,
+ "`return' with wrong type %s, in function `%s' "
+ "returning %s",
+ ret_type->name,
+ state->current_function->function_name(),
+ state->current_function->return_type->name);
+ }
+ } else if (state->current_function->return_type->base_type ==
+ GLSL_TYPE_VOID) {
+ YYLTYPE loc = this->get_location();
+
+ /* The ARB_shading_language_420pack, GLSL ES 3.0, and GLSL 4.20
+ * specs add a clarification:
+ *
+ * "A void function can only use return without a return argument, even if
+ * the return argument has void type. Return statements only accept values:
+ *
+ * void func1() { }
+ * void func2() { return func1(); } // illegal return statement"
+ */
+ _mesa_glsl_error(& loc, state,
+ "void functions can only use `return' without a "
+ "return argument");
+ }
+
+ inst = new(ctx) ir_return(ret);
+ } else {
+ if (state->current_function->return_type->base_type !=
+ GLSL_TYPE_VOID) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "`return' with no value, in function %s returning "
+ "non-void",
+ state->current_function->function_name());
+ }
+ inst = new(ctx) ir_return;
+ }
+
+ state->found_return = true;
+ instructions->push_tail(inst);
+ break;
+ }
+
+ case ast_discard:
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "`discard' may only appear in a fragment shader");
+ }
+ instructions->push_tail(new(ctx) ir_discard);
+ break;
+
+ case ast_break:
+ case ast_continue:
+ if (mode == ast_continue &&
+ state->loop_nesting_ast == NULL) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state, "continue may only appear in a loop");
+ } else if (mode == ast_break &&
+ state->loop_nesting_ast == NULL &&
+ state->switch_state.switch_nesting_ast == NULL) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "break may only appear in a loop or a switch");
+ } else {
+         /* For a loop, inline the for-loop expression again, since we don't
+          * know where near the end of the loop body its normal copy will be
+          * placed. The same goes for the condition of a do-while loop.
+          */
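+         /* Illustrative: in `for (i = 0; i < n; i++) { ... continue; }'
+          * the `i++' is re-emitted immediately before the continue jump.
+          */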
+ if (state->loop_nesting_ast != NULL &&
+ mode == ast_continue) {
+ if (state->loop_nesting_ast->rest_expression) {
+ state->loop_nesting_ast->rest_expression->hir(instructions,
+ state);
+ }
+ if (state->loop_nesting_ast->mode ==
+ ast_iteration_statement::ast_do_while) {
+ state->loop_nesting_ast->condition_to_hir(instructions, state);
+ }
+ }
+
+ if (state->switch_state.is_switch_innermost &&
+ mode == ast_break) {
+ /* Force break out of switch by setting is_break switch state.
+ */
+ ir_variable *const is_break_var = state->switch_state.is_break_var;
+ ir_dereference_variable *const deref_is_break_var =
+ new(ctx) ir_dereference_variable(is_break_var);
+ ir_constant *const true_val = new(ctx) ir_constant(true);
+ ir_assignment *const set_break_var =
+ new(ctx) ir_assignment(deref_is_break_var, true_val);
+
+ instructions->push_tail(set_break_var);
+ } else {
+ ir_loop_jump *const jump =
+ new(ctx) ir_loop_jump((mode == ast_break)
+ ? ir_loop_jump::jump_break
+ : ir_loop_jump::jump_continue);
+ instructions->push_tail(jump);
+ }
+ }
+
+ break;
+ }
+
+ /* Jump instructions do not have r-values.
+ */
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_demote_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ YYLTYPE loc = this->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "`demote' may only appear in a fragment shader");
+ }
+
+ instructions->push_tail(new(ctx) ir_demote);
+
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_selection_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ ir_rvalue *const condition = this->condition->hir(instructions, state);
+
+ /* From page 66 (page 72 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "Any expression whose type evaluates to a Boolean can be used as the
+ * conditional expression bool-expression. Vector types are not accepted
+ * as the expression to if."
+ *
+    * Both requirements (boolean type and scalar shape) are enforced by the
+    * single combined check below.
+ */
+ if (!condition->type->is_boolean() || !condition->type->is_scalar()) {
+ YYLTYPE loc = this->condition->get_location();
+
+ _mesa_glsl_error(& loc, state, "if-statement condition must be scalar "
+ "boolean");
+ }
+
+ ir_if *const stmt = new(ctx) ir_if(condition);
+
+ if (then_statement != NULL) {
+ state->symbols->push_scope();
+ then_statement->hir(& stmt->then_instructions, state);
+ state->symbols->pop_scope();
+ }
+
+ if (else_statement != NULL) {
+ state->symbols->push_scope();
+ else_statement->hir(& stmt->else_instructions, state);
+ state->symbols->pop_scope();
+ }
+
+ instructions->push_tail(stmt);
+
+ /* if-statements do not have r-values.
+ */
+ return NULL;
+}
+
+
+struct case_label {
+ /** Value of the case label. */
+ unsigned value;
+
+ /** Does this label occur after the default? */
+ bool after_default;
+
+ /**
+ * AST for the case label.
+ *
+ * This is only used to generate error messages for duplicate labels.
+ */
+ ast_expression *ast;
+};
+
+/* Hash-table comparison callback used to detect duplicate case values;
+ * compares the stored label values directly.
+ */
+static bool
+compare_case_value(const void *a, const void *b)
+{
+ return ((struct case_label *) a)->value == ((struct case_label *) b)->value;
+}
+
+
+/* Hash-table hash callback for duplicate case-value detection; returns the
+ * label value itself as the key's hash.
+ */
+static unsigned
+key_contents(const void *key)
+{
+ return ((struct case_label *) key)->value;
+}
+
+
+ir_rvalue *
+ast_switch_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ ir_rvalue *const test_expression =
+ this->test_expression->hir(instructions, state);
+
+ /* From page 66 (page 55 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "The type of init-expression in a switch statement must be a
+ * scalar integer."
+ */
+ if (!test_expression->type->is_scalar() ||
+ !test_expression->type->is_integer_32()) {
+ YYLTYPE loc = this->test_expression->get_location();
+
+ _mesa_glsl_error(& loc,
+ state,
+ "switch-statement expression must be scalar "
+ "integer");
+ return NULL;
+ }
+
+ /* Track the switch-statement nesting in a stack-like manner.
+ */
+ struct glsl_switch_state saved = state->switch_state;
+
+ state->switch_state.is_switch_innermost = true;
+ state->switch_state.switch_nesting_ast = this;
+ state->switch_state.labels_ht =
+ _mesa_hash_table_create(NULL, key_contents,
+ compare_case_value);
+ state->switch_state.previous_default = NULL;
+
+   /* Initialize is_fallthru state to false.
+ */
+ ir_rvalue *const is_fallthru_val = new (ctx) ir_constant(false);
+ state->switch_state.is_fallthru_var =
+ new(ctx) ir_variable(glsl_type::bool_type,
+ "switch_is_fallthru_tmp",
+ ir_var_temporary);
+ instructions->push_tail(state->switch_state.is_fallthru_var);
+
+ ir_dereference_variable *deref_is_fallthru_var =
+ new(ctx) ir_dereference_variable(state->switch_state.is_fallthru_var);
+ instructions->push_tail(new(ctx) ir_assignment(deref_is_fallthru_var,
+ is_fallthru_val));
+
+ /* Initialize is_break state to false.
+ */
+ ir_rvalue *const is_break_val = new (ctx) ir_constant(false);
+ state->switch_state.is_break_var =
+ new(ctx) ir_variable(glsl_type::bool_type,
+ "switch_is_break_tmp",
+ ir_var_temporary);
+ instructions->push_tail(state->switch_state.is_break_var);
+
+ ir_dereference_variable *deref_is_break_var =
+ new(ctx) ir_dereference_variable(state->switch_state.is_break_var);
+ instructions->push_tail(new(ctx) ir_assignment(deref_is_break_var,
+ is_break_val));
+
+ state->switch_state.run_default =
+ new(ctx) ir_variable(glsl_type::bool_type,
+ "run_default_tmp",
+ ir_var_temporary);
+ instructions->push_tail(state->switch_state.run_default);
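+   /* The switch is lowered to straight-line code, roughly (illustrative):
+    *
+    *    bool is_fallthru = false;  // true once any label has matched
+    *    bool is_break = false;     // set when a `break' executes
+    *    bool run_default;          // whether the default body should run
+    *    T    test = <init-expression>;  // cached, evaluated exactly once
+    *
+    * Each case label ORs its comparison against `test' into is_fallthru,
+    * each case body runs under `if (is_fallthru)', and is_break masks
+    * is_fallthru off after a break.
+    */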
+
+ /* Cache test expression.
+ */
+ test_to_hir(instructions, state);
+
+ /* Emit code for body of switch stmt.
+ */
+ body->hir(instructions, state);
+
+ _mesa_hash_table_destroy(state->switch_state.labels_ht, NULL);
+
+ state->switch_state = saved;
+
+ /* Switch statements do not have r-values. */
+ return NULL;
+}
+
+
+void
+ast_switch_statement::test_to_hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+   /* Set to true to avoid a duplicate "use of uninitialized variable"
+    * warning on the switch test case; the first warning was already raised
+    * when evaluating test_expression in ast_switch_statement::hir.
+    */
+ test_expression->set_is_lhs(true);
+ /* Cache value of test expression. */
+ ir_rvalue *const test_val = test_expression->hir(instructions, state);
+
+ state->switch_state.test_var = new(ctx) ir_variable(test_val->type,
+ "switch_test_tmp",
+ ir_var_temporary);
+ ir_dereference_variable *deref_test_var =
+ new(ctx) ir_dereference_variable(state->switch_state.test_var);
+
+ instructions->push_tail(state->switch_state.test_var);
+ instructions->push_tail(new(ctx) ir_assignment(deref_test_var, test_val));
+}
+
+
+ir_rvalue *
+ast_switch_body::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ if (stmts != NULL)
+ stmts->hir(instructions, state);
+
+ /* Switch bodies do not have r-values. */
+ return NULL;
+}
+
+ir_rvalue *
+ast_case_statement_list::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ exec_list default_case, after_default, tmp;
+
+ foreach_list_typed (ast_case_statement, case_stmt, link, & this->cases) {
+ case_stmt->hir(&tmp, state);
+
+ /* Default case. */
+ if (state->switch_state.previous_default && default_case.is_empty()) {
+ default_case.append_list(&tmp);
+ continue;
+ }
+
+ /* If default case found, append 'after_default' list. */
+ if (!default_case.is_empty())
+ after_default.append_list(&tmp);
+ else
+ instructions->append_list(&tmp);
+ }
+
+ /* Handle the default case. This is done here because default might not be
+ * the last case. We need to add checks against following cases first to see
+ * if default should be chosen or not.
+ */
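+   /* Illustrative: in
+    *
+    *    switch (x) {
+    *    case 1:  f(); break;
+    *    default: g(); break;
+    *    case 2:  h(); break;
+    *    }
+    *
+    * labels before the default are handled by the fallthrough/break
+    * machinery, so run_default only needs to fold in the labels that come
+    * after the default: here run_default = !(x == 2).
+    */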
+ if (!default_case.is_empty()) {
+ ir_factory body(instructions, state);
+
+ ir_expression *cmp = NULL;
+
+ hash_table_foreach(state->switch_state.labels_ht, entry) {
+ const struct case_label *const l = (struct case_label *) entry->data;
+
+ /* If the switch init-value is the value of one of the labels that
+ * occurs after the default case, disable execution of the default
+ * case.
+ */
+ if (l->after_default) {
+ ir_constant *const cnst =
+ state->switch_state.test_var->type->base_type == GLSL_TYPE_UINT
+ ? body.constant(unsigned(l->value))
+ : body.constant(int(l->value));
+
+ cmp = cmp == NULL
+ ? equal(cnst, state->switch_state.test_var)
+ : logic_or(cmp, equal(cnst, state->switch_state.test_var));
+ }
+ }
+
+ if (cmp != NULL)
+ body.emit(assign(state->switch_state.run_default, logic_not(cmp)));
+ else
+ body.emit(assign(state->switch_state.run_default, body.constant(true)));
+
+ /* Append default case and all cases after it. */
+ instructions->append_list(&default_case);
+ instructions->append_list(&after_default);
+ }
+
+ /* Case statements do not have r-values. */
+ return NULL;
+}
+
+ir_rvalue *
+ast_case_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ labels->hir(instructions, state);
+
+ /* Conditionally set fallthru state based on break state. */
+ ir_factory reset_fallthru(instructions, state);
+ reset_fallthru.emit(assign(state->switch_state.is_fallthru_var,
+ logic_and(state->switch_state.is_fallthru_var,
+ logic_not(state->switch_state.is_break_var))));
+
+ /* Guard case statements depending on fallthru state. */
+ ir_dereference_variable *const deref_fallthru_guard =
+ new(state) ir_dereference_variable(state->switch_state.is_fallthru_var);
+ ir_if *const test_fallthru = new(state) ir_if(deref_fallthru_guard);
+
+ foreach_list_typed (ast_node, stmt, link, & this->stmts)
+ stmt->hir(& test_fallthru->then_instructions, state);
+
+ instructions->push_tail(test_fallthru);
+
+ /* Case statements do not have r-values. */
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_case_label_list::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ foreach_list_typed (ast_case_label, label, link, & this->labels)
+ label->hir(instructions, state);
+
+ /* Case labels do not have r-values. */
+ return NULL;
+}
+
+ir_rvalue *
+ast_case_label::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ ir_factory body(instructions, state);
+
+ ir_variable *const fallthru_var = state->switch_state.is_fallthru_var;
+
+ /* If not default case, ... */
+ if (this->test_value != NULL) {
+ /* Conditionally set fallthru state based on
+ * comparison of cached test expression value to case label.
+ */
+ ir_rvalue *const label_rval = this->test_value->hir(instructions, state);
+ ir_constant *label_const =
+ label_rval->constant_expression_value(body.mem_ctx);
+
+ if (!label_const) {
+ YYLTYPE loc = this->test_value->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "switch statement case label must be a "
+ "constant expression");
+
+ /* Stuff a dummy value in to allow processing to continue. */
+ label_const = body.constant(0);
+ } else {
+ hash_entry *entry =
+ _mesa_hash_table_search(state->switch_state.labels_ht,
+ &label_const->value.u[0]);
+
+ if (entry) {
+ const struct case_label *const l =
+ (struct case_label *) entry->data;
+ const ast_expression *const previous_label = l->ast;
+ YYLTYPE loc = this->test_value->get_location();
+
+ _mesa_glsl_error(& loc, state, "duplicate case value");
+
+ loc = previous_label->get_location();
+ _mesa_glsl_error(& loc, state, "this is the previous case label");
+ } else {
+ struct case_label *l = ralloc(state->switch_state.labels_ht,
+ struct case_label);
+
+ l->value = label_const->value.u[0];
+ l->after_default = state->switch_state.previous_default != NULL;
+ l->ast = this->test_value;
+
+ _mesa_hash_table_insert(state->switch_state.labels_ht,
+ &label_const->value.u[0],
+ l);
+ }
+ }
+
+ /* Create an r-value version of the ir_constant label here (after we may
+ * have created a fake one in error cases) that can be passed to
+ * apply_implicit_conversion below.
+ */
+ ir_rvalue *label = label_const;
+
+ ir_rvalue *deref_test_var =
+ new(body.mem_ctx) ir_dereference_variable(state->switch_state.test_var);
+
+ /*
+ * From GLSL 4.40 specification section 6.2 ("Selection"):
+ *
+ * "The type of the init-expression value in a switch statement must
+ * be a scalar int or uint. The type of the constant-expression value
+ * in a case label also must be a scalar int or uint. When any pair
+ * of these values is tested for "equal value" and the types do not
+ * match, an implicit conversion will be done to convert the int to a
+ * uint (see section 4.1.10 “Implicit Conversions”) before the compare
+ * is done."
+ */
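+      /* Illustrative: with `uint u; switch (u) { case 1: ... }' the int
+       * literal 1 is implicitly converted to uint before the comparison.
+       */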
+ if (label->type != state->switch_state.test_var->type) {
+ YYLTYPE loc = this->test_value->get_location();
+
+ const glsl_type *type_a = label->type;
+ const glsl_type *type_b = state->switch_state.test_var->type;
+
+ /* Check if int->uint implicit conversion is supported. */
+ bool integer_conversion_supported =
+ glsl_type::int_type->can_implicitly_convert_to(glsl_type::uint_type,
+ state);
+
+ if ((!type_a->is_integer_32() || !type_b->is_integer_32()) ||
+ !integer_conversion_supported) {
+ _mesa_glsl_error(&loc, state, "type mismatch with switch "
+ "init-expression and case label (%s != %s)",
+ type_a->name, type_b->name);
+ } else {
+ /* Conversion of the case label. */
+ if (type_a->base_type == GLSL_TYPE_INT) {
+ if (!apply_implicit_conversion(glsl_type::uint_type,
+ label, state))
+ _mesa_glsl_error(&loc, state, "implicit type conversion error");
+ } else {
+ /* Conversion of the init-expression value. */
+ if (!apply_implicit_conversion(glsl_type::uint_type,
+ deref_test_var, state))
+ _mesa_glsl_error(&loc, state, "implicit type conversion error");
+ }
+ }
+
+ /* If the implicit conversion was allowed, the types will already be
+ * the same. If the implicit conversion wasn't allowed, smash the
+ * type of the label anyway. This will prevent the expression
+ * constructor (below) from failing an assertion.
+ */
+ label->type = deref_test_var->type;
+ }
+
+ body.emit(assign(fallthru_var,
+ logic_or(fallthru_var, equal(label, deref_test_var))));
+ } else { /* default case */
+ if (state->switch_state.previous_default) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(& loc, state,
+ "multiple default labels in one switch");
+
+ loc = state->switch_state.previous_default->get_location();
+ _mesa_glsl_error(& loc, state, "this is the first default label");
+ }
+ state->switch_state.previous_default = this;
+
+ /* Set fallthru condition on 'run_default' bool. */
+ body.emit(assign(fallthru_var,
+ logic_or(fallthru_var,
+ state->switch_state.run_default)));
+ }
+
+ /* Case statements do not have r-values. */
+ return NULL;
+}
+
+void
+ast_iteration_statement::condition_to_hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ if (condition != NULL) {
+ ir_rvalue *const cond =
+ condition->hir(instructions, state);
+
+ if ((cond == NULL)
+ || !cond->type->is_boolean() || !cond->type->is_scalar()) {
+ YYLTYPE loc = condition->get_location();
+
+ _mesa_glsl_error(& loc, state,
+ "loop condition must be scalar boolean");
+ } else {
+ /* As the first code in the loop body, generate a block that looks
+ * like 'if (!condition) break;' as the loop termination condition.
+ */
+ ir_rvalue *const not_cond =
+ new(ctx) ir_expression(ir_unop_logic_not, cond);
+
+ ir_if *const if_stmt = new(ctx) ir_if(not_cond);
+
+ ir_jump *const break_stmt =
+ new(ctx) ir_loop_jump(ir_loop_jump::jump_break);
+
+ if_stmt->then_instructions.push_tail(break_stmt);
+ instructions->push_tail(if_stmt);
+ }
+ }
+}
+
+
+ir_rvalue *
+ast_iteration_statement::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+
+ /* For-loops and while-loops start a new scope, but do-while loops do not.
+ */
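+   /* For example, in "for (int i = 0; i < 4; i++) { ... }" the declaration
+    * of i belongs to the loop's scope, while in "do { int j; } while (c);"
+    * the condition is evaluated in the enclosing scope and cannot see j.
+    */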
+ if (mode != ast_do_while)
+ state->symbols->push_scope();
+
+ if (init_statement != NULL)
+ init_statement->hir(instructions, state);
+
+ ir_loop *const stmt = new(ctx) ir_loop();
+ instructions->push_tail(stmt);
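+   /* The loop is lowered in source order: for while- and for-loops the
+    * termination check is generated first, then the body, then the
+    * increment ("rest") expression; for do-while loops the check is
+    * generated last, matching its evaluation at the end of each iteration.
+    */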
+
+ /* Track the current loop nesting. */
+ ast_iteration_statement *nesting_ast = state->loop_nesting_ast;
+
+ state->loop_nesting_ast = this;
+
+   /* Likewise, indicate that the following code is closest to a loop,
+ * NOT closest to a switch.
+ */
+ bool saved_is_switch_innermost = state->switch_state.is_switch_innermost;
+ state->switch_state.is_switch_innermost = false;
+
+ if (mode != ast_do_while)
+ condition_to_hir(&stmt->body_instructions, state);
+
+ if (body != NULL)
+ body->hir(& stmt->body_instructions, state);
+
+ if (rest_expression != NULL)
+ rest_expression->hir(& stmt->body_instructions, state);
+
+ if (mode == ast_do_while)
+ condition_to_hir(&stmt->body_instructions, state);
+
+ if (mode != ast_do_while)
+ state->symbols->pop_scope();
+
+ /* Restore previous nesting before returning. */
+ state->loop_nesting_ast = nesting_ast;
+ state->switch_state.is_switch_innermost = saved_is_switch_innermost;
+
+ /* Loops do not have r-values.
+ */
+ return NULL;
+}
+
+
+/**
+ * Determine if the given type is valid for establishing a default precision
+ * qualifier.
+ *
+ * From GLSL ES 3.00 section 4.5.4 ("Default Precision Qualifiers"):
+ *
+ * "The precision statement
+ *
+ * precision precision-qualifier type;
+ *
+ * can be used to establish a default precision qualifier. The type field
+ * can be either int or float or any of the sampler types, and the
+ * precision-qualifier can be lowp, mediump, or highp."
+ *
+ * GLSL ES 1.00 has similar language. GLSL 1.30 doesn't allow precision
+ * qualifiers on sampler types, but this seems like an oversight (since the
+ * intention of including these in GLSL 1.30 is to allow compatibility with ES
+ * shaders). So we allow int, float, and all sampler types regardless of GLSL
+ * version.
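+ *
+ * For example, "precision mediump float;" and "precision lowp sampler2D;"
+ * are accepted, while "precision highp vec4;" is rejected because vec4 is
+ * neither a scalar int/float nor an opaque type.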
+ */
+static bool
+is_valid_default_precision_type(const struct glsl_type *const type)
+{
+ if (type == NULL)
+ return false;
+
+ switch (type->base_type) {
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ /* "int" and "float" are valid, but vectors and matrices are not. */
+ return type->vector_elements == 1 && type->matrix_columns == 1;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+
+ir_rvalue *
+ast_type_specifier::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ if (this->default_precision == ast_precision_none && this->structure == NULL)
+ return NULL;
+
+ YYLTYPE loc = this->get_location();
+
+ /* If this is a precision statement, check that the type to which it is
+ * applied is either float or int.
+ *
+ * From section 4.5.3 of the GLSL 1.30 spec:
+ * "The precision statement
+ * precision precision-qualifier type;
+ * can be used to establish a default precision qualifier. The type
+ * field can be either int or float [...]. Any other types or
+ * qualifiers will result in an error.
+ */
+ if (this->default_precision != ast_precision_none) {
+ if (!state->check_precision_qualifiers_allowed(&loc))
+ return NULL;
+
+ if (this->structure != NULL) {
+ _mesa_glsl_error(&loc, state,
+ "precision qualifiers do not apply to structures");
+ return NULL;
+ }
+
+ if (this->array_specifier != NULL) {
+ _mesa_glsl_error(&loc, state,
+ "default precision statements do not apply to "
+ "arrays");
+ return NULL;
+ }
+
+ const struct glsl_type *const type =
+ state->symbols->get_type(this->type_name);
+ if (!is_valid_default_precision_type(type)) {
+ _mesa_glsl_error(&loc, state,
+ "default precision statements apply only to "
+ "float, int, and opaque types");
+ return NULL;
+ }
+
+ if (state->es_shader) {
+ /* Section 4.5.3 (Default Precision Qualifiers) of the GLSL ES 1.00
+ * spec says:
+ *
+ * "Non-precision qualified declarations will use the precision
+ * qualifier specified in the most recent precision statement
+ * that is still in scope. The precision statement has the same
+ * scoping rules as variable declarations. If it is declared
+ * inside a compound statement, its effect stops at the end of
+ * the innermost statement it was declared in. Precision
+ * statements in nested scopes override precision statements in
+ * outer scopes. Multiple precision statements for the same basic
+ * type can appear inside the same scope, with later statements
+ * overriding earlier statements within that scope."
+ *
+ * Default precision specifications follow the same scope rules as
+ * variables. So, we can track the state of the default precision
+ * qualifiers in the symbol table, and the rules will just work. This
+ * is a slight abuse of the symbol table, but it has the semantics
+ * that we want.
+ */
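+      /* For example:
+       *
+       *    precision mediump float;
+       *    {
+       *       precision highp float;   // overrides within this block
+       *       float x;                 // highp
+       *    }
+       *    float y;                    // mediump again
+       */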
+ state->symbols->add_default_precision_qualifier(this->type_name,
+ this->default_precision);
+ }
+
+ {
+ void *ctx = state;
+
+ const char* precision_type = NULL;
+ switch (this->default_precision) {
+ case GLSL_PRECISION_HIGH:
+ precision_type = "highp";
+ break;
+ case GLSL_PRECISION_MEDIUM:
+ precision_type = "mediump";
+ break;
+ case GLSL_PRECISION_LOW:
+ precision_type = "lowp";
+ break;
+ case GLSL_PRECISION_NONE:
+ precision_type = "";
+ break;
+ }
+
+ char* precision_statement = ralloc_asprintf(ctx, "precision %s %s", precision_type, this->type_name);
+ ir_precision_statement *const stmt = new(ctx) ir_precision_statement(precision_statement);
+
+ instructions->push_head(stmt);
+ }
+
+ return NULL;
+ }
+
+ /* _mesa_ast_set_aggregate_type() sets the <structure> field so that
+ * process_record_constructor() can do type-checking on C-style initializer
+ * expressions of structs, but ast_struct_specifier should only be translated
+ * to HIR if it is declaring the type of a structure.
+ *
+ * The ->is_declaration field is false for initializers of variables
+ * declared separately from the struct's type definition.
+ *
+ * struct S { ... }; (is_declaration = true)
+ * struct T { ... } t = { ... }; (is_declaration = true)
+ * S s = { ... }; (is_declaration = false)
+ */
+ if (this->structure != NULL && this->structure->is_declaration)
+ return this->structure->hir(instructions, state);
+
+ return NULL;
+}
+
+
+/**
+ * Process a structure or interface block tree into an array of structure fields
+ *
+ * Aside from some syntax differences, structures and interface blocks are
+ * almost identical after parsing.  They are similar enough that the
+ * AST for each can be processed the same way into a set of
+ * \c glsl_struct_field to describe the members.
+ *
+ * If we're processing an interface block, var_mode should be the type of the
+ * interface block (ir_var_shader_in, ir_var_shader_out, ir_var_uniform or
+ * ir_var_shader_storage). If we're processing a structure, var_mode should be
+ * ir_var_auto.
+ *
+ * \return
+ * The number of fields processed.  A pointer to the array of structure
+ * fields is stored in \c *fields_ret.
+ */
+static unsigned
+ast_process_struct_or_iface_block_members(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state,
+ exec_list *declarations,
+ glsl_struct_field **fields_ret,
+ bool is_interface,
+ enum glsl_matrix_layout matrix_layout,
+ bool allow_reserved_names,
+ ir_variable_mode var_mode,
+ ast_type_qualifier *layout,
+ unsigned block_stream,
+ unsigned block_xfb_buffer,
+ unsigned block_xfb_offset,
+ unsigned expl_location,
+ unsigned expl_align)
+{
+ unsigned decl_count = 0;
+ unsigned next_offset = 0;
+
+ /* Make an initial pass over the list of fields to determine how
+ * many there are. Each element in this list is an ast_declarator_list.
+ * This means that we actually need to count the number of elements in the
+ * 'declarations' list in each of the elements.
+ */
+ foreach_list_typed (ast_declarator_list, decl_list, link, declarations) {
+ decl_count += decl_list->declarations.length();
+ }
+
+ /* Allocate storage for the fields and process the field
+ * declarations. As the declarations are processed, try to also convert
+ * the types to HIR. This ensures that structure definitions embedded in
+ * other structure definitions or in interface blocks are processed.
+ */
+ glsl_struct_field *const fields = rzalloc_array(state, glsl_struct_field,
+ decl_count);
+
+ bool first_member = true;
+ bool first_member_has_explicit_location = false;
+
+ unsigned i = 0;
+ foreach_list_typed (ast_declarator_list, decl_list, link, declarations) {
+ const char *type_name;
+ YYLTYPE loc = decl_list->get_location();
+
+ decl_list->type->specifier->hir(instructions, state);
+
+ /* Section 4.1.8 (Structures) of the GLSL 1.10 spec says:
+ *
+ * "Anonymous structures are not supported; so embedded structures
+ * must have a declarator. A name given to an embedded struct is
+ * scoped at the same level as the struct it is embedded in."
+ *
+ * The same section of the GLSL 1.20 spec says:
+ *
+ * "Anonymous structures are not supported. Embedded structures are
+ * not supported."
+ *
+       * The GLSL ES 1.00 and 3.00 specs have similar language. So, we allow
+ * embedded structures in 1.10 only.
+ */
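+      /* For example, "struct A { struct B { float f; } b; };" declares an
+       * embedded structure and is therefore accepted only in GLSL 1.10.
+       */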
+ if (state->language_version != 110 &&
+ decl_list->type->specifier->structure != NULL)
+ _mesa_glsl_error(&loc, state,
+ "embedded structure declarations are not allowed");
+
+ const glsl_type *decl_type =
+ decl_list->type->glsl_type(& type_name, state);
+
+ const struct ast_type_qualifier *const qual =
+ &decl_list->type->qualifier;
+
+ /* From section 4.3.9 of the GLSL 4.40 spec:
+ *
+ * "[In interface blocks] opaque types are not allowed."
+ *
+ * It should be impossible for decl_type to be NULL here. Cases that
+ * might naturally lead to decl_type being NULL, especially for the
+ * is_interface case, will have resulted in compilation having
+ * already halted due to a syntax error.
+ */
+ assert(decl_type);
+
+ if (is_interface) {
+ /* From section 4.3.7 of the ARB_bindless_texture spec:
+ *
+ * "(remove the following bullet from the last list on p. 39,
+ * thereby permitting sampler types in interface blocks; image
+ * types are also permitted in blocks by this extension)"
+ *
+ * * sampler types are not allowed
+ */
+ if (decl_type->contains_atomic() ||
+ (!state->has_bindless() && decl_type->contains_opaque())) {
+ _mesa_glsl_error(&loc, state, "uniform/buffer in non-default "
+ "interface block contains %s variable",
+ state->has_bindless() ? "atomic" : "opaque");
+ }
+ } else {
+ if (decl_type->contains_atomic()) {
+ /* From section 4.1.7.3 of the GLSL 4.40 spec:
+ *
+ * "Members of structures cannot be declared as atomic counter
+ * types."
+ */
+ _mesa_glsl_error(&loc, state, "atomic counter in structure");
+ }
+
+ if (!state->has_bindless() && decl_type->contains_image()) {
+ /* FINISHME: Same problem as with atomic counters.
+ * FINISHME: Request clarification from Khronos and add
+ * FINISHME: spec quotation here.
+ */
+ _mesa_glsl_error(&loc, state, "image in structure");
+ }
+ }
+
+ if (qual->flags.q.explicit_binding) {
+ _mesa_glsl_error(&loc, state,
+ "binding layout qualifier cannot be applied "
+ "to struct or interface block members");
+ }
+
+ if (is_interface) {
+ if (!first_member) {
+ if (!layout->flags.q.explicit_location &&
+ ((first_member_has_explicit_location &&
+ !qual->flags.q.explicit_location) ||
+ (!first_member_has_explicit_location &&
+ qual->flags.q.explicit_location))) {
+ _mesa_glsl_error(&loc, state,
+ "when block-level location layout qualifier "
+ "is not supplied either all members must "
+ "have a location layout qualifier or all "
+ "members must not have a location layout "
+ "qualifier");
+ }
+ } else {
+ first_member = false;
+ first_member_has_explicit_location =
+ qual->flags.q.explicit_location;
+ }
+ }
+
+ if (qual->flags.q.std140 ||
+ qual->flags.q.std430 ||
+ qual->flags.q.packed ||
+ qual->flags.q.shared) {
+ _mesa_glsl_error(&loc, state,
+ "uniform/shader storage block layout qualifiers "
+ "std140, std430, packed, and shared can only be "
+ "applied to uniform/shader storage blocks, not "
+ "members");
+ }
+
+ if (qual->flags.q.constant) {
+ _mesa_glsl_error(&loc, state,
+ "const storage qualifier cannot be applied "
+ "to struct or interface block members");
+ }
+
+ validate_memory_qualifier_for_type(state, &loc, qual, decl_type);
+ validate_image_format_qualifier_for_type(state, &loc, qual, decl_type);
+
+ /* From Section 4.4.2.3 (Geometry Outputs) of the GLSL 4.50 spec:
+ *
+ * "A block member may be declared with a stream identifier, but
+ * the specified stream must match the stream associated with the
+ * containing block."
+ */
+ if (qual->flags.q.explicit_stream) {
+ unsigned qual_stream;
+ if (process_qualifier_constant(state, &loc, "stream",
+ qual->stream, &qual_stream) &&
+ qual_stream != block_stream) {
+ _mesa_glsl_error(&loc, state, "stream layout qualifier on "
+ "interface block member does not match "
+ "the interface block (%u vs %u)", qual_stream,
+ block_stream);
+ }
+ }
+
+ int xfb_buffer;
+ unsigned explicit_xfb_buffer = 0;
+ if (qual->flags.q.explicit_xfb_buffer) {
+ unsigned qual_xfb_buffer;
+ if (process_qualifier_constant(state, &loc, "xfb_buffer",
+ qual->xfb_buffer, &qual_xfb_buffer)) {
+ explicit_xfb_buffer = 1;
+ if (qual_xfb_buffer != block_xfb_buffer)
+ _mesa_glsl_error(&loc, state, "xfb_buffer layout qualifier on "
+ "interface block member does not match "
+ "the interface block (%u vs %u)",
+ qual_xfb_buffer, block_xfb_buffer);
+ }
+ xfb_buffer = (int) qual_xfb_buffer;
+ } else {
+ if (layout)
+ explicit_xfb_buffer = layout->flags.q.explicit_xfb_buffer;
+ xfb_buffer = (int) block_xfb_buffer;
+ }
+
+ int xfb_stride = -1;
+ if (qual->flags.q.explicit_xfb_stride) {
+ unsigned qual_xfb_stride;
+ if (process_qualifier_constant(state, &loc, "xfb_stride",
+ qual->xfb_stride, &qual_xfb_stride)) {
+ xfb_stride = (int) qual_xfb_stride;
+ }
+ }
+
+ if (qual->flags.q.uniform && qual->has_interpolation()) {
+ _mesa_glsl_error(&loc, state,
+ "interpolation qualifiers cannot be used "
+ "with uniform interface blocks");
+ }
+
+ if ((qual->flags.q.uniform || !is_interface) &&
+ qual->has_auxiliary_storage()) {
+ _mesa_glsl_error(&loc, state,
+ "auxiliary storage qualifiers cannot be used "
+ "in uniform blocks or structures.");
+ }
+
+ if (qual->flags.q.row_major || qual->flags.q.column_major) {
+ if (!qual->flags.q.uniform && !qual->flags.q.buffer) {
+ _mesa_glsl_error(&loc, state,
+ "row_major and column_major can only be "
+ "applied to interface blocks");
+ } else
+ validate_matrix_layout_for_type(state, &loc, decl_type, NULL);
+ }
+
+ foreach_list_typed (ast_declaration, decl, link,
+ &decl_list->declarations) {
+ YYLTYPE loc = decl->get_location();
+
+ if (!allow_reserved_names)
+ validate_identifier(decl->identifier, loc, state);
+
+ const struct glsl_type *field_type =
+ process_array_type(&loc, decl_type, decl->array_specifier, state);
+ validate_array_dimensions(field_type, state, &loc);
+ fields[i].type = field_type;
+ fields[i].name = decl->identifier;
+ fields[i].interpolation =
+ interpret_interpolation_qualifier(qual, field_type,
+ var_mode, state, &loc);
+ fields[i].centroid = qual->flags.q.centroid ? 1 : 0;
+ fields[i].sample = qual->flags.q.sample ? 1 : 0;
+ fields[i].patch = qual->flags.q.patch ? 1 : 0;
+ fields[i].offset = -1;
+ fields[i].explicit_xfb_buffer = explicit_xfb_buffer;
+ fields[i].xfb_buffer = xfb_buffer;
+ fields[i].xfb_stride = xfb_stride;
+
+ if (qual->flags.q.explicit_location) {
+ unsigned qual_location;
+ if (process_qualifier_constant(state, &loc, "location",
+ qual->location, &qual_location)) {
+ fields[i].location = qual_location +
+ (fields[i].patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
+ expl_location = fields[i].location +
+ fields[i].type->count_attribute_slots(false);
+ }
+ } else {
+ if (layout && layout->flags.q.explicit_location) {
+ fields[i].location = expl_location;
+ expl_location += fields[i].type->count_attribute_slots(false);
+ } else {
+ fields[i].location = -1;
+ }
+ }
+
+         /* Offset can only be used with std430 and std140 layouts; an
+          * initial value of 0 is used for error detection.
+ */
+ unsigned align = 0;
+ unsigned size = 0;
+ if (layout) {
+ bool row_major;
+ if (qual->flags.q.row_major ||
+ matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ row_major = true;
+ } else {
+ row_major = false;
+ }
+
+            if (layout->flags.q.std140) {
+ align = field_type->std140_base_alignment(row_major);
+ size = field_type->std140_size(row_major);
+ } else if (layout->flags.q.std430) {
+ align = field_type->std430_base_alignment(row_major);
+ size = field_type->std430_size(row_major);
+ }
+ }
+
+ if (qual->flags.q.explicit_offset) {
+ unsigned qual_offset;
+ if (process_qualifier_constant(state, &loc, "offset",
+ qual->offset, &qual_offset)) {
+ if (align != 0 && size != 0) {
+ if (next_offset > qual_offset)
+ _mesa_glsl_error(&loc, state, "layout qualifier "
+ "offset overlaps previous member");
+
+ if (qual_offset % align) {
+ _mesa_glsl_error(&loc, state, "layout qualifier offset "
+ "must be a multiple of the base "
+ "alignment of %s", field_type->name);
+ }
+ fields[i].offset = qual_offset;
+ next_offset = qual_offset + size;
+ } else {
+ _mesa_glsl_error(&loc, state, "offset can only be used "
+ "with std430 and std140 layouts");
+ }
+ }
+ }
+
+ if (qual->flags.q.explicit_align || expl_align != 0) {
+ unsigned offset = fields[i].offset != -1 ? fields[i].offset :
+ next_offset;
+ if (align == 0 || size == 0) {
+ _mesa_glsl_error(&loc, state, "align can only be used with "
+ "std430 and std140 layouts");
+ } else if (qual->flags.q.explicit_align) {
+ unsigned member_align;
+ if (process_qualifier_constant(state, &loc, "align",
+ qual->align, &member_align)) {
+ if (member_align == 0 ||
+ member_align & (member_align - 1)) {
+ _mesa_glsl_error(&loc, state, "align layout qualifier "
+ "is not a power of 2");
+ } else {
+ fields[i].offset = glsl_align(offset, member_align);
+ next_offset = fields[i].offset + size;
+ }
+ }
+ } else {
+ fields[i].offset = glsl_align(offset, expl_align);
+ next_offset = fields[i].offset + size;
+ }
+ } else if (!qual->flags.q.explicit_offset) {
+ if (align != 0 && size != 0)
+ next_offset = glsl_align(next_offset, align) + size;
+ }
+
+ /* From the ARB_enhanced_layouts spec:
+ *
+ * "The given offset applies to the first component of the first
+ * member of the qualified entity. Then, within the qualified
+ * entity, subsequent components are each assigned, in order, to
+ * the next available offset aligned to a multiple of that
+ * component's size. Aggregate types are flattened down to the
+ * component level to get this sequence of components."
+ */
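+         /* For example, a member declared "layout(xfb_offset = 0) vec4 v;"
+          * occupies bytes 0..15 of the captured output, so the next member
+          * is assigned byte offset 16.
+          */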
+ if (qual->flags.q.explicit_xfb_offset) {
+ unsigned xfb_offset;
+ if (process_qualifier_constant(state, &loc, "xfb_offset",
+ qual->offset, &xfb_offset)) {
+ fields[i].offset = xfb_offset;
+ block_xfb_offset = fields[i].offset +
+ 4 * field_type->component_slots();
+ }
+ } else {
+ if (layout && layout->flags.q.explicit_xfb_offset) {
+ unsigned align = field_type->is_64bit() ? 8 : 4;
+ fields[i].offset = glsl_align(block_xfb_offset, align);
+ block_xfb_offset += 4 * field_type->component_slots();
+ }
+ }
+
+         /* Propagate row- / column-major information down the fields of the
+          * structure or interface block.  Structures need this data because
+          * the structure may contain a structure that contains ... a matrix
+          * that needs the proper layout.
+ */
+ if (is_interface && layout &&
+ (layout->flags.q.uniform || layout->flags.q.buffer) &&
+ (field_type->without_array()->is_matrix()
+ || field_type->without_array()->is_struct())) {
+ /* If no layout is specified for the field, inherit the layout
+ * from the block.
+ */
+ fields[i].matrix_layout = matrix_layout;
+
+ if (qual->flags.q.row_major)
+ fields[i].matrix_layout = GLSL_MATRIX_LAYOUT_ROW_MAJOR;
+ else if (qual->flags.q.column_major)
+ fields[i].matrix_layout = GLSL_MATRIX_LAYOUT_COLUMN_MAJOR;
+
+            /* If we're processing a uniform or buffer block, the matrix
+ * layout must be decided by this point.
+ */
+ assert(fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR
+ || fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR);
+ }
+
+ /* Memory qualifiers are allowed on buffer and image variables, while
+ * the format qualifier is only accepted for images.
+ */
+ if (var_mode == ir_var_shader_storage ||
+ field_type->without_array()->is_image()) {
+ /* For readonly and writeonly qualifiers the field definition,
+ * if set, overwrites the layout qualifier.
+ */
+ if (qual->flags.q.read_only || qual->flags.q.write_only) {
+ fields[i].memory_read_only = qual->flags.q.read_only;
+ fields[i].memory_write_only = qual->flags.q.write_only;
+ } else {
+ fields[i].memory_read_only =
+ layout ? layout->flags.q.read_only : 0;
+ fields[i].memory_write_only =
+ layout ? layout->flags.q.write_only : 0;
+ }
+
+ /* For other qualifiers, we set the flag if either the layout
+ * qualifier or the field qualifier are set
+ */
+ fields[i].memory_coherent = qual->flags.q.coherent ||
+ (layout && layout->flags.q.coherent);
+ fields[i].memory_volatile = qual->flags.q._volatile ||
+ (layout && layout->flags.q._volatile);
+ fields[i].memory_restrict = qual->flags.q.restrict_flag ||
+ (layout && layout->flags.q.restrict_flag);
+
+ if (field_type->without_array()->is_image()) {
+ if (qual->flags.q.explicit_image_format) {
+ if (qual->image_base_type !=
+ field_type->without_array()->sampled_type) {
+ _mesa_glsl_error(&loc, state, "format qualifier doesn't "
+ "match the base data type of the image");
+ }
+
+ fields[i].image_format = qual->image_format;
+ } else {
+ if (!qual->flags.q.write_only) {
+ _mesa_glsl_error(&loc, state, "image not qualified with "
+ "`writeonly' must have a format layout "
+ "qualifier");
+ }
+
+ fields[i].image_format = PIPE_FORMAT_NONE;
+ }
+ }
+ }
+
+ /* Precision qualifiers do not hold any meaning in Desktop GLSL */
+ if (state->es_shader) {
+ fields[i].precision = select_gles_precision(qual->precision,
+ field_type,
+ state,
+ &loc);
+ } else {
+ fields[i].precision = qual->precision;
+ }
+
+ i++;
+ }
+ }
+
+ assert(i == decl_count);
+
+ *fields_ret = fields;
+ return decl_count;
+}
+
+
+ir_rvalue *
+ast_struct_specifier::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ YYLTYPE loc = this->get_location();
+
+ unsigned expl_location = 0;
+ if (layout && layout->flags.q.explicit_location) {
+ if (!process_qualifier_constant(state, &loc, "location",
+ layout->location, &expl_location)) {
+ return NULL;
+ } else {
+ expl_location = VARYING_SLOT_VAR0 + expl_location;
+ }
+ }
+
+ glsl_struct_field *fields;
+ unsigned decl_count =
+ ast_process_struct_or_iface_block_members(instructions,
+ state,
+ &this->declarations,
+ &fields,
+ false,
+ GLSL_MATRIX_LAYOUT_INHERITED,
+ false /* allow_reserved_names */,
+ ir_var_auto,
+ layout,
+ 0, /* for interface only */
+ 0, /* for interface only */
+ 0, /* for interface only */
+ expl_location,
+ 0 /* for interface only */);
+
+ validate_identifier(this->name, loc, state);
+
+ type = glsl_type::get_struct_instance(fields, decl_count, this->name);
+
+ if (!type->is_anonymous() && !state->symbols->add_type(name, type)) {
+ const glsl_type *match = state->symbols->get_type(name);
+ /* allow struct matching for desktop GL - older UE4 does this */
+ if (match != NULL && state->is_version(130, 0) && match->record_compare(type, true, false))
+ _mesa_glsl_warning(& loc, state, "struct `%s' previously defined", name);
+ else
+ _mesa_glsl_error(& loc, state, "struct `%s' previously defined", name);
+ } else {
+ const glsl_type **s = reralloc(state, state->user_structures,
+ const glsl_type *,
+ state->num_user_structures + 1);
+ if (s != NULL) {
+ s[state->num_user_structures] = type;
+ state->user_structures = s;
+ state->num_user_structures++;
+
+ ir_typedecl_statement* stmt = new(state) ir_typedecl_statement(type);
+ /* Push the struct declarations to the top.
+ * However, do not insert declarations before default precision
+          * statements or other declarations.
+ */
+ ir_instruction* before_node = (ir_instruction*)instructions->get_head();
+ while (before_node &&
+ (before_node->ir_type == ir_type_precision ||
+ before_node->ir_type == ir_type_typedecl))
+ before_node = (ir_instruction*)before_node->next;
+ if (before_node)
+ before_node->insert_before(stmt);
+ else
+ instructions->push_head(stmt);
+ }
+ }
+
+ /* Structure type definitions do not have r-values.
+ */
+ return NULL;
+}
+
+
+/**
+ * Visitor class which detects whether a given interface block has been used.
+ */
+class interface_block_usage_visitor : public ir_hierarchical_visitor
+{
+public:
+ interface_block_usage_visitor(ir_variable_mode mode, const glsl_type *block)
+ : mode(mode), block(block), found(false)
+ {
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (ir->var->data.mode == mode && ir->var->get_interface_type() == block) {
+ found = true;
+ return visit_stop;
+ }
+ return visit_continue;
+ }
+
+ bool usage_found() const
+ {
+ return this->found;
+ }
+
+private:
+ ir_variable_mode mode;
+ const glsl_type *block;
+ bool found;
+};
+
+static bool
+is_unsized_array_last_element(ir_variable *v)
+{
+ const glsl_type *interface_type = v->get_interface_type();
+ int length = interface_type->length;
+
+ assert(v->type->is_unsized_array());
+
+ /* Check if it is the last element of the interface */
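+   /* For example, in "buffer B { int count; float data[]; };" only data
+    * may be unsized, because it is the last member of the block.
+    */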
+ if (strcmp(interface_type->fields.structure[length-1].name, v->name) == 0)
+ return true;
+ return false;
+}
+
+static void
+apply_memory_qualifiers(ir_variable *var, glsl_struct_field field)
+{
+ var->data.memory_read_only = field.memory_read_only;
+ var->data.memory_write_only = field.memory_write_only;
+ var->data.memory_coherent = field.memory_coherent;
+ var->data.memory_volatile = field.memory_volatile;
+ var->data.memory_restrict = field.memory_restrict;
+}
+
+ir_rvalue *
+ast_interface_block::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ YYLTYPE loc = this->get_location();
+
+ /* Interface blocks must be declared at global scope */
+ if (state->current_function != NULL) {
+ _mesa_glsl_error(&loc, state,
+ "Interface block `%s' must be declared "
+ "at global scope",
+ this->block_name);
+ }
+
+ /* Validate qualifiers:
+ *
+ * - Layout Qualifiers as per the table in Section 4.4
+ * ("Layout Qualifiers") of the GLSL 4.50 spec.
+ *
+ * - Memory Qualifiers as per Section 4.10 ("Memory Qualifiers") of the
+ * GLSL 4.50 spec:
+ *
+ * "Additionally, memory qualifiers may also be used in the declaration
+ * of shader storage blocks"
+ *
+ * Note the table in Section 4.4 says std430 is allowed on both uniform and
+ * buffer blocks however Section 4.4.5 (Uniform and Shader Storage Block
+ * Layout Qualifiers) of the GLSL 4.50 spec says:
+ *
+ * "The std430 qualifier is supported only for shader storage blocks;
+ * using std430 on a uniform block will result in a compile-time error."
+ */
+ ast_type_qualifier allowed_blk_qualifiers;
+ allowed_blk_qualifiers.flags.i = 0;
+ if (this->layout.flags.q.buffer || this->layout.flags.q.uniform) {
+ allowed_blk_qualifiers.flags.q.shared = 1;
+ allowed_blk_qualifiers.flags.q.packed = 1;
+ allowed_blk_qualifiers.flags.q.std140 = 1;
+ allowed_blk_qualifiers.flags.q.row_major = 1;
+ allowed_blk_qualifiers.flags.q.column_major = 1;
+ allowed_blk_qualifiers.flags.q.explicit_align = 1;
+ allowed_blk_qualifiers.flags.q.explicit_binding = 1;
+ if (this->layout.flags.q.buffer) {
+ allowed_blk_qualifiers.flags.q.buffer = 1;
+ allowed_blk_qualifiers.flags.q.std430 = 1;
+ allowed_blk_qualifiers.flags.q.coherent = 1;
+ allowed_blk_qualifiers.flags.q._volatile = 1;
+ allowed_blk_qualifiers.flags.q.restrict_flag = 1;
+ allowed_blk_qualifiers.flags.q.read_only = 1;
+ allowed_blk_qualifiers.flags.q.write_only = 1;
+ } else {
+ allowed_blk_qualifiers.flags.q.uniform = 1;
+ }
+ } else {
+ /* Interface block */
+ assert(this->layout.flags.q.in || this->layout.flags.q.out);
+
+ allowed_blk_qualifiers.flags.q.explicit_location = 1;
+ if (this->layout.flags.q.out) {
+ allowed_blk_qualifiers.flags.q.out = 1;
+ if (state->stage == MESA_SHADER_GEOMETRY ||
+ state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL ||
+ state->stage == MESA_SHADER_VERTEX ) {
+ allowed_blk_qualifiers.flags.q.explicit_xfb_offset = 1;
+ allowed_blk_qualifiers.flags.q.explicit_xfb_buffer = 1;
+ allowed_blk_qualifiers.flags.q.xfb_buffer = 1;
+ allowed_blk_qualifiers.flags.q.explicit_xfb_stride = 1;
+ allowed_blk_qualifiers.flags.q.xfb_stride = 1;
+ if (state->stage == MESA_SHADER_GEOMETRY) {
+ allowed_blk_qualifiers.flags.q.stream = 1;
+ allowed_blk_qualifiers.flags.q.explicit_stream = 1;
+ }
+ if (state->stage == MESA_SHADER_TESS_CTRL) {
+ allowed_blk_qualifiers.flags.q.patch = 1;
+ }
+ }
+ } else {
+ allowed_blk_qualifiers.flags.q.in = 1;
+ if (state->stage == MESA_SHADER_TESS_EVAL) {
+ allowed_blk_qualifiers.flags.q.patch = 1;
+ }
+ }
+ }
+
+ this->layout.validate_flags(&loc, state, allowed_blk_qualifiers,
+ "invalid qualifier for block",
+ this->block_name);
+
+ enum glsl_interface_packing packing;
+ if (this->layout.flags.q.std140) {
+ packing = GLSL_INTERFACE_PACKING_STD140;
+ } else if (this->layout.flags.q.packed) {
+ packing = GLSL_INTERFACE_PACKING_PACKED;
+ } else if (this->layout.flags.q.std430) {
+ packing = GLSL_INTERFACE_PACKING_STD430;
+ } else {
+ /* The default layout is shared.
+ */
+ packing = GLSL_INTERFACE_PACKING_SHARED;
+ }
+
+ ir_variable_mode var_mode;
+ const char *iface_type_name;
+ if (this->layout.flags.q.in) {
+ var_mode = ir_var_shader_in;
+ iface_type_name = "in";
+ } else if (this->layout.flags.q.out) {
+ var_mode = ir_var_shader_out;
+ iface_type_name = "out";
+ } else if (this->layout.flags.q.uniform) {
+ var_mode = ir_var_uniform;
+ iface_type_name = "uniform";
+ } else if (this->layout.flags.q.buffer) {
+ var_mode = ir_var_shader_storage;
+ iface_type_name = "buffer";
+ } else {
+ var_mode = ir_var_auto;
+ iface_type_name = "UNKNOWN";
+ assert(!"interface block layout qualifier not found!");
+ }
+
+ enum glsl_matrix_layout matrix_layout = GLSL_MATRIX_LAYOUT_INHERITED;
+ if (this->layout.flags.q.row_major)
+ matrix_layout = GLSL_MATRIX_LAYOUT_ROW_MAJOR;
+ else if (this->layout.flags.q.column_major)
+ matrix_layout = GLSL_MATRIX_LAYOUT_COLUMN_MAJOR;
+
+ bool redeclaring_per_vertex = strcmp(this->block_name, "gl_PerVertex") == 0;
+ exec_list declared_variables;
+ glsl_struct_field *fields;
+
+ /* For blocks that accept memory qualifiers (i.e. shader storage), verify
+ * that we don't have incompatible qualifiers
+ */
+ if (this->layout.flags.q.read_only && this->layout.flags.q.write_only) {
+ _mesa_glsl_error(&loc, state,
+ "Interface block sets both readonly and writeonly");
+ }
+
+ unsigned qual_stream;
+ if (!process_qualifier_constant(state, &loc, "stream", this->layout.stream,
+ &qual_stream) ||
+ !validate_stream_qualifier(&loc, state, qual_stream)) {
+ /* If the stream qualifier is invalid it doesn't make sense to continue
+ * on and try to compare stream layouts on member variables against it
+ * so just return early.
+ */
+ return NULL;
+ }
+
+ unsigned qual_xfb_buffer;
+ if (!process_qualifier_constant(state, &loc, "xfb_buffer",
+ layout.xfb_buffer, &qual_xfb_buffer) ||
+ !validate_xfb_buffer_qualifier(&loc, state, qual_xfb_buffer)) {
+ return NULL;
+ }
+
+ unsigned qual_xfb_offset;
+ if (layout.flags.q.explicit_xfb_offset) {
+ if (!process_qualifier_constant(state, &loc, "xfb_offset",
+ layout.offset, &qual_xfb_offset)) {
+ return NULL;
+ }
+ }
+
+ unsigned qual_xfb_stride;
+ if (layout.flags.q.explicit_xfb_stride) {
+ if (!process_qualifier_constant(state, &loc, "xfb_stride",
+ layout.xfb_stride, &qual_xfb_stride)) {
+ return NULL;
+ }
+ }
+
+ unsigned expl_location = 0;
+ if (layout.flags.q.explicit_location) {
+ if (!process_qualifier_constant(state, &loc, "location",
+ layout.location, &expl_location)) {
+ return NULL;
+ } else {
+ expl_location += this->layout.flags.q.patch ? VARYING_SLOT_PATCH0
+ : VARYING_SLOT_VAR0;
+ }
+ }
+
+ unsigned expl_align = 0;
+ if (layout.flags.q.explicit_align) {
+ if (!process_qualifier_constant(state, &loc, "align",
+ layout.align, &expl_align)) {
+ return NULL;
+ } else {
+ if (expl_align == 0 || expl_align & (expl_align - 1)) {
+ _mesa_glsl_error(&loc, state, "align layout qualifier is not a "
+ "power of 2.");
+ return NULL;
+ }
+ }
+ }
+
+ unsigned int num_variables =
+ ast_process_struct_or_iface_block_members(&declared_variables,
+ state,
+ &this->declarations,
+ &fields,
+ true,
+ matrix_layout,
+ redeclaring_per_vertex,
+ var_mode,
+ &this->layout,
+ qual_stream,
+ qual_xfb_buffer,
+ qual_xfb_offset,
+ expl_location,
+ expl_align);
+
+ if (!redeclaring_per_vertex) {
+ validate_identifier(this->block_name, loc, state);
+
+ /* From section 4.3.9 ("Interface Blocks") of the GLSL 4.50 spec:
+ *
+ * "Block names have no other use within a shader beyond interface
+ * matching; it is a compile-time error to use a block name at global
+ * scope for anything other than as a block name."
+ */
+ ir_variable *var = state->symbols->get_variable(this->block_name);
+ if (var && !var->type->is_interface()) {
+ _mesa_glsl_error(&loc, state, "Block name `%s' is "
+ "already used in the scope.",
+ this->block_name);
+ }
+ }
+
+ const glsl_type *earlier_per_vertex = NULL;
+ if (redeclaring_per_vertex) {
+ /* Find the previous declaration of gl_PerVertex. If we're redeclaring
+ * the named interface block gl_in, we can find it by looking at the
+ * previous declaration of gl_in. Otherwise we can find it by looking
+       * at the previous declaration of any of the built-in outputs,
+ * e.g. gl_Position.
+ *
+ * Also check that the instance name and array-ness of the redeclaration
+ * are correct.
+ */
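+      /* For example, a geometry shader may redeclare its input block as
+       *
+       *    in gl_PerVertex { vec4 gl_Position; } gl_in[];
+       */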
+ switch (var_mode) {
+ case ir_var_shader_in:
+ if (ir_variable *earlier_gl_in =
+ state->symbols->get_variable("gl_in")) {
+ earlier_per_vertex = earlier_gl_in->get_interface_type();
+ } else {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of gl_PerVertex input not allowed "
+ "in the %s shader",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ if (this->instance_name == NULL ||
+ strcmp(this->instance_name, "gl_in") != 0 || this->array_specifier == NULL ||
+ !this->array_specifier->is_single_dimension()) {
+ _mesa_glsl_error(&loc, state,
+ "gl_PerVertex input must be redeclared as "
+ "gl_in[]");
+ }
+ break;
+ case ir_var_shader_out:
+ if (ir_variable *earlier_gl_Position =
+ state->symbols->get_variable("gl_Position")) {
+ earlier_per_vertex = earlier_gl_Position->get_interface_type();
+ } else if (ir_variable *earlier_gl_out =
+ state->symbols->get_variable("gl_out")) {
+ earlier_per_vertex = earlier_gl_out->get_interface_type();
+ } else {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of gl_PerVertex output not "
+ "allowed in the %s shader",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ if (state->stage == MESA_SHADER_TESS_CTRL) {
+ if (this->instance_name == NULL ||
+ strcmp(this->instance_name, "gl_out") != 0 || this->array_specifier == NULL) {
+ _mesa_glsl_error(&loc, state,
+ "gl_PerVertex output must be redeclared as "
+ "gl_out[]");
+ }
+ } else {
+ if (this->instance_name != NULL) {
+ _mesa_glsl_error(&loc, state,
+ "gl_PerVertex output may not be redeclared with "
+ "an instance name");
+ }
+ }
+ break;
+ default:
+ _mesa_glsl_error(&loc, state,
+ "gl_PerVertex must be declared as an input or an "
+ "output");
+ break;
+ }
+
+ if (earlier_per_vertex == NULL) {
+ /* An error has already been reported. Bail out to avoid null
+ * dereferences later in this function.
+ */
+ return NULL;
+ }
+
+ /* Copy locations from the old gl_PerVertex interface block. */
+ for (unsigned i = 0; i < num_variables; i++) {
+ int j = earlier_per_vertex->field_index(fields[i].name);
+ if (j == -1) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of gl_PerVertex must be a subset "
+ "of the built-in members of gl_PerVertex");
+ } else {
+ fields[i].location =
+ earlier_per_vertex->fields.structure[j].location;
+ fields[i].offset =
+ earlier_per_vertex->fields.structure[j].offset;
+ fields[i].interpolation =
+ earlier_per_vertex->fields.structure[j].interpolation;
+ fields[i].centroid =
+ earlier_per_vertex->fields.structure[j].centroid;
+ fields[i].sample =
+ earlier_per_vertex->fields.structure[j].sample;
+ fields[i].patch =
+ earlier_per_vertex->fields.structure[j].patch;
+ fields[i].precision =
+ earlier_per_vertex->fields.structure[j].precision;
+ fields[i].explicit_xfb_buffer =
+ earlier_per_vertex->fields.structure[j].explicit_xfb_buffer;
+ fields[i].xfb_buffer =
+ earlier_per_vertex->fields.structure[j].xfb_buffer;
+ fields[i].xfb_stride =
+ earlier_per_vertex->fields.structure[j].xfb_stride;
+ }
+ }
+
+ /* From section 7.1 ("Built-in Language Variables") of the GLSL 4.10
+ * spec:
+ *
+ * If a built-in interface block is redeclared, it must appear in
+ * the shader before any use of any member included in the built-in
+ * declaration, or a compilation error will result.
+ *
+    * This appears to be a clarification of the behaviour established for
+    * gl_PerVertex by GLSL 1.50, so we implement this behaviour regardless
+    * of GLSL version.
+ */
+ interface_block_usage_visitor v(var_mode, earlier_per_vertex);
+ v.run(instructions);
+ if (v.usage_found()) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of a built-in interface block must "
+ "appear before any use of any member of the "
+ "interface block");
+ }
+ }
+
+ const glsl_type *block_type =
+ glsl_type::get_interface_instance(fields,
+ num_variables,
+ packing,
+ matrix_layout ==
+ GLSL_MATRIX_LAYOUT_ROW_MAJOR,
+ this->block_name);
+
+ unsigned component_size = block_type->contains_double() ? 8 : 4;
+ int xfb_offset =
+ layout.flags.q.explicit_xfb_offset ? (int) qual_xfb_offset : -1;
+ validate_xfb_offset_qualifier(&loc, state, xfb_offset, block_type,
+ component_size);
+
+ if (!state->symbols->add_interface(block_type->name, block_type, var_mode)) {
+ YYLTYPE loc = this->get_location();
+ _mesa_glsl_error(&loc, state, "interface block `%s' with type `%s' "
+ "already taken in the current scope",
+ this->block_name, iface_type_name);
+ }
+
+ /* Since interface blocks cannot contain statements, it should be
+ * impossible for the block to generate any instructions.
+ */
+ assert(declared_variables.is_empty());
+
+ /* From section 4.3.4 (Inputs) of the GLSL 1.50 spec:
+ *
+ * Geometry shader input variables get the per-vertex values written
+ * out by vertex shader output variables of the same names. Since a
+ * geometry shader operates on a set of vertices, each input varying
+ * variable (or input block, see interface blocks below) needs to be
+ * declared as an array.
+ */
+ if (state->stage == MESA_SHADER_GEOMETRY && this->array_specifier == NULL &&
+ var_mode == ir_var_shader_in) {
+ _mesa_glsl_error(&loc, state, "geometry shader inputs must be arrays");
+ } else if ((state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL) &&
+ !this->layout.flags.q.patch &&
+ this->array_specifier == NULL &&
+ var_mode == ir_var_shader_in) {
+ _mesa_glsl_error(&loc, state, "per-vertex tessellation shader inputs must be arrays");
+ } else if (state->stage == MESA_SHADER_TESS_CTRL &&
+ !this->layout.flags.q.patch &&
+ this->array_specifier == NULL &&
+ var_mode == ir_var_shader_out) {
+ _mesa_glsl_error(&loc, state, "tessellation control shader outputs must be arrays");
+ }
+
+
+ ir_typedecl_statement* stmt = new(state) ir_typedecl_statement(block_type);
+ /* Push the interface declarations to the top.
+ * However, do not insert declarations before default precision
+    * statements or other declarations.
+ */
+ ir_instruction* before_node = (ir_instruction*)instructions->get_head();
+ while (before_node &&
+ (before_node->ir_type == ir_type_precision ||
+ before_node->ir_type == ir_type_typedecl))
+ before_node = (ir_instruction*)before_node->next;
+ if (before_node)
+ before_node->insert_before(stmt);
+ else
+ instructions->push_head(stmt);
+
+ /* Page 39 (page 45 of the PDF) of section 4.3.7 in the GLSL ES 3.00 spec
+ * says:
+ *
+ * "If an instance name (instance-name) is used, then it puts all the
+ * members inside a scope within its own name space, accessed with the
+ * field selector ( . ) operator (analogously to structures)."
+ */
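+   /* For example, "uniform Block { vec4 v; } inst;" makes the member
+    * accessible as "inst.v", whereas without an instance name "v" is
+    * introduced directly into the enclosing scope.
+    */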
+ if (this->instance_name) {
+ if (redeclaring_per_vertex) {
+ /* When a built-in in an unnamed interface block is redeclared,
+ * get_variable_being_redeclared() calls
+ * check_builtin_array_max_size() to make sure that built-in array
+ * variables aren't redeclared to illegal sizes. But we're looking
+ * at a redeclaration of a named built-in interface block. So we
+ * have to manually call check_builtin_array_max_size() for all parts
+ * of the interface that are arrays.
+ */
+ for (unsigned i = 0; i < num_variables; i++) {
+ if (fields[i].type->is_array()) {
+ const unsigned size = fields[i].type->array_size();
+ check_builtin_array_max_size(fields[i].name, size, loc, state);
+ }
+ }
+ } else {
+ validate_identifier(this->instance_name, loc, state);
+ }
+
+ ir_variable *var;
+
+ if (this->array_specifier != NULL) {
+ const glsl_type *block_array_type =
+ process_array_type(&loc, block_type, this->array_specifier, state);
+
+ /* Section 4.3.7 (Interface Blocks) of the GLSL 1.50 spec says:
+ *
+       *     For uniform blocks declared as an array, each individual array
+ * element corresponds to a separate buffer object backing one
+ * instance of the block. As the array size indicates the number
+ * of buffer objects needed, uniform block array declarations
+ * must specify an array size.
+ *
+ * And a few paragraphs later:
+ *
+ * Geometry shader input blocks must be declared as arrays and
+ * follow the array declaration and linking rules for all
+ * geometry shader inputs. All other input and output block
+ * arrays must specify an array size.
+ *
+ * The same applies to tessellation shaders.
+ *
+ * The upshot of this is that the only circumstance where an
+ * interface array size *doesn't* need to be specified is on a
+ * geometry shader input, tessellation control shader input,
+ * tessellation control shader output, and tessellation evaluation
+ * shader input.
+ */
+ if (block_array_type->is_unsized_array()) {
+ bool allow_inputs = state->stage == MESA_SHADER_GEOMETRY ||
+ state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL;
+ bool allow_outputs = state->stage == MESA_SHADER_TESS_CTRL;
+
+ if (this->layout.flags.q.in) {
+ if (!allow_inputs)
+ _mesa_glsl_error(&loc, state,
+ "unsized input block arrays not allowed in "
+ "%s shader",
+ _mesa_shader_stage_to_string(state->stage));
+ } else if (this->layout.flags.q.out) {
+ if (!allow_outputs)
+ _mesa_glsl_error(&loc, state,
+ "unsized output block arrays not allowed in "
+ "%s shader",
+ _mesa_shader_stage_to_string(state->stage));
+ } else {
+ /* by elimination, this is a uniform block array */
+ _mesa_glsl_error(&loc, state,
+ "unsized uniform block arrays not allowed in "
+ "%s shader",
+ _mesa_shader_stage_to_string(state->stage));
+ }
+ }
+
+ /* From section 4.3.9 (Interface Blocks) of the GLSL ES 3.10 spec:
+ *
+ * * Arrays of arrays of blocks are not allowed
+ */
+ if (state->es_shader && block_array_type->is_array() &&
+ block_array_type->fields.array->is_array()) {
+ _mesa_glsl_error(&loc, state,
+ "arrays of arrays interface blocks are "
+ "not allowed");
+ }
+
+ var = new(state) ir_variable(block_array_type,
+ this->instance_name,
+ var_mode);
+ } else {
+ var = new(state) ir_variable(block_type,
+ this->instance_name,
+ var_mode);
+ }
+
+ var->data.matrix_layout = matrix_layout == GLSL_MATRIX_LAYOUT_INHERITED
+ ? GLSL_MATRIX_LAYOUT_COLUMN_MAJOR : matrix_layout;
+
+ if (var_mode == ir_var_shader_in || var_mode == ir_var_uniform)
+ var->data.read_only = true;
+
+ var->data.patch = this->layout.flags.q.patch;
+
+ if (state->stage == MESA_SHADER_GEOMETRY && var_mode == ir_var_shader_in)
+ handle_geometry_shader_input_decl(state, loc, var);
+ else if ((state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL) && var_mode == ir_var_shader_in)
+ handle_tess_shader_input_decl(state, loc, var);
+ else if (state->stage == MESA_SHADER_TESS_CTRL && var_mode == ir_var_shader_out)
+ handle_tess_ctrl_shader_output_decl(state, loc, var);
+
+ for (unsigned i = 0; i < num_variables; i++) {
+ if (var->data.mode == ir_var_shader_storage)
+ apply_memory_qualifiers(var, fields[i]);
+ }
+
+ if (ir_variable *earlier =
+ state->symbols->get_variable(this->instance_name)) {
+ if (!redeclaring_per_vertex) {
+ _mesa_glsl_error(&loc, state, "`%s' redeclared",
+ this->instance_name);
+ }
+ earlier->data.how_declared = ir_var_declared_normally;
+ earlier->type = var->type;
+ earlier->reinit_interface_type(block_type);
+ delete var;
+ } else {
+ if (this->layout.flags.q.explicit_binding) {
+ apply_explicit_binding(state, &loc, var, var->type,
+ &this->layout);
+ }
+
+ var->data.stream = qual_stream;
+ if (layout.flags.q.explicit_location) {
+ var->data.location = expl_location;
+ var->data.explicit_location = true;
+ }
+
+ state->symbols->add_variable(var);
+ instructions->push_tail(var);
+ }
+ } else {
+ /* In order to have an array size, the block must also be declared with
+ * an instance name.
+ */
+ assert(this->array_specifier == NULL);
+
+ for (unsigned i = 0; i < num_variables; i++) {
+ ir_variable *var =
+ new(state) ir_variable(fields[i].type,
+ ralloc_strdup(state, fields[i].name),
+ var_mode);
+ var->data.interpolation = fields[i].interpolation;
+ var->data.centroid = fields[i].centroid;
+ var->data.sample = fields[i].sample;
+ var->data.patch = fields[i].patch;
+ var->data.stream = qual_stream;
+ var->data.location = fields[i].location;
+
+ if (fields[i].location != -1)
+ var->data.explicit_location = true;
+
+ var->data.explicit_xfb_buffer = fields[i].explicit_xfb_buffer;
+ var->data.xfb_buffer = fields[i].xfb_buffer;
+
+ if (fields[i].offset != -1)
+ var->data.explicit_xfb_offset = true;
+ var->data.offset = fields[i].offset;
+
+ var->init_interface_type(block_type);
+
+ if (var_mode == ir_var_shader_in || var_mode == ir_var_uniform)
+ var->data.read_only = true;
+
+ /* Precision qualifiers do not have any meaning in Desktop GLSL */
+ if (state->es_shader) {
+ var->data.precision =
+ select_gles_precision(fields[i].precision, fields[i].type,
+ state, &loc);
+ }
+
+ if (fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_INHERITED) {
+ var->data.matrix_layout = matrix_layout == GLSL_MATRIX_LAYOUT_INHERITED
+ ? GLSL_MATRIX_LAYOUT_COLUMN_MAJOR : matrix_layout;
+ } else {
+ var->data.matrix_layout = fields[i].matrix_layout;
+ }
+
+ if (var->data.mode == ir_var_shader_storage)
+ apply_memory_qualifiers(var, fields[i]);
+
+ /* Examine var name here since var may get deleted in the next call */
+ bool var_is_gl_id = is_gl_identifier(var->name);
+
+ if (redeclaring_per_vertex) {
+ bool is_redeclaration;
+ var =
+ get_variable_being_redeclared(&var, loc, state,
+ true /* allow_all_redeclarations */,
+ &is_redeclaration);
+ if (!var_is_gl_id || !is_redeclaration) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of gl_PerVertex can only "
+ "include built-in variables");
+ } else if (var->data.how_declared == ir_var_declared_normally) {
+ _mesa_glsl_error(&loc, state,
+ "`%s' has already been redeclared",
+ var->name);
+ } else {
+ var->data.how_declared = ir_var_declared_in_block;
+ var->reinit_interface_type(block_type);
+ }
+ continue;
+ }
+
+ if (state->symbols->get_variable(var->name) != NULL)
+ _mesa_glsl_error(&loc, state, "`%s' redeclared", var->name);
+
+ /* Propagate the "binding" keyword into this UBO/SSBO's fields.
+ * The UBO declaration itself doesn't get an ir_variable unless it
+ * has an instance name. This is ugly.
+ */
+ if (this->layout.flags.q.explicit_binding) {
+ apply_explicit_binding(state, &loc, var,
+ var->get_interface_type(), &this->layout);
+ }
+
+ if (var->type->is_unsized_array()) {
+ if (var->is_in_shader_storage_block() &&
+ is_unsized_array_last_element(var)) {
+ var->data.from_ssbo_unsized_array = true;
+ } else {
+ /* From GLSL ES 3.10 spec, section 4.1.9 "Arrays":
+ *
+ * "If an array is declared as the last member of a shader storage
+ * block and the size is not specified at compile-time, it is
+ * sized at run-time. In all other cases, arrays are sized only
+ * at compile-time."
+ *
+                * In desktop GLSL it is allowed to have unsized arrays that are
+ * not last, as long as we can determine that they are implicitly
+ * sized.
+ */
+ if (state->es_shader) {
+ _mesa_glsl_error(&loc, state, "unsized array `%s' "
+ "definition: only last member of a shader "
+ "storage block can be defined as unsized "
+ "array", fields[i].name);
+ }
+ }
+ }
+
+ state->symbols->add_variable(var);
+ instructions->push_tail(var);
+ }
+
+ if (redeclaring_per_vertex && block_type != earlier_per_vertex) {
+ /* From section 7.1 ("Built-in Language Variables") of the GLSL 4.10 spec:
+ *
+ * It is also a compilation error ... to redeclare a built-in
+ * block and then use a member from that built-in block that was
+ * not included in the redeclaration.
+ *
+          * This appears to be a clarification of the behaviour established
+          * for gl_PerVertex by GLSL 1.50, so we implement this behaviour
+          * regardless of GLSL version.
+ *
+ * To prevent the shader from using a member that was not included in
+ * the redeclaration, we disable any ir_variables that are still
+ * associated with the old declaration of gl_PerVertex (since we've
+ * already updated all of the variables contained in the new
+ * gl_PerVertex to point to it).
+ *
+ * As a side effect this will prevent
+ * validate_intrastage_interface_blocks() from getting confused and
+ * thinking there are conflicting definitions of gl_PerVertex in the
+ * shader.
+ */
+ foreach_in_list_safe(ir_instruction, node, instructions) {
+ ir_variable *const var = node->as_variable();
+ if (var != NULL &&
+ var->get_interface_type() == earlier_per_vertex &&
+ var->data.mode == var_mode) {
+ if (var->data.how_declared == ir_var_declared_normally) {
+ _mesa_glsl_error(&loc, state,
+ "redeclaration of gl_PerVertex cannot "
+ "follow a redeclaration of `%s'",
+ var->name);
+ }
+ state->symbols->disable_variable(var->name);
+ var->remove();
+ }
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_tcs_output_layout::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ YYLTYPE loc = this->get_location();
+
+ unsigned num_vertices;
+ if (!state->out_qualifier->vertices->
+ process_qualifier_constant(state, "vertices", &num_vertices,
+ false)) {
+ /* return here to stop cascading incorrect error messages */
+ return NULL;
+ }
+
+ /* If any shader outputs occurred before this declaration and specified an
+ * array size, make sure the size they specified is consistent with the
+ * primitive type.
+ */
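+   /* For example, "layout(vertices = 4) out;" requires that any
+    * explicitly sized per-vertex output array have exactly four elements.
+    */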
+ if (state->tcs_output_size != 0 && state->tcs_output_size != num_vertices) {
+ _mesa_glsl_error(&loc, state,
+ "this tessellation control shader output layout "
+ "specifies %u vertices, but a previous output "
+ "is declared with size %u",
+ num_vertices, state->tcs_output_size);
+ return NULL;
+ }
+
+ state->tcs_output_vertices_specified = true;
+
+ /* If any shader outputs occurred before this declaration and did not
+ * specify an array size, their size is determined now.
+ */
+ foreach_in_list (ir_instruction, node, instructions) {
+ ir_variable *var = node->as_variable();
+ if (var == NULL || var->data.mode != ir_var_shader_out)
+ continue;
+
+      /* Note: Not all tessellation control shader outputs are arrays. */
+ if (!var->type->is_unsized_array() || var->data.patch)
+ continue;
+
+ if (var->data.max_array_access >= (int)num_vertices) {
+ _mesa_glsl_error(&loc, state,
+ "this tessellation control shader output layout "
+ "specifies %u vertices, but an access to element "
+ "%u of output `%s' already exists", num_vertices,
+ var->data.max_array_access, var->name);
+ } else {
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ num_vertices);
+ }
+ }
+
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_gs_input_layout::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ YYLTYPE loc = this->get_location();
+
+ /* Should have been prevented by the parser. */
+ assert(!state->gs_input_prim_type_specified
+ || state->in_qualifier->prim_type == this->prim_type);
+
+ /* If any shader inputs occurred before this declaration and specified an
+ * array size, make sure the size they specified is consistent with the
+ * primitive type.
+ */
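+   /* For example, "layout(triangles) in;" implies three vertices per
+    * primitive, so a previous declaration such as "in vec4 a[3];" is
+    * consistent, while "in vec4 a[4];" is an error.
+    */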
+ unsigned num_vertices = vertices_per_prim(this->prim_type);
+ if (state->gs_input_size != 0 && state->gs_input_size != num_vertices) {
+ _mesa_glsl_error(&loc, state,
+ "this geometry shader input layout implies %u vertices"
+ " per primitive, but a previous input is declared"
+ " with size %u", num_vertices, state->gs_input_size);
+ return NULL;
+ }
+
+ state->gs_input_prim_type_specified = true;
+
+ /* If any shader inputs occurred before this declaration and did not
+ * specify an array size, their size is determined now.
+ */
+ foreach_in_list(ir_instruction, node, instructions) {
+ ir_variable *var = node->as_variable();
+ if (var == NULL || var->data.mode != ir_var_shader_in)
+ continue;
+
+ /* Note: gl_PrimitiveIDIn has mode ir_var_shader_in, but it's not an
+ * array; skip it.
+ */
+
+ if (var->type->is_unsized_array()) {
+ if (var->data.max_array_access >= (int)num_vertices) {
+ _mesa_glsl_error(&loc, state,
+ "this geometry shader input layout implies %u"
+ " vertices, but an access to element %u of input"
+ " `%s' already exists", num_vertices,
+ var->data.max_array_access, var->name);
+ } else {
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ num_vertices);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+ir_rvalue *
+ast_cs_input_layout::hir(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ YYLTYPE loc = this->get_location();
+
+ /* From the ARB_compute_shader specification:
+ *
+ * If the local size of the shader in any dimension is greater
+ * than the maximum size supported by the implementation for that
+ * dimension, a compile-time error results.
+ *
+ * It is not clear from the spec how the error should be reported if
+ * the total size of the work group exceeds
+ * MAX_COMPUTE_WORK_GROUP_INVOCATIONS, but it seems reasonable to
+ * report it at compile time as well.
+ */
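+   /* For example, "layout(local_size_x = 8, local_size_y = 8) in;" gives
+    * a local group size of 8 x 8 x 1 = 64 invocations, which must not
+    * exceed MAX_COMPUTE_WORK_GROUP_INVOCATIONS.
+    */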
+ GLuint64 total_invocations = 1;
+ unsigned qual_local_size[3];
+ for (int i = 0; i < 3; i++) {
+
+ char *local_size_str = ralloc_asprintf(NULL, "invalid local_size_%c",
+ 'x' + i);
+ /* Infer a local_size of 1 for unspecified dimensions */
+ if (this->local_size[i] == NULL) {
+ qual_local_size[i] = 1;
+ } else if (!this->local_size[i]->
+ process_qualifier_constant(state, local_size_str,
+ &qual_local_size[i], false)) {
+ ralloc_free(local_size_str);
+ return NULL;
+ }
+ ralloc_free(local_size_str);
+
+ if (qual_local_size[i] > state->ctx->Const.MaxComputeWorkGroupSize[i]) {
+ _mesa_glsl_error(&loc, state,
+ "local_size_%c exceeds MAX_COMPUTE_WORK_GROUP_SIZE"
+ " (%d)", 'x' + i,
+ state->ctx->Const.MaxComputeWorkGroupSize[i]);
+ break;
+ }
+ total_invocations *= qual_local_size[i];
+ if (total_invocations >
+ state->ctx->Const.MaxComputeWorkGroupInvocations) {
+ _mesa_glsl_error(&loc, state,
+ "product of local_sizes exceeds "
+ "MAX_COMPUTE_WORK_GROUP_INVOCATIONS (%d)",
+ state->ctx->Const.MaxComputeWorkGroupInvocations);
+ break;
+ }
+ }
+
+ /* If any compute input layout declaration preceded this one, make sure it
+ * was consistent with this one.
+ */
+ if (state->cs_input_local_size_specified) {
+ for (int i = 0; i < 3; i++) {
+ if (state->cs_input_local_size[i] != qual_local_size[i]) {
+ _mesa_glsl_error(&loc, state,
+ "compute shader input layout does not match"
+ " previous declaration");
+ return NULL;
+ }
+ }
+ }
+
+ /* The ARB_compute_variable_group_size spec says:
+ *
+ * If a compute shader including a *local_size_variable* qualifier also
+ * declares a fixed local group size using the *local_size_x*,
+ * *local_size_y*, or *local_size_z* qualifiers, a compile-time error
+ * results
+ */
+ if (state->cs_input_local_size_variable_specified) {
+ _mesa_glsl_error(&loc, state,
+ "compute shader can't include both a variable and a "
+ "fixed local group size");
+ return NULL;
+ }
+
+ state->cs_input_local_size_specified = true;
+ for (int i = 0; i < 3; i++)
+ state->cs_input_local_size[i] = qual_local_size[i];
+
+ /* We may now declare the built-in constant gl_WorkGroupSize (see
+ * builtin_variable_generator::generate_constants() for why we didn't
+ * declare it earlier).
+ */
+ ir_variable *var = new(state->symbols)
+ ir_variable(glsl_type::uvec3_type, "gl_WorkGroupSize", ir_var_auto);
+ var->data.how_declared = ir_var_declared_implicitly;
+ var->data.read_only = true;
+ instructions->push_tail(var);
+ state->symbols->add_variable(var);
+ ir_constant_data data;
+ memset(&data, 0, sizeof(data));
+ for (int i = 0; i < 3; i++)
+ data.u[i] = qual_local_size[i];
+ var->constant_value = new(var) ir_constant(glsl_type::uvec3_type, &data);
+ var->constant_initializer =
+ new(var) ir_constant(glsl_type::uvec3_type, &data);
+ var->data.has_initializer = true;
+
+ return NULL;
+}
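+
+/* Illustrative GLSL sketch (not part of the original sources): a compute
+ * shader input layout that the handler above validates and records; it also
+ * makes gl_WorkGroupSize available, here as uvec3(8, 8, 1):
+ *
+ *    #version 310 es
+ *    layout(local_size_x = 8, local_size_y = 8) in;
+ *    void main() { uvec3 size = gl_WorkGroupSize; }
+ */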
+
+
+static void
+detect_conflicting_assignments(struct _mesa_glsl_parse_state *state,
+ exec_list *instructions)
+{
+ bool gl_FragColor_assigned = false;
+ bool gl_FragData_assigned = false;
+ bool gl_FragSecondaryColor_assigned = false;
+ bool gl_FragSecondaryData_assigned = false;
+ bool user_defined_fs_output_assigned = false;
+ ir_variable *user_defined_fs_output = NULL;
+
+ /* It would be nice to have proper location information. */
+ YYLTYPE loc;
+ memset(&loc, 0, sizeof(loc));
+
+ foreach_in_list(ir_instruction, node, instructions) {
+ ir_variable *var = node->as_variable();
+
+ if (!var || !var->data.assigned)
+ continue;
+
+ if (strcmp(var->name, "gl_FragColor") == 0)
+ gl_FragColor_assigned = true;
+ else if (strcmp(var->name, "gl_FragData") == 0)
+ gl_FragData_assigned = true;
+ else if (strcmp(var->name, "gl_SecondaryFragColorEXT") == 0)
+ gl_FragSecondaryColor_assigned = true;
+ else if (strcmp(var->name, "gl_SecondaryFragDataEXT") == 0)
+ gl_FragSecondaryData_assigned = true;
+ else if (!is_gl_identifier(var->name)) {
+ if (state->stage == MESA_SHADER_FRAGMENT &&
+ var->data.mode == ir_var_shader_out) {
+ user_defined_fs_output_assigned = true;
+ user_defined_fs_output = var;
+ }
+ }
+ }
+
+ /* From the GLSL 1.30 spec:
+ *
+ * "If a shader statically assigns a value to gl_FragColor, it
+ * may not assign a value to any element of gl_FragData. If a
+ * shader statically writes a value to any element of
+ * gl_FragData, it may not assign a value to
+ * gl_FragColor. That is, a shader may assign values to either
+ * gl_FragColor or gl_FragData, but not both. Multiple shaders
+ * linked together must also consistently write just one of
+ * these variables. Similarly, if user declared output
+ * variables are in use (statically assigned to), then the
+ * built-in variables gl_FragColor and gl_FragData may not be
+ * assigned to. These incorrect usages all generate compile
+ * time errors."
+ */
+ if (gl_FragColor_assigned && gl_FragData_assigned) {
+ _mesa_glsl_error(&loc, state, "fragment shader writes to both "
+ "`gl_FragColor' and `gl_FragData'");
+ } else if (gl_FragColor_assigned && user_defined_fs_output_assigned) {
+ _mesa_glsl_error(&loc, state, "fragment shader writes to both "
+ "`gl_FragColor' and `%s'",
+ user_defined_fs_output->name);
+ } else if (gl_FragSecondaryColor_assigned && gl_FragSecondaryData_assigned) {
+ _mesa_glsl_error(&loc, state, "fragment shader writes to both "
+ "`gl_FragSecondaryColorEXT' and"
+ " `gl_FragSecondaryDataEXT'");
+ } else if (gl_FragColor_assigned && gl_FragSecondaryData_assigned) {
+ _mesa_glsl_error(&loc, state, "fragment shader writes to both "
+ "`gl_FragColor' and"
+ " `gl_FragSecondaryDataEXT'");
+ } else if (gl_FragData_assigned && gl_FragSecondaryColor_assigned) {
+ _mesa_glsl_error(&loc, state, "fragment shader writes to both "
+ "`gl_FragData' and"
+ " `gl_FragSecondaryColorEXT'");
+ } else if (gl_FragData_assigned && user_defined_fs_output_assigned) {
+ _mesa_glsl_error(&loc, state, "fragment shader writes to both "
+ "`gl_FragData' and `%s'",
+ user_defined_fs_output->name);
+ }
+
+ if ((gl_FragSecondaryColor_assigned || gl_FragSecondaryData_assigned) &&
+ !state->EXT_blend_func_extended_enable) {
+ _mesa_glsl_error(&loc, state,
+ "Dual source blending requires EXT_blend_func_extended");
+ }
+}
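+
+/* Illustrative GLSL sketch (not part of the original sources): a fragment
+ * shader that the check above rejects because it statically assigns both
+ * gl_FragColor and gl_FragData:
+ *
+ *    #version 120
+ *    void main() {
+ *       gl_FragColor = vec4(1.0);
+ *       gl_FragData[0] = vec4(0.0);   // error: writes to both
+ *    }
+ */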
+
+static void
+verify_subroutine_associated_funcs(struct _mesa_glsl_parse_state *state)
+{
+ YYLTYPE loc;
+ memset(&loc, 0, sizeof(loc));
+
+ /* Section 6.1.2 (Subroutines) of the GLSL 4.00 spec says:
+ *
+ * "A program will fail to compile or link if any shader
+ * or stage contains two or more functions with the same
+ * name if the name is associated with a subroutine type."
+ */
+
+ for (int i = 0; i < state->num_subroutines; i++) {
+ unsigned definitions = 0;
+ ir_function *fn = state->subroutines[i];
+ /* Calculate number of function definitions with the same name */
+ foreach_in_list(ir_function_signature, sig, &fn->signatures) {
+ if (sig->is_defined) {
+ if (++definitions > 1) {
+ _mesa_glsl_error(&loc, state,
+ "%s shader contains two or more function "
+ "definitions with name `%s', which is "
+ "associated with a subroutine type.\n",
+ _mesa_shader_stage_to_string(state->stage),
+ fn->name);
+ return;
+ }
+ }
+ }
+ }
+}
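+
+/* Illustrative GLSL sketch (not part of the original sources): two function
+ * definitions sharing a name that is associated with a subroutine type,
+ * which the check above rejects:
+ *
+ *    #version 400
+ *    subroutine vec4 shade_t(vec4 c);
+ *    subroutine(shade_t) vec4 shade(vec4 c) { return c; }
+ *    vec4 shade(float c) { return vec4(c); }   // error: name reuse
+ */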
+
+static void
+remove_per_vertex_blocks(exec_list *instructions,
+ _mesa_glsl_parse_state *state, ir_variable_mode mode)
+{
+ /* Find the gl_PerVertex interface block of the appropriate (in/out) mode,
+ * if it exists in this shader type.
+ */
+ const glsl_type *per_vertex = NULL;
+ switch (mode) {
+ case ir_var_shader_in:
+ if (ir_variable *gl_in = state->symbols->get_variable("gl_in"))
+ per_vertex = gl_in->get_interface_type();
+ break;
+ case ir_var_shader_out:
+ if (ir_variable *gl_Position =
+ state->symbols->get_variable("gl_Position")) {
+ per_vertex = gl_Position->get_interface_type();
+ }
+ break;
+ default:
+ assert(!"Unexpected mode");
+ break;
+ }
+
+ /* If we didn't find a built-in gl_PerVertex interface block, then we don't
+ * need to do anything.
+ */
+ if (per_vertex == NULL)
+ return;
+
+ /* If the interface block is used by the shader, then we don't need to do
+ * anything.
+ */
+ interface_block_usage_visitor v(mode, per_vertex);
+ v.run(instructions);
+ if (v.usage_found())
+ return;
+
+ /* Remove any ir_variable declarations that refer to the interface block
+ * we're removing.
+ */
+ foreach_in_list_safe(ir_instruction, node, instructions) {
+ ir_variable *const var = node->as_variable();
+ if (var != NULL && var->get_interface_type() == per_vertex &&
+ var->data.mode == mode) {
+ state->symbols->disable_variable(var->name);
+ var->remove();
+ }
+ }
+}
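+
+/* Illustrative GLSL sketch (not part of the original sources): a vertex
+ * shader that never touches gl_Position (or any other gl_PerVertex member),
+ * so the pass above drops the unused built-in gl_PerVertex output block:
+ *
+ *    #version 150
+ *    out vec4 color;
+ *    void main() { color = vec4(1.0); }
+ */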
+
+ir_rvalue *
+ast_warnings_toggle::hir(exec_list *,
+ struct _mesa_glsl_parse_state *state)
+{
+ state->warnings_enabled = enable;
+ return NULL;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_type.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_type.cpp
new file mode 100644
index 0000000000..8026302f0a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ast_type.cpp
@@ -0,0 +1,1012 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ast.h"
+
+void
+ast_type_specifier::print(void) const
+{
+ if (structure) {
+ structure->print();
+ } else {
+ printf("%s ", type_name);
+ }
+
+ if (array_specifier) {
+ array_specifier->print();
+ }
+}
+
+bool
+ast_fully_specified_type::has_qualifiers(_mesa_glsl_parse_state *state) const
+{
+ /* 'subroutine' isn't a real qualifier. */
+ ast_type_qualifier subroutine_only;
+ subroutine_only.flags.i = 0;
+ subroutine_only.flags.q.subroutine = 1;
+ if (state->has_explicit_uniform_location()) {
+ subroutine_only.flags.q.explicit_index = 1;
+ }
+ return (this->qualifier.flags.i & ~subroutine_only.flags.i) != 0;
+}
+
+bool ast_type_qualifier::has_interpolation() const
+{
+ return this->flags.q.smooth
+ || this->flags.q.flat
+ || this->flags.q.noperspective;
+}
+
+bool
+ast_type_qualifier::has_layout() const
+{
+ return this->flags.q.origin_upper_left
+ || this->flags.q.pixel_center_integer
+ || this->flags.q.depth_type
+ || this->flags.q.std140
+ || this->flags.q.std430
+ || this->flags.q.shared
+ || this->flags.q.column_major
+ || this->flags.q.row_major
+ || this->flags.q.packed
+ || this->flags.q.bindless_sampler
+ || this->flags.q.bindless_image
+ || this->flags.q.bound_sampler
+ || this->flags.q.bound_image
+ || this->flags.q.explicit_align
+ || this->flags.q.explicit_component
+ || this->flags.q.explicit_location
+ || this->flags.q.explicit_image_format
+ || this->flags.q.explicit_index
+ || this->flags.q.explicit_binding
+ || this->flags.q.explicit_offset
+ || this->flags.q.explicit_stream
+ || this->flags.q.explicit_xfb_buffer
+ || this->flags.q.explicit_xfb_offset
+ || this->flags.q.explicit_xfb_stride;
+}
+
+bool
+ast_type_qualifier::has_storage() const
+{
+ return this->flags.q.constant
+ || this->flags.q.attribute
+ || this->flags.q.varying
+ || this->flags.q.in
+ || this->flags.q.out
+ || this->flags.q.uniform
+ || this->flags.q.buffer
+ || this->flags.q.shared_storage;
+}
+
+bool
+ast_type_qualifier::has_auxiliary_storage() const
+{
+ return this->flags.q.centroid
+ || this->flags.q.sample
+ || this->flags.q.patch;
+}
+
+bool ast_type_qualifier::has_memory() const
+{
+ return this->flags.q.coherent
+ || this->flags.q._volatile
+ || this->flags.q.restrict_flag
+ || this->flags.q.read_only
+ || this->flags.q.write_only;
+}
+
+bool ast_type_qualifier::is_subroutine_decl() const
+{
+ return this->flags.q.subroutine && !this->subroutine_list;
+}
+
+static bool
+validate_prim_type(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &qualifier,
+ const ast_type_qualifier &new_qualifier)
+{
+ /* Input layout qualifiers can be specified multiple
+ * times in separate declarations, as long as they match.
+ */
+ if (qualifier.flags.q.prim_type && new_qualifier.flags.q.prim_type
+ && qualifier.prim_type != new_qualifier.prim_type) {
+ _mesa_glsl_error(loc, state,
+ "conflicting input primitive %s specified",
+ state->stage == MESA_SHADER_GEOMETRY ?
+ "type" : "mode");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+validate_vertex_spacing(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &qualifier,
+ const ast_type_qualifier &new_qualifier)
+{
+ if (qualifier.flags.q.vertex_spacing && new_qualifier.flags.q.vertex_spacing
+ && qualifier.vertex_spacing != new_qualifier.vertex_spacing) {
+ _mesa_glsl_error(loc, state,
+ "conflicting vertex spacing specified");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+validate_ordering(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &qualifier,
+ const ast_type_qualifier &new_qualifier)
+{
+ if (qualifier.flags.q.ordering && new_qualifier.flags.q.ordering
+ && qualifier.ordering != new_qualifier.ordering) {
+ _mesa_glsl_error(loc, state,
+ "conflicting ordering specified");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+validate_point_mode(ASSERTED const ast_type_qualifier &qualifier,
+ ASSERTED const ast_type_qualifier &new_qualifier)
+{
+ /* Point mode can only be true if the flag is set. */
+ assert(!qualifier.flags.q.point_mode || !new_qualifier.flags.q.point_mode
+ || (qualifier.point_mode && new_qualifier.point_mode));
+
+ return true;
+}
+
+static void
+merge_bindless_qualifier(_mesa_glsl_parse_state *state)
+{
+ if (state->default_uniform_qualifier->flags.q.bindless_sampler) {
+ state->bindless_sampler_specified = true;
+ state->default_uniform_qualifier->flags.q.bindless_sampler = false;
+ }
+
+ if (state->default_uniform_qualifier->flags.q.bindless_image) {
+ state->bindless_image_specified = true;
+ state->default_uniform_qualifier->flags.q.bindless_image = false;
+ }
+
+ if (state->default_uniform_qualifier->flags.q.bound_sampler) {
+ state->bound_sampler_specified = true;
+ state->default_uniform_qualifier->flags.q.bound_sampler = false;
+ }
+
+ if (state->default_uniform_qualifier->flags.q.bound_image) {
+ state->bound_image_specified = true;
+ state->default_uniform_qualifier->flags.q.bound_image = false;
+ }
+}
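+
+/* Illustrative GLSL sketch (not part of the original sources): a default
+ * uniform qualifier whose bindless_sampler flag the helper above records in
+ * the parse state and then clears:
+ *
+ *    #version 400
+ *    #extension GL_ARB_bindless_texture : require
+ *    layout(bindless_sampler) uniform;
+ */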
+
+/**
+ * This function merges duplicate layout identifiers.
+ *
+ * It deals with duplicates within a single layout qualifier, among multiple
+ * layout qualifiers on a single declaration and on several declarations for
+ * the same variable.
+ *
+ * The is_single_layout_merge and is_multiple_layouts_merge parameters are
+ * used to differentiate among them.
+ */
+bool
+ast_type_qualifier::merge_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &q,
+ bool is_single_layout_merge,
+ bool is_multiple_layouts_merge)
+{
+ bool r = true;
+ ast_type_qualifier ubo_mat_mask;
+ ubo_mat_mask.flags.i = 0;
+ ubo_mat_mask.flags.q.row_major = 1;
+ ubo_mat_mask.flags.q.column_major = 1;
+
+ ast_type_qualifier ubo_layout_mask;
+ ubo_layout_mask.flags.i = 0;
+ ubo_layout_mask.flags.q.std140 = 1;
+ ubo_layout_mask.flags.q.packed = 1;
+ ubo_layout_mask.flags.q.shared = 1;
+ ubo_layout_mask.flags.q.std430 = 1;
+
+ ast_type_qualifier ubo_binding_mask;
+ ubo_binding_mask.flags.i = 0;
+ ubo_binding_mask.flags.q.explicit_binding = 1;
+ ubo_binding_mask.flags.q.explicit_offset = 1;
+
+ ast_type_qualifier stream_layout_mask;
+ stream_layout_mask.flags.i = 0;
+ stream_layout_mask.flags.q.stream = 1;
+
+ /* FIXME: We should probably do interface and function param validation
+ * separately.
+ */
+ ast_type_qualifier input_layout_mask;
+ input_layout_mask.flags.i = 0;
+ input_layout_mask.flags.q.centroid = 1;
+ /* Function params can be declared 'const' */
+ input_layout_mask.flags.q.constant = 1;
+ input_layout_mask.flags.q.explicit_component = 1;
+ input_layout_mask.flags.q.explicit_location = 1;
+ input_layout_mask.flags.q.flat = 1;
+ input_layout_mask.flags.q.in = 1;
+ input_layout_mask.flags.q.invariant = 1;
+ input_layout_mask.flags.q.noperspective = 1;
+ input_layout_mask.flags.q.origin_upper_left = 1;
+ /* Function params 'inout' will set this */
+ input_layout_mask.flags.q.out = 1;
+ input_layout_mask.flags.q.patch = 1;
+ input_layout_mask.flags.q.pixel_center_integer = 1;
+ input_layout_mask.flags.q.precise = 1;
+ input_layout_mask.flags.q.sample = 1;
+ input_layout_mask.flags.q.smooth = 1;
+ input_layout_mask.flags.q.non_coherent = 1;
+
+ if (state->has_bindless()) {
+ /* Allow image qualifiers to be used with shader inputs/outputs. */
+ input_layout_mask.flags.q.coherent = 1;
+ input_layout_mask.flags.q._volatile = 1;
+ input_layout_mask.flags.q.restrict_flag = 1;
+ input_layout_mask.flags.q.read_only = 1;
+ input_layout_mask.flags.q.write_only = 1;
+ input_layout_mask.flags.q.explicit_image_format = 1;
+ }
+
+ /* Uniform block layout qualifiers get to overwrite each
+ * other (rightmost having priority), while all other
+ * qualifiers currently don't allow duplicates.
+ */
+ ast_type_qualifier allowed_duplicates_mask;
+ allowed_duplicates_mask.flags.i =
+ ubo_mat_mask.flags.i |
+ ubo_layout_mask.flags.i |
+ ubo_binding_mask.flags.i;
+
+ /* Geometry shaders can have several layout qualifiers
+ * assigning different stream values.
+ */
+ if (state->stage == MESA_SHADER_GEOMETRY) {
+ allowed_duplicates_mask.flags.i |=
+ stream_layout_mask.flags.i;
+ }
+
+ if (is_single_layout_merge && !state->has_enhanced_layouts() &&
+ (this->flags.i & q.flags.i & ~allowed_duplicates_mask.flags.i) != 0) {
+ _mesa_glsl_error(loc, state, "duplicate layout qualifiers used");
+ return false;
+ }
+
+ if (is_multiple_layouts_merge && !state->has_420pack_or_es31()) {
+ _mesa_glsl_error(loc, state,
+ "duplicate layout(...) qualifiers");
+ return false;
+ }
+
+ if (q.flags.q.prim_type) {
+ r &= validate_prim_type(loc, state, *this, q);
+ this->flags.q.prim_type = 1;
+ this->prim_type = q.prim_type;
+ }
+
+ if (q.flags.q.max_vertices) {
+ if (this->flags.q.max_vertices
+ && !is_single_layout_merge && !is_multiple_layouts_merge) {
+ this->max_vertices->merge_qualifier(q.max_vertices);
+ } else {
+ this->flags.q.max_vertices = 1;
+ this->max_vertices = q.max_vertices;
+ }
+ }
+
+ if (q.subroutine_list) {
+ if (this->subroutine_list) {
+ _mesa_glsl_error(loc, state,
+ "conflicting subroutine qualifiers used");
+ } else {
+ this->subroutine_list = q.subroutine_list;
+ }
+ }
+
+ if (q.flags.q.invocations) {
+ if (this->flags.q.invocations
+ && !is_single_layout_merge && !is_multiple_layouts_merge) {
+ this->invocations->merge_qualifier(q.invocations);
+ } else {
+ this->flags.q.invocations = 1;
+ this->invocations = q.invocations;
+ }
+ }
+
+ if (state->stage == MESA_SHADER_GEOMETRY &&
+ state->has_explicit_attrib_stream()) {
+ if (!this->flags.q.explicit_stream) {
+ if (q.flags.q.stream) {
+ this->flags.q.stream = 1;
+ this->stream = q.stream;
+ } else if (!this->flags.q.stream && this->flags.q.out &&
+ !this->flags.q.in) {
+ /* Assign default global stream value */
+ this->flags.q.stream = 1;
+ this->stream = state->out_qualifier->stream;
+ }
+ }
+ }
+
+ if (state->has_enhanced_layouts()) {
+ if (!this->flags.q.explicit_xfb_buffer) {
+ if (q.flags.q.xfb_buffer) {
+ this->flags.q.xfb_buffer = 1;
+ this->xfb_buffer = q.xfb_buffer;
+ } else if (!this->flags.q.xfb_buffer && this->flags.q.out &&
+ !this->flags.q.in) {
+ /* Assign global xfb_buffer value */
+ this->flags.q.xfb_buffer = 1;
+ this->xfb_buffer = state->out_qualifier->xfb_buffer;
+ }
+ }
+
+ if (q.flags.q.explicit_xfb_stride) {
+ this->flags.q.xfb_stride = 1;
+ this->flags.q.explicit_xfb_stride = 1;
+ this->xfb_stride = q.xfb_stride;
+ }
+ }
+
+ if (q.flags.q.vertices) {
+ if (this->flags.q.vertices
+ && !is_single_layout_merge && !is_multiple_layouts_merge) {
+ this->vertices->merge_qualifier(q.vertices);
+ } else {
+ this->flags.q.vertices = 1;
+ this->vertices = q.vertices;
+ }
+ }
+
+ if (q.flags.q.vertex_spacing) {
+ r &= validate_vertex_spacing(loc, state, *this, q);
+ this->flags.q.vertex_spacing = 1;
+ this->vertex_spacing = q.vertex_spacing;
+ }
+
+ if (q.flags.q.ordering) {
+ r &= validate_ordering(loc, state, *this, q);
+ this->flags.q.ordering = 1;
+ this->ordering = q.ordering;
+ }
+
+ if (q.flags.q.point_mode) {
+ r &= validate_point_mode(*this, q);
+ this->flags.q.point_mode = 1;
+ this->point_mode = q.point_mode;
+ }
+
+ if (q.flags.q.early_fragment_tests)
+ this->flags.q.early_fragment_tests = true;
+
+ if ((q.flags.i & ubo_mat_mask.flags.i) != 0)
+ this->flags.i &= ~ubo_mat_mask.flags.i;
+ if ((q.flags.i & ubo_layout_mask.flags.i) != 0)
+ this->flags.i &= ~ubo_layout_mask.flags.i;
+
+ for (int i = 0; i < 3; i++) {
+ if (q.flags.q.local_size & (1 << i)) {
+ if (this->local_size[i]
+ && !is_single_layout_merge && !is_multiple_layouts_merge) {
+ this->local_size[i]->merge_qualifier(q.local_size[i]);
+ } else {
+ this->local_size[i] = q.local_size[i];
+ }
+ }
+ }
+
+ if (q.flags.q.local_size_variable)
+ this->flags.q.local_size_variable = true;
+
+ if (q.flags.q.bindless_sampler)
+ this->flags.q.bindless_sampler = true;
+
+ if (q.flags.q.bindless_image)
+ this->flags.q.bindless_image = true;
+
+ if (q.flags.q.bound_sampler)
+ this->flags.q.bound_sampler = true;
+
+ if (q.flags.q.bound_image)
+ this->flags.q.bound_image = true;
+
+ if (q.flags.q.derivative_group) {
+ this->flags.q.derivative_group = true;
+ this->derivative_group = q.derivative_group;
+ }
+
+ this->flags.i |= q.flags.i;
+
+ if (this->flags.q.in &&
+ (this->flags.i & ~input_layout_mask.flags.i) != 0) {
+ _mesa_glsl_error(loc, state, "invalid input layout qualifier used");
+ return false;
+ }
+
+ if (q.flags.q.explicit_align)
+ this->align = q.align;
+
+ if (q.flags.q.explicit_location)
+ this->location = q.location;
+
+ if (q.flags.q.explicit_index)
+ this->index = q.index;
+
+ if (q.flags.q.explicit_component)
+ this->component = q.component;
+
+ if (q.flags.q.explicit_binding)
+ this->binding = q.binding;
+
+ if (q.flags.q.explicit_offset || q.flags.q.explicit_xfb_offset)
+ this->offset = q.offset;
+
+ if (q.precision != ast_precision_none)
+ this->precision = q.precision;
+
+ if (q.flags.q.explicit_image_format) {
+ this->image_format = q.image_format;
+ this->image_base_type = q.image_base_type;
+ }
+
+ if (q.flags.q.bindless_sampler ||
+ q.flags.q.bindless_image ||
+ q.flags.q.bound_sampler ||
+ q.flags.q.bound_image)
+ merge_bindless_qualifier(state);
+
+ if (state->EXT_gpu_shader4_enable &&
+ state->stage == MESA_SHADER_FRAGMENT &&
+ this->flags.q.varying && q.flags.q.out) {
+ this->flags.q.varying = 0;
+ this->flags.q.out = 1;
+ }
+
+ return r;
+}
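+
+/* Illustrative GLSL sketch (not part of the original sources): UBO layout
+ * qualifiers are in the allowed-duplicates mask above, so they may be
+ * repeated with the rightmost occurrence winning, while most other
+ * qualifiers may not be duplicated:
+ *
+ *    #version 140
+ *    layout(std140, row_major, column_major) uniform Block {
+ *       mat4 m;                      // column_major takes effect
+ *    };
+ */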
+
+bool
+ast_type_qualifier::validate_out_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state)
+{
+ bool r = true;
+ ast_type_qualifier valid_out_mask;
+ valid_out_mask.flags.i = 0;
+
+ switch (state->stage) {
+ case MESA_SHADER_GEOMETRY:
+ if (this->flags.q.prim_type) {
+ /* Make sure this is a valid output primitive type. */
+ switch (this->prim_type) {
+ case GL_POINTS:
+ case GL_LINE_STRIP:
+ case GL_TRIANGLE_STRIP:
+ break;
+ default:
+ r = false;
+ _mesa_glsl_error(loc, state, "invalid geometry shader output "
+ "primitive type");
+ break;
+ }
+ }
+
+ valid_out_mask.flags.q.stream = 1;
+ valid_out_mask.flags.q.explicit_stream = 1;
+ valid_out_mask.flags.q.explicit_xfb_buffer = 1;
+ valid_out_mask.flags.q.xfb_buffer = 1;
+ valid_out_mask.flags.q.explicit_xfb_stride = 1;
+ valid_out_mask.flags.q.xfb_stride = 1;
+ valid_out_mask.flags.q.max_vertices = 1;
+ valid_out_mask.flags.q.prim_type = 1;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ valid_out_mask.flags.q.vertices = 1;
+ valid_out_mask.flags.q.explicit_xfb_buffer = 1;
+ valid_out_mask.flags.q.xfb_buffer = 1;
+ valid_out_mask.flags.q.explicit_xfb_stride = 1;
+ valid_out_mask.flags.q.xfb_stride = 1;
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_VERTEX:
+ valid_out_mask.flags.q.explicit_xfb_buffer = 1;
+ valid_out_mask.flags.q.xfb_buffer = 1;
+ valid_out_mask.flags.q.explicit_xfb_stride = 1;
+ valid_out_mask.flags.q.xfb_stride = 1;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ valid_out_mask.flags.q.blend_support = 1;
+ break;
+ default:
+ r = false;
+ _mesa_glsl_error(loc, state,
+ "out layout qualifiers only valid in "
+ "geometry, tessellation, vertex and fragment shaders");
+ }
+
+ /* Generate an error when invalid output layout qualifiers are used. */
+ if ((this->flags.i & ~valid_out_mask.flags.i) != 0) {
+ r = false;
+ _mesa_glsl_error(loc, state, "invalid output layout qualifiers used");
+ }
+
+ return r;
+}
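+
+/* Illustrative GLSL sketch (not part of the original sources): a geometry
+ * shader default 'out' layout that passes the validation above:
+ *
+ *    #version 150
+ *    layout(triangles) in;
+ *    layout(triangle_strip, max_vertices = 4) out;
+ *    void main() {}
+ */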
+
+bool
+ast_type_qualifier::merge_into_out_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ ast_node* &node)
+{
+ const bool r = state->out_qualifier->merge_qualifier(loc, state,
+ *this, false);
+
+ switch (state->stage) {
+ case MESA_SHADER_GEOMETRY:
+ /* Allow future assignments of global out's stream id value */
+ state->out_qualifier->flags.q.explicit_stream = 0;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ node = new(state->linalloc) ast_tcs_output_layout(*loc);
+ break;
+ default:
+ break;
+ }
+
+ /* Allow future assignments of global out's */
+ state->out_qualifier->flags.q.explicit_xfb_buffer = 0;
+ state->out_qualifier->flags.q.explicit_xfb_stride = 0;
+
+ return r;
+}
+
+bool
+ast_type_qualifier::validate_in_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state)
+{
+ bool r = true;
+ ast_type_qualifier valid_in_mask;
+ valid_in_mask.flags.i = 0;
+
+ switch (state->stage) {
+ case MESA_SHADER_TESS_EVAL:
+ if (this->flags.q.prim_type) {
+ /* Make sure this is a valid input primitive type. */
+ switch (this->prim_type) {
+ case GL_TRIANGLES:
+ case GL_QUADS:
+ case GL_ISOLINES:
+ break;
+ default:
+ r = false;
+ _mesa_glsl_error(loc, state,
+ "invalid tessellation evaluation "
+ "shader input primitive type");
+ break;
+ }
+ }
+
+ valid_in_mask.flags.q.prim_type = 1;
+ valid_in_mask.flags.q.vertex_spacing = 1;
+ valid_in_mask.flags.q.ordering = 1;
+ valid_in_mask.flags.q.point_mode = 1;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ if (this->flags.q.prim_type) {
+ /* Make sure this is a valid input primitive type. */
+ switch (this->prim_type) {
+ case GL_POINTS:
+ case GL_LINES:
+ case GL_LINES_ADJACENCY:
+ case GL_TRIANGLES:
+ case GL_TRIANGLES_ADJACENCY:
+ break;
+ default:
+ r = false;
+ _mesa_glsl_error(loc, state,
+ "invalid geometry shader input primitive type");
+ break;
+ }
+ }
+
+ valid_in_mask.flags.q.prim_type = 1;
+ valid_in_mask.flags.q.invocations = 1;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ valid_in_mask.flags.q.early_fragment_tests = 1;
+ valid_in_mask.flags.q.inner_coverage = 1;
+ valid_in_mask.flags.q.post_depth_coverage = 1;
+ valid_in_mask.flags.q.pixel_interlock_ordered = 1;
+ valid_in_mask.flags.q.pixel_interlock_unordered = 1;
+ valid_in_mask.flags.q.sample_interlock_ordered = 1;
+ valid_in_mask.flags.q.sample_interlock_unordered = 1;
+ break;
+ case MESA_SHADER_COMPUTE:
+ valid_in_mask.flags.q.local_size = 7;
+ valid_in_mask.flags.q.local_size_variable = 1;
+ valid_in_mask.flags.q.derivative_group = 1;
+ break;
+ default:
+ r = false;
+ _mesa_glsl_error(loc, state,
+ "input layout qualifiers only valid in "
+ "geometry, tessellation, fragment and compute shaders");
+ break;
+ }
+
+ /* Generate an error when invalid input layout qualifiers are used. */
+ if ((this->flags.i & ~valid_in_mask.flags.i) != 0) {
+ r = false;
+ _mesa_glsl_error(loc, state, "invalid input layout qualifiers used");
+ }
+
+ /* The checks below are also performed when merging, but we want to emit an
+ * error against the default global input qualifier as soon as possible,
+ * with the closest error location in the shader.
+ */
+ r &= validate_prim_type(loc, state, *state->in_qualifier, *this);
+ r &= validate_vertex_spacing(loc, state, *state->in_qualifier, *this);
+ r &= validate_ordering(loc, state, *state->in_qualifier, *this);
+ r &= validate_point_mode(*state->in_qualifier, *this);
+
+ return r;
+}
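+
+/* Illustrative GLSL sketch (not part of the original sources): a
+ * tessellation evaluation shader 'in' layout combining the qualifier groups
+ * validated above (primitive type, spacing, ordering):
+ *
+ *    #version 400
+ *    layout(triangles, equal_spacing, ccw) in;
+ *    void main() { gl_Position = vec4(0.0); }
+ */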
+
+bool
+ast_type_qualifier::merge_into_in_qualifier(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ ast_node* &node)
+{
+ bool r = true;
+ void *lin_ctx = state->linalloc;
+
+ /* We create the gs_input_layout node before merging so that, once the flag
+ * is set, no further duplicate nodes will be created.
+ */
+ if (state->stage == MESA_SHADER_GEOMETRY
+ && this->flags.q.prim_type && !state->in_qualifier->flags.q.prim_type) {
+ node = new(lin_ctx) ast_gs_input_layout(*loc, this->prim_type);
+ }
+
+ r = state->in_qualifier->merge_qualifier(loc, state, *this, false);
+
+ if (state->in_qualifier->flags.q.early_fragment_tests) {
+ state->fs_early_fragment_tests = true;
+ state->in_qualifier->flags.q.early_fragment_tests = false;
+ }
+
+ if (state->in_qualifier->flags.q.inner_coverage) {
+ state->fs_inner_coverage = true;
+ state->in_qualifier->flags.q.inner_coverage = false;
+ }
+
+ if (state->in_qualifier->flags.q.post_depth_coverage) {
+ state->fs_post_depth_coverage = true;
+ state->in_qualifier->flags.q.post_depth_coverage = false;
+ }
+
+ if (state->fs_inner_coverage && state->fs_post_depth_coverage) {
+ _mesa_glsl_error(loc, state,
+ "inner_coverage & post_depth_coverage layout qualifiers "
+ "are mutally exclusives");
+ r = false;
+ }
+
+ if (state->in_qualifier->flags.q.pixel_interlock_ordered) {
+ state->fs_pixel_interlock_ordered = true;
+ state->in_qualifier->flags.q.pixel_interlock_ordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.pixel_interlock_unordered) {
+ state->fs_pixel_interlock_unordered = true;
+ state->in_qualifier->flags.q.pixel_interlock_unordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.sample_interlock_ordered) {
+ state->fs_sample_interlock_ordered = true;
+ state->in_qualifier->flags.q.sample_interlock_ordered = false;
+ }
+
+ if (state->in_qualifier->flags.q.sample_interlock_unordered) {
+ state->fs_sample_interlock_unordered = true;
+ state->in_qualifier->flags.q.sample_interlock_unordered = false;
+ }
+
+ if (state->fs_pixel_interlock_ordered +
+ state->fs_pixel_interlock_unordered +
+ state->fs_sample_interlock_ordered +
+ state->fs_sample_interlock_unordered > 1) {
+ _mesa_glsl_error(loc, state,
+ "only one interlock mode can be used at any time.");
+ r = false;
+ }
+
+ if (state->in_qualifier->flags.q.derivative_group) {
+ if (state->cs_derivative_group != DERIVATIVE_GROUP_NONE) {
+ if (state->in_qualifier->derivative_group != DERIVATIVE_GROUP_NONE &&
+ state->cs_derivative_group != state->in_qualifier->derivative_group) {
+ _mesa_glsl_error(loc, state,
+ "conflicting derivative groups.");
+ r = false;
+ }
+ } else {
+ state->cs_derivative_group = state->in_qualifier->derivative_group;
+ }
+ }
+
+ /* We allow the creation of multiple cs_input_layout nodes. Coherence among
+ * all existing nodes is checked later, when the AST node is transformed
+ * into HIR.
+ */
+ if (state->in_qualifier->flags.q.local_size) {
+ node = new(lin_ctx) ast_cs_input_layout(*loc,
+ state->in_qualifier->local_size);
+ state->in_qualifier->flags.q.local_size = 0;
+ for (int i = 0; i < 3; i++)
+ state->in_qualifier->local_size[i] = NULL;
+ }
+
+ if (state->in_qualifier->flags.q.local_size_variable) {
+ state->cs_input_local_size_variable_specified = true;
+ state->in_qualifier->flags.q.local_size_variable = false;
+ }
+
+ return r;
+}
+
+bool
+ast_type_qualifier::push_to_global(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state)
+{
+ if (this->flags.q.xfb_stride) {
+ this->flags.q.xfb_stride = 0;
+
+ unsigned buff_idx;
+ if (process_qualifier_constant(state, loc, "xfb_buffer",
+ this->xfb_buffer, &buff_idx)) {
+ if (state->out_qualifier->out_xfb_stride[buff_idx]) {
+ state->out_qualifier->out_xfb_stride[buff_idx]->merge_qualifier(
+ new(state->linalloc) ast_layout_expression(*loc,
+ this->xfb_stride));
+ } else {
+ state->out_qualifier->out_xfb_stride[buff_idx] =
+ new(state->linalloc) ast_layout_expression(*loc,
+ this->xfb_stride);
+ }
+ }
+ }
+
+ return true;
+}
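+
+/* Illustrative GLSL sketch (not part of the original sources): an xfb_stride
+ * qualifier that the helper above pushes onto the global 'out' qualifier for
+ * the selected transform feedback buffer:
+ *
+ *    #version 440
+ *    layout(xfb_buffer = 0, xfb_stride = 16) out;
+ */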
+
+/**
+ * Check if the current type qualifier has any illegal flags.
+ *
+ * If so, print an error message, followed by a list of illegal flags.
+ *
+ * \param message       The error message to print.
+ * \param name          The name printed after the message.
+ * \param allowed_flags A list of valid flags.
+ */
+bool
+ast_type_qualifier::validate_flags(YYLTYPE *loc,
+ _mesa_glsl_parse_state *state,
+ const ast_type_qualifier &allowed_flags,
+ const char *message, const char *name)
+{
+ ast_type_qualifier bad;
+ bad.flags.i = this->flags.i & ~allowed_flags.flags.i;
+ if (bad.flags.i == 0)
+ return true;
+
+ _mesa_glsl_error(loc, state,
+ "%s '%s':"
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s"
+ "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ message, name,
+ bad.flags.q.invariant ? " invariant" : "",
+ bad.flags.q.precise ? " precise" : "",
+ bad.flags.q.constant ? " constant" : "",
+ bad.flags.q.attribute ? " attribute" : "",
+ bad.flags.q.varying ? " varying" : "",
+ bad.flags.q.in ? " in" : "",
+ bad.flags.q.out ? " out" : "",
+ bad.flags.q.centroid ? " centroid" : "",
+ bad.flags.q.sample ? " sample" : "",
+ bad.flags.q.patch ? " patch" : "",
+ bad.flags.q.uniform ? " uniform" : "",
+ bad.flags.q.buffer ? " buffer" : "",
+ bad.flags.q.shared_storage ? " shared_storage" : "",
+ bad.flags.q.smooth ? " smooth" : "",
+ bad.flags.q.flat ? " flat" : "",
+ bad.flags.q.noperspective ? " noperspective" : "",
+ bad.flags.q.origin_upper_left ? " origin_upper_left" : "",
+ bad.flags.q.pixel_center_integer ? " pixel_center_integer" : "",
+ bad.flags.q.explicit_align ? " align" : "",
+ bad.flags.q.explicit_component ? " component" : "",
+ bad.flags.q.explicit_location ? " location" : "",
+ bad.flags.q.explicit_index ? " index" : "",
+ bad.flags.q.explicit_binding ? " binding" : "",
+ bad.flags.q.explicit_offset ? " offset" : "",
+ bad.flags.q.depth_type ? " depth_type" : "",
+ bad.flags.q.std140 ? " std140" : "",
+ bad.flags.q.std430 ? " std430" : "",
+ bad.flags.q.shared ? " shared" : "",
+ bad.flags.q.packed ? " packed" : "",
+ bad.flags.q.column_major ? " column_major" : "",
+ bad.flags.q.row_major ? " row_major" : "",
+ bad.flags.q.prim_type ? " prim_type" : "",
+ bad.flags.q.max_vertices ? " max_vertices" : "",
+ bad.flags.q.local_size ? " local_size" : "",
+ bad.flags.q.local_size_variable ? " local_size_variable" : "",
+ bad.flags.q.early_fragment_tests ? " early_fragment_tests" : "",
+ bad.flags.q.explicit_image_format ? " image_format" : "",
+ bad.flags.q.coherent ? " coherent" : "",
+ bad.flags.q._volatile ? " _volatile" : "",
+ bad.flags.q.restrict_flag ? " restrict_flag" : "",
+ bad.flags.q.read_only ? " read_only" : "",
+ bad.flags.q.write_only ? " write_only" : "",
+ bad.flags.q.invocations ? " invocations" : "",
+ bad.flags.q.stream ? " stream" : "",
+ bad.flags.q.explicit_stream ? " stream" : "",
+ bad.flags.q.explicit_xfb_offset ? " xfb_offset" : "",
+ bad.flags.q.xfb_buffer ? " xfb_buffer" : "",
+ bad.flags.q.explicit_xfb_buffer ? " xfb_buffer" : "",
+ bad.flags.q.xfb_stride ? " xfb_stride" : "",
+ bad.flags.q.explicit_xfb_stride ? " xfb_stride" : "",
+ bad.flags.q.vertex_spacing ? " vertex_spacing" : "",
+ bad.flags.q.ordering ? " ordering" : "",
+ bad.flags.q.point_mode ? " point_mode" : "",
+ bad.flags.q.vertices ? " vertices" : "",
+ bad.flags.q.subroutine ? " subroutine" : "",
+ bad.flags.q.blend_support ? " blend_support" : "",
+ bad.flags.q.inner_coverage ? " inner_coverage" : "",
+ bad.flags.q.bindless_sampler ? " bindless_sampler" : "",
+ bad.flags.q.bindless_image ? " bindless_image" : "",
+ bad.flags.q.bound_sampler ? " bound_sampler" : "",
+ bad.flags.q.bound_image ? " bound_image" : "",
+ bad.flags.q.post_depth_coverage ? " post_depth_coverage" : "",
+ bad.flags.q.pixel_interlock_ordered ? " pixel_interlock_ordered" : "",
+ bad.flags.q.pixel_interlock_unordered ? " pixel_interlock_unordered": "",
+ bad.flags.q.sample_interlock_ordered ? " sample_interlock_ordered": "",
+ bad.flags.q.sample_interlock_unordered ? " sample_interlock_unordered": "",
+ bad.flags.q.non_coherent ? " noncoherent" : "");
+ return false;
+}
+
+bool
+ast_layout_expression::process_qualifier_constant(struct _mesa_glsl_parse_state *state,
+ const char *qual_identifier,
+ unsigned *value,
+ bool can_be_zero)
+{
+ int min_value = 0;
+ bool first_pass = true;
+ *value = 0;
+
+ if (!can_be_zero)
+ min_value = 1;
+
+ for (exec_node *node = layout_const_expressions.get_head_raw();
+ !node->is_tail_sentinel(); node = node->next) {
+
+ exec_list dummy_instructions;
+ ast_node *const_expression = exec_node_data(ast_node, node, link);
+
+ ir_rvalue *const ir = const_expression->hir(&dummy_instructions, state);
+
+ ir_constant *const const_int =
+ ir->constant_expression_value(ralloc_parent(ir));
+
+ if (const_int == NULL || !const_int->type->is_integer_32()) {
+ YYLTYPE loc = const_expression->get_location();
+ _mesa_glsl_error(&loc, state, "%s must be an integral constant "
+ "expression", qual_indentifier);
+ return false;
+ }
+
+ if (const_int->value.i[0] < min_value) {
+ YYLTYPE loc = const_expression->get_location();
+ _mesa_glsl_error(&loc, state, "%s layout qualifier is invalid "
+ "(%d < %d)", qual_indentifier,
+ const_int->value.i[0], min_value);
+ return false;
+ }
+
+ if (!first_pass && *value != const_int->value.u[0]) {
+ YYLTYPE loc = const_expression->get_location();
+ _mesa_glsl_error(&loc, state, "%s layout qualifier does not "
+ "match previous declaration (%d vs %d)",
+ qual_indentifier, *value, const_int->value.i[0]);
+ return false;
+ } else {
+ first_pass = false;
+ *value = const_int->value.u[0];
+ }
+
+ /* If the location is const (and we've verified that
+ * it is) then no instructions should have been emitted
+ * when we converted it to HIR. If they were emitted,
+ * then either the location isn't const after all, or
+ * we are emitting unnecessary instructions.
+ */
+ assert(dummy_instructions.is_empty());
+ }
+
+ return true;
+}
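+
+/* Illustrative GLSL sketch (not part of the original sources): in a geometry
+ * shader, a layout constant declared more than once must evaluate to the
+ * same value on every occurrence, which the loop above enforces:
+ *
+ *    #version 400
+ *    layout(triangles, invocations = 2) in;
+ *    layout(invocations = 2) in;     // OK: matches
+ *    // layout(invocations = 3) in; // would fail the "(%d vs %d)" check
+ */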
+
+bool
+process_qualifier_constant(struct _mesa_glsl_parse_state *state,
+ YYLTYPE *loc,
+ const char *qual_identifier,
+ ast_expression *const_expression,
+ unsigned *value)
+{
+ exec_list dummy_instructions;
+
+ if (const_expression == NULL) {
+ *value = 0;
+ return true;
+ }
+
+ ir_rvalue *const ir = const_expression->hir(&dummy_instructions, state);
+
+ ir_constant *const const_int =
+ ir->constant_expression_value(ralloc_parent(ir));
+ if (const_int == NULL || !const_int->type->is_integer_32()) {
+ _mesa_glsl_error(loc, state, "%s must be an integral constant "
+ "expression", qual_indentifier);
+ return false;
+ }
+
+ if (const_int->value.i[0] < 0) {
+ _mesa_glsl_error(loc, state, "%s layout qualifier is invalid (%d < 0)",
+ qual_indentifier, const_int->value.u[0]);
+ return false;
+ }
+
+ /* If the location is const (and we've verified that
+ * it is) then no instructions should have been emitted
+ * when we converted it to HIR. If they were emitted,
+ * then either the location isn't const after all, or
+ * we are emitting unnecessary instructions.
+ */
+ assert(dummy_instructions.is_empty());
+
+ *value = const_int->value.u[0];
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.cpp
new file mode 100644
index 0000000000..3dafcf0c77
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.cpp
@@ -0,0 +1,7677 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file builtin_functions.cpp
+ *
+ * Support for GLSL built-in functions.
+ *
+ * This file is split into several main components:
+ *
+ * 1. Availability predicates
+ *
+ * A series of small functions that check whether the current shader
+ * supports the version/extensions required to expose a built-in.
+ *
+ * 2. Core builtin_builder class functionality
+ *
+ * 3. Lists of built-in functions
+ *
+ * The builtin_builder::create_builtins() function contains lists of all
+ * built-in function signatures, where they're available, what types they
+ * take, and so on.
+ *
+ * 4. Implementations of built-in function signatures
+ *
+ * A series of functions which create ir_function_signatures and emit IR
+ * via ir_builder to implement them.
+ *
+ * 5. External API
+ *
+ * A few functions the rest of the compiler can use to interact with the
+ * built-in function module. For example, searching for a built-in by
+ * name and parameters.
+ */
+
+
+/**
+ * Unfortunately, some versions of MinGW produce bad code if this file
+ * is compiled with -O2 or -O3. The resulting driver will crash in random
+ * places if the app uses GLSL.
+ * The work-around is to disable optimizations for just this file. Luckily,
+ * this code is basically just executed once.
+ *
+ * MinGW 4.6.3 (in Ubuntu 13.10) does not have this bug.
+ * MinGW 5.3.1 (in Ubuntu 16.04) definitely has this bug.
+ * MinGW 6.2.0 (in Ubuntu 16.10) definitely has this bug.
+ * MinGW x.y.z - don't know. Assume versions after 4.6.x are buggy.
+ */
+
+#if defined(__MINGW32__) && ((__GNUC__ * 100) + __GNUC_MINOR__ >= 407)
+#warning "disabling optimizations for this file to work around compiler bug"
+#pragma GCC optimize("O1")
+#endif
+
+
+#include <stdarg.h>
+#include <stdio.h>
+#include "main/mtypes.h"
+#include "main/shaderobj.h"
+#include "ir_builder.h"
+#include "glsl_parser_extras.h"
+#include "program/prog_instruction.h"
+#include <math.h>
+#include "builtin_functions.h"
+#include "util/hash_table.h"
+
+#define M_PIf ((float) M_PI)
+#define M_PI_2f ((float) M_PI_2)
+#define M_PI_4f ((float) M_PI_4)
+
+using namespace ir_builder;
+
+/**
+ * Availability predicates:
+ * @{
+ */
+static bool
+always_available(const _mesa_glsl_parse_state *)
+{
+ return true;
+}
+
+static bool
+compatibility_vs_only(const _mesa_glsl_parse_state *state)
+{
+ return state->stage == MESA_SHADER_VERTEX &&
+ (state->compat_shader || state->ARB_compatibility_enable) &&
+ !state->es_shader;
+}
+
+static bool
+derivatives_only(const _mesa_glsl_parse_state *state)
+{
+ return state->stage == MESA_SHADER_FRAGMENT ||
+ (state->stage == MESA_SHADER_COMPUTE &&
+ state->NV_compute_shader_derivatives_enable);
+}
+
+static bool
+gs_only(const _mesa_glsl_parse_state *state)
+{
+ return state->stage == MESA_SHADER_GEOMETRY;
+}
+
+static bool
+v110(const _mesa_glsl_parse_state *state)
+{
+ return !state->es_shader;
+}
+
+static bool
+v110_derivatives_only(const _mesa_glsl_parse_state *state)
+{
+ return !state->es_shader &&
+ derivatives_only(state);
+}
+
+static bool
+v120(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(120, 300);
+}
+
+static bool
+v130(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(130, 300);
+}
+
+static bool
+v130_desktop(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(130, 0);
+}
+
+static bool
+v460_desktop(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(460, 0);
+}
+
+static bool
+v130_derivatives_only(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(130, 300) &&
+ derivatives_only(state);
+}
+
+static bool
+v140_or_es3(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(140, 300);
+}
+
+static bool
+v400_derivatives_only(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 0) &&
+ derivatives_only(state);
+}
+
+static bool
+texture_rectangle(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_texture_rectangle_enable;
+}
+
+static bool
+texture_external(const _mesa_glsl_parse_state *state)
+{
+ return state->OES_EGL_image_external_enable;
+}
+
+static bool
+texture_external_es3(const _mesa_glsl_parse_state *state)
+{
+ return state->OES_EGL_image_external_essl3_enable &&
+ state->es_shader &&
+ state->is_version(0, 300);
+}
+
+/** True if texturing functions with explicit LOD are allowed. */
+static bool
+lod_exists_in_stage(const _mesa_glsl_parse_state *state)
+{
+ /* Texturing functions with "Lod" in their name exist:
+ * - In the vertex shader stage (for all languages)
+ * - In any stage for GLSL 1.30+ or GLSL ES 3.00
+ * - In any stage for desktop GLSL with ARB_shader_texture_lod enabled.
+ *
+ * Since ARB_shader_texture_lod can only be enabled on desktop GLSL, we
+ * don't need to explicitly check state->es_shader.
+ */
+ return state->stage == MESA_SHADER_VERTEX ||
+ state->is_version(130, 300) ||
+ state->ARB_shader_texture_lod_enable ||
+ state->EXT_gpu_shader4_enable;
+}
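+
+/* Illustrative GLSL sketch (not part of the original sources): per the
+ * predicate above, texture2DLod is exposed in a #version 110 vertex shader,
+ * while a #version 110 fragment shader would need ARB_shader_texture_lod:
+ *
+ *    #version 110
+ *    uniform sampler2D tex;
+ *    void main() { gl_Position = texture2DLod(tex, vec2(0.5), 0.0); }
+ */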
+
+static bool
+v110_lod(const _mesa_glsl_parse_state *state)
+{
+ return !state->es_shader && lod_exists_in_stage(state);
+}
+
+static bool
+texture_buffer(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(140, 320) ||
+ state->EXT_texture_buffer_enable ||
+ state->OES_texture_buffer_enable;
+}
+
+static bool
+shader_texture_lod(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_texture_lod_enable;
+}
+
+static bool
+shader_texture_lod_and_rect(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_texture_lod_enable &&
+ state->ARB_texture_rectangle_enable;
+}
+
+static bool
+shader_bit_encoding(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(330, 300) ||
+ state->ARB_shader_bit_encoding_enable ||
+ state->ARB_gpu_shader5_enable;
+}
+
+static bool
+shader_integer_mix(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(450, 310) ||
+ state->ARB_ES3_1_compatibility_enable ||
+ (v130(state) && state->EXT_shader_integer_mix_enable);
+}
+
+static bool
+shader_packing_or_es3(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shading_language_packing_enable ||
+ state->is_version(420, 300);
+}
+
+static bool
+shader_packing_or_es3_or_gpu_shader5(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shading_language_packing_enable ||
+ state->ARB_gpu_shader5_enable ||
+ state->is_version(400, 300);
+}
+
+static bool
+gpu_shader4(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_gpu_shader4_enable;
+}
+
+static bool
+gpu_shader4_integer(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_gpu_shader4_enable &&
+ state->ctx->Extensions.EXT_texture_integer;
+}
+
+static bool
+gpu_shader4_array(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_gpu_shader4_enable &&
+ state->ctx->Extensions.EXT_texture_array;
+}
+
+static bool
+gpu_shader4_array_integer(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader4_array(state) &&
+ state->ctx->Extensions.EXT_texture_integer;
+}
+
+static bool
+gpu_shader4_rect(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_gpu_shader4_enable &&
+ state->ctx->Extensions.NV_texture_rectangle;
+}
+
+static bool
+gpu_shader4_rect_integer(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader4_rect(state) &&
+ state->ctx->Extensions.EXT_texture_integer;
+}
+
+static bool
+gpu_shader4_tbo(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_gpu_shader4_enable &&
+ state->ctx->Extensions.EXT_texture_buffer_object;
+}
+
+static bool
+gpu_shader4_tbo_integer(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader4_tbo(state) &&
+ state->ctx->Extensions.EXT_texture_integer;
+}
+
+static bool
+gpu_shader4_derivs_only(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_gpu_shader4_enable &&
+ derivatives_only(state);
+}
+
+static bool
+gpu_shader4_integer_derivs_only(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader4_derivs_only(state) &&
+ state->ctx->Extensions.EXT_texture_integer;
+}
+
+static bool
+gpu_shader4_array_derivs_only(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader4_derivs_only(state) &&
+ state->ctx->Extensions.EXT_texture_array;
+}
+
+static bool
+gpu_shader4_array_integer_derivs_only(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader4_array_derivs_only(state) &&
+ state->ctx->Extensions.EXT_texture_integer;
+}
+
+static bool
+v130_or_gpu_shader4(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(130, 300) || state->EXT_gpu_shader4_enable;
+}
+
+static bool
+v130_or_gpu_shader4_and_tex_shadow_lod(const _mesa_glsl_parse_state *state)
+{
+ return v130_or_gpu_shader4(state) &&
+ state->EXT_texture_shadow_lod_enable;
+}
+
+static bool
+gpu_shader5(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 0) || state->ARB_gpu_shader5_enable;
+}
+
+static bool
+gpu_shader5_es(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 320) ||
+ state->ARB_gpu_shader5_enable ||
+ state->EXT_gpu_shader5_enable ||
+ state->OES_gpu_shader5_enable;
+}
+
+static bool
+gpu_shader5_or_OES_texture_cube_map_array(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 320) ||
+ state->ARB_gpu_shader5_enable ||
+ state->EXT_texture_cube_map_array_enable ||
+ state->OES_texture_cube_map_array_enable;
+}
+
+static bool
+es31_not_gs5(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(0, 310) && !gpu_shader5_es(state);
+}
+
+static bool
+gpu_shader5_or_es31(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 310) || state->ARB_gpu_shader5_enable;
+}
+
+static bool
+shader_packing_or_es31_or_gpu_shader5(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shading_language_packing_enable ||
+ state->ARB_gpu_shader5_enable ||
+ state->is_version(400, 310);
+}
+
+static bool
+gpu_shader5_or_es31_or_integer_functions(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader5_or_es31(state) ||
+ state->MESA_shader_integer_functions_enable;
+}
+
+static bool
+fs_interpolate_at(const _mesa_glsl_parse_state *state)
+{
+ return state->stage == MESA_SHADER_FRAGMENT &&
+ (state->is_version(400, 320) ||
+ state->ARB_gpu_shader5_enable ||
+ state->OES_shader_multisample_interpolation_enable);
+}
+
+
+static bool
+texture_array_lod(const _mesa_glsl_parse_state *state)
+{
+ return lod_exists_in_stage(state) &&
+ (state->EXT_texture_array_enable ||
+ (state->EXT_gpu_shader4_enable &&
+ state->ctx->Extensions.EXT_texture_array));
+}
+
+static bool
+texture_array(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_texture_array_enable ||
+ (state->EXT_gpu_shader4_enable &&
+ state->ctx->Extensions.EXT_texture_array);
+}
+
+static bool
+texture_array_derivs_only(const _mesa_glsl_parse_state *state)
+{
+ return derivatives_only(state) &&
+ texture_array(state);
+}
+
+static bool
+texture_multisample(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(150, 310) ||
+ state->ARB_texture_multisample_enable;
+}
+
+static bool
+texture_multisample_array(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(150, 320) ||
+ state->ARB_texture_multisample_enable ||
+ state->OES_texture_storage_multisample_2d_array_enable;
+}
+
+static bool
+texture_samples_identical(const _mesa_glsl_parse_state *state)
+{
+ return texture_multisample(state) &&
+ state->EXT_shader_samples_identical_enable;
+}
+
+static bool
+texture_samples_identical_array(const _mesa_glsl_parse_state *state)
+{
+ return texture_multisample_array(state) &&
+ state->EXT_shader_samples_identical_enable;
+}
+
+static bool
+derivatives_texture_cube_map_array(const _mesa_glsl_parse_state *state)
+{
+ return state->has_texture_cube_map_array() &&
+ derivatives_only(state);
+}
+
+static bool
+texture_cube_map_array(const _mesa_glsl_parse_state *state)
+{
+ return state->has_texture_cube_map_array();
+}
+
+static bool
+v130_or_gpu_shader4_and_tex_cube_map_array(const _mesa_glsl_parse_state *state)
+{
+ return texture_cube_map_array(state) &&
+ v130_or_gpu_shader4(state) &&
+ state->EXT_texture_shadow_lod_enable;
+}
+
+static bool
+texture_query_levels(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(430, 0) ||
+ state->ARB_texture_query_levels_enable;
+}
+
+static bool
+texture_query_lod(const _mesa_glsl_parse_state *state)
+{
+ return derivatives_only(state) &&
+ (state->ARB_texture_query_lod_enable ||
+ state->EXT_texture_query_lod_enable);
+}
+
+static bool
+texture_gather_cube_map_array(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 320) ||
+ state->ARB_texture_gather_enable ||
+ state->ARB_gpu_shader5_enable ||
+ state->EXT_texture_cube_map_array_enable ||
+ state->OES_texture_cube_map_array_enable;
+}
+
+static bool
+texture_texture4(const _mesa_glsl_parse_state *state)
+{
+ return state->AMD_texture_texture4_enable;
+}
+
+static bool
+texture_gather_or_es31(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(400, 310) ||
+ state->ARB_texture_gather_enable ||
+ state->ARB_gpu_shader5_enable;
+}
+
+/* Only ARB_texture_gather, but not GLSL 4.0 or ARB_gpu_shader5;
+ * used for relaxation of const offset requirements.
+ */
+static bool
+texture_gather_only_or_es31(const _mesa_glsl_parse_state *state)
+{
+ return !state->is_version(400, 320) &&
+ !state->ARB_gpu_shader5_enable &&
+ !state->EXT_gpu_shader5_enable &&
+ !state->OES_gpu_shader5_enable &&
+ (state->ARB_texture_gather_enable ||
+ state->is_version(0, 310));
+}
+
+/* Desktop GL or OES_standard_derivatives */
+static bool
+derivatives(const _mesa_glsl_parse_state *state)
+{
+ return derivatives_only(state) &&
+ (state->is_version(110, 300) ||
+ state->OES_standard_derivatives_enable ||
+ state->ctx->Const.AllowGLSLRelaxedES);
+}
+
+static bool
+derivative_control(const _mesa_glsl_parse_state *state)
+{
+ return derivatives_only(state) &&
+ (state->is_version(450, 0) ||
+ state->ARB_derivative_control_enable);
+}
+
+static bool
+tex1d_lod(const _mesa_glsl_parse_state *state)
+{
+ return !state->es_shader && lod_exists_in_stage(state);
+}
+
+/** True if sampler3D exists */
+static bool
+tex3d(const _mesa_glsl_parse_state *state)
+{
+ /* sampler3D exists in all desktop GLSL versions, GLSL ES 1.00 with the
+ * OES_texture_3D extension, and in GLSL ES 3.00.
+ */
+ return !state->es_shader ||
+ state->OES_texture_3D_enable ||
+ state->language_version >= 300;
+}
+
+static bool
+derivatives_tex3d(const _mesa_glsl_parse_state *state)
+{
+ return (!state->es_shader || state->OES_texture_3D_enable) &&
+ derivatives_only(state);
+}
+
+static bool
+tex3d_lod(const _mesa_glsl_parse_state *state)
+{
+ return tex3d(state) && lod_exists_in_stage(state);
+}
+
+static bool
+shader_atomic_counters(const _mesa_glsl_parse_state *state)
+{
+ return state->has_atomic_counters();
+}
+
+static bool
+shader_atomic_counter_ops(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_atomic_counter_ops_enable;
+}
+
+static bool
+shader_atomic_counter_ops_or_v460_desktop(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_atomic_counter_ops_enable || v460_desktop(state);
+}
+
+static bool
+shader_ballot(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_ballot_enable;
+}
+
+static bool
+supports_arb_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_fragment_shader_interlock_enable;
+}
+
+static bool
+supports_nv_fragment_shader_interlock(const _mesa_glsl_parse_state *state)
+{
+ return state->NV_fragment_shader_interlock_enable;
+}
+
+static bool
+shader_clock(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_clock_enable;
+}
+
+static bool
+shader_clock_int64(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_clock_enable &&
+ (state->ARB_gpu_shader_int64_enable ||
+ state->AMD_gpu_shader_int64_enable);
+}
+
+static bool
+shader_storage_buffer_object(const _mesa_glsl_parse_state *state)
+{
+ return state->has_shader_storage_buffer_objects();
+}
+
+static bool
+shader_trinary_minmax(const _mesa_glsl_parse_state *state)
+{
+ return state->AMD_shader_trinary_minmax_enable;
+}
+
+static bool
+shader_image_load_store(const _mesa_glsl_parse_state *state)
+{
+ return (state->is_version(420, 310) ||
+ state->ARB_shader_image_load_store_enable ||
+ state->EXT_shader_image_load_store_enable);
+}
+
+static bool
+shader_image_load_store_ext(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_shader_image_load_store_enable;
+}
+
+static bool
+shader_image_atomic(const _mesa_glsl_parse_state *state)
+{
+ return (state->is_version(420, 320) ||
+ state->ARB_shader_image_load_store_enable ||
+ state->EXT_shader_image_load_store_enable ||
+ state->OES_shader_image_atomic_enable);
+}
+
+static bool
+shader_image_atomic_exchange_float(const _mesa_glsl_parse_state *state)
+{
+ return (state->is_version(450, 320) ||
+ state->ARB_ES3_1_compatibility_enable ||
+ state->OES_shader_image_atomic_enable ||
+ state->NV_shader_atomic_float_enable);
+}
+
+static bool
+shader_image_atomic_add_float(const _mesa_glsl_parse_state *state)
+{
+ return state->NV_shader_atomic_float_enable;
+}
+
+static bool
+shader_image_size(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(430, 310) ||
+ state->ARB_shader_image_size_enable;
+}
+
+static bool
+shader_samples(const _mesa_glsl_parse_state *state)
+{
+ return state->is_version(450, 0) ||
+ state->ARB_shader_texture_image_samples_enable;
+}
+
+static bool
+gs_streams(const _mesa_glsl_parse_state *state)
+{
+ return gpu_shader5(state) && gs_only(state);
+}
+
+static bool
+fp64(const _mesa_glsl_parse_state *state)
+{
+ return state->has_double();
+}
+
+static bool
+int64(const _mesa_glsl_parse_state *state)
+{
+ return state->has_int64();
+}
+
+static bool
+int64_fp64(const _mesa_glsl_parse_state *state)
+{
+ return state->has_int64() && state->has_double();
+}
+
+static bool
+compute_shader(const _mesa_glsl_parse_state *state)
+{
+ return state->stage == MESA_SHADER_COMPUTE;
+}
+
+static bool
+compute_shader_supported(const _mesa_glsl_parse_state *state)
+{
+ return state->has_compute_shader();
+}
+
+static bool
+buffer_atomics_supported(const _mesa_glsl_parse_state *state)
+{
+ return compute_shader(state) || shader_storage_buffer_object(state);
+}
+
+static bool
+barrier_supported(const _mesa_glsl_parse_state *state)
+{
+ return compute_shader(state) ||
+ state->stage == MESA_SHADER_TESS_CTRL;
+}
+
+static bool
+vote(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_group_vote_enable;
+}
+
+static bool
+vote_or_v460_desktop(const _mesa_glsl_parse_state *state)
+{
+ return state->ARB_shader_group_vote_enable || v460_desktop(state);
+}
+
+static bool
+integer_functions_supported(const _mesa_glsl_parse_state *state)
+{
+ return state->extensions->MESA_shader_integer_functions;
+}
+
+static bool
+NV_shader_atomic_float_supported(const _mesa_glsl_parse_state *state)
+{
+ return state->extensions->NV_shader_atomic_float;
+}
+
+static bool
+shader_atomic_float_add(const _mesa_glsl_parse_state *state)
+{
+ return state->NV_shader_atomic_float_enable;
+}
+
+static bool
+shader_atomic_float_exchange(const _mesa_glsl_parse_state *state)
+{
+ return state->NV_shader_atomic_float_enable ||
+ state->INTEL_shader_atomic_float_minmax_enable;
+}
+
+static bool
+INTEL_shader_atomic_float_minmax_supported(const _mesa_glsl_parse_state *state)
+{
+ return state->extensions->INTEL_shader_atomic_float_minmax;
+}
+
+static bool
+shader_atomic_float_minmax(const _mesa_glsl_parse_state *state)
+{
+ return state->INTEL_shader_atomic_float_minmax_enable;
+}
+
+static bool
+demote_to_helper_invocation(const _mesa_glsl_parse_state *state)
+{
+ return state->EXT_demote_to_helper_invocation_enable;
+}
+
+static bool
+shader_integer_functions2(const _mesa_glsl_parse_state *state)
+{
+ return state->INTEL_shader_integer_functions2_enable;
+}
+
+static bool
+shader_integer_functions2_int64(const _mesa_glsl_parse_state *state)
+{
+ return state->INTEL_shader_integer_functions2_enable && state->has_int64();
+}
+
+static bool
+is_nir(const _mesa_glsl_parse_state *state)
+{
+ return state->ctx->Const.ShaderCompilerOptions[state->stage].NirOptions;
+}
+
+static bool
+is_not_nir(const _mesa_glsl_parse_state *state)
+{
+ return !is_nir(state);
+}
+
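+/* A minimal sketch of how these predicates compose (a hypothetical
+ * combination, not one of the predicates defined above): a built-in
+ * needing both fp64 and a compute stage could be guarded with
+ *
+ *    static bool
+ *    fp64_compute(const _mesa_glsl_parse_state *state)
+ *    {
+ *       return fp64(state) && compute_shader(state);
+ *    }
+ *
+ * in the same way tex3d_lod() combines tex3d() with
+ * lod_exists_in_stage() above.
+ */
+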
+/** @} */
+
+/******************************************************************************/
+
+namespace {
+
+/**
+ * builtin_builder: A singleton object representing the core of the built-in
+ * function module.
+ *
+ * It generates IR for every built-in function signature, and organizes them
+ * into functions.
+ */
+class builtin_builder {
+public:
+ builtin_builder();
+ ~builtin_builder();
+
+ void initialize();
+ void release();
+ ir_function_signature *find(_mesa_glsl_parse_state *state,
+ const char *name, exec_list *actual_parameters);
+
+ /**
+ * A shader to hold all the built-in signatures; created by this module.
+ *
+ * This includes signatures for every built-in, regardless of version or
+ * enabled extensions. The availability predicate associated with each
+ * signature allows matching_signature() to filter out the irrelevant ones.
+ */
+ gl_shader *shader;
+
+private:
+ void *mem_ctx;
+
+ void create_shader();
+ void create_intrinsics();
+ void create_builtins();
+
+ /**
+ * IR builder helpers:
+ *
+ * These convenience functions assist in emitting IR, but don't necessarily
+ * fit in ir_builder itself. Many of them rely on having a mem_ctx class
+ * member available.
+ */
+ ir_variable *in_var(const glsl_type *type, const char *name);
+ ir_variable *out_var(const glsl_type *type, const char *name);
+ ir_constant *imm(float f, unsigned vector_elements=1);
+ ir_constant *imm(bool b, unsigned vector_elements=1);
+ ir_constant *imm(int i, unsigned vector_elements=1);
+ ir_constant *imm(unsigned u, unsigned vector_elements=1);
+ ir_constant *imm(double d, unsigned vector_elements=1);
+ ir_constant *imm(const glsl_type *type, const ir_constant_data &);
+ ir_dereference_variable *var_ref(ir_variable *var);
+ ir_dereference_array *array_ref(ir_variable *var, int i);
+ ir_swizzle *matrix_elt(ir_variable *var, int col, int row);
+
+ ir_expression *asin_expr(ir_variable *x, float p0, float p1);
+ void do_atan(ir_factory &body, const glsl_type *type, ir_variable *res, operand y_over_x);
+
+ /**
+ * Call function \param f with parameters specified as the linked
+ * list \param params of \c ir_variable objects. \param ret should
+ * point to the ir_variable that will hold the function return
+ * value, or be \c NULL if the function has a void return type.
+ */
+ ir_call *call(ir_function *f, ir_variable *ret, exec_list params);
+
+ /** Create a new function and add the given signatures. */
+ void add_function(const char *name, ...);
+
+ typedef ir_function_signature *(builtin_builder::*image_prototype_ctr)(const glsl_type *image_type,
+ unsigned num_arguments,
+ unsigned flags);
+
+ /**
+ * Create a new image built-in function for all known image types.
+ * \p flags is a bitfield of \c image_function_flags flags.
+ */
+ void add_image_function(const char *name,
+ const char *intrinsic_name,
+ image_prototype_ctr prototype,
+ unsigned num_arguments,
+ unsigned flags,
+ enum ir_intrinsic_id id);
+
+ /**
+ * Create new functions for all known image built-ins and types.
+ * If \p glsl is \c true, use the GLSL built-in names and emit code
+ * to call into the actual compiler intrinsic. If \p glsl is
+ * \c false, emit a function prototype with no body for each image
+ * intrinsic name.
+ */
+ void add_image_functions(bool glsl);
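+ /* (The image intrinsics follow the same "__intrinsic_*" naming
+ * convention seen in create_intrinsics() below, which calls
+ * add_image_functions(false); the GLSL-visible variants are then
+ * registered via add_image_functions(true).)
+ */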
+
+ ir_function_signature *new_sig(const glsl_type *return_type,
+ builtin_available_predicate avail,
+ int num_params, ...);
+
+ /**
+ * Function signature generators:
+ * @{
+ */
+ ir_function_signature *unop(builtin_available_predicate avail,
+ ir_expression_operation opcode,
+ const glsl_type *return_type,
+ const glsl_type *param_type);
+ ir_function_signature *binop(builtin_available_predicate avail,
+ ir_expression_operation opcode,
+ const glsl_type *return_type,
+ const glsl_type *param0_type,
+ const glsl_type *param1_type,
+ bool swap_operands = false);
+
+#define B0(X) ir_function_signature *_##X();
+#define B1(X) ir_function_signature *_##X(const glsl_type *);
+#define B2(X) ir_function_signature *_##X(const glsl_type *, const glsl_type *);
+#define B3(X) ir_function_signature *_##X(const glsl_type *, const glsl_type *, const glsl_type *);
+#define BA1(X) ir_function_signature *_##X(builtin_available_predicate, const glsl_type *);
+#define BA2(X) ir_function_signature *_##X(builtin_available_predicate, const glsl_type *, const glsl_type *);
+ B1(radians)
+ B1(degrees)
+ B1(sin)
+ B1(cos)
+ B1(tan)
+ B1(asin)
+ B1(acos)
+ B1(atan2)
+ B1(atan)
+ B1(atan2_op)
+ B1(atan_op)
+ B1(sinh)
+ B1(cosh)
+ B1(tanh)
+ B1(asinh)
+ B1(acosh)
+ B1(atanh)
+ B1(pow)
+ B1(exp)
+ B1(log)
+ B1(exp2)
+ B1(log2)
+ BA1(sqrt)
+ BA1(inversesqrt)
+ BA1(abs)
+ BA1(sign)
+ BA1(floor)
+ BA1(truncate)
+ BA1(trunc)
+ BA1(round)
+ BA1(roundEven)
+ BA1(ceil)
+ BA1(fract)
+ BA2(mod)
+ BA1(modf)
+ BA2(min)
+ BA2(max)
+ BA2(clamp)
+ BA2(mix_lrp)
+ ir_function_signature *_mix_sel(builtin_available_predicate avail,
+ const glsl_type *val_type,
+ const glsl_type *blend_type);
+ BA2(step)
+ BA2(smoothstep)
+ BA1(isnan)
+ BA1(isinf)
+ B1(floatBitsToInt)
+ B1(floatBitsToUint)
+ B1(intBitsToFloat)
+ B1(uintBitsToFloat)
+
+ BA1(doubleBitsToInt64)
+ BA1(doubleBitsToUint64)
+ BA1(int64BitsToDouble)
+ BA1(uint64BitsToDouble)
+
+ ir_function_signature *_packUnorm2x16(builtin_available_predicate avail);
+ ir_function_signature *_packSnorm2x16(builtin_available_predicate avail);
+ ir_function_signature *_packUnorm4x8(builtin_available_predicate avail);
+ ir_function_signature *_packSnorm4x8(builtin_available_predicate avail);
+ ir_function_signature *_unpackUnorm2x16(builtin_available_predicate avail);
+ ir_function_signature *_unpackSnorm2x16(builtin_available_predicate avail);
+ ir_function_signature *_unpackUnorm4x8(builtin_available_predicate avail);
+ ir_function_signature *_unpackSnorm4x8(builtin_available_predicate avail);
+ ir_function_signature *_packHalf2x16(builtin_available_predicate avail);
+ ir_function_signature *_unpackHalf2x16(builtin_available_predicate avail);
+ ir_function_signature *_packDouble2x32(builtin_available_predicate avail);
+ ir_function_signature *_unpackDouble2x32(builtin_available_predicate avail);
+ ir_function_signature *_packInt2x32(builtin_available_predicate avail);
+ ir_function_signature *_unpackInt2x32(builtin_available_predicate avail);
+ ir_function_signature *_packUint2x32(builtin_available_predicate avail);
+ ir_function_signature *_unpackUint2x32(builtin_available_predicate avail);
+
+ BA1(length)
+ BA1(distance)
+ BA1(dot)
+ BA1(cross)
+ BA1(normalize)
+ B0(ftransform)
+ BA1(faceforward)
+ BA1(reflect)
+ BA1(refract)
+ BA1(matrixCompMult)
+ BA1(outerProduct)
+ BA1(determinant_mat2)
+ BA1(determinant_mat3)
+ BA1(determinant_mat4)
+ BA1(inverse_mat2)
+ BA1(inverse_mat3)
+ BA1(inverse_mat4)
+ BA1(transpose)
+ BA1(lessThan)
+ BA1(lessThanEqual)
+ BA1(greaterThan)
+ BA1(greaterThanEqual)
+ BA1(equal)
+ BA1(notEqual)
+ B1(any)
+ B1(all)
+ B1(not)
+ BA2(textureSize)
+ BA1(textureSamples)
+
+/** Flags to _texture() */
+#define TEX_PROJECT 1
+#define TEX_OFFSET 2
+#define TEX_COMPONENT 4
+#define TEX_OFFSET_NONCONST 8
+#define TEX_OFFSET_ARRAY 16
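+
+/* These flags are OR'd together when a signature is declared; e.g. the
+ * projective-offset lookups registered in create_builtins() pass
+ * TEX_PROJECT | TEX_OFFSET to _texture(). (Illustrative combination,
+ * inferred from the flag definitions above.)
+ */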
+
+ ir_function_signature *_texture(ir_texture_opcode opcode,
+ builtin_available_predicate avail,
+ const glsl_type *return_type,
+ const glsl_type *sampler_type,
+ const glsl_type *coord_type,
+ int flags = 0);
+ ir_function_signature *_textureCubeArrayShadow(ir_texture_opcode opcode,
+ builtin_available_predicate avail,
+ const glsl_type *x);
+ ir_function_signature *_texelFetch(builtin_available_predicate avail,
+ const glsl_type *return_type,
+ const glsl_type *sampler_type,
+ const glsl_type *coord_type,
+ const glsl_type *offset_type = NULL);
+
+ B0(EmitVertex)
+ B0(EndPrimitive)
+ ir_function_signature *_EmitStreamVertex(builtin_available_predicate avail,
+ const glsl_type *stream_type);
+ ir_function_signature *_EndStreamPrimitive(builtin_available_predicate avail,
+ const glsl_type *stream_type);
+ B0(barrier)
+
+ BA2(textureQueryLod)
+ BA1(textureQueryLevels)
+ BA2(textureSamplesIdentical)
+ B1(dFdx)
+ B1(dFdy)
+ B1(fwidth)
+ B1(dFdxCoarse)
+ B1(dFdyCoarse)
+ B1(fwidthCoarse)
+ B1(dFdxFine)
+ B1(dFdyFine)
+ B1(fwidthFine)
+ B1(noise1)
+ B1(noise2)
+ B1(noise3)
+ B1(noise4)
+
+ B1(bitfieldExtract)
+ B1(bitfieldInsert)
+ B1(bitfieldReverse)
+ B1(bitCount)
+ B1(findLSB)
+ B1(findMSB)
+ BA1(countLeadingZeros)
+ BA1(countTrailingZeros)
+ BA1(fma)
+ B2(ldexp)
+ B2(frexp)
+ B2(dfrexp)
+ B1(uaddCarry)
+ B1(usubBorrow)
+ BA1(addSaturate)
+ BA1(subtractSaturate)
+ BA1(absoluteDifference)
+ BA1(average)
+ BA1(averageRounded)
+ B1(mulExtended)
+ BA1(multiply32x16)
+ B1(interpolateAtCentroid)
+ B1(interpolateAtOffset)
+ B1(interpolateAtSample)
+
+ ir_function_signature *_atomic_counter_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_atomic_counter_intrinsic1(builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_atomic_counter_intrinsic2(builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_atomic_counter_op(const char *intrinsic,
+ builtin_available_predicate avail);
+ ir_function_signature *_atomic_counter_op1(const char *intrinsic,
+ builtin_available_predicate avail);
+ ir_function_signature *_atomic_counter_op2(const char *intrinsic,
+ builtin_available_predicate avail);
+
+ ir_function_signature *_atomic_intrinsic2(builtin_available_predicate avail,
+ const glsl_type *type,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_atomic_op2(const char *intrinsic,
+ builtin_available_predicate avail,
+ const glsl_type *type);
+ ir_function_signature *_atomic_intrinsic3(builtin_available_predicate avail,
+ const glsl_type *type,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_atomic_op3(const char *intrinsic,
+ builtin_available_predicate avail,
+ const glsl_type *type);
+
+ B1(min3)
+ B1(max3)
+ B1(mid3)
+
+ ir_function_signature *_image_prototype(const glsl_type *image_type,
+ unsigned num_arguments,
+ unsigned flags);
+ ir_function_signature *_image_size_prototype(const glsl_type *image_type,
+ unsigned num_arguments,
+ unsigned flags);
+ ir_function_signature *_image_samples_prototype(const glsl_type *image_type,
+ unsigned num_arguments,
+ unsigned flags);
+ ir_function_signature *_image(image_prototype_ctr prototype,
+ const glsl_type *image_type,
+ const char *intrinsic_name,
+ unsigned num_arguments,
+ unsigned flags,
+ enum ir_intrinsic_id id);
+
+ ir_function_signature *_memory_barrier_intrinsic(
+ builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_memory_barrier(const char *intrinsic_name,
+ builtin_available_predicate avail);
+
+ ir_function_signature *_ballot_intrinsic();
+ ir_function_signature *_ballot();
+ ir_function_signature *_read_first_invocation_intrinsic(const glsl_type *type);
+ ir_function_signature *_read_first_invocation(const glsl_type *type);
+ ir_function_signature *_read_invocation_intrinsic(const glsl_type *type);
+ ir_function_signature *_read_invocation(const glsl_type *type);
+
+ ir_function_signature *_invocation_interlock_intrinsic(
+ builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_invocation_interlock(
+ const char *intrinsic_name,
+ builtin_available_predicate avail);
+
+ ir_function_signature *_shader_clock_intrinsic(builtin_available_predicate avail,
+ const glsl_type *type);
+ ir_function_signature *_shader_clock(builtin_available_predicate avail,
+ const glsl_type *type);
+
+ ir_function_signature *_vote_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id);
+ ir_function_signature *_vote(const char *intrinsic_name,
+ builtin_available_predicate avail);
+
+ ir_function_signature *_helper_invocation_intrinsic();
+ ir_function_signature *_helper_invocation();
+
+#undef B0
+#undef B1
+#undef B2
+#undef B3
+#undef BA1
+#undef BA2
+ /** @} */
+};
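+
+/* Sketch of the intended lifecycle (usage assumed from the methods above;
+ * the entry points that actually drive this live elsewhere in the file):
+ *
+ *    builtin_builder builtins;
+ *    builtins.initialize();   // generate IR for every signature, once
+ *    // ... each compile resolves calls via builtins.find(...) ...
+ *    builtins.release();      // free the IR and drop type references
+ */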
+
+enum image_function_flags {
+ IMAGE_FUNCTION_EMIT_STUB = (1 << 0),
+ IMAGE_FUNCTION_RETURNS_VOID = (1 << 1),
+ IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE = (1 << 2),
+ IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE = (1 << 3),
+ IMAGE_FUNCTION_READ_ONLY = (1 << 4),
+ IMAGE_FUNCTION_WRITE_ONLY = (1 << 5),
+ IMAGE_FUNCTION_AVAIL_ATOMIC = (1 << 6),
+ IMAGE_FUNCTION_MS_ONLY = (1 << 7),
+ IMAGE_FUNCTION_AVAIL_ATOMIC_EXCHANGE = (1 << 8),
+ IMAGE_FUNCTION_AVAIL_ATOMIC_ADD = (1 << 9),
+ IMAGE_FUNCTION_EXT_ONLY = (1 << 10),
+};
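+
+/* As with the TEX_* flags, these are OR'd together when an image built-in
+ * is registered; e.g. a read-only multisample variant would pass
+ * IMAGE_FUNCTION_READ_ONLY | IMAGE_FUNCTION_MS_ONLY (an illustrative
+ * combination) to add_image_function().
+ */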
+
+} /* anonymous namespace */
+
+/**
+ * Core builtin_builder functionality:
+ * @{
+ */
+builtin_builder::builtin_builder()
+ : shader(NULL)
+{
+ mem_ctx = NULL;
+}
+
+builtin_builder::~builtin_builder()
+{
+ ralloc_free(mem_ctx);
+}
+
+ir_function_signature *
+builtin_builder::find(_mesa_glsl_parse_state *state,
+ const char *name, exec_list *actual_parameters)
+{
+ /* The shader currently being compiled requested a built-in function;
+ * it needs to link against builtin_builder::shader in order to get
+ * that function's definition.
+ *
+ * Even if we don't find a matching signature, we still need to do this so
+ * that the "no matching signature" error will list potential candidates
+ * from the available built-ins.
+ */
+ state->uses_builtin_functions = true;
+
+ ir_function *f = shader->symbols->get_function(name);
+ if (f == NULL)
+ return NULL;
+
+ ir_function_signature *sig =
+ f->matching_signature(state, actual_parameters, true);
+ if (sig == NULL)
+ return NULL;
+
+ return sig;
+}
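+
+/* A sketch of the expected call pattern (hypothetical caller; "builtins"
+ * stands in for the module's builtin_builder instance):
+ *
+ *    exec_list actual_parameters;   // built from the call's arguments
+ *    ir_function_signature *sig =
+ *       builtins.find(state, "mix", &actual_parameters);
+ *
+ * Even when sig comes back NULL, uses_builtin_functions has been set, so
+ * the "no matching signature" error can list candidates from
+ * builtins.shader.
+ */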
+
+void
+builtin_builder::initialize()
+{
+ /* If already initialized, don't do it again. */
+ if (mem_ctx != NULL)
+ return;
+
+ glsl_type_singleton_init_or_ref();
+
+ mem_ctx = ralloc_context(NULL);
+ create_shader();
+ create_intrinsics();
+ create_builtins();
+}
+
+void
+builtin_builder::release()
+{
+ ralloc_free(mem_ctx);
+ mem_ctx = NULL;
+
+ ralloc_free(shader);
+ shader = NULL;
+
+ glsl_type_singleton_decref();
+}
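+
+/* Note: release() undoes initialize(): it frees the generated IR and
+ * drops the glsl_type singleton reference taken there, so each
+ * initialize() should be balanced by exactly one release().
+ */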
+
+void
+builtin_builder::create_shader()
+{
+ /* The target doesn't actually matter. There's no target for generic
+ * GLSL utility code that could be linked against any stage, so just
+ * arbitrarily pick GL_VERTEX_SHADER.
+ */
+ shader = _mesa_new_shader(0, MESA_SHADER_VERTEX);
+ shader->symbols = new(mem_ctx) glsl_symbol_table;
+}
+
+/** @} */
+
+/**
+ * Create ir_function and ir_function_signature objects for each
+ * intrinsic.
+ */
+void
+builtin_builder::create_intrinsics()
+{
+ add_function("__intrinsic_atomic_read",
+ _atomic_counter_intrinsic(shader_atomic_counters,
+ ir_intrinsic_atomic_counter_read),
+ NULL);
+ add_function("__intrinsic_atomic_increment",
+ _atomic_counter_intrinsic(shader_atomic_counters,
+ ir_intrinsic_atomic_counter_increment),
+ NULL);
+ add_function("__intrinsic_atomic_predecrement",
+ _atomic_counter_intrinsic(shader_atomic_counters,
+ ir_intrinsic_atomic_counter_predecrement),
+ NULL);
+
+ add_function("__intrinsic_atomic_add",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_add),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_add),
+ _atomic_intrinsic2(NV_shader_atomic_float_supported,
+ glsl_type::float_type,
+ ir_intrinsic_generic_atomic_add),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_add),
+ NULL);
+ add_function("__intrinsic_atomic_min",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_min),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_min),
+ _atomic_intrinsic2(INTEL_shader_atomic_float_minmax_supported,
+ glsl_type::float_type,
+ ir_intrinsic_generic_atomic_min),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_min),
+ NULL);
+ add_function("__intrinsic_atomic_max",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_max),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_max),
+ _atomic_intrinsic2(INTEL_shader_atomic_float_minmax_supported,
+ glsl_type::float_type,
+ ir_intrinsic_generic_atomic_max),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_max),
+ NULL);
+ add_function("__intrinsic_atomic_and",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_and),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_and),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_and),
+ NULL);
+ add_function("__intrinsic_atomic_or",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_or),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_or),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_or),
+ NULL);
+ add_function("__intrinsic_atomic_xor",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_xor),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_xor),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_xor),
+ NULL);
+ add_function("__intrinsic_atomic_exchange",
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_exchange),
+ _atomic_intrinsic2(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_exchange),
+ _atomic_intrinsic2(NV_shader_atomic_float_supported,
+ glsl_type::float_type,
+ ir_intrinsic_generic_atomic_exchange),
+ _atomic_counter_intrinsic1(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_exchange),
+ NULL);
+ add_function("__intrinsic_atomic_comp_swap",
+ _atomic_intrinsic3(buffer_atomics_supported,
+ glsl_type::uint_type,
+ ir_intrinsic_generic_atomic_comp_swap),
+ _atomic_intrinsic3(buffer_atomics_supported,
+ glsl_type::int_type,
+ ir_intrinsic_generic_atomic_comp_swap),
+ _atomic_intrinsic3(INTEL_shader_atomic_float_minmax_supported,
+ glsl_type::float_type,
+ ir_intrinsic_generic_atomic_comp_swap),
+ _atomic_counter_intrinsic2(shader_atomic_counter_ops_or_v460_desktop,
+ ir_intrinsic_atomic_counter_comp_swap),
+ NULL);
+
+ add_image_functions(false);
+
+ add_function("__intrinsic_memory_barrier",
+ _memory_barrier_intrinsic(shader_image_load_store,
+ ir_intrinsic_memory_barrier),
+ NULL);
+ add_function("__intrinsic_group_memory_barrier",
+ _memory_barrier_intrinsic(compute_shader,
+ ir_intrinsic_group_memory_barrier),
+ NULL);
+ add_function("__intrinsic_memory_barrier_atomic_counter",
+ _memory_barrier_intrinsic(compute_shader_supported,
+ ir_intrinsic_memory_barrier_atomic_counter),
+ NULL);
+ add_function("__intrinsic_memory_barrier_buffer",
+ _memory_barrier_intrinsic(compute_shader_supported,
+ ir_intrinsic_memory_barrier_buffer),
+ NULL);
+ add_function("__intrinsic_memory_barrier_image",
+ _memory_barrier_intrinsic(compute_shader_supported,
+ ir_intrinsic_memory_barrier_image),
+ NULL);
+ add_function("__intrinsic_memory_barrier_shared",
+ _memory_barrier_intrinsic(compute_shader,
+ ir_intrinsic_memory_barrier_shared),
+ NULL);
+
+ add_function("__intrinsic_begin_invocation_interlock",
+ _invocation_interlock_intrinsic(
+ supports_arb_fragment_shader_interlock,
+ ir_intrinsic_begin_invocation_interlock), NULL);
+
+ add_function("__intrinsic_end_invocation_interlock",
+ _invocation_interlock_intrinsic(
+ supports_arb_fragment_shader_interlock,
+ ir_intrinsic_end_invocation_interlock), NULL);
+
+ add_function("__intrinsic_shader_clock",
+ _shader_clock_intrinsic(shader_clock,
+ glsl_type::uvec2_type),
+ NULL);
+
+ add_function("__intrinsic_vote_all",
+ _vote_intrinsic(vote_or_v460_desktop, ir_intrinsic_vote_all),
+ NULL);
+ add_function("__intrinsic_vote_any",
+ _vote_intrinsic(vote_or_v460_desktop, ir_intrinsic_vote_any),
+ NULL);
+ add_function("__intrinsic_vote_eq",
+ _vote_intrinsic(vote_or_v460_desktop, ir_intrinsic_vote_eq),
+ NULL);
+
+ add_function("__intrinsic_ballot", _ballot_intrinsic(), NULL);
+
+ add_function("__intrinsic_read_invocation",
+ _read_invocation_intrinsic(glsl_type::float_type),
+ _read_invocation_intrinsic(glsl_type::vec2_type),
+ _read_invocation_intrinsic(glsl_type::vec3_type),
+ _read_invocation_intrinsic(glsl_type::vec4_type),
+
+ _read_invocation_intrinsic(glsl_type::int_type),
+ _read_invocation_intrinsic(glsl_type::ivec2_type),
+ _read_invocation_intrinsic(glsl_type::ivec3_type),
+ _read_invocation_intrinsic(glsl_type::ivec4_type),
+
+ _read_invocation_intrinsic(glsl_type::uint_type),
+ _read_invocation_intrinsic(glsl_type::uvec2_type),
+ _read_invocation_intrinsic(glsl_type::uvec3_type),
+ _read_invocation_intrinsic(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("__intrinsic_read_first_invocation",
+ _read_first_invocation_intrinsic(glsl_type::float_type),
+ _read_first_invocation_intrinsic(glsl_type::vec2_type),
+ _read_first_invocation_intrinsic(glsl_type::vec3_type),
+ _read_first_invocation_intrinsic(glsl_type::vec4_type),
+
+ _read_first_invocation_intrinsic(glsl_type::int_type),
+ _read_first_invocation_intrinsic(glsl_type::ivec2_type),
+ _read_first_invocation_intrinsic(glsl_type::ivec3_type),
+ _read_first_invocation_intrinsic(glsl_type::ivec4_type),
+
+ _read_first_invocation_intrinsic(glsl_type::uint_type),
+ _read_first_invocation_intrinsic(glsl_type::uvec2_type),
+ _read_first_invocation_intrinsic(glsl_type::uvec3_type),
+ _read_first_invocation_intrinsic(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("__intrinsic_helper_invocation",
+ _helper_invocation_intrinsic(), NULL);
+}
+
+/**
+ * Create ir_function and ir_function_signature objects for each built-in.
+ *
+ * Contains a list of every available built-in.
+ */
+void
+builtin_builder::create_builtins()
+{
+#define F(NAME) \
+ add_function(#NAME, \
+ _##NAME(glsl_type::float_type), \
+ _##NAME(glsl_type::vec2_type), \
+ _##NAME(glsl_type::vec3_type), \
+ _##NAME(glsl_type::vec4_type), \
+ NULL);
+
+#define FD(NAME) \
+ add_function(#NAME, \
+ _##NAME(always_available, glsl_type::float_type), \
+ _##NAME(always_available, glsl_type::vec2_type), \
+ _##NAME(always_available, glsl_type::vec3_type), \
+ _##NAME(always_available, glsl_type::vec4_type), \
+ _##NAME(fp64, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type), \
+ NULL);
+
+#define FD130(NAME) \
+ add_function(#NAME, \
+ _##NAME(v130, glsl_type::float_type), \
+ _##NAME(v130, glsl_type::vec2_type), \
+ _##NAME(v130, glsl_type::vec3_type), \
+ _##NAME(v130, glsl_type::vec4_type), \
+ _##NAME(fp64, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type), \
+ NULL);
+
+#define FDGS5(NAME) \
+ add_function(#NAME, \
+ _##NAME(gpu_shader5_es, glsl_type::float_type), \
+ _##NAME(gpu_shader5_es, glsl_type::vec2_type), \
+ _##NAME(gpu_shader5_es, glsl_type::vec3_type), \
+ _##NAME(gpu_shader5_es, glsl_type::vec4_type), \
+ _##NAME(fp64, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type), \
+ NULL);
+
+#define FI(NAME) \
+ add_function(#NAME, \
+ _##NAME(glsl_type::float_type), \
+ _##NAME(glsl_type::vec2_type), \
+ _##NAME(glsl_type::vec3_type), \
+ _##NAME(glsl_type::vec4_type), \
+ _##NAME(glsl_type::int_type), \
+ _##NAME(glsl_type::ivec2_type), \
+ _##NAME(glsl_type::ivec3_type), \
+ _##NAME(glsl_type::ivec4_type), \
+ NULL);
+
+#define FI64(NAME) \
+ add_function(#NAME, \
+ _##NAME(always_available, glsl_type::float_type), \
+ _##NAME(always_available, glsl_type::vec2_type), \
+ _##NAME(always_available, glsl_type::vec3_type), \
+ _##NAME(always_available, glsl_type::vec4_type), \
+ _##NAME(always_available, glsl_type::int_type), \
+ _##NAME(always_available, glsl_type::ivec2_type), \
+ _##NAME(always_available, glsl_type::ivec3_type), \
+ _##NAME(always_available, glsl_type::ivec4_type), \
+ _##NAME(fp64, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type), \
+ _##NAME(int64, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec2_type), \
+ _##NAME(int64, glsl_type::i64vec3_type), \
+ _##NAME(int64, glsl_type::i64vec4_type), \
+ NULL);
+
+#define FIUD_VEC(NAME) \
+ add_function(#NAME, \
+ _##NAME(always_available, glsl_type::vec2_type), \
+ _##NAME(always_available, glsl_type::vec3_type), \
+ _##NAME(always_available, glsl_type::vec4_type), \
+ \
+ _##NAME(always_available, glsl_type::ivec2_type), \
+ _##NAME(always_available, glsl_type::ivec3_type), \
+ _##NAME(always_available, glsl_type::ivec4_type), \
+ \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec2_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec3_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec4_type), \
+ _##NAME(fp64, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type), \
+ _##NAME(int64, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec2_type), \
+ _##NAME(int64, glsl_type::i64vec3_type), \
+ _##NAME(int64, glsl_type::i64vec4_type), \
+ _##NAME(int64, glsl_type::uint64_t_type), \
+ _##NAME(int64, glsl_type::u64vec2_type), \
+ _##NAME(int64, glsl_type::u64vec3_type), \
+ _##NAME(int64, glsl_type::u64vec4_type), \
+ NULL);
+
+#define IU(NAME) \
+ add_function(#NAME, \
+ _##NAME(glsl_type::int_type), \
+ _##NAME(glsl_type::ivec2_type), \
+ _##NAME(glsl_type::ivec3_type), \
+ _##NAME(glsl_type::ivec4_type), \
+ \
+ _##NAME(glsl_type::uint_type), \
+ _##NAME(glsl_type::uvec2_type), \
+ _##NAME(glsl_type::uvec3_type), \
+ _##NAME(glsl_type::uvec4_type), \
+ NULL);
+
+#define FIUBD_VEC(NAME) \
+ add_function(#NAME, \
+ _##NAME(always_available, glsl_type::vec2_type), \
+ _##NAME(always_available, glsl_type::vec3_type), \
+ _##NAME(always_available, glsl_type::vec4_type), \
+ \
+ _##NAME(always_available, glsl_type::ivec2_type), \
+ _##NAME(always_available, glsl_type::ivec3_type), \
+ _##NAME(always_available, glsl_type::ivec4_type), \
+ \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec2_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec3_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec4_type), \
+ \
+ _##NAME(always_available, glsl_type::bvec2_type), \
+ _##NAME(always_available, glsl_type::bvec3_type), \
+ _##NAME(always_available, glsl_type::bvec4_type), \
+ \
+ _##NAME(fp64, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type), \
+ _##NAME(int64, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec2_type), \
+ _##NAME(int64, glsl_type::i64vec3_type), \
+ _##NAME(int64, glsl_type::i64vec4_type), \
+ _##NAME(int64, glsl_type::uint64_t_type), \
+ _##NAME(int64, glsl_type::u64vec2_type), \
+ _##NAME(int64, glsl_type::u64vec3_type), \
+ _##NAME(int64, glsl_type::u64vec4_type), \
+ NULL);
+
+#define FIUD2_MIXED(NAME) \
+ add_function(#NAME, \
+ _##NAME(always_available, glsl_type::float_type, glsl_type::float_type), \
+ _##NAME(always_available, glsl_type::vec2_type, glsl_type::float_type), \
+ _##NAME(always_available, glsl_type::vec3_type, glsl_type::float_type), \
+ _##NAME(always_available, glsl_type::vec4_type, glsl_type::float_type), \
+ \
+ _##NAME(always_available, glsl_type::vec2_type, glsl_type::vec2_type), \
+ _##NAME(always_available, glsl_type::vec3_type, glsl_type::vec3_type), \
+ _##NAME(always_available, glsl_type::vec4_type, glsl_type::vec4_type), \
+ \
+ _##NAME(always_available, glsl_type::int_type, glsl_type::int_type), \
+ _##NAME(always_available, glsl_type::ivec2_type, glsl_type::int_type), \
+ _##NAME(always_available, glsl_type::ivec3_type, glsl_type::int_type), \
+ _##NAME(always_available, glsl_type::ivec4_type, glsl_type::int_type), \
+ \
+ _##NAME(always_available, glsl_type::ivec2_type, glsl_type::ivec2_type), \
+ _##NAME(always_available, glsl_type::ivec3_type, glsl_type::ivec3_type), \
+ _##NAME(always_available, glsl_type::ivec4_type, glsl_type::ivec4_type), \
+ \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uint_type, glsl_type::uint_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec2_type, glsl_type::uint_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec3_type, glsl_type::uint_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec4_type, glsl_type::uint_type), \
+ \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec2_type, glsl_type::uvec2_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec3_type, glsl_type::uvec3_type), \
+ _##NAME(v130_or_gpu_shader4, glsl_type::uvec4_type, glsl_type::uvec4_type), \
+ \
+ _##NAME(fp64, glsl_type::double_type, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec2_type, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec3_type, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec4_type, glsl_type::double_type), \
+ _##NAME(fp64, glsl_type::dvec2_type, glsl_type::dvec2_type), \
+ _##NAME(fp64, glsl_type::dvec3_type, glsl_type::dvec3_type), \
+ _##NAME(fp64, glsl_type::dvec4_type, glsl_type::dvec4_type), \
+ \
+ _##NAME(int64, glsl_type::int64_t_type, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec2_type, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec3_type, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec4_type, glsl_type::int64_t_type), \
+ _##NAME(int64, glsl_type::i64vec2_type, glsl_type::i64vec2_type), \
+ _##NAME(int64, glsl_type::i64vec3_type, glsl_type::i64vec3_type), \
+ _##NAME(int64, glsl_type::i64vec4_type, glsl_type::i64vec4_type), \
+ _##NAME(int64, glsl_type::uint64_t_type, glsl_type::uint64_t_type), \
+ _##NAME(int64, glsl_type::u64vec2_type, glsl_type::uint64_t_type), \
+ _##NAME(int64, glsl_type::u64vec3_type, glsl_type::uint64_t_type), \
+ _##NAME(int64, glsl_type::u64vec4_type, glsl_type::uint64_t_type), \
+ _##NAME(int64, glsl_type::u64vec2_type, glsl_type::u64vec2_type), \
+ _##NAME(int64, glsl_type::u64vec3_type, glsl_type::u64vec3_type), \
+ _##NAME(int64, glsl_type::u64vec4_type, glsl_type::u64vec4_type), \
+ NULL);
+
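+ /* For instance, F(sin) below expands to:
+ *
+ *    add_function("sin",
+ *                 _sin(glsl_type::float_type),
+ *                 _sin(glsl_type::vec2_type),
+ *                 _sin(glsl_type::vec3_type),
+ *                 _sin(glsl_type::vec4_type),
+ *                 NULL);
+ */
+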
+ F(radians)
+ F(degrees)
+ F(sin)
+ F(cos)
+ F(tan)
+ F(asin)
+ F(acos)
+
+ add_function("atan",
+ _atan_op(glsl_type::float_type),
+ _atan_op(glsl_type::vec2_type),
+ _atan_op(glsl_type::vec3_type),
+ _atan_op(glsl_type::vec4_type),
+ _atan2_op(glsl_type::float_type),
+ _atan2_op(glsl_type::vec2_type),
+ _atan2_op(glsl_type::vec3_type),
+ _atan2_op(glsl_type::vec4_type),
+ NULL);
+
+ F(sinh)
+ F(cosh)
+ F(tanh)
+ F(asinh)
+ F(acosh)
+ F(atanh)
+ F(pow)
+ F(exp)
+ F(log)
+ F(exp2)
+ F(log2)
+ FD(sqrt)
+ FD(inversesqrt)
+ FI64(abs)
+ FI64(sign)
+ FD(floor)
+ FD(trunc)
+ FD(round)
+ FD(roundEven)
+ FD(ceil)
+ FD(fract)
+
+ add_function("truncate",
+ _truncate(gpu_shader4, glsl_type::float_type),
+ _truncate(gpu_shader4, glsl_type::vec2_type),
+ _truncate(gpu_shader4, glsl_type::vec3_type),
+ _truncate(gpu_shader4, glsl_type::vec4_type),
+ NULL);
+
+ add_function("mod",
+ _mod(always_available, glsl_type::float_type, glsl_type::float_type),
+ _mod(always_available, glsl_type::vec2_type, glsl_type::float_type),
+ _mod(always_available, glsl_type::vec3_type, glsl_type::float_type),
+ _mod(always_available, glsl_type::vec4_type, glsl_type::float_type),
+
+ _mod(always_available, glsl_type::vec2_type, glsl_type::vec2_type),
+ _mod(always_available, glsl_type::vec3_type, glsl_type::vec3_type),
+ _mod(always_available, glsl_type::vec4_type, glsl_type::vec4_type),
+
+ _mod(fp64, glsl_type::double_type, glsl_type::double_type),
+ _mod(fp64, glsl_type::dvec2_type, glsl_type::double_type),
+ _mod(fp64, glsl_type::dvec3_type, glsl_type::double_type),
+ _mod(fp64, glsl_type::dvec4_type, glsl_type::double_type),
+
+ _mod(fp64, glsl_type::dvec2_type, glsl_type::dvec2_type),
+ _mod(fp64, glsl_type::dvec3_type, glsl_type::dvec3_type),
+ _mod(fp64, glsl_type::dvec4_type, glsl_type::dvec4_type),
+ NULL);
+
+ FD(modf)
+
+ FIUD2_MIXED(min)
+ FIUD2_MIXED(max)
+ FIUD2_MIXED(clamp)
+
+ add_function("mix",
+ _mix_lrp(always_available, glsl_type::float_type, glsl_type::float_type),
+ _mix_lrp(always_available, glsl_type::vec2_type, glsl_type::float_type),
+ _mix_lrp(always_available, glsl_type::vec3_type, glsl_type::float_type),
+ _mix_lrp(always_available, glsl_type::vec4_type, glsl_type::float_type),
+
+ _mix_lrp(always_available, glsl_type::vec2_type, glsl_type::vec2_type),
+ _mix_lrp(always_available, glsl_type::vec3_type, glsl_type::vec3_type),
+ _mix_lrp(always_available, glsl_type::vec4_type, glsl_type::vec4_type),
+
+ _mix_lrp(fp64, glsl_type::double_type, glsl_type::double_type),
+ _mix_lrp(fp64, glsl_type::dvec2_type, glsl_type::double_type),
+ _mix_lrp(fp64, glsl_type::dvec3_type, glsl_type::double_type),
+ _mix_lrp(fp64, glsl_type::dvec4_type, glsl_type::double_type),
+
+ _mix_lrp(fp64, glsl_type::dvec2_type, glsl_type::dvec2_type),
+ _mix_lrp(fp64, glsl_type::dvec3_type, glsl_type::dvec3_type),
+ _mix_lrp(fp64, glsl_type::dvec4_type, glsl_type::dvec4_type),
+
+ _mix_sel(v130, glsl_type::float_type, glsl_type::bool_type),
+ _mix_sel(v130, glsl_type::vec2_type, glsl_type::bvec2_type),
+ _mix_sel(v130, glsl_type::vec3_type, glsl_type::bvec3_type),
+ _mix_sel(v130, glsl_type::vec4_type, glsl_type::bvec4_type),
+
+ _mix_sel(fp64, glsl_type::double_type, glsl_type::bool_type),
+ _mix_sel(fp64, glsl_type::dvec2_type, glsl_type::bvec2_type),
+ _mix_sel(fp64, glsl_type::dvec3_type, glsl_type::bvec3_type),
+ _mix_sel(fp64, glsl_type::dvec4_type, glsl_type::bvec4_type),
+
+ _mix_sel(shader_integer_mix, glsl_type::int_type, glsl_type::bool_type),
+ _mix_sel(shader_integer_mix, glsl_type::ivec2_type, glsl_type::bvec2_type),
+ _mix_sel(shader_integer_mix, glsl_type::ivec3_type, glsl_type::bvec3_type),
+ _mix_sel(shader_integer_mix, glsl_type::ivec4_type, glsl_type::bvec4_type),
+
+ _mix_sel(shader_integer_mix, glsl_type::uint_type, glsl_type::bool_type),
+ _mix_sel(shader_integer_mix, glsl_type::uvec2_type, glsl_type::bvec2_type),
+ _mix_sel(shader_integer_mix, glsl_type::uvec3_type, glsl_type::bvec3_type),
+ _mix_sel(shader_integer_mix, glsl_type::uvec4_type, glsl_type::bvec4_type),
+
+ _mix_sel(shader_integer_mix, glsl_type::bool_type, glsl_type::bool_type),
+ _mix_sel(shader_integer_mix, glsl_type::bvec2_type, glsl_type::bvec2_type),
+ _mix_sel(shader_integer_mix, glsl_type::bvec3_type, glsl_type::bvec3_type),
+ _mix_sel(shader_integer_mix, glsl_type::bvec4_type, glsl_type::bvec4_type),
+
+ _mix_sel(int64, glsl_type::int64_t_type, glsl_type::bool_type),
+ _mix_sel(int64, glsl_type::i64vec2_type, glsl_type::bvec2_type),
+ _mix_sel(int64, glsl_type::i64vec3_type, glsl_type::bvec3_type),
+ _mix_sel(int64, glsl_type::i64vec4_type, glsl_type::bvec4_type),
+
+ _mix_sel(int64, glsl_type::uint64_t_type, glsl_type::bool_type),
+ _mix_sel(int64, glsl_type::u64vec2_type, glsl_type::bvec2_type),
+ _mix_sel(int64, glsl_type::u64vec3_type, glsl_type::bvec3_type),
+ _mix_sel(int64, glsl_type::u64vec4_type, glsl_type::bvec4_type),
+ NULL);
+
+ add_function("step",
+ _step(always_available, glsl_type::float_type, glsl_type::float_type),
+ _step(always_available, glsl_type::float_type, glsl_type::vec2_type),
+ _step(always_available, glsl_type::float_type, glsl_type::vec3_type),
+ _step(always_available, glsl_type::float_type, glsl_type::vec4_type),
+
+ _step(always_available, glsl_type::vec2_type, glsl_type::vec2_type),
+ _step(always_available, glsl_type::vec3_type, glsl_type::vec3_type),
+ _step(always_available, glsl_type::vec4_type, glsl_type::vec4_type),
+ _step(fp64, glsl_type::double_type, glsl_type::double_type),
+ _step(fp64, glsl_type::double_type, glsl_type::dvec2_type),
+ _step(fp64, glsl_type::double_type, glsl_type::dvec3_type),
+ _step(fp64, glsl_type::double_type, glsl_type::dvec4_type),
+
+ _step(fp64, glsl_type::dvec2_type, glsl_type::dvec2_type),
+ _step(fp64, glsl_type::dvec3_type, glsl_type::dvec3_type),
+ _step(fp64, glsl_type::dvec4_type, glsl_type::dvec4_type),
+ NULL);
+
+ add_function("smoothstep",
+ _smoothstep(always_available, glsl_type::float_type, glsl_type::float_type),
+ _smoothstep(always_available, glsl_type::float_type, glsl_type::vec2_type),
+ _smoothstep(always_available, glsl_type::float_type, glsl_type::vec3_type),
+ _smoothstep(always_available, glsl_type::float_type, glsl_type::vec4_type),
+
+ _smoothstep(always_available, glsl_type::vec2_type, glsl_type::vec2_type),
+ _smoothstep(always_available, glsl_type::vec3_type, glsl_type::vec3_type),
+ _smoothstep(always_available, glsl_type::vec4_type, glsl_type::vec4_type),
+ _smoothstep(fp64, glsl_type::double_type, glsl_type::double_type),
+ _smoothstep(fp64, glsl_type::double_type, glsl_type::dvec2_type),
+ _smoothstep(fp64, glsl_type::double_type, glsl_type::dvec3_type),
+ _smoothstep(fp64, glsl_type::double_type, glsl_type::dvec4_type),
+
+ _smoothstep(fp64, glsl_type::dvec2_type, glsl_type::dvec2_type),
+ _smoothstep(fp64, glsl_type::dvec3_type, glsl_type::dvec3_type),
+ _smoothstep(fp64, glsl_type::dvec4_type, glsl_type::dvec4_type),
+ NULL);
+
+ FD130(isnan)
+ FD130(isinf)
+
+ F(floatBitsToInt)
+ F(floatBitsToUint)
+ add_function("intBitsToFloat",
+ _intBitsToFloat(glsl_type::int_type),
+ _intBitsToFloat(glsl_type::ivec2_type),
+ _intBitsToFloat(glsl_type::ivec3_type),
+ _intBitsToFloat(glsl_type::ivec4_type),
+ NULL);
+ add_function("uintBitsToFloat",
+ _uintBitsToFloat(glsl_type::uint_type),
+ _uintBitsToFloat(glsl_type::uvec2_type),
+ _uintBitsToFloat(glsl_type::uvec3_type),
+ _uintBitsToFloat(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("doubleBitsToInt64",
+ _doubleBitsToInt64(int64_fp64, glsl_type::double_type),
+ _doubleBitsToInt64(int64_fp64, glsl_type::dvec2_type),
+ _doubleBitsToInt64(int64_fp64, glsl_type::dvec3_type),
+ _doubleBitsToInt64(int64_fp64, glsl_type::dvec4_type),
+ NULL);
+
+ add_function("doubleBitsToUint64",
+ _doubleBitsToUint64(int64_fp64, glsl_type::double_type),
+ _doubleBitsToUint64(int64_fp64, glsl_type::dvec2_type),
+ _doubleBitsToUint64(int64_fp64, glsl_type::dvec3_type),
+ _doubleBitsToUint64(int64_fp64, glsl_type::dvec4_type),
+ NULL);
+
+ add_function("int64BitsToDouble",
+ _int64BitsToDouble(int64_fp64, glsl_type::int64_t_type),
+ _int64BitsToDouble(int64_fp64, glsl_type::i64vec2_type),
+ _int64BitsToDouble(int64_fp64, glsl_type::i64vec3_type),
+ _int64BitsToDouble(int64_fp64, glsl_type::i64vec4_type),
+ NULL);
+
+ add_function("uint64BitsToDouble",
+ _uint64BitsToDouble(int64_fp64, glsl_type::uint64_t_type),
+ _uint64BitsToDouble(int64_fp64, glsl_type::u64vec2_type),
+ _uint64BitsToDouble(int64_fp64, glsl_type::u64vec3_type),
+ _uint64BitsToDouble(int64_fp64, glsl_type::u64vec4_type),
+ NULL);
+
+ add_function("packUnorm2x16", _packUnorm2x16(shader_packing_or_es3_or_gpu_shader5), NULL);
+ add_function("packSnorm2x16", _packSnorm2x16(shader_packing_or_es3), NULL);
+ add_function("packUnorm4x8", _packUnorm4x8(shader_packing_or_es31_or_gpu_shader5), NULL);
+ add_function("packSnorm4x8", _packSnorm4x8(shader_packing_or_es31_or_gpu_shader5), NULL);
+ add_function("unpackUnorm2x16", _unpackUnorm2x16(shader_packing_or_es3_or_gpu_shader5), NULL);
+ add_function("unpackSnorm2x16", _unpackSnorm2x16(shader_packing_or_es3), NULL);
+ add_function("unpackUnorm4x8", _unpackUnorm4x8(shader_packing_or_es31_or_gpu_shader5), NULL);
+ add_function("unpackSnorm4x8", _unpackSnorm4x8(shader_packing_or_es31_or_gpu_shader5), NULL);
+ add_function("packHalf2x16", _packHalf2x16(shader_packing_or_es3), NULL);
+ add_function("unpackHalf2x16", _unpackHalf2x16(shader_packing_or_es3), NULL);
+ add_function("packDouble2x32", _packDouble2x32(fp64), NULL);
+ add_function("unpackDouble2x32", _unpackDouble2x32(fp64), NULL);
+
+ add_function("packInt2x32", _packInt2x32(int64), NULL);
+ add_function("unpackInt2x32", _unpackInt2x32(int64), NULL);
+ add_function("packUint2x32", _packUint2x32(int64), NULL);
+ add_function("unpackUint2x32", _unpackUint2x32(int64), NULL);
+
+ FD(length)
+ FD(distance)
+ FD(dot)
+
+ add_function("cross", _cross(always_available, glsl_type::vec3_type),
+ _cross(fp64, glsl_type::dvec3_type), NULL);
+
+ FD(normalize)
+ add_function("ftransform", _ftransform(), NULL);
+ FD(faceforward)
+ FD(reflect)
+ FD(refract)
+ add_function("matrixCompMult",
+ _matrixCompMult(always_available, glsl_type::mat2_type),
+ _matrixCompMult(always_available, glsl_type::mat3_type),
+ _matrixCompMult(always_available, glsl_type::mat4_type),
+ _matrixCompMult(always_available, glsl_type::mat2x3_type),
+ _matrixCompMult(always_available, glsl_type::mat2x4_type),
+ _matrixCompMult(always_available, glsl_type::mat3x2_type),
+ _matrixCompMult(always_available, glsl_type::mat3x4_type),
+ _matrixCompMult(always_available, glsl_type::mat4x2_type),
+ _matrixCompMult(always_available, glsl_type::mat4x3_type),
+ _matrixCompMult(fp64, glsl_type::dmat2_type),
+ _matrixCompMult(fp64, glsl_type::dmat3_type),
+ _matrixCompMult(fp64, glsl_type::dmat4_type),
+ _matrixCompMult(fp64, glsl_type::dmat2x3_type),
+ _matrixCompMult(fp64, glsl_type::dmat2x4_type),
+ _matrixCompMult(fp64, glsl_type::dmat3x2_type),
+ _matrixCompMult(fp64, glsl_type::dmat3x4_type),
+ _matrixCompMult(fp64, glsl_type::dmat4x2_type),
+ _matrixCompMult(fp64, glsl_type::dmat4x3_type),
+ NULL);
+ add_function("outerProduct",
+ _outerProduct(v120, glsl_type::mat2_type),
+ _outerProduct(v120, glsl_type::mat3_type),
+ _outerProduct(v120, glsl_type::mat4_type),
+ _outerProduct(v120, glsl_type::mat2x3_type),
+ _outerProduct(v120, glsl_type::mat2x4_type),
+ _outerProduct(v120, glsl_type::mat3x2_type),
+ _outerProduct(v120, glsl_type::mat3x4_type),
+ _outerProduct(v120, glsl_type::mat4x2_type),
+ _outerProduct(v120, glsl_type::mat4x3_type),
+ _outerProduct(fp64, glsl_type::dmat2_type),
+ _outerProduct(fp64, glsl_type::dmat3_type),
+ _outerProduct(fp64, glsl_type::dmat4_type),
+ _outerProduct(fp64, glsl_type::dmat2x3_type),
+ _outerProduct(fp64, glsl_type::dmat2x4_type),
+ _outerProduct(fp64, glsl_type::dmat3x2_type),
+ _outerProduct(fp64, glsl_type::dmat3x4_type),
+ _outerProduct(fp64, glsl_type::dmat4x2_type),
+ _outerProduct(fp64, glsl_type::dmat4x3_type),
+ NULL);
+ add_function("determinant",
+ _determinant_mat2(v120, glsl_type::mat2_type),
+ _determinant_mat3(v120, glsl_type::mat3_type),
+ _determinant_mat4(v120, glsl_type::mat4_type),
+ _determinant_mat2(fp64, glsl_type::dmat2_type),
+ _determinant_mat3(fp64, glsl_type::dmat3_type),
+ _determinant_mat4(fp64, glsl_type::dmat4_type),
+ NULL);
+ add_function("inverse",
+ _inverse_mat2(v140_or_es3, glsl_type::mat2_type),
+ _inverse_mat3(v140_or_es3, glsl_type::mat3_type),
+ _inverse_mat4(v140_or_es3, glsl_type::mat4_type),
+ _inverse_mat2(fp64, glsl_type::dmat2_type),
+ _inverse_mat3(fp64, glsl_type::dmat3_type),
+ _inverse_mat4(fp64, glsl_type::dmat4_type),
+ NULL);
+ add_function("transpose",
+ _transpose(v120, glsl_type::mat2_type),
+ _transpose(v120, glsl_type::mat3_type),
+ _transpose(v120, glsl_type::mat4_type),
+ _transpose(v120, glsl_type::mat2x3_type),
+ _transpose(v120, glsl_type::mat2x4_type),
+ _transpose(v120, glsl_type::mat3x2_type),
+ _transpose(v120, glsl_type::mat3x4_type),
+ _transpose(v120, glsl_type::mat4x2_type),
+ _transpose(v120, glsl_type::mat4x3_type),
+ _transpose(fp64, glsl_type::dmat2_type),
+ _transpose(fp64, glsl_type::dmat3_type),
+ _transpose(fp64, glsl_type::dmat4_type),
+ _transpose(fp64, glsl_type::dmat2x3_type),
+ _transpose(fp64, glsl_type::dmat2x4_type),
+ _transpose(fp64, glsl_type::dmat3x2_type),
+ _transpose(fp64, glsl_type::dmat3x4_type),
+ _transpose(fp64, glsl_type::dmat4x2_type),
+ _transpose(fp64, glsl_type::dmat4x3_type),
+ NULL);
+ FIUD_VEC(lessThan)
+ FIUD_VEC(lessThanEqual)
+ FIUD_VEC(greaterThan)
+ FIUD_VEC(greaterThanEqual)
+ FIUBD_VEC(notEqual)
+ FIUBD_VEC(equal)
+
+ add_function("any",
+ _any(glsl_type::bvec2_type),
+ _any(glsl_type::bvec3_type),
+ _any(glsl_type::bvec4_type),
+ NULL);
+
+ add_function("all",
+ _all(glsl_type::bvec2_type),
+ _all(glsl_type::bvec3_type),
+ _all(glsl_type::bvec4_type),
+ NULL);
+
+ add_function("not",
+ _not(glsl_type::bvec2_type),
+ _not(glsl_type::bvec3_type),
+ _not(glsl_type::bvec4_type),
+ NULL);
+
+ add_function("textureSize",
+ _textureSize(v130, glsl_type::int_type, glsl_type::sampler1D_type),
+ _textureSize(v130, glsl_type::int_type, glsl_type::isampler1D_type),
+ _textureSize(v130, glsl_type::int_type, glsl_type::usampler1D_type),
+
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::sampler2D_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::isampler2D_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::usampler2D_type),
+
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::sampler3D_type),
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::isampler3D_type),
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::usampler3D_type),
+
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::samplerCube_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::isamplerCube_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::usamplerCube_type),
+
+ _textureSize(v130, glsl_type::int_type, glsl_type::sampler1DShadow_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::sampler2DShadow_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::samplerCubeShadow_type),
+
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::sampler1DArray_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::isampler1DArray_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::usampler1DArray_type),
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::sampler2DArray_type),
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::isampler2DArray_type),
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::usampler2DArray_type),
+
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::sampler1DArrayShadow_type),
+ _textureSize(v130, glsl_type::ivec3_type, glsl_type::sampler2DArrayShadow_type),
+
+ _textureSize(texture_cube_map_array, glsl_type::ivec3_type, glsl_type::samplerCubeArray_type),
+ _textureSize(texture_cube_map_array, glsl_type::ivec3_type, glsl_type::isamplerCubeArray_type),
+ _textureSize(texture_cube_map_array, glsl_type::ivec3_type, glsl_type::usamplerCubeArray_type),
+ _textureSize(texture_cube_map_array, glsl_type::ivec3_type, glsl_type::samplerCubeArrayShadow_type),
+
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::sampler2DRect_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::isampler2DRect_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::usampler2DRect_type),
+ _textureSize(v130, glsl_type::ivec2_type, glsl_type::sampler2DRectShadow_type),
+
+ _textureSize(texture_buffer, glsl_type::int_type, glsl_type::samplerBuffer_type),
+ _textureSize(texture_buffer, glsl_type::int_type, glsl_type::isamplerBuffer_type),
+ _textureSize(texture_buffer, glsl_type::int_type, glsl_type::usamplerBuffer_type),
+ _textureSize(texture_multisample, glsl_type::ivec2_type, glsl_type::sampler2DMS_type),
+ _textureSize(texture_multisample, glsl_type::ivec2_type, glsl_type::isampler2DMS_type),
+ _textureSize(texture_multisample, glsl_type::ivec2_type, glsl_type::usampler2DMS_type),
+
+ _textureSize(texture_multisample_array, glsl_type::ivec3_type, glsl_type::sampler2DMSArray_type),
+ _textureSize(texture_multisample_array, glsl_type::ivec3_type, glsl_type::isampler2DMSArray_type),
+ _textureSize(texture_multisample_array, glsl_type::ivec3_type, glsl_type::usampler2DMSArray_type),
+
+ _textureSize(texture_external_es3, glsl_type::ivec2_type, glsl_type::samplerExternalOES_type),
+ NULL);
+
+ add_function("textureSize1D",
+ _textureSize(gpu_shader4, glsl_type::int_type, glsl_type::sampler1D_type),
+ _textureSize(gpu_shader4_integer, glsl_type::int_type, glsl_type::isampler1D_type),
+ _textureSize(gpu_shader4_integer, glsl_type::int_type, glsl_type::usampler1D_type),
+ NULL);
+
+ add_function("textureSize2D",
+ _textureSize(gpu_shader4, glsl_type::ivec2_type, glsl_type::sampler2D_type),
+ _textureSize(gpu_shader4_integer, glsl_type::ivec2_type, glsl_type::isampler2D_type),
+ _textureSize(gpu_shader4_integer, glsl_type::ivec2_type, glsl_type::usampler2D_type),
+ NULL);
+
+ add_function("textureSize3D",
+ _textureSize(gpu_shader4, glsl_type::ivec3_type, glsl_type::sampler3D_type),
+ _textureSize(gpu_shader4_integer, glsl_type::ivec3_type, glsl_type::isampler3D_type),
+ _textureSize(gpu_shader4_integer, glsl_type::ivec3_type, glsl_type::usampler3D_type),
+ NULL);
+
+ add_function("textureSizeCube",
+ _textureSize(gpu_shader4, glsl_type::ivec2_type, glsl_type::samplerCube_type),
+ _textureSize(gpu_shader4_integer, glsl_type::ivec2_type, glsl_type::isamplerCube_type),
+ _textureSize(gpu_shader4_integer, glsl_type::ivec2_type, glsl_type::usamplerCube_type),
+ NULL);
+
+ add_function("textureSize1DArray",
+ _textureSize(gpu_shader4_array, glsl_type::ivec2_type, glsl_type::sampler1DArray_type),
+ _textureSize(gpu_shader4_array_integer, glsl_type::ivec2_type, glsl_type::isampler1DArray_type),
+ _textureSize(gpu_shader4_array_integer, glsl_type::ivec2_type, glsl_type::usampler1DArray_type),
+ NULL);
+
+ add_function("textureSize2DArray",
+ _textureSize(gpu_shader4_array, glsl_type::ivec3_type, glsl_type::sampler2DArray_type),
+ _textureSize(gpu_shader4_array_integer, glsl_type::ivec3_type, glsl_type::isampler2DArray_type),
+ _textureSize(gpu_shader4_array_integer, glsl_type::ivec3_type, glsl_type::usampler2DArray_type),
+ NULL);
+
+ add_function("textureSize2DRect",
+ _textureSize(gpu_shader4_rect, glsl_type::ivec2_type, glsl_type::sampler2DRect_type),
+ _textureSize(gpu_shader4_rect_integer, glsl_type::ivec2_type, glsl_type::isampler2DRect_type),
+ _textureSize(gpu_shader4_rect_integer, glsl_type::ivec2_type, glsl_type::usampler2DRect_type),
+ NULL);
+
+ add_function("textureSizeBuffer",
+ _textureSize(gpu_shader4_tbo, glsl_type::int_type, glsl_type::samplerBuffer_type),
+ _textureSize(gpu_shader4_tbo_integer, glsl_type::int_type, glsl_type::isamplerBuffer_type),
+ _textureSize(gpu_shader4_tbo_integer, glsl_type::int_type, glsl_type::usamplerBuffer_type),
+ NULL);
+
+ add_function("textureSamples",
+ _textureSamples(shader_samples, glsl_type::sampler2DMS_type),
+ _textureSamples(shader_samples, glsl_type::isampler2DMS_type),
+ _textureSamples(shader_samples, glsl_type::usampler2DMS_type),
+
+ _textureSamples(shader_samples, glsl_type::sampler2DMSArray_type),
+ _textureSamples(shader_samples, glsl_type::isampler2DMSArray_type),
+ _textureSamples(shader_samples, glsl_type::usampler2DMSArray_type),
+ NULL);
+
+ add_function("texture",
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+
+ _texture(ir_tex, texture_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_tex, texture_cube_map_array, glsl_type::ivec4_type, glsl_type::isamplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_tex, texture_cube_map_array, glsl_type::uvec4_type, glsl_type::usamplerCubeArray_type, glsl_type::vec4_type),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ /* samplerCubeArrayShadow is special; it has an extra parameter
+ * for the shadow comparator since there is no vec5 type.
+ */
+ _textureCubeArrayShadow(ir_tex, texture_cube_map_array, glsl_type::samplerCubeArrayShadow_type),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type),
+
+ _texture(ir_tex, texture_external_es3, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::vec2_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+
+ _texture(ir_txb, derivatives_texture_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_txb, derivatives_texture_cube_map_array, glsl_type::ivec4_type, glsl_type::isamplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_txb, derivatives_texture_cube_map_array, glsl_type::uvec4_type, glsl_type::usamplerCubeArray_type, glsl_type::vec4_type),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ _texture(ir_tex, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ _texture(ir_txb, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+
+ _textureCubeArrayShadow(ir_tex, v130_or_gpu_shader4_and_tex_cube_map_array, glsl_type::samplerCubeArrayShadow_type),
+ _textureCubeArrayShadow(ir_txb, v130_or_gpu_shader4_and_tex_cube_map_array, glsl_type::samplerCubeArrayShadow_type),
+ NULL);
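+
+ /* A minimal GLSL sketch of the overloads registered above; the sampler
+ * and variable names are illustrative only, but the signatures follow
+ * the GLSL 1.30+ specification:
+ *
+ *    uniform sampler2D tex;
+ *    uniform sampler2DShadow shadow;
+ *    uniform samplerCubeArrayShadow cubeShadow;
+ *
+ *    vec4  c = texture(tex, uv);               // ir_tex
+ *    vec4  b = texture(tex, uv, 1.0);          // ir_txb: LOD bias, only
+ *                                              // where derivatives exist
+ *    float s = texture(shadow, vec3(uv, ref)); // compare value in coord.z
+ *    float a = texture(cubeShadow, vec4(dir, layer), ref); // no vec5, so
+ *                                              // the compare value is an
+ *                                              // extra parameter
+ */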
+
+ add_function("textureLod",
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+
+ _texture(ir_txl, texture_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_txl, texture_cube_map_array, glsl_type::ivec4_type, glsl_type::isamplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_txl, texture_cube_map_array, glsl_type::uvec4_type, glsl_type::usamplerCubeArray_type, glsl_type::vec4_type),
+
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ _texture(ir_txl, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ _texture(ir_txl, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+ _textureCubeArrayShadow(ir_txl, v130_or_gpu_shader4_and_tex_cube_map_array, glsl_type::samplerCubeArrayShadow_type),
+ NULL);
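+
+ /* Sketch of the explicit-LOD form registered above (ir_txl); names are
+ * illustrative:
+ *
+ *    vec4 c = textureLod(tex, uv, 2.0);  // sample mip level 2; no implicit
+ *                                        // derivatives, so it is valid in
+ *                                        // every shader stage
+ */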
+
+ add_function("textureOffset",
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ /* The next one was omitted from the GLSL 1.30 spec. It comes from
+ * EXT_gpu_shader4 originally. It was added in 4.30 with the wrong
+ * syntax, which was corrected in 4.40. Since 4.30 indicates that it
+ * was intended to be included all along, allow it in 1.30.
+ */
+ _texture(ir_tex, v130_desktop, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+ _texture(ir_txb, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+ NULL);
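+
+ /* Sketch of the TEX_OFFSET variants above; per the GLSL spec the offset
+ * must be a constant expression and is added to the texel coordinates
+ * before filtering (names illustrative):
+ *
+ *    vec4 c = textureOffset(tex, uv, ivec2(1, -1));
+ */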
+
+ add_function("texture1DOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture3DOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DRectOffset",
+ _texture(ir_tex, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DRectOffset",
+ _texture(ir_tex, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DArrayOffset",
+ _texture(ir_tex, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DArrayOffset",
+ _texture(ir_tex, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DArrayOffset",
+ _texture(ir_tex, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DArrayOffset",
+ _texture(ir_tex, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+ NULL);
+
+ add_function("textureProj",
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, texture_external_es3, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, texture_external_es3, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
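+
+ /* Sketch of the TEX_PROJECT variants above: the coordinate is divided
+ * by its last component before the lookup, so with float q these two
+ * calls are equivalent (names illustrative):
+ *
+ *    vec4 a = textureProj(tex, vec3(uv, q));
+ *    vec4 b = texture(tex, uv / q);
+ */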
+
+ add_function("texelFetch",
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::int_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::int_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::int_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::ivec2_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::ivec3_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::ivec3_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::ivec3_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::ivec2_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::ivec2_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::ivec3_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::ivec3_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::ivec3_type),
+
+ _texelFetch(texture_buffer, glsl_type::vec4_type, glsl_type::samplerBuffer_type, glsl_type::int_type),
+ _texelFetch(texture_buffer, glsl_type::ivec4_type, glsl_type::isamplerBuffer_type, glsl_type::int_type),
+ _texelFetch(texture_buffer, glsl_type::uvec4_type, glsl_type::usamplerBuffer_type, glsl_type::int_type),
+
+ _texelFetch(texture_multisample, glsl_type::vec4_type, glsl_type::sampler2DMS_type, glsl_type::ivec2_type),
+ _texelFetch(texture_multisample, glsl_type::ivec4_type, glsl_type::isampler2DMS_type, glsl_type::ivec2_type),
+ _texelFetch(texture_multisample, glsl_type::uvec4_type, glsl_type::usampler2DMS_type, glsl_type::ivec2_type),
+
+ _texelFetch(texture_multisample_array, glsl_type::vec4_type, glsl_type::sampler2DMSArray_type, glsl_type::ivec3_type),
+ _texelFetch(texture_multisample_array, glsl_type::ivec4_type, glsl_type::isampler2DMSArray_type, glsl_type::ivec3_type),
+ _texelFetch(texture_multisample_array, glsl_type::uvec4_type, glsl_type::usampler2DMSArray_type, glsl_type::ivec3_type),
+
+ _texelFetch(texture_external_es3, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::ivec2_type),
+
+ NULL);
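+
+ /* Sketch of texelFetch: integer texel coordinates, no filtering or
+ * wrapping, and an explicit mip level. Rect and buffer samplers take no
+ * level argument, and multisample samplers take a sample index instead
+ * (names illustrative):
+ *
+ *    vec4 t = texelFetch(tex, ivec2(x, y), 0);
+ */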
+
+ add_function("texelFetch1D",
+ _texelFetch(gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::int_type),
+ NULL);
+
+ add_function("texelFetch2D",
+ _texelFetch(gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::ivec2_type),
+ NULL);
+
+ add_function("texelFetch3D",
+ _texelFetch(gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::ivec3_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::ivec3_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::ivec3_type),
+ NULL);
+
+ add_function("texelFetch2DRect",
+ _texelFetch(gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::ivec2_type),
+ NULL);
+
+ add_function("texelFetch1DArray",
+ _texelFetch(gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::ivec2_type),
+ NULL);
+
+ add_function("texelFetch2DArray",
+ _texelFetch(gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::ivec3_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::ivec3_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::ivec3_type),
+ NULL);
+
+ add_function("texelFetchBuffer",
+ _texelFetch(gpu_shader4_tbo, glsl_type::vec4_type, glsl_type::samplerBuffer_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_tbo_integer, glsl_type::ivec4_type, glsl_type::isamplerBuffer_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_tbo_integer, glsl_type::uvec4_type, glsl_type::usamplerBuffer_type, glsl_type::int_type),
+ NULL);
+
+ add_function("texelFetchOffset",
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::int_type, glsl_type::int_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::int_type, glsl_type::int_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::int_type, glsl_type::int_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::ivec3_type, glsl_type::ivec3_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::ivec3_type, glsl_type::ivec3_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::ivec3_type, glsl_type::ivec3_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::ivec2_type, glsl_type::int_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::ivec2_type, glsl_type::int_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::ivec2_type, glsl_type::int_type),
+
+ _texelFetch(v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::ivec3_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::ivec3_type, glsl_type::ivec2_type),
+ _texelFetch(v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::ivec3_type, glsl_type::ivec2_type),
+
+ NULL);
+
+ add_function("texelFetch1DOffset",
+ _texelFetch(gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::int_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::int_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::int_type, glsl_type::int_type),
+ NULL);
+
+ add_function("texelFetch2DOffset",
+ _texelFetch(gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ NULL);
+
+ add_function("texelFetch3DOffset",
+ _texelFetch(gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::ivec3_type, glsl_type::ivec3_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::ivec3_type, glsl_type::ivec3_type),
+ _texelFetch(gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::ivec3_type, glsl_type::ivec3_type),
+ NULL);
+
+ add_function("texelFetch2DRectOffset",
+ _texelFetch(gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::ivec2_type, glsl_type::ivec2_type),
+ NULL);
+
+ add_function("texelFetch1DArrayOffset",
+ _texelFetch(gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::ivec2_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::ivec2_type, glsl_type::int_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::ivec2_type, glsl_type::int_type),
+ NULL);
+
+ add_function("texelFetch2DArrayOffset",
+ _texelFetch(gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::ivec3_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::ivec3_type, glsl_type::ivec2_type),
+ _texelFetch(gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::ivec3_type, glsl_type::ivec2_type),
+ NULL);
+
+ add_function("textureProjOffset",
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_tex, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, v130_derivatives_only, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DProjOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DProjOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture3DProjOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DProjOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DProjOffset",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DRectProjOffset",
+ _texture(ir_tex, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DRectProjOffset",
+ _texture(ir_tex, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("textureLodOffset",
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, v130_or_gpu_shader4_and_tex_shadow_lod, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture3DLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DArrayLodOffset",
+ _texture(ir_txl, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DArrayLodOffset",
+ _texture(ir_txl, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DArrayLodOffset",
+ _texture(ir_txl, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("textureProjLod",
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("textureProjLodOffset",
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DProjLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DProjLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture3DProjLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DProjLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DProjLodOffset",
+ _texture(ir_txl, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
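+ /* Explicit-gradient lookups (ir_txd): the caller supplies the
+  * derivatives, so these are usable in every shader stage, not just
+  * fragment shaders.
+  */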
+ add_function("textureGrad",
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+
+ _texture(ir_txd, texture_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_txd, texture_cube_map_array, glsl_type::ivec4_type, glsl_type::isamplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_txd, texture_cube_map_array, glsl_type::uvec4_type, glsl_type::usamplerCubeArray_type, glsl_type::vec4_type),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ NULL);
+
+ add_function("textureGradOffset",
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture3DGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DRectGradOffset",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DRectGradOffset",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DArrayGradOffset",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type, TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DArrayGradOffset",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DArrayGradOffset",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DArrayGradOffset",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type, TEX_OFFSET),
+ NULL);
+
+ add_function("textureProjGrad",
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("textureProjGradOffset",
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, v130, glsl_type::float_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture1DProjGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DProjGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture3DProjGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("texture2DRectProjGradOffset",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DRectProjGradOffset",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow1DProjGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
+ add_function("shadow2DProjGradOffset",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT | TEX_OFFSET),
+ NULL);
+
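+ /* Geometry shader built-ins. The stream variants come from
+  * ARB_gpu_shader5 and are registered for both int and uint stream
+  * arguments.
+  */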
+ add_function("EmitVertex", _EmitVertex(), NULL);
+ add_function("EndPrimitive", _EndPrimitive(), NULL);
+ add_function("EmitStreamVertex",
+ _EmitStreamVertex(gs_streams, glsl_type::uint_type),
+ _EmitStreamVertex(gs_streams, glsl_type::int_type),
+ NULL);
+ add_function("EndStreamPrimitive",
+ _EndStreamPrimitive(gs_streams, glsl_type::uint_type),
+ _EndStreamPrimitive(gs_streams, glsl_type::int_type),
+ NULL);
+ add_function("barrier", _barrier(), NULL);
+
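+ /* ARB_texture_query_lod spells this "textureQueryLOD"; GLSL 4.00
+  * adopted it as "textureQueryLod", so both spellings are registered
+  * with their own availability predicates.
+  */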
+ add_function("textureQueryLOD",
+ _textureQueryLod(texture_query_lod, glsl_type::sampler1D_type, glsl_type::float_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isampler1D_type, glsl_type::float_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usampler1D_type, glsl_type::float_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usampler3D_type, glsl_type::vec3_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::sampler1DArray_type, glsl_type::float_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isampler1DArray_type, glsl_type::float_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usampler1DArray_type, glsl_type::float_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::sampler2DArray_type, glsl_type::vec2_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isampler2DArray_type, glsl_type::vec2_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usampler2DArray_type, glsl_type::vec2_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::samplerCubeArray_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::isamplerCubeArray_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::usamplerCubeArray_type, glsl_type::vec3_type),
+
+ _textureQueryLod(texture_query_lod, glsl_type::sampler1DShadow_type, glsl_type::float_type),
+ _textureQueryLod(texture_query_lod, glsl_type::sampler2DShadow_type, glsl_type::vec2_type),
+ _textureQueryLod(texture_query_lod, glsl_type::samplerCubeShadow_type, glsl_type::vec3_type),
+ _textureQueryLod(texture_query_lod, glsl_type::sampler1DArrayShadow_type, glsl_type::float_type),
+ _textureQueryLod(texture_query_lod, glsl_type::sampler2DArrayShadow_type, glsl_type::vec2_type),
+ _textureQueryLod(texture_query_lod, glsl_type::samplerCubeArrayShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("textureQueryLod",
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler1D_type, glsl_type::float_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isampler1D_type, glsl_type::float_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usampler1D_type, glsl_type::float_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usampler3D_type, glsl_type::vec3_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler1DArray_type, glsl_type::float_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isampler1DArray_type, glsl_type::float_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usampler1DArray_type, glsl_type::float_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler2DArray_type, glsl_type::vec2_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isampler2DArray_type, glsl_type::vec2_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usampler2DArray_type, glsl_type::vec2_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::samplerCubeArray_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::isamplerCubeArray_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::usamplerCubeArray_type, glsl_type::vec3_type),
+
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler1DShadow_type, glsl_type::float_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler2DShadow_type, glsl_type::vec2_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::samplerCubeShadow_type, glsl_type::vec3_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler1DArrayShadow_type, glsl_type::float_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::sampler2DArrayShadow_type, glsl_type::vec2_type),
+ _textureQueryLod(v400_derivatives_only, glsl_type::samplerCubeArrayShadow_type, glsl_type::vec3_type),
+ NULL);
+
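+ /* textureQueryLevels (ARB_texture_query_levels) returns the number
+  * of accessible mipmap levels.
+  */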
+ add_function("textureQueryLevels",
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler1D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler2D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler3D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::samplerCube_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler1DArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler2DArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::samplerCubeArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler1DShadow_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler2DShadow_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::samplerCubeShadow_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler1DArrayShadow_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::sampler2DArrayShadow_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::samplerCubeArrayShadow_type),
+
+ _textureQueryLevels(texture_query_levels, glsl_type::isampler1D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::isampler2D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::isampler3D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::isamplerCube_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::isampler1DArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::isampler2DArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::isamplerCubeArray_type),
+
+ _textureQueryLevels(texture_query_levels, glsl_type::usampler1D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::usampler2D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::usampler3D_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::usamplerCube_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::usampler1DArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::usampler2DArray_type),
+ _textureQueryLevels(texture_query_levels, glsl_type::usamplerCubeArray_type),
+
+ NULL);
+
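+ /* EXT_shader_samples_identical: quick check whether every sample of
+  * a multisample texel holds the same value.
+  */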
+ add_function("textureSamplesIdenticalEXT",
+ _textureSamplesIdentical(texture_samples_identical, glsl_type::sampler2DMS_type, glsl_type::ivec2_type),
+ _textureSamplesIdentical(texture_samples_identical, glsl_type::isampler2DMS_type, glsl_type::ivec2_type),
+ _textureSamplesIdentical(texture_samples_identical, glsl_type::usampler2DMS_type, glsl_type::ivec2_type),
+
+ _textureSamplesIdentical(texture_samples_identical_array, glsl_type::sampler2DMSArray_type, glsl_type::ivec3_type),
+ _textureSamplesIdentical(texture_samples_identical_array, glsl_type::isampler2DMSArray_type, glsl_type::ivec3_type),
+ _textureSamplesIdentical(texture_samples_identical_array, glsl_type::usampler2DMSArray_type, glsl_type::ivec3_type),
+ NULL);
+
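+ /* Deprecated pre-GLSL-1.30 lookup built-ins (texture1D, texture2D,
+  * shadow2D, ...). The integer-sampler overloads come from
+  * EXT_gpu_shader4.
+  */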
+ add_function("texture1D",
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+ NULL);
+
+ add_function("texture1DArray",
+ _texture(ir_tex, texture_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txb, texture_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture1DProj",
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture1DLod",
+ _texture(ir_txl, tex1d_lod, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+ NULL);
+
+ add_function("texture1DArrayLod",
+ _texture(ir_txl, texture_array_lod, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture1DProjLod",
+ _texture(ir_txl, tex1d_lod, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txl, tex1d_lod, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
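+ /* texture2D also accepts samplerExternalOES
+  * (OES_EGL_image_external).
+  */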
+ add_function("texture2D",
+ _texture(ir_tex, always_available, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txb, derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tex, texture_external, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DArray",
+ _texture(ir_tex, texture_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txb, texture_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_tex, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txb, gpu_shader4_array_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture2DProj",
+ _texture(ir_tex, always_available, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, always_available, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txb, derivatives_only, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, texture_external, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, texture_external, glsl_type::vec4_type, glsl_type::samplerExternalOES_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture2DLod",
+ _texture(ir_txl, lod_exists_in_stage, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DArrayLod",
+ _texture(ir_txl, texture_array_lod, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txl, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture2DProjLod",
+ _texture(ir_txl, lod_exists_in_stage, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txl, lod_exists_in_stage, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture3D",
+ _texture(ir_tex, tex3d, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txb, derivatives_tex3d, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture3DProj",
+ _texture(ir_tex, tex3d, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, derivatives_tex3d, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture3DLod",
+ _texture(ir_txl, tex3d_lod, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture3DProjLod",
+ _texture(ir_txl, tex3d_lod, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("textureCube",
+ _texture(ir_tex, always_available, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txb, derivatives_only, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_tex, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txb, gpu_shader4_integer_derivs_only, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("textureCubeLod",
+ _texture(ir_txl, lod_exists_in_stage, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txl, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture2DRect",
+ _texture(ir_tex, texture_rectangle, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DRectProj",
+ _texture(ir_tex, texture_rectangle, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, texture_rectangle, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_tex, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow1D",
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DArray",
+ _texture(ir_tex, texture_array, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ _texture(ir_txb, texture_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2D",
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DArray",
+ _texture(ir_tex, texture_array, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ _texture(ir_txb, texture_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ NULL);
+
+ add_function("shadow1DProj",
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DArray",
+ _texture(ir_tex, texture_array, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ _texture(ir_txb, texture_array_derivs_only, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ NULL);
+
+ add_function("shadowCube",
+ _texture(ir_tex, gpu_shader4, glsl_type::vec4_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+ _texture(ir_txb, gpu_shader4_derivs_only, glsl_type::vec4_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+ NULL);
+
+ add_function("shadow2DProj",
+ _texture(ir_tex, v110, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txb, v110_derivatives_only, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow1DLod",
+ _texture(ir_txl, v110_lod, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DLod",
+ _texture(ir_txl, v110_lod, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DArrayLod",
+ _texture(ir_txl, texture_array_lod, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DProjLod",
+ _texture(ir_txl, v110_lod, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DProjLod",
+ _texture(ir_txl, v110_lod, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DRect",
+ _texture(ir_tex, texture_rectangle, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DRectProj",
+ _texture(ir_tex, texture_rectangle, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
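+ /* ARB_shader_texture_lod variants. The "ARB" suffix keeps them
+  * distinct from the EXT_gpu_shader4 *Grad built-ins registered
+  * below.
+  */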
+ add_function("texture1DGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ NULL);
+
+ add_function("texture1DProjGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture2DGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DProjGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture3DGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture3DProjGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("textureCubeGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DProjGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DProjGradARB",
+ _texture(ir_txd, shader_texture_lod, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture2DRectGradARB",
+ _texture(ir_txd, shader_texture_lod_and_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DRectProjGradARB",
+ _texture(ir_txd, shader_texture_lod_and_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, shader_texture_lod_and_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DRectGradARB",
+ _texture(ir_txd, shader_texture_lod_and_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DRectProjGradARB",
+ _texture(ir_txd, shader_texture_lod_and_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
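+ /* texture4 comes from AMD_texture_texture4, an early gather-style
+  * fetch of a 2x2 footprint.
+  */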
+ add_function("texture4",
+ _texture(ir_tg4, texture_texture4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture1DGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::float_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::float_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::float_type),
+ NULL);
+
+ add_function("texture1DProjGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec2_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler1D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture1DArrayGrad",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler1DArray_type, glsl_type::vec2_type),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler1DArray_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DProjGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("texture2DArrayGrad",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_txd, gpu_shader4_array_integer, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture3DGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec3_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("texture3DProjGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usampler3D_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("textureCubeGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_txd, gpu_shader4_integer, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow1DProjGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler1DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow1DArrayGrad",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler1DArrayShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DProjGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DArrayGrad",
+ _texture(ir_txd, gpu_shader4_array, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec4_type),
+ NULL);
+
+ add_function("texture2DRectGrad",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("texture2DRectProjGrad",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec3_type, TEX_PROJECT),
+ _texture(ir_txd, gpu_shader4_rect_integer, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadow2DRectGrad",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec3_type),
+ NULL);
+
+ add_function("shadow2DRectProjGrad",
+ _texture(ir_txd, gpu_shader4_rect, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec4_type, TEX_PROJECT),
+ NULL);
+
+ add_function("shadowCubeGrad",
+ _texture(ir_txd, gpu_shader4, glsl_type::vec4_type, glsl_type::samplerCubeShadow_type, glsl_type::vec4_type),
+ NULL);
+
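+ /* Gather built-ins (ir_tg4). TEX_COMPONENT marks the overloads that
+  * take an explicit component-select argument (ARB_gpu_shader5 or
+  * ESSL 3.10).
+  */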
+ add_function("textureGather",
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type),
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type),
+
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_tg4, gpu_shader5, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type),
+ _texture(ir_tg4, gpu_shader5, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type),
+
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type),
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type),
+
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type),
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type),
+ _texture(ir_tg4, texture_gather_or_es31, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type),
+
+ _texture(ir_tg4, texture_gather_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_tg4, texture_gather_cube_map_array, glsl_type::ivec4_type, glsl_type::isamplerCubeArray_type, glsl_type::vec4_type),
+ _texture(ir_tg4, texture_gather_cube_map_array, glsl_type::uvec4_type, glsl_type::usamplerCubeArray_type, glsl_type::vec4_type),
+
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::vec4_type, glsl_type::samplerCube_type, glsl_type::vec3_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::ivec4_type, glsl_type::isamplerCube_type, glsl_type::vec3_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::uvec4_type, glsl_type::usamplerCube_type, glsl_type::vec3_type, TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_or_OES_texture_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArray_type, glsl_type::vec4_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_OES_texture_cube_map_array, glsl_type::ivec4_type, glsl_type::isamplerCubeArray_type, glsl_type::vec4_type, TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_or_OES_texture_cube_map_array, glsl_type::uvec4_type, glsl_type::usamplerCubeArray_type, glsl_type::vec4_type, TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec2_type),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec3_type),
+ _texture(ir_tg4, gpu_shader5_or_es31, glsl_type::vec4_type, glsl_type::samplerCubeShadow_type, glsl_type::vec3_type),
+ _texture(ir_tg4, gpu_shader5_or_OES_texture_cube_map_array, glsl_type::vec4_type, glsl_type::samplerCubeArrayShadow_type, glsl_type::vec4_type),
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec2_type),
+ NULL);
+
+ add_function("textureGatherOffset",
+ _texture(ir_tg4, texture_gather_only_or_es31, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tg4, texture_gather_only_or_es31, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tg4, texture_gather_only_or_es31, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET),
+
+ _texture(ir_tg4, texture_gather_only_or_es31, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tg4, texture_gather_only_or_es31, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+ _texture(ir_tg4, texture_gather_only_or_es31, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET),
+
+ _texture(ir_tg4, es31_not_gs5, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET | TEX_COMPONENT),
+ _texture(ir_tg4, es31_not_gs5, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET | TEX_COMPONENT),
+ _texture(ir_tg4, es31_not_gs5, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET | TEX_COMPONENT),
+
+ _texture(ir_tg4, es31_not_gs5, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET | TEX_COMPONENT),
+ _texture(ir_tg4, es31_not_gs5, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET | TEX_COMPONENT),
+ _texture(ir_tg4, es31_not_gs5, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST),
+
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET_NONCONST),
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec2_type, TEX_OFFSET_NONCONST),
+
+ _texture(ir_tg4, es31_not_gs5, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec2_type, TEX_OFFSET),
+ _texture(ir_tg4, es31_not_gs5, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET),
+ NULL);
+
+ add_function("textureGatherOffsets",
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2D_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2D_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2D_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::ivec4_type, glsl_type::isampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::uvec4_type, glsl_type::usampler2DArray_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5, glsl_type::ivec4_type, glsl_type::isampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+ _texture(ir_tg4, gpu_shader5, glsl_type::uvec4_type, glsl_type::usampler2DRect_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY | TEX_COMPONENT),
+
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DShadow_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5_es, glsl_type::vec4_type, glsl_type::sampler2DArrayShadow_type, glsl_type::vec3_type, TEX_OFFSET_ARRAY),
+ _texture(ir_tg4, gpu_shader5, glsl_type::vec4_type, glsl_type::sampler2DRectShadow_type, glsl_type::vec2_type, TEX_OFFSET_ARRAY),
+ NULL);
+
+ F(dFdx)
+ F(dFdy)
+ F(fwidth)
+ F(dFdxCoarse)
+ F(dFdyCoarse)
+ F(fwidthCoarse)
+ F(dFdxFine)
+ F(dFdyFine)
+ F(fwidthFine)
+ F(noise1)
+ F(noise2)
+ F(noise3)
+ F(noise4)
+
+ IU(bitfieldExtract)
+ IU(bitfieldInsert)
+ IU(bitfieldReverse)
+ IU(bitCount)
+ IU(findLSB)
+ IU(findMSB)
+ FDGS5(fma)
+
+ add_function("ldexp",
+ _ldexp(glsl_type::float_type, glsl_type::int_type),
+ _ldexp(glsl_type::vec2_type, glsl_type::ivec2_type),
+ _ldexp(glsl_type::vec3_type, glsl_type::ivec3_type),
+ _ldexp(glsl_type::vec4_type, glsl_type::ivec4_type),
+ _ldexp(glsl_type::double_type, glsl_type::int_type),
+ _ldexp(glsl_type::dvec2_type, glsl_type::ivec2_type),
+ _ldexp(glsl_type::dvec3_type, glsl_type::ivec3_type),
+ _ldexp(glsl_type::dvec4_type, glsl_type::ivec4_type),
+ NULL);
+
+ add_function("frexp",
+ _frexp(glsl_type::float_type, glsl_type::int_type),
+ _frexp(glsl_type::vec2_type, glsl_type::ivec2_type),
+ _frexp(glsl_type::vec3_type, glsl_type::ivec3_type),
+ _frexp(glsl_type::vec4_type, glsl_type::ivec4_type),
+ _dfrexp(glsl_type::double_type, glsl_type::int_type),
+ _dfrexp(glsl_type::dvec2_type, glsl_type::ivec2_type),
+ _dfrexp(glsl_type::dvec3_type, glsl_type::ivec3_type),
+ _dfrexp(glsl_type::dvec4_type, glsl_type::ivec4_type),
+ NULL);
+ add_function("uaddCarry",
+ _uaddCarry(glsl_type::uint_type),
+ _uaddCarry(glsl_type::uvec2_type),
+ _uaddCarry(glsl_type::uvec3_type),
+ _uaddCarry(glsl_type::uvec4_type),
+ NULL);
+ add_function("usubBorrow",
+ _usubBorrow(glsl_type::uint_type),
+ _usubBorrow(glsl_type::uvec2_type),
+ _usubBorrow(glsl_type::uvec3_type),
+ _usubBorrow(glsl_type::uvec4_type),
+ NULL);
+ add_function("imulExtended",
+ _mulExtended(glsl_type::int_type),
+ _mulExtended(glsl_type::ivec2_type),
+ _mulExtended(glsl_type::ivec3_type),
+ _mulExtended(glsl_type::ivec4_type),
+ NULL);
+ add_function("umulExtended",
+ _mulExtended(glsl_type::uint_type),
+ _mulExtended(glsl_type::uvec2_type),
+ _mulExtended(glsl_type::uvec3_type),
+ _mulExtended(glsl_type::uvec4_type),
+ NULL);
+ add_function("interpolateAtCentroid",
+ _interpolateAtCentroid(glsl_type::float_type),
+ _interpolateAtCentroid(glsl_type::vec2_type),
+ _interpolateAtCentroid(glsl_type::vec3_type),
+ _interpolateAtCentroid(glsl_type::vec4_type),
+ NULL);
+ add_function("interpolateAtOffset",
+ _interpolateAtOffset(glsl_type::float_type),
+ _interpolateAtOffset(glsl_type::vec2_type),
+ _interpolateAtOffset(glsl_type::vec3_type),
+ _interpolateAtOffset(glsl_type::vec4_type),
+ NULL);
+ add_function("interpolateAtSample",
+ _interpolateAtSample(glsl_type::float_type),
+ _interpolateAtSample(glsl_type::vec2_type),
+ _interpolateAtSample(glsl_type::vec3_type),
+ _interpolateAtSample(glsl_type::vec4_type),
+ NULL);
+
+ add_function("atomicCounter",
+ _atomic_counter_op("__intrinsic_atomic_read",
+ shader_atomic_counters),
+ NULL);
+ add_function("atomicCounterIncrement",
+ _atomic_counter_op("__intrinsic_atomic_increment",
+ shader_atomic_counters),
+ NULL);
+ add_function("atomicCounterDecrement",
+ _atomic_counter_op("__intrinsic_atomic_predecrement",
+ shader_atomic_counters),
+ NULL);
+
+ add_function("atomicCounterAddARB",
+ _atomic_counter_op1("__intrinsic_atomic_add",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterSubtractARB",
+ _atomic_counter_op1("__intrinsic_atomic_sub",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterMinARB",
+ _atomic_counter_op1("__intrinsic_atomic_min",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterMaxARB",
+ _atomic_counter_op1("__intrinsic_atomic_max",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterAndARB",
+ _atomic_counter_op1("__intrinsic_atomic_and",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterOrARB",
+ _atomic_counter_op1("__intrinsic_atomic_or",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterXorARB",
+ _atomic_counter_op1("__intrinsic_atomic_xor",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterExchangeARB",
+ _atomic_counter_op1("__intrinsic_atomic_exchange",
+ shader_atomic_counter_ops),
+ NULL);
+ add_function("atomicCounterCompSwapARB",
+ _atomic_counter_op2("__intrinsic_atomic_comp_swap",
+ shader_atomic_counter_ops),
+ NULL);
+
+ add_function("atomicCounterAdd",
+ _atomic_counter_op1("__intrinsic_atomic_add",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterSubtract",
+ _atomic_counter_op1("__intrinsic_atomic_sub",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterMin",
+ _atomic_counter_op1("__intrinsic_atomic_min",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterMax",
+ _atomic_counter_op1("__intrinsic_atomic_max",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterAnd",
+ _atomic_counter_op1("__intrinsic_atomic_and",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterOr",
+ _atomic_counter_op1("__intrinsic_atomic_or",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterXor",
+ _atomic_counter_op1("__intrinsic_atomic_xor",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterExchange",
+ _atomic_counter_op1("__intrinsic_atomic_exchange",
+ v460_desktop),
+ NULL);
+ add_function("atomicCounterCompSwap",
+ _atomic_counter_op2("__intrinsic_atomic_comp_swap",
+ v460_desktop),
+ NULL);
+
+ add_function("atomicAdd",
+ _atomic_op2("__intrinsic_atomic_add",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_add",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ _atomic_op2("__intrinsic_atomic_add",
+ shader_atomic_float_add,
+ glsl_type::float_type),
+ NULL);
+ add_function("atomicMin",
+ _atomic_op2("__intrinsic_atomic_min",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_min",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ _atomic_op2("__intrinsic_atomic_min",
+ shader_atomic_float_minmax,
+ glsl_type::float_type),
+ NULL);
+ add_function("atomicMax",
+ _atomic_op2("__intrinsic_atomic_max",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_max",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ _atomic_op2("__intrinsic_atomic_max",
+ shader_atomic_float_minmax,
+ glsl_type::float_type),
+ NULL);
+ add_function("atomicAnd",
+ _atomic_op2("__intrinsic_atomic_and",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_and",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ NULL);
+ add_function("atomicOr",
+ _atomic_op2("__intrinsic_atomic_or",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_or",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ NULL);
+ add_function("atomicXor",
+ _atomic_op2("__intrinsic_atomic_xor",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_xor",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ NULL);
+ add_function("atomicExchange",
+ _atomic_op2("__intrinsic_atomic_exchange",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op2("__intrinsic_atomic_exchange",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ _atomic_op2("__intrinsic_atomic_exchange",
+ shader_atomic_float_exchange,
+ glsl_type::float_type),
+ NULL);
+ add_function("atomicCompSwap",
+ _atomic_op3("__intrinsic_atomic_comp_swap",
+ buffer_atomics_supported,
+ glsl_type::uint_type),
+ _atomic_op3("__intrinsic_atomic_comp_swap",
+ buffer_atomics_supported,
+ glsl_type::int_type),
+ _atomic_op3("__intrinsic_atomic_comp_swap",
+ shader_atomic_float_minmax,
+ glsl_type::float_type),
+ NULL);
+
+ add_function("min3",
+ _min3(glsl_type::float_type),
+ _min3(glsl_type::vec2_type),
+ _min3(glsl_type::vec3_type),
+ _min3(glsl_type::vec4_type),
+
+ _min3(glsl_type::int_type),
+ _min3(glsl_type::ivec2_type),
+ _min3(glsl_type::ivec3_type),
+ _min3(glsl_type::ivec4_type),
+
+ _min3(glsl_type::uint_type),
+ _min3(glsl_type::uvec2_type),
+ _min3(glsl_type::uvec3_type),
+ _min3(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("max3",
+ _max3(glsl_type::float_type),
+ _max3(glsl_type::vec2_type),
+ _max3(glsl_type::vec3_type),
+ _max3(glsl_type::vec4_type),
+
+ _max3(glsl_type::int_type),
+ _max3(glsl_type::ivec2_type),
+ _max3(glsl_type::ivec3_type),
+ _max3(glsl_type::ivec4_type),
+
+ _max3(glsl_type::uint_type),
+ _max3(glsl_type::uvec2_type),
+ _max3(glsl_type::uvec3_type),
+ _max3(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("mid3",
+ _mid3(glsl_type::float_type),
+ _mid3(glsl_type::vec2_type),
+ _mid3(glsl_type::vec3_type),
+ _mid3(glsl_type::vec4_type),
+
+ _mid3(glsl_type::int_type),
+ _mid3(glsl_type::ivec2_type),
+ _mid3(glsl_type::ivec3_type),
+ _mid3(glsl_type::ivec4_type),
+
+ _mid3(glsl_type::uint_type),
+ _mid3(glsl_type::uvec2_type),
+ _mid3(glsl_type::uvec3_type),
+ _mid3(glsl_type::uvec4_type),
+ NULL);
+
+ add_image_functions(true);
+
+ add_function("memoryBarrier",
+ _memory_barrier("__intrinsic_memory_barrier",
+ shader_image_load_store),
+ NULL);
+ add_function("groupMemoryBarrier",
+ _memory_barrier("__intrinsic_group_memory_barrier",
+ compute_shader),
+ NULL);
+ add_function("memoryBarrierAtomicCounter",
+ _memory_barrier("__intrinsic_memory_barrier_atomic_counter",
+ compute_shader_supported),
+ NULL);
+ add_function("memoryBarrierBuffer",
+ _memory_barrier("__intrinsic_memory_barrier_buffer",
+ compute_shader_supported),
+ NULL);
+ add_function("memoryBarrierImage",
+ _memory_barrier("__intrinsic_memory_barrier_image",
+ compute_shader_supported),
+ NULL);
+ add_function("memoryBarrierShared",
+ _memory_barrier("__intrinsic_memory_barrier_shared",
+ compute_shader),
+ NULL);
+
+ add_function("ballotARB", _ballot(), NULL);
+
+ add_function("readInvocationARB",
+ _read_invocation(glsl_type::float_type),
+ _read_invocation(glsl_type::vec2_type),
+ _read_invocation(glsl_type::vec3_type),
+ _read_invocation(glsl_type::vec4_type),
+
+ _read_invocation(glsl_type::int_type),
+ _read_invocation(glsl_type::ivec2_type),
+ _read_invocation(glsl_type::ivec3_type),
+ _read_invocation(glsl_type::ivec4_type),
+
+ _read_invocation(glsl_type::uint_type),
+ _read_invocation(glsl_type::uvec2_type),
+ _read_invocation(glsl_type::uvec3_type),
+ _read_invocation(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("readFirstInvocationARB",
+ _read_first_invocation(glsl_type::float_type),
+ _read_first_invocation(glsl_type::vec2_type),
+ _read_first_invocation(glsl_type::vec3_type),
+ _read_first_invocation(glsl_type::vec4_type),
+
+ _read_first_invocation(glsl_type::int_type),
+ _read_first_invocation(glsl_type::ivec2_type),
+ _read_first_invocation(glsl_type::ivec3_type),
+ _read_first_invocation(glsl_type::ivec4_type),
+
+ _read_first_invocation(glsl_type::uint_type),
+ _read_first_invocation(glsl_type::uvec2_type),
+ _read_first_invocation(glsl_type::uvec3_type),
+ _read_first_invocation(glsl_type::uvec4_type),
+ NULL);
+
+ add_function("clock2x32ARB",
+ _shader_clock(shader_clock,
+ glsl_type::uvec2_type),
+ NULL);
+
+ add_function("clockARB",
+ _shader_clock(shader_clock_int64,
+ glsl_type::uint64_t_type),
+ NULL);
+
+ add_function("beginInvocationInterlockARB",
+ _invocation_interlock(
+ "__intrinsic_begin_invocation_interlock",
+ supports_arb_fragment_shader_interlock),
+ NULL);
+
+ add_function("endInvocationInterlockARB",
+ _invocation_interlock(
+ "__intrinsic_end_invocation_interlock",
+ supports_arb_fragment_shader_interlock),
+ NULL);
+
+ add_function("beginInvocationInterlockNV",
+ _invocation_interlock(
+ "__intrinsic_begin_invocation_interlock",
+ supports_nv_fragment_shader_interlock),
+ NULL);
+
+ add_function("endInvocationInterlockNV",
+ _invocation_interlock(
+ "__intrinsic_end_invocation_interlock",
+ supports_nv_fragment_shader_interlock),
+ NULL);
+
+ add_function("anyInvocationARB",
+ _vote("__intrinsic_vote_any", vote),
+ NULL);
+
+ add_function("allInvocationsARB",
+ _vote("__intrinsic_vote_all", vote),
+ NULL);
+
+ add_function("allInvocationsEqualARB",
+ _vote("__intrinsic_vote_eq", vote),
+ NULL);
+
+ add_function("anyInvocation",
+ _vote("__intrinsic_vote_any", v460_desktop),
+ NULL);
+
+ add_function("allInvocations",
+ _vote("__intrinsic_vote_all", v460_desktop),
+ NULL);
+
+ add_function("allInvocationsEqual",
+ _vote("__intrinsic_vote_eq", v460_desktop),
+ NULL);
+
+ add_function("helperInvocationEXT", _helper_invocation(), NULL);
+
+ add_function("__builtin_idiv64",
+ generate_ir::idiv64(mem_ctx, integer_functions_supported),
+ NULL);
+
+ add_function("__builtin_imod64",
+ generate_ir::imod64(mem_ctx, integer_functions_supported),
+ NULL);
+
+ add_function("__builtin_sign64",
+ generate_ir::sign64(mem_ctx, integer_functions_supported),
+ NULL);
+
+ add_function("__builtin_udiv64",
+ generate_ir::udiv64(mem_ctx, integer_functions_supported),
+ NULL);
+
+ add_function("__builtin_umod64",
+ generate_ir::umod64(mem_ctx, integer_functions_supported),
+ NULL);
+
+ add_function("__builtin_umul64",
+ generate_ir::umul64(mem_ctx, integer_functions_supported),
+ NULL);
+
+ add_function("countLeadingZeros",
+ _countLeadingZeros(shader_integer_functions2,
+ glsl_type::uint_type),
+ _countLeadingZeros(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _countLeadingZeros(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _countLeadingZeros(shader_integer_functions2,
+ glsl_type::uvec4_type),
+ NULL);
+
+ add_function("countTrailingZeros",
+ _countTrailingZeros(shader_integer_functions2,
+ glsl_type::uint_type),
+ _countTrailingZeros(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _countTrailingZeros(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _countTrailingZeros(shader_integer_functions2,
+ glsl_type::uvec4_type),
+ NULL);
+
+ add_function("absoluteDifference",
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::int_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::ivec2_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::ivec3_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::ivec4_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::uint_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _absoluteDifference(shader_integer_functions2,
+ glsl_type::uvec4_type),
+
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::int64_t_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::i64vec2_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::i64vec3_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::i64vec4_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::uint64_t_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::u64vec2_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::u64vec3_type),
+ _absoluteDifference(shader_integer_functions2_int64,
+ glsl_type::u64vec4_type),
+ NULL);
+
+ add_function("addSaturate",
+ _addSaturate(shader_integer_functions2,
+ glsl_type::int_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::ivec2_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::ivec3_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::ivec4_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::uint_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _addSaturate(shader_integer_functions2,
+ glsl_type::uvec4_type),
+
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::int64_t_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::i64vec2_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::i64vec3_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::i64vec4_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::uint64_t_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::u64vec2_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::u64vec3_type),
+ _addSaturate(shader_integer_functions2_int64,
+ glsl_type::u64vec4_type),
+ NULL);
+
+ add_function("average",
+ _average(shader_integer_functions2,
+ glsl_type::int_type),
+ _average(shader_integer_functions2,
+ glsl_type::ivec2_type),
+ _average(shader_integer_functions2,
+ glsl_type::ivec3_type),
+ _average(shader_integer_functions2,
+ glsl_type::ivec4_type),
+ _average(shader_integer_functions2,
+ glsl_type::uint_type),
+ _average(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _average(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _average(shader_integer_functions2,
+ glsl_type::uvec4_type),
+
+ _average(shader_integer_functions2_int64,
+ glsl_type::int64_t_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::i64vec2_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::i64vec3_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::i64vec4_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::uint64_t_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::u64vec2_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::u64vec3_type),
+ _average(shader_integer_functions2_int64,
+ glsl_type::u64vec4_type),
+ NULL);
+
+ add_function("averageRounded",
+ _averageRounded(shader_integer_functions2,
+ glsl_type::int_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::ivec2_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::ivec3_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::ivec4_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::uint_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _averageRounded(shader_integer_functions2,
+ glsl_type::uvec4_type),
+
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::int64_t_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::i64vec2_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::i64vec3_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::i64vec4_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::uint64_t_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::u64vec2_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::u64vec3_type),
+ _averageRounded(shader_integer_functions2_int64,
+ glsl_type::u64vec4_type),
+ NULL);
+
+ add_function("subtractSaturate",
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::int_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::ivec2_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::ivec3_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::ivec4_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::uint_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _subtractSaturate(shader_integer_functions2,
+ glsl_type::uvec4_type),
+
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::int64_t_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::i64vec2_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::i64vec3_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::i64vec4_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::uint64_t_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::u64vec2_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::u64vec3_type),
+ _subtractSaturate(shader_integer_functions2_int64,
+ glsl_type::u64vec4_type),
+ NULL);
+
+ add_function("multiply32x16",
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::int_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::ivec2_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::ivec3_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::ivec4_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::uint_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::uvec2_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::uvec3_type),
+ _multiply32x16(shader_integer_functions2,
+ glsl_type::uvec4_type),
+ NULL);
+
+#undef F
+#undef FI
+#undef FIUD_VEC
+#undef FIUBD_VEC
+#undef FIU2_MIXED
+}
+
+void
+builtin_builder::add_function(const char *name, ...)
+{
+ va_list ap;
+
+ ir_function *f = new(mem_ctx) ir_function(name);
+
+ va_start(ap, name);
+ while (true) {
+ ir_function_signature *sig = va_arg(ap, ir_function_signature *);
+ if (sig == NULL)
+ break;
+
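+      /* Debugging hook: flip this condition to true to run the IR
+       * validator over each signature as it is added.
+       */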
+ if (false) {
+ exec_list stuff;
+ stuff.push_tail(sig);
+ validate_ir_tree(&stuff);
+ }
+
+ f->add_signature(sig);
+ }
+ va_end(ap);
+
+ shader->symbols->add_function(f);
+}
+
+void
+builtin_builder::add_image_function(const char *name,
+ const char *intrinsic_name,
+ image_prototype_ctr prototype,
+ unsigned num_arguments,
+ unsigned flags,
+ enum ir_intrinsic_id intrinsic_id)
+{
+ static const glsl_type *const types[] = {
+ glsl_type::image1D_type,
+ glsl_type::image2D_type,
+ glsl_type::image3D_type,
+ glsl_type::image2DRect_type,
+ glsl_type::imageCube_type,
+ glsl_type::imageBuffer_type,
+ glsl_type::image1DArray_type,
+ glsl_type::image2DArray_type,
+ glsl_type::imageCubeArray_type,
+ glsl_type::image2DMS_type,
+ glsl_type::image2DMSArray_type,
+ glsl_type::iimage1D_type,
+ glsl_type::iimage2D_type,
+ glsl_type::iimage3D_type,
+ glsl_type::iimage2DRect_type,
+ glsl_type::iimageCube_type,
+ glsl_type::iimageBuffer_type,
+ glsl_type::iimage1DArray_type,
+ glsl_type::iimage2DArray_type,
+ glsl_type::iimageCubeArray_type,
+ glsl_type::iimage2DMS_type,
+ glsl_type::iimage2DMSArray_type,
+ glsl_type::uimage1D_type,
+ glsl_type::uimage2D_type,
+ glsl_type::uimage3D_type,
+ glsl_type::uimage2DRect_type,
+ glsl_type::uimageCube_type,
+ glsl_type::uimageBuffer_type,
+ glsl_type::uimage1DArray_type,
+ glsl_type::uimage2DArray_type,
+ glsl_type::uimageCubeArray_type,
+ glsl_type::uimage2DMS_type,
+ glsl_type::uimage2DMSArray_type
+ };
+
+ ir_function *f = new(mem_ctx) ir_function(name);
+
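+   /* Emit a signature for each image type the function supports: skip
+    * float-sampled images unless the function supports float data, and
+    * restrict multisample-only functions to the multisample image types.
+    */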
+ for (unsigned i = 0; i < ARRAY_SIZE(types); ++i) {
+ if ((types[i]->sampled_type != GLSL_TYPE_FLOAT ||
+ (flags & IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE)) &&
+ (types[i]->sampler_dimensionality == GLSL_SAMPLER_DIM_MS ||
+ !(flags & IMAGE_FUNCTION_MS_ONLY)))
+ f->add_signature(_image(prototype, types[i], intrinsic_name,
+ num_arguments, flags, intrinsic_id));
+ }
+
+ shader->symbols->add_function(f);
+}
+
+void
+builtin_builder::add_image_functions(bool glsl)
+{
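+   /* When `glsl` is false only the __intrinsic_* names below are
+    * registered; no GLSL-visible stubs are emitted.
+    */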
+ const unsigned flags = (glsl ? IMAGE_FUNCTION_EMIT_STUB : 0);
+
+ add_image_function(glsl ? "imageLoad" : "__intrinsic_image_load",
+ "__intrinsic_image_load",
+ &builtin_builder::_image_prototype, 0,
+ (flags | IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE |
+ IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE |
+ IMAGE_FUNCTION_READ_ONLY),
+ ir_intrinsic_image_load);
+
+ add_image_function(glsl ? "imageStore" : "__intrinsic_image_store",
+ "__intrinsic_image_store",
+ &builtin_builder::_image_prototype, 1,
+ (flags | IMAGE_FUNCTION_RETURNS_VOID |
+ IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE |
+ IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE |
+ IMAGE_FUNCTION_WRITE_ONLY),
+ ir_intrinsic_image_store);
+
+ const unsigned atom_flags = flags | IMAGE_FUNCTION_AVAIL_ATOMIC;
+
+ add_image_function(glsl ? "imageAtomicAdd" : "__intrinsic_image_atomic_add",
+ "__intrinsic_image_atomic_add",
+ &builtin_builder::_image_prototype, 1,
+ (flags | IMAGE_FUNCTION_AVAIL_ATOMIC_ADD |
+ IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE),
+ ir_intrinsic_image_atomic_add);
+
+ add_image_function(glsl ? "imageAtomicMin" : "__intrinsic_image_atomic_min",
+ "__intrinsic_image_atomic_min",
+ &builtin_builder::_image_prototype, 1, atom_flags,
+ ir_intrinsic_image_atomic_min);
+
+ add_image_function(glsl ? "imageAtomicMax" : "__intrinsic_image_atomic_max",
+ "__intrinsic_image_atomic_max",
+ &builtin_builder::_image_prototype, 1, atom_flags,
+ ir_intrinsic_image_atomic_max);
+
+ add_image_function(glsl ? "imageAtomicAnd" : "__intrinsic_image_atomic_and",
+ "__intrinsic_image_atomic_and",
+ &builtin_builder::_image_prototype, 1, atom_flags,
+ ir_intrinsic_image_atomic_and);
+
+ add_image_function(glsl ? "imageAtomicOr" : "__intrinsic_image_atomic_or",
+ "__intrinsic_image_atomic_or",
+ &builtin_builder::_image_prototype, 1, atom_flags,
+ ir_intrinsic_image_atomic_or);
+
+ add_image_function(glsl ? "imageAtomicXor" : "__intrinsic_image_atomic_xor",
+ "__intrinsic_image_atomic_xor",
+ &builtin_builder::_image_prototype, 1, atom_flags,
+ ir_intrinsic_image_atomic_xor);
+
+ add_image_function((glsl ? "imageAtomicExchange" :
+ "__intrinsic_image_atomic_exchange"),
+ "__intrinsic_image_atomic_exchange",
+ &builtin_builder::_image_prototype, 1,
+ (flags | IMAGE_FUNCTION_AVAIL_ATOMIC_EXCHANGE |
+ IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE),
+ ir_intrinsic_image_atomic_exchange);
+
+ add_image_function((glsl ? "imageAtomicCompSwap" :
+ "__intrinsic_image_atomic_comp_swap"),
+ "__intrinsic_image_atomic_comp_swap",
+ &builtin_builder::_image_prototype, 2, atom_flags,
+ ir_intrinsic_image_atomic_comp_swap);
+
+ add_image_function(glsl ? "imageSize" : "__intrinsic_image_size",
+ "__intrinsic_image_size",
+ &builtin_builder::_image_size_prototype, 1,
+ flags | IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE,
+ ir_intrinsic_image_size);
+
+ add_image_function(glsl ? "imageSamples" : "__intrinsic_image_samples",
+ "__intrinsic_image_samples",
+ &builtin_builder::_image_samples_prototype, 1,
+ flags | IMAGE_FUNCTION_SUPPORTS_FLOAT_DATA_TYPE |
+ IMAGE_FUNCTION_MS_ONLY,
+ ir_intrinsic_image_samples);
+
+ /* EXT_shader_image_load_store */
+ add_image_function(glsl ? "imageAtomicIncWrap" : "__intrinsic_image_atomic_inc_wrap",
+ "__intrinsic_image_atomic_inc_wrap",
+ &builtin_builder::_image_prototype, 1,
+ (atom_flags | IMAGE_FUNCTION_EXT_ONLY),
+ ir_intrinsic_image_atomic_inc_wrap);
+ add_image_function(glsl ? "imageAtomicDecWrap" : "__intrinsic_image_atomic_dec_wrap",
+ "__intrinsic_image_atomic_dec_wrap",
+ &builtin_builder::_image_prototype, 1,
+ (atom_flags | IMAGE_FUNCTION_EXT_ONLY),
+ ir_intrinsic_image_atomic_dec_wrap);
+}
+
+ir_variable *
+builtin_builder::in_var(const glsl_type *type, const char *name)
+{
+ return new(mem_ctx) ir_variable(type, name, ir_var_function_in);
+}
+
+ir_variable *
+builtin_builder::out_var(const glsl_type *type, const char *name)
+{
+ return new(mem_ctx) ir_variable(type, name, ir_var_function_out);
+}
+
+ir_constant *
+builtin_builder::imm(bool b, unsigned vector_elements)
+{
+ return new(mem_ctx) ir_constant(b, vector_elements);
+}
+
+ir_constant *
+builtin_builder::imm(float f, unsigned vector_elements)
+{
+ return new(mem_ctx) ir_constant(f, vector_elements);
+}
+
+ir_constant *
+builtin_builder::imm(int i, unsigned vector_elements)
+{
+ return new(mem_ctx) ir_constant(i, vector_elements);
+}
+
+ir_constant *
+builtin_builder::imm(unsigned u, unsigned vector_elements)
+{
+ return new(mem_ctx) ir_constant(u, vector_elements);
+}
+
+ir_constant *
+builtin_builder::imm(double d, unsigned vector_elements)
+{
+ return new(mem_ctx) ir_constant(d, vector_elements);
+}
+
+ir_constant *
+builtin_builder::imm(const glsl_type *type, const ir_constant_data &data)
+{
+ return new(mem_ctx) ir_constant(type, &data);
+}
+
+#define IMM_FP(type, val) ((type)->is_double() ? imm(val) : imm((float)(val)))
+
+ir_dereference_variable *
+builtin_builder::var_ref(ir_variable *var)
+{
+ return new(mem_ctx) ir_dereference_variable(var);
+}
+
+ir_dereference_array *
+builtin_builder::array_ref(ir_variable *var, int idx)
+{
+ return new(mem_ctx) ir_dereference_array(var, imm(idx));
+}
+
+/** Return an element of a matrix */
+ir_swizzle *
+builtin_builder::matrix_elt(ir_variable *var, int column, int row)
+{
+ return swizzle(array_ref(var, column), row, 1);
+}
+
+/**
+ * Implementations of built-in functions:
+ * @{
+ */
+ir_function_signature *
+builtin_builder::new_sig(const glsl_type *return_type,
+ builtin_available_predicate avail,
+ int num_params,
+ ...)
+{
+ va_list ap;
+
+ ir_function_signature *sig =
+ new(mem_ctx) ir_function_signature(return_type, avail);
+
+ exec_list plist;
+ va_start(ap, num_params);
+ for (int i = 0; i < num_params; i++) {
+ plist.push_tail(va_arg(ap, ir_variable *));
+ }
+ va_end(ap);
+
+ sig->replace_parameters(&plist);
+ return sig;
+}
+
+#define MAKE_SIG(return_type, avail, ...) \
+ ir_function_signature *sig = \
+ new_sig(return_type, avail, __VA_ARGS__); \
+ ir_factory body(&sig->body, mem_ctx); \
+ sig->is_defined = true;
+
+#define MAKE_INTRINSIC(return_type, id, avail, ...) \
+ ir_function_signature *sig = \
+ new_sig(return_type, avail, __VA_ARGS__); \
+ sig->intrinsic_id = id;
+
+ir_function_signature *
+builtin_builder::unop(builtin_available_predicate avail,
+ ir_expression_operation opcode,
+ const glsl_type *return_type,
+ const glsl_type *param_type)
+{
+ ir_variable *x = in_var(param_type, "x");
+ MAKE_SIG(return_type, avail, 1, x);
+ body.emit(ret(expr(opcode, x)));
+ return sig;
+}
+
+#define UNOP(NAME, OPCODE, AVAIL) \
+ir_function_signature * \
+builtin_builder::_##NAME(const glsl_type *type) \
+{ \
+ return unop(&AVAIL, OPCODE, type, type); \
+}
+
+#define UNOPA(NAME, OPCODE) \
+ir_function_signature * \
+builtin_builder::_##NAME(builtin_available_predicate avail, const glsl_type *type) \
+{ \
+ return unop(avail, OPCODE, type, type); \
+}
+
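+/* When swap_operands is set, the generated body computes opcode(y, x)
+ * instead of opcode(x, y).
+ */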
+ir_function_signature *
+builtin_builder::binop(builtin_available_predicate avail,
+ ir_expression_operation opcode,
+ const glsl_type *return_type,
+ const glsl_type *param0_type,
+ const glsl_type *param1_type,
+ bool swap_operands)
+{
+ ir_variable *x = in_var(param0_type, "x");
+ ir_variable *y = in_var(param1_type, "y");
+ MAKE_SIG(return_type, avail, 2, x, y);
+
+ if (swap_operands)
+ body.emit(ret(expr(opcode, y, x)));
+ else
+ body.emit(ret(expr(opcode, x, y)));
+
+ return sig;
+}
+
+#define BINOP(NAME, OPCODE, AVAIL) \
+ir_function_signature * \
+builtin_builder::_##NAME(const glsl_type *return_type, \
+ const glsl_type *param0_type, \
+ const glsl_type *param1_type) \
+{ \
+ return binop(&AVAIL, OPCODE, return_type, param0_type, param1_type); \
+}
+
+/**
+ * Angle and Trigonometry Functions @{
+ */
+
+ir_function_signature *
+builtin_builder::_radians(const glsl_type *type)
+{
+ ir_variable *degrees = in_var(type, "degrees");
+ MAKE_SIG(type, always_available, 1, degrees);
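+   /* 0.0174532925 ~ pi / 180 */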
+ body.emit(ret(mul(degrees, imm(0.0174532925f))));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_degrees(const glsl_type *type)
+{
+ ir_variable *radians = in_var(type, "radians");
+ MAKE_SIG(type, always_available, 1, radians);
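+   /* 57.29578 ~ 180 / pi */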
+ body.emit(ret(mul(radians, imm(57.29578f))));
+ return sig;
+}
+
+UNOP(sin, ir_unop_sin, always_available)
+UNOP(cos, ir_unop_cos, always_available)
+
+ir_function_signature *
+builtin_builder::_tan(const glsl_type *type)
+{
+ ir_variable *theta = in_var(type, "theta");
+ MAKE_SIG(type, always_available, 1, theta);
+ body.emit(ret(div(sin(theta), cos(theta))));
+ return sig;
+}
+
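+/* Polynomial approximation of asin(x) of the form
+ *
+ *    sign(x) * (pi/2 - sqrt(1 - |x|) *
+ *               (pi/2 + |x| * (pi/4 - 1 + |x| * (p0 + |x| * p1))))
+ *
+ * where p0 and p1 are fit coefficients supplied by the caller (see _asin
+ * and _acos below).
+ */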
+ir_expression *
+builtin_builder::asin_expr(ir_variable *x, float p0, float p1)
+{
+ return mul(sign(x),
+ sub(imm(M_PI_2f),
+ mul(sqrt(sub(imm(1.0f), abs(x))),
+ add(imm(M_PI_2f),
+ mul(abs(x),
+ add(imm(M_PI_4f - 1.0f),
+ mul(abs(x),
+ add(imm(p0),
+ mul(abs(x), imm(p1))))))))));
+}
+
+/**
+ * Generate an ir_call to a function with a set of parameters
+ *
+ * The input \c params can either be a list of \c ir_variable or a list of
+ * \c ir_dereference_variable. In the latter case, all nodes will be removed
+ * from \c params and used directly as the parameters to the generated
+ * \c ir_call.
+ */
+ir_call *
+builtin_builder::call(ir_function *f, ir_variable *ret, exec_list params)
+{
+ exec_list actual_params;
+
+ foreach_in_list_safe(ir_instruction, ir, &params) {
+ ir_dereference_variable *d = ir->as_dereference_variable();
+ if (d != NULL) {
+ d->remove();
+ actual_params.push_tail(d);
+ } else {
+ ir_variable *var = ir->as_variable();
+ assert(var != NULL);
+ actual_params.push_tail(var_ref(var));
+ }
+ }
+
+ ir_function_signature *sig =
+ f->exact_matching_signature(NULL, &actual_params);
+ if (!sig)
+ return NULL;
+
+ ir_dereference_variable *deref =
+ (sig->return_type->is_void() ? NULL : var_ref(ret));
+
+ return new(mem_ctx) ir_call(sig, deref, &actual_params);
+}
+
+ir_function_signature *
+builtin_builder::_asin(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, always_available, 1, x);
+
+ body.emit(ret(asin_expr(x, 0.086566724f, -0.03102955f)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_acos(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, always_available, 1, x);
+
+ body.emit(ret(sub(imm(M_PI_2f), asin_expr(x, 0.08132463f, -0.02363318f))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atan2(const glsl_type *type)
+{
+ const unsigned n = type->vector_elements;
+ ir_variable *y = in_var(type, "y");
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, is_not_nir, 2, y, x);
+
+   /* If we're on the left half-plane, rotate the coordinates π/2 clockwise
+ * for the y=0 discontinuity to end up aligned with the vertical
+ * discontinuity of atan(s/t) along t=0. This also makes sure that we
+ * don't attempt to divide by zero along the vertical line, which may give
+ * unspecified results on non-GLSL 4.1-capable hardware.
+ */
+ ir_variable *flip = body.make_temp(glsl_type::bvec(n), "flip");
+ body.emit(assign(flip, gequal(imm(0.0f, n), x)));
+ ir_variable *s = body.make_temp(type, "s");
+ body.emit(assign(s, csel(flip, abs(x), y)));
+ ir_variable *t = body.make_temp(type, "t");
+ body.emit(assign(t, csel(flip, y, abs(x))));
+
+ /* If the magnitude of the denominator exceeds some huge value, scale down
+ * the arguments in order to prevent the reciprocal operation from flushing
+    * its result to zero, which would cause precision problems and, for
+    * infinite s, would cause us to return a NaN instead of the correct
+    * finite value.
+ *
+ * If fmin and fmax are respectively the smallest and largest positive
+ * normalized floating point values representable by the implementation,
+ * the constants below should be in agreement with:
+ *
+ * huge <= 1 / fmin
+ * scale <= 1 / fmin / fmax (for |t| >= huge)
+ *
+ * In addition scale should be a negative power of two in order to avoid
+ * loss of precision. The values chosen below should work for most usual
+ * floating point representations with at least the dynamic range of ATI's
+ * 24-bit representation.
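+    *
+    * As a rough sanity check, assuming IEEE binary32 (fmin ~ 1.18e-38,
+    * fmax ~ 3.40e38): 1 / fmin ~ 8.5e37, comfortably above huge = 1e18,
+    * and scale = 0.25 is an exact negative power of two.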
+ */
+ ir_constant *huge = imm(1e18f, n);
+ ir_variable *scale = body.make_temp(type, "scale");
+ body.emit(assign(scale, csel(gequal(abs(t), huge),
+ imm(0.25f, n), imm(1.0f, n))));
+ ir_variable *rcp_scaled_t = body.make_temp(type, "rcp_scaled_t");
+ body.emit(assign(rcp_scaled_t, rcp(mul(t, scale))));
+ ir_expression *s_over_t = mul(mul(s, scale), rcp_scaled_t);
+
+ /* For |x| = |y| assume tan = 1 even if infinite (i.e. pretend momentarily
+ * that ∞/∞ = 1) in order to comply with the rather artificial rules
+ * inherited from IEEE 754-2008, namely:
+ *
+ * "atan2(±∞, −∞) is ±3π/4
+ * atan2(±∞, +∞) is ±π/4"
+ *
+ * Note that this is inconsistent with the rules for the neighborhood of
+ * zero that are based on iterated limits:
+ *
+ * "atan2(±0, −0) is ±π
+ * atan2(±0, +0) is ±0"
+ *
+ * but GLSL specifically allows implementations to deviate from IEEE rules
+ * at (0,0), so we take that license (i.e. pretend that 0/0 = 1 here as
+ * well).
+ */
+ ir_expression *tan = csel(equal(abs(x), abs(y)),
+ imm(1.0f, n), abs(s_over_t));
+
+ /* Calculate the arctangent and fix up the result if we had flipped the
+ * coordinate system.
+ */
+ ir_variable *arc = body.make_temp(type, "arc");
+ do_atan(body, type, arc, tan);
+ body.emit(assign(arc, add(arc, mul(b2f(flip), imm(M_PI_2f)))));
+
+ /* Rather convoluted calculation of the sign of the result. When x < 0 we
+ * cannot use fsign because we need to be able to distinguish between
+ * negative and positive zero. Unfortunately we cannot use bitwise
+ * arithmetic tricks either because of back-ends without integer support.
+ * When x >= 0 rcp_scaled_t will always be non-negative so this won't be
+ * able to distinguish between negative and positive zero, but we don't
+ * care because atan2 is continuous along the whole positive y = 0
+ * half-line, so it won't affect the result significantly.
+ */
+ body.emit(ret(csel(less(min2(y, rcp_scaled_t), imm(0.0f, n)),
+ neg(arc), arc)));
+
+ return sig;
+}
+
+void
+builtin_builder::do_atan(ir_factory &body, const glsl_type *type, ir_variable *res, operand y_over_x)
+{
+ /*
+ * range-reduction, first step:
+ *
+ * / y_over_x if |y_over_x| <= 1.0;
+ * x = <
+ * \ 1.0 / y_over_x otherwise
+ */
+ ir_variable *x = body.make_temp(type, "atan_x");
+ body.emit(assign(x, div(min2(abs(y_over_x),
+ imm(1.0f)),
+ max2(abs(y_over_x),
+ imm(1.0f)))));
+
+ /*
+ * approximate atan by evaluating polynomial:
+ *
+ * x * 0.9999793128310355 - x^3 * 0.3326756418091246 +
+ * x^5 * 0.1938924977115610 - x^7 * 0.1173503194786851 +
+ * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
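+    *
+    * With t = x*x and cN denoting the magnitude of the x^N coefficient
+    * above, the nested chain below is this polynomial in Horner form:
+    *
+    *    x * (c1 + t*(-c3 + t*(c5 + t*(-c7 + t*(c9 - t*c11)))))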
+ */
+ ir_variable *tmp = body.make_temp(type, "atan_tmp");
+ body.emit(assign(tmp, mul(x, x)));
+ body.emit(assign(tmp, mul(add(mul(sub(mul(add(mul(sub(mul(add(mul(imm(-0.0121323213173444f),
+ tmp),
+ imm(0.0536813784310406f)),
+ tmp),
+ imm(0.1173503194786851f)),
+ tmp),
+ imm(0.1938924977115610f)),
+ tmp),
+ imm(0.3326756418091246f)),
+ tmp),
+ imm(0.9999793128310355f)),
+ x)));
+
+ /* range-reduction fixup */
+ body.emit(assign(tmp, add(tmp,
+ mul(b2f(greater(abs(y_over_x),
+ imm(1.0f, type->components()))),
+ add(mul(tmp,
+ imm(-2.0f)),
+ imm(M_PI_2f))))));
+
+ /* sign fixup */
+ body.emit(assign(res, mul(tmp, sign(y_over_x))));
+}
+
+ir_function_signature *
+builtin_builder::_atan(const glsl_type *type)
+{
+ ir_variable *y_over_x = in_var(type, "y_over_x");
+ MAKE_SIG(type, is_not_nir, 1, y_over_x);
+
+ ir_variable *tmp = body.make_temp(type, "tmp");
+ do_atan(body, type, tmp, y_over_x);
+ body.emit(ret(tmp));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_sinh(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, v130, 1, x);
+
+ /* 0.5 * (e^x - e^(-x)) */
+ body.emit(ret(mul(imm(0.5f), sub(exp(x), exp(neg(x))))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_cosh(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, v130, 1, x);
+
+ /* 0.5 * (e^x + e^(-x)) */
+ body.emit(ret(mul(imm(0.5f), add(exp(x), exp(neg(x))))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_tanh(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, v130, 1, x);
+
+ /* Clamp x to [-10, +10] to avoid precision problems.
+ * When x > 10, e^(-x) is so small relative to e^x that it gets flushed to
+ * zero in the computation e^x + e^(-x). The same happens in the other
+ * direction when x < -10.
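+    * (Assuming IEEE binary32 this is comfortably safe: e^-10 / e^10 is
+    * roughly 2e-9, below single-precision resolution, and tanh(10)
+    * already rounds to 1.0f.)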
+ */
+ ir_variable *t = body.make_temp(type, "tmp");
+ body.emit(assign(t, min2(max2(x, imm(-10.0f)), imm(10.0f))));
+
+ /* (e^x - e^(-x)) / (e^x + e^(-x)) */
+ body.emit(ret(div(sub(exp(t), exp(neg(t))),
+ add(exp(t), exp(neg(t))))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_asinh(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, v130, 1, x);
+
+ body.emit(ret(mul(sign(x), log(add(abs(x), sqrt(add(mul(x, x),
+ imm(1.0f))))))));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_acosh(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, v130, 1, x);
+
+ body.emit(ret(log(add(x, sqrt(sub(mul(x, x), imm(1.0f)))))));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atanh(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, v130, 1, x);
+
+ body.emit(ret(mul(imm(0.5f), log(div(add(imm(1.0f), x),
+ sub(imm(1.0f), x))))));
+ return sig;
+}
+/** @} */
+
+/**
+ * Exponential Functions @{
+ */
+
+ir_function_signature *
+builtin_builder::_pow(const glsl_type *type)
+{
+ return binop(always_available, ir_binop_pow, type, type, type);
+}
+
+UNOP(exp, ir_unop_exp, always_available)
+UNOP(log, ir_unop_log, always_available)
+UNOP(exp2, ir_unop_exp2, always_available)
+UNOP(log2, ir_unop_log2, always_available)
+UNOP(atan_op, ir_unop_atan, always_available)
+UNOPA(sqrt, ir_unop_sqrt)
+UNOPA(inversesqrt, ir_unop_rsq)
+
+/** @} */
+
+UNOPA(abs, ir_unop_abs)
+UNOPA(sign, ir_unop_sign)
+UNOPA(floor, ir_unop_floor)
+UNOPA(truncate, ir_unop_trunc)
+UNOPA(trunc, ir_unop_trunc)
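+/* GLSL leaves the direction round() takes at 0.5 implementation-defined,
+ * so mapping both round() and roundEven() to round-to-even is permitted.
+ */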
+UNOPA(round, ir_unop_round_even)
+UNOPA(roundEven, ir_unop_round_even)
+UNOPA(ceil, ir_unop_ceil)
+UNOPA(fract, ir_unop_fract)
+
+ir_function_signature *
+builtin_builder::_mod(builtin_available_predicate avail,
+ const glsl_type *x_type, const glsl_type *y_type)
+{
+ return binop(avail, ir_binop_mod, x_type, x_type, y_type);
+}
+
+ir_function_signature *
+builtin_builder::_modf(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *i = out_var(type, "i");
+ MAKE_SIG(type, avail, 2, x, i);
+
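+   /* Write the truncated integer part to i and return the fraction. */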
+ ir_variable *t = body.make_temp(type, "t");
+ body.emit(assign(t, expr(ir_unop_trunc, x)));
+ body.emit(assign(i, t));
+ body.emit(ret(sub(x, t)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_min(builtin_available_predicate avail,
+ const glsl_type *x_type, const glsl_type *y_type)
+{
+ return binop(avail, ir_binop_min, x_type, x_type, y_type);
+}
+
+ir_function_signature *
+builtin_builder::_max(builtin_available_predicate avail,
+ const glsl_type *x_type, const glsl_type *y_type)
+{
+ return binop(avail, ir_binop_max, x_type, x_type, y_type);
+}
+
+ir_function_signature *
+builtin_builder::_clamp(builtin_available_predicate avail,
+ const glsl_type *val_type, const glsl_type *bound_type)
+{
+ ir_variable *x = in_var(val_type, "x");
+ ir_variable *minVal = in_var(bound_type, "minVal");
+ ir_variable *maxVal = in_var(bound_type, "maxVal");
+ MAKE_SIG(val_type, avail, 3, x, minVal, maxVal);
+
+ body.emit(ret(clamp(x, minVal, maxVal)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_mix_lrp(builtin_available_predicate avail, const glsl_type *val_type, const glsl_type *blend_type)
+{
+ ir_variable *x = in_var(val_type, "x");
+ ir_variable *y = in_var(val_type, "y");
+ ir_variable *a = in_var(blend_type, "a");
+ MAKE_SIG(val_type, avail, 3, x, y, a);
+
+ body.emit(ret(lrp(x, y, a)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_mix_sel(builtin_available_predicate avail,
+ const glsl_type *val_type,
+ const glsl_type *blend_type)
+{
+ ir_variable *x = in_var(val_type, "x");
+ ir_variable *y = in_var(val_type, "y");
+ ir_variable *a = in_var(blend_type, "a");
+ MAKE_SIG(val_type, avail, 3, x, y, a);
+
+   /* csel matches the ternary operator in that a selector of true chooses
+    * the first argument. This differs from mix(x, y, false), which chooses
+    * the second argument (to remain consistent with the interpolating
+    * version of mix(), which takes a blend factor from 0.0 to 1.0 where 0.0
+    * yields only x).
+ *
+ * To handle the behavior mismatch, reverse the x and y arguments.
+ */
+ body.emit(ret(csel(a, y, x)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_step(builtin_available_predicate avail, const glsl_type *edge_type, const glsl_type *x_type)
+{
+ ir_variable *edge = in_var(edge_type, "edge");
+ ir_variable *x = in_var(x_type, "x");
+ MAKE_SIG(x_type, avail, 2, edge, x);
+
+ ir_variable *t = body.make_temp(x_type, "t");
+ if (x_type->vector_elements == 1) {
+      /* Both operands are scalars */
+ if (edge_type->is_double())
+ body.emit(assign(t, f2d(b2f(gequal(x, edge)))));
+ else
+ body.emit(assign(t, b2f(gequal(x, edge))));
+ } else if (edge_type->vector_elements == 1) {
+      /* x is a vector but edge is a scalar */
+ for (int i = 0; i < x_type->vector_elements; i++) {
+ if (edge_type->is_double())
+ body.emit(assign(t, f2d(b2f(gequal(swizzle(x, i, 1), edge))), 1 << i));
+ else
+ body.emit(assign(t, b2f(gequal(swizzle(x, i, 1), edge)), 1 << i));
+ }
+ } else {
+ /* Both are vectors */
+ for (int i = 0; i < x_type->vector_elements; i++) {
+ if (edge_type->is_double())
+ body.emit(assign(t, f2d(b2f(gequal(swizzle(x, i, 1), swizzle(edge, i, 1)))),
+ 1 << i));
+ else
+ body.emit(assign(t, b2f(gequal(swizzle(x, i, 1), swizzle(edge, i, 1))),
+ 1 << i));
+
+ }
+ }
+ body.emit(ret(t));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_smoothstep(builtin_available_predicate avail, const glsl_type *edge_type, const glsl_type *x_type)
+{
+ ir_variable *edge0 = in_var(edge_type, "edge0");
+ ir_variable *edge1 = in_var(edge_type, "edge1");
+ ir_variable *x = in_var(x_type, "x");
+ MAKE_SIG(x_type, avail, 3, edge0, edge1, x);
+
+ /* From the GLSL 1.10 specification:
+ *
+ * genType t;
+ * t = clamp((x - edge0) / (edge1 - edge0), 0, 1);
+ * return t * t * (3 - 2 * t);
+ */
+
+ ir_variable *t = body.make_temp(x_type, "t");
+ body.emit(assign(t, clamp(div(sub(x, edge0), sub(edge1, edge0)),
+ IMM_FP(x_type, 0.0), IMM_FP(x_type, 1.0))));
+
+ body.emit(ret(mul(t, mul(t, sub(IMM_FP(x_type, 3.0), mul(IMM_FP(x_type, 2.0), t))))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_isnan(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::bvec(type->vector_elements), avail, 1, x);
+
+ body.emit(ret(nequal(x, x)));
+
+ return sig;
+}
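+
+/* For reference (an illustrative note, not from upstream): this exploits the
+ * IEEE 754 rule that NaN compares unequal to everything, itself included:
+ *
+ *    bvec isnan(genType x) { return notEqual(x, x); }  // true only for NaN
+ *
+ * Every non-NaN value compares equal to itself, so no bit tests are needed.
+ */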
+
+ir_function_signature *
+builtin_builder::_isinf(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::bvec(type->vector_elements), avail, 1, x);
+
+ ir_constant_data infinities;
+ for (int i = 0; i < type->vector_elements; i++) {
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ infinities.f[i] = INFINITY;
+ break;
+ case GLSL_TYPE_DOUBLE:
+ infinities.d[i] = INFINITY;
+ break;
+ default:
+ unreachable("unknown type");
+ }
+ }
+
+ body.emit(ret(equal(abs(x), imm(type, infinities))));
+
+ return sig;
+}
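+
+/* For reference (an illustrative note, not from upstream): abs() folds
+ * -INFINITY onto +INFINITY, so a single equality against a +INFINITY
+ * constant catches both signed infinities; for a scalar float:
+ *
+ *    bool isinf(float x) { return abs(x) == uintBitsToFloat(0x7f800000u); }
+ */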
+
+ir_function_signature *
+builtin_builder::_atan2_op(const glsl_type *x_type)
+{
+ return binop(always_available, ir_binop_atan2, x_type, x_type, x_type);
+}
+
+ir_function_signature *
+builtin_builder::_floatBitsToInt(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::ivec(type->vector_elements), shader_bit_encoding, 1, x);
+ body.emit(ret(bitcast_f2i(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_floatBitsToUint(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::uvec(type->vector_elements), shader_bit_encoding, 1, x);
+ body.emit(ret(bitcast_f2u(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_intBitsToFloat(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::vec(type->vector_elements), shader_bit_encoding, 1, x);
+ body.emit(ret(bitcast_i2f(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_uintBitsToFloat(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::vec(type->vector_elements), shader_bit_encoding, 1, x);
+ body.emit(ret(bitcast_u2f(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_doubleBitsToInt64(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::i64vec(type->vector_elements), avail, 1, x);
+ body.emit(ret(bitcast_d2i64(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_doubleBitsToUint64(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::u64vec(type->vector_elements), avail, 1, x);
+ body.emit(ret(bitcast_d2u64(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_int64BitsToDouble(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::dvec(type->vector_elements), avail, 1, x);
+ body.emit(ret(bitcast_i642d(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_uint64BitsToDouble(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(glsl_type::dvec(type->vector_elements), avail, 1, x);
+ body.emit(ret(bitcast_u642d(x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packUnorm2x16(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::vec2_type, "v");
+ MAKE_SIG(glsl_type::uint_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_unorm_2x16, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packSnorm2x16(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::vec2_type, "v");
+ MAKE_SIG(glsl_type::uint_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_snorm_2x16, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packUnorm4x8(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::vec4_type, "v");
+ MAKE_SIG(glsl_type::uint_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_unorm_4x8, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packSnorm4x8(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::vec4_type, "v");
+ MAKE_SIG(glsl_type::uint_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_snorm_4x8, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackUnorm2x16(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::uint_type, "p");
+ MAKE_SIG(glsl_type::vec2_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_unorm_2x16, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackSnorm2x16(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::uint_type, "p");
+ MAKE_SIG(glsl_type::vec2_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_snorm_2x16, p)));
+ return sig;
+}
+
+
+ir_function_signature *
+builtin_builder::_unpackUnorm4x8(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::uint_type, "p");
+ MAKE_SIG(glsl_type::vec4_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_unorm_4x8, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackSnorm4x8(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::uint_type, "p");
+ MAKE_SIG(glsl_type::vec4_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_snorm_4x8, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packHalf2x16(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::vec2_type, "v");
+ MAKE_SIG(glsl_type::uint_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_half_2x16, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackHalf2x16(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::uint_type, "p");
+ MAKE_SIG(glsl_type::vec2_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_half_2x16, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packDouble2x32(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::uvec2_type, "v");
+ MAKE_SIG(glsl_type::double_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_double_2x32, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackDouble2x32(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::double_type, "p");
+ MAKE_SIG(glsl_type::uvec2_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_double_2x32, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packInt2x32(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::ivec2_type, "v");
+ MAKE_SIG(glsl_type::int64_t_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_int_2x32, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackInt2x32(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::int64_t_type, "p");
+ MAKE_SIG(glsl_type::ivec2_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_int_2x32, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_packUint2x32(builtin_available_predicate avail)
+{
+ ir_variable *v = in_var(glsl_type::uvec2_type, "v");
+ MAKE_SIG(glsl_type::uint64_t_type, avail, 1, v);
+ body.emit(ret(expr(ir_unop_pack_uint_2x32, v)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_unpackUint2x32(builtin_available_predicate avail)
+{
+ ir_variable *p = in_var(glsl_type::uint64_t_type, "p");
+ MAKE_SIG(glsl_type::uvec2_type, avail, 1, p);
+ body.emit(ret(expr(ir_unop_unpack_uint_2x32, p)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_length(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type->get_base_type(), avail, 1, x);
+
+ body.emit(ret(sqrt(dot(x, x))));
+
+ return sig;
+}
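+
+/* For reference (an illustrative sketch, not from upstream): length() is
+ * built on the dot product rather than an explicit sum of squares:
+ *
+ *    float length(genType x) { return sqrt(dot(x, x)); }
+ *
+ * For scalar x this degenerates to sqrt(x * x), i.e. abs(x).
+ */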
+
+ir_function_signature *
+builtin_builder::_distance(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *p0 = in_var(type, "p0");
+ ir_variable *p1 = in_var(type, "p1");
+ MAKE_SIG(type->get_base_type(), avail, 2, p0, p1);
+
+ if (type->vector_elements == 1) {
+ body.emit(ret(abs(sub(p0, p1))));
+ } else {
+ ir_variable *p = body.make_temp(type, "p");
+ body.emit(assign(p, sub(p0, p1)));
+ body.emit(ret(sqrt(dot(p, p))));
+ }
+
+ return sig;
+}
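+
+/* For reference (an illustrative note, not from upstream):
+ *
+ *    distance(p0, p1) == length(p0 - p1)
+ *
+ * with the scalar case folding directly to abs(p0 - p1), as emitted above.
+ */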
+
+ir_function_signature *
+builtin_builder::_dot(builtin_available_predicate avail, const glsl_type *type)
+{
+ if (type->vector_elements == 1)
+ return binop(avail, ir_binop_mul, type, type, type);
+
+ return binop(avail, ir_binop_dot,
+ type->get_base_type(), type, type);
+}
+
+ir_function_signature *
+builtin_builder::_cross(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *a = in_var(type, "a");
+ ir_variable *b = in_var(type, "b");
+ MAKE_SIG(type, avail, 2, a, b);
+
+ int yzx = MAKE_SWIZZLE4(SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_X, 0);
+ int zxy = MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_X, SWIZZLE_Y, 0);
+
+ body.emit(ret(sub(mul(swizzle(a, yzx, 3), swizzle(b, zxy, 3)),
+ mul(swizzle(a, zxy, 3), swizzle(b, yzx, 3)))));
+
+ return sig;
+}
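+
+/* For reference (an illustrative sketch, not from upstream): the two swizzles
+ * implement the standard cross-product expansion in one vector expression:
+ *
+ *    vec3 cross(vec3 a, vec3 b) { return a.yzx * b.zxy - a.zxy * b.yzx; }
+ *
+ * which expands to (a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x).
+ */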
+
+ir_function_signature *
+builtin_builder::_normalize(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ MAKE_SIG(type, avail, 1, x);
+
+ if (type->vector_elements == 1) {
+ body.emit(ret(sign(x)));
+ } else {
+ body.emit(ret(mul(x, rsq(dot(x, x)))));
+ }
+
+ return sig;
+}
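+
+/* For reference (an illustrative sketch, not from upstream): the vector case
+ * uses a reciprocal square root in place of a division:
+ *
+ *    genType normalize(genType x) { return x * inversesqrt(dot(x, x)); }
+ *
+ * and the scalar case collapses x / |x| to sign(x), as emitted above.
+ */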
+
+ir_function_signature *
+builtin_builder::_ftransform()
+{
+ MAKE_SIG(glsl_type::vec4_type, compatibility_vs_only, 0);
+
+ /* ftransform() refers to global variables, and is always emitted
+ * directly by ast_function.cpp. Just emit a prototype here so we
+ * can recognize calls to it.
+ */
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_faceforward(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *N = in_var(type, "N");
+ ir_variable *I = in_var(type, "I");
+ ir_variable *Nref = in_var(type, "Nref");
+ MAKE_SIG(type, avail, 3, N, I, Nref);
+
+ body.emit(if_tree(less(dot(Nref, I), IMM_FP(type, 0.0)),
+ ret(N), ret(neg(N))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_reflect(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *I = in_var(type, "I");
+ ir_variable *N = in_var(type, "N");
+ MAKE_SIG(type, avail, 2, I, N);
+
+ /* I - 2 * dot(N, I) * N */
+ body.emit(ret(sub(I, mul(IMM_FP(type, 2.0), mul(dot(N, I), N)))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_refract(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *I = in_var(type, "I");
+ ir_variable *N = in_var(type, "N");
+ ir_variable *eta = in_var(type->get_base_type(), "eta");
+ MAKE_SIG(type, avail, 3, I, N, eta);
+
+ ir_variable *n_dot_i = body.make_temp(type->get_base_type(), "n_dot_i");
+ body.emit(assign(n_dot_i, dot(N, I)));
+
+ /* From the GLSL 1.10 specification:
+ * k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
+ * if (k < 0.0)
+ * return genType(0.0)
+ * else
+ * return eta * I - (eta * dot(N, I) + sqrt(k)) * N
+ */
+ ir_variable *k = body.make_temp(type->get_base_type(), "k");
+ body.emit(assign(k, sub(IMM_FP(type, 1.0),
+ mul(eta, mul(eta, sub(IMM_FP(type, 1.0),
+ mul(n_dot_i, n_dot_i)))))));
+ body.emit(if_tree(less(k, IMM_FP(type, 0.0)),
+ ret(ir_constant::zero(mem_ctx, type)),
+ ret(sub(mul(eta, I),
+ mul(add(mul(eta, n_dot_i), sqrt(k)), N)))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_matrixCompMult(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ MAKE_SIG(type, avail, 2, x, y);
+
+ ir_variable *z = body.make_temp(type, "z");
+ for (int i = 0; i < type->matrix_columns; i++) {
+ body.emit(assign(array_ref(z, i), mul(array_ref(x, i), array_ref(y, i))));
+ }
+ body.emit(ret(z));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_outerProduct(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *c;
+ ir_variable *r;
+
+ if (type->is_double()) {
+ r = in_var(glsl_type::dvec(type->matrix_columns), "r");
+ c = in_var(glsl_type::dvec(type->vector_elements), "c");
+ } else {
+ r = in_var(glsl_type::vec(type->matrix_columns), "r");
+ c = in_var(glsl_type::vec(type->vector_elements), "c");
+ }
+ MAKE_SIG(type, avail, 2, c, r);
+
+ ir_variable *m = body.make_temp(type, "m");
+ for (int i = 0; i < type->matrix_columns; i++) {
+ body.emit(assign(array_ref(m, i), mul(c, swizzle(r, i, 1))));
+ }
+ body.emit(ret(m));
+
+ return sig;
+}
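+
+/* For reference (an illustrative sketch, not from upstream): column i of the
+ * result is the column vector c scaled by component i of the row vector r:
+ *
+ *    // outerProduct(c, r)[i][j] == c[j] * r[i]
+ *    for (int i = 0; i < cols; i++)
+ *       m[i] = c * r[i];
+ */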
+
+ir_function_signature *
+builtin_builder::_transpose(builtin_available_predicate avail, const glsl_type *orig_type)
+{
+ const glsl_type *transpose_type =
+ glsl_type::get_instance(orig_type->base_type,
+ orig_type->matrix_columns,
+ orig_type->vector_elements);
+
+ ir_variable *m = in_var(orig_type, "m");
+ MAKE_SIG(transpose_type, avail, 1, m);
+
+ ir_variable *t = body.make_temp(transpose_type, "t");
+ for (int i = 0; i < orig_type->matrix_columns; i++) {
+ for (int j = 0; j < orig_type->vector_elements; j++) {
+ body.emit(assign(array_ref(t, j),
+ matrix_elt(m, i, j),
+ 1 << i));
+ }
+ }
+ body.emit(ret(t));
+
+ return sig;
+}
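+
+/* For reference (an illustrative sketch, not from upstream): element (column
+ * i, row j) of the source lands at (column j, row i) of the result; the
+ * 1 << i writemask above selects row i inside destination column j:
+ *
+ *    for (int i = 0; i < cols; i++)
+ *       for (int j = 0; j < rows; j++)
+ *          t[j][i] = m[i][j];
+ */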
+
+ir_function_signature *
+builtin_builder::_determinant_mat2(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *m = in_var(type, "m");
+ MAKE_SIG(type->get_base_type(), avail, 1, m);
+
+ body.emit(ret(sub(mul(matrix_elt(m, 0, 0), matrix_elt(m, 1, 1)),
+ mul(matrix_elt(m, 1, 0), matrix_elt(m, 0, 1)))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_determinant_mat3(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *m = in_var(type, "m");
+ MAKE_SIG(type->get_base_type(), avail, 1, m);
+
+ ir_expression *f1 =
+ sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 2)),
+ mul(matrix_elt(m, 1, 2), matrix_elt(m, 2, 1)));
+
+ ir_expression *f2 =
+ sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 2)),
+ mul(matrix_elt(m, 1, 2), matrix_elt(m, 2, 0)));
+
+ ir_expression *f3 =
+ sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 1)),
+ mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 0)));
+
+ body.emit(ret(add(sub(mul(matrix_elt(m, 0, 0), f1),
+ mul(matrix_elt(m, 0, 1), f2)),
+ mul(matrix_elt(m, 0, 2), f3))));
+
+ return sig;
+}
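+
+/* For reference (an illustrative sketch, not from upstream): f1, f2 and f3
+ * are the three 2x2 minors of the first row/column, so the emitted
+ * expression is the usual cofactor expansion:
+ *
+ *    det = m[0][0] * (m[1][1]*m[2][2] - m[1][2]*m[2][1])
+ *        - m[0][1] * (m[1][0]*m[2][2] - m[1][2]*m[2][0])
+ *        + m[0][2] * (m[1][0]*m[2][1] - m[1][1]*m[2][0]);
+ */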
+
+ir_function_signature *
+builtin_builder::_determinant_mat4(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *m = in_var(type, "m");
+ const glsl_type *btype = type->get_base_type();
+ MAKE_SIG(btype, avail, 1, m);
+
+ ir_variable *SubFactor00 = body.make_temp(btype, "SubFactor00");
+ ir_variable *SubFactor01 = body.make_temp(btype, "SubFactor01");
+ ir_variable *SubFactor02 = body.make_temp(btype, "SubFactor02");
+ ir_variable *SubFactor03 = body.make_temp(btype, "SubFactor03");
+ ir_variable *SubFactor04 = body.make_temp(btype, "SubFactor04");
+ ir_variable *SubFactor05 = body.make_temp(btype, "SubFactor05");
+ ir_variable *SubFactor06 = body.make_temp(btype, "SubFactor06");
+ ir_variable *SubFactor07 = body.make_temp(btype, "SubFactor07");
+ ir_variable *SubFactor08 = body.make_temp(btype, "SubFactor08");
+ ir_variable *SubFactor09 = body.make_temp(btype, "SubFactor09");
+ ir_variable *SubFactor10 = body.make_temp(btype, "SubFactor10");
+ ir_variable *SubFactor11 = body.make_temp(btype, "SubFactor11");
+ ir_variable *SubFactor12 = body.make_temp(btype, "SubFactor12");
+ ir_variable *SubFactor13 = body.make_temp(btype, "SubFactor13");
+ ir_variable *SubFactor14 = body.make_temp(btype, "SubFactor14");
+ ir_variable *SubFactor15 = body.make_temp(btype, "SubFactor15");
+ ir_variable *SubFactor16 = body.make_temp(btype, "SubFactor16");
+ ir_variable *SubFactor17 = body.make_temp(btype, "SubFactor17");
+ ir_variable *SubFactor18 = body.make_temp(btype, "SubFactor18");
+
+ body.emit(assign(SubFactor00, sub(mul(matrix_elt(m, 2, 2), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 2), matrix_elt(m, 2, 3)))));
+ body.emit(assign(SubFactor01, sub(mul(matrix_elt(m, 2, 1), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 2, 3)))));
+ body.emit(assign(SubFactor02, sub(mul(matrix_elt(m, 2, 1), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 2, 2)))));
+ body.emit(assign(SubFactor03, sub(mul(matrix_elt(m, 2, 0), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 2, 3)))));
+ body.emit(assign(SubFactor04, sub(mul(matrix_elt(m, 2, 0), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 2, 2)))));
+ body.emit(assign(SubFactor05, sub(mul(matrix_elt(m, 2, 0), matrix_elt(m, 3, 1)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 2, 1)))));
+ body.emit(assign(SubFactor06, sub(mul(matrix_elt(m, 1, 2), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 2), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor07, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor08, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor09, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor10, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor11, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor12, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 3, 1)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 1, 1)))));
+ body.emit(assign(SubFactor13, sub(mul(matrix_elt(m, 1, 2), matrix_elt(m, 2, 3)), mul(matrix_elt(m, 2, 2), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor14, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 3)), mul(matrix_elt(m, 2, 1), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor15, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 2)), mul(matrix_elt(m, 2, 1), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor16, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 3)), mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor17, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 2)), mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor18, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 1)), mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 1)))));
+
+ ir_variable *adj_0 = body.make_temp(btype == glsl_type::float_type ? glsl_type::vec4_type : glsl_type::dvec4_type, "adj_0");
+
+ body.emit(assign(adj_0,
+ add(sub(mul(matrix_elt(m, 1, 1), SubFactor00),
+ mul(matrix_elt(m, 1, 2), SubFactor01)),
+ mul(matrix_elt(m, 1, 3), SubFactor02)),
+ WRITEMASK_X));
+ body.emit(assign(adj_0, neg(
+ add(sub(mul(matrix_elt(m, 1, 0), SubFactor00),
+ mul(matrix_elt(m, 1, 2), SubFactor03)),
+ mul(matrix_elt(m, 1, 3), SubFactor04))),
+ WRITEMASK_Y));
+ body.emit(assign(adj_0,
+ add(sub(mul(matrix_elt(m, 1, 0), SubFactor01),
+ mul(matrix_elt(m, 1, 1), SubFactor03)),
+ mul(matrix_elt(m, 1, 3), SubFactor05)),
+ WRITEMASK_Z));
+ body.emit(assign(adj_0, neg(
+ add(sub(mul(matrix_elt(m, 1, 0), SubFactor02),
+ mul(matrix_elt(m, 1, 1), SubFactor04)),
+ mul(matrix_elt(m, 1, 2), SubFactor05))),
+ WRITEMASK_W));
+
+ body.emit(ret(dot(array_ref(m, 0), adj_0)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_inverse_mat2(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *m = in_var(type, "m");
+ MAKE_SIG(type, avail, 1, m);
+
+ ir_variable *adj = body.make_temp(type, "adj");
+ body.emit(assign(array_ref(adj, 0), matrix_elt(m, 1, 1), 1 << 0));
+ body.emit(assign(array_ref(adj, 0), neg(matrix_elt(m, 0, 1)), 1 << 1));
+ body.emit(assign(array_ref(adj, 1), neg(matrix_elt(m, 1, 0)), 1 << 0));
+ body.emit(assign(array_ref(adj, 1), matrix_elt(m, 0, 0), 1 << 1));
+
+ ir_expression *det =
+ sub(mul(matrix_elt(m, 0, 0), matrix_elt(m, 1, 1)),
+ mul(matrix_elt(m, 1, 0), matrix_elt(m, 0, 1)));
+
+ body.emit(ret(div(adj, det)));
+ return sig;
+}
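+
+/* For reference (an illustrative sketch, not from upstream): this is the
+ * closed-form adjugate-over-determinant inverse:
+ *
+ *    mat2 inverse(mat2 m)
+ *    {
+ *       mat2 adj = mat2(m[1][1], -m[0][1],
+ *                      -m[1][0],  m[0][0]);
+ *       return adj / (m[0][0]*m[1][1] - m[1][0]*m[0][1]);
+ *    }
+ */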
+
+ir_function_signature *
+builtin_builder::_inverse_mat3(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *m = in_var(type, "m");
+ const glsl_type *btype = type->get_base_type();
+ MAKE_SIG(type, avail, 1, m);
+
+ ir_variable *f11_22_21_12 = body.make_temp(btype, "f11_22_21_12");
+ ir_variable *f10_22_20_12 = body.make_temp(btype, "f10_22_20_12");
+ ir_variable *f10_21_20_11 = body.make_temp(btype, "f10_21_20_11");
+
+ body.emit(assign(f11_22_21_12,
+ sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 2)),
+ mul(matrix_elt(m, 2, 1), matrix_elt(m, 1, 2)))));
+ body.emit(assign(f10_22_20_12,
+ sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 2)),
+ mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 2)))));
+ body.emit(assign(f10_21_20_11,
+ sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 1)),
+ mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 1)))));
+
+ ir_variable *adj = body.make_temp(type, "adj");
+ body.emit(assign(array_ref(adj, 0), f11_22_21_12, WRITEMASK_X));
+ body.emit(assign(array_ref(adj, 1), neg(f10_22_20_12), WRITEMASK_X));
+ body.emit(assign(array_ref(adj, 2), f10_21_20_11, WRITEMASK_X));
+
+ body.emit(assign(array_ref(adj, 0), neg(
+ sub(mul(matrix_elt(m, 0, 1), matrix_elt(m, 2, 2)),
+ mul(matrix_elt(m, 2, 1), matrix_elt(m, 0, 2)))),
+ WRITEMASK_Y));
+ body.emit(assign(array_ref(adj, 1),
+ sub(mul(matrix_elt(m, 0, 0), matrix_elt(m, 2, 2)),
+ mul(matrix_elt(m, 2, 0), matrix_elt(m, 0, 2))),
+ WRITEMASK_Y));
+ body.emit(assign(array_ref(adj, 2), neg(
+ sub(mul(matrix_elt(m, 0, 0), matrix_elt(m, 2, 1)),
+ mul(matrix_elt(m, 2, 0), matrix_elt(m, 0, 1)))),
+ WRITEMASK_Y));
+
+ body.emit(assign(array_ref(adj, 0),
+ sub(mul(matrix_elt(m, 0, 1), matrix_elt(m, 1, 2)),
+ mul(matrix_elt(m, 1, 1), matrix_elt(m, 0, 2))),
+ WRITEMASK_Z));
+ body.emit(assign(array_ref(adj, 1), neg(
+ sub(mul(matrix_elt(m, 0, 0), matrix_elt(m, 1, 2)),
+ mul(matrix_elt(m, 1, 0), matrix_elt(m, 0, 2)))),
+ WRITEMASK_Z));
+ body.emit(assign(array_ref(adj, 2),
+ sub(mul(matrix_elt(m, 0, 0), matrix_elt(m, 1, 1)),
+ mul(matrix_elt(m, 1, 0), matrix_elt(m, 0, 1))),
+ WRITEMASK_Z));
+
+ ir_expression *det =
+ add(sub(mul(matrix_elt(m, 0, 0), f11_22_21_12),
+ mul(matrix_elt(m, 0, 1), f10_22_20_12)),
+ mul(matrix_elt(m, 0, 2), f10_21_20_11));
+
+ body.emit(ret(div(adj, det)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_inverse_mat4(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *m = in_var(type, "m");
+ const glsl_type *btype = type->get_base_type();
+ MAKE_SIG(type, avail, 1, m);
+
+ ir_variable *SubFactor00 = body.make_temp(btype, "SubFactor00");
+ ir_variable *SubFactor01 = body.make_temp(btype, "SubFactor01");
+ ir_variable *SubFactor02 = body.make_temp(btype, "SubFactor02");
+ ir_variable *SubFactor03 = body.make_temp(btype, "SubFactor03");
+ ir_variable *SubFactor04 = body.make_temp(btype, "SubFactor04");
+ ir_variable *SubFactor05 = body.make_temp(btype, "SubFactor05");
+ ir_variable *SubFactor06 = body.make_temp(btype, "SubFactor06");
+ ir_variable *SubFactor07 = body.make_temp(btype, "SubFactor07");
+ ir_variable *SubFactor08 = body.make_temp(btype, "SubFactor08");
+ ir_variable *SubFactor09 = body.make_temp(btype, "SubFactor09");
+ ir_variable *SubFactor10 = body.make_temp(btype, "SubFactor10");
+ ir_variable *SubFactor11 = body.make_temp(btype, "SubFactor11");
+ ir_variable *SubFactor12 = body.make_temp(btype, "SubFactor12");
+ ir_variable *SubFactor13 = body.make_temp(btype, "SubFactor13");
+ ir_variable *SubFactor14 = body.make_temp(btype, "SubFactor14");
+ ir_variable *SubFactor15 = body.make_temp(btype, "SubFactor15");
+ ir_variable *SubFactor16 = body.make_temp(btype, "SubFactor16");
+ ir_variable *SubFactor17 = body.make_temp(btype, "SubFactor17");
+ ir_variable *SubFactor18 = body.make_temp(btype, "SubFactor18");
+
+ body.emit(assign(SubFactor00, sub(mul(matrix_elt(m, 2, 2), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 2), matrix_elt(m, 2, 3)))));
+ body.emit(assign(SubFactor01, sub(mul(matrix_elt(m, 2, 1), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 2, 3)))));
+ body.emit(assign(SubFactor02, sub(mul(matrix_elt(m, 2, 1), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 2, 2)))));
+ body.emit(assign(SubFactor03, sub(mul(matrix_elt(m, 2, 0), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 2, 3)))));
+ body.emit(assign(SubFactor04, sub(mul(matrix_elt(m, 2, 0), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 2, 2)))));
+ body.emit(assign(SubFactor05, sub(mul(matrix_elt(m, 2, 0), matrix_elt(m, 3, 1)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 2, 1)))));
+ body.emit(assign(SubFactor06, sub(mul(matrix_elt(m, 1, 2), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 2), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor07, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor08, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor09, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor10, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 3, 2)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor11, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 3, 3)), mul(matrix_elt(m, 3, 1), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor12, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 3, 1)), mul(matrix_elt(m, 3, 0), matrix_elt(m, 1, 1)))));
+ body.emit(assign(SubFactor13, sub(mul(matrix_elt(m, 1, 2), matrix_elt(m, 2, 3)), mul(matrix_elt(m, 2, 2), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor14, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 3)), mul(matrix_elt(m, 2, 1), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor15, sub(mul(matrix_elt(m, 1, 1), matrix_elt(m, 2, 2)), mul(matrix_elt(m, 2, 1), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor16, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 3)), mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 3)))));
+ body.emit(assign(SubFactor17, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 2)), mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 2)))));
+ body.emit(assign(SubFactor18, sub(mul(matrix_elt(m, 1, 0), matrix_elt(m, 2, 1)), mul(matrix_elt(m, 2, 0), matrix_elt(m, 1, 1)))));
+
+ ir_variable *adj = body.make_temp(btype == glsl_type::float_type ? glsl_type::mat4_type : glsl_type::dmat4_type, "adj");
+ body.emit(assign(array_ref(adj, 0),
+ add(sub(mul(matrix_elt(m, 1, 1), SubFactor00),
+ mul(matrix_elt(m, 1, 2), SubFactor01)),
+ mul(matrix_elt(m, 1, 3), SubFactor02)),
+ WRITEMASK_X));
+ body.emit(assign(array_ref(adj, 1), neg(
+ add(sub(mul(matrix_elt(m, 1, 0), SubFactor00),
+ mul(matrix_elt(m, 1, 2), SubFactor03)),
+ mul(matrix_elt(m, 1, 3), SubFactor04))),
+ WRITEMASK_X));
+ body.emit(assign(array_ref(adj, 2),
+ add(sub(mul(matrix_elt(m, 1, 0), SubFactor01),
+ mul(matrix_elt(m, 1, 1), SubFactor03)),
+ mul(matrix_elt(m, 1, 3), SubFactor05)),
+ WRITEMASK_X));
+ body.emit(assign(array_ref(adj, 3), neg(
+ add(sub(mul(matrix_elt(m, 1, 0), SubFactor02),
+ mul(matrix_elt(m, 1, 1), SubFactor04)),
+ mul(matrix_elt(m, 1, 2), SubFactor05))),
+ WRITEMASK_X));
+
+ body.emit(assign(array_ref(adj, 0), neg(
+ add(sub(mul(matrix_elt(m, 0, 1), SubFactor00),
+ mul(matrix_elt(m, 0, 2), SubFactor01)),
+ mul(matrix_elt(m, 0, 3), SubFactor02))),
+ WRITEMASK_Y));
+ body.emit(assign(array_ref(adj, 1),
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor00),
+ mul(matrix_elt(m, 0, 2), SubFactor03)),
+ mul(matrix_elt(m, 0, 3), SubFactor04)),
+ WRITEMASK_Y));
+ body.emit(assign(array_ref(adj, 2), neg(
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor01),
+ mul(matrix_elt(m, 0, 1), SubFactor03)),
+ mul(matrix_elt(m, 0, 3), SubFactor05))),
+ WRITEMASK_Y));
+ body.emit(assign(array_ref(adj, 3),
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor02),
+ mul(matrix_elt(m, 0, 1), SubFactor04)),
+ mul(matrix_elt(m, 0, 2), SubFactor05)),
+ WRITEMASK_Y));
+
+ body.emit(assign(array_ref(adj, 0),
+ add(sub(mul(matrix_elt(m, 0, 1), SubFactor06),
+ mul(matrix_elt(m, 0, 2), SubFactor07)),
+ mul(matrix_elt(m, 0, 3), SubFactor08)),
+ WRITEMASK_Z));
+ body.emit(assign(array_ref(adj, 1), neg(
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor06),
+ mul(matrix_elt(m, 0, 2), SubFactor09)),
+ mul(matrix_elt(m, 0, 3), SubFactor10))),
+ WRITEMASK_Z));
+ body.emit(assign(array_ref(adj, 2),
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor11),
+ mul(matrix_elt(m, 0, 1), SubFactor09)),
+ mul(matrix_elt(m, 0, 3), SubFactor12)),
+ WRITEMASK_Z));
+ body.emit(assign(array_ref(adj, 3), neg(
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor08),
+ mul(matrix_elt(m, 0, 1), SubFactor10)),
+ mul(matrix_elt(m, 0, 2), SubFactor12))),
+ WRITEMASK_Z));
+
+ body.emit(assign(array_ref(adj, 0), neg(
+ add(sub(mul(matrix_elt(m, 0, 1), SubFactor13),
+ mul(matrix_elt(m, 0, 2), SubFactor14)),
+ mul(matrix_elt(m, 0, 3), SubFactor15))),
+ WRITEMASK_W));
+ body.emit(assign(array_ref(adj, 1),
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor13),
+ mul(matrix_elt(m, 0, 2), SubFactor16)),
+ mul(matrix_elt(m, 0, 3), SubFactor17)),
+ WRITEMASK_W));
+ body.emit(assign(array_ref(adj, 2), neg(
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor14),
+ mul(matrix_elt(m, 0, 1), SubFactor16)),
+ mul(matrix_elt(m, 0, 3), SubFactor18))),
+ WRITEMASK_W));
+ body.emit(assign(array_ref(adj, 3),
+ add(sub(mul(matrix_elt(m, 0, 0), SubFactor15),
+ mul(matrix_elt(m, 0, 1), SubFactor17)),
+ mul(matrix_elt(m, 0, 2), SubFactor18)),
+ WRITEMASK_W));
+
+ ir_expression *det =
+ add(mul(matrix_elt(m, 0, 0), matrix_elt(adj, 0, 0)),
+ add(mul(matrix_elt(m, 0, 1), matrix_elt(adj, 1, 0)),
+ add(mul(matrix_elt(m, 0, 2), matrix_elt(adj, 2, 0)),
+ mul(matrix_elt(m, 0, 3), matrix_elt(adj, 3, 0)))));
+
+ body.emit(ret(div(adj, det)));
+
+ return sig;
+}
+
+
+ir_function_signature *
+builtin_builder::_lessThan(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_less,
+ glsl_type::bvec(type->vector_elements), type, type);
+}
+
+ir_function_signature *
+builtin_builder::_lessThanEqual(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_gequal,
+ glsl_type::bvec(type->vector_elements), type, type,
+ true);
+}
+
+ir_function_signature *
+builtin_builder::_greaterThan(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_less,
+ glsl_type::bvec(type->vector_elements), type, type,
+ true);
+}
+
+ir_function_signature *
+builtin_builder::_greaterThanEqual(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_gequal,
+ glsl_type::bvec(type->vector_elements), type, type);
+}
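+
+/* For reference (an illustrative note, not from upstream): only two ordered
+ * comparison opcodes are needed. The trailing 'true' passed to binop() in
+ * lessThanEqual() and greaterThan() swaps the operands, relying on
+ *
+ *    lessThanEqual(x, y) == greaterThanEqual(y, x)
+ *    greaterThan(x, y)   == lessThan(y, x)
+ */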
+
+ir_function_signature *
+builtin_builder::_equal(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_equal,
+ glsl_type::bvec(type->vector_elements), type, type);
+}
+
+ir_function_signature *
+builtin_builder::_notEqual(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_nequal,
+ glsl_type::bvec(type->vector_elements), type, type);
+}
+
+ir_function_signature *
+builtin_builder::_any(const glsl_type *type)
+{
+ ir_variable *v = in_var(type, "v");
+ MAKE_SIG(glsl_type::bool_type, always_available, 1, v);
+
+ const unsigned vec_elem = v->type->vector_elements;
+ body.emit(ret(expr(ir_binop_any_nequal, v, imm(false, vec_elem))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_all(const glsl_type *type)
+{
+ ir_variable *v = in_var(type, "v");
+ MAKE_SIG(glsl_type::bool_type, always_available, 1, v);
+
+ const unsigned vec_elem = v->type->vector_elements;
+ body.emit(ret(expr(ir_binop_all_equal, v, imm(true, vec_elem))));
+
+ return sig;
+}
+
+UNOP(not, ir_unop_logic_not, always_available)
+
+ir_function_signature *
+builtin_builder::_textureSize(builtin_available_predicate avail,
+ const glsl_type *return_type,
+ const glsl_type *sampler_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ /* The sampler always exists; add optional lod later. */
+ MAKE_SIG(return_type, avail, 1, s);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(ir_txs);
+ tex->set_sampler(new(mem_ctx) ir_dereference_variable(s), return_type);
+
+ if (ir_texture::has_lod(sampler_type)) {
+ ir_variable *lod = in_var(glsl_type::int_type, "lod");
+ sig->parameters.push_tail(lod);
+ tex->lod_info.lod = var_ref(lod);
+ } else {
+ tex->lod_info.lod = imm(0u);
+ }
+
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_textureSamples(builtin_available_predicate avail,
+ const glsl_type *sampler_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ MAKE_SIG(glsl_type::int_type, avail, 1, s);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(ir_texture_samples);
+ tex->set_sampler(new(mem_ctx) ir_dereference_variable(s), glsl_type::int_type);
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_texture(ir_texture_opcode opcode,
+ builtin_available_predicate avail,
+ const glsl_type *return_type,
+ const glsl_type *sampler_type,
+ const glsl_type *coord_type,
+ int flags)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ ir_variable *P = in_var(coord_type, "P");
+ /* The sampler and coordinate always exist; add optional parameters later. */
+ MAKE_SIG(return_type, avail, 2, s, P);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(opcode);
+ tex->set_sampler(var_ref(s), return_type);
+
+ const int coord_size = sampler_type->coordinate_components();
+
+ if (coord_size == coord_type->vector_elements) {
+ tex->coordinate = var_ref(P);
+ } else {
+ /* The incoming coordinate also has the projector or shadow comparator,
+ * so we need to swizzle those away.
+ */
+ tex->coordinate = swizzle_for_size(P, coord_size);
+ }
+
+ /* The projector is always in the last component. */
+ if (flags & TEX_PROJECT)
+ tex->projector = swizzle(P, coord_type->vector_elements - 1, 1);
+
+ if (sampler_type->sampler_shadow) {
+ if (opcode == ir_tg4) {
+ /* gather has refz as a separate parameter, immediately after the
+ * coordinate.
+ */
+ ir_variable *refz = in_var(glsl_type::float_type, "refz");
+ sig->parameters.push_tail(refz);
+ tex->shadow_comparator = var_ref(refz);
+ } else {
+ /* The shadow comparator is normally in the Z component, but a few types
+ * have sufficiently large coordinates that it's in W.
+ */
+ tex->shadow_comparator = swizzle(P, MAX2(coord_size, SWIZZLE_Z), 1);
+ }
+ }
+
+ if (opcode == ir_txl) {
+ ir_variable *lod = in_var(glsl_type::float_type, "lod");
+ sig->parameters.push_tail(lod);
+ tex->lod_info.lod = var_ref(lod);
+ } else if (opcode == ir_txd) {
+ int grad_size = coord_size - (sampler_type->sampler_array ? 1 : 0);
+ ir_variable *dPdx = in_var(glsl_type::vec(grad_size), "dPdx");
+ ir_variable *dPdy = in_var(glsl_type::vec(grad_size), "dPdy");
+ sig->parameters.push_tail(dPdx);
+ sig->parameters.push_tail(dPdy);
+ tex->lod_info.grad.dPdx = var_ref(dPdx);
+ tex->lod_info.grad.dPdy = var_ref(dPdy);
+ }
+
+ if (flags & (TEX_OFFSET | TEX_OFFSET_NONCONST)) {
+ int offset_size = coord_size - (sampler_type->sampler_array ? 1 : 0);
+ ir_variable *offset =
+ new(mem_ctx) ir_variable(glsl_type::ivec(offset_size), "offset",
+ (flags & TEX_OFFSET) ? ir_var_const_in : ir_var_function_in);
+ sig->parameters.push_tail(offset);
+ tex->offset = var_ref(offset);
+ }
+
+ if (flags & TEX_OFFSET_ARRAY) {
+ ir_variable *offsets =
+ new(mem_ctx) ir_variable(glsl_type::get_array_instance(glsl_type::ivec2_type, 4),
+ "offsets", ir_var_const_in);
+ sig->parameters.push_tail(offsets);
+ tex->offset = var_ref(offsets);
+ }
+
+ if (opcode == ir_tg4) {
+ if (flags & TEX_COMPONENT) {
+ ir_variable *component =
+ new(mem_ctx) ir_variable(glsl_type::int_type, "comp", ir_var_const_in);
+ sig->parameters.push_tail(component);
+ tex->lod_info.component = var_ref(component);
+ }
+ else {
+ tex->lod_info.component = imm(0);
+ }
+ }
+
+ /* The "bias" parameter comes /after/ the "offset" parameter, which is
+ * inconsistent with both textureLodOffset and textureGradOffset.
+ */
+ if (opcode == ir_txb) {
+ ir_variable *bias = in_var(glsl_type::float_type, "bias");
+ sig->parameters.push_tail(bias);
+ tex->lod_info.bias = var_ref(bias);
+ }
+
+ body.emit(ret(tex));
+
+ return sig;
+}
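+
+/* For reference (an illustrative note, not from upstream): for the
+ * TEX_PROJECT variants the last component of P is stored as tex->projector,
+ * so conceptually
+ *
+ *    textureProj(s, P)  ==  texture(s, P.xyz / P.w)   // for a vec4 P
+ *
+ * with the division applied when the ir_texture is later lowered.
+ */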
+
+ir_function_signature *
+builtin_builder::_textureCubeArrayShadow(ir_texture_opcode opcode,
+ builtin_available_predicate avail,
+ const glsl_type *sampler_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ ir_variable *P = in_var(glsl_type::vec4_type, "P");
+ ir_variable *compare = in_var(glsl_type::float_type, "compare");
+ MAKE_SIG(glsl_type::float_type, avail, 3, s, P, compare);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(opcode);
+ tex->set_sampler(var_ref(s), glsl_type::float_type);
+
+ tex->coordinate = var_ref(P);
+ tex->shadow_comparator = var_ref(compare);
+
+ if (opcode == ir_txb) {
+ ir_variable *bias = in_var(glsl_type::float_type, "bias");
+ sig->parameters.push_tail(bias);
+ tex->lod_info.bias = var_ref(bias);
+ }
+
+ if (opcode == ir_txl) {
+ ir_variable *lod = in_var(glsl_type::float_type, "lod");
+ sig->parameters.push_tail(lod);
+ tex->lod_info.lod = var_ref(lod);
+ }
+
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_texelFetch(builtin_available_predicate avail,
+ const glsl_type *return_type,
+ const glsl_type *sampler_type,
+ const glsl_type *coord_type,
+ const glsl_type *offset_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ ir_variable *P = in_var(coord_type, "P");
+ /* The sampler and coordinate always exist; add optional parameters later. */
+ MAKE_SIG(return_type, avail, 2, s, P);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(ir_txf);
+ tex->coordinate = var_ref(P);
+ tex->set_sampler(var_ref(s), return_type);
+
+ if (sampler_type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS) {
+ ir_variable *sample = in_var(glsl_type::int_type, "sample");
+ sig->parameters.push_tail(sample);
+ tex->lod_info.sample_index = var_ref(sample);
+ tex->op = ir_txf_ms;
+ } else if (ir_texture::has_lod(sampler_type)) {
+ ir_variable *lod = in_var(glsl_type::int_type, "lod");
+ sig->parameters.push_tail(lod);
+ tex->lod_info.lod = var_ref(lod);
+ } else {
+ tex->lod_info.lod = imm(0u);
+ }
+
+ if (offset_type != NULL) {
+ ir_variable *offset =
+ new(mem_ctx) ir_variable(offset_type, "offset", ir_var_const_in);
+ sig->parameters.push_tail(offset);
+ tex->offset = var_ref(offset);
+ }
+
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_EmitVertex()
+{
+ MAKE_SIG(glsl_type::void_type, gs_only, 0);
+
+ ir_rvalue *stream = new(mem_ctx) ir_constant(0, 1);
+ body.emit(new(mem_ctx) ir_emit_vertex(stream));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_EmitStreamVertex(builtin_available_predicate avail,
+ const glsl_type *stream_type)
+{
+ /* Section 8.12 (Geometry Shader Functions) of the GLSL 4.0 spec says:
+ *
+ * "Emit the current values of output variables to the current output
+ * primitive on stream stream. The argument to stream must be a constant
+ * integral expression."
+ */
+ ir_variable *stream =
+ new(mem_ctx) ir_variable(stream_type, "stream", ir_var_const_in);
+
+ MAKE_SIG(glsl_type::void_type, avail, 1, stream);
+
+ body.emit(new(mem_ctx) ir_emit_vertex(var_ref(stream)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_EndPrimitive()
+{
+ MAKE_SIG(glsl_type::void_type, gs_only, 0);
+
+ ir_rvalue *stream = new(mem_ctx) ir_constant(0, 1);
+ body.emit(new(mem_ctx) ir_end_primitive(stream));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_EndStreamPrimitive(builtin_available_predicate avail,
+ const glsl_type *stream_type)
+{
+ /* Section 8.12 (Geometry Shader Functions) of the GLSL 4.0 spec says:
+ *
+ * "Completes the current output primitive on stream stream and starts
+ * a new one. The argument to stream must be a constant integral
+ * expression."
+ */
+ ir_variable *stream =
+ new(mem_ctx) ir_variable(stream_type, "stream", ir_var_const_in);
+
+ MAKE_SIG(glsl_type::void_type, avail, 1, stream);
+
+ body.emit(new(mem_ctx) ir_end_primitive(var_ref(stream)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_barrier()
+{
+ MAKE_SIG(glsl_type::void_type, barrier_supported, 0);
+
+ body.emit(new(mem_ctx) ir_barrier());
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_textureQueryLod(builtin_available_predicate avail,
+ const glsl_type *sampler_type,
+ const glsl_type *coord_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ ir_variable *coord = in_var(coord_type, "coord");
+ /* The sampler and coordinate always exist; add optional parameters later. */
+ MAKE_SIG(glsl_type::vec2_type, avail, 2, s, coord);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(ir_lod);
+ tex->coordinate = var_ref(coord);
+ tex->set_sampler(var_ref(s), glsl_type::vec2_type);
+
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_textureQueryLevels(builtin_available_predicate avail,
+ const glsl_type *sampler_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ const glsl_type *return_type = glsl_type::int_type;
+ MAKE_SIG(return_type, avail, 1, s);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(ir_query_levels);
+ tex->set_sampler(var_ref(s), return_type);
+
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_textureSamplesIdentical(builtin_available_predicate avail,
+ const glsl_type *sampler_type,
+ const glsl_type *coord_type)
+{
+ ir_variable *s = in_var(sampler_type, "sampler");
+ ir_variable *P = in_var(coord_type, "P");
+ const glsl_type *return_type = glsl_type::bool_type;
+ MAKE_SIG(return_type, avail, 2, s, P);
+
+ ir_texture *tex = new(mem_ctx) ir_texture(ir_samples_identical);
+ tex->coordinate = var_ref(P);
+ tex->set_sampler(var_ref(s), return_type);
+
+ body.emit(ret(tex));
+
+ return sig;
+}
+
+UNOP(dFdx, ir_unop_dFdx, derivatives)
+UNOP(dFdxCoarse, ir_unop_dFdx_coarse, derivative_control)
+UNOP(dFdxFine, ir_unop_dFdx_fine, derivative_control)
+UNOP(dFdy, ir_unop_dFdy, derivatives)
+UNOP(dFdyCoarse, ir_unop_dFdy_coarse, derivative_control)
+UNOP(dFdyFine, ir_unop_dFdy_fine, derivative_control)
+
+ir_function_signature *
+builtin_builder::_fwidth(const glsl_type *type)
+{
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(type, derivatives, 1, p);
+
+ body.emit(ret(add(abs(expr(ir_unop_dFdx, p)), abs(expr(ir_unop_dFdy, p)))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_fwidthCoarse(const glsl_type *type)
+{
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(type, derivative_control, 1, p);
+
+ body.emit(ret(add(abs(expr(ir_unop_dFdx_coarse, p)),
+ abs(expr(ir_unop_dFdy_coarse, p)))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_fwidthFine(const glsl_type *type)
+{
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(type, derivative_control, 1, p);
+
+ body.emit(ret(add(abs(expr(ir_unop_dFdx_fine, p)),
+ abs(expr(ir_unop_dFdy_fine, p)))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_noise1(const glsl_type *type)
+{
+ /* From the GLSL 4.60 specification:
+ *
+ * "The noise functions noise1, noise2, noise3, and noise4 have been
+ * deprecated starting with version 4.4 of GLSL. When not generating
+ * SPIR-V they are defined to return the value 0.0 or a vector whose
+ * components are all 0.0. When generating SPIR-V the noise functions
+ * are not declared and may not be used."
+ *
+ * Earlier versions of the GLSL specification attempted to define some
+ * sort of statistical noise function. However, the function's
+ * characteristics have always been such that always returning 0 is
+ * valid, and Mesa has always returned 0 for noise on most drivers.
+ */
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(glsl_type::float_type, v110, 1, p);
+ body.emit(ret(imm(glsl_type::float_type, ir_constant_data())));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_noise2(const glsl_type *type)
+{
+ /* See builtin_builder::_noise1 */
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(glsl_type::vec2_type, v110, 1, p);
+ body.emit(ret(imm(glsl_type::vec2_type, ir_constant_data())));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_noise3(const glsl_type *type)
+{
+ /* See builtin_builder::_noise1 */
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(glsl_type::vec3_type, v110, 1, p);
+ body.emit(ret(imm(glsl_type::vec3_type, ir_constant_data())));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_noise4(const glsl_type *type)
+{
+ /* See builtin_builder::_noise1 */
+ ir_variable *p = in_var(type, "p");
+ MAKE_SIG(glsl_type::vec4_type, v110, 1, p);
+ body.emit(ret(imm(glsl_type::vec4_type, ir_constant_data())));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_bitfieldExtract(const glsl_type *type)
+{
+ bool is_uint = type->base_type == GLSL_TYPE_UINT;
+ ir_variable *value = in_var(type, "value");
+ ir_variable *offset = in_var(glsl_type::int_type, "offset");
+ ir_variable *bits = in_var(glsl_type::int_type, "bits");
+ MAKE_SIG(type, gpu_shader5_or_es31_or_integer_functions, 3, value, offset,
+ bits);
+
+ operand cast_offset = is_uint ? i2u(offset) : operand(offset);
+ operand cast_bits = is_uint ? i2u(bits) : operand(bits);
+
+ body.emit(ret(expr(ir_triop_bitfield_extract, value,
+ swizzle(cast_offset, SWIZZLE_XXXX, type->vector_elements),
+ swizzle(cast_bits, SWIZZLE_XXXX, type->vector_elements))));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_bitfieldInsert(const glsl_type *type)
+{
+ bool is_uint = type->base_type == GLSL_TYPE_UINT;
+ ir_variable *base = in_var(type, "base");
+ ir_variable *insert = in_var(type, "insert");
+ ir_variable *offset = in_var(glsl_type::int_type, "offset");
+ ir_variable *bits = in_var(glsl_type::int_type, "bits");
+ MAKE_SIG(type, gpu_shader5_or_es31_or_integer_functions, 4, base, insert,
+ offset, bits);
+
+ operand cast_offset = is_uint ? i2u(offset) : operand(offset);
+ operand cast_bits = is_uint ? i2u(bits) : operand(bits);
+
+ body.emit(ret(bitfield_insert(base, insert,
+ swizzle(cast_offset, SWIZZLE_XXXX, type->vector_elements),
+ swizzle(cast_bits, SWIZZLE_XXXX, type->vector_elements))));
+
+ return sig;
+}
+
+UNOP(bitfieldReverse, ir_unop_bitfield_reverse, gpu_shader5_or_es31_or_integer_functions)
+
+ir_function_signature *
+builtin_builder::_bitCount(const glsl_type *type)
+{
+ return unop(gpu_shader5_or_es31_or_integer_functions, ir_unop_bit_count,
+ glsl_type::ivec(type->vector_elements), type);
+}
+
+ir_function_signature *
+builtin_builder::_findLSB(const glsl_type *type)
+{
+ return unop(gpu_shader5_or_es31_or_integer_functions, ir_unop_find_lsb,
+ glsl_type::ivec(type->vector_elements), type);
+}
+
+ir_function_signature *
+builtin_builder::_findMSB(const glsl_type *type)
+{
+ return unop(gpu_shader5_or_es31_or_integer_functions, ir_unop_find_msb,
+ glsl_type::ivec(type->vector_elements), type);
+}
+
+ir_function_signature *
+builtin_builder::_countLeadingZeros(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return unop(avail, ir_unop_clz,
+ glsl_type::uvec(type->vector_elements), type);
+}
+
+ir_function_signature *
+builtin_builder::_countTrailingZeros(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ ir_variable *a = in_var(type, "a");
+ MAKE_SIG(glsl_type::uvec(type->vector_elements), avail, 1, a);
+
+ body.emit(ret(ir_builder::min2(
+ ir_builder::i2u(ir_builder::expr(ir_unop_find_lsb, a)),
+ imm(32u))));
+
+ return sig;
+}
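+
+/* For reference (an illustrative note, not from upstream): findLSB() returns
+ * -1 for a zero input, which i2u() turns into 0xffffffffu, so the min()
+ * clamp produces the required 32 trailing zeros for a zero input:
+ *
+ *    uint countTrailingZeros(uint a) { return min(uint(findLSB(a)), 32u); }
+ */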
+
+ir_function_signature *
+builtin_builder::_fma(builtin_available_predicate avail, const glsl_type *type)
+{
+ ir_variable *a = in_var(type, "a");
+ ir_variable *b = in_var(type, "b");
+ ir_variable *c = in_var(type, "c");
+ MAKE_SIG(type, avail, 3, a, b, c);
+
+ body.emit(ret(ir_builder::fma(a, b, c)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_ldexp(const glsl_type *x_type, const glsl_type *exp_type)
+{
+ return binop(x_type->is_double() ? fp64 : gpu_shader5_or_es31_or_integer_functions,
+ ir_binop_ldexp, x_type, x_type, exp_type);
+}
+
+ir_function_signature *
+builtin_builder::_dfrexp(const glsl_type *x_type, const glsl_type *exp_type)
+{
+ ir_variable *x = in_var(x_type, "x");
+ ir_variable *exponent = out_var(exp_type, "exp");
+ MAKE_SIG(x_type, fp64, 2, x, exponent);
+
+ body.emit(assign(exponent, expr(ir_unop_frexp_exp, x)));
+
+ body.emit(ret(expr(ir_unop_frexp_sig, x)));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_frexp(const glsl_type *x_type, const glsl_type *exp_type)
+{
+ ir_variable *x = in_var(x_type, "x");
+ ir_variable *exponent = out_var(exp_type, "exp");
+ MAKE_SIG(x_type, gpu_shader5_or_es31_or_integer_functions, 2, x, exponent);
+
+ const unsigned vec_elem = x_type->vector_elements;
+ const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
+ const glsl_type *uvec = glsl_type::get_instance(GLSL_TYPE_UINT, vec_elem, 1);
+
+ /* Single-precision floating-point values are stored as
+ * 1 sign bit;
+ * 8 exponent bits;
+ * 23 mantissa bits.
+ *
+ * An exponent shift of 23 will shift the mantissa out, leaving only the
+ * exponent and sign bit (which itself may be zero, if the absolute value
+ * was taken before the bitcast and shift).
+ */
+ ir_constant *exponent_shift = imm(23);
+ ir_constant *exponent_bias = imm(-126, vec_elem);
+
+ ir_constant *sign_mantissa_mask = imm(0x807fffffu, vec_elem);
+
+ /* Exponent of floating-point values in the range [0.5, 1.0). */
+ ir_constant *exponent_value = imm(0x3f000000u, vec_elem);
+
+ ir_variable *is_not_zero = body.make_temp(bvec, "is_not_zero");
+ body.emit(assign(is_not_zero, nequal(abs(x), imm(0.0f, vec_elem))));
+
+ /* Since abs(x) ensures that the sign bit is zero, we don't need to bitcast
+ * to unsigned integers to ensure that 1 bits aren't shifted in.
+ */
+ body.emit(assign(exponent, rshift(bitcast_f2i(abs(x)), exponent_shift)));
+ body.emit(assign(exponent, add(exponent, csel(is_not_zero, exponent_bias,
+ imm(0, vec_elem)))));
+
+ ir_variable *bits = body.make_temp(uvec, "bits");
+ body.emit(assign(bits, bitcast_f2u(x)));
+ body.emit(assign(bits, bit_and(bits, sign_mantissa_mask)));
+ body.emit(assign(bits, bit_or(bits, csel(is_not_zero, exponent_value,
+ imm(0u, vec_elem)))));
+ body.emit(ret(bitcast_u2f(bits)));
+
+ return sig;
+}
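+
+/* For reference (an illustrative worked example, not from upstream): the bit
+ * manipulation reproduces the C library contract x == ret * 2^exp with ret
+ * in [0.5, 1.0). For x = 8.0 the biased exponent field is 130, so
+ *
+ *    exp = 130 - 126 = 4
+ *    ret = 0.5            // sign and mantissa bits, OR'd with 0x3f000000
+ *
+ * and indeed 0.5 * 2^4 == 8.0. The is_not_zero selects keep frexp(0.0)
+ * returning 0.0 with a zero exponent rather than the bias.
+ */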
+
+ir_function_signature *
+builtin_builder::_uaddCarry(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ ir_variable *carry = out_var(type, "carry");
+ MAKE_SIG(type, gpu_shader5_or_es31_or_integer_functions, 3, x, y, carry);
+
+ body.emit(assign(carry, ir_builder::carry(x, y)));
+ body.emit(ret(add(x, y)));
+
+ return sig;
+}
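+
+/* For reference (an illustrative sketch, not from upstream): carry(x, y) is
+ * 1 exactly when the 32-bit sum wraps, so the two statements above match
+ *
+ *    uint uaddCarry(uint x, uint y, out uint carry)
+ *    {
+ *       carry = (x + y < x) ? 1u : 0u;   // wrapped iff the sum dips below x
+ *       return x + y;                    // sum modulo 2^32
+ *    }
+ */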
+
+ir_function_signature *
+builtin_builder::_addSaturate(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_add_sat, type, type, type);
+}
+
+ir_function_signature *
+builtin_builder::_usubBorrow(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ ir_variable *borrow = out_var(type, "borrow");
+ MAKE_SIG(type, gpu_shader5_or_es31_or_integer_functions, 3, x, y, borrow);
+
+ body.emit(assign(borrow, ir_builder::borrow(x, y)));
+ body.emit(ret(sub(x, y)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_subtractSaturate(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_sub_sat, type, type, type);
+}
+
+ir_function_signature *
+builtin_builder::_absoluteDifference(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ /* absoluteDifference returns an unsigned type that has the same number of
+ * bits and number of vector elements as the type of the operands.
+ */
+ return binop(avail, ir_binop_abs_sub,
+ glsl_type::get_instance(glsl_unsigned_base_type_of(type->base_type),
+ type->vector_elements, 1),
+ type, type);
+}
+
+ir_function_signature *
+builtin_builder::_average(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_avg, type, type, type);
+}
+
+ir_function_signature *
+builtin_builder::_averageRounded(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_avg_round, type, type, type);
+}
+
+/**
+ * For both imulExtended() and umulExtended() built-ins.
+ */
+ir_function_signature *
+builtin_builder::_mulExtended(const glsl_type *type)
+{
+ const glsl_type *mul_type, *unpack_type;
+ ir_expression_operation unpack_op;
+
+ if (type->base_type == GLSL_TYPE_INT) {
+ unpack_op = ir_unop_unpack_int_2x32;
+ mul_type = glsl_type::get_instance(GLSL_TYPE_INT64, type->vector_elements, 1);
+ unpack_type = glsl_type::ivec2_type;
+ } else {
+ unpack_op = ir_unop_unpack_uint_2x32;
+ mul_type = glsl_type::get_instance(GLSL_TYPE_UINT64, type->vector_elements, 1);
+ unpack_type = glsl_type::uvec2_type;
+ }
+
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ ir_variable *msb = out_var(type, "msb");
+ ir_variable *lsb = out_var(type, "lsb");
+ MAKE_SIG(glsl_type::void_type, gpu_shader5_or_es31_or_integer_functions, 4, x, y, msb, lsb);
+
+ ir_variable *unpack_val = body.make_temp(unpack_type, "_unpack_val");
+
+ ir_expression *mul_res = new(mem_ctx) ir_expression(ir_binop_mul, mul_type,
+ new(mem_ctx)ir_dereference_variable(x),
+ new(mem_ctx)ir_dereference_variable(y));
+
+ if (type->vector_elements == 1) {
+ body.emit(assign(unpack_val, expr(unpack_op, mul_res)));
+ body.emit(assign(msb, swizzle_y(unpack_val)));
+ body.emit(assign(lsb, swizzle_x(unpack_val)));
+ } else {
+ for (int i = 0; i < type->vector_elements; i++) {
+ body.emit(assign(unpack_val, expr(unpack_op, swizzle(mul_res, i, 1))));
+ body.emit(assign(array_ref(msb, i), swizzle_y(unpack_val)));
+ body.emit(assign(array_ref(lsb, i), swizzle_x(unpack_val)));
+ }
+ }
+
+ return sig;
+}
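+
+/* For reference (an illustrative sketch, not from upstream, written with
+ * int64 types for clarity): each component pair is a full 32x32 -> 64 bit
+ * multiply whose halves are then unpacked:
+ *
+ *    void umulExtended(uint x, uint y, out uint msb, out uint lsb)
+ *    {
+ *       uint64_t p = uint64_t(x) * uint64_t(y);
+ *       lsb = uint(p);         // low half  (the .x of the unpack)
+ *       msb = uint(p >> 32);   // high half (the .y of the unpack)
+ *    }
+ */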
+
+ir_function_signature *
+builtin_builder::_multiply32x16(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ return binop(avail, ir_binop_mul_32x16, type, type, type);
+}
+
+ir_function_signature *
+builtin_builder::_interpolateAtCentroid(const glsl_type *type)
+{
+ ir_variable *interpolant = in_var(type, "interpolant");
+ interpolant->data.must_be_shader_input = 1;
+ MAKE_SIG(type, fs_interpolate_at, 1, interpolant);
+
+ body.emit(ret(interpolate_at_centroid(interpolant)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_interpolateAtOffset(const glsl_type *type)
+{
+ ir_variable *interpolant = in_var(type, "interpolant");
+ interpolant->data.must_be_shader_input = 1;
+ ir_variable *offset = in_var(glsl_type::vec2_type, "offset");
+ MAKE_SIG(type, fs_interpolate_at, 2, interpolant, offset);
+
+ body.emit(ret(interpolate_at_offset(interpolant, offset)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_interpolateAtSample(const glsl_type *type)
+{
+ ir_variable *interpolant = in_var(type, "interpolant");
+ interpolant->data.must_be_shader_input = 1;
+ ir_variable *sample_num = in_var(glsl_type::int_type, "sample_num");
+ MAKE_SIG(type, fs_interpolate_at, 2, interpolant, sample_num);
+
+ body.emit(ret(interpolate_at_sample(interpolant, sample_num)));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_counter_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ ir_variable *counter = in_var(glsl_type::atomic_uint_type, "counter");
+ MAKE_INTRINSIC(glsl_type::uint_type, id, avail, 1, counter);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_counter_intrinsic1(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ ir_variable *counter = in_var(glsl_type::atomic_uint_type, "counter");
+ ir_variable *data = in_var(glsl_type::uint_type, "data");
+ MAKE_INTRINSIC(glsl_type::uint_type, id, avail, 2, counter, data);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_counter_intrinsic2(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ ir_variable *counter = in_var(glsl_type::atomic_uint_type, "counter");
+ ir_variable *compare = in_var(glsl_type::uint_type, "compare");
+ ir_variable *data = in_var(glsl_type::uint_type, "data");
+ MAKE_INTRINSIC(glsl_type::uint_type, id, avail, 3, counter, compare, data);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_intrinsic2(builtin_available_predicate avail,
+ const glsl_type *type,
+ enum ir_intrinsic_id id)
+{
+ ir_variable *atomic = in_var(type, "atomic");
+ ir_variable *data = in_var(type, "data");
+ MAKE_INTRINSIC(type, id, avail, 2, atomic, data);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_intrinsic3(builtin_available_predicate avail,
+ const glsl_type *type,
+ enum ir_intrinsic_id id)
+{
+ ir_variable *atomic = in_var(type, "atomic");
+ ir_variable *data1 = in_var(type, "data1");
+ ir_variable *data2 = in_var(type, "data2");
+ MAKE_INTRINSIC(type, id, avail, 3, atomic, data1, data2);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_counter_op(const char *intrinsic,
+ builtin_available_predicate avail)
+{
+ ir_variable *counter = in_var(glsl_type::atomic_uint_type, "atomic_counter");
+ MAKE_SIG(glsl_type::uint_type, avail, 1, counter);
+
+ ir_variable *retval = body.make_temp(glsl_type::uint_type, "atomic_retval");
+ body.emit(call(shader->symbols->get_function(intrinsic), retval,
+ sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_counter_op1(const char *intrinsic,
+ builtin_available_predicate avail)
+{
+ ir_variable *counter = in_var(glsl_type::atomic_uint_type, "atomic_counter");
+ ir_variable *data = in_var(glsl_type::uint_type, "data");
+ MAKE_SIG(glsl_type::uint_type, avail, 2, counter, data);
+
+ ir_variable *retval = body.make_temp(glsl_type::uint_type, "atomic_retval");
+
+ /* Instead of generating an __intrinsic_atomic_sub, generate an
+ * __intrinsic_atomic_add with the data parameter negated.
+ */
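+   /* A minimal GLSL-level sketch of the equivalence (illustrative only;
+    * it relies on uint wraparound, where -data == ~data + 1 mod 2^32):
+    *
+    *    atomicCounterSubtract(c, data)  ==  atomicCounterAdd(c, -data)
+    *
+    * Both the returned prior value and the stored result agree.
+    */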
+ if (strcmp("__intrinsic_atomic_sub", intrinsic) == 0) {
+ ir_variable *const neg_data =
+ body.make_temp(glsl_type::uint_type, "neg_data");
+
+ body.emit(assign(neg_data, neg(data)));
+
+ exec_list parameters;
+
+ parameters.push_tail(new(mem_ctx) ir_dereference_variable(counter));
+ parameters.push_tail(new(mem_ctx) ir_dereference_variable(neg_data));
+
+ ir_function *const func =
+ shader->symbols->get_function("__intrinsic_atomic_add");
+ ir_instruction *const c = call(func, retval, parameters);
+
+ assert(c != NULL);
+ assert(parameters.is_empty());
+
+ body.emit(c);
+ } else {
+ body.emit(call(shader->symbols->get_function(intrinsic), retval,
+ sig->parameters));
+ }
+
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_counter_op2(const char *intrinsic,
+ builtin_available_predicate avail)
+{
+ ir_variable *counter = in_var(glsl_type::atomic_uint_type, "atomic_counter");
+ ir_variable *compare = in_var(glsl_type::uint_type, "compare");
+ ir_variable *data = in_var(glsl_type::uint_type, "data");
+ MAKE_SIG(glsl_type::uint_type, avail, 3, counter, compare, data);
+
+ ir_variable *retval = body.make_temp(glsl_type::uint_type, "atomic_retval");
+ body.emit(call(shader->symbols->get_function(intrinsic), retval,
+ sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_op2(const char *intrinsic,
+ builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ ir_variable *atomic = in_var(type, "atomic_var");
+ ir_variable *data = in_var(type, "atomic_data");
+ MAKE_SIG(type, avail, 2, atomic, data);
+
+ ir_variable *retval = body.make_temp(type, "atomic_retval");
+ body.emit(call(shader->symbols->get_function(intrinsic), retval,
+ sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_atomic_op3(const char *intrinsic,
+ builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ ir_variable *atomic = in_var(type, "atomic_var");
+ ir_variable *data1 = in_var(type, "atomic_data1");
+ ir_variable *data2 = in_var(type, "atomic_data2");
+ MAKE_SIG(type, avail, 3, atomic, data1, data2);
+
+ ir_variable *retval = body.make_temp(type, "atomic_retval");
+ body.emit(call(shader->symbols->get_function(intrinsic), retval,
+ sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_min3(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ ir_variable *z = in_var(type, "z");
+ MAKE_SIG(type, shader_trinary_minmax, 3, x, y, z);
+
+   ir_expression *min3 = min2(x, min2(y, z));
+ body.emit(ret(min3));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_max3(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ ir_variable *z = in_var(type, "z");
+ MAKE_SIG(type, shader_trinary_minmax, 3, x, y, z);
+
+   ir_expression *max3 = max2(x, max2(y, z));
+ body.emit(ret(max3));
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_mid3(const glsl_type *type)
+{
+ ir_variable *x = in_var(type, "x");
+ ir_variable *y = in_var(type, "y");
+ ir_variable *z = in_var(type, "z");
+ MAKE_SIG(type, shader_trinary_minmax, 3, x, y, z);
+
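+   /* Median-of-three via min/max only: each pairwise min discards one
+    * larger candidate and the outer max recovers the middle value,
+    * e.g. mid3(3, 1, 2) = max2(1, max2(2, 1)) = 2.
+    */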
+ ir_expression *mid3 = max2(min2(x, y), max2(min2(x, z), min2(y, z)));
+ body.emit(ret(mid3));
+
+ return sig;
+}
+
+static builtin_available_predicate
+get_image_available_predicate(const glsl_type *type, unsigned flags)
+{
+ if ((flags & IMAGE_FUNCTION_AVAIL_ATOMIC_EXCHANGE) &&
+ type->sampled_type == GLSL_TYPE_FLOAT)
+ return shader_image_atomic_exchange_float;
+
+   if ((flags & IMAGE_FUNCTION_AVAIL_ATOMIC_ADD) &&
+       type->sampled_type == GLSL_TYPE_FLOAT)
+      return shader_image_atomic_add_float;
+
+   if (flags & (IMAGE_FUNCTION_AVAIL_ATOMIC_EXCHANGE |
+                IMAGE_FUNCTION_AVAIL_ATOMIC_ADD |
+                IMAGE_FUNCTION_AVAIL_ATOMIC))
+      return shader_image_atomic;
+
+   if (flags & IMAGE_FUNCTION_EXT_ONLY)
+      return shader_image_load_store_ext;
+
+   return shader_image_load_store;
+}
+
+ir_function_signature *
+builtin_builder::_image_prototype(const glsl_type *image_type,
+ unsigned num_arguments,
+ unsigned flags)
+{
+ const glsl_type *data_type = glsl_type::get_instance(
+ image_type->sampled_type,
+ (flags & IMAGE_FUNCTION_HAS_VECTOR_DATA_TYPE ? 4 : 1),
+ 1);
+ const glsl_type *ret_type = (flags & IMAGE_FUNCTION_RETURNS_VOID ?
+ glsl_type::void_type : data_type);
+
+ /* Addressing arguments that are always present. */
+ ir_variable *image = in_var(image_type, "image");
+ ir_variable *coord = in_var(
+ glsl_type::ivec(image_type->coordinate_components()), "coord");
+
+ ir_function_signature *sig = new_sig(
+ ret_type, get_image_available_predicate(image_type, flags),
+ 2, image, coord);
+
+ /* Sample index for multisample images. */
+ if (image_type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS)
+ sig->parameters.push_tail(in_var(glsl_type::int_type, "sample"));
+
+ /* Data arguments. */
+ for (unsigned i = 0; i < num_arguments; ++i) {
+      char *arg_name = ralloc_asprintf(NULL, "arg%u", i);
+ sig->parameters.push_tail(in_var(data_type, arg_name));
+ ralloc_free(arg_name);
+ }
+
+   /* Set the maximal set of qualifiers allowed for this image
+    * built-in.  The spec allows calls whose arguments carry fewer
+    * qualifiers than the prototype, but never more; this makes the
+    * compiler accept everything that must be accepted while rejecting
+    * cases like loads from write-only or stores to read-only images.
+    */
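+   /* For illustration only (hypothetical shader, not built here): given
+    *
+    *    layout(r32f) readonly uniform image2D img;
+    *
+    * imageLoad(img, p) is accepted — the argument carries fewer
+    * qualifiers than this maximal prototype — while imageStore(img, p, v)
+    * is rejected, since `readonly` does not appear on the store formal.
+    */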
+ image->data.memory_read_only = (flags & IMAGE_FUNCTION_READ_ONLY) != 0;
+ image->data.memory_write_only = (flags & IMAGE_FUNCTION_WRITE_ONLY) != 0;
+ image->data.memory_coherent = true;
+ image->data.memory_volatile = true;
+ image->data.memory_restrict = true;
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_image_size_prototype(const glsl_type *image_type,
+ unsigned /* num_arguments */,
+ unsigned /* flags */)
+{
+ const glsl_type *ret_type;
+ unsigned num_components = image_type->coordinate_components();
+
+ /* From the ARB_shader_image_size extension:
+ * "Cube images return the dimensions of one face."
+ */
+ if (image_type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
+ !image_type->sampler_array) {
+ num_components = 2;
+ }
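+   /* e.g. imageSize on a non-array imageCube is therefore declared as
+    * ivec2: the width and height of a single face.
+    */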
+
+ /* FIXME: Add the highp precision qualifier for GLES 3.10 when it is
+    * supported by Mesa.
+ */
+ ret_type = glsl_type::get_instance(GLSL_TYPE_INT, num_components, 1);
+
+ ir_variable *image = in_var(image_type, "image");
+ ir_function_signature *sig = new_sig(ret_type, shader_image_size, 1, image);
+
+   /* Set the maximal set of qualifiers allowed for this image
+    * built-in.  The spec allows calls whose arguments carry fewer
+    * qualifiers than the prototype, but never more; this makes the
+    * compiler accept everything that must be accepted while rejecting
+    * cases like loads from write-only or stores to read-only images.
+    */
+ image->data.memory_read_only = true;
+ image->data.memory_write_only = true;
+ image->data.memory_coherent = true;
+ image->data.memory_volatile = true;
+ image->data.memory_restrict = true;
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_image_samples_prototype(const glsl_type *image_type,
+ unsigned /* num_arguments */,
+ unsigned /* flags */)
+{
+ ir_variable *image = in_var(image_type, "image");
+ ir_function_signature *sig =
+ new_sig(glsl_type::int_type, shader_samples, 1, image);
+
+   /* Set the maximal set of qualifiers allowed for this image
+    * built-in.  The spec allows calls whose arguments carry fewer
+    * qualifiers than the prototype, but never more; this makes the
+    * compiler accept everything that must be accepted while rejecting
+    * cases like loads from write-only or stores to read-only images.
+    */
+ image->data.memory_read_only = true;
+ image->data.memory_write_only = true;
+ image->data.memory_coherent = true;
+ image->data.memory_volatile = true;
+ image->data.memory_restrict = true;
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_image(image_prototype_ctr prototype,
+ const glsl_type *image_type,
+ const char *intrinsic_name,
+ unsigned num_arguments,
+ unsigned flags,
+ enum ir_intrinsic_id id)
+{
+ ir_function_signature *sig = (this->*prototype)(image_type,
+ num_arguments, flags);
+
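+   /* Two flavors: either emit a tiny defined stub whose body forwards the
+    * parameters to the named intrinsic, or expose the bare signature and
+    * tag it with the intrinsic id for the backend to expand.
+    */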
+ if (flags & IMAGE_FUNCTION_EMIT_STUB) {
+ ir_factory body(&sig->body, mem_ctx);
+ ir_function *f = shader->symbols->get_function(intrinsic_name);
+
+ if (flags & IMAGE_FUNCTION_RETURNS_VOID) {
+ body.emit(call(f, NULL, sig->parameters));
+ } else {
+ ir_variable *ret_val =
+ body.make_temp(sig->return_type, "_ret_val");
+ body.emit(call(f, ret_val, sig->parameters));
+ body.emit(ret(ret_val));
+ }
+
+ sig->is_defined = true;
+
+ } else {
+ sig->intrinsic_id = id;
+ }
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_memory_barrier_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ MAKE_INTRINSIC(glsl_type::void_type, id, avail, 0);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_memory_barrier(const char *intrinsic_name,
+ builtin_available_predicate avail)
+{
+ MAKE_SIG(glsl_type::void_type, avail, 0);
+ body.emit(call(shader->symbols->get_function(intrinsic_name),
+ NULL, sig->parameters));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_ballot_intrinsic()
+{
+ ir_variable *value = in_var(glsl_type::bool_type, "value");
+ MAKE_INTRINSIC(glsl_type::uint64_t_type, ir_intrinsic_ballot, shader_ballot,
+ 1, value);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_ballot()
+{
+ ir_variable *value = in_var(glsl_type::bool_type, "value");
+
+ MAKE_SIG(glsl_type::uint64_t_type, shader_ballot, 1, value);
+ ir_variable *retval = body.make_temp(glsl_type::uint64_t_type, "retval");
+
+ body.emit(call(shader->symbols->get_function("__intrinsic_ballot"),
+ retval, sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_read_first_invocation_intrinsic(const glsl_type *type)
+{
+ ir_variable *value = in_var(type, "value");
+ MAKE_INTRINSIC(type, ir_intrinsic_read_first_invocation, shader_ballot,
+ 1, value);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_read_first_invocation(const glsl_type *type)
+{
+ ir_variable *value = in_var(type, "value");
+
+ MAKE_SIG(type, shader_ballot, 1, value);
+ ir_variable *retval = body.make_temp(type, "retval");
+
+ body.emit(call(shader->symbols->get_function("__intrinsic_read_first_invocation"),
+ retval, sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_read_invocation_intrinsic(const glsl_type *type)
+{
+ ir_variable *value = in_var(type, "value");
+ ir_variable *invocation = in_var(glsl_type::uint_type, "invocation");
+ MAKE_INTRINSIC(type, ir_intrinsic_read_invocation, shader_ballot,
+ 2, value, invocation);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_read_invocation(const glsl_type *type)
+{
+ ir_variable *value = in_var(type, "value");
+ ir_variable *invocation = in_var(glsl_type::uint_type, "invocation");
+
+ MAKE_SIG(type, shader_ballot, 2, value, invocation);
+ ir_variable *retval = body.make_temp(type, "retval");
+
+ body.emit(call(shader->symbols->get_function("__intrinsic_read_invocation"),
+ retval, sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_invocation_interlock_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ MAKE_INTRINSIC(glsl_type::void_type, id, avail, 0);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_invocation_interlock(const char *intrinsic_name,
+ builtin_available_predicate avail)
+{
+ MAKE_SIG(glsl_type::void_type, avail, 0);
+ body.emit(call(shader->symbols->get_function(intrinsic_name),
+ NULL, sig->parameters));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_shader_clock_intrinsic(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ MAKE_INTRINSIC(type, ir_intrinsic_shader_clock, avail, 0);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_shader_clock(builtin_available_predicate avail,
+ const glsl_type *type)
+{
+ MAKE_SIG(type, avail, 0);
+
+ ir_variable *retval = body.make_temp(glsl_type::uvec2_type, "clock_retval");
+
+ body.emit(call(shader->symbols->get_function("__intrinsic_shader_clock"),
+ retval, sig->parameters));
+
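+   /* One uvec2 intrinsic backs both builtins: clock2x32ARB() returns the
+    * (low, high) pair as-is, while clockARB() packs it into a uint64_t.
+    */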
+ if (type == glsl_type::uint64_t_type) {
+ body.emit(ret(expr(ir_unop_pack_uint_2x32, retval)));
+ } else {
+ body.emit(ret(retval));
+ }
+
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_vote_intrinsic(builtin_available_predicate avail,
+ enum ir_intrinsic_id id)
+{
+ ir_variable *value = in_var(glsl_type::bool_type, "value");
+ MAKE_INTRINSIC(glsl_type::bool_type, id, avail, 1, value);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_vote(const char *intrinsic_name,
+ builtin_available_predicate avail)
+{
+ ir_variable *value = in_var(glsl_type::bool_type, "value");
+
+ MAKE_SIG(glsl_type::bool_type, avail, 1, value);
+
+ ir_variable *retval = body.make_temp(glsl_type::bool_type, "retval");
+
+ body.emit(call(shader->symbols->get_function(intrinsic_name),
+ retval, sig->parameters));
+ body.emit(ret(retval));
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_helper_invocation_intrinsic()
+{
+ MAKE_INTRINSIC(glsl_type::bool_type, ir_intrinsic_helper_invocation,
+ demote_to_helper_invocation, 0);
+ return sig;
+}
+
+ir_function_signature *
+builtin_builder::_helper_invocation()
+{
+ MAKE_SIG(glsl_type::bool_type, demote_to_helper_invocation, 0);
+
+ ir_variable *retval = body.make_temp(glsl_type::bool_type, "retval");
+
+ body.emit(call(shader->symbols->get_function("__intrinsic_helper_invocation"),
+ retval, sig->parameters));
+ body.emit(ret(retval));
+
+ return sig;
+}
+
+/** @} */
+
+/******************************************************************************/
+
+/* The singleton instance of builtin_builder. */
+static builtin_builder builtins;
+static mtx_t builtins_lock = _MTX_INITIALIZER_NP;
+static uint32_t builtin_users = 0;
+
+/**
+ * External API (exposing the built-in module to the rest of the compiler):
+ * @{
+ */
+extern "C" void
+_mesa_glsl_builtin_functions_init_or_ref()
+{
+ mtx_lock(&builtins_lock);
+ if (builtin_users++ == 0)
+ builtins.initialize();
+ mtx_unlock(&builtins_lock);
+}
+
+extern "C" void
+_mesa_glsl_builtin_functions_decref()
+{
+ mtx_lock(&builtins_lock);
+ assert(builtin_users != 0);
+ if (--builtin_users == 0)
+ builtins.release();
+ mtx_unlock(&builtins_lock);
+}
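+
+/* Typical usage (sketch): bracket compiler work with this ref/unref pair
+ * so the singleton is initialized once and released by the last user:
+ *
+ *    _mesa_glsl_builtin_functions_init_or_ref();
+ *    ...compile, possibly calling _mesa_glsl_find_builtin_function()...
+ *    _mesa_glsl_builtin_functions_decref();
+ */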
+
+ir_function_signature *
+_mesa_glsl_find_builtin_function(_mesa_glsl_parse_state *state,
+ const char *name, exec_list *actual_parameters)
+{
+ ir_function_signature *s;
+ mtx_lock(&builtins_lock);
+ s = builtins.find(state, name, actual_parameters);
+ mtx_unlock(&builtins_lock);
+
+ return s;
+}
+
+bool
+_mesa_glsl_has_builtin_function(_mesa_glsl_parse_state *state, const char *name)
+{
+ ir_function *f;
+ bool ret = false;
+ mtx_lock(&builtins_lock);
+ f = builtins.shader->symbols->get_function(name);
+ if (f != NULL) {
+ foreach_in_list(ir_function_signature, sig, &f->signatures) {
+ if (sig->is_builtin_available(state)) {
+ ret = true;
+ break;
+ }
+ }
+ }
+ mtx_unlock(&builtins_lock);
+
+ return ret;
+}
+
+gl_shader *
+_mesa_glsl_get_builtin_function_shader()
+{
+ return builtins.shader;
+}
+
+
+/**
+ * Get the function signature for main from a shader.
+ */
+ir_function_signature *
+_mesa_get_main_function_signature(glsl_symbol_table *symbols)
+{
+ ir_function *const f = symbols->get_function("main");
+ if (f != NULL) {
+ exec_list void_parameters;
+
+ /* Look for the 'void main()' signature and ensure that it's defined.
+       * This keeps the linker from accidentally picking a shader that just
+ * contains a prototype for main.
+ *
+ * We don't have to check for multiple definitions of main (in multiple
+ * shaders) because that would have already been caught above.
+ */
+ ir_function_signature *sig =
+ f->matching_signature(NULL, &void_parameters, false);
+ if ((sig != NULL) && sig->is_defined) {
+ return sig;
+ }
+ }
+
+ return NULL;
+}
+
+/** @} */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.h
new file mode 100644
index 0000000000..ff3d4e9f43
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_functions.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef BUILTIN_FUNCTIONS_H
+#define BUILTIN_FUNCTIONS_H
+
+struct gl_shader;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void
+_mesa_glsl_builtin_functions_init_or_ref(void);
+
+void
+_mesa_glsl_builtin_functions_decref(void);
+
+#ifdef __cplusplus
+
+} /* extern "C" */
+
+extern ir_function_signature *
+_mesa_glsl_find_builtin_function(_mesa_glsl_parse_state *state,
+ const char *name, exec_list *actual_parameters);
+
+extern bool
+_mesa_glsl_has_builtin_function(_mesa_glsl_parse_state *state,
+ const char *name);
+
+extern gl_shader *
+_mesa_glsl_get_builtin_function_shader(void);
+
+extern ir_function_signature *
+_mesa_get_main_function_signature(glsl_symbol_table *symbols);
+
+namespace generate_ir {
+
+ir_function_signature *
+udiv64(void *mem_ctx, builtin_available_predicate avail);
+
+ir_function_signature *
+idiv64(void *mem_ctx, builtin_available_predicate avail);
+
+ir_function_signature *
+umod64(void *mem_ctx, builtin_available_predicate avail);
+
+ir_function_signature *
+imod64(void *mem_ctx, builtin_available_predicate avail);
+
+ir_function_signature *
+umul64(void *mem_ctx, builtin_available_predicate avail);
+
+ir_function_signature *
+sign64(void *mem_ctx, builtin_available_predicate avail);
+
+ir_function_signature *
+udivmod64(void *mem_ctx, builtin_available_predicate avail);
+
+}
+
+#endif /* __cplusplus */
+
+#endif /* BUILTIN_FUNCTIONS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_int64.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_int64.h
new file mode 100644
index 0000000000..6812d4bf97
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_int64.h
@@ -0,0 +1,1196 @@
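+/* Expanded IR for the 64-bit integer helpers declared in
+ * builtin_functions.h (the code below reads as generated builder output).
+ * Operands travel as (i/u)vec2 pairs with x = low 32 bits and y = high
+ * 32 bits, matching packUint2x32/unpackUint2x32.
+ *
+ * umul64 below is the schoolbook product truncated to 64 bits:
+ *
+ *    lo = a.x * b.x
+ *    hi = imul_high(a.x, b.x) + a.x * b.y + a.y * b.x
+ *
+ * the a.y * b.y cross term only feeds bits >= 64 and is dropped.
+ */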
+ir_function_signature *
+umul64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::uvec2_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r0001 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "a", ir_var_function_in);
+ sig_parameters.push_tail(r0001);
+ ir_variable *const r0002 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "b", ir_var_function_in);
+ sig_parameters.push_tail(r0002);
+ ir_variable *const r0003 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "result", ir_var_auto);
+ body.emit(r0003);
+ body.emit(assign(r0003, imul_high(swizzle_x(r0001), swizzle_x(r0002)), 0x02));
+
+ body.emit(assign(r0003, mul(swizzle_x(r0001), swizzle_x(r0002)), 0x01));
+
+ ir_expression *const r0004 = mul(swizzle_x(r0001), swizzle_y(r0002));
+ ir_expression *const r0005 = mul(swizzle_y(r0001), swizzle_x(r0002));
+ ir_expression *const r0006 = add(r0004, r0005);
+ body.emit(assign(r0003, add(swizzle_y(r0003), r0006), 0x02));
+
+ body.emit(ret(r0003));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
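+/* sign64: the high result word is the input high word shifted right
+ * arithmetically by 31 (0 or ~0); the low word ORs that mask with a
+ * nonzero test of the whole input — yielding -1, 0 or +1 as an ivec2.
+ */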
+ir_function_signature *
+sign64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::ivec2_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r0007 = new(mem_ctx) ir_variable(glsl_type::ivec2_type, "a", ir_var_function_in);
+ sig_parameters.push_tail(r0007);
+ ir_variable *const r0008 = new(mem_ctx) ir_variable(glsl_type::ivec2_type, "result", ir_var_auto);
+ body.emit(r0008);
+ body.emit(assign(r0008, rshift(swizzle_y(r0007), body.constant(int(31))), 0x02));
+
+ ir_expression *const r0009 = bit_or(swizzle_x(r0007), swizzle_y(r0007));
+ ir_expression *const r000A = nequal(r0009, body.constant(int(0)));
+ ir_expression *const r000B = expr(ir_unop_b2i, r000A);
+ body.emit(assign(r0008, bit_or(swizzle_y(r0008), r000B), 0x01));
+
+ body.emit(ret(r0008));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
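+/* udivmod64 is a shift-and-subtract long division. A first pass peels
+ * off the high quotient word while the denominator still fits in 32
+ * bits; a second pass, on values packed to uint64_t with packUint2x32,
+ * fills the low quotient word. The returned uvec4 carries the quotient
+ * in .xy and the remainder in .zw.
+ */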
+ir_function_signature *
+udivmod64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::uvec4_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r000C = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "n", ir_var_function_in);
+ sig_parameters.push_tail(r000C);
+ ir_variable *const r000D = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "d", ir_var_function_in);
+ sig_parameters.push_tail(r000D);
+ ir_variable *const r000E = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r000E);
+ ir_variable *const r000F = new(mem_ctx) ir_variable(glsl_type::uint64_t_type, "n64", ir_var_auto);
+ body.emit(r000F);
+ ir_variable *const r0010 = new(mem_ctx) ir_variable(glsl_type::int_type, "log2_denom", ir_var_auto);
+ body.emit(r0010);
+ ir_variable *const r0011 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "quot", ir_var_auto);
+ body.emit(r0011);
+ body.emit(assign(r0011, ir_constant::zero(mem_ctx, glsl_type::uvec2_type), 0x03));
+
+ ir_expression *const r0012 = expr(ir_unop_find_msb, swizzle_y(r000D));
+ body.emit(assign(r0010, add(r0012, body.constant(int(32))), 0x01));
+
+ /* IF CONDITION */
+ ir_expression *const r0014 = equal(swizzle_y(r000D), body.constant(0u));
+ ir_expression *const r0015 = gequal(swizzle_y(r000C), swizzle_x(r000D));
+ ir_expression *const r0016 = logic_and(r0014, r0015);
+ ir_if *f0013 = new(mem_ctx) ir_if(operand(r0016).val);
+ exec_list *const f0013_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0013->then_instructions;
+
+ ir_variable *const r0017 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r0017);
+ ir_variable *const r0018 = body.make_temp(glsl_type::int_type, "findMSB_retval");
+ body.emit(assign(r0018, expr(ir_unop_find_msb, swizzle_x(r000D)), 0x01));
+
+ body.emit(assign(r0010, r0018, 0x01));
+
+ body.emit(assign(r0017, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f0019 = new(mem_ctx) ir_loop();
+ exec_list *const f0019_parent_instructions = body.instructions;
+
+ body.instructions = &f0019->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r001B = less(r0017, body.constant(int(1)));
+ ir_if *f001A = new(mem_ctx) ir_if(operand(r001B).val);
+ exec_list *const f001A_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f001A->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f001A_parent_instructions;
+ body.emit(f001A);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r001D = sub(body.constant(int(31)), r0017);
+ ir_expression *const r001E = lequal(r0018, r001D);
+ ir_expression *const r001F = lshift(swizzle_x(r000D), r0017);
+ ir_expression *const r0020 = lequal(r001F, swizzle_y(r000C));
+ ir_expression *const r0021 = logic_and(r001E, r0020);
+ ir_if *f001C = new(mem_ctx) ir_if(operand(r0021).val);
+ exec_list *const f001C_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f001C->then_instructions;
+
+ ir_expression *const r0022 = lshift(swizzle_x(r000D), r0017);
+ body.emit(assign(r000C, sub(swizzle_y(r000C), r0022), 0x02));
+
+ ir_expression *const r0023 = lshift(body.constant(1u), r0017);
+ body.emit(assign(r0011, bit_or(swizzle_y(r0011), r0023), 0x02));
+
+
+ body.instructions = f001C_parent_instructions;
+ body.emit(f001C);
+
+ /* END IF */
+
+ body.emit(assign(r0017, add(r0017, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f0019_parent_instructions;
+ body.emit(f0019);
+
+ /* IF CONDITION */
+ ir_expression *const r0025 = lequal(swizzle_x(r000D), swizzle_y(r000C));
+ ir_if *f0024 = new(mem_ctx) ir_if(operand(r0025).val);
+ exec_list *const f0024_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0024->then_instructions;
+
+ body.emit(assign(r000C, sub(swizzle_y(r000C), swizzle_x(r000D)), 0x02));
+
+ body.emit(assign(r0011, bit_or(swizzle_y(r0011), body.constant(1u)), 0x02));
+
+
+ body.instructions = f0024_parent_instructions;
+ body.emit(f0024);
+
+ /* END IF */
+
+
+ body.instructions = f0013_parent_instructions;
+ body.emit(f0013);
+
+ /* END IF */
+
+ ir_variable *const r0026 = body.make_temp(glsl_type::uint64_t_type, "packUint2x32_retval");
+ body.emit(assign(r0026, expr(ir_unop_pack_uint_2x32, r000D), 0x01));
+
+ body.emit(assign(r000F, expr(ir_unop_pack_uint_2x32, r000C), 0x01));
+
+ body.emit(assign(r000E, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f0027 = new(mem_ctx) ir_loop();
+ exec_list *const f0027_parent_instructions = body.instructions;
+
+ body.instructions = &f0027->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r0029 = less(r000E, body.constant(int(1)));
+ ir_if *f0028 = new(mem_ctx) ir_if(operand(r0029).val);
+ exec_list *const f0028_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0028->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f0028_parent_instructions;
+ body.emit(f0028);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r002B = sub(body.constant(int(63)), r000E);
+ ir_expression *const r002C = lequal(r0010, r002B);
+ ir_expression *const r002D = lshift(r0026, r000E);
+ ir_expression *const r002E = lequal(r002D, r000F);
+ ir_expression *const r002F = logic_and(r002C, r002E);
+ ir_if *f002A = new(mem_ctx) ir_if(operand(r002F).val);
+ exec_list *const f002A_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f002A->then_instructions;
+
+ ir_expression *const r0030 = lshift(r0026, r000E);
+ body.emit(assign(r000F, sub(r000F, r0030), 0x01));
+
+ ir_expression *const r0031 = lshift(body.constant(1u), r000E);
+ body.emit(assign(r0011, bit_or(swizzle_x(r0011), r0031), 0x01));
+
+
+ body.instructions = f002A_parent_instructions;
+ body.emit(f002A);
+
+ /* END IF */
+
+ body.emit(assign(r000E, add(r000E, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f0027_parent_instructions;
+ body.emit(f0027);
+
+ /* IF CONDITION */
+ ir_expression *const r0033 = lequal(r0026, r000F);
+ ir_if *f0032 = new(mem_ctx) ir_if(operand(r0033).val);
+ exec_list *const f0032_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0032->then_instructions;
+
+ body.emit(assign(r000F, sub(r000F, r0026), 0x01));
+
+ body.emit(assign(r0011, bit_or(swizzle_x(r0011), body.constant(1u)), 0x01));
+
+
+ body.instructions = f0032_parent_instructions;
+ body.emit(f0032);
+
+ /* END IF */
+
+ ir_variable *const r0034 = body.make_temp(glsl_type::uvec4_type, "vec_ctor");
+ body.emit(assign(r0034, r0011, 0x03));
+
+ body.emit(assign(r0034, expr(ir_unop_unpack_uint_2x32, r000F), 0x0c));
+
+ body.emit(ret(r0034));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
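+/* udiv64: same loop structure as udivmod64, but only the quotient is
+ * returned; the remainder left behind in n/n64 is simply dropped.
+ */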
+ir_function_signature *
+udiv64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::uvec2_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r0035 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "n", ir_var_function_in);
+ sig_parameters.push_tail(r0035);
+ ir_variable *const r0036 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "d", ir_var_function_in);
+ sig_parameters.push_tail(r0036);
+ ir_variable *const r0037 = body.make_temp(glsl_type::uvec2_type, "n");
+ body.emit(assign(r0037, r0035, 0x03));
+
+ ir_variable *const r0038 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r0038);
+ ir_variable *const r0039 = new(mem_ctx) ir_variable(glsl_type::uint64_t_type, "n64", ir_var_auto);
+ body.emit(r0039);
+ ir_variable *const r003A = new(mem_ctx) ir_variable(glsl_type::int_type, "log2_denom", ir_var_auto);
+ body.emit(r003A);
+ ir_variable *const r003B = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "quot", ir_var_auto);
+ body.emit(r003B);
+ body.emit(assign(r003B, ir_constant::zero(mem_ctx, glsl_type::uvec2_type), 0x03));
+
+ ir_expression *const r003C = expr(ir_unop_find_msb, swizzle_y(r0036));
+ body.emit(assign(r003A, add(r003C, body.constant(int(32))), 0x01));
+
+ /* IF CONDITION */
+ ir_expression *const r003E = equal(swizzle_y(r0036), body.constant(0u));
+ ir_expression *const r003F = gequal(swizzle_y(r0035), swizzle_x(r0036));
+ ir_expression *const r0040 = logic_and(r003E, r003F);
+ ir_if *f003D = new(mem_ctx) ir_if(operand(r0040).val);
+ exec_list *const f003D_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f003D->then_instructions;
+
+ ir_variable *const r0041 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r0041);
+ ir_variable *const r0042 = body.make_temp(glsl_type::int_type, "findMSB_retval");
+ body.emit(assign(r0042, expr(ir_unop_find_msb, swizzle_x(r0036)), 0x01));
+
+ body.emit(assign(r003A, r0042, 0x01));
+
+ body.emit(assign(r0041, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f0043 = new(mem_ctx) ir_loop();
+ exec_list *const f0043_parent_instructions = body.instructions;
+
+ body.instructions = &f0043->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r0045 = less(r0041, body.constant(int(1)));
+ ir_if *f0044 = new(mem_ctx) ir_if(operand(r0045).val);
+ exec_list *const f0044_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0044->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f0044_parent_instructions;
+ body.emit(f0044);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r0047 = sub(body.constant(int(31)), r0041);
+ ir_expression *const r0048 = lequal(r0042, r0047);
+ ir_expression *const r0049 = lshift(swizzle_x(r0036), r0041);
+ ir_expression *const r004A = lequal(r0049, swizzle_y(r0037));
+ ir_expression *const r004B = logic_and(r0048, r004A);
+ ir_if *f0046 = new(mem_ctx) ir_if(operand(r004B).val);
+ exec_list *const f0046_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0046->then_instructions;
+
+ ir_expression *const r004C = lshift(swizzle_x(r0036), r0041);
+ body.emit(assign(r0037, sub(swizzle_y(r0037), r004C), 0x02));
+
+ ir_expression *const r004D = lshift(body.constant(1u), r0041);
+ body.emit(assign(r003B, bit_or(swizzle_y(r003B), r004D), 0x02));
+
+
+ body.instructions = f0046_parent_instructions;
+ body.emit(f0046);
+
+ /* END IF */
+
+ body.emit(assign(r0041, add(r0041, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f0043_parent_instructions;
+ body.emit(f0043);
+
+ /* IF CONDITION */
+ ir_expression *const r004F = lequal(swizzle_x(r0036), swizzle_y(r0037));
+ ir_if *f004E = new(mem_ctx) ir_if(operand(r004F).val);
+ exec_list *const f004E_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f004E->then_instructions;
+
+ body.emit(assign(r0037, sub(swizzle_y(r0037), swizzle_x(r0036)), 0x02));
+
+ body.emit(assign(r003B, bit_or(swizzle_y(r003B), body.constant(1u)), 0x02));
+
+
+ body.instructions = f004E_parent_instructions;
+ body.emit(f004E);
+
+ /* END IF */
+
+
+ body.instructions = f003D_parent_instructions;
+ body.emit(f003D);
+
+ /* END IF */
+
+ ir_variable *const r0050 = body.make_temp(glsl_type::uint64_t_type, "packUint2x32_retval");
+ body.emit(assign(r0050, expr(ir_unop_pack_uint_2x32, r0036), 0x01));
+
+ body.emit(assign(r0039, expr(ir_unop_pack_uint_2x32, r0037), 0x01));
+
+ body.emit(assign(r0038, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f0051 = new(mem_ctx) ir_loop();
+ exec_list *const f0051_parent_instructions = body.instructions;
+
+ body.instructions = &f0051->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r0053 = less(r0038, body.constant(int(1)));
+ ir_if *f0052 = new(mem_ctx) ir_if(operand(r0053).val);
+ exec_list *const f0052_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0052->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f0052_parent_instructions;
+ body.emit(f0052);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r0055 = sub(body.constant(int(63)), r0038);
+ ir_expression *const r0056 = lequal(r003A, r0055);
+ ir_expression *const r0057 = lshift(r0050, r0038);
+ ir_expression *const r0058 = lequal(r0057, r0039);
+ ir_expression *const r0059 = logic_and(r0056, r0058);
+ ir_if *f0054 = new(mem_ctx) ir_if(operand(r0059).val);
+ exec_list *const f0054_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0054->then_instructions;
+
+ ir_expression *const r005A = lshift(r0050, r0038);
+ body.emit(assign(r0039, sub(r0039, r005A), 0x01));
+
+ ir_expression *const r005B = lshift(body.constant(1u), r0038);
+ body.emit(assign(r003B, bit_or(swizzle_x(r003B), r005B), 0x01));
+
+
+ body.instructions = f0054_parent_instructions;
+ body.emit(f0054);
+
+ /* END IF */
+
+ body.emit(assign(r0038, add(r0038, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f0051_parent_instructions;
+ body.emit(f0051);
+
+ /* IF CONDITION */
+ ir_expression *const r005D = lequal(r0050, r0039);
+ ir_if *f005C = new(mem_ctx) ir_if(operand(r005D).val);
+ exec_list *const f005C_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f005C->then_instructions;
+
+ body.emit(assign(r0039, sub(r0039, r0050), 0x01));
+
+ body.emit(assign(r003B, bit_or(swizzle_x(r003B), body.constant(1u)), 0x01));
+
+
+ body.instructions = f005C_parent_instructions;
+ body.emit(f005C);
+
+ /* END IF */
+
+ body.emit(ret(r003B));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
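+/* idiv64: signed division routed through the unsigned loop — both
+ * operands pass through a 64-bit abs, and the quotient is negated when
+ * exactly one operand was negative.
+ */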
+ir_function_signature *
+idiv64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::ivec2_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r005E = new(mem_ctx) ir_variable(glsl_type::ivec2_type, "_n", ir_var_function_in);
+ sig_parameters.push_tail(r005E);
+ ir_variable *const r005F = new(mem_ctx) ir_variable(glsl_type::ivec2_type, "_d", ir_var_function_in);
+ sig_parameters.push_tail(r005F);
+ ir_variable *const r0060 = new(mem_ctx) ir_variable(glsl_type::bool_type, "negate", ir_var_auto);
+ body.emit(r0060);
+ ir_expression *const r0061 = less(swizzle_y(r005E), body.constant(int(0)));
+ ir_expression *const r0062 = less(swizzle_y(r005F), body.constant(int(0)));
+ body.emit(assign(r0060, nequal(r0061, r0062), 0x01));
+
+ ir_variable *const r0063 = body.make_temp(glsl_type::uvec2_type, "n");
+ ir_expression *const r0064 = expr(ir_unop_pack_int_2x32, r005E);
+ ir_expression *const r0065 = expr(ir_unop_abs, r0064);
+ ir_expression *const r0066 = expr(ir_unop_i642u64, r0065);
+ body.emit(assign(r0063, expr(ir_unop_unpack_uint_2x32, r0066), 0x03));
+
+ ir_variable *const r0067 = body.make_temp(glsl_type::uvec2_type, "d");
+ ir_expression *const r0068 = expr(ir_unop_pack_int_2x32, r005F);
+ ir_expression *const r0069 = expr(ir_unop_abs, r0068);
+ ir_expression *const r006A = expr(ir_unop_i642u64, r0069);
+ body.emit(assign(r0067, expr(ir_unop_unpack_uint_2x32, r006A), 0x03));
+
+ ir_variable *const r006B = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r006B);
+ ir_variable *const r006C = new(mem_ctx) ir_variable(glsl_type::uint64_t_type, "n64", ir_var_auto);
+ body.emit(r006C);
+ ir_variable *const r006D = new(mem_ctx) ir_variable(glsl_type::int_type, "log2_denom", ir_var_auto);
+ body.emit(r006D);
+ ir_variable *const r006E = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "quot", ir_var_auto);
+ body.emit(r006E);
+ body.emit(assign(r006E, ir_constant::zero(mem_ctx, glsl_type::uvec2_type), 0x03));
+
+ ir_expression *const r006F = expr(ir_unop_find_msb, swizzle_y(r0067));
+ body.emit(assign(r006D, add(r006F, body.constant(int(32))), 0x01));
+
+ /* IF CONDITION */
+ ir_expression *const r0071 = equal(swizzle_y(r0067), body.constant(0u));
+ ir_expression *const r0072 = gequal(swizzle_y(r0063), swizzle_x(r0067));
+ ir_expression *const r0073 = logic_and(r0071, r0072);
+ ir_if *f0070 = new(mem_ctx) ir_if(operand(r0073).val);
+ exec_list *const f0070_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0070->then_instructions;
+
+ ir_variable *const r0074 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r0074);
+ ir_variable *const r0075 = body.make_temp(glsl_type::int_type, "findMSB_retval");
+ body.emit(assign(r0075, expr(ir_unop_find_msb, swizzle_x(r0067)), 0x01));
+
+ body.emit(assign(r006D, r0075, 0x01));
+
+ body.emit(assign(r0074, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f0076 = new(mem_ctx) ir_loop();
+ exec_list *const f0076_parent_instructions = body.instructions;
+
+ body.instructions = &f0076->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r0078 = less(r0074, body.constant(int(1)));
+ ir_if *f0077 = new(mem_ctx) ir_if(operand(r0078).val);
+ exec_list *const f0077_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0077->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f0077_parent_instructions;
+ body.emit(f0077);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r007A = sub(body.constant(int(31)), r0074);
+ ir_expression *const r007B = lequal(r0075, r007A);
+ ir_expression *const r007C = lshift(swizzle_x(r0067), r0074);
+ ir_expression *const r007D = lequal(r007C, swizzle_y(r0063));
+ ir_expression *const r007E = logic_and(r007B, r007D);
+ ir_if *f0079 = new(mem_ctx) ir_if(operand(r007E).val);
+ exec_list *const f0079_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0079->then_instructions;
+
+ ir_expression *const r007F = lshift(swizzle_x(r0067), r0074);
+ body.emit(assign(r0063, sub(swizzle_y(r0063), r007F), 0x02));
+
+ ir_expression *const r0080 = lshift(body.constant(1u), r0074);
+ body.emit(assign(r006E, bit_or(swizzle_y(r006E), r0080), 0x02));
+
+
+ body.instructions = f0079_parent_instructions;
+ body.emit(f0079);
+
+ /* END IF */
+
+ body.emit(assign(r0074, add(r0074, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f0076_parent_instructions;
+ body.emit(f0076);
+
+ /* IF CONDITION */
+ ir_expression *const r0082 = lequal(swizzle_x(r0067), swizzle_y(r0063));
+ ir_if *f0081 = new(mem_ctx) ir_if(operand(r0082).val);
+ exec_list *const f0081_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0081->then_instructions;
+
+ body.emit(assign(r0063, sub(swizzle_y(r0063), swizzle_x(r0067)), 0x02));
+
+ body.emit(assign(r006E, bit_or(swizzle_y(r006E), body.constant(1u)), 0x02));
+
+
+ body.instructions = f0081_parent_instructions;
+ body.emit(f0081);
+
+ /* END IF */
+
+
+ body.instructions = f0070_parent_instructions;
+ body.emit(f0070);
+
+ /* END IF */
+
+ ir_variable *const r0083 = body.make_temp(glsl_type::uint64_t_type, "packUint2x32_retval");
+ body.emit(assign(r0083, expr(ir_unop_pack_uint_2x32, r0067), 0x01));
+
+ body.emit(assign(r006C, expr(ir_unop_pack_uint_2x32, r0063), 0x01));
+
+ body.emit(assign(r006B, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f0084 = new(mem_ctx) ir_loop();
+ exec_list *const f0084_parent_instructions = body.instructions;
+
+ body.instructions = &f0084->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r0086 = less(r006B, body.constant(int(1)));
+ ir_if *f0085 = new(mem_ctx) ir_if(operand(r0086).val);
+ exec_list *const f0085_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0085->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f0085_parent_instructions;
+ body.emit(f0085);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r0088 = sub(body.constant(int(63)), r006B);
+ ir_expression *const r0089 = lequal(r006D, r0088);
+ ir_expression *const r008A = lshift(r0083, r006B);
+ ir_expression *const r008B = lequal(r008A, r006C);
+ ir_expression *const r008C = logic_and(r0089, r008B);
+ ir_if *f0087 = new(mem_ctx) ir_if(operand(r008C).val);
+ exec_list *const f0087_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0087->then_instructions;
+
+ ir_expression *const r008D = lshift(r0083, r006B);
+ body.emit(assign(r006C, sub(r006C, r008D), 0x01));
+
+ ir_expression *const r008E = lshift(body.constant(1u), r006B);
+ body.emit(assign(r006E, bit_or(swizzle_x(r006E), r008E), 0x01));
+
+
+ body.instructions = f0087_parent_instructions;
+ body.emit(f0087);
+
+ /* END IF */
+
+ body.emit(assign(r006B, add(r006B, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f0084_parent_instructions;
+ body.emit(f0084);
+
+ /* IF CONDITION */
+ ir_expression *const r0090 = lequal(r0083, r006C);
+ ir_if *f008F = new(mem_ctx) ir_if(operand(r0090).val);
+ exec_list *const f008F_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f008F->then_instructions;
+
+ body.emit(assign(r006C, sub(r006C, r0083), 0x01));
+
+ body.emit(assign(r006E, bit_or(swizzle_x(r006E), body.constant(1u)), 0x01));
+
+
+ body.instructions = f008F_parent_instructions;
+ body.emit(f008F);
+
+ /* END IF */
+
+ ir_variable *const r0091 = body.make_temp(glsl_type::ivec2_type, "conditional_tmp");
+ /* IF CONDITION */
+ ir_if *f0092 = new(mem_ctx) ir_if(operand(r0060).val);
+ exec_list *const f0092_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f0092->then_instructions;
+
+ ir_expression *const r0093 = expr(ir_unop_pack_uint_2x32, r006E);
+ ir_expression *const r0094 = expr(ir_unop_u642i64, r0093);
+ ir_expression *const r0095 = neg(r0094);
+ body.emit(assign(r0091, expr(ir_unop_unpack_int_2x32, r0095), 0x03));
+
+
+ /* ELSE INSTRUCTIONS */
+ body.instructions = &f0092->else_instructions;
+
+ body.emit(assign(r0091, expr(ir_unop_u2i, r006E), 0x03));
+
+
+ body.instructions = f0092_parent_instructions;
+ body.emit(f0092);
+
+ /* END IF */
+
+ body.emit(ret(r0091));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
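+/* umod64: the same division, keeping the remainder instead — quotient
+ * and remainder are packed into a uvec4 and the .zw (remainder) half is
+ * swizzled out as the return value.
+ */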
+ir_function_signature *
+umod64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::uvec2_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r0096 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "n", ir_var_function_in);
+ sig_parameters.push_tail(r0096);
+ ir_variable *const r0097 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "d", ir_var_function_in);
+ sig_parameters.push_tail(r0097);
+ ir_variable *const r0098 = body.make_temp(glsl_type::uvec2_type, "n");
+ body.emit(assign(r0098, r0096, 0x03));
+
+ ir_variable *const r0099 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r0099);
+ ir_variable *const r009A = new(mem_ctx) ir_variable(glsl_type::uint64_t_type, "n64", ir_var_auto);
+ body.emit(r009A);
+ ir_variable *const r009B = new(mem_ctx) ir_variable(glsl_type::int_type, "log2_denom", ir_var_auto);
+ body.emit(r009B);
+ ir_variable *const r009C = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "quot", ir_var_auto);
+ body.emit(r009C);
+ body.emit(assign(r009C, ir_constant::zero(mem_ctx, glsl_type::uvec2_type), 0x03));
+
+ ir_expression *const r009D = expr(ir_unop_find_msb, swizzle_y(r0097));
+ body.emit(assign(r009B, add(r009D, body.constant(int(32))), 0x01));
+
+ /* IF CONDITION */
+ ir_expression *const r009F = equal(swizzle_y(r0097), body.constant(0u));
+ ir_expression *const r00A0 = gequal(swizzle_y(r0096), swizzle_x(r0097));
+ ir_expression *const r00A1 = logic_and(r009F, r00A0);
+ ir_if *f009E = new(mem_ctx) ir_if(operand(r00A1).val);
+ exec_list *const f009E_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f009E->then_instructions;
+
+ ir_variable *const r00A2 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r00A2);
+ ir_variable *const r00A3 = body.make_temp(glsl_type::int_type, "findMSB_retval");
+ body.emit(assign(r00A3, expr(ir_unop_find_msb, swizzle_x(r0097)), 0x01));
+
+ body.emit(assign(r009B, r00A3, 0x01));
+
+ body.emit(assign(r00A2, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f00A4 = new(mem_ctx) ir_loop();
+ exec_list *const f00A4_parent_instructions = body.instructions;
+
+ body.instructions = &f00A4->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r00A6 = less(r00A2, body.constant(int(1)));
+ ir_if *f00A5 = new(mem_ctx) ir_if(operand(r00A6).val);
+ exec_list *const f00A5_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00A5->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f00A5_parent_instructions;
+ body.emit(f00A5);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r00A8 = sub(body.constant(int(31)), r00A2);
+ ir_expression *const r00A9 = lequal(r00A3, r00A8);
+ ir_expression *const r00AA = lshift(swizzle_x(r0097), r00A2);
+ ir_expression *const r00AB = lequal(r00AA, swizzle_y(r0098));
+ ir_expression *const r00AC = logic_and(r00A9, r00AB);
+ ir_if *f00A7 = new(mem_ctx) ir_if(operand(r00AC).val);
+ exec_list *const f00A7_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00A7->then_instructions;
+
+ ir_expression *const r00AD = lshift(swizzle_x(r0097), r00A2);
+ body.emit(assign(r0098, sub(swizzle_y(r0098), r00AD), 0x02));
+
+ ir_expression *const r00AE = lshift(body.constant(1u), r00A2);
+ body.emit(assign(r009C, bit_or(swizzle_y(r009C), r00AE), 0x02));
+
+
+ body.instructions = f00A7_parent_instructions;
+ body.emit(f00A7);
+
+ /* END IF */
+
+ body.emit(assign(r00A2, add(r00A2, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f00A4_parent_instructions;
+ body.emit(f00A4);
+
+ /* IF CONDITION */
+ ir_expression *const r00B0 = lequal(swizzle_x(r0097), swizzle_y(r0098));
+ ir_if *f00AF = new(mem_ctx) ir_if(operand(r00B0).val);
+ exec_list *const f00AF_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00AF->then_instructions;
+
+ body.emit(assign(r0098, sub(swizzle_y(r0098), swizzle_x(r0097)), 0x02));
+
+ body.emit(assign(r009C, bit_or(swizzle_y(r009C), body.constant(1u)), 0x02));
+
+
+ body.instructions = f00AF_parent_instructions;
+ body.emit(f00AF);
+
+ /* END IF */
+
+
+ body.instructions = f009E_parent_instructions;
+ body.emit(f009E);
+
+ /* END IF */
+
+ ir_variable *const r00B1 = body.make_temp(glsl_type::uint64_t_type, "packUint2x32_retval");
+ body.emit(assign(r00B1, expr(ir_unop_pack_uint_2x32, r0097), 0x01));
+
+ body.emit(assign(r009A, expr(ir_unop_pack_uint_2x32, r0098), 0x01));
+
+ body.emit(assign(r0099, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f00B2 = new(mem_ctx) ir_loop();
+ exec_list *const f00B2_parent_instructions = body.instructions;
+
+ body.instructions = &f00B2->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r00B4 = less(r0099, body.constant(int(1)));
+ ir_if *f00B3 = new(mem_ctx) ir_if(operand(r00B4).val);
+ exec_list *const f00B3_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00B3->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f00B3_parent_instructions;
+ body.emit(f00B3);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r00B6 = sub(body.constant(int(63)), r0099);
+ ir_expression *const r00B7 = lequal(r009B, r00B6);
+ ir_expression *const r00B8 = lshift(r00B1, r0099);
+ ir_expression *const r00B9 = lequal(r00B8, r009A);
+ ir_expression *const r00BA = logic_and(r00B7, r00B9);
+ ir_if *f00B5 = new(mem_ctx) ir_if(operand(r00BA).val);
+ exec_list *const f00B5_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00B5->then_instructions;
+
+ ir_expression *const r00BB = lshift(r00B1, r0099);
+ body.emit(assign(r009A, sub(r009A, r00BB), 0x01));
+
+ ir_expression *const r00BC = lshift(body.constant(1u), r0099);
+ body.emit(assign(r009C, bit_or(swizzle_x(r009C), r00BC), 0x01));
+
+
+ body.instructions = f00B5_parent_instructions;
+ body.emit(f00B5);
+
+ /* END IF */
+
+ body.emit(assign(r0099, add(r0099, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f00B2_parent_instructions;
+ body.emit(f00B2);
+
+ /* IF CONDITION */
+ ir_expression *const r00BE = lequal(r00B1, r009A);
+ ir_if *f00BD = new(mem_ctx) ir_if(operand(r00BE).val);
+ exec_list *const f00BD_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00BD->then_instructions;
+
+ body.emit(assign(r009A, sub(r009A, r00B1), 0x01));
+
+ body.emit(assign(r009C, bit_or(swizzle_x(r009C), body.constant(1u)), 0x01));
+
+
+ body.instructions = f00BD_parent_instructions;
+ body.emit(f00BD);
+
+ /* END IF */
+
+ ir_variable *const r00BF = body.make_temp(glsl_type::uvec4_type, "vec_ctor");
+ body.emit(assign(r00BF, r009C, 0x03));
+
+ body.emit(assign(r00BF, expr(ir_unop_unpack_uint_2x32, r009A), 0x0c));
+
+ ir_swizzle *const r00C0 = swizzle(r00BF, MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_X), 2);
+ body.emit(ret(r00C0));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
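+/* imod64: signed modulo built on the same unsigned remainder machinery,
+ * with a `negate` flag computed up front for the final sign fix-up.
+ */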
+ir_function_signature *
+imod64(void *mem_ctx, builtin_available_predicate avail)
+{
+ ir_function_signature *const sig =
+ new(mem_ctx) ir_function_signature(glsl_type::ivec2_type, avail);
+ ir_factory body(&sig->body, mem_ctx);
+ sig->is_defined = true;
+
+ exec_list sig_parameters;
+
+ ir_variable *const r00C1 = new(mem_ctx) ir_variable(glsl_type::ivec2_type, "_n", ir_var_function_in);
+ sig_parameters.push_tail(r00C1);
+ ir_variable *const r00C2 = new(mem_ctx) ir_variable(glsl_type::ivec2_type, "_d", ir_var_function_in);
+ sig_parameters.push_tail(r00C2);
+ ir_variable *const r00C3 = new(mem_ctx) ir_variable(glsl_type::bool_type, "negate", ir_var_auto);
+ body.emit(r00C3);
+ ir_expression *const r00C4 = less(swizzle_y(r00C1), body.constant(int(0)));
+ ir_expression *const r00C5 = less(swizzle_y(r00C2), body.constant(int(0)));
+ body.emit(assign(r00C3, nequal(r00C4, r00C5), 0x01));
+
+ ir_variable *const r00C6 = body.make_temp(glsl_type::uvec2_type, "n");
+ ir_expression *const r00C7 = expr(ir_unop_pack_int_2x32, r00C1);
+ ir_expression *const r00C8 = expr(ir_unop_abs, r00C7);
+ ir_expression *const r00C9 = expr(ir_unop_i642u64, r00C8);
+ body.emit(assign(r00C6, expr(ir_unop_unpack_uint_2x32, r00C9), 0x03));
+
+ ir_variable *const r00CA = body.make_temp(glsl_type::uvec2_type, "d");
+ ir_expression *const r00CB = expr(ir_unop_pack_int_2x32, r00C2);
+ ir_expression *const r00CC = expr(ir_unop_abs, r00CB);
+ ir_expression *const r00CD = expr(ir_unop_i642u64, r00CC);
+ body.emit(assign(r00CA, expr(ir_unop_unpack_uint_2x32, r00CD), 0x03));
+
+ ir_variable *const r00CE = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r00CE);
+ ir_variable *const r00CF = new(mem_ctx) ir_variable(glsl_type::uint64_t_type, "n64", ir_var_auto);
+ body.emit(r00CF);
+ ir_variable *const r00D0 = new(mem_ctx) ir_variable(glsl_type::int_type, "log2_denom", ir_var_auto);
+ body.emit(r00D0);
+ ir_variable *const r00D1 = new(mem_ctx) ir_variable(glsl_type::uvec2_type, "quot", ir_var_auto);
+ body.emit(r00D1);
+ body.emit(assign(r00D1, ir_constant::zero(mem_ctx, glsl_type::uvec2_type), 0x03));
+
+ ir_expression *const r00D2 = expr(ir_unop_find_msb, swizzle_y(r00CA));
+ body.emit(assign(r00D0, add(r00D2, body.constant(int(32))), 0x01));
+
+ /* IF CONDITION */
+ ir_expression *const r00D4 = equal(swizzle_y(r00CA), body.constant(0u));
+ ir_expression *const r00D5 = gequal(swizzle_y(r00C6), swizzle_x(r00CA));
+ ir_expression *const r00D6 = logic_and(r00D4, r00D5);
+ ir_if *f00D3 = new(mem_ctx) ir_if(operand(r00D6).val);
+ exec_list *const f00D3_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00D3->then_instructions;
+
+ ir_variable *const r00D7 = new(mem_ctx) ir_variable(glsl_type::int_type, "i", ir_var_auto);
+ body.emit(r00D7);
+ ir_variable *const r00D8 = body.make_temp(glsl_type::int_type, "findMSB_retval");
+ body.emit(assign(r00D8, expr(ir_unop_find_msb, swizzle_x(r00CA)), 0x01));
+
+ body.emit(assign(r00D0, r00D8, 0x01));
+
+ body.emit(assign(r00D7, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f00D9 = new(mem_ctx) ir_loop();
+ exec_list *const f00D9_parent_instructions = body.instructions;
+
+ body.instructions = &f00D9->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r00DB = less(r00D7, body.constant(int(1)));
+ ir_if *f00DA = new(mem_ctx) ir_if(operand(r00DB).val);
+ exec_list *const f00DA_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00DA->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f00DA_parent_instructions;
+ body.emit(f00DA);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r00DD = sub(body.constant(int(31)), r00D7);
+ ir_expression *const r00DE = lequal(r00D8, r00DD);
+ ir_expression *const r00DF = lshift(swizzle_x(r00CA), r00D7);
+ ir_expression *const r00E0 = lequal(r00DF, swizzle_y(r00C6));
+ ir_expression *const r00E1 = logic_and(r00DE, r00E0);
+ ir_if *f00DC = new(mem_ctx) ir_if(operand(r00E1).val);
+ exec_list *const f00DC_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00DC->then_instructions;
+
+ ir_expression *const r00E2 = lshift(swizzle_x(r00CA), r00D7);
+ body.emit(assign(r00C6, sub(swizzle_y(r00C6), r00E2), 0x02));
+
+ ir_expression *const r00E3 = lshift(body.constant(1u), r00D7);
+ body.emit(assign(r00D1, bit_or(swizzle_y(r00D1), r00E3), 0x02));
+
+
+ body.instructions = f00DC_parent_instructions;
+ body.emit(f00DC);
+
+ /* END IF */
+
+ body.emit(assign(r00D7, add(r00D7, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f00D9_parent_instructions;
+ body.emit(f00D9);
+
+ /* IF CONDITION */
+ ir_expression *const r00E5 = lequal(swizzle_x(r00CA), swizzle_y(r00C6));
+ ir_if *f00E4 = new(mem_ctx) ir_if(operand(r00E5).val);
+ exec_list *const f00E4_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00E4->then_instructions;
+
+ body.emit(assign(r00C6, sub(swizzle_y(r00C6), swizzle_x(r00CA)), 0x02));
+
+ body.emit(assign(r00D1, bit_or(swizzle_y(r00D1), body.constant(1u)), 0x02));
+
+
+ body.instructions = f00E4_parent_instructions;
+ body.emit(f00E4);
+
+ /* END IF */
+
+
+ body.instructions = f00D3_parent_instructions;
+ body.emit(f00D3);
+
+ /* END IF */
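+   /* Note (added): the branch above is the 32-bit fast path. When the high
+    * word of the denominator is zero, it builds the high word of the
+    * quotient (quot.y) by subtracting left-shifted copies of d.x from n.y.
+    */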
+
+ ir_variable *const r00E6 = body.make_temp(glsl_type::uint64_t_type, "packUint2x32_retval");
+ body.emit(assign(r00E6, expr(ir_unop_pack_uint_2x32, r00CA), 0x01));
+
+ body.emit(assign(r00CF, expr(ir_unop_pack_uint_2x32, r00C6), 0x01));
+
+ body.emit(assign(r00CE, body.constant(int(31)), 0x01));
+
+ /* LOOP BEGIN */
+ ir_loop *f00E7 = new(mem_ctx) ir_loop();
+ exec_list *const f00E7_parent_instructions = body.instructions;
+
+ body.instructions = &f00E7->body_instructions;
+
+ /* IF CONDITION */
+ ir_expression *const r00E9 = less(r00CE, body.constant(int(1)));
+ ir_if *f00E8 = new(mem_ctx) ir_if(operand(r00E9).val);
+ exec_list *const f00E8_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00E8->then_instructions;
+
+ body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break));
+
+
+ body.instructions = f00E8_parent_instructions;
+ body.emit(f00E8);
+
+ /* END IF */
+
+ /* IF CONDITION */
+ ir_expression *const r00EB = sub(body.constant(int(63)), r00CE);
+ ir_expression *const r00EC = lequal(r00D0, r00EB);
+ ir_expression *const r00ED = lshift(r00E6, r00CE);
+ ir_expression *const r00EE = lequal(r00ED, r00CF);
+ ir_expression *const r00EF = logic_and(r00EC, r00EE);
+ ir_if *f00EA = new(mem_ctx) ir_if(operand(r00EF).val);
+ exec_list *const f00EA_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00EA->then_instructions;
+
+ ir_expression *const r00F0 = lshift(r00E6, r00CE);
+ body.emit(assign(r00CF, sub(r00CF, r00F0), 0x01));
+
+ ir_expression *const r00F1 = lshift(body.constant(1u), r00CE);
+ body.emit(assign(r00D1, bit_or(swizzle_x(r00D1), r00F1), 0x01));
+
+
+ body.instructions = f00EA_parent_instructions;
+ body.emit(f00EA);
+
+ /* END IF */
+
+ body.emit(assign(r00CE, add(r00CE, body.constant(int(-1))), 0x01));
+
+ /* LOOP END */
+
+ body.instructions = f00E7_parent_instructions;
+ body.emit(f00E7);
+
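+   /* Note (added): the loop above is the general 64-bit step. It repeatedly
+    * subtracts the packed denominator (packUint2x32_retval) shifted left by
+    * i from the running 64-bit remainder n64, setting bit i of quot.x after
+    * each successful subtraction; the compare below handles bit 0.
+    */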
+ /* IF CONDITION */
+ ir_expression *const r00F3 = lequal(r00E6, r00CF);
+ ir_if *f00F2 = new(mem_ctx) ir_if(operand(r00F3).val);
+ exec_list *const f00F2_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00F2->then_instructions;
+
+ body.emit(assign(r00CF, sub(r00CF, r00E6), 0x01));
+
+ body.emit(assign(r00D1, bit_or(swizzle_x(r00D1), body.constant(1u)), 0x01));
+
+
+ body.instructions = f00F2_parent_instructions;
+ body.emit(f00F2);
+
+ /* END IF */
+
+ ir_variable *const r00F4 = body.make_temp(glsl_type::uvec4_type, "vec_ctor");
+ body.emit(assign(r00F4, r00D1, 0x03));
+
+ body.emit(assign(r00F4, expr(ir_unop_unpack_uint_2x32, r00CF), 0x0c));
+
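+   /* Note (added): via write masks 0x03 and 0x0c, "vec_ctor" now packs the
+    * quotient into .xy and the final remainder into .zw.
+    */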
+ ir_variable *const r00F5 = body.make_temp(glsl_type::ivec2_type, "conditional_tmp");
+ /* IF CONDITION */
+ ir_if *f00F6 = new(mem_ctx) ir_if(operand(r00C3).val);
+ exec_list *const f00F6_parent_instructions = body.instructions;
+
+ /* THEN INSTRUCTIONS */
+ body.instructions = &f00F6->then_instructions;
+
+ ir_swizzle *const r00F7 = swizzle(r00F4, MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_X), 2);
+ ir_expression *const r00F8 = expr(ir_unop_pack_uint_2x32, r00F7);
+ ir_expression *const r00F9 = expr(ir_unop_u642i64, r00F8);
+ ir_expression *const r00FA = neg(r00F9);
+ body.emit(assign(r00F5, expr(ir_unop_unpack_int_2x32, r00FA), 0x03));
+
+
+ /* ELSE INSTRUCTIONS */
+ body.instructions = &f00F6->else_instructions;
+
+ ir_swizzle *const r00FB = swizzle(r00F4, MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_X, SWIZZLE_X), 2);
+ body.emit(assign(r00F5, expr(ir_unop_u2i, r00FB), 0x03));
+
+
+ body.instructions = f00F6_parent_instructions;
+ body.emit(f00F6);
+
+ /* END IF */
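+   /* Note (added): the conditional above returns the selected two words of
+    * the packed result either negated as a two's-complement 64-bit value
+    * (when the operand signs differed) or converted componentwise to int.
+    */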
+
+ body.emit(ret(r00F5));
+
+ sig->replace_parameters(&sig_parameters);
+ return sig;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_types.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_types.cpp
new file mode 100644
index 0000000000..d3a28acf27
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_types.cpp
@@ -0,0 +1,474 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file builtin_types.cpp
+ *
+ * The glsl_type class has static members to represent all the built-in types
+ * (such as the glsl_type::_float_type flyweight) as well as convenience pointer
+ * accessors (such as glsl_type::float_type). Those global variables are
+ * declared and initialized in this file.
+ *
+ * This also contains _mesa_glsl_initialize_types(), a function which populates
+ * a symbol table with the available built-in types for a particular language
+ * version and set of enabled extensions.
+ */
+
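+/* Illustrative usage sketch (added; not part of the upstream file): once
+ * _mesa_glsl_initialize_types() has populated a parse state's symbol table,
+ * a type name from the source text resolves to its flyweight, e.g.
+ *
+ *    const glsl_type *t = state->symbols->get_type("vec4");
+ *    assert(t == glsl_type::vec4_type);  // vec4 is available in 110/100
+ */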
+#include "compiler/glsl_types.h"
+#include "glsl_parser_extras.h"
+#include "util/macros.h"
+#include "main/mtypes.h"
+
+/**
+ * Declarations of type flyweights (glsl_type::_foo_type) and
+ * convenience pointers (glsl_type::foo_type).
+ * @{
+ */
+#define DECL_TYPE(NAME, ...)
+
+#define STRUCT_TYPE(NAME) \
+ const glsl_type glsl_type::_struct_##NAME##_type = \
+ glsl_type(NAME##_fields, ARRAY_SIZE(NAME##_fields), #NAME); \
+ const glsl_type *const glsl_type::struct_##NAME##_type = \
+ &glsl_type::_struct_##NAME##_type;
+
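+/* For example, STRUCT_TYPE(gl_DepthRangeParameters) (invoked from
+ * compiler/builtin_type_macros.h, included below) expands to a flyweight
+ * glsl_type built from the gl_DepthRangeParameters_fields array that
+ * follows, plus its struct_gl_DepthRangeParameters_type accessor pointer.
+ */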
+static const struct glsl_struct_field gl_DepthRangeParameters_fields[] = {
+ glsl_struct_field(glsl_type::float_type, GLSL_PRECISION_HIGH, "near"),
+ glsl_struct_field(glsl_type::float_type, GLSL_PRECISION_HIGH, "far"),
+ glsl_struct_field(glsl_type::float_type, GLSL_PRECISION_HIGH, "diff"),
+};
+
+static const struct glsl_struct_field gl_PointParameters_fields[] = {
+ glsl_struct_field(glsl_type::float_type, "size"),
+ glsl_struct_field(glsl_type::float_type, "sizeMin"),
+ glsl_struct_field(glsl_type::float_type, "sizeMax"),
+ glsl_struct_field(glsl_type::float_type, "fadeThresholdSize"),
+ glsl_struct_field(glsl_type::float_type, "distanceConstantAttenuation"),
+ glsl_struct_field(glsl_type::float_type, "distanceLinearAttenuation"),
+ glsl_struct_field(glsl_type::float_type, "distanceQuadraticAttenuation"),
+};
+
+static const struct glsl_struct_field gl_MaterialParameters_fields[] = {
+ glsl_struct_field(glsl_type::vec4_type, "emission"),
+ glsl_struct_field(glsl_type::vec4_type, "ambient"),
+ glsl_struct_field(glsl_type::vec4_type, "diffuse"),
+ glsl_struct_field(glsl_type::vec4_type, "specular"),
+ glsl_struct_field(glsl_type::float_type, "shininess"),
+};
+
+static const struct glsl_struct_field gl_LightSourceParameters_fields[] = {
+ glsl_struct_field(glsl_type::vec4_type, "ambient"),
+ glsl_struct_field(glsl_type::vec4_type, "diffuse"),
+ glsl_struct_field(glsl_type::vec4_type, "specular"),
+ glsl_struct_field(glsl_type::vec4_type, "position"),
+ glsl_struct_field(glsl_type::vec4_type, "halfVector"),
+ glsl_struct_field(glsl_type::vec3_type, "spotDirection"),
+ glsl_struct_field(glsl_type::float_type, "spotExponent"),
+ glsl_struct_field(glsl_type::float_type, "spotCutoff"),
+ glsl_struct_field(glsl_type::float_type, "spotCosCutoff"),
+ glsl_struct_field(glsl_type::float_type, "constantAttenuation"),
+ glsl_struct_field(glsl_type::float_type, "linearAttenuation"),
+ glsl_struct_field(glsl_type::float_type, "quadraticAttenuation"),
+};
+
+static const struct glsl_struct_field gl_LightModelParameters_fields[] = {
+ glsl_struct_field(glsl_type::vec4_type, "ambient"),
+};
+
+static const struct glsl_struct_field gl_LightModelProducts_fields[] = {
+ glsl_struct_field(glsl_type::vec4_type, "sceneColor"),
+};
+
+static const struct glsl_struct_field gl_LightProducts_fields[] = {
+ glsl_struct_field(glsl_type::vec4_type, "ambient"),
+ glsl_struct_field(glsl_type::vec4_type, "diffuse"),
+ glsl_struct_field(glsl_type::vec4_type, "specular"),
+};
+
+static const struct glsl_struct_field gl_FogParameters_fields[] = {
+ glsl_struct_field(glsl_type::vec4_type, "color"),
+ glsl_struct_field(glsl_type::float_type, "density"),
+ glsl_struct_field(glsl_type::float_type, "start"),
+ glsl_struct_field(glsl_type::float_type, "end"),
+ glsl_struct_field(glsl_type::float_type, "scale"),
+};
+
+#include "compiler/builtin_type_macros.h"
+/** @} */
+
+/**
+ * Code to populate a symbol table with the built-in types available in a
+ * particular shading language version. The table below tags every type
+ * with the GLSL/GLSL ES versions where it was introduced.
+ *
+ * @{
+ */
+#define T(TYPE, MIN_GL, MIN_ES) \
+ { glsl_type::TYPE##_type, MIN_GL, MIN_ES },
+
+static const struct builtin_type_versions {
+ const glsl_type *const type;
+ int min_gl;
+ int min_es;
+} builtin_type_versions[] = {
+ T(void, 110, 100)
+ T(bool, 110, 100)
+ T(bvec2, 110, 100)
+ T(bvec3, 110, 100)
+ T(bvec4, 110, 100)
+ T(int, 110, 100)
+ T(ivec2, 110, 100)
+ T(ivec3, 110, 100)
+ T(ivec4, 110, 100)
+ T(uint, 130, 300)
+ T(uvec2, 130, 300)
+ T(uvec3, 130, 300)
+ T(uvec4, 130, 300)
+ T(float, 110, 100)
+ T(vec2, 110, 100)
+ T(vec3, 110, 100)
+ T(vec4, 110, 100)
+ T(mat2, 110, 100)
+ T(mat3, 110, 100)
+ T(mat4, 110, 100)
+ T(mat2x3, 120, 300)
+ T(mat2x4, 120, 300)
+ T(mat3x2, 120, 300)
+ T(mat3x4, 120, 300)
+ T(mat4x2, 120, 300)
+ T(mat4x3, 120, 300)
+
+ T(double, 400, 999)
+ T(dvec2, 400, 999)
+ T(dvec3, 400, 999)
+ T(dvec4, 400, 999)
+ T(dmat2, 400, 999)
+ T(dmat3, 400, 999)
+ T(dmat4, 400, 999)
+ T(dmat2x3, 400, 999)
+ T(dmat2x4, 400, 999)
+ T(dmat3x2, 400, 999)
+ T(dmat3x4, 400, 999)
+ T(dmat4x2, 400, 999)
+ T(dmat4x3, 400, 999)
+
+ T(sampler1D, 110, 999)
+ T(sampler2D, 110, 100)
+ T(sampler3D, 110, 300)
+ T(samplerCube, 110, 100)
+ T(sampler1DArray, 130, 999)
+ T(sampler2DArray, 130, 300)
+ T(samplerCubeArray, 400, 320)
+ T(sampler2DRect, 140, 999)
+ T(samplerBuffer, 140, 320)
+ T(sampler2DMS, 150, 310)
+ T(sampler2DMSArray, 150, 320)
+
+ T(isampler1D, 130, 999)
+ T(isampler2D, 130, 300)
+ T(isampler3D, 130, 300)
+ T(isamplerCube, 130, 300)
+ T(isampler1DArray, 130, 999)
+ T(isampler2DArray, 130, 300)
+ T(isamplerCubeArray, 400, 320)
+ T(isampler2DRect, 140, 999)
+ T(isamplerBuffer, 140, 320)
+ T(isampler2DMS, 150, 310)
+ T(isampler2DMSArray, 150, 320)
+
+ T(usampler1D, 130, 999)
+ T(usampler2D, 130, 300)
+ T(usampler3D, 130, 300)
+ T(usamplerCube, 130, 300)
+ T(usampler1DArray, 130, 999)
+ T(usampler2DArray, 130, 300)
+ T(usamplerCubeArray, 400, 320)
+ T(usampler2DRect, 140, 999)
+ T(usamplerBuffer, 140, 320)
+ T(usampler2DMS, 150, 310)
+ T(usampler2DMSArray, 150, 320)
+
+ T(sampler1DShadow, 110, 999)
+ T(sampler2DShadow, 110, 300)
+ T(samplerCubeShadow, 130, 300)
+ T(sampler1DArrayShadow, 130, 999)
+ T(sampler2DArrayShadow, 130, 300)
+ T(samplerCubeArrayShadow, 400, 320)
+ T(sampler2DRectShadow, 140, 999)
+
+ T(struct_gl_DepthRangeParameters, 110, 100)
+
+ T(image1D, 420, 999)
+ T(image2D, 420, 310)
+ T(image3D, 420, 310)
+ T(image2DRect, 420, 999)
+ T(imageCube, 420, 310)
+ T(imageBuffer, 420, 320)
+ T(image1DArray, 420, 999)
+ T(image2DArray, 420, 310)
+ T(imageCubeArray, 420, 320)
+ T(image2DMS, 420, 999)
+ T(image2DMSArray, 420, 999)
+ T(iimage1D, 420, 999)
+ T(iimage2D, 420, 310)
+ T(iimage3D, 420, 310)
+ T(iimage2DRect, 420, 999)
+ T(iimageCube, 420, 310)
+ T(iimageBuffer, 420, 320)
+ T(iimage1DArray, 420, 999)
+ T(iimage2DArray, 420, 310)
+ T(iimageCubeArray, 420, 320)
+ T(iimage2DMS, 420, 999)
+ T(iimage2DMSArray, 420, 999)
+ T(uimage1D, 420, 999)
+ T(uimage2D, 420, 310)
+ T(uimage3D, 420, 310)
+ T(uimage2DRect, 420, 999)
+ T(uimageCube, 420, 310)
+ T(uimageBuffer, 420, 320)
+ T(uimage1DArray, 420, 999)
+ T(uimage2DArray, 420, 310)
+ T(uimageCubeArray, 420, 320)
+ T(uimage2DMS, 420, 999)
+ T(uimage2DMSArray, 420, 999)
+
+ T(atomic_uint, 420, 310)
+};
+
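+/* For illustration: T(uint, 130, 300) expands to
+ *   { glsl_type::uint_type, 130, 300 },
+ * i.e. uint needs desktop GLSL 1.30 or GLSL ES 3.00; a 999 entry marks a
+ * type that no version of that API exposes through this table.
+ */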
+static const glsl_type *const deprecated_types[] = {
+ glsl_type::struct_gl_PointParameters_type,
+ glsl_type::struct_gl_MaterialParameters_type,
+ glsl_type::struct_gl_LightSourceParameters_type,
+ glsl_type::struct_gl_LightModelParameters_type,
+ glsl_type::struct_gl_LightModelProducts_type,
+ glsl_type::struct_gl_LightProducts_type,
+ glsl_type::struct_gl_FogParameters_type,
+};
+
+static inline void
+add_type(glsl_symbol_table *symbols, const glsl_type *const type)
+{
+ symbols->add_type(type->name, type);
+}
+
+/**
+ * Populate the symbol table with available built-in types.
+ */
+void
+_mesa_glsl_initialize_types(struct _mesa_glsl_parse_state *state)
+{
+ struct glsl_symbol_table *symbols = state->symbols;
+
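+   /* A table entry is exposed iff state->is_version(min_gl, min_es) holds,
+    * i.e. the shader targets at least that desktop GL or ES version. */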
+ for (unsigned i = 0; i < ARRAY_SIZE(builtin_type_versions); i++) {
+ const struct builtin_type_versions *const t = &builtin_type_versions[i];
+ if (state->is_version(t->min_gl, t->min_es)) {
+ add_type(symbols, t->type);
+ }
+ }
+
+   /* Add deprecated structure types. Although deprecated in 1.30, they
+    * remain present in compatibility profiles; they were removed in 1.40+
+    * (OpenGL 3.1+), so they are only exposed to compatibility shaders.
+ */
+ if (state->compat_shader || state->ARB_compatibility_enable) {
+ for (unsigned i = 0; i < ARRAY_SIZE(deprecated_types); i++) {
+ add_type(symbols, deprecated_types[i]);
+ }
+ }
+
+ /* Add types for enabled extensions. They may have already been added
+ * by the version-based loop, but attempting to add them a second time
+ * is harmless.
+ */
+ if (state->ARB_texture_cube_map_array_enable ||
+ state->EXT_texture_cube_map_array_enable ||
+ state->OES_texture_cube_map_array_enable) {
+ add_type(symbols, glsl_type::samplerCubeArray_type);
+ add_type(symbols, glsl_type::samplerCubeArrayShadow_type);
+ add_type(symbols, glsl_type::isamplerCubeArray_type);
+ add_type(symbols, glsl_type::usamplerCubeArray_type);
+ }
+
+ if (state->ARB_texture_multisample_enable) {
+ add_type(symbols, glsl_type::sampler2DMS_type);
+ add_type(symbols, glsl_type::isampler2DMS_type);
+ add_type(symbols, glsl_type::usampler2DMS_type);
+ }
+ if (state->ARB_texture_multisample_enable ||
+ state->OES_texture_storage_multisample_2d_array_enable) {
+ add_type(symbols, glsl_type::sampler2DMSArray_type);
+ add_type(symbols, glsl_type::isampler2DMSArray_type);
+ add_type(symbols, glsl_type::usampler2DMSArray_type);
+ }
+
+ if (state->ARB_texture_rectangle_enable) {
+ add_type(symbols, glsl_type::sampler2DRect_type);
+ add_type(symbols, glsl_type::sampler2DRectShadow_type);
+ }
+
+ if (state->EXT_gpu_shader4_enable) {
+ add_type(symbols, glsl_type::uint_type);
+ add_type(symbols, glsl_type::uvec2_type);
+ add_type(symbols, glsl_type::uvec3_type);
+ add_type(symbols, glsl_type::uvec4_type);
+
+ add_type(symbols, glsl_type::samplerCubeShadow_type);
+
+ if (state->ctx->Extensions.EXT_texture_array) {
+ add_type(symbols, glsl_type::sampler1DArray_type);
+ add_type(symbols, glsl_type::sampler2DArray_type);
+ add_type(symbols, glsl_type::sampler1DArrayShadow_type);
+ add_type(symbols, glsl_type::sampler2DArrayShadow_type);
+ }
+ if (state->ctx->Extensions.EXT_texture_buffer_object) {
+ add_type(symbols, glsl_type::samplerBuffer_type);
+ }
+
+ if (state->ctx->Extensions.EXT_texture_integer) {
+ add_type(symbols, glsl_type::isampler1D_type);
+ add_type(symbols, glsl_type::isampler2D_type);
+ add_type(symbols, glsl_type::isampler3D_type);
+ add_type(symbols, glsl_type::isamplerCube_type);
+
+ add_type(symbols, glsl_type::usampler1D_type);
+ add_type(symbols, glsl_type::usampler2D_type);
+ add_type(symbols, glsl_type::usampler3D_type);
+ add_type(symbols, glsl_type::usamplerCube_type);
+
+ if (state->ctx->Extensions.NV_texture_rectangle) {
+ add_type(symbols, glsl_type::isampler2DRect_type);
+ add_type(symbols, glsl_type::usampler2DRect_type);
+ }
+ if (state->ctx->Extensions.EXT_texture_array) {
+ add_type(symbols, glsl_type::isampler1DArray_type);
+ add_type(symbols, glsl_type::isampler2DArray_type);
+ add_type(symbols, glsl_type::usampler1DArray_type);
+ add_type(symbols, glsl_type::usampler2DArray_type);
+ }
+ if (state->ctx->Extensions.EXT_texture_buffer_object) {
+ add_type(symbols, glsl_type::isamplerBuffer_type);
+ add_type(symbols, glsl_type::usamplerBuffer_type);
+ }
+ }
+ }
+
+ if (state->EXT_texture_array_enable) {
+ add_type(symbols, glsl_type::sampler1DArray_type);
+ add_type(symbols, glsl_type::sampler2DArray_type);
+ add_type(symbols, glsl_type::sampler1DArrayShadow_type);
+ add_type(symbols, glsl_type::sampler2DArrayShadow_type);
+ }
+
+ if (state->OES_EGL_image_external_enable ||
+ state->OES_EGL_image_external_essl3_enable) {
+ add_type(symbols, glsl_type::samplerExternalOES_type);
+ }
+
+ if (state->OES_texture_3D_enable) {
+ add_type(symbols, glsl_type::sampler3D_type);
+ }
+
+ if (state->ARB_shader_image_load_store_enable ||
+ state->EXT_texture_cube_map_array_enable ||
+ state->OES_texture_cube_map_array_enable) {
+ add_type(symbols, glsl_type::imageCubeArray_type);
+ add_type(symbols, glsl_type::iimageCubeArray_type);
+ add_type(symbols, glsl_type::uimageCubeArray_type);
+ }
+
+ if (state->ARB_shader_image_load_store_enable) {
+ add_type(symbols, glsl_type::image1D_type);
+ add_type(symbols, glsl_type::image2D_type);
+ add_type(symbols, glsl_type::image3D_type);
+ add_type(symbols, glsl_type::image2DRect_type);
+ add_type(symbols, glsl_type::imageCube_type);
+ add_type(symbols, glsl_type::imageBuffer_type);
+ add_type(symbols, glsl_type::image1DArray_type);
+ add_type(symbols, glsl_type::image2DArray_type);
+ add_type(symbols, glsl_type::image2DMS_type);
+ add_type(symbols, glsl_type::image2DMSArray_type);
+ add_type(symbols, glsl_type::iimage1D_type);
+ add_type(symbols, glsl_type::iimage2D_type);
+ add_type(symbols, glsl_type::iimage3D_type);
+ add_type(symbols, glsl_type::iimage2DRect_type);
+ add_type(symbols, glsl_type::iimageCube_type);
+ add_type(symbols, glsl_type::iimageBuffer_type);
+ add_type(symbols, glsl_type::iimage1DArray_type);
+ add_type(symbols, glsl_type::iimage2DArray_type);
+ add_type(symbols, glsl_type::iimage2DMS_type);
+ add_type(symbols, glsl_type::iimage2DMSArray_type);
+ add_type(symbols, glsl_type::uimage1D_type);
+ add_type(symbols, glsl_type::uimage2D_type);
+ add_type(symbols, glsl_type::uimage3D_type);
+ add_type(symbols, glsl_type::uimage2DRect_type);
+ add_type(symbols, glsl_type::uimageCube_type);
+ add_type(symbols, glsl_type::uimageBuffer_type);
+ add_type(symbols, glsl_type::uimage1DArray_type);
+ add_type(symbols, glsl_type::uimage2DArray_type);
+ add_type(symbols, glsl_type::uimage2DMS_type);
+ add_type(symbols, glsl_type::uimage2DMSArray_type);
+ }
+
+ if (state->EXT_texture_buffer_enable || state->OES_texture_buffer_enable) {
+ add_type(symbols, glsl_type::samplerBuffer_type);
+ add_type(symbols, glsl_type::isamplerBuffer_type);
+ add_type(symbols, glsl_type::usamplerBuffer_type);
+
+ add_type(symbols, glsl_type::imageBuffer_type);
+ add_type(symbols, glsl_type::iimageBuffer_type);
+ add_type(symbols, glsl_type::uimageBuffer_type);
+ }
+
+ if (state->has_atomic_counters()) {
+ add_type(symbols, glsl_type::atomic_uint_type);
+ }
+
+ if (state->ARB_gpu_shader_fp64_enable) {
+ add_type(symbols, glsl_type::double_type);
+ add_type(symbols, glsl_type::dvec2_type);
+ add_type(symbols, glsl_type::dvec3_type);
+ add_type(symbols, glsl_type::dvec4_type);
+ add_type(symbols, glsl_type::dmat2_type);
+ add_type(symbols, glsl_type::dmat3_type);
+ add_type(symbols, glsl_type::dmat4_type);
+ add_type(symbols, glsl_type::dmat2x3_type);
+ add_type(symbols, glsl_type::dmat2x4_type);
+ add_type(symbols, glsl_type::dmat3x2_type);
+ add_type(symbols, glsl_type::dmat3x4_type);
+ add_type(symbols, glsl_type::dmat4x2_type);
+ add_type(symbols, glsl_type::dmat4x3_type);
+ }
+
+ if (state->ARB_gpu_shader_int64_enable ||
+ state->AMD_gpu_shader_int64_enable) {
+ add_type(symbols, glsl_type::int64_t_type);
+ add_type(symbols, glsl_type::i64vec2_type);
+ add_type(symbols, glsl_type::i64vec3_type);
+ add_type(symbols, glsl_type::i64vec4_type);
+
+ add_type(symbols, glsl_type::uint64_t_type);
+ add_type(symbols, glsl_type::u64vec2_type);
+ add_type(symbols, glsl_type::u64vec3_type);
+ add_type(symbols, glsl_type::u64vec4_type);
+ }
+}
+/** @} */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_variables.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_variables.cpp
new file mode 100644
index 0000000000..c9fdac8466
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/builtin_variables.cpp
@@ -0,0 +1,1624 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * Building this file with MinGW g++ 7.3 or 7.4 with:
+ * scons platform=windows toolchain=crossmingw machine=x86 build=profile
+ * triggers an internal compiler error.
+ * Overriding the optimization level to -O1 works around the issue.
+ * MinGW 5.3.1 does not seem to have the bug, nor does 8.3, so for now we
+ * simply test for version 7.x here.
+ */
+#if defined(__MINGW32__) && __GNUC__ == 7
+#warning "disabling optimizations for this file to work around compiler bug in MinGW gcc 7.x"
+#pragma GCC optimize("O1")
+#endif
+
+
+#include "ir.h"
+#include "ir_builder.h"
+#include "linker.h"
+#include "glsl_parser_extras.h"
+#include "glsl_symbol_table.h"
+#include "main/mtypes.h"
+#include "main/uniforms.h"
+#include "program/prog_statevars.h"
+#include "program/prog_instruction.h"
+#include "builtin_functions.h"
+
+using namespace ir_builder;
+
+static const struct gl_builtin_uniform_element gl_NumSamples_elements[] = {
+ {NULL, {STATE_NUM_SAMPLES, 0, 0}, SWIZZLE_XXXX}
+};
+
+static const struct gl_builtin_uniform_element gl_DepthRange_elements[] = {
+ {"near", {STATE_DEPTH_RANGE, 0, 0}, SWIZZLE_XXXX},
+ {"far", {STATE_DEPTH_RANGE, 0, 0}, SWIZZLE_YYYY},
+ {"diff", {STATE_DEPTH_RANGE, 0, 0}, SWIZZLE_ZZZZ},
+};
+
+static const struct gl_builtin_uniform_element gl_ClipPlane_elements[] = {
+ {NULL, {STATE_CLIPPLANE, 0, 0}, SWIZZLE_XYZW}
+};
+
+static const struct gl_builtin_uniform_element gl_Point_elements[] = {
+ {"size", {STATE_POINT_SIZE}, SWIZZLE_XXXX},
+ {"sizeMin", {STATE_POINT_SIZE}, SWIZZLE_YYYY},
+ {"sizeMax", {STATE_POINT_SIZE}, SWIZZLE_ZZZZ},
+ {"fadeThresholdSize", {STATE_POINT_SIZE}, SWIZZLE_WWWW},
+ {"distanceConstantAttenuation", {STATE_POINT_ATTENUATION}, SWIZZLE_XXXX},
+ {"distanceLinearAttenuation", {STATE_POINT_ATTENUATION}, SWIZZLE_YYYY},
+ {"distanceQuadraticAttenuation", {STATE_POINT_ATTENUATION}, SWIZZLE_ZZZZ},
+};
+
+static const struct gl_builtin_uniform_element gl_FrontMaterial_elements[] = {
+ {"emission", {STATE_MATERIAL, 0, STATE_EMISSION}, SWIZZLE_XYZW},
+ {"ambient", {STATE_MATERIAL, 0, STATE_AMBIENT}, SWIZZLE_XYZW},
+ {"diffuse", {STATE_MATERIAL, 0, STATE_DIFFUSE}, SWIZZLE_XYZW},
+ {"specular", {STATE_MATERIAL, 0, STATE_SPECULAR}, SWIZZLE_XYZW},
+ {"shininess", {STATE_MATERIAL, 0, STATE_SHININESS}, SWIZZLE_XXXX},
+};
+
+static const struct gl_builtin_uniform_element gl_BackMaterial_elements[] = {
+ {"emission", {STATE_MATERIAL, 1, STATE_EMISSION}, SWIZZLE_XYZW},
+ {"ambient", {STATE_MATERIAL, 1, STATE_AMBIENT}, SWIZZLE_XYZW},
+ {"diffuse", {STATE_MATERIAL, 1, STATE_DIFFUSE}, SWIZZLE_XYZW},
+ {"specular", {STATE_MATERIAL, 1, STATE_SPECULAR}, SWIZZLE_XYZW},
+ {"shininess", {STATE_MATERIAL, 1, STATE_SHININESS}, SWIZZLE_XXXX},
+};
+
+static const struct gl_builtin_uniform_element gl_LightSource_elements[] = {
+ {"ambient", {STATE_LIGHT, 0, STATE_AMBIENT}, SWIZZLE_XYZW},
+ {"diffuse", {STATE_LIGHT, 0, STATE_DIFFUSE}, SWIZZLE_XYZW},
+ {"specular", {STATE_LIGHT, 0, STATE_SPECULAR}, SWIZZLE_XYZW},
+ {"position", {STATE_LIGHT, 0, STATE_POSITION}, SWIZZLE_XYZW},
+ {"halfVector", {STATE_LIGHT, 0, STATE_HALF_VECTOR}, SWIZZLE_XYZW},
+ {"spotDirection", {STATE_LIGHT, 0, STATE_SPOT_DIRECTION},
+ MAKE_SWIZZLE4(SWIZZLE_X,
+ SWIZZLE_Y,
+ SWIZZLE_Z,
+ SWIZZLE_Z)},
+ {"spotExponent", {STATE_LIGHT, 0, STATE_ATTENUATION}, SWIZZLE_WWWW},
+ {"spotCutoff", {STATE_LIGHT, 0, STATE_SPOT_CUTOFF}, SWIZZLE_XXXX},
+ {"spotCosCutoff", {STATE_LIGHT, 0, STATE_SPOT_DIRECTION}, SWIZZLE_WWWW},
+ {"constantAttenuation", {STATE_LIGHT, 0, STATE_ATTENUATION}, SWIZZLE_XXXX},
+ {"linearAttenuation", {STATE_LIGHT, 0, STATE_ATTENUATION}, SWIZZLE_YYYY},
+ {"quadraticAttenuation", {STATE_LIGHT, 0, STATE_ATTENUATION}, SWIZZLE_ZZZZ},
+};
+
+static const struct gl_builtin_uniform_element gl_LightModel_elements[] = {
+ {"ambient", {STATE_LIGHTMODEL_AMBIENT, 0}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_FrontLightModelProduct_elements[] = {
+ {"sceneColor", {STATE_LIGHTMODEL_SCENECOLOR, 0}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_BackLightModelProduct_elements[] = {
+ {"sceneColor", {STATE_LIGHTMODEL_SCENECOLOR, 1}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_FrontLightProduct_elements[] = {
+ {"ambient", {STATE_LIGHTPROD, 0, 0, STATE_AMBIENT}, SWIZZLE_XYZW},
+ {"diffuse", {STATE_LIGHTPROD, 0, 0, STATE_DIFFUSE}, SWIZZLE_XYZW},
+ {"specular", {STATE_LIGHTPROD, 0, 0, STATE_SPECULAR}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_BackLightProduct_elements[] = {
+ {"ambient", {STATE_LIGHTPROD, 0, 1, STATE_AMBIENT}, SWIZZLE_XYZW},
+ {"diffuse", {STATE_LIGHTPROD, 0, 1, STATE_DIFFUSE}, SWIZZLE_XYZW},
+ {"specular", {STATE_LIGHTPROD, 0, 1, STATE_SPECULAR}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_TextureEnvColor_elements[] = {
+ {NULL, {STATE_TEXENV_COLOR, 0}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_EyePlaneS_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_EYE_S}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_EyePlaneT_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_EYE_T}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_EyePlaneR_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_EYE_R}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_EyePlaneQ_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_EYE_Q}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_ObjectPlaneS_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_OBJECT_S}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_ObjectPlaneT_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_OBJECT_T}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_ObjectPlaneR_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_OBJECT_R}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_ObjectPlaneQ_elements[] = {
+ {NULL, {STATE_TEXGEN, 0, STATE_TEXGEN_OBJECT_Q}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_Fog_elements[] = {
+ {"color", {STATE_FOG_COLOR}, SWIZZLE_XYZW},
+ {"density", {STATE_FOG_PARAMS}, SWIZZLE_XXXX},
+ {"start", {STATE_FOG_PARAMS}, SWIZZLE_YYYY},
+ {"end", {STATE_FOG_PARAMS}, SWIZZLE_ZZZZ},
+ {"scale", {STATE_FOG_PARAMS}, SWIZZLE_WWWW},
+};
+
+static const struct gl_builtin_uniform_element gl_NormalScale_elements[] = {
+ {NULL, {STATE_NORMAL_SCALE}, SWIZZLE_XXXX},
+};
+
+static const struct gl_builtin_uniform_element gl_FogParamsOptimizedMESA_elements[] = {
+ {NULL, {STATE_INTERNAL, STATE_FOG_PARAMS_OPTIMIZED}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_CurrentAttribVertMESA_elements[] = {
+ {NULL, {STATE_INTERNAL, STATE_CURRENT_ATTRIB, 0}, SWIZZLE_XYZW},
+};
+
+static const struct gl_builtin_uniform_element gl_CurrentAttribFragMESA_elements[] = {
+ {NULL, {STATE_INTERNAL, STATE_CURRENT_ATTRIB_MAYBE_VP_CLAMPED, 0}, SWIZZLE_XYZW},
+};
+
+#define MATRIX(name, statevar, modifier) \
+ static const struct gl_builtin_uniform_element name ## _elements[] = { \
+ { NULL, { statevar, 0, 0, 0, modifier}, SWIZZLE_XYZW }, \
+ { NULL, { statevar, 0, 1, 1, modifier}, SWIZZLE_XYZW }, \
+ { NULL, { statevar, 0, 2, 2, modifier}, SWIZZLE_XYZW }, \
+ { NULL, { statevar, 0, 3, 3, modifier}, SWIZZLE_XYZW }, \
+ }
+
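+/* For example, MATRIX(gl_ModelViewMatrix, STATE_MODELVIEW_MATRIX,
+ * STATE_MATRIX_TRANSPOSE) expands to gl_ModelViewMatrix_elements, a
+ * four-element array whose element i references row i of the (transposed)
+ * modelview matrix with an XYZW swizzle.
+ */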
+MATRIX(gl_ModelViewMatrix,
+ STATE_MODELVIEW_MATRIX, STATE_MATRIX_TRANSPOSE);
+MATRIX(gl_ModelViewMatrixInverse,
+ STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVTRANS);
+MATRIX(gl_ModelViewMatrixTranspose,
+ STATE_MODELVIEW_MATRIX, 0);
+MATRIX(gl_ModelViewMatrixInverseTranspose,
+ STATE_MODELVIEW_MATRIX, STATE_MATRIX_INVERSE);
+
+MATRIX(gl_ProjectionMatrix,
+ STATE_PROJECTION_MATRIX, STATE_MATRIX_TRANSPOSE);
+MATRIX(gl_ProjectionMatrixInverse,
+ STATE_PROJECTION_MATRIX, STATE_MATRIX_INVTRANS);
+MATRIX(gl_ProjectionMatrixTranspose,
+ STATE_PROJECTION_MATRIX, 0);
+MATRIX(gl_ProjectionMatrixInverseTranspose,
+ STATE_PROJECTION_MATRIX, STATE_MATRIX_INVERSE);
+
+MATRIX(gl_ModelViewProjectionMatrix,
+ STATE_MVP_MATRIX, STATE_MATRIX_TRANSPOSE);
+MATRIX(gl_ModelViewProjectionMatrixInverse,
+ STATE_MVP_MATRIX, STATE_MATRIX_INVTRANS);
+MATRIX(gl_ModelViewProjectionMatrixTranspose,
+ STATE_MVP_MATRIX, 0);
+MATRIX(gl_ModelViewProjectionMatrixInverseTranspose,
+ STATE_MVP_MATRIX, STATE_MATRIX_INVERSE);
+
+MATRIX(gl_TextureMatrix,
+ STATE_TEXTURE_MATRIX, STATE_MATRIX_TRANSPOSE);
+MATRIX(gl_TextureMatrixInverse,
+ STATE_TEXTURE_MATRIX, STATE_MATRIX_INVTRANS);
+MATRIX(gl_TextureMatrixTranspose,
+ STATE_TEXTURE_MATRIX, 0);
+MATRIX(gl_TextureMatrixInverseTranspose,
+ STATE_TEXTURE_MATRIX, STATE_MATRIX_INVERSE);
+
+static const struct gl_builtin_uniform_element gl_NormalMatrix_elements[] = {
+ { NULL, { STATE_MODELVIEW_MATRIX, 0, 0, 0, STATE_MATRIX_INVERSE},
+ MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z) },
+ { NULL, { STATE_MODELVIEW_MATRIX, 0, 1, 1, STATE_MATRIX_INVERSE},
+ MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z) },
+ { NULL, { STATE_MODELVIEW_MATRIX, 0, 2, 2, STATE_MATRIX_INVERSE},
+ MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_Z) },
+};
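+/* Note (added): gl_NormalMatrix is the inverse-transpose of the upper-left
+ * 3x3 of the modelview matrix; fetching rows of the inverse modelview, as
+ * above, yields exactly the columns of that inverse-transpose.
+ */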
+
+#undef MATRIX
+
+#define STATEVAR(name) {#name, name ## _elements, ARRAY_SIZE(name ## _elements)}
+
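+/* STATEVAR(gl_DepthRange), for example, expands to
+ *   {"gl_DepthRange", gl_DepthRange_elements, ARRAY_SIZE(gl_DepthRange_elements)}
+ * pairing each built-in uniform's name with its element table and its size.
+ */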
+static const struct gl_builtin_uniform_desc _mesa_builtin_uniform_desc[] = {
+ STATEVAR(gl_NumSamples),
+ STATEVAR(gl_DepthRange),
+ STATEVAR(gl_ClipPlane),
+ STATEVAR(gl_Point),
+ STATEVAR(gl_FrontMaterial),
+ STATEVAR(gl_BackMaterial),
+ STATEVAR(gl_LightSource),
+ STATEVAR(gl_LightModel),
+ STATEVAR(gl_FrontLightModelProduct),
+ STATEVAR(gl_BackLightModelProduct),
+ STATEVAR(gl_FrontLightProduct),
+ STATEVAR(gl_BackLightProduct),
+ STATEVAR(gl_TextureEnvColor),
+ STATEVAR(gl_EyePlaneS),
+ STATEVAR(gl_EyePlaneT),
+ STATEVAR(gl_EyePlaneR),
+ STATEVAR(gl_EyePlaneQ),
+ STATEVAR(gl_ObjectPlaneS),
+ STATEVAR(gl_ObjectPlaneT),
+ STATEVAR(gl_ObjectPlaneR),
+ STATEVAR(gl_ObjectPlaneQ),
+ STATEVAR(gl_Fog),
+
+ STATEVAR(gl_ModelViewMatrix),
+ STATEVAR(gl_ModelViewMatrixInverse),
+ STATEVAR(gl_ModelViewMatrixTranspose),
+ STATEVAR(gl_ModelViewMatrixInverseTranspose),
+
+ STATEVAR(gl_ProjectionMatrix),
+ STATEVAR(gl_ProjectionMatrixInverse),
+ STATEVAR(gl_ProjectionMatrixTranspose),
+ STATEVAR(gl_ProjectionMatrixInverseTranspose),
+
+ STATEVAR(gl_ModelViewProjectionMatrix),
+ STATEVAR(gl_ModelViewProjectionMatrixInverse),
+ STATEVAR(gl_ModelViewProjectionMatrixTranspose),
+ STATEVAR(gl_ModelViewProjectionMatrixInverseTranspose),
+
+ STATEVAR(gl_TextureMatrix),
+ STATEVAR(gl_TextureMatrixInverse),
+ STATEVAR(gl_TextureMatrixTranspose),
+ STATEVAR(gl_TextureMatrixInverseTranspose),
+
+ STATEVAR(gl_NormalMatrix),
+ STATEVAR(gl_NormalScale),
+
+ STATEVAR(gl_FogParamsOptimizedMESA),
+ STATEVAR(gl_CurrentAttribVertMESA),
+ STATEVAR(gl_CurrentAttribFragMESA),
+
+ {NULL, NULL, 0}
+};
+
+
+namespace {
+
+/**
+ * Data structure that accumulates fields for the gl_PerVertex interface
+ * block.
+ */
+class per_vertex_accumulator
+{
+public:
+ per_vertex_accumulator();
+ void add_field(int slot, const glsl_type *type, int precision,
+ const char *name);
+ const glsl_type *construct_interface_instance() const;
+
+private:
+ glsl_struct_field fields[11];
+ unsigned num_fields;
+};
+
+
+per_vertex_accumulator::per_vertex_accumulator()
+ : fields(),
+ num_fields(0)
+{
+}
+
+
+void
+per_vertex_accumulator::add_field(int slot, const glsl_type *type,
+ int precision, const char *name)
+{
+ assert(this->num_fields < ARRAY_SIZE(this->fields));
+ this->fields[this->num_fields].type = type;
+ this->fields[this->num_fields].name = name;
+ this->fields[this->num_fields].matrix_layout = GLSL_MATRIX_LAYOUT_INHERITED;
+ this->fields[this->num_fields].location = slot;
+ this->fields[this->num_fields].offset = -1;
+ this->fields[this->num_fields].interpolation = INTERP_MODE_NONE;
+ this->fields[this->num_fields].centroid = 0;
+ this->fields[this->num_fields].sample = 0;
+ this->fields[this->num_fields].patch = 0;
+ this->fields[this->num_fields].precision = precision;
+ this->fields[this->num_fields].memory_read_only = 0;
+ this->fields[this->num_fields].memory_write_only = 0;
+ this->fields[this->num_fields].memory_coherent = 0;
+ this->fields[this->num_fields].memory_volatile = 0;
+ this->fields[this->num_fields].memory_restrict = 0;
+ this->fields[this->num_fields].image_format = PIPE_FORMAT_NONE;
+ this->fields[this->num_fields].explicit_xfb_buffer = 0;
+ this->fields[this->num_fields].xfb_buffer = -1;
+ this->fields[this->num_fields].xfb_stride = -1;
+ this->num_fields++;
+}
+
+
+const glsl_type *
+per_vertex_accumulator::construct_interface_instance() const
+{
+ return glsl_type::get_interface_instance(this->fields, this->num_fields,
+ GLSL_INTERFACE_PACKING_STD140,
+ false,
+ "gl_PerVertex");
+}
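+/* Illustrative sketch (added; hypothetical call sequence): the generator
+ * below accumulates fields and then materializes the interface block type,
+ * roughly:
+ *
+ *    per_vertex_accumulator acc;
+ *    acc.add_field(VARYING_SLOT_POS, glsl_type::vec4_type,
+ *                  GLSL_PRECISION_HIGH, "gl_Position");
+ *    const glsl_type *block = acc.construct_interface_instance();
+ */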
+
+
+class builtin_variable_generator
+{
+public:
+ builtin_variable_generator(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+ void generate_constants();
+ void generate_uniforms();
+ void generate_special_vars();
+ void generate_vs_special_vars();
+ void generate_tcs_special_vars();
+ void generate_tes_special_vars();
+ void generate_gs_special_vars();
+ void generate_fs_special_vars();
+ void generate_cs_special_vars();
+ void generate_varyings();
+
+private:
+ const glsl_type *array(const glsl_type *base, unsigned elements)
+ {
+ return glsl_type::get_array_instance(base, elements);
+ }
+
+ const glsl_type *type(const char *name)
+ {
+ return symtab->get_type(name);
+ }
+
+ ir_variable *add_input(int slot, const glsl_type *type, int precision,
+ const char *name)
+ {
+ return add_variable(name, type, precision, ir_var_shader_in, slot);
+ }
+
+ ir_variable *add_input(int slot, const glsl_type *type, const char *name)
+ {
+ return add_input(slot, type, GLSL_PRECISION_NONE, name);
+ }
+
+ ir_variable *add_output(int slot, const glsl_type *type, int precision,
+ const char *name)
+ {
+ return add_variable(name, type, precision, ir_var_shader_out, slot);
+ }
+
+ ir_variable *add_output(int slot, const glsl_type *type, const char *name)
+ {
+ return add_output(slot, type, GLSL_PRECISION_NONE, name);
+ }
+
+ ir_variable *add_index_output(int slot, int index, const glsl_type *type,
+ int precision, const char *name)
+ {
+ return add_index_variable(name, type, precision, ir_var_shader_out, slot,
+ index);
+ }
+
+ ir_variable *add_system_value(int slot, const glsl_type *type, int precision,
+ const char *name)
+ {
+ return add_variable(name, type, precision, ir_var_system_value, slot);
+ }
+ ir_variable *add_system_value(int slot, const glsl_type *type,
+ const char *name)
+ {
+ return add_system_value(slot, type, GLSL_PRECISION_NONE, name);
+ }
+
+ ir_variable *add_variable(const char *name, const glsl_type *type,
+ int precision, enum ir_variable_mode mode,
+ int slot);
+ ir_variable *add_index_variable(const char *name, const glsl_type *type,
+ int precision, enum ir_variable_mode mode,
+ int slot, int index);
+ ir_variable *add_uniform(const glsl_type *type, int precision,
+ const char *name);
+ ir_variable *add_uniform(const glsl_type *type, const char *name)
+ {
+ return add_uniform(type, GLSL_PRECISION_NONE, name);
+ }
+ ir_variable *add_const(const char *name, int precision, int value);
+ ir_variable *add_const(const char *name, int value)
+ {
+ return add_const(name, GLSL_PRECISION_MEDIUM, value);
+ }
+ ir_variable *add_const_ivec3(const char *name, int x, int y, int z);
+ void add_varying(int slot, const glsl_type *type, int precision,
+ const char *name);
+ void add_varying(int slot, const glsl_type *type, const char *name)
+ {
+ add_varying(slot, type, GLSL_PRECISION_NONE, name);
+ }
+
+ exec_list * const instructions;
+ struct _mesa_glsl_parse_state * const state;
+ glsl_symbol_table * const symtab;
+
+ /**
+ * True if compatibility-profile-only variables should be included. (In
+    * desktop GL, these are always included when the GLSL version is 1.30
+    * or below.)
+ */
+ const bool compatibility;
+
+ const glsl_type * const bool_t;
+ const glsl_type * const int_t;
+ const glsl_type * const uint_t;
+ const glsl_type * const uint64_t;
+ const glsl_type * const float_t;
+ const glsl_type * const vec2_t;
+ const glsl_type * const vec3_t;
+ const glsl_type * const vec4_t;
+ const glsl_type * const uvec3_t;
+ const glsl_type * const mat3_t;
+ const glsl_type * const mat4_t;
+
+ per_vertex_accumulator per_vertex_in;
+ per_vertex_accumulator per_vertex_out;
+};
+
+
+builtin_variable_generator::builtin_variable_generator(
+ exec_list *instructions, struct _mesa_glsl_parse_state *state)
+ : instructions(instructions), state(state), symtab(state->symbols),
+ compatibility(state->compat_shader || state->ARB_compatibility_enable),
+ bool_t(glsl_type::bool_type), int_t(glsl_type::int_type),
+ uint_t(glsl_type::uint_type),
+ uint64_t(glsl_type::uint64_t_type),
+ float_t(glsl_type::float_type), vec2_t(glsl_type::vec2_type),
+ vec3_t(glsl_type::vec3_type), vec4_t(glsl_type::vec4_type),
+ uvec3_t(glsl_type::uvec3_type),
+ mat3_t(glsl_type::mat3_type), mat4_t(glsl_type::mat4_type)
+{
+}
+
+ir_variable *
+builtin_variable_generator::add_index_variable(const char *name,
+ const glsl_type *type,
+ int precision,
+ enum ir_variable_mode mode,
+ int slot, int index)
+{
+ ir_variable *var = new(symtab) ir_variable(type, name, mode);
+ var->data.how_declared = ir_var_declared_implicitly;
+
+ switch (var->data.mode) {
+ case ir_var_auto:
+ case ir_var_shader_in:
+ case ir_var_uniform:
+ case ir_var_system_value:
+ var->data.read_only = true;
+ break;
+ case ir_var_shader_out:
+ case ir_var_shader_storage:
+ break;
+ default:
+      /* The only variables that should be added using this function are
+       * uniforms, shader storage buffers, shader inputs and outputs,
+       * constants (which use ir_var_auto), and system values.
+ */
+ assert(0);
+ break;
+ }
+
+ var->data.location = slot;
+ var->data.explicit_location = (slot >= 0);
+ var->data.explicit_index = 1;
+ var->data.index = index;
+
+ if (state->es_shader)
+ var->data.precision = precision;
+
+   /* Once the variable is created and initialized, add it to the symbol table
+ * and add the declaration to the IR stream.
+ */
+ instructions->push_tail(var);
+
+ symtab->add_variable(var);
+ return var;
+}
+
+ir_variable *
+builtin_variable_generator::add_variable(const char *name,
+ const glsl_type *type,
+ int precision,
+ enum ir_variable_mode mode, int slot)
+{
+ ir_variable *var = new(symtab) ir_variable(type, name, mode);
+ var->data.how_declared = ir_var_declared_implicitly;
+
+ switch (var->data.mode) {
+ case ir_var_auto:
+ case ir_var_shader_in:
+ case ir_var_uniform:
+ case ir_var_system_value:
+ var->data.read_only = true;
+ break;
+ case ir_var_shader_out:
+ case ir_var_shader_storage:
+ break;
+ default:
+      /* The only variables that should be added using this function are
+       * uniforms, shader storage buffers, shader inputs and outputs,
+       * constants (which use ir_var_auto), and system values.
+ */
+ assert(0);
+ break;
+ }
+
+ var->data.location = slot;
+ var->data.explicit_location = (slot >= 0);
+ var->data.explicit_index = 0;
+
+ if (state->es_shader)
+ var->data.precision = precision;
+
+   /* Once the variable is created and initialized, add it to the symbol table
+ * and add the declaration to the IR stream.
+ */
+ instructions->push_tail(var);
+
+ symtab->add_variable(var);
+ return var;
+}
+
+extern "C" const struct gl_builtin_uniform_desc *
+_mesa_glsl_get_builtin_uniform_desc(const char *name)
+{
+ for (unsigned i = 0; _mesa_builtin_uniform_desc[i].name != NULL; i++) {
+ if (strcmp(_mesa_builtin_uniform_desc[i].name, name) == 0) {
+ return &_mesa_builtin_uniform_desc[i];
+ }
+ }
+ return NULL;
+}
+
+ir_variable *
+builtin_variable_generator::add_uniform(const glsl_type *type,
+ int precision,
+ const char *name)
+{
+ ir_variable *const uni =
+ add_variable(name, type, precision, ir_var_uniform, -1);
+
+ const struct gl_builtin_uniform_desc* const statevar =
+ _mesa_glsl_get_builtin_uniform_desc(name);
+ assert(statevar != NULL);
+
+ const unsigned array_count = type->is_array() ? type->length : 1;
+
+ ir_state_slot *slots =
+ uni->allocate_state_slots(array_count * statevar->num_elements);
+
+ for (unsigned a = 0; a < array_count; a++) {
+ for (unsigned j = 0; j < statevar->num_elements; j++) {
+ const struct gl_builtin_uniform_element *element =
+ &statevar->elements[j];
+
+ memcpy(slots->tokens, element->tokens, sizeof(element->tokens));
+ if (type->is_array()) {
+ if (strcmp(name, "gl_CurrentAttribVertMESA") == 0 ||
+ strcmp(name, "gl_CurrentAttribFragMESA") == 0) {
+ slots->tokens[2] = a;
+ } else {
+ slots->tokens[1] = a;
+ }
+ }
+
+ slots->swizzle = element->swizzle;
+ slots++;
+ }
+ }
+
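+   /* Note (added): a non-array uniform such as gl_DepthRange gets one slot
+    * per element (near/far/diff), each referencing STATE_DEPTH_RANGE with a
+    * different swizzle; for arrays, the loop above rewrites token [1] (or
+    * [2] for the MESA current-attrib arrays) with the element index.
+    */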
+ return uni;
+}
+
+
+ir_variable *
+builtin_variable_generator::add_const(const char *name, int precision,
+ int value)
+{
+ ir_variable *const var = add_variable(name, glsl_type::int_type,
+ precision, ir_var_auto, -1);
+ var->constant_value = new(var) ir_constant(value);
+ var->constant_initializer = new(var) ir_constant(value);
+ var->data.has_initializer = true;
+ return var;
+}
+
+
+ir_variable *
+builtin_variable_generator::add_const_ivec3(const char *name, int x, int y,
+ int z)
+{
+ ir_variable *const var = add_variable(name, glsl_type::ivec3_type,
+ GLSL_PRECISION_HIGH,
+ ir_var_auto, -1);
+ ir_constant_data data;
+ memset(&data, 0, sizeof(data));
+ data.i[0] = x;
+ data.i[1] = y;
+ data.i[2] = z;
+ var->constant_value = new(var) ir_constant(glsl_type::ivec3_type, &data);
+ var->constant_initializer =
+ new(var) ir_constant(glsl_type::ivec3_type, &data);
+ var->data.has_initializer = true;
+ return var;
+}
+
+
+void
+builtin_variable_generator::generate_constants()
+{
+ add_const("gl_MaxVertexAttribs", state->Const.MaxVertexAttribs);
+ add_const("gl_MaxVertexTextureImageUnits",
+ state->Const.MaxVertexTextureImageUnits);
+ add_const("gl_MaxCombinedTextureImageUnits",
+ state->Const.MaxCombinedTextureImageUnits);
+ add_const("gl_MaxTextureImageUnits", state->Const.MaxTextureImageUnits);
+ add_const("gl_MaxDrawBuffers", state->Const.MaxDrawBuffers);
+
+   /* Max uniforms/varyings: GLSL ES counts these in units of vectors;
+    * desktop GL counts them in units of "components" or "floats" and, since
+    * GL 4.1, also in units of vectors.
+ */
+ if (!state->es_shader) {
+ add_const("gl_MaxFragmentUniformComponents",
+ state->Const.MaxFragmentUniformComponents);
+ add_const("gl_MaxVertexUniformComponents",
+ state->Const.MaxVertexUniformComponents);
+ }
+
+ if (state->is_version(410, 100)) {
+ add_const("gl_MaxVertexUniformVectors",
+ state->Const.MaxVertexUniformComponents / 4);
+ add_const("gl_MaxFragmentUniformVectors",
+ state->Const.MaxFragmentUniformComponents / 4);
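+      /* E.g. a driver reporting 1024 vertex uniform components (a
+       * hypothetical figure) exposes gl_MaxVertexUniformVectors == 256,
+       * since each vector holds four components. */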
+
+ /* In GLSL ES 3.00, gl_MaxVaryingVectors was split out to separate
+ * vertex and fragment shader constants.
+ */
+ if (state->is_version(0, 300)) {
+ add_const("gl_MaxVertexOutputVectors",
+ state->ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents / 4);
+ add_const("gl_MaxFragmentInputVectors",
+ state->ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents / 4);
+ } else {
+ add_const("gl_MaxVaryingVectors",
+ state->ctx->Const.MaxVarying);
+ }
+
+      /* EXT_blend_func_extended brings a built-in constant
+       * for determining the number of dual-source draw buffers.
+       */
+ if (state->EXT_blend_func_extended_enable) {
+ add_const("gl_MaxDualSourceDrawBuffersEXT",
+ state->Const.MaxDualSourceDrawBuffers);
+ }
+ } else {
+ /* Note: gl_MaxVaryingFloats was deprecated in GLSL 1.30+, but not
+ * removed
+ */
+ add_const("gl_MaxVaryingFloats", state->ctx->Const.MaxVarying * 4);
+ }
+
+ /* Texel offsets were introduced in ARB_shading_language_420pack (which
+ * requires desktop GLSL version 130), and adopted into desktop GLSL
+ * version 4.20 and GLSL ES version 3.00.
+ */
+ if ((state->is_version(130, 0) &&
+ state->ARB_shading_language_420pack_enable) ||
+ state->is_version(420, 300)) {
+ add_const("gl_MinProgramTexelOffset",
+ state->Const.MinProgramTexelOffset);
+ add_const("gl_MaxProgramTexelOffset",
+ state->Const.MaxProgramTexelOffset);
+ }
+
+ if (state->has_clip_distance()) {
+ add_const("gl_MaxClipDistances", state->Const.MaxClipPlanes);
+ }
+ if (state->is_version(130, 0)) {
+ add_const("gl_MaxVaryingComponents", state->ctx->Const.MaxVarying * 4);
+ }
+ if (state->has_cull_distance()) {
+ add_const("gl_MaxCullDistances", state->Const.MaxClipPlanes);
+ add_const("gl_MaxCombinedClipAndCullDistances",
+ state->Const.MaxClipPlanes);
+ }
+
+ if (state->has_geometry_shader()) {
+ add_const("gl_MaxVertexOutputComponents",
+ state->Const.MaxVertexOutputComponents);
+ add_const("gl_MaxGeometryInputComponents",
+ state->Const.MaxGeometryInputComponents);
+ add_const("gl_MaxGeometryOutputComponents",
+ state->Const.MaxGeometryOutputComponents);
+ add_const("gl_MaxFragmentInputComponents",
+ state->Const.MaxFragmentInputComponents);
+ add_const("gl_MaxGeometryTextureImageUnits",
+ state->Const.MaxGeometryTextureImageUnits);
+ add_const("gl_MaxGeometryOutputVertices",
+ state->Const.MaxGeometryOutputVertices);
+ add_const("gl_MaxGeometryTotalOutputComponents",
+ state->Const.MaxGeometryTotalOutputComponents);
+ add_const("gl_MaxGeometryUniformComponents",
+ state->Const.MaxGeometryUniformComponents);
+
+ /* Note: the GLSL 1.50-4.40 specs require
+ * gl_MaxGeometryVaryingComponents to be present, and to be at least 64.
+ * But they do not define what it means (and there does not appear to be
+ * any corresponding constant in the GL specs). However,
+ * ARB_geometry_shader4 defines MAX_GEOMETRY_VARYING_COMPONENTS_ARB to
+ * be the maximum number of components available for use as geometry
+ * outputs. So we assume this is a synonym for
+ * gl_MaxGeometryOutputComponents.
+ */
+ add_const("gl_MaxGeometryVaryingComponents",
+ state->Const.MaxGeometryOutputComponents);
+ }
+
+ if (compatibility) {
+ /* Note: gl_MaxLights stopped being listed as an explicit constant in
+       * GLSL 1.30; however, it continues to be referred to (as a minimum size
+ * for compatibility-mode uniforms) all the way up through GLSL 4.30, so
+ * this seems like it was probably an oversight.
+ */
+ add_const("gl_MaxLights", state->Const.MaxLights);
+
+ add_const("gl_MaxClipPlanes", state->Const.MaxClipPlanes);
+
+      /* Note: gl_MaxTextureUnits wasn't made compatibility-only until GLSL
+       * 1.50; however, this seems like it was probably an oversight.
+ */
+ add_const("gl_MaxTextureUnits", state->Const.MaxTextureUnits);
+
+ /* Note: gl_MaxTextureCoords was left out of GLSL 1.40, but it was
+ * re-introduced in GLSL 1.50, so this seems like it was probably an
+ * oversight.
+ */
+ add_const("gl_MaxTextureCoords", state->Const.MaxTextureCoords);
+ }
+
+ if (state->has_atomic_counters()) {
+ add_const("gl_MaxVertexAtomicCounters",
+ state->Const.MaxVertexAtomicCounters);
+ add_const("gl_MaxFragmentAtomicCounters",
+ state->Const.MaxFragmentAtomicCounters);
+ add_const("gl_MaxCombinedAtomicCounters",
+ state->Const.MaxCombinedAtomicCounters);
+ add_const("gl_MaxAtomicCounterBindings",
+ state->Const.MaxAtomicBufferBindings);
+
+ if (state->has_geometry_shader()) {
+ add_const("gl_MaxGeometryAtomicCounters",
+ state->Const.MaxGeometryAtomicCounters);
+ }
+ if (state->is_version(110, 320)) {
+ add_const("gl_MaxTessControlAtomicCounters",
+ state->Const.MaxTessControlAtomicCounters);
+ add_const("gl_MaxTessEvaluationAtomicCounters",
+ state->Const.MaxTessEvaluationAtomicCounters);
+ }
+ }
+
+ if (state->is_version(420, 310)) {
+ add_const("gl_MaxVertexAtomicCounterBuffers",
+ state->Const.MaxVertexAtomicCounterBuffers);
+ add_const("gl_MaxFragmentAtomicCounterBuffers",
+ state->Const.MaxFragmentAtomicCounterBuffers);
+ add_const("gl_MaxCombinedAtomicCounterBuffers",
+ state->Const.MaxCombinedAtomicCounterBuffers);
+ add_const("gl_MaxAtomicCounterBufferSize",
+ state->Const.MaxAtomicCounterBufferSize);
+
+ if (state->has_geometry_shader()) {
+ add_const("gl_MaxGeometryAtomicCounterBuffers",
+ state->Const.MaxGeometryAtomicCounterBuffers);
+ }
+ if (state->is_version(110, 320)) {
+ add_const("gl_MaxTessControlAtomicCounterBuffers",
+ state->Const.MaxTessControlAtomicCounterBuffers);
+ add_const("gl_MaxTessEvaluationAtomicCounterBuffers",
+ state->Const.MaxTessEvaluationAtomicCounterBuffers);
+ }
+ }
+
+ if (state->is_version(430, 310) || state->ARB_compute_shader_enable) {
+ add_const("gl_MaxComputeAtomicCounterBuffers",
+ state->Const.MaxComputeAtomicCounterBuffers);
+ add_const("gl_MaxComputeAtomicCounters",
+ state->Const.MaxComputeAtomicCounters);
+ add_const("gl_MaxComputeImageUniforms",
+ state->Const.MaxComputeImageUniforms);
+ add_const("gl_MaxComputeTextureImageUnits",
+ state->Const.MaxComputeTextureImageUnits);
+ add_const("gl_MaxComputeUniformComponents",
+ state->Const.MaxComputeUniformComponents);
+
+ add_const_ivec3("gl_MaxComputeWorkGroupCount",
+ state->Const.MaxComputeWorkGroupCount[0],
+ state->Const.MaxComputeWorkGroupCount[1],
+ state->Const.MaxComputeWorkGroupCount[2]);
+ add_const_ivec3("gl_MaxComputeWorkGroupSize",
+ state->Const.MaxComputeWorkGroupSize[0],
+ state->Const.MaxComputeWorkGroupSize[1],
+ state->Const.MaxComputeWorkGroupSize[2]);
+
+ /* From the GLSL 4.40 spec, section 7.1 (Built-In Language Variables):
+ *
+ * The built-in constant gl_WorkGroupSize is a compute-shader
+ * constant containing the local work-group size of the shader. The
+ * size of the work group in the X, Y, and Z dimensions is stored in
+       * the x, y, and z components. The constant values in
+ * gl_WorkGroupSize will match those specified in the required
+ * local_size_x, local_size_y, and local_size_z layout qualifiers
+ * for the current shader. This is a constant so that it can be
+ * used to size arrays of memory that can be shared within the local
+ * work group. It is a compile-time error to use gl_WorkGroupSize
+ * in a shader that does not declare a fixed local group size, or
+ * before that shader has declared a fixed local group size, using
+ * local_size_x, local_size_y, and local_size_z.
+ *
+ * To prevent the shader from trying to refer to gl_WorkGroupSize before
+       * the layout declaration, we don't define it here. Instead we define it
+ * in ast_cs_input_layout::hir().
+ */
+ }
+
+ if (state->has_enhanced_layouts()) {
+ add_const("gl_MaxTransformFeedbackBuffers",
+ state->Const.MaxTransformFeedbackBuffers);
+ add_const("gl_MaxTransformFeedbackInterleavedComponents",
+ state->Const.MaxTransformFeedbackInterleavedComponents);
+ }
+
+ if (state->has_shader_image_load_store()) {
+ add_const("gl_MaxImageUnits",
+ state->Const.MaxImageUnits);
+ add_const("gl_MaxVertexImageUniforms",
+ state->Const.MaxVertexImageUniforms);
+ add_const("gl_MaxFragmentImageUniforms",
+ state->Const.MaxFragmentImageUniforms);
+ add_const("gl_MaxCombinedImageUniforms",
+ state->Const.MaxCombinedImageUniforms);
+
+ if (state->has_geometry_shader()) {
+ add_const("gl_MaxGeometryImageUniforms",
+ state->Const.MaxGeometryImageUniforms);
+ }
+
+ if (!state->es_shader) {
+ add_const("gl_MaxCombinedImageUnitsAndFragmentOutputs",
+ state->Const.MaxCombinedShaderOutputResources);
+ add_const("gl_MaxImageSamples",
+ state->Const.MaxImageSamples);
+ }
+
+ if (state->has_tessellation_shader()) {
+ add_const("gl_MaxTessControlImageUniforms",
+ state->Const.MaxTessControlImageUniforms);
+ add_const("gl_MaxTessEvaluationImageUniforms",
+ state->Const.MaxTessEvaluationImageUniforms);
+ }
+ }
+
+ if (state->is_version(440, 310) ||
+ state->ARB_ES3_1_compatibility_enable) {
+ add_const("gl_MaxCombinedShaderOutputResources",
+ state->Const.MaxCombinedShaderOutputResources);
+ }
+
+ if (state->is_version(410, 0) ||
+ state->ARB_viewport_array_enable ||
+ state->OES_viewport_array_enable) {
+ add_const("gl_MaxViewports", GLSL_PRECISION_HIGH,
+ state->Const.MaxViewports);
+ }
+
+ if (state->has_tessellation_shader()) {
+ add_const("gl_MaxPatchVertices", state->Const.MaxPatchVertices);
+ add_const("gl_MaxTessGenLevel", state->Const.MaxTessGenLevel);
+ add_const("gl_MaxTessControlInputComponents", state->Const.MaxTessControlInputComponents);
+ add_const("gl_MaxTessControlOutputComponents", state->Const.MaxTessControlOutputComponents);
+ add_const("gl_MaxTessControlTextureImageUnits", state->Const.MaxTessControlTextureImageUnits);
+ add_const("gl_MaxTessEvaluationInputComponents", state->Const.MaxTessEvaluationInputComponents);
+ add_const("gl_MaxTessEvaluationOutputComponents", state->Const.MaxTessEvaluationOutputComponents);
+ add_const("gl_MaxTessEvaluationTextureImageUnits", state->Const.MaxTessEvaluationTextureImageUnits);
+ add_const("gl_MaxTessPatchComponents", state->Const.MaxTessPatchComponents);
+ add_const("gl_MaxTessControlTotalOutputComponents", state->Const.MaxTessControlTotalOutputComponents);
+ add_const("gl_MaxTessControlUniformComponents", state->Const.MaxTessControlUniformComponents);
+ add_const("gl_MaxTessEvaluationUniformComponents", state->Const.MaxTessEvaluationUniformComponents);
+ }
+
+ if (state->is_version(450, 320) ||
+ state->OES_sample_variables_enable ||
+ state->ARB_ES3_1_compatibility_enable)
+ add_const("gl_MaxSamples", state->Const.MaxSamples);
+}
+
+
+/**
+ * Generate uniform variables (which exist in all types of shaders).
+ */
+void
+builtin_variable_generator::generate_uniforms()
+{
+ if (state->is_version(400, 320) ||
+ state->ARB_sample_shading_enable ||
+ state->OES_sample_variables_enable)
+ add_uniform(int_t, GLSL_PRECISION_LOW, "gl_NumSamples");
+ add_uniform(type("gl_DepthRangeParameters"), "gl_DepthRange");
+ add_uniform(array(vec4_t, VERT_ATTRIB_MAX), "gl_CurrentAttribVertMESA");
+ add_uniform(array(vec4_t, VARYING_SLOT_MAX), "gl_CurrentAttribFragMESA");
+
+ if (compatibility) {
+ add_uniform(mat4_t, "gl_ModelViewMatrix");
+ add_uniform(mat4_t, "gl_ProjectionMatrix");
+ add_uniform(mat4_t, "gl_ModelViewProjectionMatrix");
+ add_uniform(mat3_t, "gl_NormalMatrix");
+ add_uniform(mat4_t, "gl_ModelViewMatrixInverse");
+ add_uniform(mat4_t, "gl_ProjectionMatrixInverse");
+ add_uniform(mat4_t, "gl_ModelViewProjectionMatrixInverse");
+ add_uniform(mat4_t, "gl_ModelViewMatrixTranspose");
+ add_uniform(mat4_t, "gl_ProjectionMatrixTranspose");
+ add_uniform(mat4_t, "gl_ModelViewProjectionMatrixTranspose");
+ add_uniform(mat4_t, "gl_ModelViewMatrixInverseTranspose");
+ add_uniform(mat4_t, "gl_ProjectionMatrixInverseTranspose");
+ add_uniform(mat4_t, "gl_ModelViewProjectionMatrixInverseTranspose");
+ add_uniform(float_t, "gl_NormalScale");
+ add_uniform(type("gl_LightModelParameters"), "gl_LightModel");
+ add_uniform(vec4_t, "gl_FogParamsOptimizedMESA");
+
+ const glsl_type *const mat4_array_type =
+ array(mat4_t, state->Const.MaxTextureCoords);
+ add_uniform(mat4_array_type, "gl_TextureMatrix");
+ add_uniform(mat4_array_type, "gl_TextureMatrixInverse");
+ add_uniform(mat4_array_type, "gl_TextureMatrixTranspose");
+ add_uniform(mat4_array_type, "gl_TextureMatrixInverseTranspose");
+
+ add_uniform(array(vec4_t, state->Const.MaxClipPlanes), "gl_ClipPlane");
+ add_uniform(type("gl_PointParameters"), "gl_Point");
+
+ const glsl_type *const material_parameters_type =
+ type("gl_MaterialParameters");
+ add_uniform(material_parameters_type, "gl_FrontMaterial");
+ add_uniform(material_parameters_type, "gl_BackMaterial");
+
+ add_uniform(array(type("gl_LightSourceParameters"),
+ state->Const.MaxLights),
+ "gl_LightSource");
+
+ const glsl_type *const light_model_products_type =
+ type("gl_LightModelProducts");
+ add_uniform(light_model_products_type, "gl_FrontLightModelProduct");
+ add_uniform(light_model_products_type, "gl_BackLightModelProduct");
+
+ const glsl_type *const light_products_type =
+ array(type("gl_LightProducts"), state->Const.MaxLights);
+ add_uniform(light_products_type, "gl_FrontLightProduct");
+ add_uniform(light_products_type, "gl_BackLightProduct");
+
+ add_uniform(array(vec4_t, state->Const.MaxTextureUnits),
+ "gl_TextureEnvColor");
+
+ const glsl_type *const texcoords_vec4 =
+ array(vec4_t, state->Const.MaxTextureCoords);
+ add_uniform(texcoords_vec4, "gl_EyePlaneS");
+ add_uniform(texcoords_vec4, "gl_EyePlaneT");
+ add_uniform(texcoords_vec4, "gl_EyePlaneR");
+ add_uniform(texcoords_vec4, "gl_EyePlaneQ");
+ add_uniform(texcoords_vec4, "gl_ObjectPlaneS");
+ add_uniform(texcoords_vec4, "gl_ObjectPlaneT");
+ add_uniform(texcoords_vec4, "gl_ObjectPlaneR");
+ add_uniform(texcoords_vec4, "gl_ObjectPlaneQ");
+
+ add_uniform(type("gl_FogParameters"), "gl_Fog");
+ }
+}
+
+
+/**
+ * Generate special variables which exist in all shaders.
+ */
+void
+builtin_variable_generator::generate_special_vars()
+{
+ if (state->ARB_shader_ballot_enable) {
+ add_system_value(SYSTEM_VALUE_SUBGROUP_SIZE, uint_t, "gl_SubGroupSizeARB");
+ add_system_value(SYSTEM_VALUE_SUBGROUP_INVOCATION, uint_t, "gl_SubGroupInvocationARB");
+ add_system_value(SYSTEM_VALUE_SUBGROUP_EQ_MASK, uint64_t, "gl_SubGroupEqMaskARB");
+ add_system_value(SYSTEM_VALUE_SUBGROUP_GE_MASK, uint64_t, "gl_SubGroupGeMaskARB");
+ add_system_value(SYSTEM_VALUE_SUBGROUP_GT_MASK, uint64_t, "gl_SubGroupGtMaskARB");
+ add_system_value(SYSTEM_VALUE_SUBGROUP_LE_MASK, uint64_t, "gl_SubGroupLeMaskARB");
+ add_system_value(SYSTEM_VALUE_SUBGROUP_LT_MASK, uint64_t, "gl_SubGroupLtMaskARB");
+ }
+}
+
+
+/**
+ * Generate variables which only exist in vertex shaders.
+ */
+void
+builtin_variable_generator::generate_vs_special_vars()
+{
+ ir_variable *var;
+
+ if (state->is_version(130, 300) || state->EXT_gpu_shader4_enable) {
+ add_system_value(SYSTEM_VALUE_VERTEX_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_VertexID");
+ }
+ if (state->is_version(460, 0)) {
+ add_system_value(SYSTEM_VALUE_BASE_VERTEX, int_t, "gl_BaseVertex");
+ add_system_value(SYSTEM_VALUE_BASE_INSTANCE, int_t, "gl_BaseInstance");
+ add_system_value(SYSTEM_VALUE_DRAW_ID, int_t, "gl_DrawID");
+ }
+ if (state->EXT_draw_instanced_enable && state->is_version(0, 100))
+ add_system_value(SYSTEM_VALUE_INSTANCE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_InstanceIDEXT");
+
+ if (state->ARB_draw_instanced_enable)
+ add_system_value(SYSTEM_VALUE_INSTANCE_ID, int_t, "gl_InstanceIDARB");
+
+ if (state->ARB_draw_instanced_enable || state->is_version(140, 300) ||
+ state->EXT_gpu_shader4_enable) {
+ add_system_value(SYSTEM_VALUE_INSTANCE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_InstanceID");
+ }
+ if (state->ARB_shader_draw_parameters_enable) {
+ add_system_value(SYSTEM_VALUE_BASE_VERTEX, int_t, "gl_BaseVertexARB");
+ add_system_value(SYSTEM_VALUE_BASE_INSTANCE, int_t, "gl_BaseInstanceARB");
+ add_system_value(SYSTEM_VALUE_DRAW_ID, int_t, "gl_DrawIDARB");
+ }
+ if (state->AMD_vertex_shader_layer_enable ||
+ state->ARB_shader_viewport_layer_array_enable ||
+ state->NV_viewport_array2_enable) {
+ var = add_output(VARYING_SLOT_LAYER, int_t, "gl_Layer");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ if (state->AMD_vertex_shader_viewport_index_enable ||
+ state->ARB_shader_viewport_layer_array_enable ||
+ state->NV_viewport_array2_enable) {
+ var = add_output(VARYING_SLOT_VIEWPORT, int_t, "gl_ViewportIndex");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ if (state->NV_viewport_array2_enable) {
+ /* From the NV_viewport_array2 specification:
+ *
+ * "The variable gl_ViewportMask[] is available as an output variable
+ * in the VTG languages. The array has ceil(v/32) elements where v is
+ * the maximum number of viewports supported by the implementation."
+ *
+ * Since no drivers expose more than 16 viewports, we can simply set the
+ * array size to 1 rather than computing it and dealing with the
+ * resulting varying-slot complications.
+ */
+ var = add_output(VARYING_SLOT_VIEWPORT_MASK, array(int_t, 1),
+ "gl_ViewportMask");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ if (compatibility) {
+ add_input(VERT_ATTRIB_POS, vec4_t, "gl_Vertex");
+ add_input(VERT_ATTRIB_NORMAL, vec3_t, "gl_Normal");
+ add_input(VERT_ATTRIB_COLOR0, vec4_t, "gl_Color");
+ add_input(VERT_ATTRIB_COLOR1, vec4_t, "gl_SecondaryColor");
+ add_input(VERT_ATTRIB_TEX0, vec4_t, "gl_MultiTexCoord0");
+ add_input(VERT_ATTRIB_TEX1, vec4_t, "gl_MultiTexCoord1");
+ add_input(VERT_ATTRIB_TEX2, vec4_t, "gl_MultiTexCoord2");
+ add_input(VERT_ATTRIB_TEX3, vec4_t, "gl_MultiTexCoord3");
+ add_input(VERT_ATTRIB_TEX4, vec4_t, "gl_MultiTexCoord4");
+ add_input(VERT_ATTRIB_TEX5, vec4_t, "gl_MultiTexCoord5");
+ add_input(VERT_ATTRIB_TEX6, vec4_t, "gl_MultiTexCoord6");
+ add_input(VERT_ATTRIB_TEX7, vec4_t, "gl_MultiTexCoord7");
+ add_input(VERT_ATTRIB_FOG, float_t, "gl_FogCoord");
+ }
+}
+
+
+/**
+ * Generate variables which only exist in tessellation control shaders.
+ */
+void
+builtin_variable_generator::generate_tcs_special_vars()
+{
+ add_system_value(SYSTEM_VALUE_PRIMITIVE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_PrimitiveID");
+ add_system_value(SYSTEM_VALUE_INVOCATION_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_InvocationID");
+ add_system_value(SYSTEM_VALUE_VERTICES_IN, int_t, GLSL_PRECISION_HIGH,
+ "gl_PatchVerticesIn");
+
+ add_output(VARYING_SLOT_TESS_LEVEL_OUTER, array(float_t, 4),
+ GLSL_PRECISION_HIGH, "gl_TessLevelOuter")->data.patch = 1;
+ add_output(VARYING_SLOT_TESS_LEVEL_INNER, array(float_t, 2),
+ GLSL_PRECISION_HIGH, "gl_TessLevelInner")->data.patch = 1;
+ /* XXX What to do if more than one of these extensions is enabled? */
+ int bbox_slot = state->ctx->Const.NoPrimitiveBoundingBoxOutput ? -1 :
+ VARYING_SLOT_BOUNDING_BOX0;
+ if (state->EXT_primitive_bounding_box_enable)
+ add_output(bbox_slot, array(vec4_t, 2), "gl_BoundingBoxEXT")
+ ->data.patch = 1;
+ if (state->OES_primitive_bounding_box_enable) {
+ add_output(bbox_slot, array(vec4_t, 2), GLSL_PRECISION_HIGH,
+ "gl_BoundingBoxOES")->data.patch = 1;
+ }
+ if (state->is_version(0, 320) || state->ARB_ES3_2_compatibility_enable) {
+ add_output(bbox_slot, array(vec4_t, 2), GLSL_PRECISION_HIGH,
+ "gl_BoundingBox")->data.patch = 1;
+ }
+
+ /* NOTE: These outputs are effectively pointless: writes to them never go
+ * anywhere. But the spec demands them, so we add them with a slot of -1,
+ * which sends the data nowhere.
+ */
+ if (state->NV_viewport_array2_enable) {
+ add_output(-1, int_t, "gl_Layer");
+ add_output(-1, int_t, "gl_ViewportIndex");
+ add_output(-1, array(int_t, 1), "gl_ViewportMask");
+ }
+}
+
+
+/**
+ * Generate variables which only exist in tessellation evaluation shaders.
+ */
+void
+builtin_variable_generator::generate_tes_special_vars()
+{
+ ir_variable *var;
+
+ add_system_value(SYSTEM_VALUE_PRIMITIVE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_PrimitiveID");
+ add_system_value(SYSTEM_VALUE_VERTICES_IN, int_t, GLSL_PRECISION_HIGH,
+ "gl_PatchVerticesIn");
+ add_system_value(SYSTEM_VALUE_TESS_COORD, vec3_t, GLSL_PRECISION_HIGH,
+ "gl_TessCoord");
+ if (this->state->ctx->Const.GLSLTessLevelsAsInputs) {
+ add_input(VARYING_SLOT_TESS_LEVEL_OUTER, array(float_t, 4),
+ GLSL_PRECISION_HIGH, "gl_TessLevelOuter")->data.patch = 1;
+ add_input(VARYING_SLOT_TESS_LEVEL_INNER, array(float_t, 2),
+ GLSL_PRECISION_HIGH, "gl_TessLevelInner")->data.patch = 1;
+ } else {
+ add_system_value(SYSTEM_VALUE_TESS_LEVEL_OUTER, array(float_t, 4),
+ GLSL_PRECISION_HIGH, "gl_TessLevelOuter");
+ add_system_value(SYSTEM_VALUE_TESS_LEVEL_INNER, array(float_t, 2),
+ GLSL_PRECISION_HIGH, "gl_TessLevelInner");
+ }
+ if (state->ARB_shader_viewport_layer_array_enable ||
+ state->NV_viewport_array2_enable) {
+ var = add_output(VARYING_SLOT_LAYER, int_t, "gl_Layer");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ var = add_output(VARYING_SLOT_VIEWPORT, int_t, "gl_ViewportIndex");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ if (state->NV_viewport_array2_enable) {
+ var = add_output(VARYING_SLOT_VIEWPORT_MASK, array(int_t, 1),
+ "gl_ViewportMask");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+}
+
+
+/**
+ * Generate variables which only exist in geometry shaders.
+ */
+void
+builtin_variable_generator::generate_gs_special_vars()
+{
+ ir_variable *var;
+
+ var = add_output(VARYING_SLOT_LAYER, int_t, GLSL_PRECISION_HIGH, "gl_Layer");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ if (state->is_version(410, 0) || state->ARB_viewport_array_enable ||
+ state->OES_viewport_array_enable) {
+ var = add_output(VARYING_SLOT_VIEWPORT, int_t, GLSL_PRECISION_HIGH,
+ "gl_ViewportIndex");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ if (state->NV_viewport_array2_enable) {
+ var = add_output(VARYING_SLOT_VIEWPORT_MASK, array(int_t, 1),
+ "gl_ViewportMask");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ if (state->is_version(400, 320) || state->ARB_gpu_shader5_enable ||
+ state->OES_geometry_shader_enable || state->EXT_geometry_shader_enable) {
+ add_system_value(SYSTEM_VALUE_INVOCATION_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_InvocationID");
+ }
+
+ /* Although gl_PrimitiveID appears in tessellation control and tessellation
+ * evaluation shaders, it has a different function there than it has in
+ * geometry shaders, so we treat it (and its counterpart gl_PrimitiveIDIn)
+ * as special geometry shader variables.
+ *
+ * Note that although the general convention of suffixing geometry shader
+ * input varyings with "In" was not adopted into GLSL 1.50, it is used in
+ * the specific case of gl_PrimitiveIDIn. So we don't need to treat
+ * gl_PrimitiveIDIn as an {ARB,EXT}_geometry_shader4-only variable.
+ */
+ var = add_input(VARYING_SLOT_PRIMITIVE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_PrimitiveIDIn");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ var = add_output(VARYING_SLOT_PRIMITIVE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_PrimitiveID");
+ var->data.interpolation = INTERP_MODE_FLAT;
+}
+
+
+/**
+ * Generate variables which only exist in fragment shaders.
+ */
+void
+builtin_variable_generator::generate_fs_special_vars()
+{
+ ir_variable *var;
+
+ int frag_coord_precision = (state->is_version(0, 300) ?
+ GLSL_PRECISION_HIGH :
+ GLSL_PRECISION_MEDIUM);
+
+ if (this->state->ctx->Const.GLSLFragCoordIsSysVal) {
+ add_system_value(SYSTEM_VALUE_FRAG_COORD, vec4_t, frag_coord_precision,
+ "gl_FragCoord");
+ } else {
+ add_input(VARYING_SLOT_POS, vec4_t, frag_coord_precision, "gl_FragCoord");
+ }
+
+ if (this->state->ctx->Const.GLSLFrontFacingIsSysVal) {
+ var = add_system_value(SYSTEM_VALUE_FRONT_FACE, bool_t, "gl_FrontFacing");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ } else {
+ var = add_input(VARYING_SLOT_FACE, bool_t, "gl_FrontFacing");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+
+ if (state->is_version(120, 100)) {
+ if (this->state->ctx->Const.GLSLPointCoordIsSysVal)
+ add_system_value(SYSTEM_VALUE_POINT_COORD, vec2_t,
+ GLSL_PRECISION_MEDIUM, "gl_PointCoord");
+ else
+ add_input(VARYING_SLOT_PNTC, vec2_t, GLSL_PRECISION_MEDIUM,
+ "gl_PointCoord");
+ }
+
+ if (state->has_geometry_shader() || state->EXT_gpu_shader4_enable) {
+ var = add_input(VARYING_SLOT_PRIMITIVE_ID, int_t, GLSL_PRECISION_HIGH,
+ "gl_PrimitiveID");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+
+ /* gl_FragColor and gl_FragData were deprecated starting in desktop GLSL
+ * 1.30, and were relegated to the compatibility profile in GLSL 4.20.
+ * They were removed from GLSL ES 3.00.
+ */
+ if (compatibility || !state->is_version(420, 300)) {
+ add_output(FRAG_RESULT_COLOR, vec4_t, GLSL_PRECISION_MEDIUM,
+ "gl_FragColor");
+ add_output(FRAG_RESULT_DATA0,
+ array(vec4_t, state->Const.MaxDrawBuffers),
+ GLSL_PRECISION_MEDIUM,
+ "gl_FragData");
+ }
+
+ if (state->has_framebuffer_fetch() && !state->is_version(130, 300)) {
+ ir_variable *const var =
+ add_output(FRAG_RESULT_DATA0,
+ array(vec4_t, state->Const.MaxDrawBuffers),
+ "gl_LastFragData");
+ var->data.precision = GLSL_PRECISION_MEDIUM;
+ var->data.read_only = 1;
+ var->data.fb_fetch_output = 1;
+ var->data.memory_coherent = 1;
+ }
+
+ if (state->es_shader && state->language_version == 100 && state->EXT_blend_func_extended_enable) {
+ add_index_output(FRAG_RESULT_COLOR, 1, vec4_t,
+ GLSL_PRECISION_MEDIUM, "gl_SecondaryFragColorEXT");
+ add_index_output(FRAG_RESULT_DATA0, 1,
+ array(vec4_t, state->Const.MaxDualSourceDrawBuffers),
+ GLSL_PRECISION_MEDIUM, "gl_SecondaryFragDataEXT");
+ }
+
+ /* gl_FragDepth has always been in desktop GLSL, but did not appear in GLSL
+ * ES 1.00.
+ */
+ if (state->is_version(110, 300)) {
+ add_output(FRAG_RESULT_DEPTH, float_t, GLSL_PRECISION_HIGH,
+ "gl_FragDepth");
+ }
+
+ if (state->EXT_frag_depth_enable)
+ add_output(FRAG_RESULT_DEPTH, float_t, "gl_FragDepthEXT");
+
+ if (state->ARB_shader_stencil_export_enable) {
+ ir_variable *const var =
+ add_output(FRAG_RESULT_STENCIL, int_t, "gl_FragStencilRefARB");
+ if (state->ARB_shader_stencil_export_warn)
+ var->enable_extension_warning("GL_ARB_shader_stencil_export");
+ }
+
+ if (state->AMD_shader_stencil_export_enable) {
+ ir_variable *const var =
+ add_output(FRAG_RESULT_STENCIL, int_t, "gl_FragStencilRefAMD");
+ if (state->AMD_shader_stencil_export_warn)
+ var->enable_extension_warning("GL_AMD_shader_stencil_export");
+ }
+
+ if (state->is_version(400, 320) ||
+ state->ARB_sample_shading_enable ||
+ state->OES_sample_variables_enable) {
+ add_system_value(SYSTEM_VALUE_SAMPLE_ID, int_t, GLSL_PRECISION_LOW,
+ "gl_SampleID");
+ add_system_value(SYSTEM_VALUE_SAMPLE_POS, vec2_t, GLSL_PRECISION_MEDIUM,
+ "gl_SamplePosition");
+ /* From the ARB_sample_shading specification:
+ * "The number of elements in the array is ceil(<s>/32), where
+ * <s> is the maximum number of color samples supported by the
+ * implementation."
+ * Since no drivers expose more than 32x MSAA, we can simply set
+ * the array size to 1 rather than computing it.
+ */
+ add_output(FRAG_RESULT_SAMPLE_MASK, array(int_t, 1),
+ GLSL_PRECISION_HIGH, "gl_SampleMask");
+ }
+
+ if (state->is_version(400, 320) ||
+ state->ARB_gpu_shader5_enable ||
+ state->OES_sample_variables_enable) {
+ add_system_value(SYSTEM_VALUE_SAMPLE_MASK_IN, array(int_t, 1),
+ GLSL_PRECISION_HIGH, "gl_SampleMaskIn");
+ }
+
+ if (state->is_version(430, 320) ||
+ state->ARB_fragment_layer_viewport_enable ||
+ state->OES_geometry_shader_enable ||
+ state->EXT_geometry_shader_enable) {
+ var = add_input(VARYING_SLOT_LAYER, int_t, GLSL_PRECISION_HIGH,
+ "gl_Layer");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+
+ if (state->is_version(430, 0) ||
+ state->ARB_fragment_layer_viewport_enable ||
+ state->OES_viewport_array_enable) {
+ var = add_input(VARYING_SLOT_VIEWPORT, int_t, "gl_ViewportIndex");
+ var->data.interpolation = INTERP_MODE_FLAT;
+ }
+
+ if (state->is_version(450, 310) || state->ARB_ES3_1_compatibility_enable)
+ add_system_value(SYSTEM_VALUE_HELPER_INVOCATION, bool_t, "gl_HelperInvocation");
+}
+
+
+/**
+ * Generate variables which only exist in compute shaders.
+ */
+void
+builtin_variable_generator::generate_cs_special_vars()
+{
+ add_system_value(SYSTEM_VALUE_LOCAL_INVOCATION_ID, uvec3_t,
+ "gl_LocalInvocationID");
+ add_system_value(SYSTEM_VALUE_WORK_GROUP_ID, uvec3_t, "gl_WorkGroupID");
+ add_system_value(SYSTEM_VALUE_NUM_WORK_GROUPS, uvec3_t, "gl_NumWorkGroups");
+
+ if (state->ARB_compute_variable_group_size_enable) {
+ add_system_value(SYSTEM_VALUE_LOCAL_GROUP_SIZE,
+ uvec3_t, "gl_LocalGroupSizeARB");
+ }
+
+ add_system_value(SYSTEM_VALUE_GLOBAL_INVOCATION_ID,
+ uvec3_t, "gl_GlobalInvocationID");
+ add_system_value(SYSTEM_VALUE_LOCAL_INVOCATION_INDEX,
+ uint_t, "gl_LocalInvocationIndex");
+}
+
+
+/**
+ * Add a single "varying" variable. The variable's type and direction (input
+ * or output) are adjusted as appropriate for the type of shader being
+ * compiled.
+ */
+void
+builtin_variable_generator::add_varying(int slot, const glsl_type *type,
+ int precision, const char *name)
+{
+ switch (state->stage) {
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_GEOMETRY:
+ this->per_vertex_in.add_field(slot, type, precision, name);
+ /* FALLTHROUGH */
+ case MESA_SHADER_VERTEX:
+ this->per_vertex_out.add_field(slot, type, precision, name);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ add_input(slot, type, precision, name);
+ break;
+ case MESA_SHADER_COMPUTE:
+ /* Compute shaders don't have varyings. */
+ break;
+ default:
+ break;
+ }
+}
+
+
+/**
+ * Generate variables that are used to communicate data from one shader stage
+ * to the next ("varyings").
+ */
+void
+builtin_variable_generator::generate_varyings()
+{
+ struct gl_shader_compiler_options *options =
+ &state->ctx->Const.ShaderCompilerOptions[state->stage];
+
+ /* gl_Position and gl_PointSize are not visible from fragment shaders. */
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ add_varying(VARYING_SLOT_POS, vec4_t, GLSL_PRECISION_HIGH, "gl_Position");
+ if (!state->es_shader ||
+ state->stage == MESA_SHADER_VERTEX ||
+ (state->stage == MESA_SHADER_GEOMETRY &&
+ (state->OES_geometry_point_size_enable ||
+ state->EXT_geometry_point_size_enable)) ||
+ ((state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL) &&
+ (state->OES_tessellation_point_size_enable ||
+ state->EXT_tessellation_point_size_enable))) {
+ add_varying(VARYING_SLOT_PSIZ,
+ float_t,
+ state->is_version(0, 300) ?
+ GLSL_PRECISION_HIGH :
+ GLSL_PRECISION_MEDIUM,
+ "gl_PointSize");
+ }
+ }
+
+ if (state->has_clip_distance()) {
+ add_varying(VARYING_SLOT_CLIP_DIST0, array(float_t, 0),
+ GLSL_PRECISION_HIGH, "gl_ClipDistance");
+ }
+ if (state->has_cull_distance()) {
+ add_varying(VARYING_SLOT_CULL_DIST0, array(float_t, 0),
+ GLSL_PRECISION_HIGH, "gl_CullDistance");
+ }
+
+ if (compatibility) {
+ add_varying(VARYING_SLOT_TEX0, array(vec4_t, 0), "gl_TexCoord");
+ add_varying(VARYING_SLOT_FOGC, float_t, "gl_FogFragCoord");
+ if (state->stage == MESA_SHADER_FRAGMENT) {
+ add_varying(VARYING_SLOT_COL0, vec4_t, "gl_Color");
+ add_varying(VARYING_SLOT_COL1, vec4_t, "gl_SecondaryColor");
+ } else {
+ add_varying(VARYING_SLOT_CLIP_VERTEX, vec4_t, "gl_ClipVertex");
+ add_varying(VARYING_SLOT_COL0, vec4_t, "gl_FrontColor");
+ add_varying(VARYING_SLOT_BFC0, vec4_t, "gl_BackColor");
+ add_varying(VARYING_SLOT_COL1, vec4_t, "gl_FrontSecondaryColor");
+ add_varying(VARYING_SLOT_BFC1, vec4_t, "gl_BackSecondaryColor");
+ }
+ }
+
+ /* Section 7.1 (Built-In Language Variables) of the GLSL 4.00 spec
+ * says:
+ *
+ * "In the tessellation control language, built-in variables are
+ * intrinsically declared as:
+ *
+ * in gl_PerVertex {
+ * vec4 gl_Position;
+ * float gl_PointSize;
+ * float gl_ClipDistance[];
+ * } gl_in[gl_MaxPatchVertices];"
+ */
+ if (state->stage == MESA_SHADER_TESS_CTRL ||
+ state->stage == MESA_SHADER_TESS_EVAL) {
+ const glsl_type *per_vertex_in_type =
+ this->per_vertex_in.construct_interface_instance();
+ add_variable("gl_in", array(per_vertex_in_type, state->Const.MaxPatchVertices),
+ GLSL_PRECISION_NONE, ir_var_shader_in, -1);
+ }
+ if (state->stage == MESA_SHADER_GEOMETRY) {
+ const glsl_type *per_vertex_in_type =
+ this->per_vertex_in.construct_interface_instance();
+ add_variable("gl_in", array(per_vertex_in_type, 0),
+ GLSL_PRECISION_NONE, ir_var_shader_in, -1);
+ }
+ if (state->stage == MESA_SHADER_TESS_CTRL) {
+ const glsl_type *per_vertex_out_type =
+ this->per_vertex_out.construct_interface_instance();
+ add_variable("gl_out", array(per_vertex_out_type, 0),
+ GLSL_PRECISION_NONE, ir_var_shader_out, -1);
+ }
+ if (state->stage == MESA_SHADER_VERTEX ||
+ state->stage == MESA_SHADER_TESS_EVAL ||
+ state->stage == MESA_SHADER_GEOMETRY) {
+ const glsl_type *per_vertex_out_type =
+ this->per_vertex_out.construct_interface_instance();
+ const glsl_struct_field *fields = per_vertex_out_type->fields.structure;
+ for (unsigned i = 0; i < per_vertex_out_type->length; i++) {
+ ir_variable *var =
+ add_variable(fields[i].name, fields[i].type, fields[i].precision,
+ ir_var_shader_out, fields[i].location);
+ var->data.interpolation = fields[i].interpolation;
+ var->data.centroid = fields[i].centroid;
+ var->data.sample = fields[i].sample;
+ var->data.patch = fields[i].patch;
+ var->init_interface_type(per_vertex_out_type);
+
+ var->data.invariant = fields[i].location == VARYING_SLOT_POS &&
+ options->PositionAlwaysInvariant;
+ }
+ }
+}
+
+
+}; /* Anonymous namespace */
+
+
+void
+_mesa_glsl_initialize_variables(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ builtin_variable_generator gen(instructions, state);
+
+ gen.generate_constants();
+ gen.generate_uniforms();
+ gen.generate_special_vars();
+
+ gen.generate_varyings();
+
+ switch (state->stage) {
+ case MESA_SHADER_VERTEX:
+ gen.generate_vs_special_vars();
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ gen.generate_tcs_special_vars();
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ gen.generate_tes_special_vars();
+ break;
+ case MESA_SHADER_GEOMETRY:
+ gen.generate_gs_special_vars();
+ break;
+ case MESA_SHADER_FRAGMENT:
+ gen.generate_fs_special_vars();
+ break;
+ case MESA_SHADER_COMPUTE:
+ gen.generate_cs_special_vars();
+ break;
+ default:
+ break;
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/float64.glsl b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/float64.glsl
new file mode 100644
index 0000000000..dd1179012c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/float64.glsl
@@ -0,0 +1,1818 @@
+/*
+ * The implementations contained in this file are heavily based on the
+ * implementations found in the Berkeley SoftFloat library. As such, they are
+ * licensed under the same 3-clause BSD license:
+ *
+ * License for Berkeley SoftFloat Release 3e
+ *
+ * John R. Hauser
+ * 2018 January 20
+ *
+ * The following applies to the whole of SoftFloat Release 3e as well as to
+ * each source file individually.
+ *
+ * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
+ * University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions, and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions, and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#version 430
+#extension GL_ARB_gpu_shader_int64 : enable
+#extension GL_ARB_shader_bit_encoding : enable
+#extension GL_EXT_shader_integer_mix : enable
+#extension GL_MESA_shader_integer_functions : enable
+
+#pragma warning(off)
+
+/* Software IEEE floating-point rounding mode.
+ * GLSL spec section "4.7.1 Range and Precision":
+ * The rounding mode cannot be set and is undefined.
+ * Here, however, we are able to fix the rounding mode at compile time.
+ */
+#define FLOAT_ROUND_NEAREST_EVEN 0
+#define FLOAT_ROUND_TO_ZERO 1
+#define FLOAT_ROUND_DOWN 2
+#define FLOAT_ROUND_UP 3
+#define FLOAT_ROUNDING_MODE FLOAT_ROUND_NEAREST_EVEN
+
+/* Relax propagation of NaN. Binary operations with a NaN source will still
+ * produce a NaN result, but it won't follow strict IEEE rules.
+ */
+#define RELAXED_NAN_PROPAGATION
+
+/* Absolute value of a Float64: clear the sign bit. */
+uint64_t
+__fabs64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ a.y &= 0x7FFFFFFFu;
+ return packUint2x32(a);
+}
+
+/* Returns true if the double-precision floating-point value `a' is a NaN;
+ * otherwise returns false.
+ */
+bool
+__is_nan(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ return (0xFFE00000u <= (a.y<<1)) &&
+ ((a.x != 0u) || ((a.y & 0x000FFFFFu) != 0u));
+}
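+
+/* Worked example (illustrative only): for the canonical quiet NaN
+ * 0x7FF8000000000000UL, a.y is 0x7FF80000u: (a.y << 1) is 0xFFF00000u, so
+ * the exponent test passes, and the fraction bits (0x00080000u) are
+ * nonzero, yielding true. For infinity, 0x7FF0000000000000UL, the exponent
+ * test also passes but both fraction terms are zero, yielding false.
+ */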
+
+/* Negation of a Float64: toggle the sign bit. */
+uint64_t
+__fneg64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ a.y ^= (1u << 31);
+ return packUint2x32(a);
+}
+
+uint64_t
+__fsign64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 retval;
+ retval.x = 0u;
+ retval.y = mix((a.y & 0x80000000u) | 0x3FF00000u, 0u, (a.y << 1 | a.x) == 0u);
+ return packUint2x32(retval);
+}
+
+/* Returns the fraction bits of the double-precision floating-point value `a'.*/
+uint
+__extractFloat64FracLo(uint64_t a)
+{
+ return unpackUint2x32(a).x;
+}
+
+uint
+__extractFloat64FracHi(uint64_t a)
+{
+ return unpackUint2x32(a).y & 0x000FFFFFu;
+}
+
+/* Returns the exponent bits of the double-precision floating-point value `a'.*/
+int
+__extractFloat64Exp(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ return int((a.y>>20) & 0x7FFu);
+}
+
+bool
+__feq64_nonnan(uint64_t __a, uint64_t __b)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+ return (a.x == b.x) &&
+ ((a.y == b.y) || ((a.x == 0u) && (((a.y | b.y)<<1) == 0u)));
+}
+
+/* Returns true if the double-precision floating-point value `a' is equal to the
+ * corresponding value `b', and false otherwise. The comparison is performed
+ * according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+bool
+__feq64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a) || __is_nan(b))
+ return false;
+
+ return __feq64_nonnan(a, b);
+}
+
+/* Returns true if the double-precision floating-point value `a' is not equal
+ * to the corresponding value `b', and false otherwise. The comparison is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+bool
+__fne64(uint64_t a, uint64_t b)
+{
+ if (__is_nan(a) || __is_nan(b))
+ return true;
+
+ return !__feq64_nonnan(a, b);
+}
+
+/* Returns the sign bit of the double-precision floating-point value `a'.*/
+uint
+__extractFloat64Sign(uint64_t a)
+{
+ return unpackUint2x32(a).y & 0x80000000u;
+}
+
+/* Returns true if the signed 64-bit value formed by concatenating `a0' and
+ * `a1' is less than the signed 64-bit value formed by concatenating `b0' and
+ * `b1'. Otherwise, returns false.
+ */
+bool
+ilt64(uint a0, uint a1, uint b0, uint b1)
+{
+ return (int(a0) < int(b0)) || ((a0 == b0) && (a1 < b1));
+}
+
+bool
+__flt64_nonnan(uint64_t __a, uint64_t __b)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+
+ /* IEEE 754 floating point numbers are specifically designed so that, with
+ * two exceptions, values can be compared by bit-casting to signed integers
+ * with the same number of bits.
+ *
+ * From https://en.wikipedia.org/wiki/IEEE_754-1985#Comparing_floating-point_numbers:
+ *
+ * When comparing as 2's-complement integers: If the sign bits differ,
+ * the negative number precedes the positive number, so 2's complement
+ * gives the correct result (except that negative zero and positive zero
+ * should be considered equal). If both values are positive, the 2's
+ * complement comparison again gives the correct result. Otherwise (two
+ * negative numbers), the correct FP ordering is the opposite of the 2's
+ * complement ordering.
+ *
+ * The logic implied by the above quotation is:
+ *
+ * !both_are_zero(a, b) && (both_negative(a, b) ? a > b : a < b)
+ *
+ * This is equivalent to
+ *
+ * fne(a, b) && (both_negative(a, b) ? a >= b : a < b)
+ *
+ * fne(a, b) && (both_negative(a, b) ? !(a < b) : a < b)
+ *
+ * fne(a, b) && ((both_negative(a, b) && !(a < b)) ||
+ * (!both_negative(a, b) && (a < b)))
+ *
+ * (A!|B)&(A|!B) is (A xor B) which is implemented here using !=.
+ *
+ * fne(a, b) && (both_negative(a, b) != (a < b))
+ */
+ bool lt = ilt64(a.y, a.x, b.y, b.x);
+ bool both_negative = (a.y & b.y & 0x80000000u) != 0;
+
+ return !__feq64_nonnan(__a, __b) && (lt != both_negative);
+}
+
+/* Returns true if the double-precision floating-point value `a' is less than
+ * the corresponding value `b', and false otherwise. The comparison is performed
+ * according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+bool
+__flt64(uint64_t a, uint64_t b)
+{
+ /* This weird layout matters. Doing the "obvious" thing results in extra
+ * flow control being inserted to implement the short-circuit evaluation
+ * rules. Flow control is bad!
+ */
+ bool x = !__is_nan(a);
+ bool y = !__is_nan(b);
+ bool z = __flt64_nonnan(a, b);
+
+ return (x && y && z);
+}
+
+/* Returns true if the double-precision floating-point value `a' is greater
+ * than or equal to the corresponding value `b', and false otherwise. The
+ * comparison is performed according to the IEEE Standard for Floating-Point
+ * Arithmetic.
+ */
+bool
+__fge64(uint64_t a, uint64_t b)
+{
+ /* This weird layout matters. Doing the "obvious" thing results in extra
+ * flow control being inserted to implement the short-circuit evaluation
+ * rules. Flow control is bad!
+ */
+ bool x = !__is_nan(a);
+ bool y = !__is_nan(b);
+ bool z = !__flt64_nonnan(a, b);
+
+ return (x && y && z);
+}
+
+uint64_t
+__fsat64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+
+ /* fsat(NaN) should be zero. */
+ if (__is_nan(__a) || int(a.y) < 0)
+ return 0ul;
+
+ /* IEEE 754 floating point numbers are specifically designed so that, with
+ * two exceptions, values can be compared by bit-casting to signed integers
+ * with the same number of bits.
+ *
+ * From https://en.wikipedia.org/wiki/IEEE_754-1985#Comparing_floating-point_numbers:
+ *
+ * When comparing as 2's-complement integers: If the sign bits differ,
+ * the negative number precedes the positive number, so 2's complement
+ * gives the correct result (except that negative zero and positive zero
+ * should be considered equal). If both values are positive, the 2's
+ * complement comparison again gives the correct result. Otherwise (two
+ * negative numbers), the correct FP ordering is the opposite of the 2's
+ * complement ordering.
+ *
+ * We know that both values are not negative, and we know that at least one
+ * value is not zero. Therefore, we can just use the 2's complement
+ * comparison ordering.
+ */
+ if (ilt64(0x3FF00000, 0x00000000, a.y, a.x))
+ return 0x3FF0000000000000ul;
+
+ return __a;
+}
+
+/* Adds the 64-bit value formed by concatenating `a0' and `a1' to the 64-bit
+ * value formed by concatenating `b0' and `b1'. Addition is modulo 2^64, so
+ * any carry out is lost. The result is broken into two 32-bit pieces which
+ * are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__add64(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z1 = a1 + b1;
+ z1Ptr = z1;
+ z0Ptr = a0 + b0 + uint(z1 < a1);
+}
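+
+/* Worked example (illustrative only): adding 0x00000000FFFFFFFF and 1
+ * gives z1 = 0xFFFFFFFFu + 1u = 0u; the wrap-around makes (z1 < a1) true,
+ * so the carry propagates and z0 = 0u + 0u + 1u = 1u. The packed result
+ * is 0x0000000100000000, i.e. 2^32.
+ */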
+
+
+/* Subtracts the 64-bit value formed by concatenating `b0' and `b1' from the
+ * 64-bit value formed by concatenating `a0' and `a1'. Subtraction is modulo
+ * 2^64, so any borrow out (carry out) is lost. The result is broken into two
+ * 32-bit pieces which are stored at the locations pointed to by `z0Ptr' and
+ * `z1Ptr'.
+ */
+void
+__sub64(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ z1Ptr = a1 - b1;
+ z0Ptr = a0 - b0 - uint(a1 < b1);
+}
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
+ * number of bits given in `count'. If any nonzero bits are shifted off, they
+ * are "jammed" into the least significant bit of the result by setting the
+ * least significant bit to 1. The value of `count' can be arbitrarily large;
+ * in particular, if `count' is greater than 64, the result will be either 0
+ * or 1, depending on whether the concatenation of `a0' and `a1' is zero or
+ * nonzero. The result is broken into two 32-bit pieces which are stored at
+ * the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shift64RightJamming(uint a0,
+ uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z0;
+ uint z1;
+ int negCount = (-count) & 31;
+
+ z0 = mix(0u, a0, count == 0);
+ z0 = mix(z0, (a0 >> count), count < 32);
+
+ z1 = uint((a0 | a1) != 0u); /* count >= 64 */
+ uint z1_lt64 = (a0>>(count & 31)) | uint(((a0<<negCount) | a1) != 0u);
+ z1 = mix(z1, z1_lt64, count < 64);
+ z1 = mix(z1, (a0 | uint(a1 != 0u)), count == 32);
+ uint z1_lt32 = (a0<<negCount) | (a1>>count) | uint ((a1<<negCount) != 0u);
+ z1 = mix(z1, z1_lt32, count < 32);
+ z1 = mix(z1, a1, count == 0);
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
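+
+/* Worked example (illustrative only): __shift64RightJamming(0u, 1u, 1,
+ * z0, z1) yields z0 = 0u and z1 = 1u. The only set bit is shifted out but
+ * "jammed" back into the least significant bit, so downstream rounding
+ * can still tell that the discarded bits were nonzero.
+ */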
+
+/* Shifts the 96-bit value formed by concatenating `a0', `a1', and `a2' right
+ * by 32 _plus_ the number of bits given in `count'. The shifted result is
+ * at most 64 nonzero bits; these are broken into two 32-bit pieces which are
+ * stored at the locations pointed to by `z0Ptr' and `z1Ptr'. The bits shifted
+ * off form a third 32-bit result as follows: The _last_ bit shifted off is
+ * the most-significant bit of the extra result, and the other 31 bits of the
+ * extra result are all zero if and only if _all_but_the_last_ bits shifted off
+ * were all zero. This extra result is stored in the location pointed to by
+ * `z2Ptr'. The value of `count' can be arbitrarily large.
+ * (This routine makes more sense if `a0', `a1', and `a2' are considered
+ * to form a fixed-point value with binary point between `a1' and `a2'. This
+ * fixed-point value is shifted right by the number of bits given in `count',
+ * and the integer part of the result is returned at the locations pointed to
+ * by `z0Ptr' and `z1Ptr'. The fractional part of the result may be slightly
+ * corrupted as described above, and is returned at the location pointed to by
+ * `z2Ptr'.)
+ */
+void
+__shift64ExtraRightJamming(uint a0, uint a1, uint a2,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z0 = 0u;
+ uint z1;
+ uint z2;
+ int negCount = (-count) & 31;
+
+ z2 = mix(uint(a0 != 0u), a0, count == 64);
+ z2 = mix(z2, a0 << negCount, count < 64);
+ z2 = mix(z2, a1 << negCount, count < 32);
+
+ z1 = mix(0u, (a0 >> (count & 31)), count < 64);
+ z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
+
+ a2 = mix(a2 | a1, a2, count < 32);
+ z0 = mix(z0, a0 >> count, count < 32);
+ z2 |= uint(a2 != 0u);
+
+ z0 = mix(z0, 0u, (count == 32));
+ z1 = mix(z1, a0, (count == 32));
+ z2 = mix(z2, a1, (count == 32));
+ z0 = mix(z0, a0, (count == 0));
+ z1 = mix(z1, a1, (count == 0));
+ z2 = mix(z2, a2, (count == 0));
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' left by the
+ * number of bits given in `count'. Any bits shifted off are lost. The value
+ * of `count' must be less than 32. The result is broken into two 32-bit
+ * pieces which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shortShift64Left(uint a0, uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ z1Ptr = a1<<count;
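+ /* When count == 0, ((-count) & 31) is also 0, so the OR term would fold
+ * a1 into the high word; the mix() below therefore selects the unshifted
+ * a0 in that case.
+ */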
+ z0Ptr = mix((a0 << count | (a1 >> ((-count) & 31))), a0, count == 0);
+}
+
+/* Packs the sign `zSign', the exponent `zExp', and the significand formed by
+ * the concatenation of `zFrac0' and `zFrac1' into a double-precision floating-
+ * point value, returning the result. After being shifted into the proper
+ * positions, the three fields `zSign', `zExp', and `zFrac0' are simply added
+ * together to form the most significant 32 bits of the result. This means
+ * that any integer portion of `zFrac0' will be added into the exponent. Since
+ * a properly normalized significand will have an integer portion equal to 1,
+ * the `zExp' input should be 1 less than the desired result exponent whenever
+ * `zFrac0' and `zFrac1' concatenated form a complete, normalized significand.
+ */
+uint64_t
+__packFloat64(uint zSign, int zExp, uint zFrac0, uint zFrac1)
+{
+ uvec2 z;
+
+ z.y = zSign + (uint(zExp) << 20) + zFrac0;
+ z.x = zFrac1;
+ return packUint2x32(z);
+}
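+
+/* Worked example (illustrative only): 1.0 is 0x3FF0000000000000. A caller
+ * passes zSign = 0u, zExp = 0x3FE (one less than the biased exponent) and
+ * zFrac0 = 0x00100000u (the explicit integer bit), so the high word is
+ * (0x3FE << 20) + 0x00100000u = 0x3FF00000u, as expected.
+ */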
+
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and extended significand formed by the concatenation of `zFrac0', `zFrac1',
+ * and `zFrac2', and returns the proper double-precision floating-point value
+ * corresponding to the abstract input. Ordinarily, the abstract value is
+ * simply rounded and packed into the double-precision format, with the inexact
+ * exception raised if the abstract input cannot be represented exactly.
+ * However, if the abstract value is too large, the overflow and inexact
+ * exceptions are raised and an infinity or maximal finite value is returned.
+ * If the abstract value is too small, the input value is rounded to a
+ * subnormal number, and the underflow and inexact exceptions are raised if the
+ * abstract input cannot be represented exactly as a subnormal double-precision
+ * floating-point number.
+ * The input significand must be normalized or smaller. If the input
+ * significand is not normalized, `zExp' must be 0; in that case, the result
+ * returned is a subnormal number, and it must not require rounding. In the
+ * usual case that the input significand is normalized, `zExp' must be 1 less
+ * than the "true" floating-point exponent. The handling of underflow and
+ * overflow follows the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint64_t
+__roundAndPackFloat64(uint zSign,
+ int zExp,
+ uint zFrac0,
+ uint zFrac1,
+ uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+ increment = int(zFrac2) < 0;
+ if (!roundNearestEven) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
+ increment = false;
+ } else {
+ if (zSign != 0u) {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u);
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+ }
+ if (0x7FD <= zExp) {
+ if ((0x7FD < zExp) ||
+ ((zExp == 0x7FD) &&
+ (0x001FFFFFu == zFrac0 && 0xFFFFFFFFu == zFrac1) &&
+ increment)) {
+ if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) ||
+ ((zSign != 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)) ||
+ ((zSign == 0u) && (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN))) {
+ return __packFloat64(zSign, 0x7FE, 0x000FFFFFu, 0xFFFFFFFFu);
+ }
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ }
+
+ if (zExp < 0) {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, -zExp, zFrac0, zFrac1, zFrac2);
+ zExp = 0;
+ if (roundNearestEven) {
+ /* `zFrac2 < 0u' is always false for a uint; the intended test, as in
+ * the non-subnormal path above, is whether the top (round) bit is set.
+ */
+ increment = int(zFrac2) < 0;
+ } else {
+ if (zSign != 0u) {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u);
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+ }
+
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ zFrac1 &= ~((zFrac2 + uint(zFrac2 == 0u)) & uint(roundNearestEven));
+ } else {
+ zExp = mix(zExp, 0, (zFrac0 | zFrac1) == 0u);
+ }
+ return __packFloat64(zSign, zExp, zFrac0, zFrac1);
+}
+
+uint64_t
+__roundAndPackUInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+
+ /* Round to nearest: increment when the discarded bits in zFrac2 amount
+ * to at least half a ULP, i.e. when the top bit is set.
+ */
+ increment = int(zFrac2) < 0;
+
+ if (!roundNearestEven) {
+ if (zSign != 0u) {
+ if ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) && (zFrac2 != 0u)) {
+ increment = false;
+ }
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ if ((zFrac0 | zFrac1) != 0u)
+ zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);
+ }
+ return mix(packUint2x32(uvec2(zFrac1, zFrac0)), default_nan,
+ (zSign != 0u && (zFrac0 | zFrac1) != 0u));
+}
+
+int64_t
+__roundAndPackInt64(uint zSign, uint zFrac0, uint zFrac1, uint zFrac2)
+{
+ bool roundNearestEven;
+ bool increment;
+ int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
+ int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+
+ /* Round to nearest: increment when the discarded bits in zFrac2 amount
+ * to at least half a ULP, i.e. when the top bit is set.
+ */
+ increment = int(zFrac2) < 0;
+
+ if (!roundNearestEven) {
+ if (zSign != 0u) {
+ increment = ((FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) &&
+ (zFrac2 != 0u));
+ } else {
+ increment = (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP) &&
+ (zFrac2 != 0u);
+ }
+ }
+
+ if (increment) {
+ __add64(zFrac0, zFrac1, 0u, 1u, zFrac0, zFrac1);
+ if ((zFrac0 | zFrac1) != 0u)
+ zFrac1 &= ~(1u) + uint(zFrac2 == 0u) & uint(roundNearestEven);
+ }
+
+ int64_t absZ = mix(int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
+ -int64_t(packUint2x32(uvec2(zFrac1, zFrac0))),
+ zSign != 0u);
+ int64_t nan = mix(default_PosNaN, default_NegNaN, zSign != 0u);
+ return mix(absZ, nan, ((zSign != 0u) != (absZ < 0)) && bool(absZ));
+}
+
+/* Returns the number of leading 0 bits before the most-significant 1 bit of
+ * `a'. If `a' is zero, 32 is returned.
+ */
+int
+__countLeadingZeros32(uint a)
+{
+ return 31 - findMSB(a);
+}
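+
+/* Illustrative check: findMSB(0u) is -1, so a zero input yields 32, and
+ * __countLeadingZeros32(1u) yields 31.
+ */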
+
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and significand formed by the concatenation of `zSig0' and `zSig1', and
+ * returns the proper double-precision floating-point value corresponding
+ * to the abstract input. This routine is just like `__roundAndPackFloat64'
+ * except that the input significand has fewer bits and does not have to be
+ * normalized. In all cases, `zExp' must be 1 less than the "true" floating-
+ * point exponent.
+ */
+uint64_t
+__normalizeRoundAndPackFloat64(uint zSign,
+ int zExp,
+ uint zFrac0,
+ uint zFrac1)
+{
+ int shiftCount;
+ uint zFrac2;
+
+ if (zFrac0 == 0u) {
+ zExp -= 32;
+ zFrac0 = zFrac1;
+ zFrac1 = 0u;
+ }
+
+ shiftCount = __countLeadingZeros32(zFrac0) - 11;
+ if (0 <= shiftCount) {
+ zFrac2 = 0u;
+ __shortShift64Left(zFrac0, zFrac1, shiftCount, zFrac0, zFrac1);
+ } else {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, 0u, -shiftCount, zFrac0, zFrac1, zFrac2);
+ }
+ zExp -= shiftCount;
+ return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
+}
+
+/* Takes two double-precision floating-point values `a' and `b', one of which
+ * is a NaN, and returns the appropriate NaN result.
+ */
+uint64_t
+__propagateFloat64NaN(uint64_t __a, uint64_t __b)
+{
+#if defined RELAXED_NAN_PROPAGATION
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+
+ return packUint2x32(uvec2(a.x | b.x, a.y | b.y));
+#else
+ bool aIsNaN = __is_nan(__a);
+ bool bIsNaN = __is_nan(__b);
+ uvec2 a = unpackUint2x32(__a);
+ uvec2 b = unpackUint2x32(__b);
+ a.y |= 0x00080000u;
+ b.y |= 0x00080000u;
+
+ return packUint2x32(mix(b, mix(a, b, bvec2(bIsNaN, bIsNaN)), bvec2(aIsNaN, aIsNaN)));
+#endif
+}
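+
+/* A note on the relaxed path (illustrative reasoning): because at least
+ * one operand is a NaN, the bitwise OR preserves an all-ones exponent
+ * field and at least one set fraction bit, so the result is always some
+ * NaN, although its payload does not follow the IEEE selection rules.
+ */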
+
+/* If a shader is in the soft-fp64 path, it almost certainly has register
+ * pressure problems. Choose a method to exchange two values that does not
+ * require a temporary.
+ */
+#define EXCHANGE(a, b) \
+ do { \
+ a ^= b; \
+ b ^= a; \
+ a ^= b; \
+ } while (false)
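+
+/* Caveat: an XOR swap zeroes both operands when `a' and `b' name the same
+ * lvalue; EXCHANGE is only ever applied below to distinct local variables.
+ */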
+
+/* Returns the result of adding the double-precision floating-point values
+ * `a' and `b'. The operation is performed according to the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+uint64_t
+__fadd64(uint64_t a, uint64_t b)
+{
+ uint aSign = __extractFloat64Sign(a);
+ uint bSign = __extractFloat64Sign(b);
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ uint bFracLo = __extractFloat64FracLo(b);
+ uint bFracHi = __extractFloat64FracHi(b);
+ int aExp = __extractFloat64Exp(a);
+ int bExp = __extractFloat64Exp(b);
+ int expDiff = aExp - bExp;
+ if (aSign == bSign) {
+ uint zFrac0;
+ uint zFrac1;
+ uint zFrac2;
+ int zExp;
+
+ if (expDiff == 0) {
+ if (aExp == 0x7FF) {
+ bool propagate = ((aFracHi | bFracHi) | (aFracLo | bFracLo)) != 0u;
+ return mix(a, __propagateFloat64NaN(a, b), propagate);
+ }
+ __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ if (aExp == 0)
+ return __packFloat64(aSign, 0, zFrac0, zFrac1);
+ zFrac2 = 0u;
+ zFrac0 |= 0x00200000u;
+ zExp = aExp;
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ } else {
+ if (expDiff < 0) {
+ EXCHANGE(aFracHi, bFracHi);
+ EXCHANGE(aFracLo, bFracLo);
+ EXCHANGE(aExp, bExp);
+ }
+
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo) != 0u;
+ return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
+ }
+
+ expDiff = mix(abs(expDiff), abs(expDiff) - 1, bExp == 0);
+ bFracHi = mix(bFracHi | 0x00100000u, bFracHi, bExp == 0);
+ __shift64ExtraRightJamming(
+ bFracHi, bFracLo, 0u, expDiff, bFracHi, bFracLo, zFrac2);
+ zExp = aExp;
+
+ aFracHi |= 0x00100000u;
+ __add64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ --zExp;
+ if (!(zFrac0 < 0x00200000u)) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ ++zExp;
+ }
+ }
+ return __roundAndPackFloat64(aSign, zExp, zFrac0, zFrac1, zFrac2);
+
+ } else {
+ int zExp;
+
+ __shortShift64Left(aFracHi, aFracLo, 10, aFracHi, aFracLo);
+ __shortShift64Left(bFracHi, bFracLo, 10, bFracHi, bFracLo);
+ if (expDiff != 0) {
+ uint zFrac0;
+ uint zFrac1;
+
+ if (expDiff < 0) {
+ EXCHANGE(aFracHi, bFracHi);
+ EXCHANGE(aFracLo, bFracLo);
+ EXCHANGE(aExp, bExp);
+ aSign ^= 0x80000000u;
+ }
+
+ if (aExp == 0x7FF) {
+ bool propagate = (aFracHi | aFracLo) != 0u;
+ return mix(__packFloat64(aSign, 0x7ff, 0u, 0u), __propagateFloat64NaN(a, b), propagate);
+ }
+
+ expDiff = mix(abs(expDiff), abs(expDiff) - 1, bExp == 0);
+ bFracHi = mix(bFracHi | 0x40000000u, bFracHi, bExp == 0);
+ __shift64RightJamming(bFracHi, bFracLo, expDiff, bFracHi, bFracLo);
+ aFracHi |= 0x40000000u;
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ zExp = aExp;
+ --zExp;
+ return __normalizeRoundAndPackFloat64(aSign, zExp - 10, zFrac0, zFrac1);
+ }
+ if (aExp == 0x7FF) {
+ bool propagate = ((aFracHi | bFracHi) | (aFracLo | bFracLo)) != 0u;
+ return mix(0xFFFFFFFFFFFFFFFFUL, __propagateFloat64NaN(a, b), propagate);
+ }
+ bExp = mix(bExp, 1, aExp == 0);
+ aExp = mix(aExp, 1, aExp == 0);
+
+ uint zFrac0;
+ uint zFrac1;
+ uint sign_of_difference = 0;
+ if (bFracHi < aFracHi) {
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ } else if (aFracHi < bFracHi) {
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ sign_of_difference = 0x80000000;
+ } else if (bFracLo <= aFracLo) {
+ /* It is possible that zFrac0 and zFrac1 may be zero after this. */
+ __sub64(aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1);
+ } else {
+ __sub64(bFracHi, bFracLo, aFracHi, aFracLo, zFrac0, zFrac1);
+ sign_of_difference = 0x80000000;
+ }
+ zExp = mix(bExp, aExp, sign_of_difference == 0u);
+ aSign ^= sign_of_difference;
+ uint64_t retval_0 = __packFloat64(uint(FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN) << 31, 0, 0u, 0u);
+ uint64_t retval_1 = __normalizeRoundAndPackFloat64(aSign, zExp - 11, zFrac0, zFrac1);
+ return mix(retval_0, retval_1, zFrac0 != 0u || zFrac1 != 0u);
+ }
+}
+
+/* Multiplies the 64-bit value formed by concatenating `a0' and `a1' to the
+ * 64-bit value formed by concatenating `b0' and `b1' to obtain a 128-bit
+ * product. The product is broken into four 32-bit pieces which are stored at
+ * the locations pointed to by `z0Ptr', `z1Ptr', `z2Ptr', and `z3Ptr'.
+ */
+void
+__mul64To128(uint a0, uint a1, uint b0, uint b1,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr,
+ out uint z3Ptr)
+{
+ uint z0 = 0u;
+ uint z1 = 0u;
+ uint z2 = 0u;
+ uint z3 = 0u;
+ uint more1 = 0u;
+ uint more2 = 0u;
+
+ umulExtended(a1, b1, z2, z3);
+ umulExtended(a1, b0, z1, more2);
+ __add64(z1, more2, 0u, z2, z1, z2);
+ umulExtended(a0, b0, z0, more1);
+ __add64(z0, more1, 0u, z1, z0, z1);
+ umulExtended(a0, b1, more1, more2);
+ __add64(more1, more2, 0u, z2, more1, z2);
+ __add64(z0, z1, 0u, more1, z0, z1);
+ z3Ptr = z3;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
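+
+/* Worked example (illustrative only): with a0 = b0 = 1u and a1 = b1 = 0u
+ * (both operands equal to 2^32), every partial product is zero except
+ * umulExtended(a0, b0), whose low word propagates into z1, giving z1 = 1u
+ * and z0 = z2 = z3 = 0u: the 128-bit result is 2^64.
+ */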
+
+/* Normalizes the subnormal double-precision floating-point value represented
+ * by the denormalized significand formed by the concatenation of `aFrac0' and
+ * `aFrac1'. The normalized exponent is stored at the location pointed to by
+ * `zExpPtr'. The most significant 21 bits of the normalized significand are
+ * stored at the location pointed to by `zFrac0Ptr', and the least significant
+ * 32 bits of the normalized significand are stored at the location pointed to
+ * by `zFrac1Ptr'.
+ */
+void
+__normalizeFloat64Subnormal(uint aFrac0, uint aFrac1,
+ out int zExpPtr,
+ out uint zFrac0Ptr,
+ out uint zFrac1Ptr)
+{
+ int shiftCount;
+ uint temp_zfrac0, temp_zfrac1;
+ shiftCount = __countLeadingZeros32(mix(aFrac0, aFrac1, aFrac0 == 0u)) - 11;
+ zExpPtr = mix(1 - shiftCount, -shiftCount - 31, aFrac0 == 0u);
+
+ temp_zfrac0 = mix(aFrac1<<shiftCount, aFrac1>>(-shiftCount), shiftCount < 0);
+ temp_zfrac1 = mix(0u, aFrac1<<(shiftCount & 31), shiftCount < 0);
+
+ __shortShift64Left(aFrac0, aFrac1, shiftCount, zFrac0Ptr, zFrac1Ptr);
+
+ zFrac0Ptr = mix(zFrac0Ptr, temp_zfrac0, aFrac0 == 0);
+ zFrac1Ptr = mix(zFrac1Ptr, temp_zfrac1, aFrac0 == 0);
+}
+
+/* Returns the result of multiplying the double-precision floating-point values
+ * `a' and `b'. The operation is performed according to the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+uint64_t
+__fmul64(uint64_t a, uint64_t b)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint zFrac3 = 0u;
+ int zExp;
+
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ uint bFracLo = __extractFloat64FracLo(b);
+ uint bFracHi = __extractFloat64FracHi(b);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ int bExp = __extractFloat64Exp(b);
+ uint bSign = __extractFloat64Sign(b);
+ uint zSign = aSign ^ bSign;
+ if (aExp == 0x7FF) {
+ if (((aFracHi | aFracLo) != 0u) ||
+ ((bExp == 0x7FF) && ((bFracHi | bFracLo) != 0u))) {
+ return __propagateFloat64NaN(a, b);
+ }
+ if ((uint(bExp) | bFracHi | bFracLo) == 0u)
+ return 0xFFFFFFFFFFFFFFFFUL;
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (bExp == 0x7FF) {
+ /* a cannot be NaN, but is b NaN? */
+ if ((bFracHi | bFracLo) != 0u)
+#if defined RELAXED_NAN_PROPAGATION
+ return b;
+#else
+ return __propagateFloat64NaN(a, b);
+#endif
+ if ((uint(aExp) | aFracHi | aFracLo) == 0u)
+ return 0xFFFFFFFFFFFFFFFFUL;
+ return __packFloat64(zSign, 0x7FF, 0u, 0u);
+ }
+ if (aExp == 0) {
+ if ((aFracHi | aFracLo) == 0u)
+ return __packFloat64(zSign, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
+ }
+ if (bExp == 0) {
+ if ((bFracHi | bFracLo) == 0u)
+ return __packFloat64(zSign, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(bFracHi, bFracLo, bExp, bFracHi, bFracLo);
+ }
+ zExp = aExp + bExp - 0x400;
+ aFracHi |= 0x00100000u;
+ __shortShift64Left(bFracHi, bFracLo, 12, bFracHi, bFracLo);
+ __mul64To128(
+ aFracHi, aFracLo, bFracHi, bFracLo, zFrac0, zFrac1, zFrac2, zFrac3);
+ __add64(zFrac0, zFrac1, aFracHi, aFracLo, zFrac0, zFrac1);
+ zFrac2 |= uint(zFrac3 != 0u);
+ if (0x00200000u <= zFrac0) {
+ __shift64ExtraRightJamming(
+ zFrac0, zFrac1, zFrac2, 1, zFrac0, zFrac1, zFrac2);
+ ++zExp;
+ }
+ return __roundAndPackFloat64(zSign, zExp, zFrac0, zFrac1, zFrac2);
+}
+
+uint64_t
+__ffma64(uint64_t a, uint64_t b, uint64_t c)
+{
+ return __fadd64(__fmul64(a, b), c);
+}
+
+/* Shifts the 64-bit value formed by concatenating `a0' and `a1' right by the
+ * number of bits given in `count'. Any bits shifted off are lost. The value
+ * of `count' can be arbitrarily large; in particular, if `count' is greater
+ * than 64, the result will be 0. The result is broken into two 32-bit pieces
+ * which are stored at the locations pointed to by `z0Ptr' and `z1Ptr'.
+ */
+void
+__shift64Right(uint a0, uint a1,
+ int count,
+ out uint z0Ptr,
+ out uint z1Ptr)
+{
+ uint z0;
+ uint z1;
+ int negCount = (-count) & 31;
+
+ z0 = 0u;
+ z0 = mix(z0, (a0 >> count), count < 32);
+ z0 = mix(z0, a0, count == 0);
+
+ z1 = mix(0u, (a0 >> (count & 31)), count < 64);
+ z1 = mix(z1, (a0<<negCount) | (a1>>count), count < 32);
+ /* A zero-bit shift must leave the low word a1 unchanged. */
+ z1 = mix(z1, a1, count == 0);
+
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the unsigned integer format. The conversion is performed according
+ * to the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint
+__fp64_to_uint(uint64_t a)
+{
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+
+ if ((aExp == 0x7FF) && ((aFracHi | aFracLo) != 0u))
+ return 0xFFFFFFFFu;
+
+ aFracHi |= mix(0u, 0x00100000u, aExp != 0);
+
+ int shiftDist = 0x427 - aExp;
+ if (0 < shiftDist)
+ __shift64RightJamming(aFracHi, aFracLo, shiftDist, aFracHi, aFracLo);
+
+ if ((aFracHi & 0xFFFFF000u) != 0u)
+ return mix(~0u, 0u, aSign != 0u);
+
+ uint z = 0u;
+ uint zero = 0u;
+ __shift64Right(aFracHi, aFracLo, 12, zero, z);
+
+ uint expt = mix(~0u, 0u, aSign != 0u);
+
+ return mix(z, expt, (aSign != 0u) && (z != 0u));
+}
+
+uint64_t
+__uint_to_fp64(uint a)
+{
+ if (a == 0u)
+ return 0ul;
+
+ int shiftDist = __countLeadingZeros32(a) + 21;
+
+ uint aHigh = 0u;
+ uint aLow = 0u;
+ int negCount = (-shiftDist) & 31;
+
+ aHigh = mix(0u, a << (shiftDist - 32), shiftDist < 64);
+ aLow = 0u;
+ aHigh = mix(aHigh, 0u, shiftDist == 0);
+ aLow = mix(aLow, a, shiftDist == 0);
+ aHigh = mix(aHigh, a >> negCount, shiftDist < 32);
+ aLow = mix(aLow, a << shiftDist, shiftDist < 32);
+
+ return __packFloat64(0u, 0x432 - shiftDist, aHigh, aLow);
+}
+
+uint64_t
+__uint64_to_fp64(uint64_t a)
+{
+ if (a == 0u)
+ return 0ul;
+
+ uvec2 aFrac = unpackUint2x32(a);
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+
+ if ((aFracHi & 0x80000000u) != 0u) {
+ __shift64RightJamming(aFracHi, aFracLo, 1, aFracHi, aFracLo);
+ return __roundAndPackFloat64(0, 0x433, aFracHi, aFracLo, 0u);
+ } else {
+ return __normalizeRoundAndPackFloat64(0, 0x432, aFrac.y, aFrac.x);
+ }
+}
+
+uint64_t
+__fp64_to_uint64(uint64_t a)
+{
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ uint zFrac2 = 0u;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+
+ aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
+ int shiftCount = 0x433 - aExp;
+
+   if (shiftCount <= 0) {
+ if (shiftCount < -11 && aExp == 0x7FF) {
+ if ((aFracHi | aFracLo) != 0u)
+ return __propagateFloat64NaN(a, a);
+ return mix(default_nan, a, aSign == 0u);
+ }
+ __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
+ } else {
+ __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
+ aFracHi, aFracLo, zFrac2);
+ }
+ return __roundAndPackUInt64(aSign, aFracHi, aFracLo, zFrac2);
+}
+
+int64_t
+__fp64_to_int64(uint64_t a)
+{
+ uint zFrac2 = 0u;
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
+ int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
+
+ aFracHi = mix(aFracHi, aFracHi | 0x00100000u, aExp != 0);
+ int shiftCount = 0x433 - aExp;
+
+ if (shiftCount <= 0) {
+ if (shiftCount < -11 && aExp == 0x7FF) {
+ if ((aFracHi | aFracLo) != 0u)
+ return default_NegNaN;
+ return mix(default_NegNaN, default_PosNaN, aSign == 0u);
+ }
+ __shortShift64Left(aFracHi, aFracLo, -shiftCount, aFracHi, aFracLo);
+ } else {
+ __shift64ExtraRightJamming(aFracHi, aFracLo, zFrac2, shiftCount,
+ aFracHi, aFracLo, zFrac2);
+ }
+
+ return __roundAndPackInt64(aSign, aFracHi, aFracLo, zFrac2);
+}
+
+uint64_t
+__fp32_to_uint64(float f)
+{
+ uint a = floatBitsToUint(f);
+ uint aFrac = a & 0x007FFFFFu;
+ int aExp = int((a>>23) & 0xFFu);
+ uint aSign = a & 0x80000000u;
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+ int shiftCount = 0xBE - aExp;
+
+   if (shiftCount < 0) {
+ if (aExp == 0xFF)
+ return default_nan;
+ }
+
+ aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);
+ __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);
+
+ if (shiftCount != 0) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,
+ zFrac0, zFrac1, zFrac2);
+ }
+
+ return __roundAndPackUInt64(aSign, zFrac0, zFrac1, zFrac2);
+}
+
+int64_t
+__fp32_to_int64(float f)
+{
+ uint a = floatBitsToUint(f);
+ uint aFrac = a & 0x007FFFFFu;
+ int aExp = int((a>>23) & 0xFFu);
+ uint aSign = a & 0x80000000u;
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ int64_t default_NegNaN = -0x7FFFFFFFFFFFFFFEL;
+ int64_t default_PosNaN = 0xFFFFFFFFFFFFFFFFL;
+ int shiftCount = 0xBE - aExp;
+
+   if (shiftCount < 0) {
+ if (aExp == 0xFF && aFrac != 0u)
+ return default_NegNaN;
+ return mix(default_NegNaN, default_PosNaN, aSign == 0u);
+ }
+
+ aFrac = mix(aFrac, aFrac | 0x00800000u, aExp != 0);
+ __shortShift64Left(aFrac, 0, 40, zFrac0, zFrac1);
+
+ if (shiftCount != 0) {
+ __shift64ExtraRightJamming(zFrac0, zFrac1, zFrac2, shiftCount,
+ zFrac0, zFrac1, zFrac2);
+ }
+
+ return __roundAndPackInt64(aSign, zFrac0, zFrac1, zFrac2);
+}
+
+uint64_t
+__int64_to_fp64(int64_t a)
+{
+   if (a == 0)
+ return 0ul;
+
+ uint64_t absA = mix(uint64_t(a), uint64_t(-a), a < 0);
+ uint aFracHi = __extractFloat64FracHi(absA);
+ uvec2 aFrac = unpackUint2x32(absA);
+ uint zSign = uint(unpackInt2x32(a).y) & 0x80000000u;
+
+ if ((aFracHi & 0x80000000u) != 0u) {
+ return mix(0ul, __packFloat64(0x80000000u, 0x434, 0u, 0u), a < 0);
+ }
+
+ return __normalizeRoundAndPackFloat64(zSign, 0x432, aFrac.y, aFrac.x);
+}
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the 32-bit two's complement integer format. The conversion is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic---
+ * which means in particular that the conversion is rounded according to the
+ * current rounding mode. If `a' is a NaN, the largest positive integer is
+ * returned. Otherwise, if the conversion overflows, the largest integer with
+ * the same sign as `a' is returned.
+ */
+int
+__fp64_to_int(uint64_t a)
+{
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+
+ uint absZ = 0u;
+ uint aFracExtra = 0u;
+ int shiftCount = aExp - 0x413;
+
+ if (0 <= shiftCount) {
+ if (0x41E < aExp) {
+ if ((aExp == 0x7FF) && bool(aFracHi | aFracLo))
+ aSign = 0u;
+ return mix(0x7FFFFFFF, 0x80000000, aSign != 0u);
+ }
+ __shortShift64Left(aFracHi | 0x00100000u, aFracLo, shiftCount, absZ, aFracExtra);
+ } else {
+ if (aExp < 0x3FF)
+ return 0;
+
+ aFracHi |= 0x00100000u;
+      aFracExtra = (aFracHi << (shiftCount & 31)) | aFracLo;
+      absZ = aFracHi >> (-shiftCount);
+ }
+
+ int z = mix(int(absZ), -int(absZ), aSign != 0u);
+ int nan = mix(0x7FFFFFFF, 0x80000000, aSign != 0u);
+ return mix(z, nan, ((aSign != 0u) != (z < 0)) && bool(z));
+}
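+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): 2^31 does not fit in a signed int, so the conversion
+ * saturates to the largest positive integer, as the comment above
+ * specifies.
+ */
+bool
+__demo_fp64_to_int_saturates()
+{
+   return __fp64_to_int(0x41E0000000000000UL /* 2^31 */) == 0x7FFFFFFF;
+}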
+
+/* Returns the result of converting the 32-bit two's complement integer `a'
+ * to the double-precision floating-point format. The conversion is performed
+ * according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+uint64_t
+__int_to_fp64(int a)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+   if (a == 0)
+ return __packFloat64(0u, 0, 0u, 0u);
+ uint zSign = uint(a) & 0x80000000u;
+ uint absA = mix(uint(a), uint(-a), a < 0);
+ int shiftCount = __countLeadingZeros32(absA) - 11;
+ if (0 <= shiftCount) {
+ zFrac0 = absA << shiftCount;
+ zFrac1 = 0u;
+ } else {
+ __shift64Right(absA, 0u, -shiftCount, zFrac0, zFrac1);
+ }
+ return __packFloat64(zSign, 0x412 - shiftCount, zFrac0, zFrac1);
+}
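+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): every 32-bit int is exactly representable in double
+ * precision, so int -> fp64 -> int round-trips exactly.
+ */
+bool
+__demo_int_fp64_roundtrip(int x)
+{
+   return __fp64_to_int(__int_to_fp64(x)) == x;
+}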
+
+bool
+__fp64_to_bool(uint64_t a)
+{
+ return !__feq64_nonnan(__fabs64(a), 0ul);
+}
+
+uint64_t
+__bool_to_fp64(bool a)
+{
+ return packUint2x32(uvec2(0x00000000u, uint(-int(a) & 0x3ff00000)));
+}
+
+/* Packs the sign `zSign', exponent `zExp', and significand `zFrac' into a
+ * single-precision floating-point value, returning the result. After being
+ * shifted into the proper positions, the three fields are simply added
+ * together to form the result. This means that any integer portion of `zSig'
+ * will be added into the exponent. Since a properly normalized significand
+ * will have an integer portion equal to 1, the `zExp' input should be 1 less
+ * than the desired result exponent whenever `zFrac' is a complete, normalized
+ * significand.
+ */
+float
+__packFloat32(uint zSign, int zExp, uint zFrac)
+{
+ return uintBitsToFloat(zSign + (uint(zExp)<<23) + zFrac);
+}
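+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): packing sign 0, biased exponent 127 and a zero
+ * fraction yields the bit pattern 0x3F800000, i.e. 1.0.
+ */
+bool
+__demo_packFloat32_one()
+{
+   return __packFloat32(0u, 0x7F, 0u) == 1.0;
+}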
+
+/* Takes an abstract floating-point value having sign `zSign', exponent `zExp',
+ * and significand `zFrac', and returns the proper single-precision floating-
+ * point value corresponding to the abstract input. Ordinarily, the abstract
+ * value is simply rounded and packed into the single-precision format, with
+ * the inexact exception raised if the abstract input cannot be represented
+ * exactly. However, if the abstract value is too large, the overflow and
+ * inexact exceptions are raised and an infinity or maximal finite value is
+ * returned. If the abstract value is too small, the input value is rounded to
+ * a subnormal number, and the underflow and inexact exceptions are raised if
+ * the abstract input cannot be represented exactly as a subnormal single-
+ * precision floating-point number.
+ * The input significand `zFrac' has its binary point between bits 30
+ * and 29, which is 7 bits to the left of the usual location. This shifted
+ * significand must be normalized or smaller. If `zFrac' is not normalized,
+ * `zExp' must be 0; in that case, the result returned is a subnormal number,
+ * and it must not require rounding. In the usual case that `zFrac' is
+ * normalized, `zExp' must be 1 less than the "true" floating-point exponent.
+ * The handling of underflow and overflow follows the IEEE Standard for
+ * Floating-Point Arithmetic.
+ */
+float
+__roundAndPackFloat32(uint zSign, int zExp, uint zFrac)
+{
+ bool roundNearestEven;
+ int roundIncrement;
+ int roundBits;
+
+ roundNearestEven = FLOAT_ROUNDING_MODE == FLOAT_ROUND_NEAREST_EVEN;
+ roundIncrement = 0x40;
+ if (!roundNearestEven) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_TO_ZERO) {
+ roundIncrement = 0;
+ } else {
+ roundIncrement = 0x7F;
+ if (zSign != 0u) {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_UP)
+ roundIncrement = 0;
+ } else {
+ if (FLOAT_ROUNDING_MODE == FLOAT_ROUND_DOWN)
+ roundIncrement = 0;
+ }
+ }
+ }
+ roundBits = int(zFrac & 0x7Fu);
+ if (0xFDu <= uint(zExp)) {
+ if ((0xFD < zExp) || ((zExp == 0xFD) && (int(zFrac) + roundIncrement) < 0))
+ return __packFloat32(zSign, 0xFF, 0u) - float(roundIncrement == 0);
+ int count = -zExp;
+ bool zexp_lt0 = zExp < 0;
+ uint zFrac_lt0 = mix(uint(zFrac != 0u), (zFrac>>count) | uint((zFrac<<((-count) & 31)) != 0u), (-zExp) < 32);
+ zFrac = mix(zFrac, zFrac_lt0, zexp_lt0);
+ roundBits = mix(roundBits, int(zFrac) & 0x7f, zexp_lt0);
+ zExp = mix(zExp, 0, zexp_lt0);
+ }
+ zFrac = (zFrac + uint(roundIncrement))>>7;
+ zFrac &= ~uint(((roundBits ^ 0x40) == 0) && roundNearestEven);
+
+ return __packFloat32(zSign, mix(zExp, 0, zFrac == 0u), zFrac);
+}
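+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): with the binary point between bits 30 and 29, the
+ * normalized significand for 1.0 is 0x40000000, and per the comment
+ * above zExp must be one less than the true exponent (126 instead of
+ * 127).
+ */
+bool
+__demo_roundAndPackFloat32_one()
+{
+   return __roundAndPackFloat32(0u, 0x7E, 0x40000000u) == 1.0;
+}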
+
+/* Returns the result of converting the double-precision floating-point value
+ * `a' to the single-precision floating-point format. The conversion is
+ * performed according to the IEEE Standard for Floating-Point Arithmetic.
+ */
+float
+__fp64_to_fp32(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ uint zFrac = 0u;
+ uint allZero = 0u;
+
+ uint aFracLo = __extractFloat64FracLo(__a);
+ uint aFracHi = __extractFloat64FracHi(__a);
+ int aExp = __extractFloat64Exp(__a);
+ uint aSign = __extractFloat64Sign(__a);
+ if (aExp == 0x7FF) {
+ __shortShift64Left(a.y, a.x, 12, a.y, a.x);
+ float rval = uintBitsToFloat(aSign | 0x7FC00000u | (a.y>>9));
+ rval = mix(__packFloat32(aSign, 0xFF, 0u), rval, (aFracHi | aFracLo) != 0u);
+ return rval;
+ }
+ __shift64RightJamming(aFracHi, aFracLo, 22, allZero, zFrac);
+ zFrac = mix(zFrac, zFrac | 0x40000000u, aExp != 0);
+ return __roundAndPackFloat32(aSign, aExp - 0x381, zFrac);
+}
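+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): narrowing the double 1.0 yields the float 1.0.
+ */
+bool
+__demo_fp64_to_fp32_one()
+{
+   return __fp64_to_fp32(0x3FF0000000000000UL /* 1.0 */) == 1.0;
+}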
+
+float
+__uint64_to_fp32(uint64_t __a)
+{
+ uvec2 aFrac = unpackUint2x32(__a);
+ int shiftCount = mix(__countLeadingZeros32(aFrac.y) - 33,
+ __countLeadingZeros32(aFrac.x) - 1,
+ aFrac.y == 0u);
+
+ if (0 <= shiftCount)
+ __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
+ else
+ __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
+
+ return __roundAndPackFloat32(0u, 0x9C - shiftCount, aFrac.x);
+}
+
+float
+__int64_to_fp32(int64_t __a)
+{
+ uint aSign = uint(unpackInt2x32(__a).y) & 0x80000000u;
+ uint64_t absA = mix(uint64_t(__a), uint64_t(-__a), __a < 0);
+ uvec2 aFrac = unpackUint2x32(absA);
+ int shiftCount = mix(__countLeadingZeros32(aFrac.y) - 33,
+ __countLeadingZeros32(aFrac.x) - 1,
+ aFrac.y == 0u);
+
+ if (0 <= shiftCount)
+ __shortShift64Left(aFrac.y, aFrac.x, shiftCount, aFrac.y, aFrac.x);
+ else
+ __shift64RightJamming(aFrac.y, aFrac.x, -shiftCount, aFrac.y, aFrac.x);
+
+ return __roundAndPackFloat32(aSign, 0x9C - shiftCount, aFrac.x);
+}
+
+/* Returns the result of converting the single-precision floating-point value
+ * `a' to the double-precision floating-point format.
+ */
+uint64_t
+__fp32_to_fp64(float f)
+{
+ uint a = floatBitsToUint(f);
+ uint aFrac = a & 0x007FFFFFu;
+ int aExp = int((a>>23) & 0xFFu);
+ uint aSign = a & 0x80000000u;
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+
+ if (aExp == 0xFF) {
+ if (aFrac != 0u) {
+ uint nanLo = 0u;
+ uint nanHi = a<<9;
+ __shift64Right(nanHi, nanLo, 12, nanHi, nanLo);
+ nanHi |= aSign | 0x7FF80000u;
+ return packUint2x32(uvec2(nanLo, nanHi));
+ }
+ return __packFloat64(aSign, 0x7FF, 0u, 0u);
+ }
+
+ if (aExp == 0) {
+ if (aFrac == 0u)
+ return __packFloat64(aSign, 0, 0u, 0u);
+ /* Normalize subnormal */
+ int shiftCount = __countLeadingZeros32(aFrac) - 8;
+ aFrac <<= shiftCount;
+ aExp = 1 - shiftCount;
+ --aExp;
+ }
+
+ __shift64Right(aFrac, 0u, 3, zFrac0, zFrac1);
+ return __packFloat64(aSign, aExp + 0x380, zFrac0, zFrac1);
+}
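+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): widening is exact, so for any finite float the
+ * fp32 -> fp64 -> fp32 round trip reproduces the input.
+ */
+bool
+__demo_fp32_fp64_roundtrip(float f)
+{
+   return __fp64_to_fp32(__fp32_to_fp64(f)) == f;
+}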
+
+/* Adds the 96-bit value formed by concatenating `a0', `a1', and `a2' to the
+ * 96-bit value formed by concatenating `b0', `b1', and `b2'. Addition is
+ * modulo 2^96, so any carry out is lost. The result is broken into three
+ * 32-bit pieces which are stored at the locations pointed to by `z0Ptr',
+ * `z1Ptr', and `z2Ptr'.
+ */
+void
+__add96(uint a0, uint a1, uint a2,
+ uint b0, uint b1, uint b2,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z2 = a2 + b2;
+ uint carry1 = uint(z2 < a2);
+ uint z1 = a1 + b1;
+ uint carry0 = uint(z1 < a1);
+ uint z0 = a0 + b0;
+ z1 += carry1;
+ z0 += uint(z1 < carry1);
+ z0 += carry0;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
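+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): adding 1 to 0x00000000_FFFFFFFF_FFFFFFFF carries all
+ * the way into the top word.
+ */
+bool
+__demo_add96_carry()
+{
+   uint z0;
+   uint z1;
+   uint z2;
+   __add96(0u, 0xFFFFFFFFu, 0xFFFFFFFFu, 0u, 0u, 1u, z0, z1, z2);
+   return z0 == 1u && z1 == 0u && z2 == 0u;
+}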
+
+/* Subtracts the 96-bit value formed by concatenating `b0', `b1', and `b2' from
+ * the 96-bit value formed by concatenating `a0', `a1', and `a2'. Subtraction
+ * is modulo 2^96, so any borrow out (carry out) is lost. The result is broken
+ * into three 32-bit pieces which are stored at the locations pointed to by
+ * `z0Ptr', `z1Ptr', and `z2Ptr'.
+ */
+void
+__sub96(uint a0, uint a1, uint a2,
+ uint b0, uint b1, uint b2,
+ out uint z0Ptr,
+ out uint z1Ptr,
+ out uint z2Ptr)
+{
+ uint z2 = a2 - b2;
+ uint borrow1 = uint(a2 < b2);
+ uint z1 = a1 - b1;
+ uint borrow0 = uint(a1 < b1);
+ uint z0 = a0 - b0;
+ z0 -= uint(z1 < borrow1);
+ z1 -= borrow1;
+ z0 -= borrow0;
+ z2Ptr = z2;
+ z1Ptr = z1;
+ z0Ptr = z0;
+}
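+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): subtracting 1 from 0x00000001_00000000_00000000
+ * borrows through both lower words.
+ */
+bool
+__demo_sub96_borrow()
+{
+   uint z0;
+   uint z1;
+   uint z2;
+   __sub96(1u, 0u, 0u, 0u, 0u, 1u, z0, z1, z2);
+   return z0 == 0u && z1 == 0xFFFFFFFFu && z2 == 0xFFFFFFFFu;
+}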
+
+/* Returns an approximation to the 32-bit integer quotient obtained by dividing
+ * `b' into the 64-bit value formed by concatenating `a0' and `a1'. The
+ * divisor `b' must be at least 2^31. If q is the exact quotient truncated
+ * toward zero, the approximation returned lies between q and q + 2 inclusive.
+ * If the exact quotient q is larger than 32 bits, the maximum positive 32-bit
+ * unsigned integer is returned.
+ */
+uint
+__estimateDiv64To32(uint a0, uint a1, uint b)
+{
+ uint b0;
+ uint b1;
+ uint rem0 = 0u;
+ uint rem1 = 0u;
+ uint term0 = 0u;
+ uint term1 = 0u;
+ uint z;
+
+ if (b <= a0)
+ return 0xFFFFFFFFu;
+ b0 = b>>16;
+ z = (b0<<16 <= a0) ? 0xFFFF0000u : (a0 / b0)<<16;
+ umulExtended(b, z, term0, term1);
+ __sub64(a0, a1, term0, term1, rem0, rem1);
+ while (int(rem0) < 0) {
+ z -= 0x10000u;
+ b1 = b<<16;
+ __add64(rem0, rem1, b0, b1, rem0, rem1);
+ }
+ rem0 = (rem0<<16) | (rem1>>16);
+ z |= (b0<<16 <= rem0) ? 0xFFFFu : rem0 / b0;
+ return z;
+}
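+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): 2^33 / 2^31 = 4 exactly, so per the comment above the
+ * estimate must lie between q and q + 2 inclusive.
+ */
+bool
+__demo_estimateDiv64To32()
+{
+   uint q = __estimateDiv64To32(2u, 0u, 0x80000000u);
+   return 4u <= q && q <= 6u;
+}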
+
+uint
+__sqrtOddAdjustments(int index)
+{
+ uint res = 0u;
+ if (index == 0)
+ res = 0x0004u;
+ if (index == 1)
+ res = 0x0022u;
+ if (index == 2)
+ res = 0x005Du;
+ if (index == 3)
+ res = 0x00B1u;
+ if (index == 4)
+ res = 0x011Du;
+ if (index == 5)
+ res = 0x019Fu;
+ if (index == 6)
+ res = 0x0236u;
+ if (index == 7)
+ res = 0x02E0u;
+ if (index == 8)
+ res = 0x039Cu;
+ if (index == 9)
+ res = 0x0468u;
+ if (index == 10)
+ res = 0x0545u;
+ if (index == 11)
+      res = 0x0631u;
+ if (index == 12)
+ res = 0x072Bu;
+ if (index == 13)
+ res = 0x0832u;
+ if (index == 14)
+ res = 0x0946u;
+ if (index == 15)
+ res = 0x0A67u;
+
+ return res;
+}
+
+uint
+__sqrtEvenAdjustments(int index)
+{
+ uint res = 0u;
+ if (index == 0)
+ res = 0x0A2Du;
+ if (index == 1)
+ res = 0x08AFu;
+ if (index == 2)
+ res = 0x075Au;
+ if (index == 3)
+ res = 0x0629u;
+ if (index == 4)
+ res = 0x051Au;
+ if (index == 5)
+ res = 0x0429u;
+ if (index == 6)
+ res = 0x0356u;
+ if (index == 7)
+ res = 0x029Eu;
+ if (index == 8)
+ res = 0x0200u;
+ if (index == 9)
+ res = 0x0179u;
+ if (index == 10)
+ res = 0x0109u;
+ if (index == 11)
+ res = 0x00AFu;
+ if (index == 12)
+ res = 0x0068u;
+ if (index == 13)
+ res = 0x0034u;
+ if (index == 14)
+ res = 0x0012u;
+ if (index == 15)
+ res = 0x0002u;
+
+ return res;
+}
+
+/* Returns an approximation to the square root of the 32-bit significand given
+ * by `a'. Considered as an integer, `a' must be at least 2^31. If bit 0 of
+ * `aExp' (the least significant bit) is 1, the integer returned approximates
+ * 2^31*sqrt(`a'/2^31), where `a' is considered an integer. If bit 0 of `aExp'
+ * is 0, the integer returned approximates 2^31*sqrt(`a'/2^30). In either
+ * case, the approximation returned lies strictly within +/-2 of the exact
+ * value.
+ */
+uint
+__estimateSqrt32(int aExp, uint a)
+{
+ uint z;
+
+ int index = int(a>>27 & 15u);
+ if ((aExp & 1) != 0) {
+ z = 0x4000u + (a>>17) - __sqrtOddAdjustments(index);
+ z = ((a / z)<<14) + (z<<15);
+ a >>= 1;
+ } else {
+ z = 0x8000u + (a>>17) - __sqrtEvenAdjustments(index);
+ z = a / z + z;
+ z = (0x20000u <= z) ? 0xFFFF8000u : (z<<15);
+ if (z <= a)
+ return uint(int(a)>>1);
+ }
+ return ((__estimateDiv64To32(a, 0u, z))>>1) + (z>>1);
+}
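+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): for an even exponent and a = 2^31 the result
+ * approximates 2^31 * sqrt(2^31 / 2^30) = 2^31 * sqrt(2), which is
+ * about 0xB504F334; the documented error bound is +/-2.
+ */
+bool
+__demo_estimateSqrt32()
+{
+   uint z = __estimateSqrt32(0, 0x80000000u);
+   return 0xB504F332u <= z && z <= 0xB504F336u;
+}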
+
+/* Returns the square root of the double-precision floating-point value `a'.
+ * The operation is performed according to the IEEE Standard for Floating-Point
+ * Arithmetic.
+ */
+uint64_t
+__fsqrt64(uint64_t a)
+{
+ uint zFrac0 = 0u;
+ uint zFrac1 = 0u;
+ uint zFrac2 = 0u;
+ uint doubleZFrac0 = 0u;
+ uint rem0 = 0u;
+ uint rem1 = 0u;
+ uint rem2 = 0u;
+ uint rem3 = 0u;
+ uint term0 = 0u;
+ uint term1 = 0u;
+ uint term2 = 0u;
+ uint term3 = 0u;
+ uint64_t default_nan = 0xFFFFFFFFFFFFFFFFUL;
+
+ uint aFracLo = __extractFloat64FracLo(a);
+ uint aFracHi = __extractFloat64FracHi(a);
+ int aExp = __extractFloat64Exp(a);
+ uint aSign = __extractFloat64Sign(a);
+ if (aExp == 0x7FF) {
+ if ((aFracHi | aFracLo) != 0u)
+ return __propagateFloat64NaN(a, a);
+ if (aSign == 0u)
+ return a;
+ return default_nan;
+ }
+ if (aSign != 0u) {
+ if ((uint(aExp) | aFracHi | aFracLo) == 0u)
+ return a;
+ return default_nan;
+ }
+ if (aExp == 0) {
+ if ((aFracHi | aFracLo) == 0u)
+ return __packFloat64(0u, 0, 0u, 0u);
+ __normalizeFloat64Subnormal(aFracHi, aFracLo, aExp, aFracHi, aFracLo);
+ }
+ int zExp = ((aExp - 0x3FF)>>1) + 0x3FE;
+ aFracHi |= 0x00100000u;
+ __shortShift64Left(aFracHi, aFracLo, 11, term0, term1);
+ zFrac0 = (__estimateSqrt32(aExp, term0)>>1) + 1u;
+ if (zFrac0 == 0u)
+ zFrac0 = 0x7FFFFFFFu;
+ doubleZFrac0 = zFrac0 + zFrac0;
+ __shortShift64Left(aFracHi, aFracLo, 9 - (aExp & 1), aFracHi, aFracLo);
+ umulExtended(zFrac0, zFrac0, term0, term1);
+ __sub64(aFracHi, aFracLo, term0, term1, rem0, rem1);
+ while (int(rem0) < 0) {
+ --zFrac0;
+ doubleZFrac0 -= 2u;
+ __add64(rem0, rem1, 0u, doubleZFrac0 | 1u, rem0, rem1);
+ }
+ zFrac1 = __estimateDiv64To32(rem1, 0u, doubleZFrac0);
+ if ((zFrac1 & 0x1FFu) <= 5u) {
+ if (zFrac1 == 0u)
+ zFrac1 = 1u;
+ umulExtended(doubleZFrac0, zFrac1, term1, term2);
+ __sub64(rem1, 0u, term1, term2, rem1, rem2);
+ umulExtended(zFrac1, zFrac1, term2, term3);
+ __sub96(rem1, rem2, 0u, 0u, term2, term3, rem1, rem2, rem3);
+ while (int(rem1) < 0) {
+ --zFrac1;
+ __shortShift64Left(0u, zFrac1, 1, term2, term3);
+ term3 |= 1u;
+ term2 |= doubleZFrac0;
+ __add96(rem1, rem2, rem3, 0u, term2, term3, rem1, rem2, rem3);
+ }
+ zFrac1 |= uint((rem1 | rem2 | rem3) != 0u);
+ }
+ __shift64ExtraRightJamming(zFrac0, zFrac1, 0u, 10, zFrac0, zFrac1, zFrac2);
+ return __roundAndPackFloat64(0u, zExp, zFrac0, zFrac1, zFrac2);
+}
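+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): sqrt(4.0) is exactly 2.0, so the correctly rounded
+ * result must match exactly.
+ */
+bool
+__demo_fsqrt64()
+{
+   return __feq64(__fsqrt64(0x4010000000000000UL /* 4.0 */),
+                  0x4000000000000000UL /* 2.0 */);
+}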
+
+uint64_t
+__ftrunc64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ int aExp = __extractFloat64Exp(__a);
+ uint zLo;
+ uint zHi;
+
+ int unbiasedExp = aExp - 1023;
+ int fracBits = 52 - unbiasedExp;
+ uint maskLo = mix(~0u << fracBits, 0u, fracBits >= 32);
+ uint maskHi = mix(~0u << (fracBits - 32), ~0u, fracBits < 33);
+ zLo = maskLo & a.x;
+ zHi = maskHi & a.y;
+
+ zLo = mix(zLo, 0u, unbiasedExp < 0);
+ zHi = mix(zHi, 0u, unbiasedExp < 0);
+ zLo = mix(zLo, a.x, unbiasedExp > 52);
+ zHi = mix(zHi, a.y, unbiasedExp > 52);
+ return packUint2x32(uvec2(zLo, zHi));
+}
+
+uint64_t
+__ffloor64(uint64_t a)
+{
+   /* The big assumption is that when 'a' is NaN, __ftrunc64(a) returns a.
+    * Based on that assumption, NaN values that don't have the sign bit set
+    * will safely return NaN (identity). This is guarded by RELAXED_NAN_PROPAGATION
+ * because otherwise the NaN should have the "signal" bit set. The
+ * __fadd64 will ensure that occurs.
+ */
+ bool is_positive =
+#if defined RELAXED_NAN_PROPAGATION
+ int(unpackUint2x32(a).y) >= 0
+#else
+ __fge64(a, 0ul)
+#endif
+ ;
+ uint64_t tr = __ftrunc64(a);
+
+ if (is_positive || __feq64(tr, a)) {
+ return tr;
+ } else {
+ return __fadd64(tr, 0xbff0000000000000ul /* -1.0 */);
+ }
+}
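+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): trunc and floor differ on negative non-integers;
+ * -1.5 truncates toward zero to -1.0 but floors to -2.0.
+ */
+bool
+__demo_trunc_vs_floor()
+{
+   uint64_t neg_1_5 = 0xBFF8000000000000UL;
+   return __ftrunc64(neg_1_5) == 0xBFF0000000000000UL /* -1.0 */ &&
+          __feq64(__ffloor64(neg_1_5), 0xC000000000000000UL /* -2.0 */);
+}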
+
+uint64_t
+__fround64(uint64_t __a)
+{
+ uvec2 a = unpackUint2x32(__a);
+ int unbiasedExp = __extractFloat64Exp(__a) - 1023;
+ uint aHi = a.y;
+ uint aLo = a.x;
+
+ if (unbiasedExp < 20) {
+ if (unbiasedExp < 0) {
+ if ((aHi & 0x80000000u) != 0u && aLo == 0u) {
+ return 0;
+ }
+ aHi &= 0x80000000u;
+ if ((a.y & 0x000FFFFFu) == 0u && a.x == 0u) {
+ aLo = 0u;
+ return packUint2x32(uvec2(aLo, aHi));
+ }
+ aHi = mix(aHi, (aHi | 0x3FF00000u), unbiasedExp == -1);
+ aLo = 0u;
+ } else {
+ uint maskExp = 0x000FFFFFu >> unbiasedExp;
+ uint lastBit = maskExp + 1;
+ aHi += 0x00080000u >> unbiasedExp;
+ if ((aHi & maskExp) == 0u)
+ aHi &= ~lastBit;
+ aHi &= ~maskExp;
+ aLo = 0u;
+ }
+ } else if (unbiasedExp > 51 || unbiasedExp == 1024) {
+ return __a;
+ } else {
+ uint maskExp = 0xFFFFFFFFu >> (unbiasedExp - 20);
+ if ((aLo & maskExp) == 0u)
+ return __a;
+ uint tmp = aLo + (1u << (51 - unbiasedExp));
+   if (tmp < aLo)
+ aHi += 1u;
+ aLo = tmp;
+ aLo &= ~maskExp;
+ }
+
+ return packUint2x32(uvec2(aLo, aHi));
+}
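+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): 1.25 rounds down to the nearest integer, 1.0.
+ */
+bool
+__demo_fround64()
+{
+   return __fround64(0x3FF4000000000000UL /* 1.25 */) ==
+          0x3FF0000000000000UL /* 1.0 */;
+}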
+
+uint64_t
+__fmin64(uint64_t a, uint64_t b)
+{
+ /* This weird layout matters. Doing the "obvious" thing results in extra
+ * flow control being inserted to implement the short-circuit evaluation
+ * rules. Flow control is bad!
+ */
+ bool b_nan = __is_nan(b);
+ bool a_lt_b = __flt64_nonnan(a, b);
+ bool a_nan = __is_nan(a);
+
+ return (b_nan || a_lt_b) && !a_nan ? a : b;
+}
+
+uint64_t
+__fmax64(uint64_t a, uint64_t b)
+{
+ /* This weird layout matters. Doing the "obvious" thing results in extra
+ * flow control being inserted to implement the short-circuit evaluation
+ * rules. Flow control is bad!
+ */
+ bool b_nan = __is_nan(b);
+ bool a_lt_b = __flt64_nonnan(a, b);
+ bool a_nan = __is_nan(a);
+
+ return (b_nan || a_lt_b) && !a_nan ? b : a;
+}
+
+uint64_t
+__ffract64(uint64_t a)
+{
+ return __fadd64(a, __fneg64(__ffloor64(a)));
+}
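+
+/* Illustrative sketch (not part of the upstream file; the demo name is
+ * hypothetical): fract(1.5) = 1.5 - floor(1.5) = 0.5.
+ */
+bool
+__demo_ffract64()
+{
+   return __feq64(__ffract64(0x3FF8000000000000UL /* 1.5 */),
+                  0x3FE0000000000000UL /* 0.5 */);
+}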
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/generate_ir.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/generate_ir.cpp
new file mode 100644
index 0000000000..255b0484f2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/generate_ir.cpp
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ir_builder.h"
+#include "builtin_functions.h"
+#include "program/prog_instruction.h" /* for SWIZZLE_X, &c. */
+
+using namespace ir_builder;
+
+namespace generate_ir {
+
+#include "builtin_int64.h"
+
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/README b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/README
new file mode 100644
index 0000000000..0637935e28
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/README
@@ -0,0 +1,30 @@
+glcpp -- GLSL "C" preprocessor
+
+This is a simple preprocessor designed to provide the preprocessing
+needs of the GLSL language. The requirements for this preprocessor are
+specified in the GLSL 1.30 specification available from:
+
+http://www.opengl.org/registry/doc/GLSLangSpec.Full.1.30.10.pdf
+
+This specification is not precise on some semantics (for example,
+#define and #if), defining these merely "as is standard for C++
+preprocessors". To fill in these details, I've been using a draft of
+the C99 standard as available from:
+
+http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1256.pdf
+
+Any downstream compiler accepting output from glcpp should be prepared
+to encounter and deal with the following preprocessor directives:
+
+ #line
+ #pragma
+ #extension
+
+All other directives and macros will be handled according to the GLSL
+specification and will not appear in the output.
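+
+As a hypothetical illustration (the snippet below is not taken from any
+real shader), given the input:
+
+	#define SCALE 0.5
+	#pragma optimize(on)
+	float x = SCALE;
+
+the #pragma line passes through unchanged for the downstream compiler,
+while SCALE is expanded to 0.5 and the #define itself is consumed.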
+
+Known limitations
+-----------------
+A file that ends with a function-like macro name as the last
+non-whitespace token will result in a parse error (where it should be
+passed through as-is). \ No newline at end of file
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c
new file mode 100644
index 0000000000..973d114078
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c
@@ -0,0 +1,3216 @@
+#line 1 "src/compiler/glsl/glcpp/glcpp-lex.c"
+
+#line 3 "src/compiler/glsl/glcpp/glcpp-lex.c"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 6
+#define YY_FLEX_SUBMINOR_VERSION 4
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+#ifdef yy_create_buffer
+#define glcpp__create_buffer_ALREADY_DEFINED
+#else
+#define yy_create_buffer glcpp__create_buffer
+#endif
+
+#ifdef yy_delete_buffer
+#define glcpp__delete_buffer_ALREADY_DEFINED
+#else
+#define yy_delete_buffer glcpp__delete_buffer
+#endif
+
+#ifdef yy_scan_buffer
+#define glcpp__scan_buffer_ALREADY_DEFINED
+#else
+#define yy_scan_buffer glcpp__scan_buffer
+#endif
+
+#ifdef yy_scan_string
+#define glcpp__scan_string_ALREADY_DEFINED
+#else
+#define yy_scan_string glcpp__scan_string
+#endif
+
+#ifdef yy_scan_bytes
+#define glcpp__scan_bytes_ALREADY_DEFINED
+#else
+#define yy_scan_bytes glcpp__scan_bytes
+#endif
+
+#ifdef yy_init_buffer
+#define glcpp__init_buffer_ALREADY_DEFINED
+#else
+#define yy_init_buffer glcpp__init_buffer
+#endif
+
+#ifdef yy_flush_buffer
+#define glcpp__flush_buffer_ALREADY_DEFINED
+#else
+#define yy_flush_buffer glcpp__flush_buffer
+#endif
+
+#ifdef yy_load_buffer_state
+#define glcpp__load_buffer_state_ALREADY_DEFINED
+#else
+#define yy_load_buffer_state glcpp__load_buffer_state
+#endif
+
+#ifdef yy_switch_to_buffer
+#define glcpp__switch_to_buffer_ALREADY_DEFINED
+#else
+#define yy_switch_to_buffer glcpp__switch_to_buffer
+#endif
+
+#ifdef yypush_buffer_state
+#define glcpp_push_buffer_state_ALREADY_DEFINED
+#else
+#define yypush_buffer_state glcpp_push_buffer_state
+#endif
+
+#ifdef yypop_buffer_state
+#define glcpp_pop_buffer_state_ALREADY_DEFINED
+#else
+#define yypop_buffer_state glcpp_pop_buffer_state
+#endif
+
+#ifdef yyensure_buffer_stack
+#define glcpp_ensure_buffer_stack_ALREADY_DEFINED
+#else
+#define yyensure_buffer_stack glcpp_ensure_buffer_stack
+#endif
+
+#ifdef yylex
+#define glcpp_lex_ALREADY_DEFINED
+#else
+#define yylex glcpp_lex
+#endif
+
+#ifdef yyrestart
+#define glcpp_restart_ALREADY_DEFINED
+#else
+#define yyrestart glcpp_restart
+#endif
+
+#ifdef yylex_init
+#define glcpp_lex_init_ALREADY_DEFINED
+#else
+#define yylex_init glcpp_lex_init
+#endif
+
+#ifdef yylex_init_extra
+#define glcpp_lex_init_extra_ALREADY_DEFINED
+#else
+#define yylex_init_extra glcpp_lex_init_extra
+#endif
+
+#ifdef yylex_destroy
+#define glcpp_lex_destroy_ALREADY_DEFINED
+#else
+#define yylex_destroy glcpp_lex_destroy
+#endif
+
+#ifdef yyget_debug
+#define glcpp_get_debug_ALREADY_DEFINED
+#else
+#define yyget_debug glcpp_get_debug
+#endif
+
+#ifdef yyset_debug
+#define glcpp_set_debug_ALREADY_DEFINED
+#else
+#define yyset_debug glcpp_set_debug
+#endif
+
+#ifdef yyget_extra
+#define glcpp_get_extra_ALREADY_DEFINED
+#else
+#define yyget_extra glcpp_get_extra
+#endif
+
+#ifdef yyset_extra
+#define glcpp_set_extra_ALREADY_DEFINED
+#else
+#define yyset_extra glcpp_set_extra
+#endif
+
+#ifdef yyget_in
+#define glcpp_get_in_ALREADY_DEFINED
+#else
+#define yyget_in glcpp_get_in
+#endif
+
+#ifdef yyset_in
+#define glcpp_set_in_ALREADY_DEFINED
+#else
+#define yyset_in glcpp_set_in
+#endif
+
+#ifdef yyget_out
+#define glcpp_get_out_ALREADY_DEFINED
+#else
+#define yyget_out glcpp_get_out
+#endif
+
+#ifdef yyset_out
+#define glcpp_set_out_ALREADY_DEFINED
+#else
+#define yyset_out glcpp_set_out
+#endif
+
+#ifdef yyget_leng
+#define glcpp_get_leng_ALREADY_DEFINED
+#else
+#define yyget_leng glcpp_get_leng
+#endif
+
+#ifdef yyget_text
+#define glcpp_get_text_ALREADY_DEFINED
+#else
+#define yyget_text glcpp_get_text
+#endif
+
+#ifdef yyget_lineno
+#define glcpp_get_lineno_ALREADY_DEFINED
+#else
+#define yyget_lineno glcpp_get_lineno
+#endif
+
+#ifdef yyset_lineno
+#define glcpp_set_lineno_ALREADY_DEFINED
+#else
+#define yyset_lineno glcpp_set_lineno
+#endif
+
+#ifdef yyget_column
+#define glcpp_get_column_ALREADY_DEFINED
+#else
+#define yyget_column glcpp_get_column
+#endif
+
+#ifdef yyset_column
+#define glcpp_set_column_ALREADY_DEFINED
+#else
+#define yyset_column glcpp_set_column
+#endif
+
+#ifdef yywrap
+#define glcpp_wrap_ALREADY_DEFINED
+#else
+#define yywrap glcpp_wrap
+#endif
+
+#ifdef yyget_lval
+#define glcpp_get_lval_ALREADY_DEFINED
+#else
+#define yyget_lval glcpp_get_lval
+#endif
+
+#ifdef yyset_lval
+#define glcpp_set_lval_ALREADY_DEFINED
+#else
+#define yyset_lval glcpp_set_lval
+#endif
+
+#ifdef yyget_lloc
+#define glcpp_get_lloc_ALREADY_DEFINED
+#else
+#define yyget_lloc glcpp_get_lloc
+#endif
+
+#ifdef yyset_lloc
+#define glcpp_set_lloc_ALREADY_DEFINED
+#else
+#define yyset_lloc glcpp_set_lloc
+#endif
+
+#ifdef yyalloc
+#define glcpp_alloc_ALREADY_DEFINED
+#else
+#define yyalloc glcpp_alloc
+#endif
+
+#ifdef yyrealloc
+#define glcpp_realloc_ALREADY_DEFINED
+#else
+#define yyrealloc glcpp_realloc
+#endif
+
+#ifdef yyfree
+#define glcpp_free_ALREADY_DEFINED
+#else
+#define yyfree glcpp_free
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#endif /* ! C99 */
+
+#endif /* ! FLEXINT_H */
+
+/* begin standard C++ headers. */
+
+/* TODO: this is always defined, so inline it */
+#define yyconst const
+
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define yynoreturn __attribute__((__noreturn__))
+#else
+#define yynoreturn
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an
+ * integer in range [0..255] for use as an array index.
+ */
+#define YY_SC_TO_UI(c) ((YY_CHAR) (c))
+
+/* An opaque pointer. */
+#ifndef YY_TYPEDEF_YY_SCANNER_T
+#define YY_TYPEDEF_YY_SCANNER_T
+typedef void* yyscan_t;
+#endif
+
+/* For convenience, these vars (plus the bison vars far below)
+ are macros in the reentrant scanner. */
+#define yyin yyg->yyin_r
+#define yyout yyg->yyout_r
+#define yyextra yyg->yyextra_r
+#define yyleng yyg->yyleng_r
+#define yytext yyg->yytext_r
+#define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno)
+#define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column)
+#define yy_flex_debug yyg->yy_flex_debug_r
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN yyg->yy_start = 1 + 2 *
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START ((yyg->yy_start - 1) / 2)
+#define YYSTATE YY_START
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE yyrestart( yyin , yyscanner )
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k.
+ * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
+ * Ditto for the __ia64__ case accordingly.
+ */
+#define YY_BUF_SIZE 32768
+#else
+#define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+ #define YY_LINENO_REWIND_TO(ptr)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = yyg->yy_hold_char; \
+ YY_RESTORE_YY_MORE_OFFSET \
+ yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+#define unput(c) yyunput( c, yyg->yytext_ptr , yyscanner )
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ int yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via yyrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( yyg->yy_buffer_stack \
+ ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] \
+ : NULL)
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE yyg->yy_buffer_stack[yyg->yy_buffer_stack_top]
+
+void yyrestart ( FILE *input_file , yyscan_t yyscanner );
+void yy_switch_to_buffer ( YY_BUFFER_STATE new_buffer , yyscan_t yyscanner );
+YY_BUFFER_STATE yy_create_buffer ( FILE *file, int size , yyscan_t yyscanner );
+void yy_delete_buffer ( YY_BUFFER_STATE b , yyscan_t yyscanner );
+void yy_flush_buffer ( YY_BUFFER_STATE b , yyscan_t yyscanner );
+void yypush_buffer_state ( YY_BUFFER_STATE new_buffer , yyscan_t yyscanner );
+void yypop_buffer_state ( yyscan_t yyscanner );
+
+static void yyensure_buffer_stack ( yyscan_t yyscanner );
+static void yy_load_buffer_state ( yyscan_t yyscanner );
+static void yy_init_buffer ( YY_BUFFER_STATE b, FILE *file , yyscan_t yyscanner );
+#define YY_FLUSH_BUFFER yy_flush_buffer( YY_CURRENT_BUFFER , yyscanner)
+
+YY_BUFFER_STATE yy_scan_buffer ( char *base, yy_size_t size , yyscan_t yyscanner );
+YY_BUFFER_STATE yy_scan_string ( const char *yy_str , yyscan_t yyscanner );
+YY_BUFFER_STATE yy_scan_bytes ( const char *bytes, int len , yyscan_t yyscanner );
+
+void *yyalloc ( yy_size_t , yyscan_t yyscanner );
+void *yyrealloc ( void *, yy_size_t , yyscan_t yyscanner );
+void yyfree ( void * , yyscan_t yyscanner );
+
+#define yy_new_buffer yy_create_buffer
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ yyensure_buffer_stack (yyscanner); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ yyensure_buffer_stack (yyscanner); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define glcpp_wrap(yyscanner) (/*CONSTCOND*/1)
+#define YY_SKIP_YYWRAP
+typedef flex_uint8_t YY_CHAR;
+
+typedef int yy_state_type;
+
+#define yytext_ptr yytext_r
+
+static yy_state_type yy_get_previous_state ( yyscan_t yyscanner );
+static yy_state_type yy_try_NUL_trans ( yy_state_type current_state , yyscan_t yyscanner);
+static int yy_get_next_buffer ( yyscan_t yyscanner );
+static void yynoreturn yy_fatal_error ( const char* msg , yyscan_t yyscanner );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ yyg->yytext_ptr = yy_bp; \
+ yyleng = (int) (yy_cp - yy_bp); \
+ yyg->yy_hold_char = *yy_cp; \
+ *yy_cp = '\0'; \
+ yyg->yy_c_buf_p = yy_cp;
+#define YY_NUM_RULES 54
+#define YY_END_OF_BUFFER 55
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static const flex_int16_t yy_accept[189] =
+ { 0,
+ 0, 0, 3, 3, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 55, 49, 50, 52, 52, 48,
+ 49, 8, 48, 48, 48, 48, 48, 48, 32, 31,
+ 48, 48, 48, 46, 46, 48, 3, 4, 4, 5,
+ 30, 28, 53, 27, 53, 25, 24, 14, 14, 25,
+ 25, 25, 25, 25, 25, 25, 25, 49, 52, 39,
+ 0, 49, 49, 44, 40, 42, 43, 47, 2, 1,
+ 47, 32, 47, 32, 47, 31, 31, 34, 36, 38,
+ 37, 35, 46, 46, 41, 3, 4, 4, 4, 5,
+ 6, 6, 5, 7, 30, 28, 29, 1, 26, 27,
+
+ 24, 14, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 51, 1, 33, 33, 46, 4, 6,
+ 29, 1, 0, 0, 0, 0, 0, 0, 17, 0,
+ 0, 0, 0, 0, 0, 0, 33, 46, 0, 0,
+ 19, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 46, 0, 18, 20, 21, 0, 15, 0, 0,
+ 13, 0, 23, 0, 46, 22, 21, 0, 16, 0,
+ 11, 0, 45, 22, 0, 0, 11, 11, 10, 0,
+ 0, 0, 9, 11, 0, 0, 12, 0
+ } ;
+
+static const YY_CHAR yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 2, 2, 4, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 5, 6, 7, 8, 1, 9, 10, 1, 11,
+ 9, 12, 13, 9, 14, 15, 16, 17, 18, 18,
+ 18, 18, 18, 18, 18, 19, 19, 20, 9, 21,
+ 22, 23, 20, 1, 24, 24, 24, 24, 25, 24,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 27,
+ 26, 26, 26, 26, 28, 26, 26, 29, 26, 26,
+ 9, 1, 9, 9, 26, 1, 30, 24, 31, 32,
+
+ 33, 34, 35, 26, 36, 26, 26, 37, 38, 39,
+ 40, 41, 26, 42, 43, 44, 45, 46, 26, 47,
+ 26, 26, 9, 48, 9, 9, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+static const YY_CHAR yy_meta[49] =
+ { 0,
+ 1, 2, 3, 3, 4, 5, 1, 6, 5, 5,
+ 7, 8, 5, 5, 9, 5, 10, 10, 10, 5,
+ 6, 5, 6, 10, 10, 10, 10, 10, 10, 10,
+ 10, 11, 10, 10, 10, 10, 10, 10, 11, 10,
+ 10, 10, 10, 10, 10, 10, 10, 5
+ } ;
+
+static const flex_int16_t yy_base[208] =
+ { 0,
+ 0, 0, 46, 48, 60, 0, 50, 52, 107, 152,
+ 110, 112, 114, 116, 538, 121, 539, 533, 533, 513,
+ 125, 526, 539, 523, 519, 517, 112, 109, 182, 213,
+ 112, 508, 113, 0, 496, 480, 134, 523, 523, 146,
+ 0, 122, 130, 514, 539, 539, 158, 520, 520, 135,
+ 489, 122, 131, 485, 478, 480, 485, 166, 539, 539,
+ 165, 170, 174, 539, 539, 539, 539, 179, 539, 0,
+ 192, 217, 230, 165, 259, 0, 169, 539, 539, 539,
+ 539, 539, 0, 483, 539, 188, 512, 512, 539, 210,
+ 510, 510, 244, 539, 0, 200, 0, 216, 539, 496,
+
+ 237, 539, 451, 144, 440, 412, 400, 164, 412, 403,
+ 402, 395, 384, 539, 0, 221, 237, 370, 539, 539,
+ 0, 259, 369, 370, 355, 334, 327, 333, 539, 297,
+ 297, 291, 294, 291, 292, 281, 239, 284, 283, 0,
+ 539, 287, 278, 280, 284, 271, 251, 265, 257, 253,
+ 250, 248, 247, 539, 539, 0, 236, 539, 225, 205,
+ 283, 198, 539, 176, 169, 292, 0, 159, 539, 143,
+ 296, 135, 0, 300, 19, 301, 265, 305, 539, 309,
+ 18, 310, 311, 271, 0, 201, 539, 539, 331, 342,
+ 353, 364, 367, 369, 380, 391, 402, 413, 418, 429,
+
+ 440, 451, 462, 473, 482, 493, 501
+ } ;
+
+static const flex_int16_t yy_def[208] =
+ { 0,
+ 188, 1, 189, 189, 188, 5, 190, 190, 191, 191,
+ 190, 190, 190, 190, 188, 188, 188, 188, 188, 188,
+ 192, 188, 188, 188, 188, 188, 188, 188, 193, 193,
+ 188, 188, 188, 194, 194, 188, 195, 188, 188, 196,
+ 197, 188, 198, 199, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 192, 188, 192, 188, 188, 188, 188, 193, 188, 200,
+ 193, 30, 193, 73, 73, 30, 73, 188, 188, 188,
+ 188, 188, 194, 194, 188, 195, 188, 188, 188, 196,
+ 188, 188, 196, 188, 197, 188, 201, 202, 188, 199,
+
+ 188, 188, 188, 188, 188, 188, 188, 203, 188, 188,
+ 188, 188, 188, 188, 200, 75, 75, 194, 188, 188,
+ 201, 202, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 73, 194, 188, 204,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 194, 188, 188, 188, 205, 188, 188, 188, 188,
+ 188, 188, 188, 188, 194, 188, 205, 188, 188, 188,
+ 206, 188, 194, 188, 188, 188, 206, 206, 188, 188,
+ 188, 188, 188, 206, 207, 207, 188, 0, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+
+ 188, 188, 188, 188, 188, 188, 188
+ } ;
+
+static const flex_int16_t yy_nxt[588] =
+ { 0,
+ 16, 17, 18, 19, 17, 20, 21, 22, 23, 24,
+ 23, 23, 25, 26, 27, 28, 29, 30, 30, 16,
+ 31, 32, 33, 34, 34, 34, 34, 34, 34, 34,
+ 34, 35, 34, 34, 34, 34, 34, 34, 34, 34,
+ 34, 34, 34, 34, 34, 34, 34, 36, 38, 39,
+ 38, 39, 18, 19, 18, 19, 184, 40, 181, 40,
+ 41, 42, 18, 19, 42, 41, 41, 41, 41, 41,
+ 41, 41, 41, 41, 41, 43, 41, 41, 41, 41,
+ 41, 41, 41, 44, 44, 44, 44, 44, 44, 44,
+ 44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
+
+ 44, 44, 44, 44, 44, 44, 44, 41, 47, 48,
+ 49, 47, 18, 19, 18, 19, 18, 19, 18, 19,
+ 69, 58, 50, 96, 70, 58, 96, 58, 68, 68,
+ 68, 62, 78, 79, 81, 82, 87, 88, 51, 52,
+ 58, 69, 53, 54, 63, 98, 69, 55, 91, 92,
+ 70, 56, 57, 47, 48, 49, 47, 93, 104, 101,
+ 105, 94, 101, 106, 108, 188, 58, 50, 107, 109,
+ 58, 114, 58, 180, 58, 176, 58, 188, 188, 124,
+ 62, 188, 188, 51, 52, 58, 125, 53, 54, 58,
+ 87, 88, 55, 63, 175, 130, 56, 57, 72, 72,
+
+ 173, 96, 131, 73, 96, 73, 73, 187, 73, 74,
+ 75, 73, 91, 92, 73, 172, 73, 115, 73, 73,
+ 115, 188, 73, 187, 73, 188, 74, 171, 75, 76,
+ 76, 76, 73, 72, 72, 71, 170, 73, 101, 73,
+ 77, 101, 71, 71, 74, 73, 91, 92, 137, 71,
+ 71, 188, 188, 73, 73, 93, 73, 77, 169, 94,
+ 115, 74, 73, 115, 137, 137, 161, 188, 188, 161,
+ 73, 188, 188, 188, 188, 116, 116, 116, 168, 166,
+ 165, 137, 116, 117, 161, 164, 163, 161, 116, 116,
+ 116, 117, 116, 174, 162, 160, 174, 178, 179, 179,
+
+ 178, 174, 182, 159, 174, 182, 178, 179, 179, 178,
+ 183, 182, 183, 183, 182, 183, 185, 158, 157, 156,
+ 155, 153, 152, 151, 150, 149, 148, 147, 146, 145,
+ 185, 37, 37, 37, 37, 37, 37, 37, 37, 37,
+ 37, 37, 45, 45, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 46, 46, 46, 46, 46, 46, 46,
+ 46, 46, 46, 46, 61, 144, 143, 61, 61, 142,
+ 61, 61, 61, 61, 61, 71, 71, 71, 83, 83,
+ 86, 86, 86, 86, 86, 86, 86, 141, 86, 86,
+ 86, 90, 90, 90, 90, 90, 90, 90, 90, 90,
+
+ 90, 90, 95, 140, 139, 138, 95, 95, 95, 95,
+ 95, 95, 95, 97, 97, 97, 97, 97, 97, 97,
+ 97, 97, 97, 97, 100, 136, 135, 100, 100, 115,
+ 115, 134, 115, 115, 115, 115, 115, 115, 115, 115,
+ 121, 133, 132, 128, 121, 121, 121, 121, 121, 121,
+ 121, 122, 122, 127, 122, 122, 122, 122, 122, 122,
+ 122, 122, 129, 129, 129, 129, 129, 129, 129, 129,
+ 129, 126, 129, 154, 154, 154, 154, 154, 154, 154,
+ 154, 154, 167, 167, 123, 167, 167, 167, 167, 167,
+ 167, 167, 167, 177, 177, 177, 177, 177, 177, 177,
+
+ 177, 177, 177, 177, 186, 186, 99, 186, 186, 186,
+ 186, 186, 120, 120, 119, 119, 118, 113, 112, 111,
+ 110, 103, 102, 102, 99, 89, 89, 85, 84, 80,
+ 67, 66, 65, 64, 60, 59, 59, 188, 15, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188
+ } ;
+
+static const flex_int16_t yy_chk[588] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 3, 3,
+ 4, 4, 7, 7, 8, 8, 181, 3, 175, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+
+ 5, 5, 5, 5, 5, 5, 5, 5, 9, 9,
+ 9, 9, 11, 11, 12, 12, 13, 13, 14, 14,
+ 28, 16, 9, 42, 28, 21, 42, 16, 27, 27,
+ 27, 21, 31, 31, 33, 33, 37, 37, 9, 9,
+ 16, 43, 9, 9, 21, 43, 50, 9, 40, 40,
+ 50, 9, 9, 10, 10, 10, 10, 40, 52, 47,
+ 52, 40, 47, 52, 53, 61, 58, 10, 52, 53,
+ 62, 61, 58, 172, 63, 170, 62, 74, 74, 104,
+ 63, 77, 77, 10, 10, 58, 104, 10, 10, 62,
+ 86, 86, 10, 63, 168, 108, 10, 10, 29, 29,
+
+ 165, 96, 108, 68, 96, 68, 29, 186, 29, 29,
+ 29, 68, 90, 90, 29, 164, 71, 98, 71, 68,
+ 98, 90, 29, 186, 71, 90, 29, 162, 29, 30,
+ 30, 30, 71, 72, 72, 72, 160, 30, 101, 30,
+ 30, 101, 73, 73, 72, 30, 93, 93, 116, 117,
+ 117, 137, 137, 30, 73, 93, 73, 30, 159, 93,
+ 122, 72, 73, 122, 117, 116, 148, 177, 177, 148,
+ 73, 75, 75, 184, 184, 75, 75, 75, 157, 153,
+ 152, 117, 75, 75, 161, 151, 150, 161, 75, 75,
+ 75, 75, 75, 166, 149, 147, 166, 171, 171, 171,
+
+ 171, 174, 176, 146, 174, 176, 178, 178, 178, 178,
+ 180, 182, 183, 180, 182, 183, 182, 145, 144, 143,
+ 142, 139, 138, 136, 135, 134, 133, 132, 131, 130,
+ 182, 189, 189, 189, 189, 189, 189, 189, 189, 189,
+ 189, 189, 190, 190, 190, 190, 190, 190, 190, 190,
+ 190, 190, 190, 191, 191, 191, 191, 191, 191, 191,
+ 191, 191, 191, 191, 192, 128, 127, 192, 192, 126,
+ 192, 192, 192, 192, 192, 193, 193, 193, 194, 194,
+ 195, 195, 195, 195, 195, 195, 195, 125, 195, 195,
+ 195, 196, 196, 196, 196, 196, 196, 196, 196, 196,
+
+ 196, 196, 197, 124, 123, 118, 197, 197, 197, 197,
+ 197, 197, 197, 198, 198, 198, 198, 198, 198, 198,
+ 198, 198, 198, 198, 199, 113, 112, 199, 199, 200,
+ 200, 111, 200, 200, 200, 200, 200, 200, 200, 200,
+ 201, 110, 109, 107, 201, 201, 201, 201, 201, 201,
+ 201, 202, 202, 106, 202, 202, 202, 202, 202, 202,
+ 202, 202, 203, 203, 203, 203, 203, 203, 203, 203,
+ 203, 105, 203, 204, 204, 204, 204, 204, 204, 204,
+ 204, 204, 205, 205, 103, 205, 205, 205, 205, 205,
+ 205, 205, 205, 206, 206, 206, 206, 206, 206, 206,
+
+ 206, 206, 206, 206, 207, 207, 100, 207, 207, 207,
+ 207, 207, 92, 91, 88, 87, 84, 57, 56, 55,
+ 54, 51, 49, 48, 44, 39, 38, 36, 35, 32,
+ 26, 25, 24, 22, 20, 19, 18, 15, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188, 188, 188, 188,
+ 188, 188, 188, 188, 188, 188, 188
+ } ;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+#line 1 "src/compiler/glsl/glcpp/glcpp-lex.l"
+#line 2 "src/compiler/glsl/glcpp/glcpp-lex.l"
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "glcpp.h"
+#include "glcpp-parse.h"
+
+/* Flex annoyingly generates some functions without making them
+ * static. Let's declare them here. */
+int glcpp_get_column (yyscan_t yyscanner);
+void glcpp_set_column (int column_no , yyscan_t yyscanner);
+
+#ifdef _MSC_VER
+#define YY_NO_UNISTD_H
+#endif
+
+#define YY_NO_INPUT
+
+#define YY_USER_ACTION \
+ do { \
+ if (parser->has_new_line_number) \
+ yylineno = parser->new_line_number; \
+ if (parser->has_new_source_number) \
+ yylloc->source = parser->new_source_number; \
+ yylloc->first_column = yycolumn + 1; \
+ yylloc->first_line = yylloc->last_line = yylineno; \
+ yycolumn += yyleng; \
+ yylloc->last_column = yycolumn + 1; \
+ parser->has_new_line_number = 0; \
+ parser->has_new_source_number = 0; \
+ } while(0);
+
+#define YY_USER_INIT \
+ do { \
+ yylineno = 1; \
+ yycolumn = 0; \
+ yylloc->source = 0; \
+ } while(0)
+
+/* It's ugly to have macros that have return statements inside of
+ * them, but flex-based lexer generation is all built around the
+ * return statement.
+ *
+ * To mitigate the ugliness, we defer as much of the logic as possible
+ * to an actual function, not a macro (see
+ * glcpp_lex_update_state_per_token) and we make the word RETURN
+ * prominent in all of the macros which may return.
+ *
+ * The most-commonly-used macro is RETURN_TOKEN which will perform all
+ * necessary state updates based on the provided token, then
+ * conditionally return the token. It will not return a token if the
+ * parser is currently skipping tokens, (such as within #if
+ * 0...#else).
+ *
+ * The RETURN_TOKEN_NEVER_SKIP macro is a lower-level variant that
+ * makes the token returning unconditional. This is needed for things
+ * like #if and the tokens of its condition, (since these must be
+ * evaluated by the parser even when otherwise skipping).
+ *
+ * Finally, RETURN_STRING_TOKEN is a simple convenience wrapper on top
+ * of RETURN_TOKEN that performs a string copy of yytext before the
+ * return.
+ */
+#define RETURN_TOKEN_NEVER_SKIP(token) \
+ do { \
+ if (glcpp_lex_update_state_per_token (parser, token)) \
+ return token; \
+ } while (0)
+
+#define RETURN_TOKEN(token) \
+ do { \
+ if (! parser->skipping) { \
+ RETURN_TOKEN_NEVER_SKIP(token); \
+ } \
+ } while(0)
+
+#define RETURN_STRING_TOKEN(token) \
+ do { \
+ if (! parser->skipping) { \
+ /* We're not doing linear_strdup here, to avoid \
+ * an implicit call on strlen() for the length \
+ * of the string, as this is already found by \
+ * flex and stored in yyleng */ \
+ void *mem_ctx = yyextra->linalloc; \
+ yylval->str = linear_alloc_child(mem_ctx, \
+ yyleng + 1); \
+ memcpy(yylval->str, yytext, yyleng + 1); \
+ RETURN_TOKEN_NEVER_SKIP (token); \
+ } \
+ } while(0)
+
+
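+/* Illustrative sketch (not part of the generated scanner): a rule in
+ * glcpp-lex.l would use these macros along the lines of
+ *
+ *	{IDENTIFIER}	{ RETURN_STRING_TOKEN (IDENTIFIER); }
+ *
+ * where IDENTIFIER is assumed here to be a token defined by the parser
+ * in glcpp-parse.h.
+ */
+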
+/* Update all state necessary for each token being returned.
+ *
+ * Here we'll be tracking newlines and spaces so that the lexer can
+ * alter its behavior as necessary, (for example, '#' has special
+ * significance if it is the first non-whitespace, non-comment token
+ * in a line, but does not otherwise).
+ *
+ * NOTE: If this function returns FALSE, then no token should be
+ * returned at all. This is used to suppress duplicate SPACE tokens.
+ */
+static int
+glcpp_lex_update_state_per_token (glcpp_parser_t *parser, int token)
+{
+ if (token != NEWLINE && token != SPACE && token != HASH_TOKEN &&
+ !parser->lexing_version_directive) {
+ glcpp_parser_resolve_implicit_version(parser);
+ }
+
+ /* After the first non-space token in a line, we won't
+ * allow any '#' to introduce a directive. */
+ if (token == NEWLINE) {
+ parser->first_non_space_token_this_line = 1;
+ } else if (token != SPACE) {
+ parser->first_non_space_token_this_line = 0;
+ }
+
+ /* Track newlines just to know whether a newline needs
+ * to be inserted if end-of-file comes early. */
+ if (token == NEWLINE) {
+ parser->last_token_was_newline = 1;
+ } else {
+ parser->last_token_was_newline = 0;
+ }
+
+ /* Track spaces to avoid emitting multiple SPACE
+ * tokens in a row. */
+ if (token == SPACE) {
+ if (! parser->last_token_was_space) {
+ parser->last_token_was_space = 1;
+ return 1;
+ } else {
+ parser->last_token_was_space = 1;
+ return 0;
+ }
+ } else {
+ parser->last_token_was_space = 0;
+ return 1;
+ }
+}
+
+
+#line 1015 "src/compiler/glsl/glcpp/glcpp-lex.c"
+#line 178 "src/compiler/glsl/glcpp/glcpp-lex.l"
+ /* Note: When adding any start conditions to this list, you must also
+ * update the "Internal compiler error" catch-all rule near the end of
+ * this file. */
+
+/* The OTHER class is simply a catch-all for things that the CPP
+parser just doesn't care about. Since flex regular expressions that
+match longer strings take priority over those matching shorter
+strings, we have to be careful to avoid OTHER matching and hiding
+something that CPP does care about. So we simply exclude all
+characters that appear in any other expressions. */
+#line 1027 "src/compiler/glsl/glcpp/glcpp-lex.c"
+
+#define INITIAL 0
+#define COMMENT 1
+#define DEFINE 2
+#define DONE 3
+#define HASH 4
+#define NEWLINE_CATCHUP 5
+#define UNREACHABLE 6
+
+#define YY_EXTRA_TYPE glcpp_parser_t *
+
+/* Holds the entire state of the reentrant scanner. */
+struct yyguts_t
+ {
+
+ /* User-defined. Not touched by flex. */
+ YY_EXTRA_TYPE yyextra_r;
+
+ /* The rest are the same as the globals declared in the non-reentrant scanner. */
+ FILE *yyin_r, *yyout_r;
+ size_t yy_buffer_stack_top; /**< index of top of stack. */
+ size_t yy_buffer_stack_max; /**< capacity of stack. */
+ YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. */
+ char yy_hold_char;
+ int yy_n_chars;
+ int yyleng_r;
+ char *yy_c_buf_p;
+ int yy_init;
+ int yy_start;
+ int yy_did_buffer_switch_on_eof;
+ int yy_start_stack_ptr;
+ int yy_start_stack_depth;
+ int *yy_start_stack;
+ yy_state_type yy_last_accepting_state;
+ char* yy_last_accepting_cpos;
+
+ int yylineno_r;
+ int yy_flex_debug_r;
+
+ char *yytext_r;
+ int yy_more_flag;
+ int yy_more_len;
+
+ YYSTYPE * yylval_r;
+
+ YYLTYPE * yylloc_r;
+
+ }; /* end struct yyguts_t */
+
+static int yy_init_globals ( yyscan_t yyscanner );
+
+ /* This must go here because YYSTYPE and YYLTYPE are included
+ * from bison output in section 1.*/
+ # define yylval yyg->yylval_r
+
+ # define yylloc yyg->yylloc_r
+
+int yylex_init (yyscan_t* scanner);
+
+int yylex_init_extra ( YY_EXTRA_TYPE user_defined, yyscan_t* scanner);
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int yylex_destroy ( yyscan_t yyscanner );
+
+int yyget_debug ( yyscan_t yyscanner );
+
+void yyset_debug ( int debug_flag , yyscan_t yyscanner );
+
+YY_EXTRA_TYPE yyget_extra ( yyscan_t yyscanner );
+
+void yyset_extra ( YY_EXTRA_TYPE user_defined , yyscan_t yyscanner );
+
+FILE *yyget_in ( yyscan_t yyscanner );
+
+void yyset_in ( FILE * _in_str , yyscan_t yyscanner );
+
+FILE *yyget_out ( yyscan_t yyscanner );
+
+void yyset_out ( FILE * _out_str , yyscan_t yyscanner );
+
+ int yyget_leng ( yyscan_t yyscanner );
+
+char *yyget_text ( yyscan_t yyscanner );
+
+int yyget_lineno ( yyscan_t yyscanner );
+
+void yyset_lineno ( int _line_number , yyscan_t yyscanner );
+
+int yyget_column ( yyscan_t yyscanner );
+
+void yyset_column ( int _column_no , yyscan_t yyscanner );
+
+YYSTYPE * yyget_lval ( yyscan_t yyscanner );
+
+void yyset_lval ( YYSTYPE * yylval_param , yyscan_t yyscanner );
+
+ YYLTYPE *yyget_lloc ( yyscan_t yyscanner );
+
+ void yyset_lloc ( YYLTYPE * yylloc_param , yyscan_t yyscanner );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int yywrap ( yyscan_t yyscanner );
+#else
+extern int yywrap ( yyscan_t yyscanner );
+#endif
+#endif
+
+#ifndef YY_NO_UNPUT
+
+ static void yyunput ( int c, char *buf_ptr , yyscan_t yyscanner);
+
+#endif
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy ( char *, const char *, int , yyscan_t yyscanner);
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen ( const char * , yyscan_t yyscanner);
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+static int yyinput ( yyscan_t yyscanner );
+#else
+static int input ( yyscan_t yyscanner );
+#endif
+
+#endif
+
+ static void yy_push_state ( int _new_state , yyscan_t yyscanner);
+
+ static void yy_pop_state ( yyscan_t yyscanner );
+
+ static int yy_top_state ( yyscan_t yyscanner );
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
+#define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NUL's,
+ * we now use fwrite().
+ */
+#define ECHO do { if (fwrite( yytext, (size_t) yyleng, 1, yyout )) {} } while (0)
+#endif
+
+/* Gets input and stuffs it into "buf". number of characters read, or YY_NULL,
+ * is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
+ { \
+ int c = '*'; \
+ int n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else \
+ { \
+ errno=0; \
+ while ( (result = (int) fread(buf, 1, (yy_size_t) max_size, yyin)) == 0 && ferror(yyin)) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(yyin); \
+ } \
+ }\
+\
+
+#endif
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg , yyscanner)
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int yylex \
+ (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner);
+
+#define YY_DECL int yylex \
+ (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK /*LINTED*/break;
+#endif
+
+#define YY_RULE_SETUP \
+ YY_USER_ACTION
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ yy_state_type yy_current_state;
+ char *yy_cp, *yy_bp;
+ int yy_act;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yylval = yylval_param;
+
+ yylloc = yylloc_param;
+
+ if ( !yyg->yy_init )
+ {
+ yyg->yy_init = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! yyg->yy_start )
+ yyg->yy_start = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ yyensure_buffer_stack (yyscanner);
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner);
+ }
+
+ yy_load_buffer_state( yyscanner );
+ }
+
+ {
+#line 208 "src/compiler/glsl/glcpp/glcpp-lex.l"
+
+
+ glcpp_parser_t *parser = yyextra;
+
+ /* When we lex a multi-line comment, we replace it (as
+ * specified) with a single space. But if the comment spanned
+ * multiple lines, then subsequent parsing stages will not
+ * count correct line numbers. To avoid this problem we keep
+ * track of all newlines that were commented out by a
+ * multi-line comment, and we emit a NEWLINE token for each at
+ * the next legal opportunity, (which is when the lexer would
+ * be emitting a NEWLINE token anyway).
+ */
+ if (YY_START == NEWLINE_CATCHUP) {
+ if (parser->commented_newlines)
+ parser->commented_newlines--;
+ if (parser->commented_newlines == 0)
+ BEGIN INITIAL;
+ RETURN_TOKEN_NEVER_SKIP (NEWLINE);
+ }
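+
+	/* For example, if a multi-line comment swallowed three newlines,
+	 * commented_newlines will be 3 and the next three entries into
+	 * the lexer each emit one NEWLINE from the block above, with the
+	 * last one switching back to INITIAL, (a schematic illustration). */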
+
+ /* Set up the parser->skipping bit here before doing any lexing.
+ *
+ * This bit controls whether tokens are skipped, (as implemented by
+ * RETURN_TOKEN), such as between "#if 0" and "#endif".
+ *
+ * The parser maintains a skip_stack indicating whether we should be
+	 * skipping, (and nested levels of #if/#ifdef/#ifndef/#endif will
+	 * push and pop items from the stack).
+ *
+ * Here are the rules for determining whether we are skipping:
+ *
+ * 1. If the skip stack is NULL, we are outside of all #if blocks
+ * and we are not skipping.
+ *
+ * 2. If the skip stack is non-NULL, the type of the top node in
+ * the stack determines whether to skip. A type of
+	 *    SKIP_NO_SKIP is used for blocks where we are emitting
+ * tokens, (such as between #if 1 and #endif, or after the
+ * #else of an #if 0, etc.).
+ *
+ * 3. The lexing_directive bit overrides the skip stack. This bit
+ * is set when we are actively lexing the expression for a
+ * pre-processor condition, (such as #if, #elif, or #else). In
+ * this case, even if otherwise skipping, we need to emit the
+ * tokens for this condition so that the parser can evaluate
+	 *    the expression. (For #else, there's no expression, but we
+ * emit tokens so the parser can generate a nice error message
+ * if there are any tokens here).
+ */
+ if (parser->skip_stack &&
+ parser->skip_stack->type != SKIP_NO_SKIP &&
+ ! parser->lexing_directive)
+ {
+ parser->skipping = 1;
+ } else {
+ parser->skipping = 0;
+ }
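+
+	/* Schematically, given input such as:
+	 *
+	 *	#if 0
+	 *	foo
+	 *	#endif
+	 *
+	 * the directive tokens are returned unconditionally, the tokens
+	 * of the "#if" condition are returned because lexing_directive
+	 * is set while the condition is lexed, "foo" is suppressed by
+	 * the skipping bit, and every NEWLINE is returned regardless. */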
+
+ /* Single-line comments */
+#line 1374 "src/compiler/glsl/glcpp/glcpp-lex.c"
+
+ while ( /*CONSTCOND*/1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = yyg->yy_c_buf_p;
+
+ /* Support of yytext. */
+ *yy_cp = yyg->yy_hold_char;
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = yyg->yy_start;
+yy_match:
+ do
+ {
+ YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 189 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ ++yy_cp;
+ }
+ while ( yy_current_state != 188 );
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = yyg->yy_hold_char;
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+ goto yy_find_action;
+
+case 1:
+YY_RULE_SETUP
+#line 268 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+}
+ YY_BREAK
+/* Multi-line comments */
+case 2:
+YY_RULE_SETUP
+#line 272 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{ yy_push_state(COMMENT, yyscanner); }
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+#line 273 "src/compiler/glsl/glcpp/glcpp-lex.l"
+
+ YY_BREAK
+case 4:
+/* rule 4 can match eol */
+YY_RULE_SETUP
+#line 274 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{ yylineno++; yycolumn = 0; parser->commented_newlines++; }
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 275 "src/compiler/glsl/glcpp/glcpp-lex.l"
+
+ YY_BREAK
+case 6:
+/* rule 6 can match eol */
+YY_RULE_SETUP
+#line 276 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{ yylineno++; yycolumn = 0; parser->commented_newlines++; }
+ YY_BREAK
+case 7:
+YY_RULE_SETUP
+#line 277 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ yy_pop_state(yyscanner);
+ /* In the <HASH> start condition, we don't want any SPACE token. */
+ if (yyextra->space_tokens && YY_START != HASH)
+ RETURN_TOKEN (SPACE);
+}
+ YY_BREAK
+case 8:
+YY_RULE_SETUP
+#line 284 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+
+ /* If the '#' is the first non-whitespace, non-comment token on this
+ * line, then it introduces a directive, switch to the <HASH> start
+ * condition.
+ *
+ * Otherwise, this is just punctuation, so return the HASH_TOKEN
+ * token. */
+ if (parser->first_non_space_token_this_line) {
+ BEGIN HASH;
+ yyextra->in_define = false;
+ }
+
+ RETURN_TOKEN_NEVER_SKIP (HASH_TOKEN);
+}
+ YY_BREAK
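+/* For example, the leading '#' of "#define X 1" switches to <HASH> and
+ * introduces a directive, while a '#' appearing later in a line is
+ * returned as plain HASH_TOKEN punctuation for the parser to handle. */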
+case 9:
+YY_RULE_SETUP
+#line 300 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ yyextra->lexing_version_directive = 1;
+ RETURN_STRING_TOKEN (VERSION_TOKEN);
+}
+ YY_BREAK
+/* Swallow empty #pragma directives, (to avoid confusing the
+ * downstream compiler).
+ *
+ * Note: We use a simple regular expression for the lookahead
+ * here. Specifically, we cannot use the complete {NEWLINE} expression
+ * since it uses alternation and we've found that there's a flex bug
+ * where using alternation in the lookahead portion of a pattern
+ * triggers a buffer overrun. */
+case 10:
+/* rule 10 can match eol */
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+YY_LINENO_REWIND_TO(yy_cp - 1);
+yyg->yy_c_buf_p = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 315 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+}
+ YY_BREAK
+/* glcpp doesn't handle #extension, #version, or #pragma directives.
+ * Simply pass them through to the main compiler's lexer/parser. */
+case 11:
+YY_RULE_SETUP
+#line 321 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (PRAGMA);
+}
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 326 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (INCLUDE);
+}
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 331 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ RETURN_TOKEN (LINE);
+}
+ YY_BREAK
+case 14:
+/* rule 14 can match eol */
+YY_RULE_SETUP
+#line 336 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ yylineno++;
+ yycolumn = 0;
+ RETURN_TOKEN_NEVER_SKIP (NEWLINE);
+}
+ YY_BREAK
+/* For the pre-processor directives, we return these tokens
+ * even when we are otherwise skipping. */
+case 15:
+YY_RULE_SETUP
+#line 346 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (IFDEF);
+ }
+}
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+#line 355 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (IFNDEF);
+ }
+}
+ YY_BREAK
+case 17:
+/* rule 17 can match eol */
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+YY_LINENO_REWIND_TO(yy_bp + 2);
+yyg->yy_c_buf_p = yy_cp = yy_bp + 2;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 364 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (IF);
+ }
+}
+ YY_BREAK
+case 18:
+/* rule 18 can match eol */
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+YY_LINENO_REWIND_TO(yy_bp + 4);
+yyg->yy_c_buf_p = yy_cp = yy_bp + 4;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 373 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (ELIF);
+ }
+}
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+#line 382 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (ELSE);
+ }
+}
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+#line 390 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (ENDIF);
+ }
+}
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+#line 398 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (ERROR_TOKEN);
+}
+ YY_BREAK
+/* After we see a "#define" we enter the <DEFINE> start state
+ * for the lexer. Within <DEFINE> we are looking for the first
+ * identifier and specifically checking whether the identifier
+ * is followed by a '(' or not, (to lex either a
+ * FUNC_IDENTIFIER or an OBJ_IDENTIFIER token).
+ *
+ * While in the <DEFINE> state we also need to explicitly
+ * handle a few other things that may appear before the
+ * identifier:
+ *
+ * * Comments, (handled above with the main support for
+ * comments).
+ *
+ * * Whitespace (simply ignored)
+ *
+ * * Anything else, (not an identifier, not a comment,
+ * and not whitespace). This will generate an error.
+ */
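+/* For example, "#define F(x) x" lexes F as a FUNC_IDENTIFIER, (the '('
+ * being immediately adjacent), while "#define F (x) x" lexes F as an
+ * OBJ_IDENTIFIER whose replacement text begins with "(x)", matching
+ * standard cpp behavior. */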
+case 22:
+YY_RULE_SETUP
+#line 421 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ yyextra->in_define = true;
+ if (!parser->skipping) {
+ BEGIN DEFINE;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN (DEFINE_TOKEN);
+ }
+}
+ YY_BREAK
+case 23:
+YY_RULE_SETUP
+#line 430 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN (UNDEF);
+}
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 436 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ /* Nothing to do here. Importantly, don't leave the <HASH>
+ * start condition, since it's legal to have space between the
+	 * '#' and the directive. */
+}
+ YY_BREAK
+/* This will catch any non-directive garbage after a HASH */
+case 25:
+YY_RULE_SETUP
+#line 443 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (!parser->skipping) {
+ BEGIN INITIAL;
+ RETURN_TOKEN (GARBAGE);
+ }
+}
+ YY_BREAK
+/* An identifier immediately followed by '(' */
+case 26:
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+yyg->yy_c_buf_p = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 451 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (FUNC_IDENTIFIER);
+}
+ YY_BREAK
+/* An identifier not immediately followed by '(' */
+case 27:
+YY_RULE_SETUP
+#line 457 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (OBJ_IDENTIFIER);
+}
+ YY_BREAK
+/* Whitespace */
+case 28:
+YY_RULE_SETUP
+#line 463 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ /* Just ignore it. Nothing to do here. */
+}
+ YY_BREAK
+/* '/' not followed by '*', so not a comment. This is an error. */
+case 29:
+/* rule 29 can match eol */
+YY_RULE_SETUP
+#line 468 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ glcpp_error(yylloc, yyextra, "#define followed by a non-identifier: %s", yytext);
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+ YY_BREAK
+/* A character that can't start an identifier, comment, or
+ * space. This is an error. */
+case 30:
+YY_RULE_SETUP
+#line 476 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ BEGIN INITIAL;
+ glcpp_error(yylloc, yyextra, "#define followed by a non-identifier: %s", yytext);
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+ YY_BREAK
+case 31:
+YY_RULE_SETUP
+#line 482 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+#line 486 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+#line 490 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+ YY_BREAK
+case 34:
+YY_RULE_SETUP
+#line 494 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (LEFT_SHIFT);
+}
+ YY_BREAK
+case 35:
+YY_RULE_SETUP
+#line 498 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (RIGHT_SHIFT);
+}
+ YY_BREAK
+case 36:
+YY_RULE_SETUP
+#line 502 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (LESS_OR_EQUAL);
+}
+ YY_BREAK
+case 37:
+YY_RULE_SETUP
+#line 506 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (GREATER_OR_EQUAL);
+}
+ YY_BREAK
+case 38:
+YY_RULE_SETUP
+#line 510 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (EQUAL);
+}
+ YY_BREAK
+case 39:
+YY_RULE_SETUP
+#line 514 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (NOT_EQUAL);
+}
+ YY_BREAK
+case 40:
+YY_RULE_SETUP
+#line 518 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (AND);
+}
+ YY_BREAK
+case 41:
+YY_RULE_SETUP
+#line 522 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (OR);
+}
+ YY_BREAK
+case 42:
+YY_RULE_SETUP
+#line 526 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (PLUS_PLUS);
+}
+ YY_BREAK
+case 43:
+YY_RULE_SETUP
+#line 530 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (MINUS_MINUS);
+}
+ YY_BREAK
+case 44:
+YY_RULE_SETUP
+#line 534 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (! parser->skipping) {
+ if (parser->is_gles)
+ glcpp_error(yylloc, yyextra, "Token pasting (##) is illegal in GLES");
+ RETURN_TOKEN (PASTE);
+ }
+}
+ YY_BREAK
+case 45:
+YY_RULE_SETUP
+#line 542 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (DEFINED);
+}
+ YY_BREAK
+case 46:
+YY_RULE_SETUP
+#line 546 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (IDENTIFIER);
+}
+ YY_BREAK
+case 47:
+YY_RULE_SETUP
+#line 550 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (OTHER);
+}
+ YY_BREAK
+case 48:
+YY_RULE_SETUP
+#line 554 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_TOKEN (yytext[0]);
+}
+ YY_BREAK
+case 49:
+YY_RULE_SETUP
+#line 558 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (OTHER);
+}
+ YY_BREAK
+case 50:
+YY_RULE_SETUP
+#line 562 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (yyextra->space_tokens) {
+ RETURN_TOKEN (SPACE);
+ }
+}
+ YY_BREAK
+case 51:
+YY_RULE_SETUP
+#line 568 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ RETURN_STRING_TOKEN (PATH);
+}
+ YY_BREAK
+/* We preserve all newlines, even between #if 0..#endif, so no
+   skipping. */
+case 52:
+/* rule 52 can match eol */
+YY_RULE_SETUP
+#line 574 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (parser->commented_newlines) {
+ BEGIN NEWLINE_CATCHUP;
+ } else {
+ BEGIN INITIAL;
+ }
+ yyextra->space_tokens = 1;
+ yyextra->lexing_directive = 0;
+ yyextra->lexing_version_directive = 0;
+ yylineno++;
+ yycolumn = 0;
+ RETURN_TOKEN_NEVER_SKIP (NEWLINE);
+}
+ YY_BREAK
+case YY_STATE_EOF(INITIAL):
+case YY_STATE_EOF(COMMENT):
+case YY_STATE_EOF(DEFINE):
+case YY_STATE_EOF(HASH):
+#line 588 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ if (YY_START == COMMENT)
+ glcpp_error(yylloc, yyextra, "Unterminated comment");
+ BEGIN DONE; /* Don't keep matching this rule forever. */
+ yyextra->lexing_directive = 0;
+ yyextra->lexing_version_directive = 0;
+ if (! parser->last_token_was_newline)
+ RETURN_TOKEN (NEWLINE);
+}
+ YY_BREAK
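+/* Note the interplay with last_token_was_newline, (maintained in
+ * glcpp_lex_update_state_per_token above): a source string that does
+ * not end in a newline still yields a final NEWLINE token from this
+ * EOF rule, so the directive grammar always sees terminated lines. */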
+/* This is a catch-all to avoid the annoying default flex action which
+ * matches any character and prints it. If any input ever matches this
+ * rule, then we have made a mistake above and need to fix one or more
+ * of the preceding patterns to match that input. */
+case 53:
+YY_RULE_SETUP
+#line 603 "src/compiler/glsl/glcpp/glcpp-lex.l"
+{
+ glcpp_error(yylloc, yyextra, "Internal compiler error: Unexpected character: %s", yytext);
+
+ /* We don't actually use the UNREACHABLE start condition. We
+ only have this block here so that we can pretend to call some
+ generated functions, (to avoid "defined but not used"
+	   warnings). */
+ if (YY_START == UNREACHABLE) {
+ unput('.');
+ yy_top_state(yyextra);
+ }
+}
+ YY_BREAK
+case 54:
+YY_RULE_SETUP
+#line 616 "src/compiler/glsl/glcpp/glcpp-lex.l"
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+#line 1971 "src/compiler/glsl/glcpp/glcpp-lex.c"
+case YY_STATE_EOF(DONE):
+case YY_STATE_EOF(NEWLINE_CATCHUP):
+case YY_STATE_EOF(UNREACHABLE):
+ yyterminate();
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - yyg->yytext_ptr) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = yyg->yy_hold_char;
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * yylex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state , yyscanner);
+
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++yyg->yy_c_buf_p;
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( yyscanner ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ yyg->yy_did_buffer_switch_on_eof = 0;
+
+ if ( yywrap( yyscanner ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ yyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! yyg->yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yyg->yy_c_buf_p =
+ yyg->yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ yy_cp = yyg->yy_c_buf_p;
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ yyg->yy_c_buf_p =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars];
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ yy_cp = yyg->yy_c_buf_p;
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+ } /* end of user's declarations */
+} /* end of yylex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH -
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ char *source = yyg->yytext_ptr;
+ int number_to_move, i;
+ int ret_val;
+
+ if ( yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr - 1);
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0;
+
+ else
+ {
+ int num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;
+
+ int yy_c_buf_p_offset =
+ (int) (yyg->yy_c_buf_p - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+ /* Include room in for 2 EOB chars. */
+ yyrealloc( (void *) b->yy_ch_buf,
+ (yy_size_t) (b->yy_buf_size + 2) , yyscanner );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = NULL;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ yyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ yyg->yy_n_chars, num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ if ( yyg->yy_n_chars == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ yyrestart( yyin , yyscanner);
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if ((yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ int new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc(
+ (void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf, (yy_size_t) new_size , yyscanner );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ /* "- 2" to take care of EOB's */
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size = (int) (new_size - 2);
+ }
+
+ yyg->yy_n_chars += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR;
+
+ yyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (yyscan_t yyscanner)
+{
+ yy_state_type yy_current_state;
+ char *yy_cp;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_current_state = yyg->yy_start;
+
+ for ( yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp )
+ {
+ YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 189 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ }
+
+ return yy_current_state;
+}
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)
+{
+ int yy_is_jam;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */
+ char *yy_cp = yyg->yy_c_buf_p;
+
+ YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 189 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ yy_is_jam = (yy_current_state == 188);
+
+ (void)yyg;
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+#ifndef YY_NO_UNPUT
+
+ static void yyunput (int c, char * yy_bp , yyscan_t yyscanner)
+{
+ char *yy_cp;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_cp = yyg->yy_c_buf_p;
+
+ /* undo effects of setting up yytext */
+ *yy_cp = yyg->yy_hold_char;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ { /* need to shift things up to make room */
+ /* +2 for EOB chars. */
+ int number_to_move = yyg->yy_n_chars + 2;
+ char *dest = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size + 2];
+ char *source =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move];
+
+ while ( source > YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ *--dest = *--source;
+
+ yy_cp += (int) (dest - source);
+ yy_bp += (int) (dest - source);
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars =
+ yyg->yy_n_chars = (int) YY_CURRENT_BUFFER_LVALUE->yy_buf_size;
+
+ if ( yy_cp < YY_CURRENT_BUFFER_LVALUE->yy_ch_buf + 2 )
+ YY_FATAL_ERROR( "flex scanner push-back overflow" );
+ }
+
+ *--yy_cp = (char) c;
+
+ yyg->yytext_ptr = yy_bp;
+ yyg->yy_hold_char = *yy_cp;
+ yyg->yy_c_buf_p = yy_cp;
+}
+
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (yyscan_t yyscanner)
+#else
+ static int input (yyscan_t yyscanner)
+#endif
+
+{
+ int c;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+
+ if ( *yyg->yy_c_buf_p == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )
+ /* This was really a NUL. */
+ *yyg->yy_c_buf_p = '\0';
+
+ else
+ { /* need more input */
+ int offset = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr);
+ ++yyg->yy_c_buf_p;
+
+ switch ( yy_get_next_buffer( yyscanner ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+ /* This happens because yy_g_n_b()
+ * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ yyrestart( yyin , yyscanner);
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( yywrap( yyscanner ) )
+ return 0;
+
+ if ( ! yyg->yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput(yyscanner);
+#else
+ return input(yyscanner);
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yyg->yy_c_buf_p = yyg->yytext_ptr + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */
+ *yyg->yy_c_buf_p = '\0'; /* preserve yytext */
+ yyg->yy_hold_char = *++yyg->yy_c_buf_p;
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ * @param yyscanner The scanner object.
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void yyrestart (FILE * input_file , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if ( ! YY_CURRENT_BUFFER ){
+ yyensure_buffer_stack (yyscanner);
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner);
+ }
+
+ yy_init_buffer( YY_CURRENT_BUFFER, input_file , yyscanner);
+ yy_load_buffer_state( yyscanner );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ * @param yyscanner The scanner object.
+ */
+ void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * yypop_buffer_state();
+ * yypush_buffer_state(new_buffer);
+ */
+ yyensure_buffer_stack (yyscanner);
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ yy_load_buffer_state( yyscanner );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (yywrap()) processing, but the only time this flag
+ * is looked at is after yywrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ yyg->yy_did_buffer_switch_on_eof = 1;
+}
+
+static void yy_load_buffer_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ yyg->yy_hold_char = *yyg->yy_c_buf_p;
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ * @param yyscanner The scanner object.
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE yy_create_buffer (FILE * file, int size , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) , yyscanner );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) yyalloc( (yy_size_t) (b->yy_buf_size + 2) , yyscanner );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ yy_init_buffer( b, file , yyscanner);
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with yy_create_buffer()
+ * @param yyscanner The scanner object.
+ */
+ void yy_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ yyfree( (void *) b->yy_ch_buf , yyscanner );
+
+ yyfree( (void *) b , yyscanner );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a yyrestart() or at EOF.
+ */
+ static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner)
+
+{
+ int oerrno = errno;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_flush_buffer( b , yyscanner);
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then yy_init_buffer was _probably_
+ * called from yyrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ * @param yyscanner The scanner object.
+ */
+ void yy_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ yy_load_buffer_state( yyscanner );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ * @param yyscanner The scanner object.
+ */
+void yypush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if (new_buffer == NULL)
+ return;
+
+ yyensure_buffer_stack(yyscanner);
+
+ /* This block is copied from yy_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ yyg->yy_buffer_stack_top++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from yy_switch_to_buffer. */
+ yy_load_buffer_state( yyscanner );
+ yyg->yy_did_buffer_switch_on_eof = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ * @param yyscanner The scanner object.
+ */
+void yypop_buffer_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ yy_delete_buffer(YY_CURRENT_BUFFER , yyscanner);
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if (yyg->yy_buffer_stack_top > 0)
+ --yyg->yy_buffer_stack_top;
+
+ if (YY_CURRENT_BUFFER) {
+ yy_load_buffer_state( yyscanner );
+ yyg->yy_did_buffer_switch_on_eof = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void yyensure_buffer_stack (yyscan_t yyscanner)
+{
+ yy_size_t num_to_alloc;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (!yyg->yy_buffer_stack) {
+
+ /* First allocation is just for 2 elements, since we don't know if this
+ * scanner will even need a stack. We use 2 instead of 1 to avoid an
+ * immediate realloc on the next call.
+ */
+ num_to_alloc = 1; /* After all that talk, this was set to 1 anyways... */
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)yyalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ , yyscanner);
+ if ( ! yyg->yy_buffer_stack )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ yyg->yy_buffer_stack_max = num_to_alloc;
+ yyg->yy_buffer_stack_top = 0;
+ return;
+ }
+
+ if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ yy_size_t grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = yyg->yy_buffer_stack_max + grow_size;
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)yyrealloc
+ (yyg->yy_buffer_stack,
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ , yyscanner);
+ if ( ! yyg->yy_buffer_stack )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ /* zero only the new slots.*/
+ memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*));
+ yyg->yy_buffer_stack_max = num_to_alloc;
+ }
+}
+
+/** Set up the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return NULL;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) , yyscanner );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
+
+ b->yy_buf_size = (int) (size - 2); /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = NULL;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ yy_switch_to_buffer( b , yyscanner );
+
+ return b;
+}
+
+/** Set up the input buffer state to scan a string. The next call to yylex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * yy_scan_bytes() instead.
+ */
+YY_BUFFER_STATE yy_scan_string (const char * yystr , yyscan_t yyscanner)
+{
+
+ return yy_scan_bytes( yystr, (int) strlen(yystr) , yyscanner);
+}
+
+/** Set up the input buffer state to scan the given bytes. The next call to yylex() will
+ * scan from a @e copy of @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_bytes (const char * yybytes, int _yybytes_len , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = (yy_size_t) (_yybytes_len + 2);
+ buf = (char *) yyalloc( n , yyscanner );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = yy_scan_buffer( buf, n , yyscanner);
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
+
+ static void yy_push_state (int _new_state , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if ( yyg->yy_start_stack_ptr >= yyg->yy_start_stack_depth )
+ {
+ yy_size_t new_size;
+
+ yyg->yy_start_stack_depth += YY_START_STACK_INCR;
+ new_size = (yy_size_t) yyg->yy_start_stack_depth * sizeof( int );
+
+ if ( ! yyg->yy_start_stack )
+ yyg->yy_start_stack = (int *) yyalloc( new_size , yyscanner );
+
+ else
+ yyg->yy_start_stack = (int *) yyrealloc(
+ (void *) yyg->yy_start_stack, new_size , yyscanner );
+
+ if ( ! yyg->yy_start_stack )
+ YY_FATAL_ERROR( "out of memory expanding start-condition stack" );
+ }
+
+ yyg->yy_start_stack[yyg->yy_start_stack_ptr++] = YY_START;
+
+ BEGIN(_new_state);
+}
+
+ static void yy_pop_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if ( --yyg->yy_start_stack_ptr < 0 )
+ YY_FATAL_ERROR( "start-condition stack underflow" );
+
+ BEGIN(yyg->yy_start_stack[yyg->yy_start_stack_ptr]);
+}
+
+ static int yy_top_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyg->yy_start_stack[yyg->yy_start_stack_ptr - 1];
+}
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yynoreturn yy_fatal_error (const char* msg , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+ fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ yytext[yyleng] = yyg->yy_hold_char; \
+ yyg->yy_c_buf_p = yytext + yyless_macro_arg; \
+ yyg->yy_hold_char = *yyg->yy_c_buf_p; \
+ *yyg->yy_c_buf_p = '\0'; \
+ yyleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the user-defined data for this scanner.
+ * @param yyscanner The scanner object.
+ */
+YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyextra;
+}
+
+/** Get the current line number.
+ * @param yyscanner The scanner object.
+ */
+int yyget_lineno (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (! YY_CURRENT_BUFFER)
+ return 0;
+
+ return yylineno;
+}
+
+/** Get the current column number.
+ * @param yyscanner The scanner object.
+ */
+int yyget_column (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (! YY_CURRENT_BUFFER)
+ return 0;
+
+ return yycolumn;
+}
+
+/** Get the input stream.
+ * @param yyscanner The scanner object.
+ */
+FILE *yyget_in (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyin;
+}
+
+/** Get the output stream.
+ * @param yyscanner The scanner object.
+ */
+FILE *yyget_out (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyout;
+}
+
+/** Get the length of the current token.
+ * @param yyscanner The scanner object.
+ */
+int yyget_leng (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyleng;
+}
+
+/** Get the current token.
+ * @param yyscanner The scanner object.
+ */
+
+char *yyget_text (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yytext;
+}
+
+/** Set the user-defined data. This data is never touched by the scanner.
+ * @param user_defined The data to be associated with this scanner.
+ * @param yyscanner The scanner object.
+ */
+void yyset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyextra = user_defined ;
+}
+
+/** Set the current line number.
+ * @param _line_number line number
+ * @param yyscanner The scanner object.
+ */
+void yyset_lineno (int _line_number , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* lineno is only valid if an input buffer exists. */
+ if (! YY_CURRENT_BUFFER )
+ YY_FATAL_ERROR( "yyset_lineno called with no buffer" );
+
+ yylineno = _line_number;
+}
+
+/** Set the current column.
+ * @param _column_no column number
+ * @param yyscanner The scanner object.
+ */
+void yyset_column (int _column_no , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* column is only valid if an input buffer exists. */
+ if (! YY_CURRENT_BUFFER )
+ YY_FATAL_ERROR( "yyset_column called with no buffer" );
+
+ yycolumn = _column_no;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param _in_str A readable stream.
+ * @param yyscanner The scanner object.
+ * @see yy_switch_to_buffer
+ */
+void yyset_in (FILE * _in_str , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyin = _in_str ;
+}
+
+void yyset_out (FILE * _out_str , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyout = _out_str ;
+}
+
+int yyget_debug (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yy_flex_debug;
+}
+
+void yyset_debug (int _bdebug , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yy_flex_debug = _bdebug ;
+}
+
+/* Accessor methods for yylval and yylloc */
+
+YYSTYPE * yyget_lval (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yylval;
+}
+
+void yyset_lval (YYSTYPE * yylval_param , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yylval = yylval_param;
+}
+
+YYLTYPE *yyget_lloc (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yylloc;
+}
+
+void yyset_lloc (YYLTYPE * yylloc_param , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yylloc = yylloc_param;
+}
+
+/* User-visible API */
+
+/* yylex_init is special because it creates the scanner itself, so it is
+ * the ONLY reentrant function that doesn't take the scanner as the last argument.
+ * That's why we explicitly handle the declaration, instead of using our macros.
+ */
+int yylex_init(yyscan_t* ptr_yy_globals)
+{
+ if (ptr_yy_globals == NULL){
+ errno = EINVAL;
+ return 1;
+ }
+
+ *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), NULL );
+
+ if (*ptr_yy_globals == NULL){
+ errno = ENOMEM;
+ return 1;
+ }
+
+ /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */
+ memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
+
+ return yy_init_globals ( *ptr_yy_globals );
+}
+
+/* yylex_init_extra has the same functionality as yylex_init, but follows the
+ * convention of taking the scanner as the last argument. Note however, that
+ * this is a *pointer* to a scanner, as it will be allocated by this call (and
+ * is the reason, too, why this function also must handle its own declaration).
+ * The user defined value in the first argument will be available to yyalloc in
+ * the yyextra field.
+ */
+int yylex_init_extra( YY_EXTRA_TYPE yy_user_defined, yyscan_t* ptr_yy_globals )
+{
+ struct yyguts_t dummy_yyguts;
+
+ yyset_extra (yy_user_defined, &dummy_yyguts);
+
+ if (ptr_yy_globals == NULL){
+ errno = EINVAL;
+ return 1;
+ }
+
+ *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), &dummy_yyguts );
+
+ if (*ptr_yy_globals == NULL){
+ errno = ENOMEM;
+ return 1;
+ }
+
+ /* By setting to 0xAA, we expose bugs in
+ yy_init_globals. Leave at 0x00 for releases. */
+ memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
+
+ yyset_extra (yy_user_defined, *ptr_yy_globals);
+
+ return yy_init_globals ( *ptr_yy_globals );
+}
+
+static int yy_init_globals (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from yylex_destroy(), so don't allocate here.
+ */
+
+ yyg->yy_buffer_stack = NULL;
+ yyg->yy_buffer_stack_top = 0;
+ yyg->yy_buffer_stack_max = 0;
+ yyg->yy_c_buf_p = NULL;
+ yyg->yy_init = 0;
+ yyg->yy_start = 0;
+
+ yyg->yy_start_stack_ptr = 0;
+ yyg->yy_start_stack_depth = 0;
+ yyg->yy_start_stack = NULL;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ yyin = stdin;
+ yyout = stdout;
+#else
+ yyin = NULL;
+ yyout = NULL;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * yylex_init()
+ */
+ return 0;
+}
+
+/* yylex_destroy is for both reentrant and non-reentrant scanners. */
+int yylex_destroy (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ yy_delete_buffer( YY_CURRENT_BUFFER , yyscanner );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ yypop_buffer_state(yyscanner);
+ }
+
+ /* Destroy the stack itself. */
+ yyfree(yyg->yy_buffer_stack , yyscanner);
+ yyg->yy_buffer_stack = NULL;
+
+ /* Destroy the start condition stack. */
+ yyfree( yyg->yy_start_stack , yyscanner );
+ yyg->yy_start_stack = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * yylex() is called, initialization will occur. */
+ yy_init_globals( yyscanner);
+
+ /* Destroy the main struct (reentrant only). */
+ yyfree ( yyscanner , yyscanner );
+ yyscanner = NULL;
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, const char * s2, int n , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+
+ int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (const char * s , yyscan_t yyscanner)
+{
+ int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *yyalloc (yy_size_t size , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+ return malloc(size);
+}
+
+void *yyrealloc (void * ptr, yy_size_t size , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return realloc(ptr, size);
+}
+
+void yyfree (void * ptr , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+ free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#line 616 "src/compiler/glsl/glcpp/glcpp-lex.l"
+
+
+void
+glcpp_lex_set_source_string(glcpp_parser_t *parser, const char *shader)
+{
+ yy_scan_string(shader, parser->scanner);
+}
+
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l
new file mode 100644
index 0000000000..e07739b657
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l
@@ -0,0 +1,621 @@
+%{
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "glcpp.h"
+#include "glcpp-parse.h"
+
+/* Flex annoyingly generates some functions without making them
+ * static. Let's declare them here. */
+int glcpp_get_column (yyscan_t yyscanner);
+void glcpp_set_column (int column_no , yyscan_t yyscanner);
+
+#ifdef _MSC_VER
+#define YY_NO_UNISTD_H
+#endif
+
+#define YY_NO_INPUT
+
+#define YY_USER_ACTION \
+ do { \
+ if (parser->has_new_line_number) \
+ yylineno = parser->new_line_number; \
+ if (parser->has_new_source_number) \
+ yylloc->source = parser->new_source_number; \
+ yylloc->first_column = yycolumn + 1; \
+ yylloc->first_line = yylloc->last_line = yylineno; \
+ yycolumn += yyleng; \
+ yylloc->last_column = yycolumn + 1; \
+ parser->has_new_line_number = 0; \
+ parser->has_new_source_number = 0; \
+ } while(0);
+
+#define YY_USER_INIT \
+ do { \
+ yylineno = 1; \
+ yycolumn = 0; \
+ yylloc->source = 0; \
+ } while(0)
+
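+/* Taken together, these two macros establish the location convention
+ * used throughout this lexer: yycolumn itself is 0-based (and reset to
+ * zero at each newline), while the column values recorded in yylloc are
+ * 1-based. */
+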
+/* It's ugly to have macros that have return statements inside of
+ * them, but flex-based lexer generation is all built around the
+ * return statement.
+ *
+ * To mitigate the ugliness, we defer as much of the logic as possible
+ * to an actual function, not a macro (see
+ * glcpp_lex_update_state_per_token) and we make the word RETURN
+ * prominent in all of the macros which may return.
+ *
+ * The most-commonly-used macro is RETURN_TOKEN which will perform all
+ * necessary state updates based on the provided token, then
+ * conditionally return the token. It will not return a token if the
+ * parser is currently skipping tokens, (such as within #if
+ * 0...#else).
+ *
+ * The RETURN_TOKEN_NEVER_SKIP macro is a lower-level variant that
+ * makes the token returning unconditional. This is needed for things
+ * like #if and the tokens of its condition, (since these must be
+ * evaluated by the parser even when otherwise skipping).
+ *
+ * Finally, RETURN_STRING_TOKEN is a simple convenience wrapper on top
+ * of RETURN_TOKEN that performs a string copy of yytext before the
+ * return.
+ */
+#define RETURN_TOKEN_NEVER_SKIP(token) \
+ do { \
+ if (glcpp_lex_update_state_per_token (parser, token)) \
+ return token; \
+ } while (0)
+
+#define RETURN_TOKEN(token) \
+ do { \
+ if (! parser->skipping) { \
+ RETURN_TOKEN_NEVER_SKIP(token); \
+ } \
+ } while(0)
+
+#define RETURN_STRING_TOKEN(token) \
+ do { \
+ if (! parser->skipping) { \
+ /* We're not doing linear_strdup here, to avoid \
+ * an implicit call to strlen() for the length \
+ * of the string, as this is already found by \
+ * flex and stored in yyleng */ \
+ void *mem_ctx = yyextra->linalloc; \
+ yylval->str = linear_alloc_child(mem_ctx, \
+ yyleng + 1); \
+ memcpy(yylval->str, yytext, yyleng + 1); \
+ RETURN_TOKEN_NEVER_SKIP (token); \
+ } \
+ } while(0)
+
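+/* As a usage illustration drawn from the rules below: the "&&" rule is
+ * simply RETURN_TOKEN (AND), so its token is suppressed while skipping,
+ * whereas the <HASH>if rule uses RETURN_TOKEN_NEVER_SKIP (IF) so that
+ * the parser can still evaluate the condition of an otherwise-skipped
+ * block. */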
+
+/* Update all state necessary for each token being returned.
+ *
+ * Here we'll be tracking newlines and spaces so that the lexer can
+ * alter its behavior as necessary, (for example, '#' has special
+ * significance if it is the first non-whitespace, non-comment token
+ * in a line, but not otherwise).
+ *
+ * NOTE: If this function returns FALSE, then no token should be
+ * returned at all. This is used to suppress duplicate SPACE tokens.
+ */
+static int
+glcpp_lex_update_state_per_token (glcpp_parser_t *parser, int token)
+{
+ if (token != NEWLINE && token != SPACE && token != HASH_TOKEN &&
+ !parser->lexing_version_directive) {
+ glcpp_parser_resolve_implicit_version(parser);
+ }
+
+ /* After the first non-space token in a line, we won't
+ * allow any '#' to introduce a directive. */
+ if (token == NEWLINE) {
+ parser->first_non_space_token_this_line = 1;
+ } else if (token != SPACE) {
+ parser->first_non_space_token_this_line = 0;
+ }
+
+ /* Track newlines just to know whether a newline needs
+ * to be inserted if end-of-file comes early. */
+ if (token == NEWLINE) {
+ parser->last_token_was_newline = 1;
+ } else {
+ parser->last_token_was_newline = 0;
+ }
+
+ /* Track spaces to avoid emitting multiple SPACE
+ * tokens in a row. */
+ if (token == SPACE) {
+ if (! parser->last_token_was_space) {
+ parser->last_token_was_space = 1;
+ return 1;
+ } else {
+ parser->last_token_was_space = 1;
+ return 0;
+ }
+ } else {
+ parser->last_token_was_space = 0;
+ return 1;
+ }
+}
+
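+/* For example, with space tokens enabled, lexing "a  b" (two spaces)
+ * yields IDENTIFIER, a single SPACE, then IDENTIFIER: the function
+ * above returns 0 for the second consecutive SPACE, suppressing the
+ * duplicate token. */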
+
+%}
+
+%option bison-bridge bison-locations reentrant noyywrap
+%option extra-type="glcpp_parser_t *"
+%option prefix="glcpp_"
+%option stack
+%option never-interactive
+%option warn nodefault
+
+ /* Note: When adding any start conditions to this list, you must also
+ * update the "Internal compiler error" catch-all rule near the end of
+ * this file. */
+
+%x COMMENT DEFINE DONE HASH NEWLINE_CATCHUP UNREACHABLE
+
+SPACE [[:space:]]
+NONSPACE [^[:space:]]
+HSPACE [ \t\v\f]
+HASH #
+NEWLINE (\r\n|\n\r|\r|\n)
+IDENTIFIER [_a-zA-Z][_a-zA-Z0-9]*
+PP_NUMBER [.]?[0-9]([._a-zA-Z0-9]|[eEpP][-+])*
+PUNCTUATION [][(){}.&*~!/%<>^|;,=+-]
+
+/* The OTHER class is simply a catch-all for things that the CPP
+parser just doesn't care about. Since flex regular expressions that
+match longer strings take priority over those matching shorter
+strings, we have to be careful to avoid OTHER matching and hiding
+something that CPP does care about. So we simply exclude all
+characters that appear in any of the other expressions. */
+
+OTHER [^][_#[:space:]#a-zA-Z0-9(){}.&*~!/%<>^|;,=+-]
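+/* Characters such as '@' and '$', for example, fall into this class. */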
+
+DIGITS [0-9][0-9]*
+DECIMAL_INTEGER [1-9][0-9]*[uU]?
+OCTAL_INTEGER 0[0-7]*[uU]?
+HEXADECIMAL_INTEGER 0[xX][0-9a-fA-F]+[uU]?
+PATH ["][]^./ _A-Za-z0-9+*%[(){}|&~=!:;,?-]*["]
+
+%%
+
+ glcpp_parser_t *parser = yyextra;
+
+ /* When we lex a multi-line comment, we replace it (as
+ * specified) with a single space. But if the comment spanned
+ * multiple lines, then subsequent parsing stages will not
+ * count correct line numbers. To avoid this problem we keep
+ * track of all newlines that were commented out by a
+ * multi-line comment, and we emit a NEWLINE token for each at
+ * the next legal opportunity, (which is when the lexer would
+ * be emitting a NEWLINE token anyway).
+ */
+ if (YY_START == NEWLINE_CATCHUP) {
+ if (parser->commented_newlines)
+ parser->commented_newlines--;
+ if (parser->commented_newlines == 0)
+ BEGIN INITIAL;
+ RETURN_TOKEN_NEVER_SKIP (NEWLINE);
+ }
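+
+ /* For example, a comment spanning three source lines contains two
+ * newlines, so commented_newlines reaches 2 and two catch-up NEWLINE
+ * tokens are emitted from here. */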
+
+ /* Set up the parser->skipping bit here before doing any lexing.
+ *
+ * This bit controls whether tokens are skipped, (as implemented by
+ * RETURN_TOKEN), such as between "#if 0" and "#endif".
+ *
+ * The parser maintains a skip_stack indicating whether we should be
+ * skipping, (and nested levels of #if/#ifdef/#ifndef/#endif) will
+ * push and pop items from the stack.
+ *
+ * Here are the rules for determining whether we are skipping:
+ *
+ * 1. If the skip stack is NULL, we are outside of all #if blocks
+ * and we are not skipping.
+ *
+ * 2. If the skip stack is non-NULL, the type of the top node in
+ * the stack determines whether to skip. A type of
+ * SKIP_NO_SKIP is used for blocks where we are emitting
+ * tokens, (such as between #if 1 and #endif, or after the
+ * #else of an #if 0, etc.).
+ *
+ * 3. The lexing_directive bit overrides the skip stack. This bit
+ * is set when we are actively lexing the expression for a
+ * pre-processor condition, (such as #if, #elif, or #else). In
+ * this case, even if otherwise skipping, we need to emit the
+ * tokens for this condition so that the parser can evaluate
+ * the expression. (For #else, there's no expression, but we
+ * emit tokens so the parser can generate a nice error message
+ * if there are any tokens here).
+ */
+ if (parser->skip_stack &&
+ parser->skip_stack->type != SKIP_NO_SKIP &&
+ ! parser->lexing_directive)
+ {
+ parser->skipping = 1;
+ } else {
+ parser->skipping = 0;
+ }
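+
+ /* For example, given:
+ *
+ * #if 0
+ * foo
+ * #endif
+ *
+ * the tokens of "foo" are skipped, while the directive tokens and all
+ * NEWLINE tokens are still returned to the parser. */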
+
+ /* Single-line comments */
+<INITIAL,DEFINE,HASH>"//"[^\r\n]* {
+}
+
+ /* Multi-line comments */
+<INITIAL,DEFINE,HASH>"/*" { yy_push_state(COMMENT, yyscanner); }
+<COMMENT>[^*\r\n]*
+<COMMENT>[^*\r\n]*{NEWLINE} { yylineno++; yycolumn = 0; parser->commented_newlines++; }
+<COMMENT>"*"+[^*/\r\n]*
+<COMMENT>"*"+[^*/\r\n]*{NEWLINE} { yylineno++; yycolumn = 0; parser->commented_newlines++; }
+<COMMENT>"*"+"/" {
+ yy_pop_state(yyscanner);
+ /* In the <HASH> start condition, we don't want any SPACE token. */
+ if (yyextra->space_tokens && YY_START != HASH)
+ RETURN_TOKEN (SPACE);
+}
+
+{HASH} {
+
+ /* If the '#' is the first non-whitespace, non-comment token on this
+ * line, then it introduces a directive, so switch to the <HASH> start
+ * condition.
+ *
+ * Otherwise, this is just punctuation, so return the HASH_TOKEN
+ * token. */
+ if (parser->first_non_space_token_this_line) {
+ BEGIN HASH;
+ yyextra->in_define = false;
+ }
+
+ RETURN_TOKEN_NEVER_SKIP (HASH_TOKEN);
+}
+
+<HASH>version{HSPACE}+ {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ yyextra->lexing_version_directive = 1;
+ RETURN_STRING_TOKEN (VERSION_TOKEN);
+}
+
+ /* Swallow empty #pragma directives, (to avoid confusing the
+ * downstream compiler).
+ *
+ * Note: We use a simple regular expression for the lookahead
+ * here. Specifically, we cannot use the complete {NEWLINE} expression
+ * since it uses alternation and we've found that there's a flex bug
+ * where using alternation in the lookahead portion of a pattern
+ * triggers a buffer overrun. */
+<HASH>pragma{HSPACE}*/[\r\n] {
+ BEGIN INITIAL;
+}
+
+ /* glcpp doesn't handle #extension, #version, or #pragma directives.
+ * Simply pass them through to the main compiler's lexer/parser. */
+<HASH>(extension|pragma)[^\r\n]* {
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (PRAGMA);
+}
+
+<HASH>include{HSPACE}+["<][]^./ _A-Za-z0-9+*%[(){}|&~=!:;,?-]+[">] {
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (INCLUDE);
+}
+
+<HASH>line{HSPACE}+ {
+ BEGIN INITIAL;
+ RETURN_TOKEN (LINE);
+}
+
+<HASH>{NEWLINE} {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ yylineno++;
+ yycolumn = 0;
+ RETURN_TOKEN_NEVER_SKIP (NEWLINE);
+}
+
+ /* For the pre-processor directives, we return these tokens
+ * even when we are otherwise skipping. */
+<HASH>ifdef {
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (IFDEF);
+ }
+}
+
+<HASH>ifndef {
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (IFNDEF);
+ }
+}
+
+<HASH>if/[^_a-zA-Z0-9] {
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (IF);
+ }
+}
+
+<HASH>elif/[^_a-zA-Z0-9] {
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->lexing_directive = 1;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (ELIF);
+ }
+}
+
+<HASH>else {
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (ELSE);
+ }
+}
+
+<HASH>endif {
+ if (!yyextra->in_define) {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN_NEVER_SKIP (ENDIF);
+ }
+}
+
+<HASH>error[^\r\n]* {
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (ERROR_TOKEN);
+}
+
+ /* After we see a "#define" we enter the <DEFINE> start state
+ * for the lexer. Within <DEFINE> we are looking for the first
+ * identifier and specifically checking whether the identifier
+ * is followed by a '(' or not, (to lex either a
+ * FUNC_IDENTIFIER or an OBJ_IDENTIFIER token).
+ *
+ * While in the <DEFINE> state we also need to explicitly
+ * handle a few other things that may appear before the
+ * identifier:
+ *
+ * * Comments, (handled above with the main support for
+ * comments).
+ *
+ * * Whitespace (simply ignored)
+ *
+ * * Anything else, (not an identifier, not a comment,
+ * and not whitespace). This will generate an error.
+ */
+<HASH>define{HSPACE}* {
+ yyextra->in_define = true;
+ if (!parser->skipping) {
+ BEGIN DEFINE;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN (DEFINE_TOKEN);
+ }
+}
+
+<HASH>undef {
+ BEGIN INITIAL;
+ yyextra->space_tokens = 0;
+ RETURN_TOKEN (UNDEF);
+}
+
+<HASH>{HSPACE}+ {
+ /* Nothing to do here. Importantly, don't leave the <HASH>
+ * start condition, since it's legal to have space between the
+ * '#' and the directive. */
+}
+
+ /* This will catch any non-directive garbage after a HASH */
+<HASH>{NONSPACE} {
+ if (!parser->skipping) {
+ BEGIN INITIAL;
+ RETURN_TOKEN (GARBAGE);
+ }
+}
+
+ /* An identifier immediately followed by '(' */
+<DEFINE>{IDENTIFIER}/"(" {
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (FUNC_IDENTIFIER);
+}
+
+ /* An identifier not immediately followed by '(' */
+<DEFINE>{IDENTIFIER} {
+ BEGIN INITIAL;
+ RETURN_STRING_TOKEN (OBJ_IDENTIFIER);
+}
+
+ /* Whitespace */
+<DEFINE>{HSPACE}+ {
+ /* Just ignore it. Nothing to do here. */
+}
+
+ /* '/' not followed by '*', so not a comment. This is an error. */
+<DEFINE>[/][^*]{NONSPACE}* {
+ BEGIN INITIAL;
+ glcpp_error(yylloc, yyextra, "#define followed by a non-identifier: %s", yytext);
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+
+ /* A character that can't start an identifier, comment, or
+ * space. This is an error. */
+<DEFINE>[^_a-zA-Z/[:space:]]{NONSPACE}* {
+ BEGIN INITIAL;
+ glcpp_error(yylloc, yyextra, "#define followed by a non-identifier: %s", yytext);
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+
+{DECIMAL_INTEGER} {
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+
+{OCTAL_INTEGER} {
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+
+{HEXADECIMAL_INTEGER} {
+ RETURN_STRING_TOKEN (INTEGER_STRING);
+}
+
+"<<" {
+ RETURN_TOKEN (LEFT_SHIFT);
+}
+
+">>" {
+ RETURN_TOKEN (RIGHT_SHIFT);
+}
+
+"<=" {
+ RETURN_TOKEN (LESS_OR_EQUAL);
+}
+
+">=" {
+ RETURN_TOKEN (GREATER_OR_EQUAL);
+}
+
+"==" {
+ RETURN_TOKEN (EQUAL);
+}
+
+"!=" {
+ RETURN_TOKEN (NOT_EQUAL);
+}
+
+"&&" {
+ RETURN_TOKEN (AND);
+}
+
+"||" {
+ RETURN_TOKEN (OR);
+}
+
+"++" {
+ RETURN_TOKEN (PLUS_PLUS);
+}
+
+"--" {
+ RETURN_TOKEN (MINUS_MINUS);
+}
+
+"##" {
+ if (! parser->skipping) {
+ if (parser->is_gles)
+ glcpp_error(yylloc, yyextra, "Token pasting (##) is illegal in GLES");
+ RETURN_TOKEN (PASTE);
+ }
+}
+
+"defined" {
+ RETURN_TOKEN (DEFINED);
+}
+
+{IDENTIFIER} {
+ RETURN_STRING_TOKEN (IDENTIFIER);
+}
+
+{PP_NUMBER} {
+ RETURN_STRING_TOKEN (OTHER);
+}
+
+{PUNCTUATION} {
+ RETURN_TOKEN (yytext[0]);
+}
+
+{OTHER}+ {
+ RETURN_STRING_TOKEN (OTHER);
+}
+
+{HSPACE} {
+ if (yyextra->space_tokens) {
+ RETURN_TOKEN (SPACE);
+ }
+}
+
+{PATH} {
+ RETURN_STRING_TOKEN (PATH);
+}
+
+ /* We preserve all newlines, even between #if 0..#endif, so no
+ skipping. */
+<*>{NEWLINE} {
+ if (parser->commented_newlines) {
+ BEGIN NEWLINE_CATCHUP;
+ } else {
+ BEGIN INITIAL;
+ }
+ yyextra->space_tokens = 1;
+ yyextra->lexing_directive = 0;
+ yyextra->lexing_version_directive = 0;
+ yylineno++;
+ yycolumn = 0;
+ RETURN_TOKEN_NEVER_SKIP (NEWLINE);
+}
+
+<INITIAL,COMMENT,DEFINE,HASH><<EOF>> {
+ if (YY_START == COMMENT)
+ glcpp_error(yylloc, yyextra, "Unterminated comment");
+ BEGIN DONE; /* Don't keep matching this rule forever. */
+ yyextra->lexing_directive = 0;
+ yyextra->lexing_version_directive = 0;
+ if (! parser->last_token_was_newline)
+ RETURN_TOKEN (NEWLINE);
+}
+
+ /* This is a catch-all to avoid the annoying default flex action which
+ * matches any character and prints it. If any input ever matches this
+ * rule, then we have made a mistake above and need to fix one or more
+ * of the preceding patterns to match that input. */
+
+<*>. {
+ glcpp_error(yylloc, yyextra, "Internal compiler error: Unexpected character: %s", yytext);
+
+ /* We don't actually use the UNREACHABLE start condition. We
+ only have this block here so that we can pretend to call some
+ generated functions, (to avoid "defined but not used"
+ warnings). */
+ if (YY_START == UNREACHABLE) {
+ unput('.');
+ yy_top_state(yyextra);
+ }
+}
+
+%%
+
+void
+glcpp_lex_set_source_string(glcpp_parser_t *parser, const char *shader)
+{
+ yy_scan_string(shader, parser->scanner);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c
new file mode 100644
index 0000000000..c091805485
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c
@@ -0,0 +1,4912 @@
+/* A Bison parser, made by GNU Bison 3.5. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Undocumented macros, especially those whose name start with YY_,
+ are private implementation details. Do not rely on them. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "3.5"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 1
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+
+/* Substitute the variable and function names. */
+#define yyparse glcpp_parser_parse
+#define yylex glcpp_parser_lex
+#define yyerror glcpp_parser_error
+#define yydebug glcpp_parser_debug
+#define yynerrs glcpp_parser_nerrs
+
+/* First part of user prologue. */
+#line 1 "src/compiler/glsl/glcpp/glcpp-parse.y"
+
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include "glcpp.h"
+#include "main/mtypes.h"
+#include "util/strndup.h"
+
+const char *
+_mesa_lookup_shader_include(struct gl_context *ctx, char *path,
+ bool error_check);
+
+size_t
+_mesa_get_shader_include_cursor(struct gl_shared_state *shared);
+
+void
+_mesa_set_shader_include_cursor(struct gl_shared_state *shared, size_t cursor);
+
+static void
+yyerror(YYLTYPE *locp, glcpp_parser_t *parser, const char *error);
+
+static void
+_define_object_macro(glcpp_parser_t *parser,
+ YYLTYPE *loc,
+ const char *macro,
+ token_list_t *replacements);
+
+static void
+_define_function_macro(glcpp_parser_t *parser,
+ YYLTYPE *loc,
+ const char *macro,
+ string_list_t *parameters,
+ token_list_t *replacements);
+
+static string_list_t *
+_string_list_create(glcpp_parser_t *parser);
+
+static void
+_string_list_append_item(glcpp_parser_t *parser, string_list_t *list,
+ const char *str);
+
+static int
+_string_list_contains(string_list_t *list, const char *member, int *index);
+
+static const char *
+_string_list_has_duplicate(string_list_t *list);
+
+static int
+_string_list_length(string_list_t *list);
+
+static int
+_string_list_equal(string_list_t *a, string_list_t *b);
+
+static argument_list_t *
+_argument_list_create(glcpp_parser_t *parser);
+
+static void
+_argument_list_append(glcpp_parser_t *parser, argument_list_t *list,
+ token_list_t *argument);
+
+static int
+_argument_list_length(argument_list_t *list);
+
+static token_list_t *
+_argument_list_member_at(argument_list_t *list, int index);
+
+static token_t *
+_token_create_str(glcpp_parser_t *parser, int type, char *str);
+
+static token_t *
+_token_create_ival(glcpp_parser_t *parser, int type, int ival);
+
+static token_list_t *
+_token_list_create(glcpp_parser_t *parser);
+
+static void
+_token_list_append(glcpp_parser_t *parser, token_list_t *list, token_t *token);
+
+static void
+_token_list_append_list(token_list_t *list, token_list_t *tail);
+
+static int
+_token_list_equal_ignoring_space(token_list_t *a, token_list_t *b);
+
+static void
+_parser_active_list_push(glcpp_parser_t *parser, const char *identifier,
+ token_node_t *marker);
+
+static void
+_parser_active_list_pop(glcpp_parser_t *parser);
+
+static int
+_parser_active_list_contains(glcpp_parser_t *parser, const char *identifier);
+
+typedef enum {
+ EXPANSION_MODE_IGNORE_DEFINED,
+ EXPANSION_MODE_EVALUATE_DEFINED
+} expansion_mode_t;
+
+/* Expand list, and begin lexing from the result (after first
+ * prefixing a token of type 'head_token_type').
+ */
+static void
+_glcpp_parser_expand_and_lex_from(glcpp_parser_t *parser, int head_token_type,
+ token_list_t *list, expansion_mode_t mode);
+
+/* Perform macro expansion in-place on the given list. */
+static void
+_glcpp_parser_expand_token_list(glcpp_parser_t *parser, token_list_t *list,
+ expansion_mode_t mode);
+
+static void
+_glcpp_parser_print_expanded_token_list(glcpp_parser_t *parser,
+ token_list_t *list);
+
+static void
+_glcpp_parser_skip_stack_push_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ int condition);
+
+static void
+_glcpp_parser_skip_stack_change_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *type, int condition);
+
+static void
+_glcpp_parser_skip_stack_pop(glcpp_parser_t *parser, YYLTYPE *loc);
+
+static void
+_glcpp_parser_handle_version_declaration(glcpp_parser_t *parser, intmax_t version,
+ const char *ident, bool explicitly_set);
+
+static int
+glcpp_parser_lex(YYSTYPE *yylval, YYLTYPE *yylloc, glcpp_parser_t *parser);
+
+static void
+glcpp_parser_lex_from(glcpp_parser_t *parser, token_list_t *list);
+
+struct define_include {
+ glcpp_parser_t *parser;
+ YYLTYPE *loc;
+};
+
+static void
+glcpp_parser_copy_defines(const void *key, void *data, void *closure);
+
+static void
+add_builtin_define(glcpp_parser_t *parser, const char *name, int value);
+
+
+#line 250 "src/compiler/glsl/glcpp/glcpp-parse.c"
+
+# ifndef YY_CAST
+# ifdef __cplusplus
+# define YY_CAST(Type, Val) static_cast<Type> (Val)
+# define YY_REINTERPRET_CAST(Type, Val) reinterpret_cast<Type> (Val)
+# else
+# define YY_CAST(Type, Val) ((Type) (Val))
+# define YY_REINTERPRET_CAST(Type, Val) ((Type) (Val))
+# endif
+# endif
+# ifndef YY_NULLPTR
+# if defined __cplusplus
+# if 201103L <= __cplusplus
+# define YY_NULLPTR nullptr
+# else
+# define YY_NULLPTR 0
+# endif
+# else
+# define YY_NULLPTR ((void*)0)
+# endif
+# endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 1
+#endif
+
+/* Use api.header.include to #include this header
+ instead of duplicating it here. */
+#ifndef YY_GLCPP_PARSER_SRC_COMPILER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED
+# define YY_GLCPP_PARSER_SRC_COMPILER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int glcpp_parser_debug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ DEFINED = 258,
+ ELIF_EXPANDED = 259,
+ HASH_TOKEN = 260,
+ DEFINE_TOKEN = 261,
+ FUNC_IDENTIFIER = 262,
+ OBJ_IDENTIFIER = 263,
+ ELIF = 264,
+ ELSE = 265,
+ ENDIF = 266,
+ ERROR_TOKEN = 267,
+ IF = 268,
+ IFDEF = 269,
+ IFNDEF = 270,
+ LINE = 271,
+ PRAGMA = 272,
+ UNDEF = 273,
+ VERSION_TOKEN = 274,
+ GARBAGE = 275,
+ IDENTIFIER = 276,
+ IF_EXPANDED = 277,
+ INTEGER = 278,
+ INTEGER_STRING = 279,
+ LINE_EXPANDED = 280,
+ NEWLINE = 281,
+ OTHER = 282,
+ PLACEHOLDER = 283,
+ SPACE = 284,
+ PLUS_PLUS = 285,
+ MINUS_MINUS = 286,
+ PATH = 287,
+ INCLUDE = 288,
+ PASTE = 289,
+ OR = 290,
+ AND = 291,
+ EQUAL = 292,
+ NOT_EQUAL = 293,
+ LESS_OR_EQUAL = 294,
+ GREATER_OR_EQUAL = 295,
+ LEFT_SHIFT = 296,
+ RIGHT_SHIFT = 297,
+ UNARY = 298
+ };
+#endif
+
+/* Value type. */
+
+/* Location type. */
+#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
+typedef struct YYLTYPE YYLTYPE;
+struct YYLTYPE
+{
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+};
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
+#endif
+
+
+
+int glcpp_parser_parse (glcpp_parser_t *parser);
+
+#endif /* !YY_GLCPP_PARSER_SRC_COMPILER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED */
+
+
+
+#ifdef short
+# undef short
+#endif
+
+/* On compilers that do not define __PTRDIFF_MAX__ etc., make sure
+ <limits.h> and (if available) <stdint.h> are included
+ so that the code can choose integer types of a good width. */
+
+#ifndef __PTRDIFF_MAX__
+# include <limits.h> /* INFRINGES ON USER NAME SPACE */
+# if defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__
+# include <stdint.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_STDINT_H
+# endif
+#endif
+
+/* Narrow types that promote to a signed type and that can represent a
+ signed or unsigned integer of at least N bits. In tables they can
+ save space and decrease cache pressure. Promoting to a signed type
+ helps avoid bugs in integer arithmetic. */
+
+#ifdef __INT_LEAST8_MAX__
+typedef __INT_LEAST8_TYPE__ yytype_int8;
+#elif defined YY_STDINT_H
+typedef int_least8_t yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef __INT_LEAST16_MAX__
+typedef __INT_LEAST16_TYPE__ yytype_int16;
+#elif defined YY_STDINT_H
+typedef int_least16_t yytype_int16;
+#else
+typedef short yytype_int16;
+#endif
+
+#if defined __UINT_LEAST8_MAX__ && __UINT_LEAST8_MAX__ <= __INT_MAX__
+typedef __UINT_LEAST8_TYPE__ yytype_uint8;
+#elif (!defined __UINT_LEAST8_MAX__ && defined YY_STDINT_H \
+ && UINT_LEAST8_MAX <= INT_MAX)
+typedef uint_least8_t yytype_uint8;
+#elif !defined __UINT_LEAST8_MAX__ && UCHAR_MAX <= INT_MAX
+typedef unsigned char yytype_uint8;
+#else
+typedef short yytype_uint8;
+#endif
+
+#if defined __UINT_LEAST16_MAX__ && __UINT_LEAST16_MAX__ <= __INT_MAX__
+typedef __UINT_LEAST16_TYPE__ yytype_uint16;
+#elif (!defined __UINT_LEAST16_MAX__ && defined YY_STDINT_H \
+ && UINT_LEAST16_MAX <= INT_MAX)
+typedef uint_least16_t yytype_uint16;
+#elif !defined __UINT_LEAST16_MAX__ && USHRT_MAX <= INT_MAX
+typedef unsigned short yytype_uint16;
+#else
+typedef int yytype_uint16;
+#endif
+
+#ifndef YYPTRDIFF_T
+# if defined __PTRDIFF_TYPE__ && defined __PTRDIFF_MAX__
+# define YYPTRDIFF_T __PTRDIFF_TYPE__
+# define YYPTRDIFF_MAXIMUM __PTRDIFF_MAX__
+# elif defined PTRDIFF_MAX
+# ifndef ptrdiff_t
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# endif
+# define YYPTRDIFF_T ptrdiff_t
+# define YYPTRDIFF_MAXIMUM PTRDIFF_MAX
+# else
+# define YYPTRDIFF_T long
+# define YYPTRDIFF_MAXIMUM LONG_MAX
+# endif
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM \
+ YY_CAST (YYPTRDIFF_T, \
+ (YYPTRDIFF_MAXIMUM < YY_CAST (YYSIZE_T, -1) \
+ ? YYPTRDIFF_MAXIMUM \
+ : YY_CAST (YYSIZE_T, -1)))
+
+#define YYSIZEOF(X) YY_CAST (YYPTRDIFF_T, sizeof (X))
+
+/* Stored state numbers (used for stacks). */
+typedef yytype_uint8 yy_state_t;
+
+/* State numbers in computations. */
+typedef int yy_state_fast_t;
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# if defined __GNUC__ && 2 < __GNUC__ + (96 <= __GNUC_MINOR__)
+# define YY_ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define YY_ATTRIBUTE_PURE
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# if defined __GNUC__ && 2 < __GNUC__ + (7 <= __GNUC_MINOR__)
+# define YY_ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+# else
+# define YY_ATTRIBUTE_UNUSED
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") \
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+#if defined __cplusplus && defined __GNUC__ && ! defined __ICC && 6 <= __GNUC__
+# define YY_IGNORE_USELESS_CAST_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuseless-cast\"")
+# define YY_IGNORE_USELESS_CAST_END \
+ _Pragma ("GCC diagnostic pop")
+#endif
+#ifndef YY_IGNORE_USELESS_CAST_BEGIN
+# define YY_IGNORE_USELESS_CAST_BEGIN
+# define YY_IGNORE_USELESS_CAST_END
+#endif
+
+
+#define YY_ASSERT(E) ((void) (0 && (E)))
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \
+ && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yy_state_t yyss_alloc;
+ YYSTYPE yyvs_alloc;
+ YYLTYPE yyls_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (YYSIZEOF (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE) \
+ + YYSIZEOF (YYLTYPE)) \
+ + 2 * YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYPTRDIFF_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * YYSIZEOF (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / YYSIZEOF (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(Dst, Src, Count) \
+ __builtin_memcpy (Dst, Src, YY_CAST (YYSIZE_T, (Count)) * sizeof (*(Src)))
+# else
+# define YYCOPY(Dst, Src, Count) \
+ do \
+ { \
+ YYPTRDIFF_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (Dst)[yyi] = (Src)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 2
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 731
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 66
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 20
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 116
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 185
+
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 298
+
+
+/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, with out-of-bounds checking. */
+#define YYTRANSLATE(YYX) \
+ (0 <= (YYX) && (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex. */
+static const yytype_int8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 56, 2, 2, 2, 52, 39, 2,
+ 54, 55, 50, 48, 58, 49, 63, 51, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 64,
+ 42, 65, 43, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 59, 2, 60, 38, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 61, 37, 62, 57, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 40, 41, 44, 45, 46, 47, 53
+};
+
+#if YYDEBUG
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
+static const yytype_int16 yyrline[] =
+{
+ 0, 220, 220, 222, 226, 227, 228, 232, 236, 241,
+ 246, 251, 260, 270, 273, 276, 282, 285, 286, 299,
+ 300, 352, 426, 447, 457, 463, 469, 495, 515, 515,
+ 528, 528, 531, 537, 543, 546, 552, 555, 558, 564,
+ 568, 573, 584, 588, 595, 606, 617, 624, 631, 638,
+ 645, 652, 659, 666, 673, 680, 687, 694, 701, 708,
+ 720, 732, 739, 743, 747, 751, 755, 761, 765, 772,
+ 773, 777, 778, 781, 783, 789, 794, 801, 805, 809,
+ 813, 817, 821, 825, 832, 833, 834, 835, 836, 837,
+ 838, 839, 840, 841, 842, 843, 844, 845, 846, 847,
+ 848, 849, 850, 851, 852, 853, 854, 855, 856, 857,
+ 858, 859, 860, 861, 862, 863, 864
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || 1
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "DEFINED", "ELIF_EXPANDED", "HASH_TOKEN",
+ "DEFINE_TOKEN", "FUNC_IDENTIFIER", "OBJ_IDENTIFIER", "ELIF", "ELSE",
+ "ENDIF", "ERROR_TOKEN", "IF", "IFDEF", "IFNDEF", "LINE", "PRAGMA",
+ "UNDEF", "VERSION_TOKEN", "GARBAGE", "IDENTIFIER", "IF_EXPANDED",
+ "INTEGER", "INTEGER_STRING", "LINE_EXPANDED", "NEWLINE", "OTHER",
+ "PLACEHOLDER", "SPACE", "PLUS_PLUS", "MINUS_MINUS", "PATH", "INCLUDE",
+ "PASTE", "OR", "AND", "'|'", "'^'", "'&'", "EQUAL", "NOT_EQUAL", "'<'",
+ "'>'", "LESS_OR_EQUAL", "GREATER_OR_EQUAL", "LEFT_SHIFT", "RIGHT_SHIFT",
+ "'+'", "'-'", "'*'", "'/'", "'%'", "UNARY", "'('", "')'", "'!'", "'~'",
+ "','", "'['", "']'", "'{'", "'}'", "'.'", "';'", "'='", "$accept",
+ "input", "line", "expanded_line", "define", "control_line",
+ "control_line_success", "$@1", "$@2", "control_line_error",
+ "integer_constant", "version_constant", "expression", "identifier_list",
+ "text_line", "replacement_list", "junk", "pp_tokens",
+ "preprocessing_token", "operator", YY_NULLPTR
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
+static const yytype_int16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 124, 94, 38,
+ 292, 293, 60, 62, 294, 295, 296, 297, 43, 45,
+ 42, 47, 37, 298, 40, 41, 33, 126, 44, 91,
+ 93, 123, 125, 46, 59, 61
+};
+# endif
+
+#define YYPACT_NINF (-145)
+
+#define yypact_value_is_default(Yyn) \
+ ((Yyn) == YYPACT_NINF)
+
+#define YYTABLE_NINF (-1)
+
+#define yytable_value_is_error(Yyn) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int16 yypact[] =
+{
+ -145, 105, -145, -145, -15, 4, -145, -15, -145, 45,
+ -145, -145, -2, -145, -145, -145, -145, -145, -145, -145,
+ -145, -145, -145, -145, -145, -145, -145, -145, -145, -145,
+ -145, -145, -145, -145, -145, -145, -145, -145, -145, -145,
+ -145, -145, -145, -145, -145, -145, -145, -145, -145, -145,
+ -145, -145, -145, 155, -145, -145, -145, -145, -145, -15,
+ -15, -15, -15, -15, -145, 545, 18, 205, -145, -145,
+ 1, 255, -10, 10, 505, 12, 19, 24, 505, -145,
+ 17, 572, 26, -145, -145, -145, -145, -145, -145, 590,
+ -145, -145, -145, -15, -15, -15, -15, -15, -15, -15,
+ -15, -15, -15, -15, -15, -15, -15, -15, -15, -15,
+ -15, -3, 505, -145, -145, -145, 305, 48, 50, -145,
+ -145, 355, 505, 505, 405, -145, 53, -145, -14, 455,
+ -145, -145, -145, 60, 80, -145, 610, 626, 641, 655,
+ 668, 679, 679, 13, 13, 13, 13, 33, 33, -5,
+ -5, -145, -145, -145, -19, 85, 505, -145, -145, -145,
+ -145, 86, 505, 88, -145, -145, 89, -145, -145, -145,
+ -145, -145, 505, -26, -145, -145, -145, -145, 90, 505,
+ 96, -145, 92, -145, -145
+};
+
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_int8 yydefact[] =
+{
+ 2, 0, 1, 81, 0, 0, 77, 0, 78, 0,
+ 69, 82, 83, 115, 116, 79, 114, 110, 109, 108,
+ 107, 91, 105, 106, 101, 102, 103, 104, 99, 100,
+ 93, 94, 92, 97, 98, 86, 87, 96, 95, 112,
+ 84, 85, 88, 89, 90, 111, 113, 3, 7, 4,
+ 16, 17, 6, 0, 75, 80, 43, 40, 39, 0,
+ 0, 0, 0, 0, 42, 0, 0, 0, 28, 30,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 34,
+ 0, 0, 0, 5, 70, 83, 76, 65, 64, 0,
+ 62, 63, 9, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 71, 37, 19, 27, 0, 0, 0, 36,
+ 23, 0, 73, 73, 0, 35, 0, 41, 0, 0,
+ 21, 8, 10, 0, 0, 66, 44, 45, 46, 47,
+ 48, 50, 49, 54, 53, 52, 51, 56, 55, 58,
+ 57, 61, 60, 59, 0, 0, 72, 26, 29, 31,
+ 22, 0, 74, 0, 18, 20, 0, 32, 38, 12,
+ 11, 67, 71, 0, 13, 24, 25, 33, 0, 71,
+ 0, 14, 0, 68, 15
+};
+
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
+{
+ -145, -145, -145, -145, -145, 59, -145, -145, -145, -145,
+ -4, -145, -6, -145, -145, -144, 0, -1, -49, -145
+};
+
+ /* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int16 yydefgoto[] =
+{
+ -1, 1, 47, 48, 114, 49, 50, 117, 118, 51,
+ 64, 128, 65, 173, 52, 155, 161, 156, 54, 55
+};
+
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_uint8 yytable[] =
+{
+ 53, 81, 171, 5, 86, 82, 56, 166, 57, 58,
+ 66, 122, 167, 67, 68, 69, 70, 71, 72, 73,
+ 74, 75, 76, 77, 78, 111, 112, 119, 178, 179,
+ 79, 123, 180, 59, 60, 182, 172, 80, 125, 61,
+ 126, 62, 63, 130, 113, 108, 109, 110, 127, 57,
+ 58, 154, 132, 87, 88, 89, 90, 91, 133, 104,
+ 105, 106, 107, 108, 109, 110, 116, 86, 57, 58,
+ 121, 83, 86, 124, 158, 86, 159, 129, 134, 165,
+ 86, 106, 107, 108, 109, 110, 169, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151, 152, 153, 2, 170, 86, 3, 4,
+ 5, 174, 175, 86, 176, 177, 181, 183, 184, 0,
+ 0, 162, 162, 163, 0, 0, 6, 7, 0, 8,
+ 9, 10, 11, 0, 12, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 84, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 115, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 120, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 157, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 160, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 164, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 168, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 3, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 0, 0, 0, 0, 0, 6, 0, 0, 8,
+ 0, 0, 11, 0, 85, 13, 14, 15, 0, 16,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 31, 32, 33, 34, 0, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 92, 0, 0, 0, 0, 0, 0, 0, 0,
+ 93, 94, 95, 96, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 131, 0,
+ 0, 0, 0, 0, 0, 0, 0, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 93, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 0, 0, 135, 94, 95, 96, 97,
+ 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+ 108, 109, 110, 95, 96, 97, 98, 99, 100, 101,
+ 102, 103, 104, 105, 106, 107, 108, 109, 110, 96,
+ 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
+ 107, 108, 109, 110, 97, 98, 99, 100, 101, 102,
+ 103, 104, 105, 106, 107, 108, 109, 110, 98, 99,
+ 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ 110, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110
+};
+
+static const yytype_int16 yycheck[] =
+{
+ 1, 7, 21, 5, 53, 9, 21, 21, 23, 24,
+ 6, 21, 26, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 7, 8, 26, 172, 55,
+ 26, 21, 58, 48, 49, 179, 55, 33, 26, 54,
+ 21, 56, 57, 26, 26, 50, 51, 52, 24, 23,
+ 24, 54, 26, 59, 60, 61, 62, 63, 32, 46,
+ 47, 48, 49, 50, 51, 52, 67, 116, 23, 24,
+ 71, 12, 121, 74, 26, 124, 26, 78, 82, 26,
+ 129, 48, 49, 50, 51, 52, 26, 93, 94, 95,
+ 96, 97, 98, 99, 100, 101, 102, 103, 104, 105,
+ 106, 107, 108, 109, 110, 0, 26, 156, 3, 4,
+ 5, 26, 26, 162, 26, 26, 26, 21, 26, -1,
+ -1, 122, 123, 123, -1, -1, 21, 22, -1, 24,
+ 25, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, 26, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 3, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, -1, -1, -1, -1, -1, 21, -1, -1, 24,
+ -1, -1, 27, -1, 29, 30, 31, 32, -1, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, -1, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 26, -1, -1, -1, -1, -1, -1, -1, -1,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 26, -1,
+ -1, -1, -1, -1, -1, -1, -1, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, -1, -1, 55, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, 52, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, 52, 38,
+ 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, 52, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 52
+};
+
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_int8 yystos[] =
+{
+ 0, 67, 0, 3, 4, 5, 21, 22, 24, 25,
+ 26, 27, 29, 30, 31, 32, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 54, 55, 56, 57, 58,
+ 59, 60, 61, 62, 63, 64, 65, 68, 69, 71,
+ 72, 75, 80, 83, 84, 85, 21, 23, 24, 48,
+ 49, 54, 56, 57, 76, 78, 6, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 26,
+ 33, 78, 76, 71, 26, 29, 84, 78, 78, 78,
+ 78, 78, 26, 35, 36, 37, 38, 39, 40, 41,
+ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+ 52, 7, 8, 26, 70, 26, 83, 73, 74, 26,
+ 26, 83, 21, 21, 83, 26, 21, 24, 77, 83,
+ 26, 26, 26, 32, 76, 55, 78, 78, 78, 78,
+ 78, 78, 78, 78, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 78, 54, 81, 83, 26, 26, 26,
+ 26, 82, 83, 82, 26, 26, 21, 26, 26, 26,
+ 26, 21, 55, 79, 26, 26, 26, 26, 81, 55,
+ 58, 26, 81, 21, 26
+};
+
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_int8 yyr1[] =
+{
+ 0, 66, 67, 67, 68, 68, 68, 68, 69, 69,
+ 69, 69, 69, 70, 70, 70, 71, 71, 71, 72,
+ 72, 72, 72, 72, 72, 72, 72, 72, 73, 72,
+ 74, 72, 72, 72, 72, 72, 75, 75, 75, 76,
+ 76, 77, 78, 78, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 78, 78, 78, 78, 78, 78, 78,
+ 78, 78, 78, 78, 78, 78, 78, 79, 79, 80,
+ 80, 81, 81, 82, 82, 83, 83, 84, 84, 84,
+ 84, 84, 84, 84, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
+ 85, 85, 85, 85, 85, 85, 85
+};
+
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_int8 yyr2[] =
+{
+ 0, 2, 0, 2, 1, 2, 1, 1, 3, 3,
+ 3, 4, 4, 3, 5, 6, 1, 1, 4, 3,
+ 4, 3, 4, 3, 5, 5, 4, 3, 0, 4,
+ 0, 4, 4, 5, 2, 3, 3, 3, 4, 1,
+ 1, 1, 1, 1, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 2, 2, 2, 2, 3, 1, 3, 1,
+ 2, 0, 1, 0, 1, 1, 2, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1
+};
+
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+ do \
+ if (yychar == YYEMPTY) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (yylen); \
+ yystate = *yyssp; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (&yylloc, parser, YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+ while (0)
+
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+ If N is 0, then set CURRENT to the empty location which ends
+ the previous symbol: RHS[0] (always defined). */
+
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ do \
+ if (N) \
+ { \
+ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC (Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC (Rhs, 0).last_column; \
+ } \
+ while (0)
+#endif
+
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
+
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+
+/* YY_LOCATION_PRINT -- Print the location on the stream.
+ This macro was not mandated originally: define only if we know
+ we won't break user code, i.e. when these are the locations we know. */
+
+#ifndef YY_LOCATION_PRINT
+# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
+
+/* Print *YYLOCP on YYO. Private, do not rely on its existence. */
+
+YY_ATTRIBUTE_UNUSED
+static int
+yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp)
+{
+ int res = 0;
+ int end_col = 0 != yylocp->last_column ? yylocp->last_column - 1 : 0;
+ if (0 <= yylocp->first_line)
+ {
+ res += YYFPRINTF (yyo, "%d", yylocp->first_line);
+ if (0 <= yylocp->first_column)
+ res += YYFPRINTF (yyo, ".%d", yylocp->first_column);
+ }
+ if (0 <= yylocp->last_line)
+ {
+ if (yylocp->first_line < yylocp->last_line)
+ {
+ res += YYFPRINTF (yyo, "-%d", yylocp->last_line);
+ if (0 <= end_col)
+ res += YYFPRINTF (yyo, ".%d", end_col);
+ }
+ else if (0 <= end_col && yylocp->first_column < end_col)
+ res += YYFPRINTF (yyo, "-%d", end_col);
+ }
+ return res;
+ }
+
+# define YY_LOCATION_PRINT(File, Loc) \
+ yy_location_print_ (File, &(Loc))
+
+# else
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+# endif
+#endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value, Location, parser); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+
+/*-----------------------------------.
+| Print this symbol's value on YYO. |
+`-----------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, glcpp_parser_t *parser)
+{
+ FILE *yyoutput = yyo;
+ YYUSE (yyoutput);
+ YYUSE (yylocationp);
+ YYUSE (parser);
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyo, yytoknum[yytype], *yyvaluep);
+# endif
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+/*---------------------------.
+| Print this symbol on YYO. |
+`---------------------------*/
+
+static void
+yy_symbol_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, glcpp_parser_t *parser)
+{
+ YYFPRINTF (yyo, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
+
+ YY_LOCATION_PRINT (yyo, *yylocationp);
+ YYFPRINTF (yyo, ": ");
+ yy_symbol_value_print (yyo, yytype, yyvaluep, yylocationp, parser);
+ YYFPRINTF (yyo, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (included). |
+`------------------------------------------------------------------*/
+
+static void
+yy_stack_print (yy_state_t *yybottom, yy_state_t *yytop)
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that the YYRULE is going to be reduced. |
+`------------------------------------------------*/
+
+static void
+yy_reduce_print (yy_state_t *yyssp, YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule, glcpp_parser_t *parser)
+{
+ int yylno = yyrline[yyrule];
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %d):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &yyvsp[(yyi + 1) - (yynrhs)]
+ , &(yylsp[(yyi + 1) - (yynrhs)]) , parser);
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, yylsp, Rule, parser); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH)
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
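+
+/* Editor's note -- a minimal sketch, not generated code, of the growth
+ * policy the stack-relocation path in yyparse implements: start at
+ * YYINITDEPTH and double, clamped to YYMAXDEPTH:
+ *
+ *   static YYPTRDIFF_T next_stacksize (YYPTRDIFF_T cur)
+ *   {
+ *     YYPTRDIFF_T next = cur * 2;                    // 200, 400, 800, ...
+ *     return next < YYMAXDEPTH ? next : YYMAXDEPTH;  // cap at 10000
+ *   }
+ *
+ * Once the stack already holds YYMAXDEPTH entries, the parser gives up
+ * with "memory exhausted" instead of growing further. */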
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S)))
+# else
+/* Return the length of YYSTR. */
+static YYPTRDIFF_T
+yystrlen (const char *yystr)
+{
+ YYPTRDIFF_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or backslash (other than
+ backslash-backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYPTRDIFF_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYPTRDIFF_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ else
+ goto append;
+
+ append:
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (yyres)
+ return yystpcpy (yyres, yystr) - yyres;
+ else
+ return yystrlen (yystr);
+}
+# endif
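+
+/* Editor's note -- illustrative calls, not generated code, showing the
+ * quote-stripping heuristic above on yytname-style strings:
+ *
+ *   yytnamerr (buf,  "\"<<\"")   writes "<<" into buf and returns 2
+ *   yytnamerr (NULL, "\"<<\"")   returns 2 without copying
+ *   yytnamerr (buf,  "\"a,b\"")  keeps the quotes (comma present) and
+ *                                copies the string verbatim
+ */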
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYPTRDIFF_T *yymsg_alloc, char **yymsg,
+ yy_state_t *yyssp, int yytoken)
+{
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = YY_NULLPTR;
+ /* Arguments of yyformat: reported tokens (one for the "unexpected",
+ one per "expected"). */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Actual size of YYARG. */
+ int yycount = 0;
+ /* Cumulated lengths of YYARG. */
+ YYPTRDIFF_T yysize = 0;
+
+ /* There are many possibilities here to consider:
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ YYPTRDIFF_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
+ yysize = yysize0;
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ {
+ YYPTRDIFF_T yysize1
+ = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ default: /* Avoid compiler warnings. */
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ {
+ /* Don't count the "%s"s in the final size, but reserve room for
+ the terminator. */
+ YYPTRDIFF_T yysize1 = yysize + (yystrlen (yyformat) - 2 * yycount) + 1;
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ ++yyp;
+ ++yyformat;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
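+
+/* Editor's note, illustrative: with YYERROR_VERBOSE the messages assembled
+ * above look like
+ *
+ *   syntax error, unexpected NEWLINE
+ *   syntax error, unexpected INTEGER, expecting IDENTIFIER or NEWLINE
+ *
+ * (token names here are examples). At most four "expected" tokens are
+ * listed -- one of the five YYARG slots holds the unexpected token -- and
+ * when more would apply, the message collapses back to the bare
+ * "syntax error, unexpected %s" form. */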
+
+/*-----------------------------------------------.
+| Release the memory associated to this symbol. |
+`-----------------------------------------------*/
+
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp, glcpp_parser_t *parser)
+{
+ YYUSE (yyvaluep);
+ YYUSE (yylocationp);
+ YYUSE (parser);
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+int
+yyparse (glcpp_parser_t *parser)
+{
+/* The lookahead symbol. */
+int yychar;
+
+
+/* The semantic value of the lookahead symbol. */
+/* Default value used for initialization, for pacifying older GCCs
+ or non-GCC compilers. */
+YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
+YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
+
+/* Location data for the lookahead symbol. */
+static YYLTYPE yyloc_default
+# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
+ = { 1, 1, 1, 1 }
+# endif
+;
+YYLTYPE yylloc = yyloc_default;
+
+ /* Number of syntax errors so far. */
+ int yynerrs;
+
+ yy_state_fast_t yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
+ 'yyls': related to locations.
+
+ Refer to the stacks through separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yy_state_t yyssa[YYINITDEPTH];
+ yy_state_t *yyss;
+ yy_state_t *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ /* The location stack. */
+ YYLTYPE yylsa[YYINITDEPTH];
+ YYLTYPE *yyls;
+ YYLTYPE *yylsp;
+
+ /* The locations where the error started and ended. */
+ YYLTYPE yyerror_range[3];
+
+ YYPTRDIFF_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+ YYLTYPE yyloc;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N), yylsp -= (N))
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
+ yylsp = yyls = yylsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+/* User initialization code. */
+#line 180 "src/compiler/glsl/glcpp/glcpp-parse.y"
+{
+ yylloc.first_line = 1;
+ yylloc.first_column = 1;
+ yylloc.last_line = 1;
+ yylloc.last_column = 1;
+ yylloc.source = 0;
+}
+
+#line 1689 "src/compiler/glsl/glcpp/glcpp-parse.c"
+
+ yylsp[0] = yylloc;
+ goto yysetstate;
+
+
+/*------------------------------------------------------------.
+| yynewstate -- push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+
+/*--------------------------------------------------------------------.
+| yysetstate -- set current state (the top of the stack) to yystate. |
+`--------------------------------------------------------------------*/
+yysetstate:
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+ YY_ASSERT (0 <= yystate && yystate < YYNSTATES);
+ YY_IGNORE_USELESS_CAST_BEGIN
+ *yyssp = YY_CAST (yy_state_t, yystate);
+ YY_IGNORE_USELESS_CAST_END
+
+ if (yyss + yystacksize - 1 <= yyssp)
+#if !defined yyoverflow && !defined YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+#else
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYPTRDIFF_T yysize = yyssp - yyss + 1;
+
+# if defined yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ yy_state_t *yyss1 = yyss;
+ YYSTYPE *yyvs1 = yyvs;
+ YYLTYPE *yyls1 = yyls;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * YYSIZEOF (*yyssp),
+ &yyvs1, yysize * YYSIZEOF (*yyvsp),
+ &yyls1, yysize * YYSIZEOF (*yylsp),
+ &yystacksize);
+ yyss = yyss1;
+ yyvs = yyvs1;
+ yyls = yyls1;
+ }
+# else /* defined YYSTACK_RELOCATE */
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yy_state_t *yyss1 = yyss;
+ union yyalloc *yyptr =
+ YY_CAST (union yyalloc *,
+ YYSTACK_ALLOC (YY_CAST (YYSIZE_T, YYSTACK_BYTES (yystacksize))));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+ YYSTACK_RELOCATE (yyls_alloc, yyls);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+ yylsp = yyls + yysize - 1;
+
+ YY_IGNORE_USELESS_CAST_BEGIN
+ YYDPRINTF ((stderr, "Stack size increased to %ld\n",
+ YY_CAST (long, yystacksize)));
+ YY_IGNORE_USELESS_CAST_END
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+  /* Not known => get a lookahead token if we don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = yylex (&yylval, &yylloc, parser);
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+ yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+ *++yylsp = yylloc;
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ '$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+ /* Default location. */
+ YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);
+ yyerror_range[1] = yyloc;
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 6:
+#line 228 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _glcpp_parser_print_expanded_token_list (parser, (yyvsp[0].token_list));
+ _mesa_string_buffer_append_char(parser->output, '\n');
+ }
+#line 1891 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 8:
+#line 236 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if (parser->is_gles && (yyvsp[-1].expression_value).undefined_macro)
+ glcpp_error(& (yylsp[-2]), parser, "undefined macro %s in expression (illegal in GLES)", (yyvsp[-1].expression_value).undefined_macro);
+ _glcpp_parser_skip_stack_push_if (parser, & (yylsp[-2]), (yyvsp[-1].expression_value).value);
+ }
+#line 1901 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 9:
+#line 241 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if (parser->is_gles && (yyvsp[-1].expression_value).undefined_macro)
+ glcpp_error(& (yylsp[-2]), parser, "undefined macro %s in expression (illegal in GLES)", (yyvsp[-1].expression_value).undefined_macro);
+ _glcpp_parser_skip_stack_change_if (parser, & (yylsp[-2]), "elif", (yyvsp[-1].expression_value).value);
+ }
+#line 1911 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 10:
+#line 246 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ parser->has_new_line_number = 1;
+ parser->new_line_number = (yyvsp[-1].ival);
+ _mesa_string_buffer_printf(parser->output, "#line %" PRIiMAX "\n", (yyvsp[-1].ival));
+ }
+#line 1921 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 11:
+#line 251 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ parser->has_new_line_number = 1;
+ parser->new_line_number = (yyvsp[-2].ival);
+ parser->has_new_source_number = 1;
+ parser->new_source_number = (yyvsp[-1].ival);
+ _mesa_string_buffer_printf(parser->output,
+ "#line %" PRIiMAX " %" PRIiMAX "\n",
+ (yyvsp[-2].ival), (yyvsp[-1].ival));
+ }
+#line 1935 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 12:
+#line 260 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ parser->has_new_line_number = 1;
+ parser->new_line_number = (yyvsp[-2].ival);
+ _mesa_string_buffer_printf(parser->output,
+ "#line %" PRIiMAX " %s\n",
+ (yyvsp[-2].ival), (yyvsp[-1].str));
+ }
+#line 1947 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 13:
+#line 270 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _define_object_macro (parser, & (yylsp[-2]), (yyvsp[-2].str), (yyvsp[-1].token_list));
+ }
+#line 1955 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 14:
+#line 273 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _define_function_macro (parser, & (yylsp[-4]), (yyvsp[-4].str), NULL, (yyvsp[-1].token_list));
+ }
+#line 1963 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 15:
+#line 276 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _define_function_macro (parser, & (yylsp[-5]), (yyvsp[-5].str), (yyvsp[-3].string_list), (yyvsp[-1].token_list));
+ }
+#line 1971 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 16:
+#line 282 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _mesa_string_buffer_append_char(parser->output, '\n');
+ }
+#line 1979 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 18:
+#line 286 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if (parser->skip_stack == NULL ||
+ parser->skip_stack->type == SKIP_NO_SKIP)
+ {
+ _glcpp_parser_expand_and_lex_from (parser,
+ LINE_EXPANDED, (yyvsp[-1].token_list),
+ EXPANSION_MODE_IGNORE_DEFINED);
+ }
+ }
+#line 1994 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 20:
+#line 300 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ struct hash_entry *entry;
+
+ /* Section 3.4 (Preprocessor) of the GLSL ES 3.00 spec says:
+ *
+ * It is an error to undefine or to redefine a built-in
+ * (pre-defined) macro name.
+ *
+ * The GLSL ES 1.00 spec does not contain this text, but
+ * dEQP's preprocess test in GLES2 checks for it.
+ *
+ * Section 3.3 (Preprocessor) revision 7, of the GLSL 4.50
+ * spec says:
+ *
+ * By convention, all macro names containing two consecutive
+ * underscores ( __ ) are reserved for use by underlying
+ * software layers. Defining or undefining such a name
+ * in a shader does not itself result in an error, but may
+ * result in unintended behaviors that stem from having
+ * multiple definitions of the same name. All macro names
+		 *     prefixed with "GL_" (...) are also reserved, and defining
+ * such a name results in a compile-time error.
+ *
+ * The code below implements the same checks as GLSLang.
+ */
+ if (strncmp("GL_", (yyvsp[-1].str), 3) == 0)
+ glcpp_error(& (yylsp[-3]), parser, "Built-in (pre-defined)"
+ " names beginning with GL_ cannot be undefined.");
+ else if (strstr((yyvsp[-1].str), "__") != NULL) {
+ if (parser->is_gles
+ && parser->version >= 300
+ && (strcmp("__LINE__", (yyvsp[-1].str)) == 0
+ || strcmp("__FILE__", (yyvsp[-1].str)) == 0
+ || strcmp("__VERSION__", (yyvsp[-1].str)) == 0)) {
+ glcpp_error(& (yylsp[-3]), parser, "Built-in (pre-defined)"
+ " names cannot be undefined.");
+ } else if (parser->is_gles && parser->version <= 300) {
+ glcpp_error(& (yylsp[-3]), parser,
+ " names containing consecutive underscores"
+ " are reserved.");
+ } else {
+ glcpp_warning(& (yylsp[-3]), parser,
+ " names containing consecutive underscores"
+ " are reserved.");
+ }
+ }
+
+ entry = _mesa_hash_table_search (parser->defines, (yyvsp[-1].str));
+ if (entry) {
+ _mesa_hash_table_remove (parser->defines, entry);
+ }
+ }
+#line 2051 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 21:
+#line 352 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ size_t include_cursor = _mesa_get_shader_include_cursor(parser->gl_ctx->Shared);
+
+ /* Remove leading and trailing "" or <> */
+ char *start = strchr((yyvsp[-1].str), '"');
+ if (!start) {
+ _mesa_set_shader_include_cursor(parser->gl_ctx->Shared, 0);
+ start = strchr((yyvsp[-1].str), '<');
+ }
+ char *path = strndup(start + 1, strlen(start + 1) - 1);
+
+ const char *shader =
+ _mesa_lookup_shader_include(parser->gl_ctx, path, false);
+ free(path);
+
+ if (!shader)
+ glcpp_error(&(yylsp[-2]), parser, "%s not found", (yyvsp[-1].str));
+ else {
+ /* Create a temporary parser with the same settings */
+ glcpp_parser_t *tmp_parser =
+ glcpp_parser_create(parser->gl_ctx, parser->extensions, parser->state);
+ tmp_parser->version_set = true;
+ tmp_parser->version = parser->version;
+
+ /* Set the shader source and run the lexer */
+ glcpp_lex_set_source_string(tmp_parser, shader);
+
+ /* Copy any existing define macros to the temporary
+			 * shader include parser.
+ */
+ struct define_include di;
+ di.parser = tmp_parser;
+ di.loc = &(yylsp[-2]);
+
+ hash_table_call_foreach(parser->defines,
+ glcpp_parser_copy_defines,
+ &di);
+
+ /* Print out '#include' to the glsl parser. We do this
+			 * so that it can do the error checking required to
+ * make sure the ARB_shading_language_include
+ * extension is enabled.
+ */
+ _mesa_string_buffer_printf(parser->output, "#include\n");
+
+ /* Parse the include string before adding to the
+ * preprocessor output.
+ */
+ glcpp_parser_parse(tmp_parser);
+ _mesa_string_buffer_printf(parser->info_log, "%s",
+ tmp_parser->info_log->buf);
+ _mesa_string_buffer_printf(parser->output, "%s",
+ tmp_parser->output->buf);
+
+ /* Copy any new define macros to the parent parser
+ * and steal the memory of our temp parser so we don't
+			 * free these new defines while they are still
+			 * needed.
+ */
+ di.parser = parser;
+ di.loc = &(yylsp[-2]);
+ ralloc_steal(parser, tmp_parser);
+
+ hash_table_call_foreach(tmp_parser->defines,
+ glcpp_parser_copy_defines,
+ &di);
+
+ /* Destroy tmp parser memory we no longer need */
+ glcpp_lex_destroy(tmp_parser->scanner);
+ _mesa_hash_table_destroy(tmp_parser->defines, NULL);
+ }
+
+ _mesa_set_shader_include_cursor(parser->gl_ctx->Shared, include_cursor);
+ }
+#line 2130 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
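+
+/* Editor's note -- an illustrative outline, not generated code, of the
+ * #include action above (strip_path is a hypothetical helper name for the
+ * strchr/strndup trimming step):
+ *
+ *   path   = strip_path ($2);                      //  "a.h"  or  <a.h>
+ *   shader = _mesa_lookup_shader_include (ctx, path, false);
+ *   tmp    = glcpp_parser_create (ctx, extensions, state);
+ *   copy #defines parser -> tmp, preprocess shader with tmp,
+ *   append tmp's info_log/output to parser's buffers,
+ *   copy #defines tmp -> parser, ralloc_steal to keep them alive.
+ */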
+
+ case 22:
+#line 426 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ /* Be careful to only evaluate the 'if' expression if
+ * we are not skipping. When we are skipping, we
+ * simply push a new 0-valued 'if' onto the skip
+ * stack.
+ *
+ * This avoids generating diagnostics for invalid
+ * expressions that are being skipped. */
+ if (parser->skip_stack == NULL ||
+ parser->skip_stack->type == SKIP_NO_SKIP)
+ {
+ _glcpp_parser_expand_and_lex_from (parser,
+ IF_EXPANDED, (yyvsp[-1].token_list),
+ EXPANSION_MODE_EVALUATE_DEFINED);
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_push_if (parser, & (yylsp[-3]), 0);
+ parser->skip_stack->type = SKIP_TO_ENDIF;
+ }
+ }
+#line 2156 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 23:
+#line 447 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ /* #if without an expression is only an error if we
+ * are not skipping */
+ if (parser->skip_stack == NULL ||
+ parser->skip_stack->type == SKIP_NO_SKIP)
+ {
+ glcpp_error(& (yylsp[-2]), parser, "#if with no expression");
+ }
+ _glcpp_parser_skip_stack_push_if (parser, & (yylsp[-2]), 0);
+ }
+#line 2171 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 24:
+#line 457 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(parser->defines, (yyvsp[-2].str));
+ macro_t *macro = entry ? entry->data : NULL;
+ _glcpp_parser_skip_stack_push_if (parser, & (yylsp[-4]), macro != NULL);
+ }
+#line 2182 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 25:
+#line 463 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(parser->defines, (yyvsp[-2].str));
+ macro_t *macro = entry ? entry->data : NULL;
+ _glcpp_parser_skip_stack_push_if (parser, & (yylsp[-2]), macro == NULL);
+ }
+#line 2193 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 26:
+#line 469 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ /* Be careful to only evaluate the 'elif' expression
+ * if we are not skipping. When we are skipping, we
+ * simply change to a 0-valued 'elif' on the skip
+ * stack.
+ *
+ * This avoids generating diagnostics for invalid
+ * expressions that are being skipped. */
+ if (parser->skip_stack &&
+ parser->skip_stack->type == SKIP_TO_ELSE)
+ {
+ _glcpp_parser_expand_and_lex_from (parser,
+ ELIF_EXPANDED, (yyvsp[-1].token_list),
+ EXPANSION_MODE_EVALUATE_DEFINED);
+ }
+ else if (parser->skip_stack &&
+ parser->skip_stack->has_else)
+ {
+ glcpp_error(& (yylsp[-3]), parser, "#elif after #else");
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_change_if (parser, & (yylsp[-3]),
+ "elif", 0);
+ }
+ }
+#line 2224 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 27:
+#line 495 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ /* #elif without an expression is an error unless we
+ * are skipping. */
+ if (parser->skip_stack &&
+ parser->skip_stack->type == SKIP_TO_ELSE)
+ {
+ glcpp_error(& (yylsp[-2]), parser, "#elif with no expression");
+ }
+ else if (parser->skip_stack &&
+ parser->skip_stack->has_else)
+ {
+ glcpp_error(& (yylsp[-2]), parser, "#elif after #else");
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_change_if (parser, & (yylsp[-2]),
+ "elif", 0);
+ glcpp_warning(& (yylsp[-2]), parser, "ignoring illegal #elif without expression");
+ }
+ }
+#line 2249 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 28:
+#line 515 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { parser->lexing_directive = 1; }
+#line 2255 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 29:
+#line 515 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if (parser->skip_stack &&
+ parser->skip_stack->has_else)
+ {
+ glcpp_error(& (yylsp[-3]), parser, "multiple #else");
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_change_if (parser, & (yylsp[-3]), "else", 1);
+ if (parser->skip_stack)
+ parser->skip_stack->has_else = true;
+ }
+ }
+#line 2273 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 30:
+#line 528 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _glcpp_parser_skip_stack_pop (parser, & (yylsp[-1]));
+ }
+#line 2281 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
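+
+/* Editor's note -- an illustrative skip-stack trace, not generated code,
+ * assuming no enclosing conditional is already skipping:
+ *
+ *   #if 0     push_if (0)            -> SKIP_TO_ELSE   (body skipped)
+ *   #elif 1   change_if ("elif", 1)  -> SKIP_NO_SKIP   (body emitted)
+ *   #else     change_if ("else", 1)  -> SKIP_TO_ENDIF  (branch taken already)
+ *   #endif    skip_stack_pop         -> stack empty
+ */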
+
+ case 32:
+#line 531 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if (parser->version_set) {
+ glcpp_error(& (yylsp[-3]), parser, "#version must appear on the first line");
+ }
+ _glcpp_parser_handle_version_declaration(parser, (yyvsp[-1].ival), NULL, true);
+ }
+#line 2292 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 33:
+#line 537 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if (parser->version_set) {
+ glcpp_error(& (yylsp[-4]), parser, "#version must appear on the first line");
+ }
+ _glcpp_parser_handle_version_declaration(parser, (yyvsp[-2].ival), (yyvsp[-1].str), true);
+ }
+#line 2303 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 34:
+#line 543 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ glcpp_parser_resolve_implicit_version(parser);
+ }
+#line 2311 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 35:
+#line 546 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ _mesa_string_buffer_printf(parser->output, "#%s", (yyvsp[-1].str));
+ }
+#line 2319 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 36:
+#line 552 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ glcpp_error(& (yylsp[-2]), parser, "#%s", (yyvsp[-1].str));
+ }
+#line 2327 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 37:
+#line 555 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ glcpp_error (& (yylsp[-2]), parser, "#define without macro name");
+ }
+#line 2335 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 38:
+#line 558 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ glcpp_error (& (yylsp[-3]), parser, "Illegal non-directive after #");
+ }
+#line 2343 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 39:
+#line 564 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ /* let strtoll detect the base */
+ (yyval.ival) = strtoll ((yyvsp[0].str), NULL, 0);
+ }
+#line 2352 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
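+
+/* Editor's note, illustrative: with base 0, strtoll infers the radix from
+ * the prefix, so all three GLSL integer spellings parse correctly:
+ *
+ *   "42" -> 42 (decimal)    "052" -> 42 (octal)    "0x2a" -> 42 (hex)
+ */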
+
+ case 40:
+#line 568 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.ival) = (yyvsp[0].ival);
+ }
+#line 2360 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 41:
+#line 573 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ /* Both octal and hexadecimal constants begin with 0. */
+ if ((yyvsp[0].str)[0] == '0' && (yyvsp[0].str)[1] != '\0') {
+ glcpp_error(&(yylsp[0]), parser, "invalid #version \"%s\" (not a decimal constant)", (yyvsp[0].str));
+ (yyval.ival) = 0;
+ } else {
+ (yyval.ival) = strtoll((yyvsp[0].str), NULL, 10);
+ }
+ }
+#line 2374 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
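+
+/* Editor's note, illustrative: the leading-zero check above rejects
+ * #version constants that strtoll would otherwise read in another base:
+ *
+ *   #version 300 es   -> 300 (decimal, accepted)
+ *   #version 0300     -> error, value forced to 0 (octal form)
+ *   #version 0x12c    -> error, value forced to 0 (hex form)
+ *
+ * A lone "0" still passes the check and parses as zero. */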
+
+ case 42:
+#line 584 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[0].ival);
+ (yyval.expression_value).undefined_macro = NULL;
+ }
+#line 2383 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 43:
+#line 588 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = 0;
+ if (parser->is_gles)
+ (yyval.expression_value).undefined_macro = linear_strdup(parser->linalloc, (yyvsp[0].str));
+ else
+ (yyval.expression_value).undefined_macro = NULL;
+ }
+#line 2395 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 44:
+#line 595 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value || (yyvsp[0].expression_value).value;
+
+ /* Short-circuit: Only flag undefined from right side
+ * if left side evaluates to false.
+ */
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else if (! (yyvsp[-2].expression_value).value)
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2411 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
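+
+/* Editor's note, illustrative: consequences of the short-circuit rule above
+ * when reporting undefined macros under GLES:
+ *
+ *   #if A || B   (A undefined)   -> A is reported
+ *   #if 0 || B   (B undefined)   -> B is reported
+ *   #if 1 || B   (B undefined)   -> nothing reported; the right side
+ *                                   can never affect the result
+ */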
+
+ case 45:
+#line 606 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value && (yyvsp[0].expression_value).value;
+
+ /* Short-circuit: Only flag undefined from right-side
+ * if left side evaluates to true.
+ */
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else if ((yyvsp[-2].expression_value).value)
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2427 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 46:
+#line 617 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value | (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2439 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 47:
+#line 624 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value ^ (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2451 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 48:
+#line 631 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value & (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2463 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 49:
+#line 638 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value != (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2475 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 50:
+#line 645 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value == (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2487 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 51:
+#line 652 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value >= (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2499 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 52:
+#line 659 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value <= (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2511 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 53:
+#line 666 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value > (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2523 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 54:
+#line 673 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value < (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2535 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 55:
+#line 680 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value >> (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2547 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 56:
+#line 687 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value << (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2559 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 57:
+#line 694 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value - (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2571 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 58:
+#line 701 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value + (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2583 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 59:
+#line 708 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if ((yyvsp[0].expression_value).value == 0) {
+ yyerror (& (yylsp[-2]), parser,
+ "zero modulus in preprocessor directive");
+ } else {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value % (yyvsp[0].expression_value).value;
+ }
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2600 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 60:
+#line 720 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ if ((yyvsp[0].expression_value).value == 0) {
+ yyerror (& (yylsp[-2]), parser,
+ "division by 0 in preprocessor directive");
+ } else {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value / (yyvsp[0].expression_value).value;
+ }
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2617 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 61:
+#line 732 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = (yyvsp[-2].expression_value).value * (yyvsp[0].expression_value).value;
+ if ((yyvsp[-2].expression_value).undefined_macro)
+ (yyval.expression_value).undefined_macro = (yyvsp[-2].expression_value).undefined_macro;
+ else
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2629 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 62:
+#line 739 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = ! (yyvsp[0].expression_value).value;
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2638 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 63:
+#line 743 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = ~ (yyvsp[0].expression_value).value;
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2647 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 64:
+#line 747 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = - (yyvsp[0].expression_value).value;
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2656 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 65:
+#line 751 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value).value = + (yyvsp[0].expression_value).value;
+ (yyval.expression_value).undefined_macro = (yyvsp[0].expression_value).undefined_macro;
+ }
+#line 2665 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 66:
+#line 755 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.expression_value) = (yyvsp[-1].expression_value);
+ }
+#line 2673 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 67:
+#line 761 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.string_list) = _string_list_create (parser);
+ _string_list_append_item (parser, (yyval.string_list), (yyvsp[0].str));
+ }
+#line 2682 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 68:
+#line 765 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.string_list) = (yyvsp[-2].string_list);
+ _string_list_append_item (parser, (yyval.string_list), (yyvsp[0].str));
+ }
+#line 2691 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 69:
+#line 772 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.token_list) = NULL; }
+#line 2697 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 71:
+#line 777 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.token_list) = NULL; }
+#line 2703 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 74:
+#line 783 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ glcpp_error(&(yylsp[0]), parser, "extra tokens at end of directive");
+ }
+#line 2711 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 75:
+#line 789 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ parser->space_tokens = 1;
+ (yyval.token_list) = _token_list_create (parser);
+ _token_list_append (parser, (yyval.token_list), (yyvsp[0].token));
+ }
+#line 2721 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 76:
+#line 794 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token_list) = (yyvsp[-1].token_list);
+ _token_list_append (parser, (yyval.token_list), (yyvsp[0].token));
+ }
+#line 2730 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 77:
+#line 801 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_str (parser, IDENTIFIER, (yyvsp[0].str));
+ (yyval.token)->location = yylloc;
+ }
+#line 2739 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 78:
+#line 805 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_str (parser, INTEGER_STRING, (yyvsp[0].str));
+ (yyval.token)->location = yylloc;
+ }
+#line 2748 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 79:
+#line 809 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_str (parser, PATH, (yyvsp[0].str));
+ (yyval.token)->location = yylloc;
+ }
+#line 2757 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 80:
+#line 813 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_ival (parser, (yyvsp[0].ival), (yyvsp[0].ival));
+ (yyval.token)->location = yylloc;
+ }
+#line 2766 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 81:
+#line 817 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_ival (parser, DEFINED, DEFINED);
+ (yyval.token)->location = yylloc;
+ }
+#line 2775 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 82:
+#line 821 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_str (parser, OTHER, (yyvsp[0].str));
+ (yyval.token)->location = yylloc;
+ }
+#line 2784 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 83:
+#line 825 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ {
+ (yyval.token) = _token_create_ival (parser, SPACE, SPACE);
+ (yyval.token)->location = yylloc;
+ }
+#line 2793 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 84:
+#line 832 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '['; }
+#line 2799 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 85:
+#line 833 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = ']'; }
+#line 2805 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 86:
+#line 834 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '('; }
+#line 2811 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 87:
+#line 835 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = ')'; }
+#line 2817 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 88:
+#line 836 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '{'; }
+#line 2823 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 89:
+#line 837 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '}'; }
+#line 2829 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 90:
+#line 838 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '.'; }
+#line 2835 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 91:
+#line 839 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '&'; }
+#line 2841 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 92:
+#line 840 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '*'; }
+#line 2847 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 93:
+#line 841 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '+'; }
+#line 2853 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 94:
+#line 842 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '-'; }
+#line 2859 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 95:
+#line 843 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '~'; }
+#line 2865 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 96:
+#line 844 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '!'; }
+#line 2871 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 97:
+#line 845 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '/'; }
+#line 2877 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 98:
+#line 846 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '%'; }
+#line 2883 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 99:
+#line 847 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = LEFT_SHIFT; }
+#line 2889 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 100:
+#line 848 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = RIGHT_SHIFT; }
+#line 2895 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 101:
+#line 849 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '<'; }
+#line 2901 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 102:
+#line 850 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '>'; }
+#line 2907 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 103:
+#line 851 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = LESS_OR_EQUAL; }
+#line 2913 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 104:
+#line 852 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = GREATER_OR_EQUAL; }
+#line 2919 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 105:
+#line 853 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = EQUAL; }
+#line 2925 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 106:
+#line 854 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = NOT_EQUAL; }
+#line 2931 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 107:
+#line 855 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '^'; }
+#line 2937 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 108:
+#line 856 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '|'; }
+#line 2943 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 109:
+#line 857 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = AND; }
+#line 2949 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 110:
+#line 858 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = OR; }
+#line 2955 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 111:
+#line 859 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = ';'; }
+#line 2961 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 112:
+#line 860 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = ','; }
+#line 2967 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 113:
+#line 861 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = '='; }
+#line 2973 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 114:
+#line 862 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = PASTE; }
+#line 2979 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 115:
+#line 863 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = PLUS_PLUS; }
+#line 2985 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+ case 116:
+#line 864 "src/compiler/glsl/glcpp/glcpp-parse.y"
+ { (yyval.ival) = MINUS_MINUS; }
+#line 2991 "src/compiler/glsl/glcpp/glcpp-parse.c"
+ break;
+
+
+#line 2995 "src/compiler/glsl/glcpp/glcpp-parse.c"
+
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+ *++yylsp = yyloc;
+
+ /* Now 'shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+ {
+ const int yylhs = yyr1[yyn] - YYNTOKENS;
+ const int yyi = yypgoto[yylhs] + *yyssp;
+ yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp
+ ? yytable[yyi]
+ : yydefgoto[yylhs]);
+ }
+
+ goto yynewstate;
+
+
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (&yylloc, parser, YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = YY_CAST (char *, YYSTACK_ALLOC (YY_CAST (YYSIZE_T, yymsg_alloc)));
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (&yylloc, parser, yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+ yyerror_range[1] = yylloc;
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval, &yylloc, parser);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+ /* Pacify compilers when the user code never invokes YYERROR and the
+ label yyerrorlab therefore never appears in user code. */
+ if (0)
+ YYERROR;
+
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+ yyerror_range[1] = *yylsp;
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp, yylsp, parser);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+ yyerror_range[2] = yylloc;
+ /* Using YYLLOC is tempting, but would change the location of
+ the lookahead. YYLOC is available though. */
+ YYLLOC_DEFAULT (yyloc, yyerror_range, 2);
+ *++yylsp = yyloc;
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+
+#if !defined yyoverflow || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (&yylloc, parser, YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+
+/*-----------------------------------------------------.
+| yyreturn -- parsing is finished, return the result. |
+`-----------------------------------------------------*/
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval, &yylloc, parser);
+ }
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp, yylsp, parser);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ return yyresult;
+}
+#line 867 "src/compiler/glsl/glcpp/glcpp-parse.y"
+
+
+string_list_t *
+_string_list_create(glcpp_parser_t *parser)
+{
+ string_list_t *list;
+
+ list = linear_alloc_child(parser->linalloc, sizeof(string_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+
+ return list;
+}
+
+void
+_string_list_append_item(glcpp_parser_t *parser, string_list_t *list,
+ const char *str)
+{
+ string_node_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(string_node_t));
+ node->str = linear_strdup(parser->linalloc, str);
+
+ node->next = NULL;
+
+ if (list->head == NULL) {
+ list->head = node;
+ } else {
+ list->tail->next = node;
+ }
+
+ list->tail = node;
+}
+
+int
+_string_list_contains(string_list_t *list, const char *member, int *index)
+{
+ string_node_t *node;
+ int i;
+
+ if (list == NULL)
+ return 0;
+
+ for (i = 0, node = list->head; node; i++, node = node->next) {
+ if (strcmp (node->str, member) == 0) {
+ if (index)
+ *index = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Return duplicate string in list (if any), NULL otherwise. */
+const char *
+_string_list_has_duplicate(string_list_t *list)
+{
+ string_node_t *node, *dup;
+
+ if (list == NULL)
+ return NULL;
+
+ for (node = list->head; node; node = node->next) {
+ for (dup = node->next; dup; dup = dup->next) {
+ if (strcmp (node->str, dup->str) == 0)
+ return node->str;
+ }
+ }
+
+ return NULL;
+}
+
+int
+_string_list_length(string_list_t *list)
+{
+ int length = 0;
+ string_node_t *node;
+
+ if (list == NULL)
+ return 0;
+
+ for (node = list->head; node; node = node->next)
+ length++;
+
+ return length;
+}
+
+int
+_string_list_equal(string_list_t *a, string_list_t *b)
+{
+ string_node_t *node_a, *node_b;
+
+ if (a == NULL && b == NULL)
+ return 1;
+
+ if (a == NULL || b == NULL)
+ return 0;
+
+ for (node_a = a->head, node_b = b->head;
+ node_a && node_b;
+ node_a = node_a->next, node_b = node_b->next)
+ {
+ if (strcmp (node_a->str, node_b->str))
+ return 0;
+ }
+
+ /* Catch the case of lists being different lengths, (which
+ * would cause the loop above to terminate after the shorter
+ * list). */
+ return node_a == node_b;
+}
+
+argument_list_t *
+_argument_list_create(glcpp_parser_t *parser)
+{
+ argument_list_t *list;
+
+ list = linear_alloc_child(parser->linalloc, sizeof(argument_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+
+ return list;
+}
+
+void
+_argument_list_append(glcpp_parser_t *parser,
+ argument_list_t *list, token_list_t *argument)
+{
+ argument_node_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(argument_node_t));
+ node->argument = argument;
+
+ node->next = NULL;
+
+ if (list->head == NULL) {
+ list->head = node;
+ } else {
+ list->tail->next = node;
+ }
+
+ list->tail = node;
+}
+
+int
+_argument_list_length(argument_list_t *list)
+{
+ int length = 0;
+ argument_node_t *node;
+
+ if (list == NULL)
+ return 0;
+
+ for (node = list->head; node; node = node->next)
+ length++;
+
+ return length;
+}
+
+token_list_t *
+_argument_list_member_at(argument_list_t *list, int index)
+{
+ argument_node_t *node;
+ int i;
+
+ if (list == NULL)
+ return NULL;
+
+ node = list->head;
+ for (i = 0; i < index; i++) {
+ node = node->next;
+ if (node == NULL)
+ break;
+ }
+
+ if (node)
+ return node->argument;
+
+ return NULL;
+}
+
+token_t *
+_token_create_str(glcpp_parser_t *parser, int type, char *str)
+{
+ token_t *token;
+
+ token = linear_alloc_child(parser->linalloc, sizeof(token_t));
+ token->type = type;
+ token->value.str = str;
+
+ return token;
+}
+
+token_t *
+_token_create_ival(glcpp_parser_t *parser, int type, int ival)
+{
+ token_t *token;
+
+ token = linear_alloc_child(parser->linalloc, sizeof(token_t));
+ token->type = type;
+ token->value.ival = ival;
+
+ return token;
+}
+
+token_list_t *
+_token_list_create(glcpp_parser_t *parser)
+{
+ token_list_t *list;
+
+ list = linear_alloc_child(parser->linalloc, sizeof(token_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+ list->non_space_tail = NULL;
+
+ return list;
+}
+
+void
+_token_list_append(glcpp_parser_t *parser, token_list_t *list, token_t *token)
+{
+ token_node_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(token_node_t));
+ node->token = token;
+ node->next = NULL;
+
+ if (list->head == NULL) {
+ list->head = node;
+ } else {
+ list->tail->next = node;
+ }
+
+ list->tail = node;
+ if (token->type != SPACE)
+ list->non_space_tail = node;
+}
+
+void
+_token_list_append_list(token_list_t *list, token_list_t *tail)
+{
+ if (tail == NULL || tail->head == NULL)
+ return;
+
+ if (list->head == NULL) {
+ list->head = tail->head;
+ } else {
+ list->tail->next = tail->head;
+ }
+
+ list->tail = tail->tail;
+ list->non_space_tail = tail->non_space_tail;
+}
+
+static token_list_t *
+_token_list_copy(glcpp_parser_t *parser, token_list_t *other)
+{
+ token_list_t *copy;
+ token_node_t *node;
+
+ if (other == NULL)
+ return NULL;
+
+ copy = _token_list_create (parser);
+ for (node = other->head; node; node = node->next) {
+ token_t *new_token = linear_alloc_child(parser->linalloc, sizeof(token_t));
+ *new_token = *node->token;
+ _token_list_append (parser, copy, new_token);
+ }
+
+ return copy;
+}
+
+static void
+_token_list_trim_trailing_space(token_list_t *list)
+{
+ if (list->non_space_tail) {
+ list->non_space_tail->next = NULL;
+ list->tail = list->non_space_tail;
+ }
+}
+
+static int
+_token_list_is_empty_ignoring_space(token_list_t *l)
+{
+ token_node_t *n;
+
+ if (l == NULL)
+ return 1;
+
+ n = l->head;
+ while (n != NULL && n->token->type == SPACE)
+ n = n->next;
+
+ return n == NULL;
+}
+
+int
+_token_list_equal_ignoring_space(token_list_t *a, token_list_t *b)
+{
+ token_node_t *node_a, *node_b;
+
+ if (a == NULL || b == NULL) {
+ int a_empty = _token_list_is_empty_ignoring_space(a);
+ int b_empty = _token_list_is_empty_ignoring_space(b);
+ return a_empty == b_empty;
+ }
+
+ node_a = a->head;
+ node_b = b->head;
+
+ while (1)
+ {
+ if (node_a == NULL && node_b == NULL)
+ break;
+
+ /* Ignore trailing whitespace */
+ if (node_a == NULL && node_b->token->type == SPACE) {
+ while (node_b && node_b->token->type == SPACE)
+ node_b = node_b->next;
+ }
+
+ if (node_a == NULL && node_b == NULL)
+ break;
+
+ if (node_b == NULL && node_a->token->type == SPACE) {
+ while (node_a && node_a->token->type == SPACE)
+ node_a = node_a->next;
+ }
+
+ if (node_a == NULL && node_b == NULL)
+ break;
+
+ if (node_a == NULL || node_b == NULL)
+ return 0;
+ /* Make sure whitespace appears in the same places in both.
+ * It need not be exactly the same amount of whitespace,
+ * though.
+ */
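+      /* For example, the replacement lists "1 + 2" and "1    +    2"
+       * compare equal here, while "1+2" and "1 + 2" do not, since only
+       * one of them has SPACE tokens around the '+'.
+       */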
+ if (node_a->token->type == SPACE && node_b->token->type == SPACE) {
+ while (node_a && node_a->token->type == SPACE)
+ node_a = node_a->next;
+ while (node_b && node_b->token->type == SPACE)
+ node_b = node_b->next;
+ continue;
+ }
+
+ if (node_a->token->type != node_b->token->type)
+ return 0;
+
+ switch (node_a->token->type) {
+ case INTEGER:
+ if (node_a->token->value.ival != node_b->token->value.ival) {
+ return 0;
+ }
+ break;
+ case IDENTIFIER:
+ case INTEGER_STRING:
+ case OTHER:
+ if (strcmp(node_a->token->value.str, node_b->token->value.str)) {
+ return 0;
+ }
+ break;
+ }
+
+ node_a = node_a->next;
+ node_b = node_b->next;
+ }
+
+ return 1;
+}
+
+static void
+_token_print(struct _mesa_string_buffer *out, token_t *token)
+{
+ if (token->type < 256) {
+ _mesa_string_buffer_append_char(out, token->type);
+ return;
+ }
+
+ switch (token->type) {
+ case INTEGER:
+ _mesa_string_buffer_printf(out, "%" PRIiMAX, token->value.ival);
+ break;
+ case IDENTIFIER:
+ case INTEGER_STRING:
+ case PATH:
+ case OTHER:
+ _mesa_string_buffer_append(out, token->value.str);
+ break;
+ case SPACE:
+ _mesa_string_buffer_append_char(out, ' ');
+ break;
+ case LEFT_SHIFT:
+ _mesa_string_buffer_append(out, "<<");
+ break;
+ case RIGHT_SHIFT:
+ _mesa_string_buffer_append(out, ">>");
+ break;
+ case LESS_OR_EQUAL:
+ _mesa_string_buffer_append(out, "<=");
+ break;
+ case GREATER_OR_EQUAL:
+ _mesa_string_buffer_append(out, ">=");
+ break;
+ case EQUAL:
+ _mesa_string_buffer_append(out, "==");
+ break;
+ case NOT_EQUAL:
+ _mesa_string_buffer_append(out, "!=");
+ break;
+ case AND:
+ _mesa_string_buffer_append(out, "&&");
+ break;
+ case OR:
+ _mesa_string_buffer_append(out, "||");
+ break;
+ case PASTE:
+ _mesa_string_buffer_append(out, "##");
+ break;
+ case PLUS_PLUS:
+ _mesa_string_buffer_append(out, "++");
+ break;
+ case MINUS_MINUS:
+ _mesa_string_buffer_append(out, "--");
+ break;
+ case DEFINED:
+ _mesa_string_buffer_append(out, "defined");
+ break;
+ case PLACEHOLDER:
+ /* Nothing to print. */
+ break;
+ default:
+ assert(!"Error: Don't know how to print token.");
+
+ break;
+ }
+}
+
+/* Return a new token formed by pasting 'token' and 'other'. Note that this
+ * function may return 'token' or 'other' directly rather than allocating
+ * anything new.
+ *
+ * Caution: Only very cursory error-checking is performed to see if
+ * the final result is a valid single token. */
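+/* For example, pasting '<' and '=' yields a single LESS_OR_EQUAL token,
+ * pasting "12" and "34" yields the integer-string "1234", and pasting
+ * "foo" and "12" yields the identifier "foo12". Pasting "12" and "foo"
+ * fails, however, since the result would not be a valid integer.
+ */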
+static token_t *
+_token_paste(glcpp_parser_t *parser, token_t *token, token_t *other)
+{
+ token_t *combined = NULL;
+
+ /* Pasting a placeholder onto anything makes no change. */
+ if (other->type == PLACEHOLDER)
+ return token;
+
+ /* When 'token' is a placeholder, just return 'other'. */
+ if (token->type == PLACEHOLDER)
+ return other;
+
+ /* A very few single-character punctuators can be combined
+ * with another to form a multi-character punctuator. */
+ switch (token->type) {
+ case '<':
+ if (other->type == '<')
+ combined = _token_create_ival (parser, LEFT_SHIFT, LEFT_SHIFT);
+ else if (other->type == '=')
+ combined = _token_create_ival (parser, LESS_OR_EQUAL, LESS_OR_EQUAL);
+ break;
+ case '>':
+ if (other->type == '>')
+ combined = _token_create_ival (parser, RIGHT_SHIFT, RIGHT_SHIFT);
+ else if (other->type == '=')
+ combined = _token_create_ival (parser, GREATER_OR_EQUAL, GREATER_OR_EQUAL);
+ break;
+ case '=':
+ if (other->type == '=')
+ combined = _token_create_ival (parser, EQUAL, EQUAL);
+ break;
+ case '!':
+ if (other->type == '=')
+ combined = _token_create_ival (parser, NOT_EQUAL, NOT_EQUAL);
+ break;
+ case '&':
+ if (other->type == '&')
+ combined = _token_create_ival (parser, AND, AND);
+ break;
+ case '|':
+ if (other->type == '|')
+ combined = _token_create_ival (parser, OR, OR);
+ break;
+ }
+
+ if (combined != NULL) {
+ /* Inherit the location from the first token */
+ combined->location = token->location;
+ return combined;
+ }
+
+ /* Two string-valued (or integer) tokens can usually just be
+	 * mashed together. (A string followed by an integer is
+	 * handled here as well.)
+ *
+ * There are some exceptions here. Notably, if the first token
+ * is an integer (or a string representing an integer), then
+ * the second token must also be an integer or must be a
+ * string representing an integer that begins with a digit.
+ */
+ if ((token->type == IDENTIFIER || token->type == OTHER || token->type == INTEGER_STRING || token->type == INTEGER) &&
+ (other->type == IDENTIFIER || other->type == OTHER || other->type == INTEGER_STRING || other->type == INTEGER))
+ {
+ char *str;
+ int combined_type;
+
+ /* Check that pasting onto an integer doesn't create a
+ * non-integer, (that is, only digits can be
+		 * pasted). */
+ if (token->type == INTEGER_STRING || token->type == INTEGER) {
+ switch (other->type) {
+ case INTEGER_STRING:
+ if (other->value.str[0] < '0' || other->value.str[0] > '9')
+ goto FAIL;
+ break;
+ case INTEGER:
+ if (other->value.ival < 0)
+ goto FAIL;
+ break;
+ default:
+ goto FAIL;
+ }
+ }
+
+ if (token->type == INTEGER)
+ str = linear_asprintf(parser->linalloc, "%" PRIiMAX, token->value.ival);
+ else
+ str = linear_strdup(parser->linalloc, token->value.str);
+
+ if (other->type == INTEGER)
+ linear_asprintf_append(parser->linalloc, &str, "%" PRIiMAX, other->value.ival);
+ else
+ linear_strcat(parser->linalloc, &str, other->value.str);
+
+ /* New token is same type as original token, unless we
+ * started with an integer, in which case we will be
+ * creating an integer-string. */
+ combined_type = token->type;
+ if (combined_type == INTEGER)
+ combined_type = INTEGER_STRING;
+
+ combined = _token_create_str (parser, combined_type, str);
+ combined->location = token->location;
+ return combined;
+ }
+
+ FAIL:
+ glcpp_error (&token->location, parser, "");
+ _mesa_string_buffer_append(parser->info_log, "Pasting \"");
+ _token_print(parser->info_log, token);
+ _mesa_string_buffer_append(parser->info_log, "\" and \"");
+ _token_print(parser->info_log, other);
+ _mesa_string_buffer_append(parser->info_log, "\" does not give a valid preprocessing token.\n");
+
+ return token;
+}
+
+static void
+_token_list_print(glcpp_parser_t *parser, token_list_t *list)
+{
+ token_node_t *node;
+
+ if (list == NULL)
+ return;
+
+ for (node = list->head; node; node = node->next)
+ _token_print(parser->output, node->token);
+}
+
+void
+yyerror(YYLTYPE *locp, glcpp_parser_t *parser, const char *error)
+{
+ glcpp_error(locp, parser, "%s", error);
+}
+
+static void
+add_builtin_define(glcpp_parser_t *parser, const char *name, int value)
+{
+ token_t *tok;
+ token_list_t *list;
+
+ tok = _token_create_ival (parser, INTEGER, value);
+
+ list = _token_list_create(parser);
+ _token_list_append(parser, list, tok);
+ _define_object_macro(parser, NULL, name, list);
+}
+
+/* Initial output buffer size: 4096 minus the ralloc() overhead. It was
+ * selected to minimize the total amount of memory allocated during a
+ * shader-db run.
+ */
+#define INITIAL_PP_OUTPUT_BUF_SIZE 4048
+
+glcpp_parser_t *
+glcpp_parser_create(struct gl_context *gl_ctx,
+ glcpp_extension_iterator extensions, void *state)
+{
+ glcpp_parser_t *parser;
+
+ parser = ralloc (NULL, glcpp_parser_t);
+
+ glcpp_lex_init_extra (parser, &parser->scanner);
+ parser->defines = _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+ parser->linalloc = linear_alloc_parent(parser, 0);
+ parser->active = NULL;
+ parser->lexing_directive = 0;
+ parser->lexing_version_directive = 0;
+ parser->space_tokens = 1;
+ parser->last_token_was_newline = 0;
+ parser->last_token_was_space = 0;
+ parser->first_non_space_token_this_line = 1;
+ parser->newline_as_space = 0;
+ parser->in_control_line = 0;
+ parser->paren_count = 0;
+ parser->commented_newlines = 0;
+
+ parser->skip_stack = NULL;
+ parser->skipping = 0;
+
+ parser->lex_from_list = NULL;
+ parser->lex_from_node = NULL;
+
+ parser->output = _mesa_string_buffer_create(parser,
+ INITIAL_PP_OUTPUT_BUF_SIZE);
+ parser->info_log = _mesa_string_buffer_create(parser,
+ INITIAL_PP_OUTPUT_BUF_SIZE);
+ parser->error = 0;
+
+ parser->gl_ctx = gl_ctx;
+ parser->extensions = extensions;
+ parser->extension_list = &gl_ctx->Extensions;
+ parser->state = state;
+ parser->api = gl_ctx->API;
+ parser->version = 0;
+ parser->version_set = false;
+
+ parser->has_new_line_number = 0;
+ parser->new_line_number = 1;
+ parser->has_new_source_number = 0;
+ parser->new_source_number = 0;
+
+ parser->is_gles = false;
+
+ return parser;
+}
+
+void
+glcpp_parser_destroy(glcpp_parser_t *parser)
+{
+ glcpp_lex_destroy (parser->scanner);
+ _mesa_hash_table_destroy(parser->defines, NULL);
+ ralloc_free (parser);
+}
+
+typedef enum function_status
+{
+ FUNCTION_STATUS_SUCCESS,
+ FUNCTION_NOT_A_FUNCTION,
+ FUNCTION_UNBALANCED_PARENTHESES
+} function_status_t;
+
+/* Find a set of function-like macro arguments by looking for a
+ * balanced set of parentheses.
+ *
+ * When called, 'node' should be the macro-name token; the opening
+ * parenthesis, (or perhaps preceding SPACE tokens), is expected to
+ * follow it. Upon successful return *last will
+ * be the last consumed node, (corresponding to the closing right
+ * parenthesis).
+ *
+ * Return values:
+ *
+ * FUNCTION_STATUS_SUCCESS:
+ *
+ * Successfully parsed a set of function arguments.
+ *
+ * FUNCTION_NOT_A_FUNCTION:
+ *
+ * Macro name not followed by a '('. This is not an error, but
+ * simply that the macro name should be treated as a non-macro.
+ *
+ * FUNCTION_UNBALANCED_PARENTHESES
+ *
+ * Macro name is not followed by a balanced set of parentheses.
+ */
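+/* For example, for the invocation "F(a, (b, c))" this collects two
+ * arguments, "a" and "(b, c)"; the comma inside the nested parentheses
+ * does not separate arguments since it occurs at a paren_count of 2.
+ */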
+static function_status_t
+_arguments_parse(glcpp_parser_t *parser,
+ argument_list_t *arguments, token_node_t *node,
+ token_node_t **last)
+{
+ token_list_t *argument;
+ int paren_count;
+
+ node = node->next;
+
+ /* Ignore whitespace before first parenthesis. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL || node->token->type != '(')
+ return FUNCTION_NOT_A_FUNCTION;
+
+ node = node->next;
+
+ argument = _token_list_create (parser);
+ _argument_list_append (parser, arguments, argument);
+
+ for (paren_count = 1; node; node = node->next) {
+ if (node->token->type == '(') {
+ paren_count++;
+ } else if (node->token->type == ')') {
+ paren_count--;
+ if (paren_count == 0)
+ break;
+ }
+
+ if (node->token->type == ',' && paren_count == 1) {
+ _token_list_trim_trailing_space (argument);
+ argument = _token_list_create (parser);
+ _argument_list_append (parser, arguments, argument);
+ } else {
+ if (argument->head == NULL) {
+ /* Don't treat initial whitespace as part of the argument. */
+ if (node->token->type == SPACE)
+ continue;
+ }
+ _token_list_append(parser, argument, node->token);
+ }
+ }
+
+ if (paren_count)
+ return FUNCTION_UNBALANCED_PARENTHESES;
+
+ *last = node;
+
+ return FUNCTION_STATUS_SUCCESS;
+}
+
+static token_list_t *
+_token_list_create_with_one_ival(glcpp_parser_t *parser, int type, int ival)
+{
+ token_list_t *list;
+ token_t *node;
+
+ list = _token_list_create(parser);
+ node = _token_create_ival(parser, type, ival);
+ _token_list_append(parser, list, node);
+
+ return list;
+}
+
+static token_list_t *
+_token_list_create_with_one_space(glcpp_parser_t *parser)
+{
+ return _token_list_create_with_one_ival(parser, SPACE, SPACE);
+}
+
+static token_list_t *
+_token_list_create_with_one_integer(glcpp_parser_t *parser, int ival)
+{
+ return _token_list_create_with_one_ival(parser, INTEGER, ival);
+}
+
+/* Evaluate a DEFINED token node (based on subsequent tokens in the list).
+ *
+ * Note: This function must only be called when "node" is a DEFINED token,
+ * (and will abort with an assertion failure otherwise).
+ *
+ * If "node" is followed, (ignoring any SPACE tokens), by an IDENTIFIER token
+ * (optionally preceded and followed by '(' and ')' tokens) then the following
+ * occurs:
+ *
+ * If the identifier is a defined macro, this function returns 1.
+ *
+ * If the identifier is not a defined macro, this function returns 0.
+ *
+ * In either case, *last will be updated to the last node in the list
+ * consumed by the evaluation, (either the token of the identifier or the
+ * token of the closing parenthesis).
+ *
+ * In all other cases, (such as "node is the final node of the list", or
+ * "missing closing parenthesis", etc.), this function generates a
+ * preprocessor error, returns -1 and *last will not be set.
+ */
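+/* For example, with FOO defined, "defined FOO", "defined(FOO)" and
+ * "defined ( FOO )" all evaluate to 1 here, while a bare "defined" with
+ * no following identifier triggers the error case and returns -1.
+ */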
+static int
+_glcpp_parser_evaluate_defined(glcpp_parser_t *parser, token_node_t *node,
+ token_node_t **last)
+{
+ token_node_t *argument, *defined = node;
+
+ assert(node->token->type == DEFINED);
+
+ node = node->next;
+
+ /* Ignore whitespace after DEFINED token. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL)
+ goto FAIL;
+
+ if (node->token->type == IDENTIFIER || node->token->type == OTHER) {
+ argument = node;
+ } else if (node->token->type == '(') {
+ node = node->next;
+
+ /* Ignore whitespace after '(' token. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL || (node->token->type != IDENTIFIER &&
+ node->token->type != OTHER)) {
+ goto FAIL;
+ }
+
+ argument = node;
+
+ node = node->next;
+
+ /* Ignore whitespace after identifier, before ')' token. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL || node->token->type != ')')
+ goto FAIL;
+ } else {
+ goto FAIL;
+ }
+
+ *last = node;
+
+ return _mesa_hash_table_search(parser->defines,
+ argument->token->value.str) ? 1 : 0;
+
+FAIL:
+ glcpp_error (&defined->token->location, parser,
+ "\"defined\" not followed by an identifier");
+ return -1;
+}
+
+/* Evaluate all DEFINED nodes in a given list, modifying the list in place.
+ */
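+/* For example, with A defined and B undefined, the condition of
+ * "#if defined(A) && defined B" is rewritten in place to "1 && 0".
+ */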
+static void
+_glcpp_parser_evaluate_defined_in_list(glcpp_parser_t *parser,
+ token_list_t *list)
+{
+ token_node_t *node, *node_prev, *replacement, *last = NULL;
+ int value;
+
+ if (list == NULL)
+ return;
+
+ node_prev = NULL;
+ node = list->head;
+
+ while (node) {
+
+ if (node->token->type != DEFINED)
+ goto NEXT;
+
+ value = _glcpp_parser_evaluate_defined (parser, node, &last);
+ if (value == -1)
+ goto NEXT;
+
+ replacement = linear_alloc_child(parser->linalloc, sizeof(token_node_t));
+ replacement->token = _token_create_ival (parser, INTEGER, value);
+
+ /* Splice replacement node into list, replacing from "node"
+ * through "last". */
+ if (node_prev)
+ node_prev->next = replacement;
+ else
+ list->head = replacement;
+ replacement->next = last->next;
+ if (last == list->tail)
+ list->tail = replacement;
+
+ node = replacement;
+
+ NEXT:
+ node_prev = node;
+ node = node->next;
+ }
+}
+
+/* Perform macro expansion on 'list', placing the resulting tokens
+ * into a new list which is initialized with a first token of type
+ * 'head_token_type'. Then begin lexing from the resulting list,
+ * (return to the current lexing source when this list is exhausted).
+ *
+ * See the documentation of _glcpp_parser_expand_token_list for a description
+ * of the "mode" parameter.
+ */
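+/* For example, "#line FOO" (with FOO a macro) is handled by expanding
+ * the directive's tokens here under a LINE_EXPANDED head token and then
+ * re-lexing, so the parser sees an already-expanded "#line" directive.
+ */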
+static void
+_glcpp_parser_expand_and_lex_from(glcpp_parser_t *parser, int head_token_type,
+ token_list_t *list, expansion_mode_t mode)
+{
+ token_list_t *expanded;
+ token_t *token;
+
+ expanded = _token_list_create (parser);
+ token = _token_create_ival (parser, head_token_type, head_token_type);
+ _token_list_append (parser, expanded, token);
+ _glcpp_parser_expand_token_list (parser, list, mode);
+ _token_list_append_list (expanded, list);
+ glcpp_parser_lex_from (parser, expanded);
+}
+
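+/* Resolve all '##' (PASTE) operators in 'list' by pasting together the
+ * adjacent non-space tokens. For example, after argument substitution
+ * the body of "#define CAT(a,b) a ## b" invoked as "CAT(x, y)" becomes
+ * the single identifier "xy".
+ */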
+static void
+_glcpp_parser_apply_pastes(glcpp_parser_t *parser, token_list_t *list)
+{
+ token_node_t *node;
+
+ node = list->head;
+ while (node) {
+ token_node_t *next_non_space;
+
+ /* Look ahead for a PASTE token, skipping space. */
+ next_non_space = node->next;
+ while (next_non_space && next_non_space->token->type == SPACE)
+ next_non_space = next_non_space->next;
+
+ if (next_non_space == NULL)
+ break;
+
+ if (next_non_space->token->type != PASTE) {
+ node = next_non_space;
+ continue;
+ }
+
+ /* Now find the next non-space token after the PASTE. */
+ next_non_space = next_non_space->next;
+ while (next_non_space && next_non_space->token->type == SPACE)
+ next_non_space = next_non_space->next;
+
+ if (next_non_space == NULL) {
+ yyerror(&node->token->location, parser, "'##' cannot appear at either end of a macro expansion\n");
+ return;
+ }
+
+ node->token = _token_paste(parser, node->token, next_non_space->token);
+ node->next = next_non_space->next;
+ if (next_non_space == list->tail)
+ list->tail = node;
+ }
+
+ list->non_space_tail = list->tail;
+}
+
+/* This is a helper function that's essentially part of the
+ * implementation of _glcpp_parser_expand_node. It shouldn't be called
+ * except for by that function.
+ *
+ * Returns NULL if node is a simple token with no expansion, (that is,
+ * although 'node' corresponds to an identifier defined as a
+ * function-like macro, it is not followed by a parenthesized
+ * argument list).
+ *
+ * Compute the complete expansion of node (which is a function-like
+ * macro) and subsequent nodes which are arguments.
+ *
+ * Returns the token list that results from the expansion and sets
+ * *last to the last node in the list that was consumed by the
+ * expansion. Specifically, *last will be set to the node of the
+ * closing right parenthesis.
+ *
+ * See the documentation of _glcpp_parser_expand_token_list for a description
+ * of the "mode" parameter.
+ */
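+/* For example, given "#define F(a,b) a + b", the invocation "F(1, 2)"
+ * is expanded here to "1 + 2". Each argument is itself macro-expanded
+ * before substitution, and a macro whose replacement list is empty
+ * expands to a single SPACE token.
+ */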
+static token_list_t *
+_glcpp_parser_expand_function(glcpp_parser_t *parser, token_node_t *node,
+ token_node_t **last, expansion_mode_t mode)
+{
+ struct hash_entry *entry;
+ macro_t *macro;
+ const char *identifier;
+ argument_list_t *arguments;
+ function_status_t status;
+ token_list_t *substituted;
+ int parameter_index;
+
+ identifier = node->token->value.str;
+
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ macro = entry ? entry->data : NULL;
+
+ assert(macro->is_function);
+
+ arguments = _argument_list_create(parser);
+ status = _arguments_parse(parser, arguments, node, last);
+
+ switch (status) {
+ case FUNCTION_STATUS_SUCCESS:
+ break;
+ case FUNCTION_NOT_A_FUNCTION:
+ return NULL;
+ case FUNCTION_UNBALANCED_PARENTHESES:
+ glcpp_error(&node->token->location, parser, "Macro %s call has unbalanced parentheses\n", identifier);
+ return NULL;
+ }
+
+ /* Replace a macro defined as empty with a SPACE token. */
+ if (macro->replacements == NULL) {
+ return _token_list_create_with_one_space(parser);
+ }
+
+ if (!((_argument_list_length (arguments) ==
+ _string_list_length (macro->parameters)) ||
+ (_string_list_length (macro->parameters) == 0 &&
+ _argument_list_length (arguments) == 1 &&
+ arguments->head->argument->head == NULL))) {
+ glcpp_error(&node->token->location, parser,
+ "Error: macro %s invoked with %d arguments (expected %d)\n",
+ identifier, _argument_list_length (arguments),
+ _string_list_length(macro->parameters));
+ return NULL;
+ }
+
+ /* Perform argument substitution on the replacement list. */
+ substituted = _token_list_create(parser);
+
+ for (node = macro->replacements->head; node; node = node->next) {
+ if (node->token->type == IDENTIFIER &&
+ _string_list_contains(macro->parameters, node->token->value.str,
+ &parameter_index)) {
+ token_list_t *argument;
+ argument = _argument_list_member_at(arguments, parameter_index);
+ /* Before substituting, we expand the argument tokens, or append a
+ * placeholder token for an empty argument. */
+ if (argument->head) {
+ token_list_t *expanded_argument;
+ expanded_argument = _token_list_copy(parser, argument);
+ _glcpp_parser_expand_token_list(parser, expanded_argument, mode);
+ _token_list_append_list(substituted, expanded_argument);
+ } else {
+ token_t *new_token;
+
+ new_token = _token_create_ival(parser, PLACEHOLDER,
+ PLACEHOLDER);
+ _token_list_append(parser, substituted, new_token);
+ }
+ } else {
+ _token_list_append(parser, substituted, node->token);
+ }
+ }
+
+ /* After argument substitution, and before further expansion
+ * below, implement token pasting. */
+
+ _token_list_trim_trailing_space(substituted);
+
+ _glcpp_parser_apply_pastes(parser, substituted);
+
+ return substituted;
+}
+
+/* Compute the complete expansion of node, (and subsequent nodes after
+ * 'node' in the case that 'node' is a function-like macro and
+ * subsequent nodes are arguments).
+ *
+ * Returns NULL if node is a simple token with no expansion.
+ *
+ * Otherwise, returns the token list that results from the expansion
+ * and sets *last to the last node in the list that was consumed by
+ * the expansion. Specifically, *last will be set as follows:
+ *
+ * As 'node' in the case of object-like macro expansion.
+ *
+ * As the token of the closing right parenthesis in the case of
+ * function-like macro expansion.
+ *
+ * See the documentation of _glcpp_parser_expand_token_list for a description
+ * of the "mode" parameter.
+ */
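+/* For example, "__LINE__" expands to the current line number, an
+ * object-like macro expands to a copy of its replacement list (with
+ * any '##' pastes applied), and an identifier that names no macro
+ * yields NULL so the token passes through unchanged.
+ */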
+static token_list_t *
+_glcpp_parser_expand_node(glcpp_parser_t *parser, token_node_t *node,
+ token_node_t **last, expansion_mode_t mode,
+ int line)
+{
+ token_t *token = node->token;
+ const char *identifier;
+ struct hash_entry *entry;
+ macro_t *macro;
+
+ /* We only expand identifiers */
+ if (token->type != IDENTIFIER) {
+ return NULL;
+ }
+
+ *last = node;
+ identifier = token->value.str;
+
+ /* Special handling for __LINE__ and __FILE__, (not through
+ * the hash table). */
+ if (*identifier == '_') {
+ if (strcmp(identifier, "__LINE__") == 0)
+ return _token_list_create_with_one_integer(parser, line);
+
+ if (strcmp(identifier, "__FILE__") == 0)
+ return _token_list_create_with_one_integer(parser,
+ node->token->location.source);
+ }
+
+ /* Look up this identifier in the hash table. */
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ macro = entry ? entry->data : NULL;
+
+ /* Not a macro, so no expansion needed. */
+ if (macro == NULL)
+ return NULL;
+
+ /* Finally, don't expand this macro if we're already actively
+ * expanding it, (to avoid infinite recursion). */
+ if (_parser_active_list_contains (parser, identifier)) {
+ /* We change the token type here from IDENTIFIER to OTHER to prevent any
+ * future expansion of this unexpanded token. */
+ char *str;
+ token_list_t *expansion;
+ token_t *final;
+
+ str = linear_strdup(parser->linalloc, token->value.str);
+ final = _token_create_str(parser, OTHER, str);
+ expansion = _token_list_create(parser);
+ _token_list_append(parser, expansion, final);
+ return expansion;
+ }
+
+ if (! macro->is_function) {
+ token_list_t *replacement;
+
+ /* Replace a macro defined as empty with a SPACE token. */
+ if (macro->replacements == NULL)
+ return _token_list_create_with_one_space(parser);
+
+ replacement = _token_list_copy(parser, macro->replacements);
+ _glcpp_parser_apply_pastes(parser, replacement);
+ return replacement;
+ }
+
+ return _glcpp_parser_expand_function(parser, node, last, mode);
+}
+
+/* Push a new identifier onto the parser's active list.
+ *
+ * Here, 'marker' is the token node that appears in the list after the
+ * expansion of 'identifier'. That is, when the list iterator begins
+ * examining 'marker', then it is time to pop this node from the
+ * active stack.
+ */
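+/* This active list is the guard against infinite recursion: while an
+ * identifier is on it, occurrences produced by its own expansion are
+ * not re-expanded. For example, "#define FOO FOO + 1" expands "FOO"
+ * once, to "FOO + 1", and then stops.
+ */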
+static void
+_parser_active_list_push(glcpp_parser_t *parser, const char *identifier,
+ token_node_t *marker)
+{
+ active_list_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(active_list_t));
+ node->identifier = linear_strdup(parser->linalloc, identifier);
+ node->marker = marker;
+ node->next = parser->active;
+
+ parser->active = node;
+}
+
+static void
+_parser_active_list_pop(glcpp_parser_t *parser)
+{
+ active_list_t *node = parser->active;
+
+ if (node == NULL) {
+ parser->active = NULL;
+ return;
+ }
+
+ node = parser->active->next;
+ parser->active = node;
+}
+
+static int
+_parser_active_list_contains(glcpp_parser_t *parser, const char *identifier)
+{
+ active_list_t *node;
+
+ if (parser->active == NULL)
+ return 0;
+
+ for (node = parser->active; node; node = node->next)
+ if (strcmp(node->identifier, identifier) == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Walk over the token list replacing nodes with their expansion.
+ * Whenever nodes are expanded the walking will walk over the new
+ * nodes, continuing to expand as necessary. The results are placed in
+ * 'list' itself.
+ *
+ * The "mode" argument controls the handling of any DEFINED tokens that
+ * result from expansion as follows:
+ *
+ * EXPANSION_MODE_IGNORE_DEFINED: Any resulting DEFINED tokens will be
+ * left in the final list, unevaluated. This is the correct mode
+ * for expanding any list in any context other than a
+ * preprocessor conditional, (#if or #elif).
+ *
+ * EXPANSION_MODE_EVALUATE_DEFINED: Any resulting DEFINED tokens will be
+ * evaluated to 0 or 1 tokens depending on whether the following
+ * token is the name of a defined macro. If the DEFINED token is
+ * not followed by an (optionally parenthesized) identifier, then
+ *	an error will be generated. This is the correct mode for
+ * expanding any list in the context of a preprocessor
+ * conditional, (#if or #elif).
+ */
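+/* For example, a "#if" condition is expanded with
+ * EXPANSION_MODE_EVALUATE_DEFINED so that "defined(FOO)" collapses to
+ * 1 or 0, whereas an ordinary text line is expanded with
+ * EXPANSION_MODE_IGNORE_DEFINED and any "defined" token survives into
+ * the output unevaluated.
+ */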
+static void
+_glcpp_parser_expand_token_list(glcpp_parser_t *parser, token_list_t *list,
+ expansion_mode_t mode)
+{
+ token_node_t *node_prev;
+ token_node_t *node, *last = NULL;
+ token_list_t *expansion;
+ active_list_t *active_initial = parser->active;
+ int line;
+
+ if (list == NULL)
+ return;
+
+ _token_list_trim_trailing_space (list);
+
+ line = list->tail->token->location.last_line;
+
+ node_prev = NULL;
+ node = list->head;
+
+ if (mode == EXPANSION_MODE_EVALUATE_DEFINED)
+ _glcpp_parser_evaluate_defined_in_list (parser, list);
+
+ while (node) {
+
+ while (parser->active && parser->active->marker == node)
+ _parser_active_list_pop (parser);
+
+ expansion = _glcpp_parser_expand_node (parser, node, &last, mode, line);
+ if (expansion) {
+ token_node_t *n;
+
+ if (mode == EXPANSION_MODE_EVALUATE_DEFINED) {
+ _glcpp_parser_evaluate_defined_in_list (parser, expansion);
+ }
+
+ for (n = node; n != last->next; n = n->next)
+ while (parser->active && parser->active->marker == n) {
+ _parser_active_list_pop (parser);
+ }
+
+ _parser_active_list_push(parser, node->token->value.str, last->next);
+
+ /* Splice expansion into list, supporting a simple deletion if the
+ * expansion is empty.
+ */
+ if (expansion->head) {
+ if (node_prev)
+ node_prev->next = expansion->head;
+ else
+ list->head = expansion->head;
+ expansion->tail->next = last->next;
+ if (last == list->tail)
+ list->tail = expansion->tail;
+ } else {
+ if (node_prev)
+ node_prev->next = last->next;
+ else
+ list->head = last->next;
+ if (last == list->tail)
+ list->tail = NULL;
+ }
+ } else {
+ node_prev = node;
+ }
+ node = node_prev ? node_prev->next : list->head;
+ }
+
+ /* Remove any lingering effects of this invocation on the
+ * active list. That is, pop until the list looks like it did
+ * at the beginning of this function. */
+ while (parser->active && parser->active != active_initial)
+ _parser_active_list_pop (parser);
+
+ list->non_space_tail = list->tail;
+}
+
+void
+_glcpp_parser_print_expanded_token_list(glcpp_parser_t *parser,
+ token_list_t *list)
+{
+ if (list == NULL)
+ return;
+
+ _glcpp_parser_expand_token_list (parser, list, EXPANSION_MODE_IGNORE_DEFINED);
+
+ _token_list_trim_trailing_space (list);
+
+ _token_list_print (parser, list);
+}
+
+static void
+_check_for_reserved_macro_name(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *identifier)
+{
+ /* Section 3.3 (Preprocessor) of the GLSL 1.30 spec (and later) and
+ * the GLSL ES spec (all versions) say:
+ *
+ * "All macro names containing two consecutive underscores ( __ )
+ * are reserved for future use as predefined macro names. All
+ * macro names prefixed with "GL_" ("GL" followed by a single
+ * underscore) are also reserved."
+ *
+ * The intention is that names containing __ are reserved for internal
+ * use by the implementation, and names prefixed with GL_ are reserved
+ * for use by Khronos. Since every extension adds a name prefixed
+ * with GL_ (i.e., the name of the extension), that should be an
+ * error. Names simply containing __ are dangerous to use, but should
+ * be allowed.
+ *
+ * A future version of the GLSL specification will clarify this.
+ */
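+   /* For example, "#define GL_FOO 1" is rejected with an error,
+    * "#define a__b 1" draws only a warning, and "#define defined 1"
+    * is an error.
+    */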
+ if (strstr(identifier, "__")) {
+ glcpp_warning(loc, parser, "Macro names containing \"__\" are reserved "
+ "for use by the implementation.\n");
+ }
+ if (strncmp(identifier, "GL_", 3) == 0) {
+ glcpp_error (loc, parser, "Macro names starting with \"GL_\" are reserved.\n");
+ }
+ if (strcmp(identifier, "defined") == 0) {
+ glcpp_error (loc, parser, "\"defined\" cannot be used as a macro name");
+ }
+}
+
+static int
+_macro_equal(macro_t *a, macro_t *b)
+{
+ if (a->is_function != b->is_function)
+ return 0;
+
+ if (a->is_function) {
+ if (! _string_list_equal (a->parameters, b->parameters))
+ return 0;
+ }
+
+ return _token_list_equal_ignoring_space(a->replacements, b->replacements);
+}
+
+void
+_define_object_macro(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *identifier, token_list_t *replacements)
+{
+ macro_t *macro, *previous;
+ struct hash_entry *entry;
+
+ /* We define pre-defined macros before we've started parsing the actual
+	 * file. So if there's no location defined yet, that's what we're doing and
+ * we don't want to generate an error for using the reserved names. */
+ if (loc != NULL)
+ _check_for_reserved_macro_name(parser, loc, identifier);
+
+ macro = linear_alloc_child(parser->linalloc, sizeof(macro_t));
+
+ macro->is_function = 0;
+ macro->parameters = NULL;
+ macro->identifier = linear_strdup(parser->linalloc, identifier);
+ macro->replacements = replacements;
+
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ previous = entry ? entry->data : NULL;
+ if (previous) {
+ if (_macro_equal (macro, previous)) {
+ return;
+ }
+ glcpp_error (loc, parser, "Redefinition of macro %s\n", identifier);
+ }
+
+ _mesa_hash_table_insert (parser->defines, identifier, macro);
+}
+
+void
+_define_function_macro(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *identifier, string_list_t *parameters,
+ token_list_t *replacements)
+{
+ macro_t *macro, *previous;
+ struct hash_entry *entry;
+ const char *dup;
+
+ _check_for_reserved_macro_name(parser, loc, identifier);
+
+ /* Check for any duplicate parameter names. */
+ if ((dup = _string_list_has_duplicate (parameters)) != NULL) {
+ glcpp_error (loc, parser, "Duplicate macro parameter \"%s\"", dup);
+ }
+
+ macro = linear_alloc_child(parser->linalloc, sizeof(macro_t));
+
+ macro->is_function = 1;
+ macro->parameters = parameters;
+ macro->identifier = linear_strdup(parser->linalloc, identifier);
+ macro->replacements = replacements;
+
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ previous = entry ? entry->data : NULL;
+ if (previous) {
+ if (_macro_equal (macro, previous)) {
+ return;
+ }
+ glcpp_error (loc, parser, "Redefinition of macro %s\n", identifier);
+ }
+
+ _mesa_hash_table_insert(parser->defines, identifier, macro);
+}
+
+static int
+glcpp_parser_lex(YYSTYPE *yylval, YYLTYPE *yylloc, glcpp_parser_t *parser)
+{
+ token_node_t *node;
+ int ret;
+
+ if (parser->lex_from_list == NULL) {
+ ret = glcpp_lex(yylval, yylloc, parser->scanner);
+
+ /* XXX: This ugly block of code exists for the sole
+ * purpose of converting a NEWLINE token into a SPACE
+ * token, but only in the case where we have seen a
+ * function-like macro name, but have not yet seen its
+ * closing parenthesis.
+ *
+ * There's perhaps a more compact way to do this with
+ * mid-rule actions in the grammar.
+ *
+ * I'm definitely not pleased with the complexity of
+ * this code here.
+ */
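+      /* For example, this lets a function-like macro invocation span
+       * lines: in "F(1,\n2)" the NEWLINE inside the argument list is
+       * returned as a SPACE token until the closing ')' brings
+       * paren_count back to zero.
+       */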
+ if (parser->newline_as_space) {
+ if (ret == '(') {
+ parser->paren_count++;
+ } else if (ret == ')') {
+ parser->paren_count--;
+ if (parser->paren_count == 0)
+ parser->newline_as_space = 0;
+ } else if (ret == NEWLINE) {
+ ret = SPACE;
+ } else if (ret != SPACE) {
+ if (parser->paren_count == 0)
+ parser->newline_as_space = 0;
+ }
+ } else if (parser->in_control_line) {
+ if (ret == NEWLINE)
+ parser->in_control_line = 0;
+ }
+ else if (ret == DEFINE_TOKEN || ret == UNDEF || ret == IF ||
+ ret == IFDEF || ret == IFNDEF || ret == ELIF || ret == ELSE ||
+ ret == ENDIF || ret == HASH_TOKEN) {
+ parser->in_control_line = 1;
+ } else if (ret == IDENTIFIER) {
+ struct hash_entry *entry = _mesa_hash_table_search(parser->defines,
+ yylval->str);
+ macro_t *macro = entry ? entry->data : NULL;
+ if (macro && macro->is_function) {
+ parser->newline_as_space = 1;
+ parser->paren_count = 0;
+ }
+ }
+
+ return ret;
+ }
+
+ node = parser->lex_from_node;
+
+ if (node == NULL) {
+ parser->lex_from_list = NULL;
+ return NEWLINE;
+ }
+
+ *yylval = node->token->value;
+ ret = node->token->type;
+
+ parser->lex_from_node = node->next;
+
+ return ret;
+}
+
+static void
+glcpp_parser_lex_from(glcpp_parser_t *parser, token_list_t *list)
+{
+ token_node_t *node;
+
+ assert (parser->lex_from_list == NULL);
+
+ /* Copy list, eliminating any space tokens. */
+ parser->lex_from_list = _token_list_create (parser);
+
+ for (node = list->head; node; node = node->next) {
+ if (node->token->type == SPACE)
+ continue;
+ _token_list_append (parser, parser->lex_from_list, node->token);
+ }
+
+ parser->lex_from_node = parser->lex_from_list->head;
+
+ /* It's possible the list consisted of nothing but whitespace. */
+ if (parser->lex_from_node == NULL) {
+ parser->lex_from_list = NULL;
+ }
+}
+
+static void
+_glcpp_parser_skip_stack_push_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ int condition)
+{
+ skip_type_t current = SKIP_NO_SKIP;
+ skip_node_t *node;
+
+ if (parser->skip_stack)
+ current = parser->skip_stack->type;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(skip_node_t));
+ node->loc = *loc;
+
+ if (current == SKIP_NO_SKIP) {
+ if (condition)
+ node->type = SKIP_NO_SKIP;
+ else
+ node->type = SKIP_TO_ELSE;
+ } else {
+ node->type = SKIP_TO_ENDIF;
+ }
+
+ node->has_else = false;
+ node->next = parser->skip_stack;
+ parser->skip_stack = node;
+}
+
+static void
+_glcpp_parser_skip_stack_change_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *type, int condition)
+{
+ if (parser->skip_stack == NULL) {
+ glcpp_error (loc, parser, "#%s without #if\n", type);
+ return;
+ }
+
+ if (parser->skip_stack->type == SKIP_TO_ELSE) {
+ if (condition)
+ parser->skip_stack->type = SKIP_NO_SKIP;
+ } else {
+ parser->skip_stack->type = SKIP_TO_ENDIF;
+ }
+}
+
+static void
+_glcpp_parser_skip_stack_pop(glcpp_parser_t *parser, YYLTYPE *loc)
+{
+ skip_node_t *node;
+
+ if (parser->skip_stack == NULL) {
+ glcpp_error (loc, parser, "#endif without #if\n");
+ return;
+ }
+
+ node = parser->skip_stack;
+ parser->skip_stack = node->next;
+}
+
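+/* Record the shader's GLSL version and install the matching pre-defined
+ * macros. For example, "#version 300 es" defines __VERSION__ as 300,
+ * GL_ES as 1, and GL_FRAGMENT_PRECISION_HIGH as 1.
+ */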
+static void
+_glcpp_parser_handle_version_declaration(glcpp_parser_t *parser, intmax_t version,
+ const char *identifier,
+ bool explicitly_set)
+{
+ if (parser->version_set)
+ return;
+
+ parser->version = version;
+ parser->version_set = true;
+
+ add_builtin_define (parser, "__VERSION__", version);
+
+ parser->is_gles = (version == 100) ||
+ (identifier && (strcmp(identifier, "es") == 0));
+ bool is_compat = version >= 150 && identifier &&
+ strcmp(identifier, "compatibility") == 0;
+
+ /* Add pre-defined macros. */
+ if (parser->is_gles)
+ add_builtin_define(parser, "GL_ES", 1);
+ else if (is_compat)
+ add_builtin_define(parser, "GL_compatibility_profile", 1);
+ else if (version >= 150)
+ add_builtin_define(parser, "GL_core_profile", 1);
+
+ /* Currently, all ES2/ES3 implementations support highp in the
+ * fragment shader, so we always define this macro in ES2/ES3.
+ * If we ever get a driver that doesn't support highp, we'll
+ * need to add a flag to the gl_context and check that here.
+ */
+ if (version >= 130 || parser->is_gles)
+ add_builtin_define (parser, "GL_FRAGMENT_PRECISION_HIGH", 1);
+
+ /* Add all the extension macros available in this context */
+ if (parser->extensions)
+ parser->extensions(parser->state, add_builtin_define, parser,
+ version, parser->is_gles);
+
+ if (parser->extension_list) {
+ /* If MESA_shader_integer_functions is supported, then the building
+ * blocks required for the 64x64 => 64 multiply exist. Add defines for
+ * those functions so that they can be tested.
+ */
+ if (parser->extension_list->MESA_shader_integer_functions) {
+ add_builtin_define(parser, "__have_builtin_builtin_sign64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_umul64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_udiv64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_umod64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_idiv64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_imod64", 1);
+ }
+ }
+
+ if (explicitly_set) {
+ _mesa_string_buffer_printf(parser->output,
+ "#version %" PRIiMAX "%s%s", version,
+ identifier ? " " : "",
+ identifier ? identifier : "");
+ }
+}
+
+/* GLSL version if no version is explicitly specified. */
+#define IMPLICIT_GLSL_VERSION 110
+
+/* GLSL ES version if no version is explicitly specified. */
+#define IMPLICIT_GLSL_ES_VERSION 100
+
+void
+glcpp_parser_resolve_implicit_version(glcpp_parser_t *parser)
+{
+ int language_version = parser->api == API_OPENGLES2 ?
+ IMPLICIT_GLSL_ES_VERSION : IMPLICIT_GLSL_VERSION;
+
+ _glcpp_parser_handle_version_declaration(parser, language_version,
+ NULL, false);
+}
+
+static void
+glcpp_parser_copy_defines(const void *key, void *data, void *closure)
+{
+ struct define_include *di = (struct define_include *) closure;
+ macro_t *macro = (macro_t *) data;
+
+ /* If we hit an error on a previous pass, just return */
+ if (di->parser->error)
+ return;
+
+ const char *identifier = macro->identifier;
+ struct hash_entry *entry = _mesa_hash_table_search(di->parser->defines,
+ identifier);
+
+ macro_t *previous = entry ? entry->data : NULL;
+ if (previous) {
+ if (_macro_equal(macro, previous)) {
+ return;
+ }
+ glcpp_error(di->loc, di->parser, "Redefinition of macro %s\n",
+ identifier);
+ }
+
+ _mesa_hash_table_insert(di->parser->defines, identifier, macro);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h
new file mode 100644
index 0000000000..18a9c7122d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h
@@ -0,0 +1,116 @@
+/* A Bison parser, made by GNU Bison 3.5. */
+
+/* Bison interface for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* Undocumented macros, especially those whose name start with YY_,
+ are private implementation details. Do not rely on them. */
+
+#ifndef YY_GLCPP_PARSER_SRC_COMPILER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED
+# define YY_GLCPP_PARSER_SRC_COMPILER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 1
+#endif
+#if YYDEBUG
+extern int glcpp_parser_debug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ DEFINED = 258,
+ ELIF_EXPANDED = 259,
+ HASH_TOKEN = 260,
+ DEFINE_TOKEN = 261,
+ FUNC_IDENTIFIER = 262,
+ OBJ_IDENTIFIER = 263,
+ ELIF = 264,
+ ELSE = 265,
+ ENDIF = 266,
+ ERROR_TOKEN = 267,
+ IF = 268,
+ IFDEF = 269,
+ IFNDEF = 270,
+ LINE = 271,
+ PRAGMA = 272,
+ UNDEF = 273,
+ VERSION_TOKEN = 274,
+ GARBAGE = 275,
+ IDENTIFIER = 276,
+ IF_EXPANDED = 277,
+ INTEGER = 278,
+ INTEGER_STRING = 279,
+ LINE_EXPANDED = 280,
+ NEWLINE = 281,
+ OTHER = 282,
+ PLACEHOLDER = 283,
+ SPACE = 284,
+ PLUS_PLUS = 285,
+ MINUS_MINUS = 286,
+ PATH = 287,
+ INCLUDE = 288,
+ PASTE = 289,
+ OR = 290,
+ AND = 291,
+ EQUAL = 292,
+ NOT_EQUAL = 293,
+ LESS_OR_EQUAL = 294,
+ GREATER_OR_EQUAL = 295,
+ LEFT_SHIFT = 296,
+ RIGHT_SHIFT = 297,
+ UNARY = 298
+ };
+#endif
+
+/* Value type. */
+
+/* Location type. */
+#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
+typedef struct YYLTYPE YYLTYPE;
+struct YYLTYPE
+{
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+};
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
+#endif
+
+
+
+int glcpp_parser_parse (glcpp_parser_t *parser);
+
+#endif /* !YY_GLCPP_PARSER_SRC_COMPILER_GLSL_GLCPP_GLCPP_PARSE_H_INCLUDED */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y
new file mode 100644
index 0000000000..339071d07b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y
@@ -0,0 +1,2546 @@
+%{
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+#include <inttypes.h>
+
+#include "glcpp.h"
+#include "main/mtypes.h"
+#include "util/strndup.h"
+
+const char *
+_mesa_lookup_shader_include(struct gl_context *ctx, char *path,
+ bool error_check);
+
+size_t
+_mesa_get_shader_include_cursor(struct gl_shared_state *shared);
+
+void
+_mesa_set_shader_include_cursor(struct gl_shared_state *shared, size_t cursor);
+
+static void
+yyerror(YYLTYPE *locp, glcpp_parser_t *parser, const char *error);
+
+static void
+_define_object_macro(glcpp_parser_t *parser,
+ YYLTYPE *loc,
+ const char *macro,
+ token_list_t *replacements);
+
+static void
+_define_function_macro(glcpp_parser_t *parser,
+ YYLTYPE *loc,
+ const char *macro,
+ string_list_t *parameters,
+ token_list_t *replacements);
+
+static string_list_t *
+_string_list_create(glcpp_parser_t *parser);
+
+static void
+_string_list_append_item(glcpp_parser_t *parser, string_list_t *list,
+ const char *str);
+
+static int
+_string_list_contains(string_list_t *list, const char *member, int *index);
+
+static const char *
+_string_list_has_duplicate(string_list_t *list);
+
+static int
+_string_list_length(string_list_t *list);
+
+static int
+_string_list_equal(string_list_t *a, string_list_t *b);
+
+static argument_list_t *
+_argument_list_create(glcpp_parser_t *parser);
+
+static void
+_argument_list_append(glcpp_parser_t *parser, argument_list_t *list,
+ token_list_t *argument);
+
+static int
+_argument_list_length(argument_list_t *list);
+
+static token_list_t *
+_argument_list_member_at(argument_list_t *list, int index);
+
+static token_t *
+_token_create_str(glcpp_parser_t *parser, int type, char *str);
+
+static token_t *
+_token_create_ival(glcpp_parser_t *parser, int type, int ival);
+
+static token_list_t *
+_token_list_create(glcpp_parser_t *parser);
+
+static void
+_token_list_append(glcpp_parser_t *parser, token_list_t *list, token_t *token);
+
+static void
+_token_list_append_list(token_list_t *list, token_list_t *tail);
+
+static int
+_token_list_equal_ignoring_space(token_list_t *a, token_list_t *b);
+
+static void
+_parser_active_list_push(glcpp_parser_t *parser, const char *identifier,
+ token_node_t *marker);
+
+static void
+_parser_active_list_pop(glcpp_parser_t *parser);
+
+static int
+_parser_active_list_contains(glcpp_parser_t *parser, const char *identifier);
+
+typedef enum {
+ EXPANSION_MODE_IGNORE_DEFINED,
+ EXPANSION_MODE_EVALUATE_DEFINED
+} expansion_mode_t;
+
+/* Expand list, and begin lexing from the result (after first
+ * prefixing a token of type 'head_token_type').
+ */
+static void
+_glcpp_parser_expand_and_lex_from(glcpp_parser_t *parser, int head_token_type,
+ token_list_t *list, expansion_mode_t mode);
+
+/* Perform macro expansion in-place on the given list. */
+static void
+_glcpp_parser_expand_token_list(glcpp_parser_t *parser, token_list_t *list,
+ expansion_mode_t mode);
+
+static void
+_glcpp_parser_print_expanded_token_list(glcpp_parser_t *parser,
+ token_list_t *list);
+
+static void
+_glcpp_parser_skip_stack_push_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ int condition);
+
+static void
+_glcpp_parser_skip_stack_change_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *type, int condition);
+
+static void
+_glcpp_parser_skip_stack_pop(glcpp_parser_t *parser, YYLTYPE *loc);
+
+static void
+_glcpp_parser_handle_version_declaration(glcpp_parser_t *parser, intmax_t version,
+ const char *ident, bool explicitly_set);
+
+static int
+glcpp_parser_lex(YYSTYPE *yylval, YYLTYPE *yylloc, glcpp_parser_t *parser);
+
+static void
+glcpp_parser_lex_from(glcpp_parser_t *parser, token_list_t *list);
+
+struct define_include {
+ glcpp_parser_t *parser;
+ YYLTYPE *loc;
+};
+
+static void
+glcpp_parser_copy_defines(const void *key, void *data, void *closure);
+
+static void
+add_builtin_define(glcpp_parser_t *parser, const char *name, int value);
+
+%}
+
+%define api.pure
+%define parse.error verbose
+
+%locations
+%initial-action {
+ @$.first_line = 1;
+ @$.first_column = 1;
+ @$.last_line = 1;
+ @$.last_column = 1;
+ @$.source = 0;
+}
+
+%parse-param {glcpp_parser_t *parser}
+%lex-param {glcpp_parser_t *parser}
+
+%expect 0
+
+ /* We use HASH_TOKEN, DEFINE_TOKEN and VERSION_TOKEN (as opposed to
+ * HASH, DEFINE, and VERSION) to avoid conflicts with other symbols,
+ * (such as the <HASH> and <DEFINE> start conditions in the lexer). */
+%token DEFINED ELIF_EXPANDED HASH_TOKEN DEFINE_TOKEN FUNC_IDENTIFIER OBJ_IDENTIFIER ELIF ELSE ENDIF ERROR_TOKEN IF IFDEF IFNDEF LINE PRAGMA UNDEF VERSION_TOKEN GARBAGE IDENTIFIER IF_EXPANDED INTEGER INTEGER_STRING LINE_EXPANDED NEWLINE OTHER PLACEHOLDER SPACE PLUS_PLUS MINUS_MINUS PATH INCLUDE
+%token PASTE
+%type <ival> INTEGER operator SPACE integer_constant version_constant
+%type <expression_value> expression
+%type <str> IDENTIFIER FUNC_IDENTIFIER OBJ_IDENTIFIER INTEGER_STRING OTHER ERROR_TOKEN PRAGMA PATH INCLUDE
+%type <string_list> identifier_list
+%type <token> preprocessing_token
+%type <token_list> pp_tokens replacement_list text_line
+%left OR
+%left AND
+%left '|'
+%left '^'
+%left '&'
+%left EQUAL NOT_EQUAL
+%left '<' '>' LESS_OR_EQUAL GREATER_OR_EQUAL
+%left LEFT_SHIFT RIGHT_SHIFT
+%left '+' '-'
+%left '*' '/' '%'
+%right UNARY
+
+%debug
+
+%%
+
+input:
+ /* empty */
+| input line
+;
+
+line:
+ control_line
+| SPACE control_line
+| text_line {
+ _glcpp_parser_print_expanded_token_list (parser, $1);
+ _mesa_string_buffer_append_char(parser->output, '\n');
+ }
+| expanded_line
+;
+
+expanded_line:
+ IF_EXPANDED expression NEWLINE {
+ if (parser->is_gles && $2.undefined_macro)
+ glcpp_error(& @1, parser, "undefined macro %s in expression (illegal in GLES)", $2.undefined_macro);
+ _glcpp_parser_skip_stack_push_if (parser, & @1, $2.value);
+ }
+| ELIF_EXPANDED expression NEWLINE {
+ if (parser->is_gles && $2.undefined_macro)
+ glcpp_error(& @1, parser, "undefined macro %s in expression (illegal in GLES)", $2.undefined_macro);
+ _glcpp_parser_skip_stack_change_if (parser, & @1, "elif", $2.value);
+ }
+| LINE_EXPANDED integer_constant NEWLINE {
+ parser->has_new_line_number = 1;
+ parser->new_line_number = $2;
+ _mesa_string_buffer_printf(parser->output, "#line %" PRIiMAX "\n", $2);
+ }
+| LINE_EXPANDED integer_constant integer_constant NEWLINE {
+ parser->has_new_line_number = 1;
+ parser->new_line_number = $2;
+ parser->has_new_source_number = 1;
+ parser->new_source_number = $3;
+ _mesa_string_buffer_printf(parser->output,
+ "#line %" PRIiMAX " %" PRIiMAX "\n",
+ $2, $3);
+ }
+| LINE_EXPANDED integer_constant PATH NEWLINE {
+ parser->has_new_line_number = 1;
+ parser->new_line_number = $2;
+ _mesa_string_buffer_printf(parser->output,
+ "#line %" PRIiMAX " %s\n",
+ $2, $3);
+ }
+;
+
+define:
+ OBJ_IDENTIFIER replacement_list NEWLINE {
+ _define_object_macro (parser, & @1, $1, $2);
+ }
+| FUNC_IDENTIFIER '(' ')' replacement_list NEWLINE {
+ _define_function_macro (parser, & @1, $1, NULL, $4);
+ }
+| FUNC_IDENTIFIER '(' identifier_list ')' replacement_list NEWLINE {
+ _define_function_macro (parser, & @1, $1, $3, $5);
+ }
+;
+
+control_line:
+ control_line_success {
+ _mesa_string_buffer_append_char(parser->output, '\n');
+ }
+| control_line_error
+| HASH_TOKEN LINE pp_tokens NEWLINE {
+
+ if (parser->skip_stack == NULL ||
+ parser->skip_stack->type == SKIP_NO_SKIP)
+ {
+ _glcpp_parser_expand_and_lex_from (parser,
+ LINE_EXPANDED, $3,
+ EXPANSION_MODE_IGNORE_DEFINED);
+ }
+ }
+;
+
+control_line_success:
+ HASH_TOKEN DEFINE_TOKEN define
+| HASH_TOKEN UNDEF IDENTIFIER NEWLINE {
+ struct hash_entry *entry;
+
+ /* Section 3.4 (Preprocessor) of the GLSL ES 3.00 spec says:
+ *
+ * It is an error to undefine or to redefine a built-in
+ * (pre-defined) macro name.
+ *
+ * The GLSL ES 1.00 spec does not contain this text, but
+ * dEQP's preprocess test in GLES2 checks for it.
+ *
+	 * Section 3.3 (Preprocessor), revision 7, of the GLSL 4.50
+	 * spec says:
+ *
+ * By convention, all macro names containing two consecutive
+ * underscores ( __ ) are reserved for use by underlying
+ * software layers. Defining or undefining such a name
+ * in a shader does not itself result in an error, but may
+ * result in unintended behaviors that stem from having
+ * multiple definitions of the same name. All macro names
+	 * prefixed with "GL_" (...) are also reserved, and defining
+ * such a name results in a compile-time error.
+ *
+ * The code below implements the same checks as GLSLang.
+ */
+ if (strncmp("GL_", $3, 3) == 0)
+ glcpp_error(& @1, parser, "Built-in (pre-defined)"
+ " names beginning with GL_ cannot be undefined.");
+ else if (strstr($3, "__") != NULL) {
+ if (parser->is_gles
+ && parser->version >= 300
+ && (strcmp("__LINE__", $3) == 0
+ || strcmp("__FILE__", $3) == 0
+ || strcmp("__VERSION__", $3) == 0)) {
+ glcpp_error(& @1, parser, "Built-in (pre-defined)"
+ " names cannot be undefined.");
+ } else if (parser->is_gles && parser->version <= 300) {
+ glcpp_error(& @1, parser,
+ " names containing consecutive underscores"
+ " are reserved.");
+ } else {
+ glcpp_warning(& @1, parser,
+ " names containing consecutive underscores"
+ " are reserved.");
+ }
+ }
+
+ entry = _mesa_hash_table_search (parser->defines, $3);
+ if (entry) {
+ _mesa_hash_table_remove (parser->defines, entry);
+ }
+ }
+| HASH_TOKEN INCLUDE NEWLINE {
+ size_t include_cursor = _mesa_get_shader_include_cursor(parser->gl_ctx->Shared);
+
+ /* Remove leading and trailing "" or <> */
+ char *start = strchr($2, '"');
+ if (!start) {
+ _mesa_set_shader_include_cursor(parser->gl_ctx->Shared, 0);
+ start = strchr($2, '<');
+ }
+ char *path = strndup(start + 1, strlen(start + 1) - 1);
+
+ const char *shader =
+ _mesa_lookup_shader_include(parser->gl_ctx, path, false);
+ free(path);
+
+ if (!shader)
+ glcpp_error(&@1, parser, "%s not found", $2);
+ else {
+ /* Create a temporary parser with the same settings */
+ glcpp_parser_t *tmp_parser =
+ glcpp_parser_create(parser->gl_ctx, parser->extensions, parser->state);
+ tmp_parser->version_set = true;
+ tmp_parser->version = parser->version;
+
+ /* Set the shader source and run the lexer */
+ glcpp_lex_set_source_string(tmp_parser, shader);
+
+ /* Copy any existing define macros to the temporary
+	 * shader include parser.
+ */
+ struct define_include di;
+ di.parser = tmp_parser;
+ di.loc = &@1;
+
+ hash_table_call_foreach(parser->defines,
+ glcpp_parser_copy_defines,
+ &di);
+
+ /* Print out '#include' to the glsl parser. We do this
+	 * so that it can do the error checking required to
+ * make sure the ARB_shading_language_include
+ * extension is enabled.
+ */
+ _mesa_string_buffer_printf(parser->output, "#include\n");
+
+ /* Parse the include string before adding to the
+ * preprocessor output.
+ */
+ glcpp_parser_parse(tmp_parser);
+ _mesa_string_buffer_printf(parser->info_log, "%s",
+ tmp_parser->info_log->buf);
+ _mesa_string_buffer_printf(parser->output, "%s",
+ tmp_parser->output->buf);
+
+ /* Copy any new define macros to the parent parser
+ * and steal the memory of our temp parser so we don't
+ * free these new defines before they are no longer
+ * needed.
+ */
+ di.parser = parser;
+ di.loc = &@1;
+ ralloc_steal(parser, tmp_parser);
+
+ hash_table_call_foreach(tmp_parser->defines,
+ glcpp_parser_copy_defines,
+ &di);
+
+ /* Destroy tmp parser memory we no longer need */
+ glcpp_lex_destroy(tmp_parser->scanner);
+ _mesa_hash_table_destroy(tmp_parser->defines, NULL);
+ }
+
+ _mesa_set_shader_include_cursor(parser->gl_ctx->Shared, include_cursor);
+ }
+| HASH_TOKEN IF pp_tokens NEWLINE {
+ /* Be careful to only evaluate the 'if' expression if
+ * we are not skipping. When we are skipping, we
+ * simply push a new 0-valued 'if' onto the skip
+ * stack.
+ *
+ * This avoids generating diagnostics for invalid
+ * expressions that are being skipped. */
+ if (parser->skip_stack == NULL ||
+ parser->skip_stack->type == SKIP_NO_SKIP)
+ {
+ _glcpp_parser_expand_and_lex_from (parser,
+ IF_EXPANDED, $3,
+ EXPANSION_MODE_EVALUATE_DEFINED);
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_push_if (parser, & @1, 0);
+ parser->skip_stack->type = SKIP_TO_ENDIF;
+ }
+ }
+| HASH_TOKEN IF NEWLINE {
+ /* #if without an expression is only an error if we
+ * are not skipping */
+ if (parser->skip_stack == NULL ||
+ parser->skip_stack->type == SKIP_NO_SKIP)
+ {
+ glcpp_error(& @1, parser, "#if with no expression");
+ }
+ _glcpp_parser_skip_stack_push_if (parser, & @1, 0);
+ }
+| HASH_TOKEN IFDEF IDENTIFIER junk NEWLINE {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(parser->defines, $3);
+ macro_t *macro = entry ? entry->data : NULL;
+ _glcpp_parser_skip_stack_push_if (parser, & @1, macro != NULL);
+ }
+| HASH_TOKEN IFNDEF IDENTIFIER junk NEWLINE {
+ struct hash_entry *entry =
+ _mesa_hash_table_search(parser->defines, $3);
+ macro_t *macro = entry ? entry->data : NULL;
+ _glcpp_parser_skip_stack_push_if (parser, & @3, macro == NULL);
+ }
+| HASH_TOKEN ELIF pp_tokens NEWLINE {
+ /* Be careful to only evaluate the 'elif' expression
+ * if we are not skipping. When we are skipping, we
+ * simply change to a 0-valued 'elif' on the skip
+ * stack.
+ *
+ * This avoids generating diagnostics for invalid
+ * expressions that are being skipped. */
+ if (parser->skip_stack &&
+ parser->skip_stack->type == SKIP_TO_ELSE)
+ {
+ _glcpp_parser_expand_and_lex_from (parser,
+ ELIF_EXPANDED, $3,
+ EXPANSION_MODE_EVALUATE_DEFINED);
+ }
+ else if (parser->skip_stack &&
+ parser->skip_stack->has_else)
+ {
+ glcpp_error(& @1, parser, "#elif after #else");
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_change_if (parser, & @1,
+ "elif", 0);
+ }
+ }
+| HASH_TOKEN ELIF NEWLINE {
+ /* #elif without an expression is an error unless we
+ * are skipping. */
+ if (parser->skip_stack &&
+ parser->skip_stack->type == SKIP_TO_ELSE)
+ {
+ glcpp_error(& @1, parser, "#elif with no expression");
+ }
+ else if (parser->skip_stack &&
+ parser->skip_stack->has_else)
+ {
+ glcpp_error(& @1, parser, "#elif after #else");
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_change_if (parser, & @1,
+ "elif", 0);
+ glcpp_warning(& @1, parser, "ignoring illegal #elif without expression");
+ }
+ }
+| HASH_TOKEN ELSE { parser->lexing_directive = 1; } NEWLINE {
+ if (parser->skip_stack &&
+ parser->skip_stack->has_else)
+ {
+ glcpp_error(& @1, parser, "multiple #else");
+ }
+ else
+ {
+ _glcpp_parser_skip_stack_change_if (parser, & @1, "else", 1);
+ if (parser->skip_stack)
+ parser->skip_stack->has_else = true;
+ }
+ }
+| HASH_TOKEN ENDIF {
+ _glcpp_parser_skip_stack_pop (parser, & @1);
+ } NEWLINE
+| HASH_TOKEN VERSION_TOKEN version_constant NEWLINE {
+ if (parser->version_set) {
+ glcpp_error(& @1, parser, "#version must appear on the first line");
+ }
+ _glcpp_parser_handle_version_declaration(parser, $3, NULL, true);
+ }
+| HASH_TOKEN VERSION_TOKEN version_constant IDENTIFIER NEWLINE {
+ if (parser->version_set) {
+ glcpp_error(& @1, parser, "#version must appear on the first line");
+ }
+ _glcpp_parser_handle_version_declaration(parser, $3, $4, true);
+ }
+| HASH_TOKEN NEWLINE {
+ glcpp_parser_resolve_implicit_version(parser);
+ }
+| HASH_TOKEN PRAGMA NEWLINE {
+ _mesa_string_buffer_printf(parser->output, "#%s", $2);
+ }
+;
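+
+/* Illustrative skip-stack trace for the directives above (editorial,
+ * hypothetical input):
+ *
+ *     #if 0        -> push SKIP_TO_ELSE (start skipping)
+ *     #if BAD(     -> skipping, so push a 0-valued if; the bad
+ *                     expression is never evaluated
+ *     #endif       -> pop
+ *     #elif 1      -> outer state is SKIP_TO_ELSE, so the expression
+ *                     is evaluated and the state becomes SKIP_NO_SKIP
+ *     #else        -> has_else is set; any later #elif is an error
+ *     #endif       -> pop
+ */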
+
+control_line_error:
+ HASH_TOKEN ERROR_TOKEN NEWLINE {
+ glcpp_error(& @1, parser, "#%s", $2);
+ }
+| HASH_TOKEN DEFINE_TOKEN NEWLINE {
+ glcpp_error (& @1, parser, "#define without macro name");
+ }
+| HASH_TOKEN GARBAGE pp_tokens NEWLINE {
+ glcpp_error (& @1, parser, "Illegal non-directive after #");
+ }
+;
+
+integer_constant:
+ INTEGER_STRING {
+ /* let strtoll detect the base */
+ $$ = strtoll ($1, NULL, 0);
+ }
+| INTEGER {
+ $$ = $1;
+ }
+
+version_constant:
+ INTEGER_STRING {
+ /* Both octal and hexadecimal constants begin with 0. */
+ if ($1[0] == '0' && $1[1] != '\0') {
+ glcpp_error(&@1, parser, "invalid #version \"%s\" (not a decimal constant)", $1);
+ $$ = 0;
+ } else {
+ $$ = strtoll($1, NULL, 10);
+ }
+ }
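+
+/* Illustrative examples for version_constant (editorial):
+ *
+ *     #version 300 es  -> accepted; parsed with strtoll(..., 10)
+ *     #version 0x12C   -> rejected: begins with '0', not a decimal constant
+ *     #version 010     -> rejected for the same reason (octal-looking)
+ */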
+
+expression:
+ integer_constant {
+ $$.value = $1;
+ $$.undefined_macro = NULL;
+ }
+| IDENTIFIER {
+ $$.value = 0;
+ if (parser->is_gles)
+ $$.undefined_macro = linear_strdup(parser->linalloc, $1);
+ else
+ $$.undefined_macro = NULL;
+ }
+| expression OR expression {
+ $$.value = $1.value || $3.value;
+
+ /* Short-circuit: Only flag undefined from right side
+ * if left side evaluates to false.
+ */
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else if (! $1.value)
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression AND expression {
+ $$.value = $1.value && $3.value;
+
+ /* Short-circuit: Only flag undefined from right-side
+ * if left side evaluates to true.
+ */
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else if ($1.value)
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '|' expression {
+ $$.value = $1.value | $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '^' expression {
+ $$.value = $1.value ^ $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '&' expression {
+ $$.value = $1.value & $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression NOT_EQUAL expression {
+ $$.value = $1.value != $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression EQUAL expression {
+ $$.value = $1.value == $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression GREATER_OR_EQUAL expression {
+ $$.value = $1.value >= $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression LESS_OR_EQUAL expression {
+ $$.value = $1.value <= $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '>' expression {
+ $$.value = $1.value > $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '<' expression {
+ $$.value = $1.value < $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression RIGHT_SHIFT expression {
+ $$.value = $1.value >> $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression LEFT_SHIFT expression {
+ $$.value = $1.value << $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '-' expression {
+ $$.value = $1.value - $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '+' expression {
+ $$.value = $1.value + $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '%' expression {
+ if ($3.value == 0) {
+ yyerror (& @1, parser,
+ "zero modulus in preprocessor directive");
+ } else {
+ $$.value = $1.value % $3.value;
+ }
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '/' expression {
+ if ($3.value == 0) {
+ yyerror (& @1, parser,
+ "division by 0 in preprocessor directive");
+ } else {
+ $$.value = $1.value / $3.value;
+ }
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| expression '*' expression {
+ $$.value = $1.value * $3.value;
+ if ($1.undefined_macro)
+ $$.undefined_macro = $1.undefined_macro;
+ else
+ $$.undefined_macro = $3.undefined_macro;
+ }
+| '!' expression %prec UNARY {
+ $$.value = ! $2.value;
+ $$.undefined_macro = $2.undefined_macro;
+ }
+| '~' expression %prec UNARY {
+ $$.value = ~ $2.value;
+ $$.undefined_macro = $2.undefined_macro;
+ }
+| '-' expression %prec UNARY {
+ $$.value = - $2.value;
+ $$.undefined_macro = $2.undefined_macro;
+ }
+| '+' expression %prec UNARY {
+ $$.value = + $2.value;
+ $$.undefined_macro = $2.undefined_macro;
+ }
+| '(' expression ')' {
+ $$ = $2;
+ }
+;
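+
+/* Illustrative note on the GLES undefined-macro tracking above
+ * (editorial, hypothetical macro BAR): for "#if BAR || 1" with BAR
+ * undefined, $1.undefined_macro is set and the error is reported;
+ * for "#if 1 || BAR" the left side is true, so BAR's undefined_macro
+ * is deliberately not propagated, mirroring ||'s own short-circuit.
+ */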
+
+identifier_list:
+ IDENTIFIER {
+ $$ = _string_list_create (parser);
+ _string_list_append_item (parser, $$, $1);
+ }
+| identifier_list ',' IDENTIFIER {
+ $$ = $1;
+ _string_list_append_item (parser, $$, $3);
+ }
+;
+
+text_line:
+ NEWLINE { $$ = NULL; }
+| pp_tokens NEWLINE
+;
+
+replacement_list:
+ /* empty */ { $$ = NULL; }
+| pp_tokens
+;
+
+junk:
+ /* empty */
+| pp_tokens {
+ glcpp_error(&@1, parser, "extra tokens at end of directive");
+ }
+;
+
+pp_tokens:
+ preprocessing_token {
+ parser->space_tokens = 1;
+ $$ = _token_list_create (parser);
+ _token_list_append (parser, $$, $1);
+ }
+| pp_tokens preprocessing_token {
+ $$ = $1;
+ _token_list_append (parser, $$, $2);
+ }
+;
+
+preprocessing_token:
+ IDENTIFIER {
+ $$ = _token_create_str (parser, IDENTIFIER, $1);
+ $$->location = yylloc;
+ }
+| INTEGER_STRING {
+ $$ = _token_create_str (parser, INTEGER_STRING, $1);
+ $$->location = yylloc;
+ }
+| PATH {
+ $$ = _token_create_str (parser, PATH, $1);
+ $$->location = yylloc;
+ }
+| operator {
+ $$ = _token_create_ival (parser, $1, $1);
+ $$->location = yylloc;
+ }
+| DEFINED {
+ $$ = _token_create_ival (parser, DEFINED, DEFINED);
+ $$->location = yylloc;
+ }
+| OTHER {
+ $$ = _token_create_str (parser, OTHER, $1);
+ $$->location = yylloc;
+ }
+| SPACE {
+ $$ = _token_create_ival (parser, SPACE, SPACE);
+ $$->location = yylloc;
+ }
+;
+
+operator:
+ '[' { $$ = '['; }
+| ']' { $$ = ']'; }
+| '(' { $$ = '('; }
+| ')' { $$ = ')'; }
+| '{' { $$ = '{'; }
+| '}' { $$ = '}'; }
+| '.' { $$ = '.'; }
+| '&' { $$ = '&'; }
+| '*' { $$ = '*'; }
+| '+' { $$ = '+'; }
+| '-' { $$ = '-'; }
+| '~' { $$ = '~'; }
+| '!' { $$ = '!'; }
+| '/' { $$ = '/'; }
+| '%' { $$ = '%'; }
+| LEFT_SHIFT { $$ = LEFT_SHIFT; }
+| RIGHT_SHIFT { $$ = RIGHT_SHIFT; }
+| '<' { $$ = '<'; }
+| '>' { $$ = '>'; }
+| LESS_OR_EQUAL { $$ = LESS_OR_EQUAL; }
+| GREATER_OR_EQUAL { $$ = GREATER_OR_EQUAL; }
+| EQUAL { $$ = EQUAL; }
+| NOT_EQUAL { $$ = NOT_EQUAL; }
+| '^' { $$ = '^'; }
+| '|' { $$ = '|'; }
+| AND { $$ = AND; }
+| OR { $$ = OR; }
+| ';' { $$ = ';'; }
+| ',' { $$ = ','; }
+| '=' { $$ = '='; }
+| PASTE { $$ = PASTE; }
+| PLUS_PLUS { $$ = PLUS_PLUS; }
+| MINUS_MINUS { $$ = MINUS_MINUS; }
+;
+
+%%
+
+string_list_t *
+_string_list_create(glcpp_parser_t *parser)
+{
+ string_list_t *list;
+
+ list = linear_alloc_child(parser->linalloc, sizeof(string_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+
+ return list;
+}
+
+void
+_string_list_append_item(glcpp_parser_t *parser, string_list_t *list,
+ const char *str)
+{
+ string_node_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(string_node_t));
+ node->str = linear_strdup(parser->linalloc, str);
+
+ node->next = NULL;
+
+ if (list->head == NULL) {
+ list->head = node;
+ } else {
+ list->tail->next = node;
+ }
+
+ list->tail = node;
+}
+
+int
+_string_list_contains(string_list_t *list, const char *member, int *index)
+{
+ string_node_t *node;
+ int i;
+
+ if (list == NULL)
+ return 0;
+
+ for (i = 0, node = list->head; node; i++, node = node->next) {
+ if (strcmp (node->str, member) == 0) {
+ if (index)
+ *index = i;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Return duplicate string in list (if any), NULL otherwise. */
+const char *
+_string_list_has_duplicate(string_list_t *list)
+{
+ string_node_t *node, *dup;
+
+ if (list == NULL)
+ return NULL;
+
+ for (node = list->head; node; node = node->next) {
+ for (dup = node->next; dup; dup = dup->next) {
+ if (strcmp (node->str, dup->str) == 0)
+ return node->str;
+ }
+ }
+
+ return NULL;
+}
+
+int
+_string_list_length(string_list_t *list)
+{
+ int length = 0;
+ string_node_t *node;
+
+ if (list == NULL)
+ return 0;
+
+ for (node = list->head; node; node = node->next)
+ length++;
+
+ return length;
+}
+
+int
+_string_list_equal(string_list_t *a, string_list_t *b)
+{
+ string_node_t *node_a, *node_b;
+
+ if (a == NULL && b == NULL)
+ return 1;
+
+ if (a == NULL || b == NULL)
+ return 0;
+
+ for (node_a = a->head, node_b = b->head;
+ node_a && node_b;
+ node_a = node_a->next, node_b = node_b->next)
+ {
+ if (strcmp (node_a->str, node_b->str))
+ return 0;
+ }
+
+ /* Catch the case of lists being different lengths, (which
+ * would cause the loop above to terminate after the shorter
+ * list). */
+ return node_a == node_b;
+}
+
+argument_list_t *
+_argument_list_create(glcpp_parser_t *parser)
+{
+ argument_list_t *list;
+
+ list = linear_alloc_child(parser->linalloc, sizeof(argument_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+
+ return list;
+}
+
+void
+_argument_list_append(glcpp_parser_t *parser,
+ argument_list_t *list, token_list_t *argument)
+{
+ argument_node_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(argument_node_t));
+ node->argument = argument;
+
+ node->next = NULL;
+
+ if (list->head == NULL) {
+ list->head = node;
+ } else {
+ list->tail->next = node;
+ }
+
+ list->tail = node;
+}
+
+int
+_argument_list_length(argument_list_t *list)
+{
+ int length = 0;
+ argument_node_t *node;
+
+ if (list == NULL)
+ return 0;
+
+ for (node = list->head; node; node = node->next)
+ length++;
+
+ return length;
+}
+
+token_list_t *
+_argument_list_member_at(argument_list_t *list, int index)
+{
+ argument_node_t *node;
+ int i;
+
+ if (list == NULL)
+ return NULL;
+
+ node = list->head;
+ for (i = 0; i < index; i++) {
+ node = node->next;
+ if (node == NULL)
+ break;
+ }
+
+ if (node)
+ return node->argument;
+
+ return NULL;
+}
+
+token_t *
+_token_create_str(glcpp_parser_t *parser, int type, char *str)
+{
+ token_t *token;
+
+ token = linear_alloc_child(parser->linalloc, sizeof(token_t));
+ token->type = type;
+ token->value.str = str;
+
+ return token;
+}
+
+token_t *
+_token_create_ival(glcpp_parser_t *parser, int type, int ival)
+{
+ token_t *token;
+
+ token = linear_alloc_child(parser->linalloc, sizeof(token_t));
+ token->type = type;
+ token->value.ival = ival;
+
+ return token;
+}
+
+token_list_t *
+_token_list_create(glcpp_parser_t *parser)
+{
+ token_list_t *list;
+
+ list = linear_alloc_child(parser->linalloc, sizeof(token_list_t));
+ list->head = NULL;
+ list->tail = NULL;
+ list->non_space_tail = NULL;
+
+ return list;
+}
+
+void
+_token_list_append(glcpp_parser_t *parser, token_list_t *list, token_t *token)
+{
+ token_node_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(token_node_t));
+ node->token = token;
+ node->next = NULL;
+
+ if (list->head == NULL) {
+ list->head = node;
+ } else {
+ list->tail->next = node;
+ }
+
+ list->tail = node;
+ if (token->type != SPACE)
+ list->non_space_tail = node;
+}
+
+void
+_token_list_append_list(token_list_t *list, token_list_t *tail)
+{
+ if (tail == NULL || tail->head == NULL)
+ return;
+
+ if (list->head == NULL) {
+ list->head = tail->head;
+ } else {
+ list->tail->next = tail->head;
+ }
+
+ list->tail = tail->tail;
+ list->non_space_tail = tail->non_space_tail;
+}
+
+static token_list_t *
+_token_list_copy(glcpp_parser_t *parser, token_list_t *other)
+{
+ token_list_t *copy;
+ token_node_t *node;
+
+ if (other == NULL)
+ return NULL;
+
+ copy = _token_list_create (parser);
+ for (node = other->head; node; node = node->next) {
+ token_t *new_token = linear_alloc_child(parser->linalloc, sizeof(token_t));
+ *new_token = *node->token;
+ _token_list_append (parser, copy, new_token);
+ }
+
+ return copy;
+}
+
+static void
+_token_list_trim_trailing_space(token_list_t *list)
+{
+ if (list->non_space_tail) {
+ list->non_space_tail->next = NULL;
+ list->tail = list->non_space_tail;
+ }
+}
+
+static int
+_token_list_is_empty_ignoring_space(token_list_t *l)
+{
+ token_node_t *n;
+
+ if (l == NULL)
+ return 1;
+
+ n = l->head;
+ while (n != NULL && n->token->type == SPACE)
+ n = n->next;
+
+ return n == NULL;
+}
+
+int
+_token_list_equal_ignoring_space(token_list_t *a, token_list_t *b)
+{
+ token_node_t *node_a, *node_b;
+
+ if (a == NULL || b == NULL) {
+ int a_empty = _token_list_is_empty_ignoring_space(a);
+ int b_empty = _token_list_is_empty_ignoring_space(b);
+ return a_empty == b_empty;
+ }
+
+ node_a = a->head;
+ node_b = b->head;
+
+ while (1)
+ {
+ if (node_a == NULL && node_b == NULL)
+ break;
+
+ /* Ignore trailing whitespace */
+ if (node_a == NULL && node_b->token->type == SPACE) {
+ while (node_b && node_b->token->type == SPACE)
+ node_b = node_b->next;
+ }
+
+ if (node_a == NULL && node_b == NULL)
+ break;
+
+ if (node_b == NULL && node_a->token->type == SPACE) {
+ while (node_a && node_a->token->type == SPACE)
+ node_a = node_a->next;
+ }
+
+ if (node_a == NULL && node_b == NULL)
+ break;
+
+ if (node_a == NULL || node_b == NULL)
+ return 0;
+ /* Make sure whitespace appears in the same places in both.
+ * It need not be exactly the same amount of whitespace,
+ * though.
+ */
+ if (node_a->token->type == SPACE && node_b->token->type == SPACE) {
+ while (node_a && node_a->token->type == SPACE)
+ node_a = node_a->next;
+ while (node_b && node_b->token->type == SPACE)
+ node_b = node_b->next;
+ continue;
+ }
+
+ if (node_a->token->type != node_b->token->type)
+ return 0;
+
+ switch (node_a->token->type) {
+ case INTEGER:
+ if (node_a->token->value.ival != node_b->token->value.ival) {
+ return 0;
+ }
+ break;
+ case IDENTIFIER:
+ case INTEGER_STRING:
+ case OTHER:
+ if (strcmp(node_a->token->value.str, node_b->token->value.str)) {
+ return 0;
+ }
+ break;
+ }
+
+ node_a = node_a->next;
+ node_b = node_b->next;
+ }
+
+ return 1;
+}
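+
+/* Illustrative (editorial): with the rules above, the token lists for
+ * "a  +  b" and "a + b" compare equal (whitespace placement must match,
+ * the amount need not), while "a+b" compares unequal to either, since
+ * it lacks SPACE tokens where the others have them. */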
+
+static void
+_token_print(struct _mesa_string_buffer *out, token_t *token)
+{
+ if (token->type < 256) {
+ _mesa_string_buffer_append_char(out, token->type);
+ return;
+ }
+
+ switch (token->type) {
+ case INTEGER:
+ _mesa_string_buffer_printf(out, "%" PRIiMAX, token->value.ival);
+ break;
+ case IDENTIFIER:
+ case INTEGER_STRING:
+ case PATH:
+ case OTHER:
+ _mesa_string_buffer_append(out, token->value.str);
+ break;
+ case SPACE:
+ _mesa_string_buffer_append_char(out, ' ');
+ break;
+ case LEFT_SHIFT:
+ _mesa_string_buffer_append(out, "<<");
+ break;
+ case RIGHT_SHIFT:
+ _mesa_string_buffer_append(out, ">>");
+ break;
+ case LESS_OR_EQUAL:
+ _mesa_string_buffer_append(out, "<=");
+ break;
+ case GREATER_OR_EQUAL:
+ _mesa_string_buffer_append(out, ">=");
+ break;
+ case EQUAL:
+ _mesa_string_buffer_append(out, "==");
+ break;
+ case NOT_EQUAL:
+ _mesa_string_buffer_append(out, "!=");
+ break;
+ case AND:
+ _mesa_string_buffer_append(out, "&&");
+ break;
+ case OR:
+ _mesa_string_buffer_append(out, "||");
+ break;
+ case PASTE:
+ _mesa_string_buffer_append(out, "##");
+ break;
+ case PLUS_PLUS:
+ _mesa_string_buffer_append(out, "++");
+ break;
+ case MINUS_MINUS:
+ _mesa_string_buffer_append(out, "--");
+ break;
+ case DEFINED:
+ _mesa_string_buffer_append(out, "defined");
+ break;
+ case PLACEHOLDER:
+ /* Nothing to print. */
+ break;
+ default:
+ assert(!"Error: Don't know how to print token.");
+
+ break;
+ }
+}
+
+/* Return a new token formed by pasting 'token' and 'other'. Note that this
+ * function may return 'token' or 'other' directly rather than allocating
+ * anything new.
+ *
+ * Caution: Only very cursory error-checking is performed to see if
+ * the final result is a valid single token. */
+static token_t *
+_token_paste(glcpp_parser_t *parser, token_t *token, token_t *other)
+{
+ token_t *combined = NULL;
+
+ /* Pasting a placeholder onto anything makes no change. */
+ if (other->type == PLACEHOLDER)
+ return token;
+
+ /* When 'token' is a placeholder, just return 'other'. */
+ if (token->type == PLACEHOLDER)
+ return other;
+
+ /* A very few single-character punctuators can be combined
+ * with another to form a multi-character punctuator. */
+ switch (token->type) {
+ case '<':
+ if (other->type == '<')
+ combined = _token_create_ival (parser, LEFT_SHIFT, LEFT_SHIFT);
+ else if (other->type == '=')
+ combined = _token_create_ival (parser, LESS_OR_EQUAL, LESS_OR_EQUAL);
+ break;
+ case '>':
+ if (other->type == '>')
+ combined = _token_create_ival (parser, RIGHT_SHIFT, RIGHT_SHIFT);
+ else if (other->type == '=')
+ combined = _token_create_ival (parser, GREATER_OR_EQUAL, GREATER_OR_EQUAL);
+ break;
+ case '=':
+ if (other->type == '=')
+ combined = _token_create_ival (parser, EQUAL, EQUAL);
+ break;
+ case '!':
+ if (other->type == '=')
+ combined = _token_create_ival (parser, NOT_EQUAL, NOT_EQUAL);
+ break;
+ case '&':
+ if (other->type == '&')
+ combined = _token_create_ival (parser, AND, AND);
+ break;
+ case '|':
+ if (other->type == '|')
+ combined = _token_create_ival (parser, OR, OR);
+ break;
+ }
+
+ if (combined != NULL) {
+ /* Inherit the location from the first token */
+ combined->location = token->location;
+ return combined;
+ }
+
+ /* Two string-valued (or integer) tokens can usually just be
+ * mashed together. (We also handle a string followed by an
+ * integer here.)
+ *
+ * There are some exceptions here. Notably, if the first token
+ * is an integer (or a string representing an integer), then
+ * the second token must also be an integer or must be a
+ * string representing an integer that begins with a digit.
+ */
+ if ((token->type == IDENTIFIER || token->type == OTHER || token->type == INTEGER_STRING || token->type == INTEGER) &&
+ (other->type == IDENTIFIER || other->type == OTHER || other->type == INTEGER_STRING || other->type == INTEGER))
+ {
+ char *str;
+ int combined_type;
+
+ /* Check that pasting onto an integer doesn't create a
+ * non-integer, (that is, only digits can be
+		 * pasted). */
+ if (token->type == INTEGER_STRING || token->type == INTEGER) {
+ switch (other->type) {
+ case INTEGER_STRING:
+ if (other->value.str[0] < '0' || other->value.str[0] > '9')
+ goto FAIL;
+ break;
+ case INTEGER:
+ if (other->value.ival < 0)
+ goto FAIL;
+ break;
+ default:
+ goto FAIL;
+ }
+ }
+
+ if (token->type == INTEGER)
+ str = linear_asprintf(parser->linalloc, "%" PRIiMAX, token->value.ival);
+ else
+ str = linear_strdup(parser->linalloc, token->value.str);
+
+ if (other->type == INTEGER)
+ linear_asprintf_append(parser->linalloc, &str, "%" PRIiMAX, other->value.ival);
+ else
+ linear_strcat(parser->linalloc, &str, other->value.str);
+
+ /* New token is same type as original token, unless we
+ * started with an integer, in which case we will be
+ * creating an integer-string. */
+ combined_type = token->type;
+ if (combined_type == INTEGER)
+ combined_type = INTEGER_STRING;
+
+ combined = _token_create_str (parser, combined_type, str);
+ combined->location = token->location;
+ return combined;
+ }
+
+ FAIL:
+ glcpp_error (&token->location, parser, "");
+ _mesa_string_buffer_append(parser->info_log, "Pasting \"");
+ _token_print(parser->info_log, token);
+ _mesa_string_buffer_append(parser->info_log, "\" and \"");
+ _token_print(parser->info_log, other);
+ _mesa_string_buffer_append(parser->info_log, "\" does not give a valid preprocessing token.\n");
+
+ return token;
+}
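+
+/* Illustrative paste results (editorial, hypothetical tokens):
+ *
+ *     '<' ## '<'  -> LEFT_SHIFT ("<<")
+ *     '!' ## '='  -> NOT_EQUAL  ("!=")
+ *     ab  ## 12   -> IDENTIFIER "ab12"
+ *     12  ## ab   -> error: pasting onto an integer must yield an integer
+ */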
+
+static void
+_token_list_print(glcpp_parser_t *parser, token_list_t *list)
+{
+ token_node_t *node;
+
+ if (list == NULL)
+ return;
+
+ for (node = list->head; node; node = node->next)
+ _token_print(parser->output, node->token);
+}
+
+void
+yyerror(YYLTYPE *locp, glcpp_parser_t *parser, const char *error)
+{
+ glcpp_error(locp, parser, "%s", error);
+}
+
+static void
+add_builtin_define(glcpp_parser_t *parser, const char *name, int value)
+{
+ token_t *tok;
+ token_list_t *list;
+
+ tok = _token_create_ival (parser, INTEGER, value);
+
+ list = _token_list_create(parser);
+ _token_list_append(parser, list, tok);
+ _define_object_macro(parser, NULL, name, list);
+}
+
+/* Initial output buffer size, 4096 minus ralloc() overhead. It was selected
+ * to minimize the total amount of allocated memory during a shader-db run.
+ */
+#define INITIAL_PP_OUTPUT_BUF_SIZE 4048
+
+glcpp_parser_t *
+glcpp_parser_create(struct gl_context *gl_ctx,
+ glcpp_extension_iterator extensions, void *state)
+{
+ glcpp_parser_t *parser;
+
+ parser = ralloc (NULL, glcpp_parser_t);
+
+ glcpp_lex_init_extra (parser, &parser->scanner);
+ parser->defines = _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+ parser->linalloc = linear_alloc_parent(parser, 0);
+ parser->active = NULL;
+ parser->lexing_directive = 0;
+ parser->lexing_version_directive = 0;
+ parser->space_tokens = 1;
+ parser->last_token_was_newline = 0;
+ parser->last_token_was_space = 0;
+ parser->first_non_space_token_this_line = 1;
+ parser->newline_as_space = 0;
+ parser->in_control_line = 0;
+ parser->paren_count = 0;
+ parser->commented_newlines = 0;
+
+ parser->skip_stack = NULL;
+ parser->skipping = 0;
+
+ parser->lex_from_list = NULL;
+ parser->lex_from_node = NULL;
+
+ parser->output = _mesa_string_buffer_create(parser,
+ INITIAL_PP_OUTPUT_BUF_SIZE);
+ parser->info_log = _mesa_string_buffer_create(parser,
+ INITIAL_PP_OUTPUT_BUF_SIZE);
+ parser->error = 0;
+
+ parser->gl_ctx = gl_ctx;
+ parser->extensions = extensions;
+ parser->extension_list = &gl_ctx->Extensions;
+ parser->state = state;
+ parser->api = gl_ctx->API;
+ parser->version = 0;
+ parser->version_set = false;
+
+ parser->has_new_line_number = 0;
+ parser->new_line_number = 1;
+ parser->has_new_source_number = 0;
+ parser->new_source_number = 0;
+
+ parser->is_gles = false;
+
+ return parser;
+}
+
+void
+glcpp_parser_destroy(glcpp_parser_t *parser)
+{
+ glcpp_lex_destroy (parser->scanner);
+ _mesa_hash_table_destroy(parser->defines, NULL);
+ ralloc_free (parser);
+}
+
+typedef enum function_status
+{
+ FUNCTION_STATUS_SUCCESS,
+ FUNCTION_NOT_A_FUNCTION,
+ FUNCTION_UNBALANCED_PARENTHESES
+} function_status_t;
+
+/* Find a set of function-like macro arguments by looking for a
+ * balanced set of parentheses.
+ *
+ * When called, 'node' should be the opening-parenthesis token, (or
+ * perhaps preceding SPACE tokens). Upon successful return *last will
+ * be the last consumed node, (corresponding to the closing right
+ * parenthesis).
+ *
+ * Return values:
+ *
+ * FUNCTION_STATUS_SUCCESS:
+ *
+ * Successfully parsed a set of function arguments.
+ *
+ * FUNCTION_NOT_A_FUNCTION:
+ *
+ *	Macro name not followed by a '('. This is not an error; it
+ *	simply means the macro name should be treated as a non-macro.
+ *
+ * FUNCTION_UNBALANCED_PARENTHESES
+ *
+ * Macro name is not followed by a balanced set of parentheses.
+ */
+static function_status_t
+_arguments_parse(glcpp_parser_t *parser,
+ argument_list_t *arguments, token_node_t *node,
+ token_node_t **last)
+{
+ token_list_t *argument;
+ int paren_count;
+
+ node = node->next;
+
+ /* Ignore whitespace before first parenthesis. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL || node->token->type != '(')
+ return FUNCTION_NOT_A_FUNCTION;
+
+ node = node->next;
+
+ argument = _token_list_create (parser);
+ _argument_list_append (parser, arguments, argument);
+
+ for (paren_count = 1; node; node = node->next) {
+ if (node->token->type == '(') {
+ paren_count++;
+ } else if (node->token->type == ')') {
+ paren_count--;
+ if (paren_count == 0)
+ break;
+ }
+
+ if (node->token->type == ',' && paren_count == 1) {
+ _token_list_trim_trailing_space (argument);
+ argument = _token_list_create (parser);
+ _argument_list_append (parser, arguments, argument);
+ } else {
+ if (argument->head == NULL) {
+ /* Don't treat initial whitespace as part of the argument. */
+ if (node->token->type == SPACE)
+ continue;
+ }
+ _token_list_append(parser, argument, node->token);
+ }
+ }
+
+ if (paren_count)
+ return FUNCTION_UNBALANCED_PARENTHESES;
+
+ *last = node;
+
+ return FUNCTION_STATUS_SUCCESS;
+}
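+
+/* Illustrative (editorial): for a hypothetical invocation
+ * "M(f(x, y), z)", _arguments_parse returns two arguments, "f(x, y)"
+ * and "z": the comma nested at paren_count > 1 does not split
+ * arguments, and leading whitespace of each argument is skipped. */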
+
+static token_list_t *
+_token_list_create_with_one_ival(glcpp_parser_t *parser, int type, int ival)
+{
+ token_list_t *list;
+ token_t *node;
+
+ list = _token_list_create(parser);
+ node = _token_create_ival(parser, type, ival);
+ _token_list_append(parser, list, node);
+
+ return list;
+}
+
+static token_list_t *
+_token_list_create_with_one_space(glcpp_parser_t *parser)
+{
+ return _token_list_create_with_one_ival(parser, SPACE, SPACE);
+}
+
+static token_list_t *
+_token_list_create_with_one_integer(glcpp_parser_t *parser, int ival)
+{
+ return _token_list_create_with_one_ival(parser, INTEGER, ival);
+}
+
+/* Evaluate a DEFINED token node (based on subsequent tokens in the list).
+ *
+ * Note: This function must only be called when "node" is a DEFINED token,
+ * (and will abort with an assertion failure otherwise).
+ *
+ * If "node" is followed, (ignoring any SPACE tokens), by an IDENTIFIER token
+ * (optionally preceded and followed by '(' and ')' tokens) then the following
+ * occurs:
+ *
+ * If the identifier is a defined macro, this function returns 1.
+ *
+ * If the identifier is not a defined macro, this function returns 0.
+ *
+ * In either case, *last will be updated to the last node in the list
+ * consumed by the evaluation, (either the token of the identifier or the
+ * token of the closing parenthesis).
+ *
+ * In all other cases, (such as "node is the final node of the list", or
+ * "missing closing parenthesis", etc.), this function generates a
+ * preprocessor error, returns -1 and *last will not be set.
+ */
+static int
+_glcpp_parser_evaluate_defined(glcpp_parser_t *parser, token_node_t *node,
+ token_node_t **last)
+{
+ token_node_t *argument, *defined = node;
+
+ assert(node->token->type == DEFINED);
+
+ node = node->next;
+
+ /* Ignore whitespace after DEFINED token. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL)
+ goto FAIL;
+
+ if (node->token->type == IDENTIFIER || node->token->type == OTHER) {
+ argument = node;
+ } else if (node->token->type == '(') {
+ node = node->next;
+
+ /* Ignore whitespace after '(' token. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL || (node->token->type != IDENTIFIER &&
+ node->token->type != OTHER)) {
+ goto FAIL;
+ }
+
+ argument = node;
+
+ node = node->next;
+
+ /* Ignore whitespace after identifier, before ')' token. */
+ while (node && node->token->type == SPACE)
+ node = node->next;
+
+ if (node == NULL || node->token->type != ')')
+ goto FAIL;
+ } else {
+ goto FAIL;
+ }
+
+ *last = node;
+
+ return _mesa_hash_table_search(parser->defines,
+ argument->token->value.str) ? 1 : 0;
+
+FAIL:
+ glcpp_error (&defined->token->location, parser,
+ "\"defined\" not followed by an identifier");
+ return -1;
+}
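+
+/* Illustrative inputs for _glcpp_parser_evaluate_defined (editorial,
+ * FOO hypothetical):
+ *
+ *     defined FOO      -> 1 or 0; *last = the FOO node
+ *     defined ( FOO )  -> 1 or 0; *last = the ')' node
+ *     defined + 2      -> -1 and an error; *last untouched
+ */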
+
+/* Evaluate all DEFINED nodes in a given list, modifying the list in place.
+ */
+static void
+_glcpp_parser_evaluate_defined_in_list(glcpp_parser_t *parser,
+ token_list_t *list)
+{
+ token_node_t *node, *node_prev, *replacement, *last = NULL;
+ int value;
+
+ if (list == NULL)
+ return;
+
+ node_prev = NULL;
+ node = list->head;
+
+ while (node) {
+
+ if (node->token->type != DEFINED)
+ goto NEXT;
+
+ value = _glcpp_parser_evaluate_defined (parser, node, &last);
+ if (value == -1)
+ goto NEXT;
+
+ replacement = linear_alloc_child(parser->linalloc, sizeof(token_node_t));
+ replacement->token = _token_create_ival (parser, INTEGER, value);
+
+ /* Splice replacement node into list, replacing from "node"
+ * through "last". */
+ if (node_prev)
+ node_prev->next = replacement;
+ else
+ list->head = replacement;
+ replacement->next = last->next;
+ if (last == list->tail)
+ list->tail = replacement;
+
+ node = replacement;
+
+ NEXT:
+ node_prev = node;
+ node = node->next;
+ }
+}
+
+/* Perform macro expansion on 'list', placing the resulting tokens
+ * into a new list which is initialized with a first token of type
+ * 'head_token_type'. Then begin lexing from the resulting list,
+ * (return to the current lexing source when this list is exhausted).
+ *
+ * See the documentation of _glcpp_parser_expand_token_list for a description
+ * of the "mode" parameter.
+ */
+static void
+_glcpp_parser_expand_and_lex_from(glcpp_parser_t *parser, int head_token_type,
+ token_list_t *list, expansion_mode_t mode)
+{
+ token_list_t *expanded;
+ token_t *token;
+
+ expanded = _token_list_create (parser);
+ token = _token_create_ival (parser, head_token_type, head_token_type);
+ _token_list_append (parser, expanded, token);
+ _glcpp_parser_expand_token_list (parser, list, mode);
+ _token_list_append_list (expanded, list);
+ glcpp_parser_lex_from (parser, expanded);
+}
+
+static void
+_glcpp_parser_apply_pastes(glcpp_parser_t *parser, token_list_t *list)
+{
+ token_node_t *node;
+
+ node = list->head;
+ while (node) {
+ token_node_t *next_non_space;
+
+ /* Look ahead for a PASTE token, skipping space. */
+ next_non_space = node->next;
+ while (next_non_space && next_non_space->token->type == SPACE)
+ next_non_space = next_non_space->next;
+
+ if (next_non_space == NULL)
+ break;
+
+ if (next_non_space->token->type != PASTE) {
+ node = next_non_space;
+ continue;
+ }
+
+ /* Now find the next non-space token after the PASTE. */
+ next_non_space = next_non_space->next;
+ while (next_non_space && next_non_space->token->type == SPACE)
+ next_non_space = next_non_space->next;
+
+ if (next_non_space == NULL) {
+ yyerror(&node->token->location, parser, "'##' cannot appear at either end of a macro expansion\n");
+ return;
+ }
+
+ node->token = _token_paste(parser, node->token, next_non_space->token);
+ node->next = next_non_space->next;
+ if (next_non_space == list->tail)
+ list->tail = node;
+ }
+
+ list->non_space_tail = list->tail;
+}
+
+/* This is a helper function that's essentially part of the
+ * implementation of _glcpp_parser_expand_node. It shouldn't be called
+ * except for by that function.
+ *
+ * Returns NULL if node is a simple token with no expansion, (that is,
+ * although 'node' corresponds to an identifier defined as a
+ * function-like macro, it is not followed by a parenthesized
+ * argument list).
+ *
+ * Compute the complete expansion of node (which is a function-like
+ * macro) and subsequent nodes which are arguments.
+ *
+ * Returns the token list that results from the expansion and sets
+ * *last to the last node in the list that was consumed by the
+ * expansion. Specifically, *last will be set to the node of the
+ * closing right parenthesis.
+ *
+ * See the documentation of _glcpp_parser_expand_token_list for a description
+ * of the "mode" parameter.
+ */
+static token_list_t *
+_glcpp_parser_expand_function(glcpp_parser_t *parser, token_node_t *node,
+ token_node_t **last, expansion_mode_t mode)
+{
+ struct hash_entry *entry;
+ macro_t *macro;
+ const char *identifier;
+ argument_list_t *arguments;
+ function_status_t status;
+ token_list_t *substituted;
+ int parameter_index;
+
+ identifier = node->token->value.str;
+
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ macro = entry ? entry->data : NULL;
+
+ assert(macro->is_function);
+
+ arguments = _argument_list_create(parser);
+ status = _arguments_parse(parser, arguments, node, last);
+
+ switch (status) {
+ case FUNCTION_STATUS_SUCCESS:
+ break;
+ case FUNCTION_NOT_A_FUNCTION:
+ return NULL;
+ case FUNCTION_UNBALANCED_PARENTHESES:
+ glcpp_error(&node->token->location, parser, "Macro %s call has unbalanced parentheses\n", identifier);
+ return NULL;
+ }
+
+ /* Replace a macro defined as empty with a SPACE token. */
+ if (macro->replacements == NULL) {
+ return _token_list_create_with_one_space(parser);
+ }
+
+ if (!((_argument_list_length (arguments) ==
+ _string_list_length (macro->parameters)) ||
+ (_string_list_length (macro->parameters) == 0 &&
+ _argument_list_length (arguments) == 1 &&
+ arguments->head->argument->head == NULL))) {
+ glcpp_error(&node->token->location, parser,
+ "Error: macro %s invoked with %d arguments (expected %d)\n",
+ identifier, _argument_list_length (arguments),
+ _string_list_length(macro->parameters));
+ return NULL;
+ }
+
+ /* Perform argument substitution on the replacement list. */
+ substituted = _token_list_create(parser);
+
+ for (node = macro->replacements->head; node; node = node->next) {
+ if (node->token->type == IDENTIFIER &&
+ _string_list_contains(macro->parameters, node->token->value.str,
+ &parameter_index)) {
+ token_list_t *argument;
+ argument = _argument_list_member_at(arguments, parameter_index);
+ /* Before substituting, we expand the argument tokens, or append a
+ * placeholder token for an empty argument. */
+ if (argument->head) {
+ token_list_t *expanded_argument;
+ expanded_argument = _token_list_copy(parser, argument);
+ _glcpp_parser_expand_token_list(parser, expanded_argument, mode);
+ _token_list_append_list(substituted, expanded_argument);
+ } else {
+ token_t *new_token;
+
+ new_token = _token_create_ival(parser, PLACEHOLDER,
+ PLACEHOLDER);
+ _token_list_append(parser, substituted, new_token);
+ }
+ } else {
+ _token_list_append(parser, substituted, node->token);
+ }
+ }
+
+ /* After argument substitution, and before further expansion
+ * below, implement token pasting. */
+
+ _token_list_trim_trailing_space(substituted);
+
+ _glcpp_parser_apply_pastes(parser, substituted);
+
+ return substituted;
+}
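+
+/* Illustrative trace for _glcpp_parser_expand_function (editorial,
+ * hypothetical macro):
+ *
+ *     #define MUL(a, b) ((a) * (b))
+ *     MUL(x, y + 1)    -> ((x) * (y + 1))
+ *
+ * Each parameter occurrence in the replacement list is substituted by
+ * its recursively expanded argument. An empty argument (e.g. "ID()"
+ * for a one-parameter macro ID) substitutes a PLACEHOLDER token, and
+ * a macro defined with an empty replacement list expands to a single
+ * SPACE token. */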
+
+/* Compute the complete expansion of node, (and subsequent nodes after
+ * 'node' in the case that 'node' is a function-like macro and
+ * subsequent nodes are arguments).
+ *
+ * Returns NULL if node is a simple token with no expansion.
+ *
+ * Otherwise, returns the token list that results from the expansion
+ * and sets *last to the last node in the list that was consumed by
+ * the expansion. Specifically, *last will be set as follows:
+ *
+ * As 'node' in the case of object-like macro expansion.
+ *
+ * As the token of the closing right parenthesis in the case of
+ * function-like macro expansion.
+ *
+ * See the documentation of _glcpp_parser_expand_token_list for a description
+ * of the "mode" parameter.
+ */
+static token_list_t *
+_glcpp_parser_expand_node(glcpp_parser_t *parser, token_node_t *node,
+ token_node_t **last, expansion_mode_t mode,
+ int line)
+{
+ token_t *token = node->token;
+ const char *identifier;
+ struct hash_entry *entry;
+ macro_t *macro;
+
+ /* We only expand identifiers */
+ if (token->type != IDENTIFIER) {
+ return NULL;
+ }
+
+ *last = node;
+ identifier = token->value.str;
+
+ /* Special handling for __LINE__ and __FILE__, (not through
+ * the hash table). */
+ if (*identifier == '_') {
+ if (strcmp(identifier, "__LINE__") == 0)
+ return _token_list_create_with_one_integer(parser, line);
+
+ if (strcmp(identifier, "__FILE__") == 0)
+ return _token_list_create_with_one_integer(parser,
+ node->token->location.source);
+ }
+
+ /* Look up this identifier in the hash table. */
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ macro = entry ? entry->data : NULL;
+
+ /* Not a macro, so no expansion needed. */
+ if (macro == NULL)
+ return NULL;
+
+ /* Finally, don't expand this macro if we're already actively
+ * expanding it, (to avoid infinite recursion). */
+ if (_parser_active_list_contains (parser, identifier)) {
+ /* We change the token type here from IDENTIFIER to OTHER to prevent any
+ * future expansion of this unexpanded token. */
+ char *str;
+ token_list_t *expansion;
+ token_t *final;
+
+ str = linear_strdup(parser->linalloc, token->value.str);
+ final = _token_create_str(parser, OTHER, str);
+ expansion = _token_list_create(parser);
+ _token_list_append(parser, expansion, final);
+ return expansion;
+ }
+
+ if (! macro->is_function) {
+ token_list_t *replacement;
+
+ /* Replace a macro defined as empty with a SPACE token. */
+ if (macro->replacements == NULL)
+ return _token_list_create_with_one_space(parser);
+
+ replacement = _token_list_copy(parser, macro->replacements);
+ _glcpp_parser_apply_pastes(parser, replacement);
+ return replacement;
+ }
+
+ return _glcpp_parser_expand_function(parser, node, last, mode);
+}
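+
+/* Illustrative example of the recursion guard above (editorial,
+ * hypothetical macro):
+ *
+ *     #define FOO FOO + 1
+ *     FOO              -> FOO + 1
+ *
+ * On the second pass FOO is on the active list, so it is re-created
+ * as an OTHER token and expansion stops rather than recursing. */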
+
+/* Push a new identifier onto the parser's active list.
+ *
+ * Here, 'marker' is the token node that appears in the list after the
+ * expansion of 'identifier'. That is, when the list iterator begins
+ * examining 'marker', then it is time to pop this node from the
+ * active stack.
+ */
+static void
+_parser_active_list_push(glcpp_parser_t *parser, const char *identifier,
+ token_node_t *marker)
+{
+ active_list_t *node;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(active_list_t));
+ node->identifier = linear_strdup(parser->linalloc, identifier);
+ node->marker = marker;
+ node->next = parser->active;
+
+ parser->active = node;
+}
+
+static void
+_parser_active_list_pop(glcpp_parser_t *parser)
+{
+ active_list_t *node = parser->active;
+
+ if (node == NULL) {
+ parser->active = NULL;
+ return;
+ }
+
+ node = parser->active->next;
+ parser->active = node;
+}
+
+static int
+_parser_active_list_contains(glcpp_parser_t *parser, const char *identifier)
+{
+ active_list_t *node;
+
+ if (parser->active == NULL)
+ return 0;
+
+ for (node = parser->active; node; node = node->next)
+ if (strcmp(node->identifier, identifier) == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Walk over the token list replacing nodes with their expansion.
+ * Whenever nodes are expanded, the walk continues over the new
+ * nodes, continuing to expand as necessary. The results are placed in
+ * 'list' itself.
+ *
+ * The "mode" argument controls the handling of any DEFINED tokens that
+ * result from expansion as follows:
+ *
+ * EXPANSION_MODE_IGNORE_DEFINED: Any resulting DEFINED tokens will be
+ * left in the final list, unevaluated. This is the correct mode
+ * for expanding any list in any context other than a
+ * preprocessor conditional, (#if or #elif).
+ *
+ * EXPANSION_MODE_EVALUATE_DEFINED: Any resulting DEFINED tokens will be
+ * evaluated to a single 0 or 1 token depending on whether the following
+ * token is the name of a defined macro. If the DEFINED token is
+ * not followed by an (optionally parenthesized) identifier, then
+ * an error will be generated. This is the correct mode for
+ * expanding any list in the context of a preprocessor
+ * conditional, (#if or #elif).
+ */
+static void
+_glcpp_parser_expand_token_list(glcpp_parser_t *parser, token_list_t *list,
+ expansion_mode_t mode)
+{
+ token_node_t *node_prev;
+ token_node_t *node, *last = NULL;
+ token_list_t *expansion;
+ active_list_t *active_initial = parser->active;
+ int line;
+
+ if (list == NULL)
+ return;
+
+ _token_list_trim_trailing_space (list);
+
+ line = list->tail->token->location.last_line;
+
+ node_prev = NULL;
+ node = list->head;
+
+ if (mode == EXPANSION_MODE_EVALUATE_DEFINED)
+ _glcpp_parser_evaluate_defined_in_list (parser, list);
+
+ while (node) {
+
+ while (parser->active && parser->active->marker == node)
+ _parser_active_list_pop (parser);
+
+ expansion = _glcpp_parser_expand_node (parser, node, &last, mode, line);
+ if (expansion) {
+ token_node_t *n;
+
+ if (mode == EXPANSION_MODE_EVALUATE_DEFINED) {
+ _glcpp_parser_evaluate_defined_in_list (parser, expansion);
+ }
+
+ for (n = node; n != last->next; n = n->next)
+ while (parser->active && parser->active->marker == n) {
+ _parser_active_list_pop (parser);
+ }
+
+ _parser_active_list_push(parser, node->token->value.str, last->next);
+
+ /* Splice expansion into list, supporting a simple deletion if the
+ * expansion is empty.
+ */
+ if (expansion->head) {
+ if (node_prev)
+ node_prev->next = expansion->head;
+ else
+ list->head = expansion->head;
+ expansion->tail->next = last->next;
+ if (last == list->tail)
+ list->tail = expansion->tail;
+ } else {
+ if (node_prev)
+ node_prev->next = last->next;
+ else
+ list->head = last->next;
+ if (last == list->tail)
+ list->tail = NULL;
+ }
+ } else {
+ node_prev = node;
+ }
+ node = node_prev ? node_prev->next : list->head;
+ }
+
+ /* Remove any lingering effects of this invocation on the
+ * active list. That is, pop until the list looks like it did
+ * at the beginning of this function. */
+ while (parser->active && parser->active != active_initial)
+ _parser_active_list_pop (parser);
+
+ list->non_space_tail = list->tail;
+}
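+
+/* Illustrative contrast of the two modes (editorial, hypothetical
+ * macro): given "#define FOO defined(BAR)", expanding FOO on a text
+ * line (EXPANSION_MODE_IGNORE_DEFINED) leaves the literal tokens
+ * "defined(BAR)" in the output, while expanding "#if FOO"
+ * (EXPANSION_MODE_EVALUATE_DEFINED) first folds them to 0 or 1. */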
+
+void
+_glcpp_parser_print_expanded_token_list(glcpp_parser_t *parser,
+ token_list_t *list)
+{
+ if (list == NULL)
+ return;
+
+ _glcpp_parser_expand_token_list (parser, list, EXPANSION_MODE_IGNORE_DEFINED);
+
+ _token_list_trim_trailing_space (list);
+
+ _token_list_print (parser, list);
+}
+
+static void
+_check_for_reserved_macro_name(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *identifier)
+{
+ /* Section 3.3 (Preprocessor) of the GLSL 1.30 spec (and later) and
+ * the GLSL ES spec (all versions) say:
+ *
+ * "All macro names containing two consecutive underscores ( __ )
+ * are reserved for future use as predefined macro names. All
+ * macro names prefixed with "GL_" ("GL" followed by a single
+ * underscore) are also reserved."
+ *
+ * The intention is that names containing __ are reserved for internal
+ * use by the implementation, and names prefixed with GL_ are reserved
+ * for use by Khronos. Since every extension adds a name prefixed
+ * with GL_ (i.e., the name of the extension), that should be an
+ * error. Names simply containing __ are dangerous to use, but should
+ * be allowed.
+ *
+ * A future version of the GLSL specification will clarify this.
+ */
+ if (strstr(identifier, "__")) {
+ glcpp_warning(loc, parser, "Macro names containing \"__\" are reserved "
+ "for use by the implementation.\n");
+ }
+ if (strncmp(identifier, "GL_", 3) == 0) {
+ glcpp_error (loc, parser, "Macro names starting with \"GL_\" are reserved.\n");
+ }
+ if (strcmp(identifier, "defined") == 0) {
+ glcpp_error (loc, parser, "\"defined\" cannot be used as a macro name");
+ }
+}
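+
+/* Illustrative outcomes of the checks above (editorial, hypothetical
+ * names):
+ *
+ *     #define GL_MY_EXT 1  -> error: "GL_" prefix is reserved
+ *     #define a__b 1       -> warning only: "__" is merely dangerous
+ *     #define defined 1    -> error: "defined" cannot be a macro name
+ */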
+
+static int
+_macro_equal(macro_t *a, macro_t *b)
+{
+ if (a->is_function != b->is_function)
+ return 0;
+
+ if (a->is_function) {
+ if (! _string_list_equal (a->parameters, b->parameters))
+ return 0;
+ }
+
+ return _token_list_equal_ignoring_space(a->replacements, b->replacements);
+}
+
+void
+_define_object_macro(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *identifier, token_list_t *replacements)
+{
+ macro_t *macro, *previous;
+ struct hash_entry *entry;
+
+ /* We define pre-defined macros before we've started parsing the actual
+ * file. So if there's no location defined yet, that's what we're doing and
+ * we don't want to generate an error for using the reserved names. */
+ if (loc != NULL)
+ _check_for_reserved_macro_name(parser, loc, identifier);
+
+ macro = linear_alloc_child(parser->linalloc, sizeof(macro_t));
+
+ macro->is_function = 0;
+ macro->parameters = NULL;
+ macro->identifier = linear_strdup(parser->linalloc, identifier);
+ macro->replacements = replacements;
+
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ previous = entry ? entry->data : NULL;
+ if (previous) {
+ if (_macro_equal (macro, previous)) {
+ return;
+ }
+ glcpp_error (loc, parser, "Redefinition of macro %s\n", identifier);
+ }
+
+ _mesa_hash_table_insert (parser->defines, identifier, macro);
+}
+
+void
+_define_function_macro(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *identifier, string_list_t *parameters,
+ token_list_t *replacements)
+{
+ macro_t *macro, *previous;
+ struct hash_entry *entry;
+ const char *dup;
+
+ _check_for_reserved_macro_name(parser, loc, identifier);
+
+ /* Check for any duplicate parameter names. */
+ if ((dup = _string_list_has_duplicate (parameters)) != NULL) {
+ glcpp_error (loc, parser, "Duplicate macro parameter \"%s\"", dup);
+ }
+
+ macro = linear_alloc_child(parser->linalloc, sizeof(macro_t));
+
+ macro->is_function = 1;
+ macro->parameters = parameters;
+ macro->identifier = linear_strdup(parser->linalloc, identifier);
+ macro->replacements = replacements;
+
+ entry = _mesa_hash_table_search(parser->defines, identifier);
+ previous = entry ? entry->data : NULL;
+ if (previous) {
+ if (_macro_equal (macro, previous)) {
+ return;
+ }
+ glcpp_error (loc, parser, "Redefinition of macro %s\n", identifier);
+ }
+
+ _mesa_hash_table_insert(parser->defines, identifier, macro);
+}
+
+static int
+glcpp_parser_lex(YYSTYPE *yylval, YYLTYPE *yylloc, glcpp_parser_t *parser)
+{
+ token_node_t *node;
+ int ret;
+
+ if (parser->lex_from_list == NULL) {
+ ret = glcpp_lex(yylval, yylloc, parser->scanner);
+
+ /* XXX: This ugly block of code exists for the sole
+ * purpose of converting a NEWLINE token into a SPACE
+ * token, but only in the case where we have seen a
+ * function-like macro name, but have not yet seen its
+ * closing parenthesis.
+ *
+ * There's perhaps a more compact way to do this with
+ * mid-rule actions in the grammar.
+ *
+ * I'm definitely not pleased with the complexity of
+ * this code here.
+ */
+ if (parser->newline_as_space) {
+ if (ret == '(') {
+ parser->paren_count++;
+ } else if (ret == ')') {
+ parser->paren_count--;
+ if (parser->paren_count == 0)
+ parser->newline_as_space = 0;
+ } else if (ret == NEWLINE) {
+ ret = SPACE;
+ } else if (ret != SPACE) {
+ if (parser->paren_count == 0)
+ parser->newline_as_space = 0;
+ }
+ } else if (parser->in_control_line) {
+ if (ret == NEWLINE)
+ parser->in_control_line = 0;
+ }
+ else if (ret == DEFINE_TOKEN || ret == UNDEF || ret == IF ||
+ ret == IFDEF || ret == IFNDEF || ret == ELIF || ret == ELSE ||
+ ret == ENDIF || ret == HASH_TOKEN) {
+ parser->in_control_line = 1;
+ } else if (ret == IDENTIFIER) {
+ struct hash_entry *entry = _mesa_hash_table_search(parser->defines,
+ yylval->str);
+ macro_t *macro = entry ? entry->data : NULL;
+ if (macro && macro->is_function) {
+ parser->newline_as_space = 1;
+ parser->paren_count = 0;
+ }
+ }
+
+ return ret;
+ }
+
+ node = parser->lex_from_node;
+
+ if (node == NULL) {
+ parser->lex_from_list = NULL;
+ return NEWLINE;
+ }
+
+ *yylval = node->token->value;
+ ret = node->token->type;
+
+ parser->lex_from_node = node->next;
+
+ return ret;
+}
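+
+/* Illustrative case for the newline_as_space logic above (editorial,
+ * hypothetical macro):
+ *
+ *     #define ADD(a, b) ((a) + (b))
+ *     ADD(1,
+ *         2)
+ *
+ * The NEWLINE inside the unfinished argument list is returned as
+ * SPACE, so both source lines lex as one macro invocation. */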
+
+static void
+glcpp_parser_lex_from(glcpp_parser_t *parser, token_list_t *list)
+{
+ token_node_t *node;
+
+ assert (parser->lex_from_list == NULL);
+
+ /* Copy list, eliminating any space tokens. */
+ parser->lex_from_list = _token_list_create (parser);
+
+ for (node = list->head; node; node = node->next) {
+ if (node->token->type == SPACE)
+ continue;
+ _token_list_append (parser, parser->lex_from_list, node->token);
+ }
+
+ parser->lex_from_node = parser->lex_from_list->head;
+
+ /* It's possible the list consisted of nothing but whitespace. */
+ if (parser->lex_from_node == NULL) {
+ parser->lex_from_list = NULL;
+ }
+}
+
+static void
+_glcpp_parser_skip_stack_push_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ int condition)
+{
+ skip_type_t current = SKIP_NO_SKIP;
+ skip_node_t *node;
+
+ if (parser->skip_stack)
+ current = parser->skip_stack->type;
+
+ node = linear_alloc_child(parser->linalloc, sizeof(skip_node_t));
+ node->loc = *loc;
+
+ if (current == SKIP_NO_SKIP) {
+ if (condition)
+ node->type = SKIP_NO_SKIP;
+ else
+ node->type = SKIP_TO_ELSE;
+ } else {
+ node->type = SKIP_TO_ENDIF;
+ }
+
+ node->has_else = false;
+ node->next = parser->skip_stack;
+ parser->skip_stack = node;
+}
+
+static void
+_glcpp_parser_skip_stack_change_if(glcpp_parser_t *parser, YYLTYPE *loc,
+ const char *type, int condition)
+{
+ if (parser->skip_stack == NULL) {
+ glcpp_error (loc, parser, "#%s without #if\n", type);
+ return;
+ }
+
+ if (parser->skip_stack->type == SKIP_TO_ELSE) {
+ if (condition)
+ parser->skip_stack->type = SKIP_NO_SKIP;
+ } else {
+ parser->skip_stack->type = SKIP_TO_ENDIF;
+ }
+}
+
+static void
+_glcpp_parser_skip_stack_pop(glcpp_parser_t *parser, YYLTYPE *loc)
+{
+ skip_node_t *node;
+
+ if (parser->skip_stack == NULL) {
+ glcpp_error (loc, parser, "#endif without #if\n");
+ return;
+ }
+
+ node = parser->skip_stack;
+ parser->skip_stack = node->next;
+}
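
As a worked example of the skip-stack transitions above (a hedged standalone sketch, not the real grammar-driven code), consider the sequence #if 0 / #elif 1 / #else / #endif:

#include <stdio.h>

typedef enum { SKIP_NO_SKIP, SKIP_TO_ELSE, SKIP_TO_ENDIF } skip_type_t;

static const char *
name(skip_type_t t)
{
   return t == SKIP_NO_SKIP ? "emit" : "skip";
}

int
main(void)
{
   skip_type_t state;

   /* #if 0   -> false condition while not already skipping: push TO_ELSE */
   state = SKIP_TO_ELSE;
   printf("body after #if 0:   %s\n", name(state));

   /* #elif 1 -> still looking for a true branch, so take it */
   if (state == SKIP_TO_ELSE)
      state = SKIP_NO_SKIP;
   else
      state = SKIP_TO_ENDIF;
   printf("body after #elif 1: %s\n", name(state));

   /* #else   -> a branch was already taken, so skip to #endif */
   if (state == SKIP_TO_ELSE)
      state = SKIP_NO_SKIP;
   else
      state = SKIP_TO_ENDIF;
   printf("body after #else:   %s\n", name(state));

   /* #endif  -> pop */
   return 0;
}

Only the #elif body is emitted, matching standard preprocessor semantics.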
+
+static void
+_glcpp_parser_handle_version_declaration(glcpp_parser_t *parser, intmax_t version,
+ const char *identifier,
+ bool explicitly_set)
+{
+ if (parser->version_set)
+ return;
+
+ parser->version = version;
+ parser->version_set = true;
+
+ add_builtin_define (parser, "__VERSION__", version);
+
+ parser->is_gles = (version == 100) ||
+ (identifier && (strcmp(identifier, "es") == 0));
+ bool is_compat = version >= 150 && identifier &&
+ strcmp(identifier, "compatibility") == 0;
+
+ /* Add pre-defined macros. */
+ if (parser->is_gles)
+ add_builtin_define(parser, "GL_ES", 1);
+ else if (is_compat)
+ add_builtin_define(parser, "GL_compatibility_profile", 1);
+ else if (version >= 150)
+ add_builtin_define(parser, "GL_core_profile", 1);
+
+ /* Currently, all ES2/ES3 implementations support highp in the
+ * fragment shader, so we always define this macro in ES2/ES3.
+ * If we ever get a driver that doesn't support highp, we'll
+ * need to add a flag to the gl_context and check that here.
+ */
+ if (version >= 130 || parser->is_gles)
+ add_builtin_define (parser, "GL_FRAGMENT_PRECISION_HIGH", 1);
+
+ /* Add all the extension macros available in this context */
+ if (parser->extensions)
+ parser->extensions(parser->state, add_builtin_define, parser,
+ version, parser->is_gles);
+
+ if (parser->extension_list) {
+ /* If MESA_shader_integer_functions is supported, then the building
+ * blocks required for the 64x64 => 64 multiply exist. Add defines for
+ * those functions so that they can be tested.
+ */
+ if (parser->extension_list->MESA_shader_integer_functions) {
+ add_builtin_define(parser, "__have_builtin_builtin_sign64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_umul64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_udiv64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_umod64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_idiv64", 1);
+ add_builtin_define(parser, "__have_builtin_builtin_imod64", 1);
+ }
+ }
+
+ if (explicitly_set) {
+ _mesa_string_buffer_printf(parser->output,
+ "#version %" PRIiMAX "%s%s", version,
+ identifier ? " " : "",
+ identifier ? identifier : "");
+ }
+}
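
The predefined-macro decisions made by _glcpp_parser_handle_version_declaration() can be restated in a small standalone sketch (illustrative only; it models just the branches visible above):

#include <stdio.h>
#include <string.h>

static void
show(int version, const char *id)
{
   int is_gles = (version == 100) || (id && strcmp(id, "es") == 0);
   int is_compat = version >= 150 && id && strcmp(id, "compatibility") == 0;

   printf("#version %d%s%s -> __VERSION__=%d", version,
          id ? " " : "", id ? id : "", version);
   if (is_gles)
      printf(", GL_ES=1");
   else if (is_compat)
      printf(", GL_compatibility_profile=1");
   else if (version >= 150)
      printf(", GL_core_profile=1");
   if (version >= 130 || is_gles)
      printf(", GL_FRAGMENT_PRECISION_HIGH=1");
   printf("\n");
}

int
main(void)
{
   show(110, NULL);            /* legacy desktop GLSL */
   show(100, NULL);            /* GLSL ES 1.00 */
   show(300, "es");            /* GLSL ES 3.00 */
   show(150, "compatibility"); /* desktop compatibility profile */
   show(330, NULL);            /* desktop core-style version */
   return 0;
}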
+
+/* GLSL version if no version is explicitly specified. */
+#define IMPLICIT_GLSL_VERSION 110
+
+/* GLSL ES version if no version is explicitly specified. */
+#define IMPLICIT_GLSL_ES_VERSION 100
+
+void
+glcpp_parser_resolve_implicit_version(glcpp_parser_t *parser)
+{
+ int language_version = parser->api == API_OPENGLES2 ?
+ IMPLICIT_GLSL_ES_VERSION : IMPLICIT_GLSL_VERSION;
+
+ _glcpp_parser_handle_version_declaration(parser, language_version,
+ NULL, false);
+}
+
+static void
+glcpp_parser_copy_defines(const void *key, void *data, void *closure)
+{
+ struct define_include *di = (struct define_include *) closure;
+ macro_t *macro = (macro_t *) data;
+
+ /* If we hit an error on a previous pass, just return */
+ if (di->parser->error)
+ return;
+
+ const char *identifier = macro->identifier;
+ struct hash_entry *entry = _mesa_hash_table_search(di->parser->defines,
+ identifier);
+
+ macro_t *previous = entry ? entry->data : NULL;
+ if (previous) {
+ if (_macro_equal(macro, previous)) {
+ return;
+ }
+ glcpp_error(di->loc, di->parser, "Redefinition of macro %s\n",
+ identifier);
+ }
+
+ _mesa_hash_table_insert(di->parser->defines, identifier, macro);
+}
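
The rule enforced here mirrors the C preprocessor: redefining a macro with an identical body is accepted silently, while a differing body is an error. A hedged sketch, modeling macro bodies as plain strings where the real code compares token lists with _macro_equal():

#include <stdio.h>
#include <string.h>

/* The existing definition of a hypothetical macro FOO. */
static const char *existing_body = "1 + 2";

static void
redefine(const char *identifier, const char *body)
{
   if (strcmp(body, existing_body) == 0)
      return; /* identical redefinition: silently accepted */
   fprintf(stderr, "Redefinition of macro %s\n", identifier);
}

int
main(void)
{
   redefine("FOO", "1 + 2"); /* no output */
   redefine("FOO", "3");     /* "Redefinition of macro FOO" */
   return 0;
}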
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c
new file mode 100644
index 0000000000..f08b14427f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <getopt.h>
+#include <limits.h>	/* for CHAR_MAX */
+
+#include "glcpp.h"
+#include "main/mtypes.h"
+#include "main/shaderobj.h"
+#include "util/strtod.h"
+
+extern int glcpp_parser_debug;
+
+void
+_mesa_reference_shader(struct gl_context *ctx, struct gl_shader **ptr,
+ struct gl_shader *sh)
+{
+ (void) ctx;
+ *ptr = sh;
+}
+
+/* Read from fp until EOF and return a string of everything read.
+ */
+static char *
+load_text_fp (void *ctx, FILE *fp)
+{
+#define CHUNK 4096
+ char *text = NULL;
+ size_t text_size = 0;
+ size_t total_read = 0;
+ size_t bytes;
+
+ while (1) {
+ if (total_read + CHUNK + 1 > text_size) {
+ text_size = text_size ? text_size * 2 : CHUNK + 1;
+ text = reralloc_size (ctx, text, text_size);
+ if (text == NULL) {
+ fprintf (stderr, "Out of memory\n");
+ return NULL;
+ }
+ }
+ bytes = fread (text + total_read, 1, CHUNK, fp);
+ total_read += bytes;
+
+ if (bytes < CHUNK) {
+ break;
+ }
+ }
+
+ text[total_read] = '\0';
+
+ return text;
+}
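
The growth strategy above starts the buffer at CHUNK + 1 bytes and doubles it whenever another CHUNK might not fit, so reading N bytes costs O(N) amortized. A standalone equivalent, with plain realloc() standing in for Mesa's reralloc_size() (a sketch, not the vendored code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
slurp(FILE *fp)
{
   const size_t chunk = 4096;
   size_t size = 0, total = 0, bytes;
   char *text = NULL;

   do {
      if (total + chunk + 1 > size) {
         size = size ? size * 2 : chunk + 1;
         char *grown = realloc(text, size);
         if (grown == NULL) {
            free(text);
            return NULL;
         }
         text = grown;
      }
      bytes = fread(text + total, 1, chunk, fp);
      total += bytes;
   } while (bytes == chunk);

   text[total] = '\0';
   return text;
}

int
main(void)
{
   char *text = slurp(stdin);
   if (text == NULL)
      return 1;
   printf("read %zu bytes\n", strlen(text));
   free(text);
   return 0;
}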
+
+static char *
+load_text_file(void *ctx, const char *filename)
+{
+ char *text;
+ FILE *fp;
+
+ if (filename == NULL || strcmp (filename, "-") == 0)
+ return load_text_fp (ctx, stdin);
+
+ fp = fopen (filename, "r");
+ if (fp == NULL) {
+ fprintf (stderr, "Failed to open file %s: %s\n",
+ filename, strerror (errno));
+ return NULL;
+ }
+
+ text = load_text_fp (ctx, fp);
+
+ fclose(fp);
+
+ return text;
+}
+
+/* Initialize only those things that glcpp cares about.
+ */
+static void
+init_fake_gl_context (struct gl_context *gl_ctx)
+{
+ gl_ctx->API = API_OPENGL_COMPAT;
+ gl_ctx->Const.DisableGLSLLineContinuations = false;
+}
+
+static void
+usage (void)
+{
+ fprintf (stderr,
+ "Usage: glcpp [OPTIONS] [--] [<filename>]\n"
+ "\n"
+ "Pre-process the given filename (stdin if no filename given).\n"
+ "The following options are supported:\n"
+ " --disable-line-continuations Do not interpret lines ending with a\n"
+ " backslash ('\\') as a line continuation.\n");
+}
+
+enum {
+ DISABLE_LINE_CONTINUATIONS_OPT = CHAR_MAX + 1
+};
+
+static const struct option
+long_options[] = {
+ {"disable-line-continuations", no_argument, 0, DISABLE_LINE_CONTINUATIONS_OPT },
+ {"debug", no_argument, 0, 'd'},
+ {0, 0, 0, 0 }
+};
+
+int
+main (int argc, char *argv[])
+{
+ char *filename = NULL;
+ void *ctx = ralloc(NULL, void*);
+ char *info_log = ralloc_strdup(ctx, "");
+ const char *shader;
+ int ret;
+ struct gl_context gl_ctx;
+ int c;
+
+ init_fake_gl_context (&gl_ctx);
+
+ while ((c = getopt_long(argc, argv, "d", long_options, NULL)) != -1) {
+ switch (c) {
+ case DISABLE_LINE_CONTINUATIONS_OPT:
+ gl_ctx.Const.DisableGLSLLineContinuations = true;
+ break;
+ case 'd':
+ glcpp_parser_debug = 1;
+ break;
+ default:
+ usage ();
+ exit (1);
+ }
+ }
+
+ if (optind + 1 < argc) {
+ printf ("Unexpected argument: %s\n", argv[optind+1]);
+ usage ();
+ exit (1);
+ }
+ if (optind < argc) {
+ filename = argv[optind];
+ }
+
+ shader = load_text_file (ctx, filename);
+ if (shader == NULL)
+ return 1;
+
+ _mesa_locale_init();
+
+ ret = glcpp_preprocess(ctx, &shader, &info_log, NULL, NULL, &gl_ctx);
+
+ printf("%s", shader);
+ fprintf(stderr, "%s", info_log);
+
+ ralloc_free(ctx);
+
+ return ret;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h
new file mode 100644
index 0000000000..38ea3949cd
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLCPP_H
+#define GLCPP_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "main/menums.h"
+
+#include "util/ralloc.h"
+
+#include "util/hash_table.h"
+
+#include "util/string_buffer.h"
+
+struct gl_context;
+
+#define yyscan_t void*
+
+/* Some data types used for parser values. */
+
+typedef struct expression_value {
+ intmax_t value;
+ char *undefined_macro;
+} expression_value_t;
+
+
+typedef struct string_node {
+ const char *str;
+ struct string_node *next;
+} string_node_t;
+
+typedef struct string_list {
+ string_node_t *head;
+ string_node_t *tail;
+} string_list_t;
+
+typedef struct token token_t;
+typedef struct token_list token_list_t;
+
+typedef union YYSTYPE
+{
+ intmax_t ival;
+ expression_value_t expression_value;
+ char *str;
+ string_list_t *string_list;
+ token_t *token;
+ token_list_t *token_list;
+} YYSTYPE;
+
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+
+typedef struct YYLTYPE {
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ unsigned source;
+} YYLTYPE;
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
+
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+do { \
+ if (N) \
+ { \
+ (Current).first_line = YYRHSLOC(Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC(Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC(Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC(Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC(Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC(Rhs, 0).last_column; \
+ } \
+ (Current).source = 0; \
+} while (0)
+
+struct token {
+ int type;
+ YYSTYPE value;
+ YYLTYPE location;
+};
+
+typedef struct token_node {
+ token_t *token;
+ struct token_node *next;
+} token_node_t;
+
+struct token_list {
+ token_node_t *head;
+ token_node_t *tail;
+ token_node_t *non_space_tail;
+};
+
+typedef struct argument_node {
+ token_list_t *argument;
+ struct argument_node *next;
+} argument_node_t;
+
+typedef struct argument_list {
+ argument_node_t *head;
+ argument_node_t *tail;
+} argument_list_t;
+
+typedef struct glcpp_parser glcpp_parser_t;
+
+typedef enum {
+ TOKEN_CLASS_IDENTIFIER,
+ TOKEN_CLASS_IDENTIFIER_FINALIZED,
+ TOKEN_CLASS_FUNC_MACRO,
+ TOKEN_CLASS_OBJ_MACRO
+} token_class_t;
+
+token_class_t
+glcpp_parser_classify_token (glcpp_parser_t *parser,
+ const char *identifier,
+ int *parameter_index);
+
+typedef struct {
+ int is_function;
+ string_list_t *parameters;
+ const char *identifier;
+ token_list_t *replacements;
+} macro_t;
+
+typedef struct expansion_node {
+ macro_t *macro;
+ token_node_t *replacements;
+ struct expansion_node *next;
+} expansion_node_t;
+
+typedef enum skip_type {
+ SKIP_NO_SKIP,
+ SKIP_TO_ELSE,
+ SKIP_TO_ENDIF
+} skip_type_t;
+
+typedef struct skip_node {
+ skip_type_t type;
+ bool has_else;
+ YYLTYPE loc; /* location of the initial #if/#elif/... */
+ struct skip_node *next;
+} skip_node_t;
+
+typedef struct active_list {
+ const char *identifier;
+ token_node_t *marker;
+ struct active_list *next;
+} active_list_t;
+
+struct _mesa_glsl_parse_state;
+
+typedef void (*glcpp_extension_iterator)(
+ struct _mesa_glsl_parse_state *state,
+ void (*add_builtin_define)(glcpp_parser_t *, const char *, int),
+ glcpp_parser_t *data,
+ unsigned version,
+ bool es);
+
+struct glcpp_parser {
+ void *linalloc;
+ yyscan_t scanner;
+ struct hash_table *defines;
+ active_list_t *active;
+ int lexing_directive;
+ int lexing_version_directive;
+ int space_tokens;
+ int last_token_was_newline;
+ int last_token_was_space;
+ int first_non_space_token_this_line;
+ int newline_as_space;
+ int in_control_line;
+ bool in_define;
+ int paren_count;
+ int commented_newlines;
+ skip_node_t *skip_stack;
+ int skipping;
+ token_list_t *lex_from_list;
+ token_node_t *lex_from_node;
+ struct _mesa_string_buffer *output;
+ struct _mesa_string_buffer *info_log;
+ int error;
+ glcpp_extension_iterator extensions;
+ const struct gl_extensions *extension_list;
+ void *state;
+ gl_api api;
+ struct gl_context *gl_ctx;
+ unsigned version;
+
+ /**
+ * Has the #version been set?
+ *
+ * A separate flag is used because any possible sentinel value in
+ * \c ::version could also be set by a #version line.
+ */
+ bool version_set;
+
+ bool has_new_line_number;
+ int new_line_number;
+ bool has_new_source_number;
+ int new_source_number;
+ bool is_gles;
+};
+
+glcpp_parser_t *
+glcpp_parser_create(struct gl_context *gl_ctx,
+ glcpp_extension_iterator extensions, void *state);
+
+int
+glcpp_parser_parse (glcpp_parser_t *parser);
+
+void
+glcpp_parser_destroy (glcpp_parser_t *parser);
+
+void
+glcpp_parser_resolve_implicit_version(glcpp_parser_t *parser);
+
+int
+glcpp_preprocess(void *ralloc_ctx, const char **shader, char **info_log,
+ glcpp_extension_iterator extensions, void *state,
+ struct gl_context *g_ctx);
+
+/* Functions for writing to the info log */
+
+void
+glcpp_error (YYLTYPE *locp, glcpp_parser_t *parser, const char *fmt, ...);
+
+void
+glcpp_warning (YYLTYPE *locp, glcpp_parser_t *parser, const char *fmt, ...);
+
+/* Generated by glcpp-lex.l to glcpp-lex.c */
+
+int
+glcpp_lex_init_extra (glcpp_parser_t *parser, yyscan_t* scanner);
+
+void
+glcpp_lex_set_source_string(glcpp_parser_t *parser, const char *shader);
+
+int
+glcpp_lex (YYSTYPE *lvalp, YYLTYPE *llocp, yyscan_t scanner);
+
+int
+glcpp_lex_destroy (yyscan_t scanner);
+
+/* Generated by glcpp-parse.y to glcpp-parse.c */
+
+int
+yyparse (glcpp_parser_t *parser);
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp.c b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp.c
new file mode 100644
index 0000000000..aadf899a7d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <ctype.h>
+#include "glcpp.h"
+#include "main/mtypes.h"
+
+void
+glcpp_error (YYLTYPE *locp, glcpp_parser_t *parser, const char *fmt, ...)
+{
+ va_list ap;
+
+ parser->error = 1;
+ _mesa_string_buffer_printf(parser->info_log,
+ "%u:%u(%u): "
+ "preprocessor error: ",
+ locp->source,
+ locp->first_line,
+ locp->first_column);
+ va_start(ap, fmt);
+ _mesa_string_buffer_vprintf(parser->info_log, fmt, ap);
+ va_end(ap);
+ _mesa_string_buffer_append_char(parser->info_log, '\n');
+}
+
+void
+glcpp_warning (YYLTYPE *locp, glcpp_parser_t *parser, const char *fmt, ...)
+{
+ va_list ap;
+
+ _mesa_string_buffer_printf(parser->info_log,
+ "%u:%u(%u): "
+ "preprocessor warning: ",
+ locp->source,
+ locp->first_line,
+ locp->first_column);
+ va_start(ap, fmt);
+ _mesa_string_buffer_vprintf(parser->info_log, fmt, ap);
+ va_end(ap);
+ _mesa_string_buffer_append_char(parser->info_log, '\n');
+}
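
Both helpers prefix each message with a "source:line(column):" header taken from the YYLTYPE location. A tiny sketch of the resulting format (hypothetical location and message):

#include <stdio.h>

int
main(void)
{
   /* Matches the format strings above: source 0, line 12, column 3. */
   unsigned source = 0, first_line = 12, first_column = 3;

   printf("%u:%u(%u): preprocessor error: %s\n",
          source, first_line, first_column,
          "Redefinition of macro FOO");
   /* Prints: 0:12(3): preprocessor error: Redefinition of macro FOO */
   return 0;
}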
+
+/* Given str (which is expected to start with a newline terminator of some
+ * sort), return a pointer to the first character in str after the newline.
+ *
+ * A newline terminator can be any of the following sequences:
+ *
+ * "\r\n"
+ * "\n\r"
+ * "\n"
+ * "\r"
+ *
+ * And the longest such sequence will be skipped.
+ */
+static const char *
+skip_newline (const char *str)
+{
+ const char *ret = str;
+
+ if (ret == NULL)
+ return ret;
+
+ if (*ret == '\0')
+ return ret;
+
+ if (*ret == '\r') {
+ ret++;
+ if (*ret && *ret == '\n')
+ ret++;
+ } else if (*ret == '\n') {
+ ret++;
+ if (*ret && *ret == '\r')
+ ret++;
+ }
+
+ return ret;
+}
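
A quick standalone check of those rules (the function body is condensed from the code above so the sketch compiles on its own):

#include <assert.h>
#include <string.h>

static const char *
skip_newline(const char *str)
{
   const char *ret = str;

   if (ret == NULL || *ret == '\0')
      return ret;

   if (*ret == '\r') {
      ret++;
      if (*ret == '\n')
         ret++;
   } else if (*ret == '\n') {
      ret++;
      if (*ret == '\r')
         ret++;
   }
   return ret;
}

int
main(void)
{
   assert(strcmp(skip_newline("\nX"), "X") == 0);
   assert(strcmp(skip_newline("\rX"), "X") == 0);
   assert(strcmp(skip_newline("\r\nX"), "X") == 0);
   assert(strcmp(skip_newline("\n\rX"), "X") == 0);
   assert(strcmp(skip_newline("\n\nX"), "\nX") == 0); /* only one terminator */
   return 0;
}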
+
+/* Initial output buffer size: 4096 minus ralloc() overhead, selected to
+ * minimize the total amount of memory allocated during a shader-db run.
+ */
+#define INITIAL_PP_OUTPUT_BUF_SIZE 4048
+
+/* Remove any line continuation characters in the shader (whether in
+ * preprocessing directives or in GLSL code).
+ */
+static char *
+remove_line_continuations(glcpp_parser_t *ctx, const char *shader)
+{
+ struct _mesa_string_buffer *sb =
+ _mesa_string_buffer_create(ctx, INITIAL_PP_OUTPUT_BUF_SIZE);
+
+ const char *backslash, *newline, *search_start;
+ const char *cr, *lf;
+ char newline_separator[3];
+ int collapsed_newlines = 0;
+ int separator_len;
+
+ backslash = strchr(shader, '\\');
+
+ /* No line continuations were found in this shader; our job is done. */
+ if (backslash == NULL)
+ return (char *) shader;
+
+ search_start = shader;
+
+ /* Determine what flavor of newlines this shader is using. GLSL
+ * provides for 4 different possible ways to separate lines, (using
+ * one or two characters):
+ *
+ * "\n" (line-feed, like Linux, Unix, and new Mac OS)
+ * "\r" (carriage-return, like old Mac files)
+ * "\r\n" (carriage-return + line-feed, like DOS files)
+ * "\n\r" (line-feed + carriage-return, like nothing, really)
+ *
+ * This code explicitly supports a shader that uses a mixture of
+ * newline terminators and will properly handle line continuation
+ * backslashes followed by any of the above.
+ *
+ * But, since we must also insert additional newlines in the output
+ * (for any collapsed lines) we attempt to maintain consistency by
+ * examining the first encountered newline terminator, and using the
+ * same terminator for any newlines we insert.
+ */
+ cr = strchr(search_start, '\r');
+ lf = strchr(search_start, '\n');
+
+ newline_separator[0] = '\n';
+ newline_separator[1] = '\0';
+ newline_separator[2] = '\0';
+
+ if (cr == NULL) {
+ /* Nothing to do. */
+ } else if (lf == NULL) {
+ newline_separator[0] = '\r';
+ } else if (lf == cr + 1) {
+ newline_separator[0] = '\r';
+ newline_separator[1] = '\n';
+ } else if (cr == lf + 1) {
+ newline_separator[0] = '\n';
+ newline_separator[1] = '\r';
+ }
+ separator_len = strlen(newline_separator);
+
+ while (true) {
+ /* If we have previously collapsed any line-continuations,
+ * then we want to insert additional newlines at the next
+ * occurrence of a newline character to avoid changing any
+ * line numbers.
+ */
+ if (collapsed_newlines) {
+ cr = strchr (search_start, '\r');
+ lf = strchr (search_start, '\n');
+ if (cr && lf)
+ newline = cr < lf ? cr : lf;
+ else if (cr)
+ newline = cr;
+ else
+ newline = lf;
+ if (newline &&
+ (backslash == NULL || newline < backslash))
+ {
+ _mesa_string_buffer_append_len(sb, shader,
+ newline - shader + 1);
+ while (collapsed_newlines) {
+ _mesa_string_buffer_append_len(sb,
+ newline_separator,
+ separator_len);
+ collapsed_newlines--;
+ }
+ shader = skip_newline (newline);
+ search_start = shader;
+ }
+ }
+
+ if (backslash == NULL)
+ break;
+
+ search_start = backslash + 1;
+
+ /* At each line continuation, (backslash followed by a
+ * newline), copy all preceding text to the output, then
+ * advance the shader pointer to the character after the
+ * newline.
+ */
+ if (backslash[1] == '\r' || backslash[1] == '\n')
+ {
+ collapsed_newlines++;
+ _mesa_string_buffer_append_len(sb, shader, backslash - shader);
+ shader = skip_newline (backslash + 1);
+ search_start = shader;
+ }
+
+ backslash = strchr(search_start, '\\');
+ }
+
+ _mesa_string_buffer_append(sb, shader);
+
+ return sb->buf;
+}
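
To make the line-number bookkeeping concrete, here is a simplified sketch (an editor's example handling only "\n" newlines, with plain malloc() instead of ralloc): each backslash-newline pair is dropped, and one replacement newline is emitted at the next real end of line so that later line numbers are unchanged.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
remove_continuations_lf(const char *in)
{
   /* Output is never longer than the input: each "\\\n" (2 bytes) removed
    * adds back at most one '\n' later. */
   char *out = malloc(strlen(in) + 1);
   size_t o = 0;
   int pending = 0; /* newlines owed, to keep later line numbers stable */

   if (out == NULL)
      return NULL;

   for (const char *p = in; *p != '\0'; p++) {
      if (p[0] == '\\' && p[1] == '\n') {
         pending++;
         p++; /* skip the newline of the continuation */
      } else if (*p == '\n') {
         out[o++] = '\n';
         while (pending > 0) {
            out[o++] = '\n';
            pending--;
         }
      } else {
         out[o++] = *p;
      }
   }
   out[o] = '\0';
   return out;
}

int
main(void)
{
   char *s = remove_continuations_lf("#define A 1 + \\\n 2\nA\n");
   fputs(s, stdout); /* "#define A 1 +  2\n\nA\n" -- 3 lines in, 3 lines out */
   free(s);
   return 0;
}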
+
+int
+glcpp_preprocess(void *ralloc_ctx, const char **shader, char **info_log,
+ glcpp_extension_iterator extensions, void *state,
+ struct gl_context *gl_ctx)
+{
+ int errors;
+ glcpp_parser_t *parser =
+ glcpp_parser_create(gl_ctx, extensions, state);
+
+ if (! gl_ctx->Const.DisableGLSLLineContinuations)
+ *shader = remove_line_continuations(parser, *shader);
+
+ glcpp_lex_set_source_string (parser, *shader);
+
+ glcpp_parser_parse (parser);
+
+ if (parser->skip_stack)
+ glcpp_error (&parser->skip_stack->loc, parser, "Unterminated #if\n");
+
+ glcpp_parser_resolve_implicit_version(parser);
+
+ ralloc_strcat(info_log, parser->info_log->buf);
+
+ /* Crimp the buffer first, to conserve memory */
+ _mesa_string_buffer_crimp_to_fit(parser->output);
+
+ ralloc_steal(ralloc_ctx, parser->output->buf);
+ *shader = parser->output->buf;
+
+ errors = parser->error;
+ glcpp_parser_destroy (parser);
+ return errors;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c
new file mode 100644
index 0000000000..20a2252ee1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright © 2019 Timothy Arceri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* This file declares stripped-down versions of functions that
+ * normally exist outside of the glcpp folder, so that they can be used
+ * when running the GLSL compiler standalone (for unit testing or
+ * compiling builtins).
+ */
+
+#include "pp_standalone_scaffolding.h"
+
+const char *
+_mesa_lookup_shader_include(struct gl_context *ctx, char *path,
+ bool error_check)
+{
+ (void) ctx;
+ (void) path;
+ (void) error_check;
+
+ return NULL;
+}
+
+size_t
+_mesa_get_shader_include_cursor(struct gl_shared_state *shared)
+{
+ (void) shared;
+
+ return 0;
+}
+
+void
+_mesa_set_shader_include_cursor(struct gl_shared_state *shared,
+ size_t cursor)
+{
+ (void) shared;
+ (void) cursor;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h
new file mode 100644
index 0000000000..a35c04ee70
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright © 2019 Timothy Arceri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* This file declares stripped-down versions of functions that
+ * normally exist outside of the glcpp folder, so that they can be used
+ * when running the GLSL compiler standalone (for unit testing or
+ * compiling builtins).
+ */
+
+#ifndef PP_STANDALONE_SCAFFOLDING_H
+#define PP_STANDALONE_SCAFFOLDING_H
+
+#include <stddef.h>
+#include "main/mtypes.h"
+
+const char *
+_mesa_lookup_shader_include(struct gl_context *ctx, char *path,
+ bool error_check);
+
+size_t
+_mesa_get_shader_include_cursor(struct gl_shared_state *shared);
+
+void
+_mesa_set_shader_include_cursor(struct gl_shared_state *shared,
+ size_t cursor);
+
+#endif /* PP_STANDALONE_SCAFFOLDING_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp
new file mode 100644
index 0000000000..b586f78fbc
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp
@@ -0,0 +1,4750 @@
+#line 1 "src/compiler/glsl/glsl_lexer.cpp"
+
+#line 3 "src/compiler/glsl/glsl_lexer.cpp"
+
+#define YY_INT_ALIGNED short int
+
+/* A lexical scanner generated by flex */
+
+#define FLEX_SCANNER
+#define YY_FLEX_MAJOR_VERSION 2
+#define YY_FLEX_MINOR_VERSION 6
+#define YY_FLEX_SUBMINOR_VERSION 4
+#if YY_FLEX_SUBMINOR_VERSION > 0
+#define FLEX_BETA
+#endif
+
+#ifdef yy_create_buffer
+#define _mesa_glsl_lexer__create_buffer_ALREADY_DEFINED
+#else
+#define yy_create_buffer _mesa_glsl_lexer__create_buffer
+#endif
+
+#ifdef yy_delete_buffer
+#define _mesa_glsl_lexer__delete_buffer_ALREADY_DEFINED
+#else
+#define yy_delete_buffer _mesa_glsl_lexer__delete_buffer
+#endif
+
+#ifdef yy_scan_buffer
+#define _mesa_glsl_lexer__scan_buffer_ALREADY_DEFINED
+#else
+#define yy_scan_buffer _mesa_glsl_lexer__scan_buffer
+#endif
+
+#ifdef yy_scan_string
+#define _mesa_glsl_lexer__scan_string_ALREADY_DEFINED
+#else
+#define yy_scan_string _mesa_glsl_lexer__scan_string
+#endif
+
+#ifdef yy_scan_bytes
+#define _mesa_glsl_lexer__scan_bytes_ALREADY_DEFINED
+#else
+#define yy_scan_bytes _mesa_glsl_lexer__scan_bytes
+#endif
+
+#ifdef yy_init_buffer
+#define _mesa_glsl_lexer__init_buffer_ALREADY_DEFINED
+#else
+#define yy_init_buffer _mesa_glsl_lexer__init_buffer
+#endif
+
+#ifdef yy_flush_buffer
+#define _mesa_glsl_lexer__flush_buffer_ALREADY_DEFINED
+#else
+#define yy_flush_buffer _mesa_glsl_lexer__flush_buffer
+#endif
+
+#ifdef yy_load_buffer_state
+#define _mesa_glsl_lexer__load_buffer_state_ALREADY_DEFINED
+#else
+#define yy_load_buffer_state _mesa_glsl_lexer__load_buffer_state
+#endif
+
+#ifdef yy_switch_to_buffer
+#define _mesa_glsl_lexer__switch_to_buffer_ALREADY_DEFINED
+#else
+#define yy_switch_to_buffer _mesa_glsl_lexer__switch_to_buffer
+#endif
+
+#ifdef yypush_buffer_state
+#define _mesa_glsl_lexer_push_buffer_state_ALREADY_DEFINED
+#else
+#define yypush_buffer_state _mesa_glsl_lexer_push_buffer_state
+#endif
+
+#ifdef yypop_buffer_state
+#define _mesa_glsl_lexer_pop_buffer_state_ALREADY_DEFINED
+#else
+#define yypop_buffer_state _mesa_glsl_lexer_pop_buffer_state
+#endif
+
+#ifdef yyensure_buffer_stack
+#define _mesa_glsl_lexer_ensure_buffer_stack_ALREADY_DEFINED
+#else
+#define yyensure_buffer_stack _mesa_glsl_lexer_ensure_buffer_stack
+#endif
+
+#ifdef yylex
+#define _mesa_glsl_lexer_lex_ALREADY_DEFINED
+#else
+#define yylex _mesa_glsl_lexer_lex
+#endif
+
+#ifdef yyrestart
+#define _mesa_glsl_lexer_restart_ALREADY_DEFINED
+#else
+#define yyrestart _mesa_glsl_lexer_restart
+#endif
+
+#ifdef yylex_init
+#define _mesa_glsl_lexer_lex_init_ALREADY_DEFINED
+#else
+#define yylex_init _mesa_glsl_lexer_lex_init
+#endif
+
+#ifdef yylex_init_extra
+#define _mesa_glsl_lexer_lex_init_extra_ALREADY_DEFINED
+#else
+#define yylex_init_extra _mesa_glsl_lexer_lex_init_extra
+#endif
+
+#ifdef yylex_destroy
+#define _mesa_glsl_lexer_lex_destroy_ALREADY_DEFINED
+#else
+#define yylex_destroy _mesa_glsl_lexer_lex_destroy
+#endif
+
+#ifdef yyget_debug
+#define _mesa_glsl_lexer_get_debug_ALREADY_DEFINED
+#else
+#define yyget_debug _mesa_glsl_lexer_get_debug
+#endif
+
+#ifdef yyset_debug
+#define _mesa_glsl_lexer_set_debug_ALREADY_DEFINED
+#else
+#define yyset_debug _mesa_glsl_lexer_set_debug
+#endif
+
+#ifdef yyget_extra
+#define _mesa_glsl_lexer_get_extra_ALREADY_DEFINED
+#else
+#define yyget_extra _mesa_glsl_lexer_get_extra
+#endif
+
+#ifdef yyset_extra
+#define _mesa_glsl_lexer_set_extra_ALREADY_DEFINED
+#else
+#define yyset_extra _mesa_glsl_lexer_set_extra
+#endif
+
+#ifdef yyget_in
+#define _mesa_glsl_lexer_get_in_ALREADY_DEFINED
+#else
+#define yyget_in _mesa_glsl_lexer_get_in
+#endif
+
+#ifdef yyset_in
+#define _mesa_glsl_lexer_set_in_ALREADY_DEFINED
+#else
+#define yyset_in _mesa_glsl_lexer_set_in
+#endif
+
+#ifdef yyget_out
+#define _mesa_glsl_lexer_get_out_ALREADY_DEFINED
+#else
+#define yyget_out _mesa_glsl_lexer_get_out
+#endif
+
+#ifdef yyset_out
+#define _mesa_glsl_lexer_set_out_ALREADY_DEFINED
+#else
+#define yyset_out _mesa_glsl_lexer_set_out
+#endif
+
+#ifdef yyget_leng
+#define _mesa_glsl_lexer_get_leng_ALREADY_DEFINED
+#else
+#define yyget_leng _mesa_glsl_lexer_get_leng
+#endif
+
+#ifdef yyget_text
+#define _mesa_glsl_lexer_get_text_ALREADY_DEFINED
+#else
+#define yyget_text _mesa_glsl_lexer_get_text
+#endif
+
+#ifdef yyget_lineno
+#define _mesa_glsl_lexer_get_lineno_ALREADY_DEFINED
+#else
+#define yyget_lineno _mesa_glsl_lexer_get_lineno
+#endif
+
+#ifdef yyset_lineno
+#define _mesa_glsl_lexer_set_lineno_ALREADY_DEFINED
+#else
+#define yyset_lineno _mesa_glsl_lexer_set_lineno
+#endif
+
+#ifdef yyget_column
+#define _mesa_glsl_lexer_get_column_ALREADY_DEFINED
+#else
+#define yyget_column _mesa_glsl_lexer_get_column
+#endif
+
+#ifdef yyset_column
+#define _mesa_glsl_lexer_set_column_ALREADY_DEFINED
+#else
+#define yyset_column _mesa_glsl_lexer_set_column
+#endif
+
+#ifdef yywrap
+#define _mesa_glsl_lexer_wrap_ALREADY_DEFINED
+#else
+#define yywrap _mesa_glsl_lexer_wrap
+#endif
+
+#ifdef yyget_lval
+#define _mesa_glsl_lexer_get_lval_ALREADY_DEFINED
+#else
+#define yyget_lval _mesa_glsl_lexer_get_lval
+#endif
+
+#ifdef yyset_lval
+#define _mesa_glsl_lexer_set_lval_ALREADY_DEFINED
+#else
+#define yyset_lval _mesa_glsl_lexer_set_lval
+#endif
+
+#ifdef yyget_lloc
+#define _mesa_glsl_lexer_get_lloc_ALREADY_DEFINED
+#else
+#define yyget_lloc _mesa_glsl_lexer_get_lloc
+#endif
+
+#ifdef yyset_lloc
+#define _mesa_glsl_lexer_set_lloc_ALREADY_DEFINED
+#else
+#define yyset_lloc _mesa_glsl_lexer_set_lloc
+#endif
+
+#ifdef yyalloc
+#define _mesa_glsl_lexer_alloc_ALREADY_DEFINED
+#else
+#define yyalloc _mesa_glsl_lexer_alloc
+#endif
+
+#ifdef yyrealloc
+#define _mesa_glsl_lexer_realloc_ALREADY_DEFINED
+#else
+#define yyrealloc _mesa_glsl_lexer_realloc
+#endif
+
+#ifdef yyfree
+#define _mesa_glsl_lexer_free_ALREADY_DEFINED
+#else
+#define yyfree _mesa_glsl_lexer_free
+#endif
+
+/* First, we deal with platform-specific or compiler-specific issues. */
+
+/* begin standard C headers. */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+
+/* end standard C headers. */
+
+/* flex integer type definitions */
+
+#ifndef FLEXINT_H
+#define FLEXINT_H
+
+/* C99 systems have <inttypes.h>. Non-C99 systems may or may not. */
+
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+
+/* C99 says to define __STDC_LIMIT_MACROS before including stdint.h,
+ * if you want the limit (max/min) macros for int types.
+ */
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS 1
+#endif
+
+#include <inttypes.h>
+typedef int8_t flex_int8_t;
+typedef uint8_t flex_uint8_t;
+typedef int16_t flex_int16_t;
+typedef uint16_t flex_uint16_t;
+typedef int32_t flex_int32_t;
+typedef uint32_t flex_uint32_t;
+#else
+typedef signed char flex_int8_t;
+typedef short int flex_int16_t;
+typedef int flex_int32_t;
+typedef unsigned char flex_uint8_t;
+typedef unsigned short int flex_uint16_t;
+typedef unsigned int flex_uint32_t;
+
+/* Limits of integral types. */
+#ifndef INT8_MIN
+#define INT8_MIN (-128)
+#endif
+#ifndef INT16_MIN
+#define INT16_MIN (-32767-1)
+#endif
+#ifndef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#endif
+#ifndef INT8_MAX
+#define INT8_MAX (127)
+#endif
+#ifndef INT16_MAX
+#define INT16_MAX (32767)
+#endif
+#ifndef INT32_MAX
+#define INT32_MAX (2147483647)
+#endif
+#ifndef UINT8_MAX
+#define UINT8_MAX (255U)
+#endif
+#ifndef UINT16_MAX
+#define UINT16_MAX (65535U)
+#endif
+#ifndef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#endif
+
+#ifndef SIZE_MAX
+#define SIZE_MAX (~(size_t)0)
+#endif
+
+#endif /* ! C99 */
+
+#endif /* ! FLEXINT_H */
+
+/* begin standard C++ headers. */
+
+/* TODO: this is always defined, so inline it */
+#define yyconst const
+
+#if defined(__GNUC__) && __GNUC__ >= 3
+#define yynoreturn __attribute__((__noreturn__))
+#else
+#define yynoreturn
+#endif
+
+/* Returned upon end-of-file. */
+#define YY_NULL 0
+
+/* Promotes a possibly negative, possibly signed char to an
+ * integer in range [0..255] for use as an array index.
+ */
+#define YY_SC_TO_UI(c) ((YY_CHAR) (c))
+
+/* An opaque pointer. */
+#ifndef YY_TYPEDEF_YY_SCANNER_T
+#define YY_TYPEDEF_YY_SCANNER_T
+typedef void* yyscan_t;
+#endif
+
+/* For convenience, these vars (plus the bison vars far below)
+ are macros in the reentrant scanner. */
+#define yyin yyg->yyin_r
+#define yyout yyg->yyout_r
+#define yyextra yyg->yyextra_r
+#define yyleng yyg->yyleng_r
+#define yytext yyg->yytext_r
+#define yylineno (YY_CURRENT_BUFFER_LVALUE->yy_bs_lineno)
+#define yycolumn (YY_CURRENT_BUFFER_LVALUE->yy_bs_column)
+#define yy_flex_debug yyg->yy_flex_debug_r
+
+/* Enter a start condition. This macro really ought to take a parameter,
+ * but we do it the disgusting crufty way forced on us by the ()-less
+ * definition of BEGIN.
+ */
+#define BEGIN yyg->yy_start = 1 + 2 *
+/* Translate the current start state into a value that can be later handed
+ * to BEGIN to return to the state. The YYSTATE alias is for lex
+ * compatibility.
+ */
+#define YY_START ((yyg->yy_start - 1) / 2)
+#define YYSTATE YY_START
+/* Action number for EOF rule of a given start state. */
+#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)
+/* Special action meaning "start processing a new file". */
+#define YY_NEW_FILE yyrestart( yyin , yyscanner )
+#define YY_END_OF_BUFFER_CHAR 0
+
+/* Size of default input buffer. */
+#ifndef YY_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k.
+ * Moreover, YY_BUF_SIZE is 2*YY_READ_BUF_SIZE in the general case.
+ * Ditto for the __ia64__ case accordingly.
+ */
+#define YY_BUF_SIZE 32768
+#else
+#define YY_BUF_SIZE 16384
+#endif /* __ia64__ */
+#endif
+
+/* The state buf must be large enough to hold one state per character in the main buffer.
+ */
+#define YY_STATE_BUF_SIZE ((YY_BUF_SIZE + 2) * sizeof(yy_state_type))
+
+#ifndef YY_TYPEDEF_YY_BUFFER_STATE
+#define YY_TYPEDEF_YY_BUFFER_STATE
+typedef struct yy_buffer_state *YY_BUFFER_STATE;
+#endif
+
+#ifndef YY_TYPEDEF_YY_SIZE_T
+#define YY_TYPEDEF_YY_SIZE_T
+typedef size_t yy_size_t;
+#endif
+
+#define EOB_ACT_CONTINUE_SCAN 0
+#define EOB_ACT_END_OF_FILE 1
+#define EOB_ACT_LAST_MATCH 2
+
+ #define YY_LESS_LINENO(n)
+ #define YY_LINENO_REWIND_TO(ptr)
+
+/* Return all but the first "n" matched characters back to the input stream. */
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ *yy_cp = yyg->yy_hold_char; \
+ YY_RESTORE_YY_MORE_OFFSET \
+ yyg->yy_c_buf_p = yy_cp = yy_bp + yyless_macro_arg - YY_MORE_ADJ; \
+ YY_DO_BEFORE_ACTION; /* set up yytext again */ \
+ } \
+ while ( 0 )
+#define unput(c) yyunput( c, yyg->yytext_ptr , yyscanner )
+
+#ifndef YY_STRUCT_YY_BUFFER_STATE
+#define YY_STRUCT_YY_BUFFER_STATE
+struct yy_buffer_state
+ {
+ FILE *yy_input_file;
+
+ char *yy_ch_buf; /* input buffer */
+ char *yy_buf_pos; /* current position in input buffer */
+
+ /* Size of input buffer in bytes, not including room for EOB
+ * characters.
+ */
+ int yy_buf_size;
+
+ /* Number of characters read into yy_ch_buf, not including EOB
+ * characters.
+ */
+ int yy_n_chars;
+
+ /* Whether we "own" the buffer - i.e., we know we created it,
+ * and can realloc() it to grow it, and should free() it to
+ * delete it.
+ */
+ int yy_is_our_buffer;
+
+ /* Whether this is an "interactive" input source; if so, and
+ * if we're using stdio for input, then we want to use getc()
+ * instead of fread(), to make sure we stop fetching input after
+ * each newline.
+ */
+ int yy_is_interactive;
+
+ /* Whether we're considered to be at the beginning of a line.
+ * If so, '^' rules will be active on the next match, otherwise
+ * not.
+ */
+ int yy_at_bol;
+
+ int yy_bs_lineno; /**< The line count. */
+ int yy_bs_column; /**< The column count. */
+
+ /* Whether to try to fill the input buffer when we reach the
+ * end of it.
+ */
+ int yy_fill_buffer;
+
+ int yy_buffer_status;
+
+#define YY_BUFFER_NEW 0
+#define YY_BUFFER_NORMAL 1
+ /* When an EOF's been seen but there's still some text to process
+ * then we mark the buffer as YY_EOF_PENDING, to indicate that we
+ * shouldn't try reading from the input source any more. We might
+ * still have a bunch of tokens to match, though, because of
+ * possible backing-up.
+ *
+ * When we actually see the EOF, we change the status to "new"
+ * (via yyrestart()), so that the user can continue scanning by
+ * just pointing yyin at a new input file.
+ */
+#define YY_BUFFER_EOF_PENDING 2
+
+ };
+#endif /* !YY_STRUCT_YY_BUFFER_STATE */
+
+/* We provide macros for accessing buffer states in case in the
+ * future we want to put the buffer states in a more general
+ * "scanner state".
+ *
+ * Returns the top of the stack, or NULL.
+ */
+#define YY_CURRENT_BUFFER ( yyg->yy_buffer_stack \
+ ? yyg->yy_buffer_stack[yyg->yy_buffer_stack_top] \
+ : NULL)
+/* Same as previous macro, but useful when we know that the buffer stack is not
+ * NULL or when we need an lvalue. For internal use only.
+ */
+#define YY_CURRENT_BUFFER_LVALUE yyg->yy_buffer_stack[yyg->yy_buffer_stack_top]
+
+void yyrestart ( FILE *input_file , yyscan_t yyscanner );
+void yy_switch_to_buffer ( YY_BUFFER_STATE new_buffer , yyscan_t yyscanner );
+YY_BUFFER_STATE yy_create_buffer ( FILE *file, int size , yyscan_t yyscanner );
+void yy_delete_buffer ( YY_BUFFER_STATE b , yyscan_t yyscanner );
+void yy_flush_buffer ( YY_BUFFER_STATE b , yyscan_t yyscanner );
+void yypush_buffer_state ( YY_BUFFER_STATE new_buffer , yyscan_t yyscanner );
+void yypop_buffer_state ( yyscan_t yyscanner );
+
+static void yyensure_buffer_stack ( yyscan_t yyscanner );
+static void yy_load_buffer_state ( yyscan_t yyscanner );
+static void yy_init_buffer ( YY_BUFFER_STATE b, FILE *file , yyscan_t yyscanner );
+#define YY_FLUSH_BUFFER yy_flush_buffer( YY_CURRENT_BUFFER , yyscanner)
+
+YY_BUFFER_STATE yy_scan_buffer ( char *base, yy_size_t size , yyscan_t yyscanner );
+YY_BUFFER_STATE yy_scan_string ( const char *yy_str , yyscan_t yyscanner );
+YY_BUFFER_STATE yy_scan_bytes ( const char *bytes, int len , yyscan_t yyscanner );
+
+void *yyalloc ( yy_size_t , yyscan_t yyscanner );
+void *yyrealloc ( void *, yy_size_t , yyscan_t yyscanner );
+void yyfree ( void * , yyscan_t yyscanner );
+
+#define yy_new_buffer yy_create_buffer
+#define yy_set_interactive(is_interactive) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){ \
+ yyensure_buffer_stack (yyscanner); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_is_interactive = is_interactive; \
+ }
+#define yy_set_bol(at_bol) \
+ { \
+ if ( ! YY_CURRENT_BUFFER ){\
+ yyensure_buffer_stack (yyscanner); \
+ YY_CURRENT_BUFFER_LVALUE = \
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner); \
+ } \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+ }
+#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+
+/* Begin user sect3 */
+
+#define _mesa_glsl_lexer_wrap(yyscanner) (/*CONSTCOND*/1)
+#define YY_SKIP_YYWRAP
+typedef flex_uint8_t YY_CHAR;
+
+typedef int yy_state_type;
+
+#define yytext_ptr yytext_r
+
+static yy_state_type yy_get_previous_state ( yyscan_t yyscanner );
+static yy_state_type yy_try_NUL_trans ( yy_state_type current_state , yyscan_t yyscanner);
+static int yy_get_next_buffer ( yyscan_t yyscanner );
+static void yynoreturn yy_fatal_error ( const char* msg , yyscan_t yyscanner );
+
+/* Done after the current pattern has been matched and before the
+ * corresponding action - sets up yytext.
+ */
+#define YY_DO_BEFORE_ACTION \
+ yyg->yytext_ptr = yy_bp; \
+ yyleng = (int) (yy_cp - yy_bp); \
+ yyg->yy_hold_char = *yy_cp; \
+ *yy_cp = '\0'; \
+ yyg->yy_c_buf_p = yy_cp;
+#define YY_NUM_RULES 280
+#define YY_END_OF_BUFFER 281
+/* This struct is not used in this scanner,
+ but its presence is necessary. */
+struct yy_trans_info
+ {
+ flex_int32_t yy_verify;
+ flex_int32_t yy_nxt;
+ };
+static const flex_int16_t yy_accept[1112] =
+ { 0,
+ 0, 0, 20, 20, 0, 0, 281, 279, 1, 27,
+ 279, 279, 279, 279, 279, 279, 278, 279, 185, 183,
+ 279, 279, 279, 277, 279, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 279, 1, 279, 26, 20,
+ 25, 26, 24, 23, 21, 22, 18, 17, 1, 167,
+ 176, 168, 179, 173, 162, 175, 163, 182, 187, 174,
+ 188, 185, 0, 0, 185, 185, 0, 185, 183, 183,
+ 183, 183, 171, 164, 166, 165, 172, 277, 180, 170,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+
+ 277, 277, 277, 277, 36, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 40,
+ 277, 277, 68, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 181, 169,
+ 1, 0, 0, 2, 0, 0, 0, 0, 0, 20,
+ 19, 23, 22, 0, 187, 0, 0, 186, 0, 188,
+ 0, 0, 0, 189, 184, 177, 178, 277, 196, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 39, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 32, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 69,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 0, 0, 0,
+ 0, 0, 19, 0, 187, 191, 0, 186, 0, 0,
+ 0, 188, 192, 189, 0, 0, 184, 184, 184, 277,
+
+ 277, 277, 30, 277, 277, 277, 254, 247, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 38, 199, 277, 277, 277, 277, 76, 277, 277, 204,
+ 217, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 214, 250, 56, 57, 58,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 202, 194, 277, 277,
+ 277, 33, 277, 277, 277, 277, 277, 277, 277, 53,
+ 54, 55, 160, 277, 277, 277, 0, 0, 0, 0,
+
+ 0, 0, 186, 190, 193, 277, 277, 277, 34, 277,
+ 44, 45, 46, 277, 197, 277, 277, 29, 277, 277,
+ 277, 277, 228, 229, 230, 277, 225, 226, 227, 277,
+ 195, 277, 218, 31, 240, 241, 242, 252, 222, 223,
+ 224, 277, 277, 277, 277, 70, 220, 277, 277, 277,
+ 277, 47, 48, 49, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 79, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 215, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 198,
+ 277, 277, 249, 50, 51, 52, 277, 277, 37, 277,
+
+ 0, 0, 0, 0, 0, 257, 277, 277, 72, 277,
+ 277, 255, 277, 277, 43, 277, 277, 277, 277, 216,
+ 211, 260, 277, 277, 277, 277, 277, 277, 277, 207,
+ 277, 277, 277, 277, 161, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 277, 277, 277, 277, 221, 203,
+ 277, 277, 209, 277, 277, 277, 42, 277, 267, 158,
+ 246, 77, 210, 159, 277, 258, 205, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 0, 0,
+ 0, 0, 0, 277, 277, 277, 277, 277, 206, 41,
+ 231, 232, 233, 234, 235, 236, 237, 238, 239, 277,
+
+ 270, 271, 272, 277, 277, 277, 277, 277, 115, 116,
+ 117, 277, 277, 269, 277, 277, 277, 251, 277, 277,
+ 277, 277, 114, 277, 277, 277, 277, 277, 277, 277,
+ 277, 200, 274, 275, 276, 277, 277, 277, 277, 277,
+ 277, 71, 277, 277, 73, 277, 277, 0, 0, 0,
+ 0, 0, 0, 277, 277, 74, 152, 35, 212, 126,
+ 127, 128, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 208, 277, 277, 277,
+ 155, 266, 154, 277, 277, 277, 277, 277, 277, 277,
+ 277, 201, 137, 138, 139, 277, 277, 273, 219, 277,
+
+ 153, 277, 0, 5, 0, 8, 0, 0, 0, 16,
+ 3, 277, 28, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 124, 277, 277, 277, 119, 213, 75, 277,
+ 277, 277, 277, 277, 248, 277, 256, 253, 261, 80,
+ 81, 82, 277, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 156, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 277, 277, 277,
+ 135, 277, 277, 130, 277, 277, 277, 277, 277, 277,
+ 277, 277, 91, 92, 93, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 277, 268, 277,
+
+ 277, 146, 277, 277, 141, 97, 98, 99, 277, 277,
+ 4, 0, 0, 0, 6, 0, 0, 0, 0, 0,
+ 0, 0, 157, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 118, 277, 120, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 103, 277, 277,
+ 277, 277, 83, 277, 277, 277, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 0, 7, 0, 0,
+ 0, 0, 0, 277, 277, 277, 129, 131, 277, 121,
+ 277, 122, 277, 277, 277, 277, 277, 104, 277, 277,
+ 94, 277, 277, 277, 277, 277, 277, 277, 277, 277,
+
+ 277, 277, 277, 277, 277, 277, 140, 142, 277, 277,
+ 277, 105, 277, 277, 100, 0, 0, 0, 0, 132,
+ 133, 277, 277, 277, 148, 277, 277, 149, 277, 277,
+ 277, 277, 277, 277, 277, 78, 277, 277, 277, 277,
+ 243, 277, 244, 259, 277, 277, 277, 143, 144, 277,
+ 277, 277, 277, 277, 277, 277, 277, 0, 0, 0,
+ 0, 277, 277, 277, 277, 125, 123, 277, 277, 277,
+ 262, 264, 277, 84, 277, 85, 277, 277, 277, 277,
+ 277, 277, 277, 277, 277, 277, 277, 263, 265, 277,
+ 0, 0, 0, 0, 0, 136, 134, 277, 277, 95,
+
+ 96, 277, 277, 277, 86, 277, 277, 277, 87, 277,
+ 277, 277, 147, 145, 101, 102, 277, 277, 0, 0,
+ 0, 0, 0, 277, 277, 277, 277, 277, 277, 106,
+ 277, 109, 277, 277, 277, 277, 0, 0, 0, 0,
+ 0, 0, 277, 277, 107, 110, 277, 277, 277, 277,
+ 88, 277, 108, 111, 0, 0, 0, 9, 0, 0,
+ 0, 0, 150, 151, 277, 277, 277, 277, 113, 0,
+ 0, 10, 0, 0, 0, 0, 277, 277, 245, 277,
+ 0, 0, 0, 0, 0, 13, 89, 90, 277, 0,
+ 0, 0, 11, 0, 14, 277, 0, 0, 12, 112,
+
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 15,
+ 0
+ } ;
+
+static const YY_CHAR yy_ec[256] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 1, 1, 4, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 5, 6, 7, 8, 1, 9, 10, 1, 11,
+ 12, 13, 14, 1, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 23, 25, 25, 26, 1, 27,
+ 28, 29, 1, 1, 30, 31, 32, 33, 34, 35,
+ 36, 37, 37, 37, 37, 38, 39, 37, 40, 37,
+ 37, 41, 42, 43, 44, 37, 37, 45, 37, 37,
+ 1, 1, 1, 46, 47, 1, 48, 49, 50, 51,
+
+ 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+ 62, 63, 37, 64, 65, 66, 67, 68, 69, 70,
+ 71, 72, 1, 73, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1
+ } ;
+
+static const YY_CHAR yy_meta[74] =
+ { 0,
+ 1, 2, 3, 1, 4, 1, 5, 1, 1, 1,
+ 1, 1, 1, 1, 1, 6, 5, 7, 7, 7,
+ 7, 7, 7, 7, 7, 1, 1, 1, 1, 8,
+ 8, 8, 8, 9, 10, 11, 11, 12, 11, 11,
+ 11, 11, 11, 11, 11, 1, 11, 8, 8, 8,
+ 8, 9, 10, 11, 11, 11, 11, 11, 12, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 11, 11, 1
+ } ;
+
+static const flex_int16_t yy_base[1127] =
+ { 0,
+ 0, 72, 80, 0, 1606, 1605, 1607, 1610, 74, 1610,
+ 1578, 1577, 144, 1576, 141, 142, 140, 1575, 157, 212,
+ 139, 1574, 155, 0, 128, 121, 126, 137, 151, 145,
+ 192, 1539, 144, 202, 160, 162, 177, 1533, 201, 190,
+ 218, 217, 221, 195, 165, 210, 288, 292, 1610, 296,
+ 1610, 1582, 1610, 284, 1610, 0, 1610, 1610, 308, 1610,
+ 1610, 1610, 1610, 1610, 1610, 1610, 1610, 1610, 343, 1610,
+ 385, 153, 283, 300, 1610, 1560, 0, 1538, 0, 1610,
+ 1558, 1536, 1566, 1610, 1610, 1610, 1565, 0, 1610, 1610,
+ 1526, 1531, 155, 1528, 1537, 1535, 1535, 1521, 1524, 1536,
+
+ 271, 274, 1518, 1534, 1514, 1528, 1514, 1511, 1511, 1517,
+ 221, 213, 1511, 1522, 1507, 1513, 1517, 1518, 1547, 0,
+ 1508, 1519, 274, 1518, 1513, 1493, 136, 1497, 1511, 1501,
+ 282, 1494, 293, 1507, 1509, 263, 1488, 1496, 268, 1483,
+ 1492, 305, 307, 1497, 1492, 1495, 1483, 1486, 1526, 286,
+ 188, 293, 1495, 1482, 1495, 315, 1488, 1487, 1610, 1610,
+ 371, 380, 395, 1610, 1472, 1480, 1484, 1475, 1486, 382,
+ 0, 393, 0, 446, 1610, 1502, 1483, 393, 458, 1610,
+ 1500, 1481, 466, 457, 455, 1610, 1610, 1477, 0, 1472,
+ 1467, 1471, 1481, 1475, 1477, 306, 1460, 1460, 1472, 1463,
+
+ 323, 1474, 1459, 1470, 1453, 1469, 1467, 1464, 1455, 1462,
+ 1448, 1446, 1459, 1444, 1461, 0, 1458, 1445, 1453, 1450,
+ 1454, 1435, 1454, 1447, 1444, 1432, 1431, 328, 1449, 1436,
+ 1445, 1432, 1439, 1429, 370, 1435, 1438, 1428, 1436, 1424,
+ 1428, 1419, 1434, 1433, 1423, 1430, 288, 1413, 1432, 1415,
+ 1413, 1412, 1423, 1412, 1407, 1405, 1407, 1418, 1403, 1405,
+ 1402, 1414, 1413, 1396, 1415, 1396, 368, 1405, 1400, 1398,
+ 1408, 1386, 404, 1405, 1407, 1395, 1387, 1386, 1401, 1389,
+ 1401, 1384, 0, 482, 459, 1610, 505, 1610, 1412, 1393,
+ 513, 504, 1610, 1610, 1410, 1391, 1610, 1405, 1383, 1373,
+
+ 1384, 1383, 0, 1380, 1385, 413, 0, 0, 1372, 1370,
+ 1370, 1371, 1366, 1375, 1363, 1363, 1380, 420, 1368, 435,
+ 0, 0, 1362, 1373, 1372, 1372, 0, 1356, 523, 0,
+ 0, 1358, 526, 1368, 1365, 1366, 1356, 1350, 1349, 1392,
+ 1349, 1348, 1348, 529, 1343, 0, 0, 1339, 1338, 1337,
+ 1339, 1340, 1345, 1339, 1335, 1349, 1344, 1344, 1342, 1341,
+ 1334, 1328, 1330, 1329, 1332, 1332, 1338, 1323, 1326, 1321,
+ 1330, 1335, 1322, 1319, 1332, 1322, 0, 0, 1329, 1327,
+ 1324, 1353, 1314, 1314, 1320, 1310, 1318, 532, 1315, 0,
+ 0, 0, 0, 1304, 1317, 1316, 1315, 1307, 1313, 1310,
+
+ 1298, 546, 520, 1610, 1610, 1310, 1311, 1311, 0, 1295,
+ 0, 0, 0, 1296, 0, 1305, 1295, 0, 1294, 1295,
+ 1301, 1288, 1281, 1280, 1279, 1296, 0, 0, 0, 1286,
+ 0, 1282, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 1295, 1292, 555, 1291, 0, 0, 1295, 1288, 1284,
+ 1280, 0, 0, 0, 1272, 539, 560, 563, 1277, 1273,
+ 1279, 1269, 1267, 1281, 1265, 0, 1265, 1279, 1267, 1263,
+ 1270, 1264, 1276, 1271, 1271, 0, 1268, 1265, 1269, 1252,
+ 1250, 1253, 1260, 1266, 1261, 1262, 1259, 1288, 1245, 0,
+ 1247, 1248, 0, 0, 0, 0, 1245, 1249, 0, 1242,
+
+ 1242, 1235, 448, 1241, 1244, 0, 1252, 1231, 0, 1241,
+ 1235, 0, 1228, 1228, 0, 1242, 568, 571, 574, 0,
+ 1244, 0, 577, 581, 1258, 1257, 1256, 1221, 1220, 0,
+ 1220, 1237, 1236, 1231, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 1219, 1233, 1219, 1216, 0, 0,
+ 1222, 457, 0, 1218, 1226, 1225, 0, 1217, 1209, 0,
+ 0, 0, 0, 0, 1206, 0, 0, 1205, 1217, 583,
+ 587, 1222, 1208, 1215, 1214, 1211, 1205, 1202, 1197, 1210,
+ 609, 1212, 1197, 1191, 1191, 1205, 1189, 1202, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1194,
+
+ 0, 0, 0, 1219, 1218, 1217, 1182, 1181, 432, 596,
+ 0, 1194, 1197, 0, 1195, 1183, 1179, 0, 1192, 1189,
+ 1188, 1177, 0, 1176, 1166, 1184, 1169, 1172, 620, 1177,
+ 1180, 0, 0, 0, 0, 1198, 1197, 1196, 1161, 1160,
+ 1160, 0, 1174, 1160, 0, 1171, 1163, 1165, 1168, 653,
+ 618, 506, 1158, 1162, 1165, 0, 0, 0, 0, 1186,
+ 606, 0, 1162, 1165, 1149, 1157, 1147, 1168, 1157, 1153,
+ 1154, 1154, 1153, 1138, 629, 1151, 0, 1152, 1140, 1139,
+ 0, 0, 0, 1135, 1165, 1164, 1163, 1128, 1127, 1123,
+ 1131, 0, 1161, 623, 0, 1137, 1140, 0, 0, 647,
+
+ 0, 1117, 1125, 1610, 722, 1610, 681, 0, 641, 686,
+ 1610, 1125, 0, 1121, 1120, 1141, 1130, 1128, 1128, 1115,
+ 1130, 1113, 1146, 1125, 1126, 1121, 1142, 0, 0, 1138,
+ 1137, 1136, 1096, 1095, 0, 1078, 0, 0, 0, 580,
+ 651, 1102, 1085, 1087, 1069, 1080, 1066, 1065, 1084, 1072,
+ 1070, 1068, 1085, 1084, 1073, 1031, 1003, 0, 1008, 1061,
+ 754, 692, 704, 1024, 1013, 1001, 1015, 996, 997, 996,
+ 1029, 1008, 1005, 1026, 1007, 1003, 1005, 988, 985, 999,
+ 985, 984, 1017, 678, 0, 993, 996, 988, 979, 987,
+ 977, 997, 986, 982, 983, 981, 980, 979, 0, 960,
+
+ 955, 988, 958, 23, 138, 188, 681, 0, 188, 210,
+ 1610, 269, 710, 728, 1610, 747, 0, 733, 306, 344,
+ 328, 335, 0, 353, 374, 365, 383, 395, 411, 425,
+ 436, 444, 453, 0, 459, 0, 476, 477, 492, 520,
+ 520, 524, 526, 541, 551, 568, 553, 594, 575, 594,
+ 607, 607, 640, 599, 617, 621, 616, 619, 623, 625,
+ 632, 646, 672, 664, 665, 669, 758, 1610, 777, 690,
+ 665, 678, 678, 680, 682, 690, 0, 0, 694, 720,
+ 695, 739, 735, 715, 737, 722, 723, 758, 739, 738,
+ 761, 740, 745, 743, 747, 732, 731, 747, 734, 737,
+
+ 738, 748, 743, 734, 735, 743, 0, 0, 744, 745,
+ 746, 781, 762, 761, 784, 777, 762, 757, 762, 0,
+ 0, 771, 772, 766, 0, 767, 752, 0, 754, 778,
+ 779, 764, 763, 766, 767, 0, 761, 771, 763, 771,
+ 794, 775, 0, 0, 774, 791, 792, 0, 0, 793,
+ 794, 795, 796, 781, 780, 783, 784, 847, 848, 795,
+ 793, 784, 785, 809, 810, 0, 0, 789, 790, 798,
+ 0, 0, 799, 822, 796, 824, 819, 813, 800, 822,
+ 820, 813, 802, 803, 804, 805, 813, 0, 0, 814,
+ 877, 878, 879, 813, 832, 0, 0, 836, 837, 0,
+
+ 0, 842, 843, 837, 0, 838, 823, 847, 0, 825,
+ 835, 858, 0, 0, 0, 0, 851, 852, 840, 900,
+ 850, 852, 904, 845, 846, 839, 841, 865, 866, 0,
+ 865, 875, 849, 885, 849, 850, 854, 870, 922, 924,
+ 926, 934, 856, 861, 0, 0, 881, 887, 878, 887,
+ 0, 901, 0, 0, 896, 943, 944, 1610, 948, 949,
+ 955, 905, 0, 0, 885, 890, 892, 915, 0, 900,
+ 963, 1610, 965, 916, 918, 971, 903, 905, 0, 927,
+ 923, 927, 979, 980, 985, 1610, 0, 0, 924, 940,
+ 993, 997, 1610, 998, 1610, 920, 932, 1002, 1610, 0,
+
+ 928, 1010, 1011, 1018, 1023, 942, 947, 1024, 1028, 1610,
+ 1610, 1071, 1077, 1083, 1090, 1093, 1103, 1109, 1115, 1121,
+ 1127, 1138, 1139, 1146, 1157, 1158
+ } ;
+
+static const flex_int16_t yy_def[1127] =
+ { 0,
+ 1111, 1, 1111, 3, 1112, 1112, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1113, 1111, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1114, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 19, 1115, 1111, 1111, 1111, 1116, 1111, 20, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1113, 1111, 1111,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1117, 1111, 1114, 1111, 1111, 1111, 1111, 71, 1111, 1111,
+ 1111, 1111, 1111, 1118, 1116, 1111, 1111, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111,
+ 1111, 1111, 1117, 1111, 1119, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1120, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111, 1111,
+
+ 1111, 1111, 1121, 1111, 1111, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1111, 1111, 1111, 1111, 1111, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111,
+ 1111, 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111,
+ 1122, 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1111, 1111, 1111, 1111, 1111, 1123, 1122, 1111,
+ 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1124,
+ 1111, 1125, 1123, 1111, 1111, 1111, 1111, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1111, 1124, 1111, 1111, 1111, 1111, 1126, 1125, 1111, 1111,
+ 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1126, 1111,
+ 1111, 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111, 1111, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111,
+ 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1111, 1111, 1111, 1111, 1111, 1113, 1113, 1113, 1113, 1113,
+
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111,
+ 1111, 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1113, 1113, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1113,
+ 1113, 1113, 1113, 1113, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1113, 1113, 1113, 1113, 1113, 1113, 1113, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1113, 1113, 1113, 1113,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1113, 1113, 1113, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1113, 1111, 1111, 1111, 1113,
+
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 0, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111
+ } ;
+
+static const flex_int16_t yy_nxt[1684] =
+ { 0,
+ 8, 9, 10, 9, 9, 11, 8, 8, 12, 13,
+ 8, 8, 14, 15, 16, 17, 18, 19, 20, 20,
+ 20, 20, 20, 20, 20, 8, 21, 22, 23, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 25, 24, 26, 27, 28,
+ 29, 30, 31, 32, 33, 34, 24, 24, 35, 36,
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 24,
+ 24, 24, 46, 47, 859, 59, 47, 59, 59, 48,
+ 49, 50, 51, 50, 50, 49, 49, 49, 49, 49,
+ 49, 49, 49, 49, 49, 49, 52, 53, 54, 54,
+
+ 54, 54, 54, 54, 54, 55, 49, 49, 49, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 49, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
+ 56, 56, 49, 62, 65, 89, 67, 69, 69, 69,
+ 69, 69, 69, 69, 69, 83, 84, 860, 66, 68,
+ 91, 63, 71, 90, 72, 72, 72, 72, 72, 72,
+ 72, 73, 86, 87, 98, 92, 93, 94, 99, 95,
+ 74, 116, 96, 97, 75, 100, 233, 1111, 101, 117,
+
+ 76, 77, 102, 107, 234, 108, 103, 126, 74, 128,
+ 104, 118, 105, 129, 109, 75, 190, 861, 106, 157,
+ 191, 127, 1111, 78, 130, 119, 77, 71, 158, 79,
+ 79, 79, 79, 79, 79, 79, 79, 159, 131, 110,
+ 865, 136, 154, 267, 149, 74, 155, 111, 133, 80,
+ 112, 137, 268, 113, 120, 81, 156, 121, 866, 114,
+ 214, 122, 123, 74, 134, 138, 124, 135, 145, 125,
+ 80, 146, 139, 140, 215, 813, 150, 141, 82, 212,
+ 147, 151, 160, 142, 143, 152, 144, 148, 153, 161,
+ 213, 59, 161, 163, 164, 162, 163, 170, 71, 170,
+
+ 170, 172, 172, 172, 172, 172, 172, 172, 172, 59,
+ 246, 59, 59, 183, 183, 251, 74, 184, 184, 184,
+ 184, 184, 184, 184, 184, 199, 202, 247, 248, 252,
+ 200, 201, 225, 203, 74, 226, 227, 238, 870, 228,
+ 269, 229, 241, 165, 239, 265, 266, 166, 270, 362,
+ 167, 340, 255, 363, 168, 257, 242, 307, 243, 169,
+ 69, 69, 69, 69, 69, 69, 69, 69, 256, 258,
+ 274, 308, 161, 275, 59, 161, 174, 175, 162, 341,
+ 176, 163, 164, 170, 163, 170, 170, 313, 314, 348,
+ 349, 350, 871, 872, 174, 175, 163, 164, 873, 163,
+
+ 874, 177, 178, 178, 178, 178, 178, 178, 178, 178,
+ 172, 172, 172, 172, 172, 172, 172, 172, 179, 180,
+ 383, 875, 181, 390, 391, 392, 287, 288, 876, 384,
+ 289, 165, 411, 412, 413, 166, 179, 180, 167, 423,
+ 424, 425, 168, 182, 287, 288, 165, 169, 877, 581,
+ 166, 290, 581, 167, 427, 428, 429, 168, 878, 284,
+ 284, 665, 169, 285, 285, 285, 285, 285, 285, 285,
+ 285, 291, 291, 666, 879, 292, 292, 292, 292, 292,
+ 292, 292, 292, 184, 184, 184, 184, 184, 184, 184,
+ 184, 294, 297, 175, 295, 880, 176, 881, 298, 285,
+
+ 285, 285, 285, 285, 285, 285, 285, 710, 623, 294,
+ 710, 175, 624, 297, 882, 296, 883, 177, 402, 402,
+ 884, 299, 403, 403, 403, 403, 403, 403, 403, 403,
+ 292, 292, 292, 292, 292, 292, 292, 292, 180, 885,
+ 886, 181, 435, 436, 437, 439, 440, 441, 452, 453,
+ 454, 494, 495, 496, 288, 887, 180, 289, 536, 537,
+ 538, 888, 182, 403, 403, 403, 403, 403, 403, 403,
+ 403, 889, 288, 525, 526, 527, 890, 891, 290, 539,
+ 540, 541, 542, 543, 544, 528, 529, 591, 592, 593,
+ 594, 595, 596, 597, 598, 599, 601, 602, 603, 604,
+
+ 605, 606, 633, 634, 635, 636, 637, 638, 892, 789,
+ 581, 607, 608, 581, 893, 894, 895, 639, 640, 705,
+ 706, 790, 705, 896, 897, 667, 650, 651, 651, 651,
+ 651, 651, 651, 651, 668, 715, 669, 670, 685, 686,
+ 687, 898, 705, 706, 716, 705, 717, 730, 731, 732,
+ 688, 689, 748, 690, 705, 706, 899, 705, 900, 733,
+ 734, 749, 903, 750, 904, 753, 754, 755, 905, 901,
+ 707, 707, 707, 707, 707, 707, 707, 756, 757, 906,
+ 791, 902, 705, 706, 907, 705, 908, 710, 909, 792,
+ 710, 793, 794, 814, 815, 910, 814, 708, 707, 707,
+
+ 707, 707, 707, 707, 707, 705, 706, 839, 705, 911,
+ 862, 867, 868, 912, 867, 913, 840, 914, 841, 863,
+ 915, 864, 708, 705, 706, 916, 705, 764, 760, 814,
+ 815, 917, 814, 918, 814, 815, 765, 814, 919, 761,
+ 762, 762, 762, 762, 762, 762, 762, 766, 814, 815,
+ 920, 814, 921, 922, 767, 814, 815, 923, 814, 867,
+ 868, 924, 867, 925, 816, 816, 816, 816, 816, 816,
+ 816, 816, 816, 816, 816, 816, 816, 816, 814, 815,
+ 926, 814, 927, 928, 929, 930, 931, 932, 933, 934,
+ 935, 936, 937, 938, 939, 940, 941, 942, 817, 943,
+
+ 944, 945, 946, 947, 948, 949, 950, 951, 952, 953,
+ 954, 955, 956, 957, 958, 959, 960, 961, 962, 963,
+ 964, 965, 966, 817, 967, 968, 969, 970, 971, 972,
+ 973, 974, 975, 976, 977, 978, 979, 980, 981, 982,
+ 983, 984, 985, 986, 987, 988, 989, 990, 991, 992,
+ 994, 991, 992, 995, 996, 997, 998, 999, 993, 1000,
+ 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010,
+ 1011, 1012, 1013, 1014, 1015, 1016, 1017, 1018, 991, 992,
+ 1020, 991, 992, 1020, 1022, 1023, 1024, 1025, 993, 1026,
+ 1027, 1028, 1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036,
+
+ 1037, 1020, 1038, 1040, 1020, 1041, 1043, 1044, 1041, 1045,
+ 1039, 1046, 1047, 1048, 1042, 1049, 1050, 1051, 1052, 1053,
+ 1054, 1055, 1056, 1057, 1063, 1059, 1057, 1041, 1059, 1064,
+ 1041, 1065, 1019, 1058, 1060, 1061, 1042, 1066, 1061, 1067,
+ 1021, 1068, 1069, 1070, 1071, 1057, 1077, 1071, 1057, 1059,
+ 1073, 1078, 1059, 1073, 1072, 1058, 1061, 1075, 1060, 1061,
+ 1079, 1021, 1080, 1081, 1071, 1076, 1073, 1071, 1082, 1073,
+ 1084, 1087, 1085, 1088, 1072, 1085, 1083, 1089, 1090, 1091,
+ 1092, 1094, 1086, 1092, 1094, 1096, 1085, 1097, 1100, 1085,
+ 1093, 1095, 1101, 1102, 1098, 1062, 1086, 1098, 1092, 1094,
+
+ 1107, 1092, 1094, 1098, 1099, 1108, 1098, 858, 1093, 1095,
+ 1074, 1103, 1103, 1099, 1103, 1103, 1062, 857, 856, 1105,
+ 1104, 1104, 1105, 855, 1105, 1109, 1074, 1105, 1109, 1109,
+ 854, 853, 1109, 852, 851, 1110, 850, 849, 848, 1110,
+ 847, 846, 845, 844, 843, 842, 838, 837, 836, 835,
+ 834, 833, 832, 831, 830, 829, 828, 827, 826, 825,
+ 824, 823, 822, 821, 820, 1106, 819, 813, 811, 810,
+ 1106, 57, 57, 57, 57, 57, 57, 57, 57, 57,
+ 57, 57, 57, 88, 88, 88, 88, 88, 88, 173,
+ 173, 173, 173, 173, 173, 73, 73, 809, 73, 185,
+
+ 185, 185, 185, 283, 283, 808, 283, 283, 283, 283,
+ 283, 283, 283, 283, 283, 184, 807, 806, 184, 805,
+ 184, 285, 804, 803, 285, 802, 285, 292, 801, 800,
+ 292, 799, 292, 403, 798, 797, 403, 796, 403, 709,
+ 709, 709, 795, 788, 709, 763, 763, 763, 763, 812,
+ 812, 812, 812, 812, 812, 812, 812, 812, 818, 818,
+ 818, 787, 786, 818, 869, 869, 869, 869, 785, 784,
+ 783, 782, 781, 780, 779, 778, 777, 776, 775, 774,
+ 773, 772, 771, 770, 769, 768, 759, 758, 752, 751,
+ 747, 746, 745, 744, 743, 742, 741, 740, 739, 738,
+
+ 737, 736, 735, 729, 728, 727, 726, 725, 724, 723,
+ 722, 721, 720, 719, 718, 714, 713, 712, 711, 704,
+ 703, 702, 701, 700, 699, 698, 697, 696, 695, 694,
+ 693, 692, 691, 684, 683, 682, 681, 680, 679, 678,
+ 677, 676, 675, 674, 673, 672, 671, 664, 663, 662,
+ 661, 660, 659, 658, 657, 656, 655, 654, 653, 652,
+ 649, 648, 647, 646, 645, 644, 643, 642, 641, 632,
+ 631, 630, 629, 628, 627, 626, 625, 622, 621, 620,
+ 619, 618, 617, 616, 615, 614, 613, 612, 611, 610,
+ 609, 600, 590, 589, 588, 587, 586, 585, 584, 583,
+
+ 582, 580, 579, 578, 577, 576, 575, 574, 573, 572,
+ 571, 570, 569, 568, 567, 566, 565, 564, 563, 562,
+ 561, 560, 559, 558, 557, 556, 555, 554, 553, 552,
+ 551, 550, 549, 548, 547, 546, 545, 535, 534, 533,
+ 532, 531, 530, 524, 523, 522, 521, 520, 519, 518,
+ 517, 516, 515, 514, 513, 512, 511, 510, 509, 508,
+ 507, 506, 505, 504, 503, 502, 501, 500, 499, 498,
+ 497, 493, 492, 491, 490, 489, 488, 487, 486, 485,
+ 484, 483, 482, 481, 480, 479, 478, 477, 476, 475,
+ 474, 473, 472, 471, 470, 469, 468, 467, 466, 465,
+
+ 464, 463, 462, 461, 460, 459, 458, 457, 456, 455,
+ 451, 450, 449, 448, 447, 446, 445, 444, 443, 442,
+ 438, 434, 433, 432, 431, 430, 426, 422, 421, 420,
+ 419, 418, 417, 416, 415, 414, 410, 409, 408, 407,
+ 406, 297, 297, 405, 405, 404, 404, 401, 400, 399,
+ 398, 397, 396, 395, 394, 393, 389, 388, 387, 386,
+ 385, 382, 381, 380, 379, 378, 377, 376, 375, 374,
+ 373, 372, 371, 370, 369, 368, 367, 366, 365, 364,
+ 361, 360, 359, 358, 357, 356, 355, 354, 353, 352,
+ 351, 347, 346, 345, 344, 343, 342, 339, 338, 337,
+
+ 336, 335, 334, 333, 332, 331, 330, 329, 328, 327,
+ 326, 325, 324, 323, 322, 321, 320, 319, 318, 317,
+ 316, 315, 312, 311, 310, 309, 306, 305, 304, 303,
+ 302, 301, 300, 293, 293, 286, 286, 282, 281, 280,
+ 279, 278, 277, 276, 273, 272, 271, 264, 263, 262,
+ 261, 260, 259, 254, 253, 250, 249, 245, 244, 240,
+ 237, 236, 235, 232, 231, 230, 224, 223, 222, 221,
+ 220, 219, 218, 217, 216, 211, 210, 209, 208, 207,
+ 206, 205, 204, 198, 197, 196, 195, 194, 193, 192,
+ 189, 188, 187, 186, 80, 80, 75, 75, 171, 132,
+
+ 115, 85, 70, 64, 61, 60, 1111, 58, 58, 7,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111
+ } ;
+
+static const flex_int16_t yy_chk[1684] =
+ { 0,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 804, 9, 2, 9, 9, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 13, 15, 25, 16, 17, 17, 17,
+ 17, 17, 17, 17, 17, 21, 21, 805, 15, 16,
+ 26, 13, 19, 25, 19, 19, 19, 19, 19, 19,
+ 19, 19, 23, 23, 28, 26, 26, 27, 28, 27,
+ 19, 33, 27, 27, 19, 28, 127, 72, 28, 33,
+
+ 19, 19, 29, 30, 127, 30, 29, 35, 19, 36,
+ 29, 33, 29, 36, 30, 19, 93, 806, 29, 45,
+ 93, 35, 72, 19, 37, 34, 19, 20, 45, 20,
+ 20, 20, 20, 20, 20, 20, 20, 46, 37, 31,
+ 809, 40, 44, 151, 43, 20, 44, 31, 39, 20,
+ 31, 40, 151, 31, 34, 20, 44, 34, 810, 31,
+ 112, 34, 34, 20, 39, 41, 34, 39, 42, 34,
+ 20, 42, 41, 41, 112, 812, 43, 41, 20, 111,
+ 42, 43, 46, 41, 41, 43, 41, 42, 43, 47,
+ 111, 47, 47, 48, 48, 47, 48, 50, 73, 50,
+
+ 50, 54, 54, 54, 54, 54, 54, 54, 54, 59,
+ 136, 59, 59, 74, 74, 139, 73, 74, 74, 74,
+ 74, 74, 74, 74, 74, 101, 102, 136, 136, 139,
+ 101, 101, 123, 102, 73, 123, 123, 131, 819, 123,
+ 152, 123, 133, 48, 131, 150, 150, 48, 152, 247,
+ 48, 228, 142, 247, 48, 143, 133, 196, 133, 48,
+ 69, 69, 69, 69, 69, 69, 69, 69, 142, 143,
+ 156, 196, 161, 156, 161, 161, 69, 69, 161, 228,
+ 69, 162, 162, 170, 162, 170, 170, 201, 201, 235,
+ 235, 235, 820, 821, 69, 69, 163, 163, 822, 163,
+
+ 824, 69, 71, 71, 71, 71, 71, 71, 71, 71,
+ 172, 172, 172, 172, 172, 172, 172, 172, 71, 71,
+ 267, 825, 71, 273, 273, 273, 178, 178, 826, 267,
+ 178, 162, 306, 306, 306, 162, 71, 71, 162, 318,
+ 318, 318, 162, 71, 178, 178, 163, 162, 827, 503,
+ 163, 178, 503, 163, 320, 320, 320, 163, 828, 174,
+ 174, 609, 163, 174, 174, 174, 174, 174, 174, 174,
+ 174, 179, 179, 609, 829, 179, 179, 179, 179, 179,
+ 179, 179, 179, 183, 183, 183, 183, 183, 183, 183,
+ 183, 184, 185, 285, 184, 830, 285, 831, 185, 284,
+
+ 284, 284, 284, 284, 284, 284, 284, 652, 552, 184,
+ 652, 285, 552, 185, 832, 184, 833, 285, 287, 287,
+ 835, 185, 287, 287, 287, 287, 287, 287, 287, 287,
+ 291, 291, 291, 291, 291, 291, 291, 291, 292, 837,
+ 838, 292, 329, 329, 329, 333, 333, 333, 344, 344,
+ 344, 388, 388, 388, 403, 839, 292, 403, 456, 456,
+ 456, 840, 292, 402, 402, 402, 402, 402, 402, 402,
+ 402, 841, 403, 444, 444, 444, 842, 843, 403, 457,
+ 457, 457, 458, 458, 458, 444, 444, 517, 517, 517,
+ 518, 518, 518, 519, 519, 519, 523, 523, 523, 524,
+
+ 524, 524, 570, 570, 570, 571, 571, 571, 844, 740,
+ 581, 524, 524, 581, 845, 846, 847, 571, 571, 651,
+ 651, 740, 651, 848, 849, 610, 581, 581, 581, 581,
+ 581, 581, 581, 581, 610, 661, 610, 610, 629, 629,
+ 629, 850, 709, 709, 661, 709, 661, 675, 675, 675,
+ 629, 629, 694, 629, 650, 650, 851, 650, 852, 675,
+ 675, 694, 854, 694, 855, 700, 700, 700, 856, 853,
+ 650, 650, 650, 650, 650, 650, 650, 700, 700, 857,
+ 741, 853, 707, 707, 858, 707, 859, 710, 860, 741,
+ 710, 741, 741, 762, 762, 861, 762, 650, 707, 707,
+
+ 707, 707, 707, 707, 707, 763, 763, 784, 763, 862,
+ 807, 813, 813, 863, 813, 864, 784, 865, 784, 807,
+ 866, 807, 650, 705, 705, 870, 705, 710, 705, 814,
+ 814, 871, 814, 872, 818, 818, 710, 818, 873, 705,
+ 705, 705, 705, 705, 705, 705, 705, 710, 816, 816,
+ 874, 816, 875, 876, 710, 761, 761, 879, 761, 867,
+ 867, 880, 867, 881, 816, 816, 816, 816, 816, 816,
+ 816, 761, 761, 761, 761, 761, 761, 761, 869, 869,
+ 882, 869, 883, 884, 885, 886, 887, 888, 889, 890,
+ 891, 892, 893, 894, 895, 896, 897, 898, 761, 899,
+
+ 900, 901, 902, 903, 904, 905, 906, 909, 910, 911,
+ 912, 913, 914, 915, 916, 917, 918, 919, 922, 923,
+ 924, 926, 927, 761, 929, 930, 931, 932, 933, 934,
+ 935, 937, 938, 939, 940, 941, 942, 945, 946, 947,
+ 950, 951, 952, 953, 954, 955, 956, 957, 958, 959,
+ 960, 958, 959, 961, 962, 963, 964, 965, 959, 968,
+ 969, 970, 973, 974, 975, 976, 977, 978, 979, 980,
+ 981, 982, 983, 984, 985, 986, 987, 990, 991, 992,
+ 993, 991, 992, 993, 994, 995, 998, 999, 992, 1002,
+ 1003, 1004, 1006, 1007, 1008, 1010, 1011, 1012, 1017, 1018,
+
+ 1019, 1020, 1021, 1022, 1020, 1023, 1024, 1025, 1023, 1026,
+ 1021, 1027, 1028, 1029, 1023, 1031, 1032, 1033, 1034, 1035,
+ 1036, 1037, 1038, 1039, 1043, 1040, 1039, 1041, 1040, 1044,
+ 1041, 1047, 991, 1039, 1040, 1042, 1041, 1048, 1042, 1049,
+ 993, 1050, 1052, 1055, 1056, 1057, 1065, 1056, 1057, 1059,
+ 1060, 1066, 1059, 1060, 1056, 1057, 1061, 1062, 1059, 1061,
+ 1067, 1020, 1068, 1070, 1071, 1062, 1073, 1071, 1074, 1073,
+ 1075, 1077, 1076, 1078, 1071, 1076, 1074, 1080, 1081, 1082,
+ 1083, 1084, 1076, 1083, 1084, 1089, 1085, 1090, 1096, 1085,
+ 1083, 1084, 1097, 1101, 1091, 1042, 1085, 1091, 1092, 1094,
+
+ 1106, 1092, 1094, 1098, 1091, 1107, 1098, 803, 1092, 1094,
+ 1060, 1102, 1103, 1098, 1102, 1103, 1061, 802, 801, 1104,
+ 1102, 1103, 1104, 800, 1105, 1108, 1073, 1105, 1108, 1109,
+ 798, 797, 1109, 796, 795, 1108, 794, 793, 792, 1109,
+ 791, 790, 789, 788, 787, 786, 783, 782, 781, 780,
+ 779, 778, 777, 776, 775, 774, 773, 772, 771, 770,
+ 769, 768, 767, 766, 765, 1104, 764, 760, 759, 757,
+ 1105, 1112, 1112, 1112, 1112, 1112, 1112, 1112, 1112, 1112,
+ 1112, 1112, 1112, 1113, 1113, 1113, 1113, 1113, 1113, 1114,
+ 1114, 1114, 1114, 1114, 1114, 1115, 1115, 756, 1115, 1116,
+
+ 1116, 1116, 1116, 1117, 1117, 755, 1117, 1117, 1117, 1117,
+ 1117, 1117, 1117, 1117, 1117, 1118, 754, 753, 1118, 752,
+ 1118, 1119, 751, 750, 1119, 749, 1119, 1120, 748, 747,
+ 1120, 746, 1120, 1121, 745, 744, 1121, 743, 1121, 1122,
+ 1122, 1122, 742, 736, 1122, 1123, 1123, 1123, 1123, 1124,
+ 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1124, 1125, 1125,
+ 1125, 734, 733, 1125, 1126, 1126, 1126, 1126, 732, 731,
+ 730, 727, 726, 725, 724, 723, 722, 721, 720, 719,
+ 718, 717, 716, 715, 714, 712, 703, 702, 697, 696,
+ 693, 691, 690, 689, 688, 687, 686, 685, 684, 680,
+
+ 679, 678, 676, 674, 673, 672, 671, 670, 669, 668,
+ 667, 666, 665, 664, 663, 660, 655, 654, 653, 649,
+ 648, 647, 646, 644, 643, 641, 640, 639, 638, 637,
+ 636, 631, 630, 628, 627, 626, 625, 624, 622, 621,
+ 620, 619, 617, 616, 615, 613, 612, 608, 607, 606,
+ 605, 604, 600, 588, 587, 586, 585, 584, 583, 582,
+ 580, 579, 578, 577, 576, 575, 574, 573, 572, 569,
+ 568, 565, 559, 558, 556, 555, 554, 551, 548, 547,
+ 546, 545, 534, 533, 532, 531, 529, 528, 527, 526,
+ 525, 521, 516, 514, 513, 511, 510, 508, 507, 505,
+
+ 504, 502, 501, 500, 498, 497, 492, 491, 489, 488,
+ 487, 486, 485, 484, 483, 482, 481, 480, 479, 478,
+ 477, 475, 474, 473, 472, 471, 470, 469, 468, 467,
+ 465, 464, 463, 462, 461, 460, 459, 455, 451, 450,
+ 449, 448, 445, 443, 442, 432, 430, 426, 425, 424,
+ 423, 422, 421, 420, 419, 417, 416, 414, 410, 408,
+ 407, 406, 401, 400, 399, 398, 397, 396, 395, 394,
+ 389, 387, 386, 385, 384, 383, 382, 381, 380, 379,
+ 376, 375, 374, 373, 372, 371, 370, 369, 368, 367,
+ 366, 365, 364, 363, 362, 361, 360, 359, 358, 357,
+
+ 356, 355, 354, 353, 352, 351, 350, 349, 348, 345,
+ 343, 342, 341, 340, 339, 338, 337, 336, 335, 334,
+ 332, 328, 326, 325, 324, 323, 319, 317, 316, 315,
+ 314, 313, 312, 311, 310, 309, 305, 304, 302, 301,
+ 300, 299, 298, 296, 295, 290, 289, 282, 281, 280,
+ 279, 278, 277, 276, 275, 274, 272, 271, 270, 269,
+ 268, 266, 265, 264, 263, 262, 261, 260, 259, 258,
+ 257, 256, 255, 254, 253, 252, 251, 250, 249, 248,
+ 246, 245, 244, 243, 242, 241, 240, 239, 238, 237,
+ 236, 234, 233, 232, 231, 230, 229, 227, 226, 225,
+
+ 224, 223, 222, 221, 220, 219, 218, 217, 215, 214,
+ 213, 212, 211, 210, 209, 208, 207, 206, 205, 204,
+ 203, 202, 200, 199, 198, 197, 195, 194, 193, 192,
+ 191, 190, 188, 182, 181, 177, 176, 169, 168, 167,
+ 166, 165, 158, 157, 155, 154, 153, 149, 148, 147,
+ 146, 145, 144, 141, 140, 138, 137, 135, 134, 132,
+ 130, 129, 128, 126, 125, 124, 122, 121, 119, 118,
+ 117, 116, 115, 114, 113, 110, 109, 108, 107, 106,
+ 105, 104, 103, 100, 99, 98, 97, 96, 95, 94,
+ 92, 91, 87, 83, 82, 81, 78, 76, 52, 38,
+
+ 32, 22, 18, 14, 12, 11, 7, 6, 5, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111, 1111,
+ 1111, 1111, 1111
+ } ;
+
+/* The intent behind this definition is that it'll catch
+ * any uses of REJECT which flex missed.
+ */
+#define REJECT reject_used_but_not_detected
+#define yymore() yymore_used_but_not_detected
+#define YY_MORE_ADJ 0
+#define YY_RESTORE_YY_MORE_OFFSET
+#line 1 "src/compiler/glsl/glsl_lexer.ll"
+#line 2 "src/compiler/glsl/glsl_lexer.ll"
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <ctype.h>
+#include <limits.h>
+#include "util/strtod.h"
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "glsl_parser.h"
+#include "main/mtypes.h"
+
+static int classify_identifier(struct _mesa_glsl_parse_state *, const char *,
+ unsigned name_len, YYSTYPE *output);
+
+#ifdef _MSC_VER
+#define YY_NO_UNISTD_H
+#endif
+
+#define YY_NO_INPUT
+#define YY_USER_ACTION \
+ do { \
+ yylloc->first_column = yycolumn + 1; \
+ yylloc->first_line = yylloc->last_line = yylineno + 1; \
+ yycolumn += yyleng; \
+ yylloc->last_column = yycolumn + 1; \
+ } while(0);
+
+#define YY_USER_INIT yylineno = 0; yycolumn = 0; yylloc->source = 0; \
+ yylloc->path = NULL;
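+
+/* Editorial note: with the YY_USER_ACTION above, source locations are
+ * 1-based. Assuming yycolumn == 0 at the start of a line, a 5-character
+ * token yields yylloc->first_column == 1 and yylloc->last_column == 6,
+ * i.e. last_column points one past the token's final character.
+ */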
+
+/* A macro for handling reserved words and keywords across language versions.
+ *
+ * Certain words start out as identifiers, become reserved words in
+ * later language revisions, and finally become language keywords.
+ * This may happen at different times in desktop GLSL and GLSL ES.
+ *
+ * For example, consider the following lexer rule:
+ * samplerBuffer KEYWORD(130, 0, 140, 0, SAMPLERBUFFER)
+ *
+ * This means that "samplerBuffer" will be treated as:
+ * - a keyword (SAMPLERBUFFER token) ...in GLSL >= 1.40
+ * - a reserved word - error ...in GLSL >= 1.30
+ * - an identifier ...in GLSL < 1.30 or GLSL ES
+ */
+#define KEYWORD(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, token) \
+ KEYWORD_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, false, token)
+
+/**
+ * Like the KEYWORD macro, but the word is also treated as a keyword
+ * if the given boolean expression is true.
+ */
+#define KEYWORD_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ alt_expr, token) \
+ do { \
+ if (yyextra->is_version(allowed_glsl, allowed_glsl_es) \
+ || (alt_expr)) { \
+ return token; \
+ } else if (yyextra->is_version(reserved_glsl, \
+ reserved_glsl_es)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else { \
+ return classify_identifier(yyextra, yytext, yyleng, yylval); \
+ } \
+ } while (0)
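+
+/* Editorial illustration (not flex output): for the samplerBuffer rule
+ * quoted above, KEYWORD(130, 0, 140, 0, SAMPLERBUFFER) behaves roughly as:
+ *
+ *    if (yyextra->is_version(140, 0))        // keyword in GLSL >= 1.40
+ *       return SAMPLERBUFFER;
+ *    else if (yyextra->is_version(130, 0))   // reserved in GLSL >= 1.30
+ *       return ERROR_TOK;                    // after _mesa_glsl_error()
+ *    else                                    // identifier otherwise
+ *       return classify_identifier(yyextra, yytext, yyleng, yylval);
+ */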
+
+/**
+ * Like KEYWORD_WITH_ALT, but used for built-in GLSL types
+ */
+#define TYPE_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ alt_expr, gtype) \
+ do { \
+ if (yyextra->is_version(allowed_glsl, allowed_glsl_es) \
+ || (alt_expr)) { \
+ yylval->type = gtype; \
+ return BASIC_TYPE_TOK; \
+ } else if (yyextra->is_version(reserved_glsl, \
+ reserved_glsl_es)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else { \
+ return classify_identifier(yyextra, yytext, yyleng, yylval); \
+ } \
+ } while (0)
+
+#define TYPE(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ gtype) \
+ TYPE_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ false, gtype)
+
+/**
+ * A macro for handling keywords that have been present in GLSL since
+ * its origin, but were changed into reserved words in GLSL 3.00 ES.
+ */
+#define DEPRECATED_ES_KEYWORD(token) \
+ do { \
+ if (yyextra->is_version(0, 300)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else { \
+ return token; \
+ } \
+ } while (0)
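+
+/* Editorial note: the "attribute" and "varying" rules below use this
+ * macro, so both remain keywords in desktop GLSL and GLSL ES 1.00 but
+ * become reserved-word errors under is_version(0, 300), i.e. GLSL ES
+ * >= 3.00.
+ */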
+
+/**
+ * Like DEPRECATED_ES_KEYWORD, but for types
+ */
+#define DEPRECATED_ES_TYPE_WITH_ALT(alt_expr, gtype) \
+ do { \
+ if (yyextra->is_version(0, 300)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else if (alt_expr) { \
+ yylval->type = gtype; \
+ return BASIC_TYPE_TOK; \
+ } else { \
+ return classify_identifier(yyextra, yytext, yyleng, yylval); \
+ } \
+ } while (0)
+
+#define DEPRECATED_ES_TYPE(gtype) \
+ DEPRECATED_ES_TYPE_WITH_ALT(true, gtype)
+
+static int
+literal_integer(char *text, int len, struct _mesa_glsl_parse_state *state,
+ YYSTYPE *lval, YYLTYPE *lloc, int base)
+{
+ bool is_uint = (text[len - 1] == 'u' ||
+ text[len - 1] == 'U');
+ bool is_long = (text[len - 1] == 'l' || text[len - 1] == 'L');
+ const char *digits = text;
+
+ if (is_long)
+ is_uint = (text[len - 2] == 'u' && text[len - 1] == 'l') ||
+ (text[len - 2] == 'U' && text[len - 1] == 'L');
+ /* Skip "0x" */
+ if (base == 16)
+ digits += 2;
+
+ unsigned long long value = strtoull(digits, NULL, base);
+
+ if (is_long)
+ lval->n64 = (int64_t)value;
+ else
+ lval->n = (int)value;
+
+ if (is_long && !is_uint && base == 10 && value > (uint64_t)LLONG_MAX + 1) {
+      /* Tries to catch cases where a negative value was provided unintentionally. */
+ _mesa_glsl_warning(lloc, state,
+ "signed literal value `%s' is interpreted as %lld",
+ text, lval->n64);
+ } else if (!is_long && value > UINT_MAX) {
+ /* Note that signed 0xffffffff is valid, not out of range! */
+ if (state->is_version(130, 300)) {
+ _mesa_glsl_error(lloc, state,
+ "literal value `%s' out of range", text);
+ } else {
+ _mesa_glsl_warning(lloc, state,
+ "literal value `%s' out of range", text);
+ }
+ } else if (base == 10 && !is_uint && (unsigned)value > (unsigned)INT_MAX + 1) {
+      /* Tries to catch cases where a negative value was provided
+       * unintentionally. Note that -2147483648 is parsed as
+       * -(2147483648), so we don't want to warn for INT_MAX + 1.
+       */
+ _mesa_glsl_warning(lloc, state,
+ "signed literal value `%s' is interpreted as %d",
+ text, lval->n);
+ }
+ if (is_long)
+ return is_uint ? UINT64CONSTANT : INT64CONSTANT;
+ else
+ return is_uint ? UINTCONSTANT : INTCONSTANT;
+}
+
+#define LITERAL_INTEGER(base) \
+ literal_integer(yytext, yyleng, yyextra, yylval, yylloc, base)
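+
+/* Worked example (editorial, not part of the lexer): for the token
+ * "0xffffffffu", len == 11, so text[len - 1] == 'u' makes is_uint true and
+ * is_long stays false; base == 16 skips the "0x" prefix; strtoull() yields
+ * 4294967295 (exactly UINT_MAX, so no out-of-range diagnostic fires); the
+ * function stores (int)value in lval->n and returns UINTCONSTANT.
+ */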
+
+#line 1606 "src/compiler/glsl/glsl_lexer.cpp"
+#line 219 "src/compiler/glsl/glsl_lexer.ll"
+ /* Note: When adding any start conditions to this list, you must also
+ * update the "Internal compiler error" catch-all rule near the end of
+ * this file. */
+
+#line 1612 "src/compiler/glsl/glsl_lexer.cpp"
+
+#define INITIAL 0
+#define PP 1
+#define PRAGMA 2
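+
+/* Editorial note on the start conditions above: the #version, #extension
+ * and recognized #pragma rules below switch the scanner into PP, an
+ * unrecognized #pragma enters PRAGMA, and the end-of-line rules return to
+ * INITIAL via BEGIN 0.
+ */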
+
+#define YY_EXTRA_TYPE struct _mesa_glsl_parse_state *
+
+/* Holds the entire state of the reentrant scanner. */
+struct yyguts_t
+ {
+
+ /* User-defined. Not touched by flex. */
+ YY_EXTRA_TYPE yyextra_r;
+
+ /* The rest are the same as the globals declared in the non-reentrant scanner. */
+ FILE *yyin_r, *yyout_r;
+ size_t yy_buffer_stack_top; /**< index of top of stack. */
+ size_t yy_buffer_stack_max; /**< capacity of stack. */
+ YY_BUFFER_STATE * yy_buffer_stack; /**< Stack as an array. */
+ char yy_hold_char;
+ int yy_n_chars;
+ int yyleng_r;
+ char *yy_c_buf_p;
+ int yy_init;
+ int yy_start;
+ int yy_did_buffer_switch_on_eof;
+ int yy_start_stack_ptr;
+ int yy_start_stack_depth;
+ int *yy_start_stack;
+ yy_state_type yy_last_accepting_state;
+ char* yy_last_accepting_cpos;
+
+ int yylineno_r;
+ int yy_flex_debug_r;
+
+ char *yytext_r;
+ int yy_more_flag;
+ int yy_more_len;
+
+ YYSTYPE * yylval_r;
+
+ YYLTYPE * yylloc_r;
+
+ }; /* end struct yyguts_t */
+
+static int yy_init_globals ( yyscan_t yyscanner );
+
+    /* This must go here because YYSTYPE and YYLTYPE are included
+     * from bison output in section 1. */
+ # define yylval yyg->yylval_r
+
+ # define yylloc yyg->yylloc_r
+
+int yylex_init (yyscan_t* scanner);
+
+int yylex_init_extra ( YY_EXTRA_TYPE user_defined, yyscan_t* scanner);
+
+/* Accessor methods to globals.
+ These are made visible to non-reentrant scanners for convenience. */
+
+int yylex_destroy ( yyscan_t yyscanner );
+
+int yyget_debug ( yyscan_t yyscanner );
+
+void yyset_debug ( int debug_flag , yyscan_t yyscanner );
+
+YY_EXTRA_TYPE yyget_extra ( yyscan_t yyscanner );
+
+void yyset_extra ( YY_EXTRA_TYPE user_defined , yyscan_t yyscanner );
+
+FILE *yyget_in ( yyscan_t yyscanner );
+
+void yyset_in ( FILE * _in_str , yyscan_t yyscanner );
+
+FILE *yyget_out ( yyscan_t yyscanner );
+
+void yyset_out ( FILE * _out_str , yyscan_t yyscanner );
+
+ int yyget_leng ( yyscan_t yyscanner );
+
+char *yyget_text ( yyscan_t yyscanner );
+
+int yyget_lineno ( yyscan_t yyscanner );
+
+void yyset_lineno ( int _line_number , yyscan_t yyscanner );
+
+int yyget_column ( yyscan_t yyscanner );
+
+void yyset_column ( int _column_no , yyscan_t yyscanner );
+
+YYSTYPE * yyget_lval ( yyscan_t yyscanner );
+
+void yyset_lval ( YYSTYPE * yylval_param , yyscan_t yyscanner );
+
+ YYLTYPE *yyget_lloc ( yyscan_t yyscanner );
+
+ void yyset_lloc ( YYLTYPE * yylloc_param , yyscan_t yyscanner );
+
+/* Macros after this point can all be overridden by user definitions in
+ * section 1.
+ */
+
+#ifndef YY_SKIP_YYWRAP
+#ifdef __cplusplus
+extern "C" int yywrap ( yyscan_t yyscanner );
+#else
+extern int yywrap ( yyscan_t yyscanner );
+#endif
+#endif
+
+#ifndef YY_NO_UNPUT
+
+#endif
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy ( char *, const char *, int , yyscan_t yyscanner);
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen ( const char * , yyscan_t yyscanner);
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+static int yyinput ( yyscan_t yyscanner );
+#else
+static int input ( yyscan_t yyscanner );
+#endif
+
+#endif
+
+/* Amount of stuff to slurp up with each read. */
+#ifndef YY_READ_BUF_SIZE
+#ifdef __ia64__
+/* On IA-64, the buffer size is 16k, not 8k */
+#define YY_READ_BUF_SIZE 16384
+#else
+#define YY_READ_BUF_SIZE 8192
+#endif /* __ia64__ */
+#endif
+
+/* Copy whatever the last rule matched to the standard output. */
+#ifndef ECHO
+/* This used to be an fputs(), but since the string might contain NULs,
+ * we now use fwrite().
+ */
+#define ECHO do { if (fwrite( yytext, (size_t) yyleng, 1, yyout )) {} } while (0)
+#endif
+
+/* Gets input and stuffs it into "buf". The number of characters read,
+ * or YY_NULL, is returned in "result".
+ */
+#ifndef YY_INPUT
+#define YY_INPUT(buf,result,max_size) \
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_is_interactive ) \
+ { \
+ int c = '*'; \
+ int n; \
+ for ( n = 0; n < max_size && \
+ (c = getc( yyin )) != EOF && c != '\n'; ++n ) \
+ buf[n] = (char) c; \
+ if ( c == '\n' ) \
+ buf[n++] = (char) c; \
+ if ( c == EOF && ferror( yyin ) ) \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ result = n; \
+ } \
+ else \
+ { \
+ errno=0; \
+ while ( (result = (int) fread(buf, 1, (yy_size_t) max_size, yyin)) == 0 && ferror(yyin)) \
+ { \
+ if( errno != EINTR) \
+ { \
+ YY_FATAL_ERROR( "input in flex scanner failed" ); \
+ break; \
+ } \
+ errno=0; \
+ clearerr(yyin); \
+ } \
+ }\
+\
+
+#endif
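+
+/* Editorial note: the interactive branch of YY_INPUT above reads one
+ * character at a time so the scanner never blocks waiting to fill a whole
+ * buffer, while the non-interactive branch fread()s up to max_size bytes,
+ * retrying when the read was interrupted (errno == EINTR) and treating any
+ * other read error as fatal.
+ */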
+
+/* No semi-colon after return; correct usage is to write "yyterminate();" -
+ * we don't want an extra ';' after the "return" because that will cause
+ * some compilers to complain about unreachable statements.
+ */
+#ifndef yyterminate
+#define yyterminate() return YY_NULL
+#endif
+
+/* Number of entries by which start-condition stack grows. */
+#ifndef YY_START_STACK_INCR
+#define YY_START_STACK_INCR 25
+#endif
+
+/* Report a fatal error. */
+#ifndef YY_FATAL_ERROR
+#define YY_FATAL_ERROR(msg) yy_fatal_error( msg , yyscanner)
+#endif
+
+/* end tables serialization structures and prototypes */
+
+/* Default declaration of generated scanner - a define so the user can
+ * easily add parameters.
+ */
+#ifndef YY_DECL
+#define YY_DECL_IS_OURS 1
+
+extern int yylex \
+ (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner);
+
+#define YY_DECL int yylex \
+ (YYSTYPE * yylval_param, YYLTYPE * yylloc_param , yyscan_t yyscanner)
+#endif /* !YY_DECL */
+
+/* Code executed at the beginning of each rule, after yytext and yyleng
+ * have been set up.
+ */
+#ifndef YY_USER_ACTION
+#define YY_USER_ACTION
+#endif
+
+/* Code executed at the end of each rule. */
+#ifndef YY_BREAK
+#define YY_BREAK /*LINTED*/break;
+#endif
+
+#define YY_RULE_SETUP \
+ if ( yyleng > 0 ) \
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = \
+ (yytext[yyleng - 1] == '\n'); \
+ YY_USER_ACTION
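+
+/* Editorial note: YY_RULE_SETUP records in yy_at_bol whether the previous
+ * match ended in a newline; YY_AT_BOL() then biases the start state in the
+ * match loop below, so that '^'-anchored rules (e.g. the preprocessor
+ * directives) can only fire at the beginning of a line.
+ */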
+
+/** The main scanner function which does all the work.
+ */
+YY_DECL
+{
+ yy_state_type yy_current_state;
+ char *yy_cp, *yy_bp;
+ int yy_act;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yylval = yylval_param;
+
+ yylloc = yylloc_param;
+
+ if ( !yyg->yy_init )
+ {
+ yyg->yy_init = 1;
+
+#ifdef YY_USER_INIT
+ YY_USER_INIT;
+#endif
+
+ if ( ! yyg->yy_start )
+ yyg->yy_start = 1; /* first start state */
+
+ if ( ! yyin )
+ yyin = stdin;
+
+ if ( ! yyout )
+ yyout = stdout;
+
+ if ( ! YY_CURRENT_BUFFER ) {
+ yyensure_buffer_stack (yyscanner);
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner);
+ }
+
+ yy_load_buffer_state( yyscanner );
+ }
+
+ {
+#line 232 "src/compiler/glsl/glsl_lexer.ll"
+
+
+#line 1892 "src/compiler/glsl/glsl_lexer.cpp"
+
+ while ( /*CONSTCOND*/1 ) /* loops until end-of-file is reached */
+ {
+ yy_cp = yyg->yy_c_buf_p;
+
+		/* Support of yytext: restore the character that was replaced
+		 * with NUL to terminate the previous yytext. */
+ *yy_cp = yyg->yy_hold_char;
+
+ /* yy_bp points to the position in yy_ch_buf of the start of
+ * the current run.
+ */
+ yy_bp = yy_cp;
+
+ yy_current_state = yyg->yy_start;
+ yy_current_state += YY_AT_BOL();
+yy_match:
+ do
+ {
+ YY_CHAR yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)] ;
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 1112 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ ++yy_cp;
+ }
+ while ( yy_current_state != 1111 );
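+
+		/* Editorial note on the compressed tables: yy_chk verifies that
+		 * the candidate entry in yy_nxt really belongs to
+		 * yy_current_state; on a mismatch the state falls back through
+		 * the yy_def "default" states, remapping the character class
+		 * via yy_meta once template states (>= 1112) are reached.
+		 * State 1111 is the jam state that ends the match, after which
+		 * the scanner backs up to the last accepting position recorded
+		 * above.
+		 */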
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+
+yy_find_action:
+ yy_act = yy_accept[yy_current_state];
+
+ YY_DO_BEFORE_ACTION;
+
+do_action: /* This label is used only to access EOF actions. */
+
+ switch ( yy_act )
+ { /* beginning of action switch */
+ case 0: /* must back up */
+ /* undo the effects of YY_DO_BEFORE_ACTION */
+ *yy_cp = yyg->yy_hold_char;
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+ goto yy_find_action;
+
+case 1:
+YY_RULE_SETUP
+#line 234 "src/compiler/glsl/glsl_lexer.ll"
+;
+ YY_BREAK
+/* Preprocessor tokens. */
+case 2:
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+yyg->yy_c_buf_p = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 237 "src/compiler/glsl/glsl_lexer.ll"
+;
+ YY_BREAK
+case 3:
+YY_RULE_SETUP
+#line 238 "src/compiler/glsl/glsl_lexer.ll"
+{ BEGIN PP; return VERSION_TOK; }
+ YY_BREAK
+case 4:
+YY_RULE_SETUP
+#line 239 "src/compiler/glsl/glsl_lexer.ll"
+{ BEGIN PP; return EXTENSION; }
+ YY_BREAK
+case 5:
+YY_RULE_SETUP
+#line 240 "src/compiler/glsl/glsl_lexer.ll"
+{
+ if (!yyextra->ARB_shading_language_include_enable) {
+ struct _mesa_glsl_parse_state *state = yyextra;
+ _mesa_glsl_error(yylloc, state,
+ "ARB_shading_language_include required "
+ "to use #include");
+ }
+}
+ YY_BREAK
+case 6:
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+yyg->yy_c_buf_p = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 248 "src/compiler/glsl/glsl_lexer.ll"
+{
+ /* Eat characters until the first digit is
+ * encountered
+ */
+ char *ptr = yytext;
+ while (!isdigit(*ptr))
+ ptr++;
+
+ /* Subtract one from the line number because
+ * yylineno is zero-based instead of
+ * one-based.
+ */
+ yylineno = strtol(ptr, &ptr, 0) - 1;
+
+ /* From GLSL 3.30 and GLSL ES on, after processing the
+ * line directive (including its new-line), the implementation
+ * will behave as if it is compiling at the line number passed
+ * as argument. It was line number + 1 in older specifications.
+ */
+ if (yyextra->is_version(330, 100))
+ yylineno--;
+
+ yylloc->source = strtol(ptr, NULL, 0);
+ yylloc->path = NULL;
+ }
+ YY_BREAK
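+
+/* Editorial example for the #line handling above: given "#line 42 3",
+ * yylloc->source becomes 3, and after the directive's newline subsequent
+ * tokens report line 42 under GLSL >= 3.30 or GLSL ES, and line 43 under
+ * older desktop GLSL, matching the spec change described in the comments.
+ */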
+case 7:
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+yyg->yy_c_buf_p = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 273 "src/compiler/glsl/glsl_lexer.ll"
+{
+ if (!yyextra->ARB_shading_language_include_enable) {
+ struct _mesa_glsl_parse_state *state = yyextra;
+ _mesa_glsl_error(yylloc, state,
+ "ARB_shading_language_include required "
+ "to use #line <line> \"<path>\"");
+ }
+
+ /* Eat characters until the first digit is
+ * encountered
+ */
+ char *ptr = yytext;
+ while (!isdigit(*ptr))
+ ptr++;
+
+ /* Subtract one from the line number because
+ * yylineno is zero-based instead of
+ * one-based.
+ */
+ yylineno = strtol(ptr, &ptr, 0) - 1;
+
+ /* From GLSL 3.30 and GLSL ES on, after processing the
+ * line directive (including its new-line), the implementation
+ * will behave as if it is compiling at the line number passed
+ * as argument. It was line number + 1 in older specifications.
+ */
+ if (yyextra->is_version(330, 100))
+ yylineno--;
+
+ while (isspace(*ptr))
+ ptr++;
+
+ /* Skip over leading " */
+ ptr++;
+
+ char *end = strrchr(ptr, '"');
+ int path_len = (end - ptr) + 1;
+ void *mem_ctx = yyextra->linalloc;
+ yylloc->path = (char *) linear_alloc_child(mem_ctx, path_len);
+ memcpy(yylloc->path, ptr, path_len);
+ yylloc->path[path_len - 1] = '\0';
+ }
+ YY_BREAK
+case 8:
+*yy_cp = yyg->yy_hold_char; /* undo effects of setting up yytext */
+yyg->yy_c_buf_p = yy_cp -= 1;
+YY_DO_BEFORE_ACTION; /* set up yytext again */
+YY_RULE_SETUP
+#line 315 "src/compiler/glsl/glsl_lexer.ll"
+{
+ /* Eat characters until the first digit is
+ * encountered
+ */
+ char *ptr = yytext;
+ while (!isdigit(*ptr))
+ ptr++;
+
+ /* Subtract one from the line number because
+ * yylineno is zero-based instead of
+ * one-based.
+ */
+ yylineno = strtol(ptr, &ptr, 0) - 1;
+
+ /* From GLSL 3.30 and GLSL ES on, after processing the
+ * line directive (including its new-line), the implementation
+ * will behave as if it is compiling at the line number passed
+ * as argument. It was line number + 1 in older specifications.
+ */
+ if (yyextra->is_version(330, 100))
+ yylineno--;
+ }
+ YY_BREAK
+case 9:
+YY_RULE_SETUP
+#line 337 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_DEBUG_ON;
+ }
+ YY_BREAK
+case 10:
+YY_RULE_SETUP
+#line 341 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_DEBUG_OFF;
+ }
+ YY_BREAK
+case 11:
+YY_RULE_SETUP
+#line 345 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_OPTIMIZE_ON;
+ }
+ YY_BREAK
+case 12:
+YY_RULE_SETUP
+#line 349 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_OPTIMIZE_OFF;
+ }
+ YY_BREAK
+case 13:
+YY_RULE_SETUP
+#line 353 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_WARNING_ON;
+ }
+ YY_BREAK
+case 14:
+YY_RULE_SETUP
+#line 357 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_WARNING_OFF;
+ }
+ YY_BREAK
+case 15:
+YY_RULE_SETUP
+#line 361 "src/compiler/glsl/glsl_lexer.ll"
+{
+ BEGIN PP;
+ return PRAGMA_INVARIANT_ALL;
+ }
+ YY_BREAK
+case 16:
+YY_RULE_SETUP
+#line 365 "src/compiler/glsl/glsl_lexer.ll"
+{ BEGIN PRAGMA; }
+ YY_BREAK
+case 17:
+/* rule 17 can match eol */
+YY_RULE_SETUP
+#line 367 "src/compiler/glsl/glsl_lexer.ll"
+{ BEGIN 0; yylineno++; yycolumn = 0; }
+ YY_BREAK
+case 18:
+YY_RULE_SETUP
+#line 368 "src/compiler/glsl/glsl_lexer.ll"
+{ }
+ YY_BREAK
+case 19:
+YY_RULE_SETUP
+#line 370 "src/compiler/glsl/glsl_lexer.ll"
+{ }
+ YY_BREAK
+case 20:
+YY_RULE_SETUP
+#line 371 "src/compiler/glsl/glsl_lexer.ll"
+{ }
+ YY_BREAK
+case 21:
+YY_RULE_SETUP
+#line 372 "src/compiler/glsl/glsl_lexer.ll"
+return COLON;
+ YY_BREAK
+case 22:
+YY_RULE_SETUP
+#line 373 "src/compiler/glsl/glsl_lexer.ll"
+{
+	/* We're not doing linear_strdup here, to avoid an implicit call
+	 * to strlen() for the length of the string, which flex has
+	 * already computed and stored in yyleng. Copying yyleng + 1
+	 * bytes also brings along flex's NUL terminator.
+	 */
+ void *mem_ctx = yyextra->linalloc;
+ char *id = (char *) linear_alloc_child(mem_ctx, yyleng + 1);
+ memcpy(id, yytext, yyleng + 1);
+ yylval->identifier = id;
+ return IDENTIFIER;
+ }
+ YY_BREAK
+case 23:
+YY_RULE_SETUP
+#line 384 "src/compiler/glsl/glsl_lexer.ll"
+{
+ yylval->n = strtol(yytext, NULL, 10);
+ return INTCONSTANT;
+ }
+ YY_BREAK
+case 24:
+YY_RULE_SETUP
+#line 388 "src/compiler/glsl/glsl_lexer.ll"
+{
+ yylval->n = 0;
+ return INTCONSTANT;
+ }
+ YY_BREAK
+case 25:
+/* rule 25 can match eol */
+YY_RULE_SETUP
+#line 392 "src/compiler/glsl/glsl_lexer.ll"
+{ BEGIN 0; yylineno++; yycolumn = 0; return EOL; }
+ YY_BREAK
+case 26:
+YY_RULE_SETUP
+#line 393 "src/compiler/glsl/glsl_lexer.ll"
+{ return yytext[0]; }
+ YY_BREAK
+case 27:
+/* rule 27 can match eol */
+YY_RULE_SETUP
+#line 395 "src/compiler/glsl/glsl_lexer.ll"
+{ yylineno++; yycolumn = 0; }
+ YY_BREAK
+case 28:
+YY_RULE_SETUP
+#line 397 "src/compiler/glsl/glsl_lexer.ll"
+DEPRECATED_ES_KEYWORD(ATTRIBUTE);
+ YY_BREAK
+case 29:
+YY_RULE_SETUP
+#line 398 "src/compiler/glsl/glsl_lexer.ll"
+return CONST_TOK;
+ YY_BREAK
+case 30:
+YY_RULE_SETUP
+#line 399 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::bool_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 31:
+YY_RULE_SETUP
+#line 400 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::float_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 32:
+YY_RULE_SETUP
+#line 401 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::int_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 33:
+YY_RULE_SETUP
+#line 402 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(130, 300, 130, 300, glsl_type::uint_type);
+ YY_BREAK
+case 34:
+YY_RULE_SETUP
+#line 404 "src/compiler/glsl/glsl_lexer.ll"
+return BREAK;
+ YY_BREAK
+case 35:
+YY_RULE_SETUP
+#line 405 "src/compiler/glsl/glsl_lexer.ll"
+return CONTINUE;
+ YY_BREAK
+case 36:
+YY_RULE_SETUP
+#line 406 "src/compiler/glsl/glsl_lexer.ll"
+return DO;
+ YY_BREAK
+case 37:
+YY_RULE_SETUP
+#line 407 "src/compiler/glsl/glsl_lexer.ll"
+return WHILE;
+ YY_BREAK
+case 38:
+YY_RULE_SETUP
+#line 408 "src/compiler/glsl/glsl_lexer.ll"
+return ELSE;
+ YY_BREAK
+case 39:
+YY_RULE_SETUP
+#line 409 "src/compiler/glsl/glsl_lexer.ll"
+return FOR;
+ YY_BREAK
+case 40:
+YY_RULE_SETUP
+#line 410 "src/compiler/glsl/glsl_lexer.ll"
+return IF;
+ YY_BREAK
+case 41:
+YY_RULE_SETUP
+#line 411 "src/compiler/glsl/glsl_lexer.ll"
+return DISCARD;
+ YY_BREAK
+case 42:
+YY_RULE_SETUP
+#line 412 "src/compiler/glsl/glsl_lexer.ll"
+return RETURN;
+ YY_BREAK
+case 43:
+YY_RULE_SETUP
+#line 413 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(0, 0, 0, 0, yyextra->EXT_demote_to_helper_invocation_enable, DEMOTE);
+ YY_BREAK
+case 44:
+YY_RULE_SETUP
+#line 415 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::bvec2_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 45:
+YY_RULE_SETUP
+#line 416 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::bvec3_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 46:
+YY_RULE_SETUP
+#line 417 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::bvec4_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 47:
+YY_RULE_SETUP
+#line 418 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::ivec2_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 48:
+YY_RULE_SETUP
+#line 419 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::ivec3_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 49:
+YY_RULE_SETUP
+#line 420 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::ivec4_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 50:
+YY_RULE_SETUP
+#line 421 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::uvec2_type);
+ YY_BREAK
+case 51:
+YY_RULE_SETUP
+#line 422 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::uvec3_type);
+ YY_BREAK
+case 52:
+YY_RULE_SETUP
+#line 423 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::uvec4_type);
+ YY_BREAK
+case 53:
+YY_RULE_SETUP
+#line 424 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::vec2_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 54:
+YY_RULE_SETUP
+#line 425 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::vec3_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 55:
+YY_RULE_SETUP
+#line 426 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::vec4_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 56:
+YY_RULE_SETUP
+#line 427 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::mat2_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 57:
+YY_RULE_SETUP
+#line 428 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::mat3_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 58:
+YY_RULE_SETUP
+#line 429 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::mat4_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 59:
+YY_RULE_SETUP
+#line 430 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat2_type);
+ YY_BREAK
+case 60:
+YY_RULE_SETUP
+#line 431 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat2x3_type);
+ YY_BREAK
+case 61:
+YY_RULE_SETUP
+#line 432 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat2x4_type);
+ YY_BREAK
+case 62:
+YY_RULE_SETUP
+#line 433 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat3x2_type);
+ YY_BREAK
+case 63:
+YY_RULE_SETUP
+#line 434 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat3_type);
+ YY_BREAK
+case 64:
+YY_RULE_SETUP
+#line 435 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat3x4_type);
+ YY_BREAK
+case 65:
+YY_RULE_SETUP
+#line 436 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat4x2_type);
+ YY_BREAK
+case 66:
+YY_RULE_SETUP
+#line 437 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat4x3_type);
+ YY_BREAK
+case 67:
+YY_RULE_SETUP
+#line 438 "src/compiler/glsl/glsl_lexer.ll"
+TYPE(120, 300, 120, 300, glsl_type::mat4_type);
+ YY_BREAK
+case 68:
+YY_RULE_SETUP
+#line 440 "src/compiler/glsl/glsl_lexer.ll"
+return IN_TOK;
+ YY_BREAK
+case 69:
+YY_RULE_SETUP
+#line 441 "src/compiler/glsl/glsl_lexer.ll"
+return OUT_TOK;
+ YY_BREAK
+case 70:
+YY_RULE_SETUP
+#line 442 "src/compiler/glsl/glsl_lexer.ll"
+return INOUT_TOK;
+ YY_BREAK
+case 71:
+YY_RULE_SETUP
+#line 443 "src/compiler/glsl/glsl_lexer.ll"
+return UNIFORM;
+ YY_BREAK
+case 72:
+YY_RULE_SETUP
+#line 444 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(0, 0, 430, 310, yyextra->ARB_shader_storage_buffer_object_enable, BUFFER);
+ YY_BREAK
+case 73:
+YY_RULE_SETUP
+#line 445 "src/compiler/glsl/glsl_lexer.ll"
+DEPRECATED_ES_KEYWORD(VARYING);
+ YY_BREAK
+case 74:
+YY_RULE_SETUP
+#line 446 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(120, 300, 120, 300, yyextra->EXT_gpu_shader4_enable, CENTROID);
+ YY_BREAK
+case 75:
+YY_RULE_SETUP
+#line 447 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(120, 100, 120, 100, INVARIANT);
+ YY_BREAK
+case 76:
+YY_RULE_SETUP
+#line 448 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(130, 100, 130, 300, yyextra->EXT_gpu_shader4_enable, FLAT);
+ YY_BREAK
+case 77:
+YY_RULE_SETUP
+#line 449 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 130, 300, SMOOTH);
+ YY_BREAK
+case 78:
+YY_RULE_SETUP
+#line 450 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable, NOPERSPECTIVE);
+ YY_BREAK
+case 79:
+YY_RULE_SETUP
+#line 451 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(0, 300, 400, 320, yyextra->has_tessellation_shader(), PATCH);
+ YY_BREAK
+case 80:
+YY_RULE_SETUP
+#line 453 "src/compiler/glsl/glsl_lexer.ll"
+DEPRECATED_ES_TYPE(glsl_type::sampler1D_type);
+ YY_BREAK
+case 81:
+YY_RULE_SETUP
+#line 454 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::sampler2D_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 82:
+YY_RULE_SETUP
+#line 455 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::sampler3D_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 83:
+YY_RULE_SETUP
+#line 456 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::samplerCube_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 84:
+YY_RULE_SETUP
+#line 457 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler1DArray_type);
+ YY_BREAK
+case 85:
+YY_RULE_SETUP
+#line 458 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler2DArray_type);
+ YY_BREAK
+case 86:
+YY_RULE_SETUP
+#line 459 "src/compiler/glsl/glsl_lexer.ll"
+DEPRECATED_ES_TYPE(glsl_type::sampler1DShadow_type);
+ YY_BREAK
+case 87:
+YY_RULE_SETUP
+#line 460 "src/compiler/glsl/glsl_lexer.ll"
+{ yylval->type = glsl_type::sampler2DShadow_type; return BASIC_TYPE_TOK; }
+ YY_BREAK
+case 88:
+YY_RULE_SETUP
+#line 461 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::samplerCubeShadow_type);
+ YY_BREAK
+case 89:
+YY_RULE_SETUP
+#line 462 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler1DArrayShadow_type);
+ YY_BREAK
+case 90:
+YY_RULE_SETUP
+#line 463 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler2DArrayShadow_type);
+ YY_BREAK
+case 91:
+YY_RULE_SETUP
+#line 464 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler1D_type);
+ YY_BREAK
+case 92:
+YY_RULE_SETUP
+#line 465 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler2D_type);
+ YY_BREAK
+case 93:
+YY_RULE_SETUP
+#line 466 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler3D_type);
+ YY_BREAK
+case 94:
+YY_RULE_SETUP
+#line 467 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isamplerCube_type);
+ YY_BREAK
+case 95:
+YY_RULE_SETUP
+#line 468 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::isampler1DArray_type);
+ YY_BREAK
+case 96:
+YY_RULE_SETUP
+#line 469 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::isampler2DArray_type);
+ YY_BREAK
+case 97:
+YY_RULE_SETUP
+#line 470 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler1D_type);
+ YY_BREAK
+case 98:
+YY_RULE_SETUP
+#line 471 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler2D_type);
+ YY_BREAK
+case 99:
+YY_RULE_SETUP
+#line 472 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler3D_type);
+ YY_BREAK
+case 100:
+YY_RULE_SETUP
+#line 473 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usamplerCube_type);
+ YY_BREAK
+case 101:
+YY_RULE_SETUP
+#line 474 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::usampler1DArray_type);
+ YY_BREAK
+case 102:
+YY_RULE_SETUP
+#line 475 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::usampler2DArray_type);
+ YY_BREAK
+/* additional keywords in ARB_texture_multisample, included in GLSL 1.50 */
+/* these are reserved but not defined in GLSL 3.00 */
+/* [iu]sampler2DMS are defined in GLSL ES 3.10 */
+case 103:
+YY_RULE_SETUP
+#line 480 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(150, 300, 150, 310, yyextra->ARB_texture_multisample_enable, glsl_type::sampler2DMS_type);
+ YY_BREAK
+case 104:
+YY_RULE_SETUP
+#line 481 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(150, 300, 150, 310, yyextra->ARB_texture_multisample_enable, glsl_type::isampler2DMS_type);
+ YY_BREAK
+case 105:
+YY_RULE_SETUP
+#line 482 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(150, 300, 150, 310, yyextra->ARB_texture_multisample_enable, glsl_type::usampler2DMS_type);
+ YY_BREAK
+case 106:
+YY_RULE_SETUP
+#line 483 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(150, 300, 150, 320, yyextra->ARB_texture_multisample_enable || yyextra->OES_texture_storage_multisample_2d_array_enable, glsl_type::sampler2DMSArray_type);
+ YY_BREAK
+case 107:
+YY_RULE_SETUP
+#line 484 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(150, 300, 150, 320, yyextra->ARB_texture_multisample_enable || yyextra->OES_texture_storage_multisample_2d_array_enable, glsl_type::isampler2DMSArray_type);
+ YY_BREAK
+case 108:
+YY_RULE_SETUP
+#line 485 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(150, 300, 150, 320, yyextra->ARB_texture_multisample_enable || yyextra->OES_texture_storage_multisample_2d_array_enable, glsl_type::usampler2DMSArray_type);
+ YY_BREAK
+/* keywords available with ARB_texture_cube_map_array_enable extension on desktop GLSL */
+case 109:
+YY_RULE_SETUP
+#line 488 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::samplerCubeArray_type);
+ YY_BREAK
+case 110:
+YY_RULE_SETUP
+#line 489 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::isamplerCubeArray_type);
+ YY_BREAK
+case 111:
+YY_RULE_SETUP
+#line 490 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::usamplerCubeArray_type);
+ YY_BREAK
+case 112:
+YY_RULE_SETUP
+#line 491 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::samplerCubeArrayShadow_type);
+ YY_BREAK
+case 113:
+YY_RULE_SETUP
+#line 493 "src/compiler/glsl/glsl_lexer.ll"
+{
+ if (yyextra->OES_EGL_image_external_enable || yyextra->OES_EGL_image_external_essl3_enable) {
+ yylval->type = glsl_type::samplerExternalOES_type;
+ return BASIC_TYPE_TOK;
+ } else
+ return IDENTIFIER;
+ }
+ YY_BREAK
+/* keywords available with ARB_gpu_shader5 */
+case 114:
+YY_RULE_SETUP
+#line 502 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(400, 310, 400, 320, yyextra->ARB_gpu_shader5_enable || yyextra->EXT_gpu_shader5_enable || yyextra->OES_gpu_shader5_enable, PRECISE);
+ YY_BREAK
+/* keywords available with ARB_shader_image_load_store */
+case 115:
+YY_RULE_SETUP
+#line 505 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image1D_type);
+ YY_BREAK
+case 116:
+YY_RULE_SETUP
+#line 506 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2D_type);
+ YY_BREAK
+case 117:
+YY_RULE_SETUP
+#line 507 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image3D_type);
+ YY_BREAK
+case 118:
+YY_RULE_SETUP
+#line 508 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DRect_type);
+ YY_BREAK
+case 119:
+YY_RULE_SETUP
+#line 509 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::imageCube_type);
+ YY_BREAK
+case 120:
+YY_RULE_SETUP
+#line 510 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable, glsl_type::imageBuffer_type);
+ YY_BREAK
+case 121:
+YY_RULE_SETUP
+#line 511 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image1DArray_type);
+ YY_BREAK
+case 122:
+YY_RULE_SETUP
+#line 512 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DArray_type);
+ YY_BREAK
+case 123:
+YY_RULE_SETUP
+#line 513 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::imageCubeArray_type);
+ YY_BREAK
+case 124:
+YY_RULE_SETUP
+#line 514 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DMS_type);
+ YY_BREAK
+case 125:
+YY_RULE_SETUP
+#line 515 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DMSArray_type);
+ YY_BREAK
+case 126:
+YY_RULE_SETUP
+#line 516 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage1D_type);
+ YY_BREAK
+case 127:
+YY_RULE_SETUP
+#line 517 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2D_type);
+ YY_BREAK
+case 128:
+YY_RULE_SETUP
+#line 518 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage3D_type);
+ YY_BREAK
+case 129:
+YY_RULE_SETUP
+#line 519 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DRect_type);
+ YY_BREAK
+case 130:
+YY_RULE_SETUP
+#line 520 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimageCube_type);
+ YY_BREAK
+case 131:
+YY_RULE_SETUP
+#line 521 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable, glsl_type::iimageBuffer_type);
+ YY_BREAK
+case 132:
+YY_RULE_SETUP
+#line 522 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage1DArray_type);
+ YY_BREAK
+case 133:
+YY_RULE_SETUP
+#line 523 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DArray_type);
+ YY_BREAK
+case 134:
+YY_RULE_SETUP
+#line 524 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::iimageCubeArray_type);
+ YY_BREAK
+case 135:
+YY_RULE_SETUP
+#line 525 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DMS_type);
+ YY_BREAK
+case 136:
+YY_RULE_SETUP
+#line 526 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DMSArray_type);
+ YY_BREAK
+case 137:
+YY_RULE_SETUP
+#line 527 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage1D_type);
+ YY_BREAK
+case 138:
+YY_RULE_SETUP
+#line 528 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2D_type);
+ YY_BREAK
+case 139:
+YY_RULE_SETUP
+#line 529 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage3D_type);
+ YY_BREAK
+case 140:
+YY_RULE_SETUP
+#line 530 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DRect_type);
+ YY_BREAK
+case 141:
+YY_RULE_SETUP
+#line 531 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimageCube_type);
+ YY_BREAK
+case 142:
+YY_RULE_SETUP
+#line 532 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable, glsl_type::uimageBuffer_type);
+ YY_BREAK
+case 143:
+YY_RULE_SETUP
+#line 533 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage1DArray_type);
+ YY_BREAK
+case 144:
+YY_RULE_SETUP
+#line 534 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DArray_type);
+ YY_BREAK
+case 145:
+YY_RULE_SETUP
+#line 535 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::uimageCubeArray_type);
+ YY_BREAK
+case 146:
+YY_RULE_SETUP
+#line 536 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DMS_type);
+ YY_BREAK
+case 147:
+YY_RULE_SETUP
+#line 537 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DMSArray_type);
+ YY_BREAK
+case 148:
+YY_RULE_SETUP
+#line 538 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, IMAGE1DSHADOW);
+ YY_BREAK
+case 149:
+YY_RULE_SETUP
+#line 539 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, IMAGE2DSHADOW);
+ YY_BREAK
+case 150:
+YY_RULE_SETUP
+#line 540 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, IMAGE1DARRAYSHADOW);
+ YY_BREAK
+case 151:
+YY_RULE_SETUP
+#line 541 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, IMAGE2DARRAYSHADOW);
+ YY_BREAK
+case 152:
+YY_RULE_SETUP
+#line 543 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, COHERENT);
+ YY_BREAK
+case 153:
+YY_RULE_SETUP
+#line 544 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(110, 100, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, VOLATILE);
+ YY_BREAK
+case 154:
+YY_RULE_SETUP
+#line 545 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, RESTRICT);
+ YY_BREAK
+case 155:
+YY_RULE_SETUP
+#line 546 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, READONLY);
+ YY_BREAK
+case 156:
+YY_RULE_SETUP
+#line 547 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, WRITEONLY);
+ YY_BREAK
+case 157:
+YY_RULE_SETUP
+#line 549 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_atomic_counters_enable, glsl_type::atomic_uint_type);
+ YY_BREAK
+case 158:
+YY_RULE_SETUP
+#line 551 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(430, 310, 430, 310, yyextra->ARB_compute_shader_enable, SHARED);
+ YY_BREAK
+case 159:
+YY_RULE_SETUP
+#line 553 "src/compiler/glsl/glsl_lexer.ll"
+return STRUCT;
+ YY_BREAK
+case 160:
+YY_RULE_SETUP
+#line 554 "src/compiler/glsl/glsl_lexer.ll"
+return VOID_TOK;
+ YY_BREAK
+case 161:
+YY_RULE_SETUP
+#line 556 "src/compiler/glsl/glsl_lexer.ll"
+{
+ if ((yyextra->is_version(140, 300))
+ || yyextra->ARB_bindless_texture_enable
+ || yyextra->KHR_blend_equation_advanced_enable
+ || yyextra->AMD_conservative_depth_enable
+ || yyextra->ARB_conservative_depth_enable
+ || yyextra->ARB_explicit_attrib_location_enable
+ || yyextra->ARB_explicit_uniform_location_enable
+ || yyextra->ARB_post_depth_coverage_enable
+ || yyextra->has_separate_shader_objects()
+ || yyextra->ARB_uniform_buffer_object_enable
+ || yyextra->ARB_fragment_coord_conventions_enable
+ || yyextra->ARB_shading_language_420pack_enable
+ || yyextra->ARB_compute_shader_enable
+ || yyextra->ARB_tessellation_shader_enable
+ || yyextra->EXT_shader_framebuffer_fetch_non_coherent_enable) {
+ return LAYOUT_TOK;
+ } else {
+ return classify_identifier(yyextra, yytext, yyleng, yylval);
+ }
+ }
+ YY_BREAK
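+/* "layout" is a contextual keyword: the rule above only returns LAYOUT_TOK
+ * when the shading-language version (1.40 / ES 3.00) or one of the listed
+ * extensions gives layout qualifiers meaning; otherwise the word is handed
+ * to classify_identifier(), presumably so that older shaders that use
+ * "layout" as an ordinary name keep compiling.
+ */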
+case 162:
+YY_RULE_SETUP
+#line 578 "src/compiler/glsl/glsl_lexer.ll"
+return INC_OP;
+ YY_BREAK
+case 163:
+YY_RULE_SETUP
+#line 579 "src/compiler/glsl/glsl_lexer.ll"
+return DEC_OP;
+ YY_BREAK
+case 164:
+YY_RULE_SETUP
+#line 580 "src/compiler/glsl/glsl_lexer.ll"
+return LE_OP;
+ YY_BREAK
+case 165:
+YY_RULE_SETUP
+#line 581 "src/compiler/glsl/glsl_lexer.ll"
+return GE_OP;
+ YY_BREAK
+case 166:
+YY_RULE_SETUP
+#line 582 "src/compiler/glsl/glsl_lexer.ll"
+return EQ_OP;
+ YY_BREAK
+case 167:
+YY_RULE_SETUP
+#line 583 "src/compiler/glsl/glsl_lexer.ll"
+return NE_OP;
+ YY_BREAK
+case 168:
+YY_RULE_SETUP
+#line 584 "src/compiler/glsl/glsl_lexer.ll"
+return AND_OP;
+ YY_BREAK
+case 169:
+YY_RULE_SETUP
+#line 585 "src/compiler/glsl/glsl_lexer.ll"
+return OR_OP;
+ YY_BREAK
+case 170:
+YY_RULE_SETUP
+#line 586 "src/compiler/glsl/glsl_lexer.ll"
+return XOR_OP;
+ YY_BREAK
+case 171:
+YY_RULE_SETUP
+#line 587 "src/compiler/glsl/glsl_lexer.ll"
+return LEFT_OP;
+ YY_BREAK
+case 172:
+YY_RULE_SETUP
+#line 588 "src/compiler/glsl/glsl_lexer.ll"
+return RIGHT_OP;
+ YY_BREAK
+case 173:
+YY_RULE_SETUP
+#line 590 "src/compiler/glsl/glsl_lexer.ll"
+return MUL_ASSIGN;
+ YY_BREAK
+case 174:
+YY_RULE_SETUP
+#line 591 "src/compiler/glsl/glsl_lexer.ll"
+return DIV_ASSIGN;
+ YY_BREAK
+case 175:
+YY_RULE_SETUP
+#line 592 "src/compiler/glsl/glsl_lexer.ll"
+return ADD_ASSIGN;
+ YY_BREAK
+case 176:
+YY_RULE_SETUP
+#line 593 "src/compiler/glsl/glsl_lexer.ll"
+return MOD_ASSIGN;
+ YY_BREAK
+case 177:
+YY_RULE_SETUP
+#line 594 "src/compiler/glsl/glsl_lexer.ll"
+return LEFT_ASSIGN;
+ YY_BREAK
+case 178:
+YY_RULE_SETUP
+#line 595 "src/compiler/glsl/glsl_lexer.ll"
+return RIGHT_ASSIGN;
+ YY_BREAK
+case 179:
+YY_RULE_SETUP
+#line 596 "src/compiler/glsl/glsl_lexer.ll"
+return AND_ASSIGN;
+ YY_BREAK
+case 180:
+YY_RULE_SETUP
+#line 597 "src/compiler/glsl/glsl_lexer.ll"
+return XOR_ASSIGN;
+ YY_BREAK
+case 181:
+YY_RULE_SETUP
+#line 598 "src/compiler/glsl/glsl_lexer.ll"
+return OR_ASSIGN;
+ YY_BREAK
+case 182:
+YY_RULE_SETUP
+#line 599 "src/compiler/glsl/glsl_lexer.ll"
+return SUB_ASSIGN;
+ YY_BREAK
+case 183:
+YY_RULE_SETUP
+#line 601 "src/compiler/glsl/glsl_lexer.ll"
+{
+ return LITERAL_INTEGER(10);
+ }
+ YY_BREAK
+case 184:
+YY_RULE_SETUP
+#line 604 "src/compiler/glsl/glsl_lexer.ll"
+{
+ return LITERAL_INTEGER(16);
+ }
+ YY_BREAK
+case 185:
+YY_RULE_SETUP
+#line 607 "src/compiler/glsl/glsl_lexer.ll"
+{
+ return LITERAL_INTEGER(8);
+ }
+ YY_BREAK
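+/* LITERAL_INTEGER(base), #defined earlier in this file, hands yytext to a
+ * helper that parses the literal in the given radix (10, 16 and 8 for the
+ * three rules above), honours any u/U suffix and, where the 64-bit integer
+ * extensions apply, the l/L suffixes, and returns the matching *CONSTANT
+ * token with the value stored in yylval.
+ */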
+case 186:
+#line 612 "src/compiler/glsl/glsl_lexer.ll"
+case 187:
+#line 613 "src/compiler/glsl/glsl_lexer.ll"
+case 188:
+#line 614 "src/compiler/glsl/glsl_lexer.ll"
+case 189:
+YY_RULE_SETUP
+#line 614 "src/compiler/glsl/glsl_lexer.ll"
+{
+ struct _mesa_glsl_parse_state *state = yyextra;
+ char suffix = yytext[strlen(yytext) - 1];
+ if (!state->is_version(120, 300) &&
+ (suffix == 'f' || suffix == 'F')) {
+ _mesa_glsl_warning(yylloc, state,
+ "Float suffixes are invalid in GLSL 1.10");
+ }
+ yylval->real = _mesa_strtof(yytext, NULL);
+ return FLOATCONSTANT;
+ }
+ YY_BREAK
+case 190:
+#line 627 "src/compiler/glsl/glsl_lexer.ll"
+case 191:
+#line 628 "src/compiler/glsl/glsl_lexer.ll"
+case 192:
+#line 629 "src/compiler/glsl/glsl_lexer.ll"
+case 193:
+YY_RULE_SETUP
+#line 629 "src/compiler/glsl/glsl_lexer.ll"
+{
+ if (!yyextra->is_version(400, 0) &&
+ !yyextra->ARB_gpu_shader_fp64_enable)
+ return ERROR_TOK;
+ yylval->dreal = _mesa_strtod(yytext, NULL);
+ return DOUBLECONSTANT;
+ }
+ YY_BREAK
+case 194:
+YY_RULE_SETUP
+#line 637 "src/compiler/glsl/glsl_lexer.ll"
+{
+ yylval->n = 1;
+ return BOOLCONSTANT;
+ }
+ YY_BREAK
+case 195:
+YY_RULE_SETUP
+#line 641 "src/compiler/glsl/glsl_lexer.ll"
+{
+ yylval->n = 0;
+ return BOOLCONSTANT;
+ }
+ YY_BREAK
+/* Reserved words in GLSL 1.10. */
+case 196:
+YY_RULE_SETUP
+#line 648 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, ASM);
+ YY_BREAK
+case 197:
+YY_RULE_SETUP
+#line 649 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, CLASS);
+ YY_BREAK
+case 198:
+YY_RULE_SETUP
+#line 650 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, UNION);
+ YY_BREAK
+case 199:
+YY_RULE_SETUP
+#line 651 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, ENUM);
+ YY_BREAK
+case 200:
+YY_RULE_SETUP
+#line 652 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, TYPEDEF);
+ YY_BREAK
+case 201:
+YY_RULE_SETUP
+#line 653 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, TEMPLATE);
+ YY_BREAK
+case 202:
+YY_RULE_SETUP
+#line 654 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, THIS);
+ YY_BREAK
+case 203:
+YY_RULE_SETUP
+#line 655 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(110, 100, 140, 300, yyextra->ARB_uniform_buffer_object_enable, PACKED_TOK);
+ YY_BREAK
+case 204:
+YY_RULE_SETUP
+#line 656 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, GOTO);
+ YY_BREAK
+case 205:
+YY_RULE_SETUP
+#line 657 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 130, 300, SWITCH);
+ YY_BREAK
+case 206:
+YY_RULE_SETUP
+#line 658 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 130, 300, DEFAULT);
+ YY_BREAK
+case 207:
+YY_RULE_SETUP
+#line 659 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, INLINE_TOK);
+ YY_BREAK
+case 208:
+YY_RULE_SETUP
+#line 660 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, NOINLINE);
+ YY_BREAK
+case 209:
+YY_RULE_SETUP
+#line 661 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, PUBLIC_TOK);
+ YY_BREAK
+case 210:
+YY_RULE_SETUP
+#line 662 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, STATIC);
+ YY_BREAK
+case 211:
+YY_RULE_SETUP
+#line 663 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, EXTERN);
+ YY_BREAK
+case 212:
+YY_RULE_SETUP
+#line 664 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, EXTERNAL);
+ YY_BREAK
+case 213:
+YY_RULE_SETUP
+#line 665 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, INTERFACE);
+ YY_BREAK
+case 214:
+YY_RULE_SETUP
+#line 666 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, LONG_TOK);
+ YY_BREAK
+case 215:
+YY_RULE_SETUP
+#line 667 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, SHORT_TOK);
+ YY_BREAK
+case 216:
+YY_RULE_SETUP
+#line 668 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 100, 130, 300, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::double_type);
+ YY_BREAK
+case 217:
+YY_RULE_SETUP
+#line 669 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, HALF);
+ YY_BREAK
+case 218:
+YY_RULE_SETUP
+#line 670 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, FIXED_TOK);
+ YY_BREAK
+case 219:
+YY_RULE_SETUP
+#line 671 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(110, 100, 0, 0, yyextra->EXT_gpu_shader4_enable, UNSIGNED);
+ YY_BREAK
+case 220:
+YY_RULE_SETUP
+#line 672 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, INPUT_TOK);
+ YY_BREAK
+case 221:
+YY_RULE_SETUP
+#line 673 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, OUTPUT);
+ YY_BREAK
+case 222:
+YY_RULE_SETUP
+#line 674 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, HVEC2);
+ YY_BREAK
+case 223:
+YY_RULE_SETUP
+#line 675 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, HVEC3);
+ YY_BREAK
+case 224:
+YY_RULE_SETUP
+#line 676 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, HVEC4);
+ YY_BREAK
+case 225:
+YY_RULE_SETUP
+#line 677 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dvec2_type);
+ YY_BREAK
+case 226:
+YY_RULE_SETUP
+#line 678 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dvec3_type);
+ YY_BREAK
+case 227:
+YY_RULE_SETUP
+#line 679 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dvec4_type);
+ YY_BREAK
+case 228:
+YY_RULE_SETUP
+#line 680 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2_type);
+ YY_BREAK
+case 229:
+YY_RULE_SETUP
+#line 681 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3_type);
+ YY_BREAK
+case 230:
+YY_RULE_SETUP
+#line 682 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4_type);
+ YY_BREAK
+case 231:
+YY_RULE_SETUP
+#line 683 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2_type);
+ YY_BREAK
+case 232:
+YY_RULE_SETUP
+#line 684 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2x3_type);
+ YY_BREAK
+case 233:
+YY_RULE_SETUP
+#line 685 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2x4_type);
+ YY_BREAK
+case 234:
+YY_RULE_SETUP
+#line 686 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3x2_type);
+ YY_BREAK
+case 235:
+YY_RULE_SETUP
+#line 687 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3_type);
+ YY_BREAK
+case 236:
+YY_RULE_SETUP
+#line 688 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3x4_type);
+ YY_BREAK
+case 237:
+YY_RULE_SETUP
+#line 689 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4x2_type);
+ YY_BREAK
+case 238:
+YY_RULE_SETUP
+#line 690 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4x3_type);
+ YY_BREAK
+case 239:
+YY_RULE_SETUP
+#line 691 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4_type);
+ YY_BREAK
+case 240:
+YY_RULE_SETUP
+#line 692 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, FVEC2);
+ YY_BREAK
+case 241:
+YY_RULE_SETUP
+#line 693 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, FVEC3);
+ YY_BREAK
+case 242:
+YY_RULE_SETUP
+#line 694 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, FVEC4);
+ YY_BREAK
+case 243:
+YY_RULE_SETUP
+#line 695 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 0, 0, yyextra->ARB_texture_rectangle_enable, glsl_type::sampler2DRect_type);
+ YY_BREAK
+case 244:
+YY_RULE_SETUP
+#line 696 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, SAMPLER3DRECT);
+ YY_BREAK
+case 245:
+YY_RULE_SETUP
+#line 697 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(110, 100, 0, 0, yyextra->ARB_texture_rectangle_enable, glsl_type::sampler2DRectShadow_type);
+ YY_BREAK
+case 246:
+YY_RULE_SETUP
+#line 698 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, SIZEOF);
+ YY_BREAK
+case 247:
+YY_RULE_SETUP
+#line 699 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, CAST);
+ YY_BREAK
+case 248:
+YY_RULE_SETUP
+#line 700 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, NAMESPACE);
+ YY_BREAK
+case 249:
+YY_RULE_SETUP
+#line 701 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(110, 100, 0, 0, USING);
+ YY_BREAK
+/* Additional reserved words in GLSL 1.20. */
+case 250:
+YY_RULE_SETUP
+#line 704 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(120, 100, 130, 100, LOWP);
+ YY_BREAK
+case 251:
+YY_RULE_SETUP
+#line 705 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(120, 100, 130, 100, MEDIUMP);
+ YY_BREAK
+case 252:
+YY_RULE_SETUP
+#line 706 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(120, 100, 130, 100, HIGHP);
+ YY_BREAK
+case 253:
+YY_RULE_SETUP
+#line 707 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(120, 100, 130, 100, PRECISION);
+ YY_BREAK
+/* Additional reserved words in GLSL 1.30. */
+case 254:
+YY_RULE_SETUP
+#line 710 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 130, 300, CASE);
+ YY_BREAK
+case 255:
+YY_RULE_SETUP
+#line 711 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, COMMON);
+ YY_BREAK
+case 256:
+YY_RULE_SETUP
+#line 712 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, PARTITION);
+ YY_BREAK
+case 257:
+YY_RULE_SETUP
+#line 713 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, ACTIVE);
+ YY_BREAK
+case 258:
+YY_RULE_SETUP
+#line 714 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 100, 0, 0, SUPERP);
+ YY_BREAK
+case 259:
+YY_RULE_SETUP
+#line 715 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(130, 300, 140, 320, yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable || (yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_buffer_object), glsl_type::samplerBuffer_type);
+ YY_BREAK
+case 260:
+YY_RULE_SETUP
+#line 716 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(130, 300, 0, 0, FILTER);
+ YY_BREAK
+case 261:
+YY_RULE_SETUP
+#line 717 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(130, 0, 140, 0, yyextra->ARB_uniform_buffer_object_enable && !yyextra->es_shader, ROW_MAJOR);
+ YY_BREAK
+/* Additional reserved words in GLSL 1.40 */
+case 262:
+YY_RULE_SETUP
+#line 720 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(140, 300, 140, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.NV_texture_rectangle && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler2DRect_type);
+ YY_BREAK
+case 263:
+YY_RULE_SETUP
+#line 721 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(140, 300, 140, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.NV_texture_rectangle && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler2DRect_type);
+ YY_BREAK
+case 264:
+YY_RULE_SETUP
+#line 722 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(140, 300, 140, 320, yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable || (yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_buffer_object && yyextra->ctx->Extensions.EXT_texture_integer), glsl_type::isamplerBuffer_type);
+ YY_BREAK
+case 265:
+YY_RULE_SETUP
+#line 723 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(140, 300, 140, 320, yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable || (yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_buffer_object && yyextra->ctx->Extensions.EXT_texture_integer), glsl_type::usamplerBuffer_type);
+ YY_BREAK
+/* Additional reserved words in GLSL ES 3.00 */
+case 266:
+YY_RULE_SETUP
+#line 726 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD(420, 300, 0, 0, RESOURCE);
+ YY_BREAK
+case 267:
+YY_RULE_SETUP
+#line 727 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(400, 300, 400, 320, yyextra->ARB_gpu_shader5_enable || yyextra->OES_shader_multisample_interpolation_enable, SAMPLE);
+ YY_BREAK
+case 268:
+YY_RULE_SETUP
+#line 728 "src/compiler/glsl/glsl_lexer.ll"
+KEYWORD_WITH_ALT(400, 300, 400, 0, yyextra->ARB_shader_subroutine_enable, SUBROUTINE);
+ YY_BREAK
+/* Additional words for ARB_gpu_shader_int64 */
+case 269:
+YY_RULE_SETUP
+#line 731 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::int64_t_type);
+ YY_BREAK
+case 270:
+YY_RULE_SETUP
+#line 732 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::i64vec2_type);
+ YY_BREAK
+case 271:
+YY_RULE_SETUP
+#line 733 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::i64vec3_type);
+ YY_BREAK
+case 272:
+YY_RULE_SETUP
+#line 734 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::i64vec4_type);
+ YY_BREAK
+case 273:
+YY_RULE_SETUP
+#line 736 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::uint64_t_type);
+ YY_BREAK
+case 274:
+YY_RULE_SETUP
+#line 737 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::u64vec2_type);
+ YY_BREAK
+case 275:
+YY_RULE_SETUP
+#line 738 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::u64vec3_type);
+ YY_BREAK
+case 276:
+YY_RULE_SETUP
+#line 739 "src/compiler/glsl/glsl_lexer.ll"
+TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::u64vec4_type);
+ YY_BREAK
+case 277:
+YY_RULE_SETUP
+#line 741 "src/compiler/glsl/glsl_lexer.ll"
+{
+ struct _mesa_glsl_parse_state *state = yyextra;
+ if (state->es_shader && yyleng > 1024) {
+ _mesa_glsl_error(yylloc, state,
+ "Identifier `%s' exceeds 1024 characters",
+ yytext);
+ }
+ return classify_identifier(state, yytext, yyleng, yylval);
+ }
+ YY_BREAK
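+/* The 1024-character check above reflects the GLSL ES specifications, which
+ * permit implementations to reject longer identifiers; desktop GLSL has no
+ * such limit, hence the es_shader test.
+ */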
+case 278:
+YY_RULE_SETUP
+#line 751 "src/compiler/glsl/glsl_lexer.ll"
+{ struct _mesa_glsl_parse_state *state = yyextra;
+ state->is_field = true;
+ return DOT_TOK; }
+ YY_BREAK
+case 279:
+YY_RULE_SETUP
+#line 755 "src/compiler/glsl/glsl_lexer.ll"
+{ return yytext[0]; }
+ YY_BREAK
+case 280:
+YY_RULE_SETUP
+#line 757 "src/compiler/glsl/glsl_lexer.ll"
+YY_FATAL_ERROR( "flex scanner jammed" );
+ YY_BREAK
+#line 3550 "src/compiler/glsl/glsl_lexer.cpp"
+case YY_STATE_EOF(INITIAL):
+case YY_STATE_EOF(PP):
+case YY_STATE_EOF(PRAGMA):
+ yyterminate();
+
+ case YY_END_OF_BUFFER:
+ {
+ /* Amount of text matched not including the EOB char. */
+ int yy_amount_of_matched_text = (int) (yy_cp - yyg->yytext_ptr) - 1;
+
+ /* Undo the effects of YY_DO_BEFORE_ACTION. */
+ *yy_cp = yyg->yy_hold_char;
+ YY_RESTORE_YY_MORE_OFFSET
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_NEW )
+ {
+ /* We're scanning a new file or input source. It's
+ * possible that this happened because the user
+ * just pointed yyin at a new source and called
+ * yylex(). If so, then we have to assure
+ * consistency between YY_CURRENT_BUFFER and our
+ * globals. Here is the right place to do so, because
+ * this is the first action (other than possibly a
+ * back-up) that will match for the new input source.
+ */
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ YY_CURRENT_BUFFER_LVALUE->yy_input_file = yyin;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status = YY_BUFFER_NORMAL;
+ }
+
+ /* Note that here we test for yy_c_buf_p "<=" to the position
+ * of the first EOB in the buffer, since yy_c_buf_p will
+ * already have been incremented past the NUL character
+ * (since all states make transitions on EOB to the
+ * end-of-buffer state). Contrast this with the test
+ * in input().
+ */
+ if ( yyg->yy_c_buf_p <= &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )
+ { /* This was really a NUL. */
+ yy_state_type yy_next_state;
+
+ yyg->yy_c_buf_p = yyg->yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ /* Okay, we're now positioned to make the NUL
+ * transition. We couldn't have
+ * yy_get_previous_state() go ahead and do it
+ * for us because it doesn't know how to deal
+ * with the possibility of jamming (and we don't
+ * want to build jamming into it because then it
+ * will run more slowly).
+ */
+
+ yy_next_state = yy_try_NUL_trans( yy_current_state , yyscanner);
+
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+
+ if ( yy_next_state )
+ {
+ /* Consume the NUL. */
+ yy_cp = ++yyg->yy_c_buf_p;
+ yy_current_state = yy_next_state;
+ goto yy_match;
+ }
+
+ else
+ {
+ yy_cp = yyg->yy_last_accepting_cpos;
+ yy_current_state = yyg->yy_last_accepting_state;
+ goto yy_find_action;
+ }
+ }
+
+ else switch ( yy_get_next_buffer( yyscanner ) )
+ {
+ case EOB_ACT_END_OF_FILE:
+ {
+ yyg->yy_did_buffer_switch_on_eof = 0;
+
+ if ( yywrap( yyscanner ) )
+ {
+ /* Note: because we've taken care in
+ * yy_get_next_buffer() to have set up
+ * yytext, we can now set up
+ * yy_c_buf_p so that if some total
+ * hoser (like flex itself) wants to
+ * call the scanner after we return the
+ * YY_NULL, it'll still work - another
+ * YY_NULL will get returned.
+ */
+ yyg->yy_c_buf_p = yyg->yytext_ptr + YY_MORE_ADJ;
+
+ yy_act = YY_STATE_EOF(YY_START);
+ goto do_action;
+ }
+
+ else
+ {
+ if ( ! yyg->yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+ }
+ break;
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yyg->yy_c_buf_p =
+ yyg->yytext_ptr + yy_amount_of_matched_text;
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ yy_cp = yyg->yy_c_buf_p;
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+ goto yy_match;
+
+ case EOB_ACT_LAST_MATCH:
+ yyg->yy_c_buf_p =
+ &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars];
+
+ yy_current_state = yy_get_previous_state( yyscanner );
+
+ yy_cp = yyg->yy_c_buf_p;
+ yy_bp = yyg->yytext_ptr + YY_MORE_ADJ;
+ goto yy_find_action;
+ }
+ break;
+ }
+
+ default:
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--no action found" );
+ } /* end of action switch */
+ } /* end of scanning one token */
+ } /* end of user's declarations */
+} /* end of yylex */
+
+/* yy_get_next_buffer - try to read in a new buffer
+ *
+ * Returns a code representing an action:
+ * EOB_ACT_LAST_MATCH -
+ * EOB_ACT_CONTINUE_SCAN - continue scanning from current position
+ * EOB_ACT_END_OF_FILE - end of file
+ */
+static int yy_get_next_buffer (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ char *dest = YY_CURRENT_BUFFER_LVALUE->yy_ch_buf;
+ char *source = yyg->yytext_ptr;
+ int number_to_move, i;
+ int ret_val;
+
+ if ( yyg->yy_c_buf_p > &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] )
+ YY_FATAL_ERROR(
+ "fatal flex scanner internal error--end of buffer missed" );
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_fill_buffer == 0 )
+ { /* Don't try to fill the buffer, so this is an EOF. */
+ if ( yyg->yy_c_buf_p - yyg->yytext_ptr - YY_MORE_ADJ == 1 )
+ {
+ /* We matched a single character, the EOB, so
+ * treat this as a final EOF.
+ */
+ return EOB_ACT_END_OF_FILE;
+ }
+
+ else
+ {
+ /* We matched some text prior to the EOB, first
+ * process it.
+ */
+ return EOB_ACT_LAST_MATCH;
+ }
+ }
+
+ /* Try to read more data. */
+
+ /* First move last chars to start of buffer. */
+ number_to_move = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr - 1);
+
+ for ( i = 0; i < number_to_move; ++i )
+ *(dest++) = *(source++);
+
+ if ( YY_CURRENT_BUFFER_LVALUE->yy_buffer_status == YY_BUFFER_EOF_PENDING )
+ /* don't do the read, it's not guaranteed to return an EOF,
+ * just force an EOF
+ */
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars = 0;
+
+ else
+ {
+ int num_to_read =
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size - number_to_move - 1;
+
+ while ( num_to_read <= 0 )
+ { /* Not enough room in the buffer - grow it. */
+
+ /* just a shorter name for the current buffer */
+ YY_BUFFER_STATE b = YY_CURRENT_BUFFER_LVALUE;
+
+ int yy_c_buf_p_offset =
+ (int) (yyg->yy_c_buf_p - b->yy_ch_buf);
+
+ if ( b->yy_is_our_buffer )
+ {
+ int new_size = b->yy_buf_size * 2;
+
+ if ( new_size <= 0 )
+ b->yy_buf_size += b->yy_buf_size / 8;
+ else
+ b->yy_buf_size *= 2;
+
+ b->yy_ch_buf = (char *)
+				/* Include room for 2 EOB chars. */
+ yyrealloc( (void *) b->yy_ch_buf,
+ (yy_size_t) (b->yy_buf_size + 2) , yyscanner );
+ }
+ else
+ /* Can't grow it, we don't own it. */
+ b->yy_ch_buf = NULL;
+
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR(
+ "fatal error - scanner input buffer overflow" );
+
+ yyg->yy_c_buf_p = &b->yy_ch_buf[yy_c_buf_p_offset];
+
+ num_to_read = YY_CURRENT_BUFFER_LVALUE->yy_buf_size -
+ number_to_move - 1;
+
+ }
+
+ if ( num_to_read > YY_READ_BUF_SIZE )
+ num_to_read = YY_READ_BUF_SIZE;
+
+ /* Read in more data. */
+ YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
+ yyg->yy_n_chars, num_to_read );
+
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ if ( yyg->yy_n_chars == 0 )
+ {
+ if ( number_to_move == YY_MORE_ADJ )
+ {
+ ret_val = EOB_ACT_END_OF_FILE;
+ yyrestart( yyin , yyscanner);
+ }
+
+ else
+ {
+ ret_val = EOB_ACT_LAST_MATCH;
+ YY_CURRENT_BUFFER_LVALUE->yy_buffer_status =
+ YY_BUFFER_EOF_PENDING;
+ }
+ }
+
+ else
+ ret_val = EOB_ACT_CONTINUE_SCAN;
+
+ if ((yyg->yy_n_chars + number_to_move) > YY_CURRENT_BUFFER_LVALUE->yy_buf_size) {
+ /* Extend the array by 50%, plus the number we really need. */
+ int new_size = yyg->yy_n_chars + number_to_move + (yyg->yy_n_chars >> 1);
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf = (char *) yyrealloc(
+ (void *) YY_CURRENT_BUFFER_LVALUE->yy_ch_buf, (yy_size_t) new_size , yyscanner );
+ if ( ! YY_CURRENT_BUFFER_LVALUE->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_get_next_buffer()" );
+ /* "- 2" to take care of EOB's */
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_size = (int) (new_size - 2);
+ }
+
+ yyg->yy_n_chars += number_to_move;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] = YY_END_OF_BUFFER_CHAR;
+ YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars + 1] = YY_END_OF_BUFFER_CHAR;
+
+ yyg->yytext_ptr = &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[0];
+
+ return ret_val;
+}
+
+/* yy_get_previous_state - get the state just before the EOB char was reached */
+
+ static yy_state_type yy_get_previous_state (yyscan_t yyscanner)
+{
+ yy_state_type yy_current_state;
+ char *yy_cp;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_current_state = yyg->yy_start;
+ yy_current_state += YY_AT_BOL();
+
+ for ( yy_cp = yyg->yytext_ptr + YY_MORE_ADJ; yy_cp < yyg->yy_c_buf_p; ++yy_cp )
+ {
+ YY_CHAR yy_c = (*yy_cp ? yy_ec[YY_SC_TO_UI(*yy_cp)] : 1);
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 1112 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ }
+
+ return yy_current_state;
+}
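+/* The loop above is flex's standard compressed-table DFA step: yy_ec[] maps
+ * an input byte to its character equivalence class, yy_base[] plus that
+ * class indexes the packed transition table, and yy_chk[] verifies that the
+ * packed entry really belongs to the current state.  On a mismatch the walk
+ * falls back through yy_def[] default states (switching to meta-equivalence
+ * classes via yy_meta[] once the default chain reaches the template states
+ * at 1112 and above) until yy_nxt[] yields the successor state.
+ */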
+
+/* yy_try_NUL_trans - try to make a transition on the NUL character
+ *
+ * synopsis
+ * next_state = yy_try_NUL_trans( current_state );
+ */
+ static yy_state_type yy_try_NUL_trans (yy_state_type yy_current_state , yyscan_t yyscanner)
+{
+ int yy_is_jam;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner; /* This var may be unused depending upon options. */
+ char *yy_cp = yyg->yy_c_buf_p;
+
+ YY_CHAR yy_c = 1;
+ if ( yy_accept[yy_current_state] )
+ {
+ yyg->yy_last_accepting_state = yy_current_state;
+ yyg->yy_last_accepting_cpos = yy_cp;
+ }
+ while ( yy_chk[yy_base[yy_current_state] + yy_c] != yy_current_state )
+ {
+ yy_current_state = (int) yy_def[yy_current_state];
+ if ( yy_current_state >= 1112 )
+ yy_c = yy_meta[yy_c];
+ }
+ yy_current_state = yy_nxt[yy_base[yy_current_state] + yy_c];
+ yy_is_jam = (yy_current_state == 1111);
+
+ (void)yyg;
+ return yy_is_jam ? 0 : yy_current_state;
+}
+
+#ifndef YY_NO_UNPUT
+
+#endif
+
+#ifndef YY_NO_INPUT
+#ifdef __cplusplus
+ static int yyinput (yyscan_t yyscanner)
+#else
+ static int input (yyscan_t yyscanner)
+#endif
+
+{
+ int c;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+
+ if ( *yyg->yy_c_buf_p == YY_END_OF_BUFFER_CHAR )
+ {
+ /* yy_c_buf_p now points to the character we want to return.
+ * If this occurs *before* the EOB characters, then it's a
+ * valid NUL; if not, then we've hit the end of the buffer.
+ */
+ if ( yyg->yy_c_buf_p < &YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[yyg->yy_n_chars] )
+ /* This was really a NUL. */
+ *yyg->yy_c_buf_p = '\0';
+
+ else
+ { /* need more input */
+ int offset = (int) (yyg->yy_c_buf_p - yyg->yytext_ptr);
+ ++yyg->yy_c_buf_p;
+
+ switch ( yy_get_next_buffer( yyscanner ) )
+ {
+ case EOB_ACT_LAST_MATCH:
+					/* This happens because yy_get_next_buffer()
+					 * sees that we've accumulated a
+ * token and flags that we need to
+ * try matching the token before
+ * proceeding. But for input(),
+ * there's no matching to consider.
+ * So convert the EOB_ACT_LAST_MATCH
+ * to EOB_ACT_END_OF_FILE.
+ */
+
+ /* Reset buffer status. */
+ yyrestart( yyin , yyscanner);
+
+ /*FALLTHROUGH*/
+
+ case EOB_ACT_END_OF_FILE:
+ {
+ if ( yywrap( yyscanner ) )
+ return 0;
+
+ if ( ! yyg->yy_did_buffer_switch_on_eof )
+ YY_NEW_FILE;
+#ifdef __cplusplus
+ return yyinput(yyscanner);
+#else
+ return input(yyscanner);
+#endif
+ }
+
+ case EOB_ACT_CONTINUE_SCAN:
+ yyg->yy_c_buf_p = yyg->yytext_ptr + offset;
+ break;
+ }
+ }
+ }
+
+ c = *(unsigned char *) yyg->yy_c_buf_p; /* cast for 8-bit char's */
+ *yyg->yy_c_buf_p = '\0'; /* preserve yytext */
+ yyg->yy_hold_char = *++yyg->yy_c_buf_p;
+
+ YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (c == '\n');
+
+ return c;
+}
+#endif /* ifndef YY_NO_INPUT */
+
+/** Immediately switch to a different input stream.
+ * @param input_file A readable stream.
+ * @param yyscanner The scanner object.
+ * @note This function does not reset the start condition to @c INITIAL .
+ */
+ void yyrestart (FILE * input_file , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if ( ! YY_CURRENT_BUFFER ){
+ yyensure_buffer_stack (yyscanner);
+ YY_CURRENT_BUFFER_LVALUE =
+ yy_create_buffer( yyin, YY_BUF_SIZE , yyscanner);
+ }
+
+ yy_init_buffer( YY_CURRENT_BUFFER, input_file , yyscanner);
+ yy_load_buffer_state( yyscanner );
+}
+
+/** Switch to a different input buffer.
+ * @param new_buffer The new input buffer.
+ * @param yyscanner The scanner object.
+ */
+ void yy_switch_to_buffer (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* TODO. We should be able to replace this entire function body
+ * with
+ * yypop_buffer_state();
+ * yypush_buffer_state(new_buffer);
+ */
+ yyensure_buffer_stack (yyscanner);
+ if ( YY_CURRENT_BUFFER == new_buffer )
+ return;
+
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+ yy_load_buffer_state( yyscanner );
+
+ /* We don't actually know whether we did this switch during
+ * EOF (yywrap()) processing, but the only time this flag
+ * is looked at is after yywrap() is called, so it's safe
+ * to go ahead and always set it.
+ */
+ yyg->yy_did_buffer_switch_on_eof = 1;
+}
+
+static void yy_load_buffer_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyg->yy_n_chars = YY_CURRENT_BUFFER_LVALUE->yy_n_chars;
+ yyg->yytext_ptr = yyg->yy_c_buf_p = YY_CURRENT_BUFFER_LVALUE->yy_buf_pos;
+ yyin = YY_CURRENT_BUFFER_LVALUE->yy_input_file;
+ yyg->yy_hold_char = *yyg->yy_c_buf_p;
+}
+
+/** Allocate and initialize an input buffer state.
+ * @param file A readable stream.
+ * @param size The character buffer size in bytes. When in doubt, use @c YY_BUF_SIZE.
+ * @param yyscanner The scanner object.
+ * @return the allocated buffer state.
+ */
+ YY_BUFFER_STATE yy_create_buffer (FILE * file, int size , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) , yyscanner );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_buf_size = size;
+
+ /* yy_ch_buf has to be 2 characters longer than the size given because
+ * we need to put in 2 end-of-buffer characters.
+ */
+ b->yy_ch_buf = (char *) yyalloc( (yy_size_t) (b->yy_buf_size + 2) , yyscanner );
+ if ( ! b->yy_ch_buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_create_buffer()" );
+
+ b->yy_is_our_buffer = 1;
+
+ yy_init_buffer( b, file , yyscanner);
+
+ return b;
+}
+
+/** Destroy the buffer.
+ * @param b a buffer created with yy_create_buffer()
+ * @param yyscanner The scanner object.
+ */
+ void yy_delete_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if ( ! b )
+ return;
+
+ if ( b == YY_CURRENT_BUFFER ) /* Not sure if we should pop here. */
+ YY_CURRENT_BUFFER_LVALUE = (YY_BUFFER_STATE) 0;
+
+ if ( b->yy_is_our_buffer )
+ yyfree( (void *) b->yy_ch_buf , yyscanner );
+
+ yyfree( (void *) b , yyscanner );
+}
+
+/* Initializes or reinitializes a buffer.
+ * This function is sometimes called more than once on the same buffer,
+ * such as during a yyrestart() or at EOF.
+ */
+ static void yy_init_buffer (YY_BUFFER_STATE b, FILE * file , yyscan_t yyscanner)
+
+{
+ int oerrno = errno;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ yy_flush_buffer( b , yyscanner);
+
+ b->yy_input_file = file;
+ b->yy_fill_buffer = 1;
+
+ /* If b is the current buffer, then yy_init_buffer was _probably_
+ * called from yyrestart() or through yy_get_next_buffer.
+ * In that case, we don't want to reset the lineno or column.
+ */
+ if (b != YY_CURRENT_BUFFER){
+ b->yy_bs_lineno = 1;
+ b->yy_bs_column = 0;
+ }
+
+ b->yy_is_interactive = 0;
+
+ errno = oerrno;
+}
+
+/** Discard all buffered characters. On the next scan, YY_INPUT will be called.
+ * @param b the buffer state to be flushed, usually @c YY_CURRENT_BUFFER.
+ * @param yyscanner The scanner object.
+ */
+ void yy_flush_buffer (YY_BUFFER_STATE b , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if ( ! b )
+ return;
+
+ b->yy_n_chars = 0;
+
+ /* We always need two end-of-buffer characters. The first causes
+ * a transition to the end-of-buffer state. The second causes
+ * a jam in that state.
+ */
+ b->yy_ch_buf[0] = YY_END_OF_BUFFER_CHAR;
+ b->yy_ch_buf[1] = YY_END_OF_BUFFER_CHAR;
+
+ b->yy_buf_pos = &b->yy_ch_buf[0];
+
+ b->yy_at_bol = 1;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ if ( b == YY_CURRENT_BUFFER )
+ yy_load_buffer_state( yyscanner );
+}
+
+/** Pushes the new state onto the stack. The new state becomes
+ * the current state. This function will allocate the stack
+ * if necessary.
+ * @param new_buffer The new state.
+ * @param yyscanner The scanner object.
+ */
+void yypush_buffer_state (YY_BUFFER_STATE new_buffer , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if (new_buffer == NULL)
+ return;
+
+ yyensure_buffer_stack(yyscanner);
+
+ /* This block is copied from yy_switch_to_buffer. */
+ if ( YY_CURRENT_BUFFER )
+ {
+ /* Flush out information for old buffer. */
+ *yyg->yy_c_buf_p = yyg->yy_hold_char;
+ YY_CURRENT_BUFFER_LVALUE->yy_buf_pos = yyg->yy_c_buf_p;
+ YY_CURRENT_BUFFER_LVALUE->yy_n_chars = yyg->yy_n_chars;
+ }
+
+ /* Only push if top exists. Otherwise, replace top. */
+ if (YY_CURRENT_BUFFER)
+ yyg->yy_buffer_stack_top++;
+ YY_CURRENT_BUFFER_LVALUE = new_buffer;
+
+ /* copied from yy_switch_to_buffer. */
+ yy_load_buffer_state( yyscanner );
+ yyg->yy_did_buffer_switch_on_eof = 1;
+}
+
+/** Removes and deletes the top of the stack, if present.
+ * The next element becomes the new top.
+ * @param yyscanner The scanner object.
+ */
+void yypop_buffer_state (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ if (!YY_CURRENT_BUFFER)
+ return;
+
+ yy_delete_buffer(YY_CURRENT_BUFFER , yyscanner);
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ if (yyg->yy_buffer_stack_top > 0)
+ --yyg->yy_buffer_stack_top;
+
+ if (YY_CURRENT_BUFFER) {
+ yy_load_buffer_state( yyscanner );
+ yyg->yy_did_buffer_switch_on_eof = 1;
+ }
+}
+
+/* Allocates the stack if it does not exist.
+ * Guarantees space for at least one push.
+ */
+static void yyensure_buffer_stack (yyscan_t yyscanner)
+{
+ yy_size_t num_to_alloc;
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (!yyg->yy_buffer_stack) {
+
+ /* First allocation is just for 2 elements, since we don't know if this
+ * scanner will even need a stack. We use 2 instead of 1 to avoid an
+ * immediate realloc on the next call.
+ */
+ num_to_alloc = 1; /* After all that talk, this was set to 1 anyways... */
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)yyalloc
+ (num_to_alloc * sizeof(struct yy_buffer_state*)
+ , yyscanner);
+ if ( ! yyg->yy_buffer_stack )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+ memset(yyg->yy_buffer_stack, 0, num_to_alloc * sizeof(struct yy_buffer_state*));
+
+ yyg->yy_buffer_stack_max = num_to_alloc;
+ yyg->yy_buffer_stack_top = 0;
+ return;
+ }
+
+ if (yyg->yy_buffer_stack_top >= (yyg->yy_buffer_stack_max) - 1){
+
+ /* Increase the buffer to prepare for a possible push. */
+ yy_size_t grow_size = 8 /* arbitrary grow size */;
+
+ num_to_alloc = yyg->yy_buffer_stack_max + grow_size;
+ yyg->yy_buffer_stack = (struct yy_buffer_state**)yyrealloc
+ (yyg->yy_buffer_stack,
+ num_to_alloc * sizeof(struct yy_buffer_state*)
+ , yyscanner);
+ if ( ! yyg->yy_buffer_stack )
+ YY_FATAL_ERROR( "out of dynamic memory in yyensure_buffer_stack()" );
+
+		/* Zero only the new slots. */
+ memset(yyg->yy_buffer_stack + yyg->yy_buffer_stack_max, 0, grow_size * sizeof(struct yy_buffer_state*));
+ yyg->yy_buffer_stack_max = num_to_alloc;
+ }
+}
+
+/** Set up the input buffer state to scan directly from a user-specified character buffer.
+ * @param base the character buffer
+ * @param size the size in bytes of the character buffer
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_buffer (char * base, yy_size_t size , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+
+ if ( size < 2 ||
+ base[size-2] != YY_END_OF_BUFFER_CHAR ||
+ base[size-1] != YY_END_OF_BUFFER_CHAR )
+ /* They forgot to leave room for the EOB's. */
+ return NULL;
+
+ b = (YY_BUFFER_STATE) yyalloc( sizeof( struct yy_buffer_state ) , yyscanner );
+ if ( ! b )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_buffer()" );
+
+ b->yy_buf_size = (int) (size - 2); /* "- 2" to take care of EOB's */
+ b->yy_buf_pos = b->yy_ch_buf = base;
+ b->yy_is_our_buffer = 0;
+ b->yy_input_file = NULL;
+ b->yy_n_chars = b->yy_buf_size;
+ b->yy_is_interactive = 0;
+ b->yy_at_bol = 1;
+ b->yy_fill_buffer = 0;
+ b->yy_buffer_status = YY_BUFFER_NEW;
+
+ yy_switch_to_buffer( b , yyscanner );
+
+ return b;
+}
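+/* Note the contract above: yy_scan_buffer() scans *in place* and therefore
+ * requires the caller to supply the two trailing YY_END_OF_BUFFER_CHAR (NUL)
+ * bytes itself, returning NULL when they are missing.  yy_scan_string() and
+ * yy_scan_bytes() below satisfy that contract by copying the input into a
+ * freshly allocated buffer.
+ */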
+
+/** Set up the input buffer state to scan a string. The next call to yylex() will
+ * scan from a @e copy of @a str.
+ * @param yystr a NUL-terminated string to scan
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ * @note If you want to scan bytes that may contain NUL values, then use
+ * yy_scan_bytes() instead.
+ */
+YY_BUFFER_STATE yy_scan_string (const char * yystr , yyscan_t yyscanner)
+{
+
+ return yy_scan_bytes( yystr, (int) strlen(yystr) , yyscanner);
+}
+
+/** Set up the input buffer state to scan the given bytes. The next call to yylex() will
+ * scan from a @e copy of @a bytes.
+ * @param yybytes the byte buffer to scan
+ * @param _yybytes_len the number of bytes in the buffer pointed to by @a bytes.
+ * @param yyscanner The scanner object.
+ * @return the newly allocated buffer state object.
+ */
+YY_BUFFER_STATE yy_scan_bytes (const char * yybytes, int _yybytes_len , yyscan_t yyscanner)
+{
+ YY_BUFFER_STATE b;
+ char *buf;
+ yy_size_t n;
+ int i;
+
+ /* Get memory for full buffer, including space for trailing EOB's. */
+ n = (yy_size_t) (_yybytes_len + 2);
+ buf = (char *) yyalloc( n , yyscanner );
+ if ( ! buf )
+ YY_FATAL_ERROR( "out of dynamic memory in yy_scan_bytes()" );
+
+ for ( i = 0; i < _yybytes_len; ++i )
+ buf[i] = yybytes[i];
+
+ buf[_yybytes_len] = buf[_yybytes_len+1] = YY_END_OF_BUFFER_CHAR;
+
+ b = yy_scan_buffer( buf, n , yyscanner);
+ if ( ! b )
+ YY_FATAL_ERROR( "bad buffer in yy_scan_bytes()" );
+
+ /* It's okay to grow etc. this buffer, and we should throw it
+ * away when we're done.
+ */
+ b->yy_is_our_buffer = 1;
+
+ return b;
+}
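+/* A minimal usage sketch for the reentrant API above, assuming a prepared
+ * _mesa_glsl_parse_state for yyextra (the yy* names shown here are remapped
+ * to the scanner's real prefix by the #defines near the top of this file):
+ *
+ *    yyscan_t scanner;
+ *    YYSTYPE lval;
+ *    YYLTYPE lloc;
+ *    yylex_init(&scanner);
+ *    yyset_extra(state, scanner);             // parse state read by the rules
+ *    yy_scan_string(shader_source, scanner);  // lexes a copy of the string
+ *    while (yylex(&lval, &lloc, scanner) != 0)
+ *       ;                                     // consume tokens until EOF
+ *    yylex_destroy(scanner);
+ */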
+
+#ifndef YY_EXIT_FAILURE
+#define YY_EXIT_FAILURE 2
+#endif
+
+static void yynoreturn yy_fatal_error (const char* msg , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+ fprintf( stderr, "%s\n", msg );
+ exit( YY_EXIT_FAILURE );
+}
+
+/* Redefine yyless() so it works in section 3 code. */
+
+#undef yyless
+#define yyless(n) \
+ do \
+ { \
+ /* Undo effects of setting up yytext. */ \
+ int yyless_macro_arg = (n); \
+ YY_LESS_LINENO(yyless_macro_arg);\
+ yytext[yyleng] = yyg->yy_hold_char; \
+ yyg->yy_c_buf_p = yytext + yyless_macro_arg; \
+ yyg->yy_hold_char = *yyg->yy_c_buf_p; \
+ *yyg->yy_c_buf_p = '\0'; \
+ yyleng = yyless_macro_arg; \
+ } \
+ while ( 0 )
+
+/* Accessor methods (get/set functions) to struct members. */
+
+/** Get the user-defined data for this scanner.
+ * @param yyscanner The scanner object.
+ */
+YY_EXTRA_TYPE yyget_extra (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyextra;
+}
+
+/** Get the current line number.
+ * @param yyscanner The scanner object.
+ */
+int yyget_lineno (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (! YY_CURRENT_BUFFER)
+ return 0;
+
+ return yylineno;
+}
+
+/** Get the current column number.
+ * @param yyscanner The scanner object.
+ */
+int yyget_column (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ if (! YY_CURRENT_BUFFER)
+ return 0;
+
+ return yycolumn;
+}
+
+/** Get the input stream.
+ * @param yyscanner The scanner object.
+ */
+FILE *yyget_in (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyin;
+}
+
+/** Get the output stream.
+ * @param yyscanner The scanner object.
+ */
+FILE *yyget_out (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyout;
+}
+
+/** Get the length of the current token.
+ * @param yyscanner The scanner object.
+ */
+int yyget_leng (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yyleng;
+}
+
+/** Get the current token.
+ * @param yyscanner The scanner object.
+ */
+char *yyget_text (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yytext;
+}
+
+/** Set the user-defined data. This data is never touched by the scanner.
+ * @param user_defined The data to be associated with this scanner.
+ * @param yyscanner The scanner object.
+ */
+void yyset_extra (YY_EXTRA_TYPE user_defined , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyextra = user_defined ;
+}
+
+/** Set the current line number.
+ * @param _line_number line number
+ * @param yyscanner The scanner object.
+ */
+void yyset_lineno (int _line_number , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* lineno is only valid if an input buffer exists. */
+ if (! YY_CURRENT_BUFFER )
+ YY_FATAL_ERROR( "yyset_lineno called with no buffer" );
+
+ yylineno = _line_number;
+}
+
+/** Set the current column.
+ * @param _column_no column number
+ * @param yyscanner The scanner object.
+ */
+void yyset_column (int _column_no , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* column is only valid if an input buffer exists. */
+ if (! YY_CURRENT_BUFFER )
+ YY_FATAL_ERROR( "yyset_column called with no buffer" );
+
+ yycolumn = _column_no;
+}
+
+/** Set the input stream. This does not discard the current
+ * input buffer.
+ * @param _in_str A readable stream.
+ * @param yyscanner The scanner object.
+ * @see yy_switch_to_buffer
+ */
+void yyset_in (FILE * _in_str , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyin = _in_str ;
+}
+
+void yyset_out (FILE * _out_str , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yyout = _out_str ;
+}
+
+int yyget_debug (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yy_flex_debug;
+}
+
+void yyset_debug (int _bdebug , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yy_flex_debug = _bdebug ;
+}
+
+/* Accessor methods for yylval and yylloc */
+
+YYSTYPE * yyget_lval (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yylval;
+}
+
+void yyset_lval (YYSTYPE * yylval_param , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yylval = yylval_param;
+}
+
+YYLTYPE *yyget_lloc (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ return yylloc;
+}
+
+void yyset_lloc (YYLTYPE * yylloc_param , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ yylloc = yylloc_param;
+}
+
+/* User-visible API */
+
+/* yylex_init is special because it creates the scanner itself, so it is
+ * the ONLY reentrant function that doesn't take the scanner as the last argument.
+ * That's why we explicitly handle the declaration, instead of using our macros.
+ */
+int yylex_init(yyscan_t* ptr_yy_globals)
+{
+ if (ptr_yy_globals == NULL){
+ errno = EINVAL;
+ return 1;
+ }
+
+ *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), NULL );
+
+ if (*ptr_yy_globals == NULL){
+ errno = ENOMEM;
+ return 1;
+ }
+
+ /* By setting to 0xAA, we expose bugs in yy_init_globals. Leave at 0x00 for releases. */
+ memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
+
+ return yy_init_globals ( *ptr_yy_globals );
+}
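+
+/* A minimal lifecycle sketch for the reentrant API (illustrative only; the
+ * yylex() signature follows from the bison-bridge/bison-locations options
+ * this scanner is generated with):
+ *
+ *     yyscan_t scanner;
+ *     YYSTYPE lval;
+ *     YYLTYPE lloc;
+ *     if (yylex_init(&scanner) != 0)
+ *         return;                        // errno is EINVAL or ENOMEM
+ *     yy_scan_string("void main() {}", scanner);
+ *     while (yylex(&lval, &lloc, scanner) != 0)
+ *         ;                              // consume tokens until EOF (0)
+ *     yylex_destroy(scanner);
+ */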
+
+/* yylex_init_extra has the same functionality as yylex_init, but follows the
+ * convention of taking the scanner as the last argument. Note, however, that
+ * this is a *pointer* to a scanner, since the scanner is allocated by this
+ * call (which is also why this function must handle its own declaration).
+ * The user-defined value in the first argument will be available to yyalloc
+ * in the yyextra field.
+ */
+int yylex_init_extra( YY_EXTRA_TYPE yy_user_defined, yyscan_t* ptr_yy_globals )
+{
+ struct yyguts_t dummy_yyguts;
+
+ yyset_extra (yy_user_defined, &dummy_yyguts);
+
+ if (ptr_yy_globals == NULL){
+ errno = EINVAL;
+ return 1;
+ }
+
+ *ptr_yy_globals = (yyscan_t) yyalloc ( sizeof( struct yyguts_t ), &dummy_yyguts );
+
+ if (*ptr_yy_globals == NULL){
+ errno = ENOMEM;
+ return 1;
+ }
+
+    /* By setting to 0xAA, we expose bugs in yy_init_globals.
+     * Leave at 0x00 for releases. */
+ memset(*ptr_yy_globals,0x00,sizeof(struct yyguts_t));
+
+ yyset_extra (yy_user_defined, *ptr_yy_globals);
+
+ return yy_init_globals ( *ptr_yy_globals );
+}
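+
+/* A sketch of why the dummy guts above matter (illustrative only): before the
+ * real scanner exists, yyalloc() is handed &dummy_yyguts as its yyscanner
+ * argument, so a replacement allocator could already call yyget_extra() and
+ * consult the user-defined data while allocating the scanner itself.  The
+ * default yyalloc() below simply wraps malloc() and ignores it. */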
+
+static int yy_init_globals (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ /* Initialization is the same as for the non-reentrant scanner.
+ * This function is called from yylex_destroy(), so don't allocate here.
+ */
+
+ yyg->yy_buffer_stack = NULL;
+ yyg->yy_buffer_stack_top = 0;
+ yyg->yy_buffer_stack_max = 0;
+ yyg->yy_c_buf_p = NULL;
+ yyg->yy_init = 0;
+ yyg->yy_start = 0;
+
+ yyg->yy_start_stack_ptr = 0;
+ yyg->yy_start_stack_depth = 0;
+ yyg->yy_start_stack = NULL;
+
+/* Defined in main.c */
+#ifdef YY_STDINIT
+ yyin = stdin;
+ yyout = stdout;
+#else
+ yyin = NULL;
+ yyout = NULL;
+#endif
+
+ /* For future reference: Set errno on error, since we are called by
+ * yylex_init()
+ */
+ return 0;
+}
+
+/* yylex_destroy is for both reentrant and non-reentrant scanners. */
+int yylex_destroy (yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+
+ /* Pop the buffer stack, destroying each element. */
+ while(YY_CURRENT_BUFFER){
+ yy_delete_buffer( YY_CURRENT_BUFFER , yyscanner );
+ YY_CURRENT_BUFFER_LVALUE = NULL;
+ yypop_buffer_state(yyscanner);
+ }
+
+ /* Destroy the stack itself. */
+ yyfree(yyg->yy_buffer_stack , yyscanner);
+ yyg->yy_buffer_stack = NULL;
+
+ /* Destroy the start condition stack. */
+ yyfree( yyg->yy_start_stack , yyscanner );
+ yyg->yy_start_stack = NULL;
+
+ /* Reset the globals. This is important in a non-reentrant scanner so the next time
+ * yylex() is called, initialization will occur. */
+ yy_init_globals( yyscanner);
+
+ /* Destroy the main struct (reentrant only). */
+ yyfree ( yyscanner , yyscanner );
+ yyscanner = NULL;
+ return 0;
+}
+
+/*
+ * Internal utility routines.
+ */
+
+#ifndef yytext_ptr
+static void yy_flex_strncpy (char* s1, const char * s2, int n , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+
+ int i;
+ for ( i = 0; i < n; ++i )
+ s1[i] = s2[i];
+}
+#endif
+
+#ifdef YY_NEED_STRLEN
+static int yy_flex_strlen (const char * s , yyscan_t yyscanner)
+{
+ int n;
+ for ( n = 0; s[n]; ++n )
+ ;
+
+ return n;
+}
+#endif
+
+void *yyalloc (yy_size_t size , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+ return malloc(size);
+}
+
+void *yyrealloc (void * ptr, yy_size_t size , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+
+ /* The cast to (char *) in the following accommodates both
+ * implementations that use char* generic pointers, and those
+ * that use void* generic pointers. It works with the latter
+ * because both ANSI C and C++ allow castless assignment from
+ * any pointer type to void*, and deal with argument conversions
+ * as though doing an assignment.
+ */
+ return realloc(ptr, size);
+}
+
+void yyfree (void * ptr , yyscan_t yyscanner)
+{
+ struct yyguts_t * yyg = (struct yyguts_t*)yyscanner;
+ (void)yyg;
+ free( (char *) ptr ); /* see yyrealloc() for (char *) cast */
+}
+
+#define YYTABLES_NAME "yytables"
+
+#line 757 "src/compiler/glsl/glsl_lexer.ll"
+
+
+int
+classify_identifier(struct _mesa_glsl_parse_state *state, const char *name,
+ unsigned name_len, YYSTYPE *output)
+{
+   /* We're not using linear_strdup here: that would make an implicit call
+    * to strlen() to find the length of the string, which flex has already
+    * computed and stored in yyleng.
+    */
+ char *id = (char *) linear_alloc_child(state->linalloc, name_len + 1);
+ memcpy(id, name, name_len + 1);
+ output->identifier = id;
+
+ if (state->is_field) {
+ state->is_field = false;
+ return FIELD_SELECTION;
+ }
+ if (state->symbols->get_variable(name) || state->symbols->get_function(name))
+ return IDENTIFIER;
+ else if (state->symbols->get_type(name))
+ return TYPE_IDENTIFIER;
+ else
+ return NEW_IDENTIFIER;
+}
+
+void
+_mesa_glsl_lexer_ctor(struct _mesa_glsl_parse_state *state, const char *string)
+{
+ yylex_init_extra(state, & state->scanner);
+ yy_scan_string(string, state->scanner);
+}
+
+void
+_mesa_glsl_lexer_dtor(struct _mesa_glsl_parse_state *state)
+{
+ yylex_destroy(state->scanner);
+}
+
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.ll b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.ll
new file mode 100644
index 0000000000..7d7ee0c00f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_lexer.ll
@@ -0,0 +1,793 @@
+%{
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <ctype.h>
+#include <limits.h>
+#include "util/strtod.h"
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "glsl_parser.h"
+#include "main/mtypes.h"
+
+static int classify_identifier(struct _mesa_glsl_parse_state *, const char *,
+ unsigned name_len, YYSTYPE *output);
+
+#ifdef _MSC_VER
+#define YY_NO_UNISTD_H
+#endif
+
+#define YY_NO_INPUT
+#define YY_USER_ACTION \
+ do { \
+ yylloc->first_column = yycolumn + 1; \
+ yylloc->first_line = yylloc->last_line = yylineno + 1; \
+ yycolumn += yyleng; \
+ yylloc->last_column = yycolumn + 1; \
+ } while(0);
+
+#define YY_USER_INIT yylineno = 0; yycolumn = 0; yylloc->source = 0; \
+ yylloc->path = NULL;
+
+/* A macro for handling reserved words and keywords across language versions.
+ *
+ * Certain words start out as identifiers, become reserved words in
+ * later language revisions, and finally become language keywords.
+ * This may happen at different times in desktop GLSL and GLSL ES.
+ *
+ * For example, consider the following lexer rule:
+ * samplerBuffer KEYWORD(130, 0, 140, 0, SAMPLERBUFFER)
+ *
+ * This means that "samplerBuffer" will be treated as:
+ * - a keyword (SAMPLERBUFFER token) ...in GLSL >= 1.40
+ * - a reserved word - error ...in GLSL >= 1.30
+ * - an identifier ...in GLSL < 1.30 or GLSL ES
+ */
+#define KEYWORD(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, token) \
+ KEYWORD_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, false, token)
+
+/**
+ * Like the KEYWORD macro, but the word is also treated as a keyword
+ * if the given boolean expression is true.
+ */
+#define KEYWORD_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ alt_expr, token) \
+ do { \
+ if (yyextra->is_version(allowed_glsl, allowed_glsl_es) \
+ || (alt_expr)) { \
+ return token; \
+ } else if (yyextra->is_version(reserved_glsl, \
+ reserved_glsl_es)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else { \
+ return classify_identifier(yyextra, yytext, yyleng, yylval); \
+ } \
+ } while (0)
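+
+/* An illustrative sketch (not part of the scanner) of how the example rule
+ * from the comment above expands for desktop GLSL:
+ *
+ *     samplerBuffer KEYWORD(130, 0, 140, 0, SAMPLERBUFFER)
+ *
+ * behaves like:
+ *
+ *     if (yyextra->is_version(140, 0))      return SAMPLERBUFFER; // keyword
+ *     else if (yyextra->is_version(130, 0)) return ERROR_TOK;     // reserved
+ *     else return classify_identifier(yyextra, yytext, yyleng, yylval);
+ */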
+
+/**
+ * Like KEYWORD_WITH_ALT, but used for built-in GLSL types
+ */
+#define TYPE_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ alt_expr, gtype) \
+ do { \
+ if (yyextra->is_version(allowed_glsl, allowed_glsl_es) \
+ || (alt_expr)) { \
+ yylval->type = gtype; \
+ return BASIC_TYPE_TOK; \
+ } else if (yyextra->is_version(reserved_glsl, \
+ reserved_glsl_es)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else { \
+ return classify_identifier(yyextra, yytext, yyleng, yylval); \
+ } \
+ } while (0)
+
+#define TYPE(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ gtype) \
+ TYPE_WITH_ALT(reserved_glsl, reserved_glsl_es, \
+ allowed_glsl, allowed_glsl_es, \
+ false, gtype)
+
+/**
+ * A macro for handling keywords that have been present in GLSL since
+ * its origin, but were changed into reserved words in GLSL 3.00 ES.
+ */
+#define DEPRECATED_ES_KEYWORD(token) \
+ do { \
+ if (yyextra->is_version(0, 300)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else { \
+ return token; \
+ } \
+ } while (0)
+
+/**
+ * Like DEPRECATED_ES_KEYWORD, but for types
+ */
+#define DEPRECATED_ES_TYPE_WITH_ALT(alt_expr, gtype) \
+ do { \
+ if (yyextra->is_version(0, 300)) { \
+ _mesa_glsl_error(yylloc, yyextra, \
+ "illegal use of reserved word `%s'", yytext); \
+ return ERROR_TOK; \
+ } else if (alt_expr) { \
+ yylval->type = gtype; \
+ return BASIC_TYPE_TOK; \
+ } else { \
+ return classify_identifier(yyextra, yytext, yyleng, yylval); \
+ } \
+ } while (0)
+
+#define DEPRECATED_ES_TYPE(gtype) \
+ DEPRECATED_ES_TYPE_WITH_ALT(true, gtype)
+
+static int
+literal_integer(char *text, int len, struct _mesa_glsl_parse_state *state,
+ YYSTYPE *lval, YYLTYPE *lloc, int base)
+{
+ bool is_uint = (text[len - 1] == 'u' ||
+ text[len - 1] == 'U');
+ bool is_long = (text[len - 1] == 'l' || text[len - 1] == 'L');
+ const char *digits = text;
+
+ if (is_long)
+ is_uint = (text[len - 2] == 'u' && text[len - 1] == 'l') ||
+ (text[len - 2] == 'U' && text[len - 1] == 'L');
+ /* Skip "0x" */
+ if (base == 16)
+ digits += 2;
+
+ unsigned long long value = strtoull(digits, NULL, base);
+
+ if (is_long)
+ lval->n64 = (int64_t)value;
+ else
+ lval->n = (int)value;
+
+ if (is_long && !is_uint && base == 10 && value > (uint64_t)LLONG_MAX + 1) {
+      /* Try to catch a negative value that was provided unintentionally. */
+ _mesa_glsl_warning(lloc, state,
+ "signed literal value `%s' is interpreted as %lld",
+ text, lval->n64);
+ } else if (!is_long && value > UINT_MAX) {
+ /* Note that signed 0xffffffff is valid, not out of range! */
+ if (state->is_version(130, 300)) {
+ _mesa_glsl_error(lloc, state,
+ "literal value `%s' out of range", text);
+ } else {
+ _mesa_glsl_warning(lloc, state,
+ "literal value `%s' out of range", text);
+ }
+ } else if (base == 10 && !is_uint && (unsigned)value > (unsigned)INT_MAX + 1) {
+      /* Try to catch a negative value that was provided unintentionally.
+ * Note that -2147483648 is parsed as -(2147483648), so we don't
+ * want to warn for INT_MAX.
+ */
+ _mesa_glsl_warning(lloc, state,
+ "signed literal value `%s' is interpreted as %d",
+ text, lval->n);
+ }
+ if (is_long)
+ return is_uint ? UINT64CONSTANT : INT64CONSTANT;
+ else
+ return is_uint ? UINTCONSTANT : INTCONSTANT;
+}
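+
+/* Illustrative examples of the classification above (the 64-bit forms assume
+ * ARB_gpu_shader_int64 or equivalent support at parse time):
+ *
+ *     "42"   -> INTCONSTANT       "42u"  -> UINTCONSTANT
+ *     "42l"  -> INT64CONSTANT     "42ul" -> UINT64CONSTANT
+ *     "0x2A" -> INTCONSTANT       (conversion starts after the "0x" prefix)
+ */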
+
+#define LITERAL_INTEGER(base) \
+ literal_integer(yytext, yyleng, yyextra, yylval, yylloc, base)
+
+%}
+
+%option bison-bridge bison-locations reentrant noyywrap
+%option nounput noyy_top_state
+%option never-interactive
+%option prefix="_mesa_glsl_lexer_"
+%option extra-type="struct _mesa_glsl_parse_state *"
+%option warn nodefault
+
+ /* Note: When adding any start conditions to this list, you must also
+ * update the "Internal compiler error" catch-all rule near the end of
+ * this file. */
+%x PP PRAGMA
+
+DEC_INT [1-9][0-9]*
+HEX_INT 0[xX][0-9a-fA-F]+
+OCT_INT 0[0-7]*
+INT ({DEC_INT}|{HEX_INT}|{OCT_INT})
+SPC [ \t]*
+SPCP [ \t]+
+HASH ^{SPC}#{SPC}
+PATH ["][./ _A-Za-z0-9]*["]
+%%
+
+[ \r\t]+ ;
+
+ /* Preprocessor tokens. */
+^[ \t]*#[ \t]*$ ;
+^[ \t]*#[ \t]*version { BEGIN PP; return VERSION_TOK; }
+^[ \t]*#[ \t]*extension { BEGIN PP; return EXTENSION; }
+{HASH}include {
+ if (!yyextra->ARB_shading_language_include_enable) {
+ struct _mesa_glsl_parse_state *state = yyextra;
+ _mesa_glsl_error(yylloc, state,
+ "ARB_shading_language_include required "
+ "to use #include");
+ }
+}
+{HASH}line{SPCP}{INT}{SPCP}{INT}{SPC}$ {
+ /* Eat characters until the first digit is
+ * encountered
+ */
+ char *ptr = yytext;
+ while (!isdigit(*ptr))
+ ptr++;
+
+ /* Subtract one from the line number because
+ * yylineno is zero-based instead of
+ * one-based.
+ */
+ yylineno = strtol(ptr, &ptr, 0) - 1;
+
+ /* From GLSL 3.30 and GLSL ES on, after processing the
+ * line directive (including its new-line), the implementation
+ * will behave as if it is compiling at the line number passed
+ * as argument. It was line number + 1 in older specifications.
+ */
+ if (yyextra->is_version(330, 100))
+ yylineno--;
+
+ yylloc->source = strtol(ptr, NULL, 0);
+ yylloc->path = NULL;
+ }
+{HASH}line{SPCP}{INT}{SPCP}{PATH}{SPC}$ {
+ if (!yyextra->ARB_shading_language_include_enable) {
+ struct _mesa_glsl_parse_state *state = yyextra;
+ _mesa_glsl_error(yylloc, state,
+ "ARB_shading_language_include required "
+ "to use #line <line> \"<path>\"");
+ }
+
+ /* Eat characters until the first digit is
+ * encountered
+ */
+ char *ptr = yytext;
+ while (!isdigit(*ptr))
+ ptr++;
+
+ /* Subtract one from the line number because
+ * yylineno is zero-based instead of
+ * one-based.
+ */
+ yylineno = strtol(ptr, &ptr, 0) - 1;
+
+ /* From GLSL 3.30 and GLSL ES on, after processing the
+ * line directive (including its new-line), the implementation
+ * will behave as if it is compiling at the line number passed
+ * as argument. It was line number + 1 in older specifications.
+ */
+ if (yyextra->is_version(330, 100))
+ yylineno--;
+
+ while (isspace(*ptr))
+ ptr++;
+
+ /* Skip over leading " */
+ ptr++;
+
+ char *end = strrchr(ptr, '"');
+ int path_len = (end - ptr) + 1;
+ void *mem_ctx = yyextra->linalloc;
+ yylloc->path = (char *) linear_alloc_child(mem_ctx, path_len);
+ memcpy(yylloc->path, ptr, path_len);
+ yylloc->path[path_len - 1] = '\0';
+ }
+{HASH}line{SPCP}{INT}{SPC}$ {
+ /* Eat characters until the first digit is
+ * encountered
+ */
+ char *ptr = yytext;
+ while (!isdigit(*ptr))
+ ptr++;
+
+ /* Subtract one from the line number because
+ * yylineno is zero-based instead of
+ * one-based.
+ */
+ yylineno = strtol(ptr, &ptr, 0) - 1;
+
+ /* From GLSL 3.30 and GLSL ES on, after processing the
+ * line directive (including its new-line), the implementation
+ * will behave as if it is compiling at the line number passed
+ * as argument. It was line number + 1 in older specifications.
+ */
+ if (yyextra->is_version(330, 100))
+ yylineno--;
+ }
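+
+ /* A worked example of the #line handling above (a sketch, not a rule):
+  * given "#line 5 7", yylineno becomes 4 and yylloc->source becomes 7.
+  * Once the directive's newline is consumed, tokens on the next line
+  * report line 6 in GLSL < 3.30 (number + 1), but line 5 from GLSL 3.30
+  * and GLSL ES on, thanks to the extra decrement. */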
+^{SPC}#{SPC}pragma{SPCP}debug{SPC}\({SPC}on{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_DEBUG_ON;
+ }
+^{SPC}#{SPC}pragma{SPCP}debug{SPC}\({SPC}off{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_DEBUG_OFF;
+ }
+^{SPC}#{SPC}pragma{SPCP}optimize{SPC}\({SPC}on{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_OPTIMIZE_ON;
+ }
+^{SPC}#{SPC}pragma{SPCP}optimize{SPC}\({SPC}off{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_OPTIMIZE_OFF;
+ }
+^{SPC}#{SPC}pragma{SPCP}warning{SPC}\({SPC}on{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_WARNING_ON;
+ }
+^{SPC}#{SPC}pragma{SPCP}warning{SPC}\({SPC}off{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_WARNING_OFF;
+ }
+^{SPC}#{SPC}pragma{SPCP}STDGL{SPCP}invariant{SPC}\({SPC}all{SPC}\) {
+ BEGIN PP;
+ return PRAGMA_INVARIANT_ALL;
+ }
+^{SPC}#{SPC}pragma{SPCP} { BEGIN PRAGMA; }
+
+<PRAGMA>\n { BEGIN 0; yylineno++; yycolumn = 0; }
+<PRAGMA>. { }
+
+<PP>\/\/[^\n]* { }
+<PP>[ \t\r]* { }
+<PP>: return COLON;
+<PP>[_a-zA-Z][_a-zA-Z0-9]* {
+			  /* We're not using linear_strdup here: that would make an
+			   * implicit call to strlen() to find the length of the string,
+			   * which flex has already computed and stored in yyleng.
+			   */
+ void *mem_ctx = yyextra->linalloc;
+ char *id = (char *) linear_alloc_child(mem_ctx, yyleng + 1);
+ memcpy(id, yytext, yyleng + 1);
+ yylval->identifier = id;
+ return IDENTIFIER;
+ }
+<PP>[1-9][0-9]* {
+ yylval->n = strtol(yytext, NULL, 10);
+ return INTCONSTANT;
+ }
+<PP>0 {
+ yylval->n = 0;
+ return INTCONSTANT;
+ }
+<PP>\n { BEGIN 0; yylineno++; yycolumn = 0; return EOL; }
+<PP>. { return yytext[0]; }
+
+\n { yylineno++; yycolumn = 0; }
+
+attribute DEPRECATED_ES_KEYWORD(ATTRIBUTE);
+const return CONST_TOK;
+bool { yylval->type = glsl_type::bool_type; return BASIC_TYPE_TOK; }
+float { yylval->type = glsl_type::float_type; return BASIC_TYPE_TOK; }
+int { yylval->type = glsl_type::int_type; return BASIC_TYPE_TOK; }
+uint TYPE(130, 300, 130, 300, glsl_type::uint_type);
+
+break return BREAK;
+continue return CONTINUE;
+do return DO;
+while return WHILE;
+else return ELSE;
+for return FOR;
+if return IF;
+discard return DISCARD;
+return return RETURN;
+demote KEYWORD_WITH_ALT(0, 0, 0, 0, yyextra->EXT_demote_to_helper_invocation_enable, DEMOTE);
+
+bvec2 { yylval->type = glsl_type::bvec2_type; return BASIC_TYPE_TOK; }
+bvec3 { yylval->type = glsl_type::bvec3_type; return BASIC_TYPE_TOK; }
+bvec4 { yylval->type = glsl_type::bvec4_type; return BASIC_TYPE_TOK; }
+ivec2 { yylval->type = glsl_type::ivec2_type; return BASIC_TYPE_TOK; }
+ivec3 { yylval->type = glsl_type::ivec3_type; return BASIC_TYPE_TOK; }
+ivec4 { yylval->type = glsl_type::ivec4_type; return BASIC_TYPE_TOK; }
+uvec2 TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::uvec2_type);
+uvec3 TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::uvec3_type);
+uvec4 TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::uvec4_type);
+vec2 { yylval->type = glsl_type::vec2_type; return BASIC_TYPE_TOK; }
+vec3 { yylval->type = glsl_type::vec3_type; return BASIC_TYPE_TOK; }
+vec4 { yylval->type = glsl_type::vec4_type; return BASIC_TYPE_TOK; }
+mat2 { yylval->type = glsl_type::mat2_type; return BASIC_TYPE_TOK; }
+mat3 { yylval->type = glsl_type::mat3_type; return BASIC_TYPE_TOK; }
+mat4 { yylval->type = glsl_type::mat4_type; return BASIC_TYPE_TOK; }
+mat2x2 TYPE(120, 300, 120, 300, glsl_type::mat2_type);
+mat2x3 TYPE(120, 300, 120, 300, glsl_type::mat2x3_type);
+mat2x4 TYPE(120, 300, 120, 300, glsl_type::mat2x4_type);
+mat3x2 TYPE(120, 300, 120, 300, glsl_type::mat3x2_type);
+mat3x3 TYPE(120, 300, 120, 300, glsl_type::mat3_type);
+mat3x4 TYPE(120, 300, 120, 300, glsl_type::mat3x4_type);
+mat4x2 TYPE(120, 300, 120, 300, glsl_type::mat4x2_type);
+mat4x3 TYPE(120, 300, 120, 300, glsl_type::mat4x3_type);
+mat4x4 TYPE(120, 300, 120, 300, glsl_type::mat4_type);
+
+in return IN_TOK;
+out return OUT_TOK;
+inout return INOUT_TOK;
+uniform return UNIFORM;
+buffer KEYWORD_WITH_ALT(0, 0, 430, 310, yyextra->ARB_shader_storage_buffer_object_enable, BUFFER);
+varying DEPRECATED_ES_KEYWORD(VARYING);
+centroid KEYWORD_WITH_ALT(120, 300, 120, 300, yyextra->EXT_gpu_shader4_enable, CENTROID);
+invariant KEYWORD(120, 100, 120, 100, INVARIANT);
+flat KEYWORD_WITH_ALT(130, 100, 130, 300, yyextra->EXT_gpu_shader4_enable, FLAT);
+smooth KEYWORD(130, 300, 130, 300, SMOOTH);
+noperspective KEYWORD_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable, NOPERSPECTIVE);
+patch KEYWORD_WITH_ALT(0, 300, 400, 320, yyextra->has_tessellation_shader(), PATCH);
+
+sampler1D DEPRECATED_ES_TYPE(glsl_type::sampler1D_type);
+sampler2D { yylval->type = glsl_type::sampler2D_type; return BASIC_TYPE_TOK; }
+sampler3D { yylval->type = glsl_type::sampler3D_type; return BASIC_TYPE_TOK; }
+samplerCube { yylval->type = glsl_type::samplerCube_type; return BASIC_TYPE_TOK; }
+sampler1DArray TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler1DArray_type);
+sampler2DArray TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler2DArray_type);
+sampler1DShadow DEPRECATED_ES_TYPE(glsl_type::sampler1DShadow_type);
+sampler2DShadow { yylval->type = glsl_type::sampler2DShadow_type; return BASIC_TYPE_TOK; }
+samplerCubeShadow TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable, glsl_type::samplerCubeShadow_type);
+sampler1DArrayShadow TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler1DArrayShadow_type);
+sampler2DArrayShadow TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::sampler2DArrayShadow_type);
+isampler1D TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler1D_type);
+isampler2D TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler2D_type);
+isampler3D TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler3D_type);
+isamplerCube TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isamplerCube_type);
+isampler1DArray TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::isampler1DArray_type);
+isampler2DArray TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::isampler2DArray_type);
+usampler1D TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler1D_type);
+usampler2D TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler2D_type);
+usampler3D TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler3D_type);
+usamplerCube TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usamplerCube_type);
+usampler1DArray TYPE_WITH_ALT(130, 300, 130, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::usampler1DArray_type);
+usampler2DArray TYPE_WITH_ALT(130, 300, 130, 300, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_integer && yyextra->ctx->Extensions.EXT_texture_array, glsl_type::usampler2DArray_type);
+
+ /* additional keywords in ARB_texture_multisample, included in GLSL 1.50 */
+ /* these are reserved but not defined in GLSL 3.00 */
+ /* [iu]sampler2DMS are defined in GLSL ES 3.10 */
+sampler2DMS TYPE_WITH_ALT(150, 300, 150, 310, yyextra->ARB_texture_multisample_enable, glsl_type::sampler2DMS_type);
+isampler2DMS TYPE_WITH_ALT(150, 300, 150, 310, yyextra->ARB_texture_multisample_enable, glsl_type::isampler2DMS_type);
+usampler2DMS TYPE_WITH_ALT(150, 300, 150, 310, yyextra->ARB_texture_multisample_enable, glsl_type::usampler2DMS_type);
+sampler2DMSArray TYPE_WITH_ALT(150, 300, 150, 320, yyextra->ARB_texture_multisample_enable || yyextra->OES_texture_storage_multisample_2d_array_enable, glsl_type::sampler2DMSArray_type);
+isampler2DMSArray TYPE_WITH_ALT(150, 300, 150, 320, yyextra->ARB_texture_multisample_enable || yyextra->OES_texture_storage_multisample_2d_array_enable, glsl_type::isampler2DMSArray_type);
+usampler2DMSArray TYPE_WITH_ALT(150, 300, 150, 320, yyextra->ARB_texture_multisample_enable || yyextra->OES_texture_storage_multisample_2d_array_enable, glsl_type::usampler2DMSArray_type);
+
+ /* keywords available with ARB_texture_cube_map_array_enable extension on desktop GLSL */
+samplerCubeArray TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::samplerCubeArray_type);
+isamplerCubeArray TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::isamplerCubeArray_type);
+usamplerCubeArray TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::usamplerCubeArray_type);
+samplerCubeArrayShadow TYPE_WITH_ALT(400, 310, 400, 320, yyextra->ARB_texture_cube_map_array_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::samplerCubeArrayShadow_type);
+
+samplerExternalOES {
+ if (yyextra->OES_EGL_image_external_enable || yyextra->OES_EGL_image_external_essl3_enable) {
+ yylval->type = glsl_type::samplerExternalOES_type;
+ return BASIC_TYPE_TOK;
+ } else
+ return IDENTIFIER;
+ }
+
+ /* keywords available with ARB_gpu_shader5 */
+precise KEYWORD_WITH_ALT(400, 310, 400, 320, yyextra->ARB_gpu_shader5_enable || yyextra->EXT_gpu_shader5_enable || yyextra->OES_gpu_shader5_enable, PRECISE);
+
+ /* keywords available with ARB_shader_image_load_store */
+image1D TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image1D_type);
+image2D TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2D_type);
+image3D TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image3D_type);
+image2DRect TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DRect_type);
+imageCube TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::imageCube_type);
+imageBuffer TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable, glsl_type::imageBuffer_type);
+image1DArray TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image1DArray_type);
+image2DArray TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DArray_type);
+imageCubeArray TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::imageCubeArray_type);
+image2DMS TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DMS_type);
+image2DMSArray TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::image2DMSArray_type);
+iimage1D TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage1D_type);
+iimage2D TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2D_type);
+iimage3D TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage3D_type);
+iimage2DRect TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DRect_type);
+iimageCube TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimageCube_type);
+iimageBuffer TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable, glsl_type::iimageBuffer_type);
+iimage1DArray TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage1DArray_type);
+iimage2DArray TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DArray_type);
+iimageCubeArray TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::iimageCubeArray_type);
+iimage2DMS TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DMS_type);
+iimage2DMSArray TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::iimage2DMSArray_type);
+uimage1D TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage1D_type);
+uimage2D TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2D_type);
+uimage3D TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage3D_type);
+uimage2DRect TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DRect_type);
+uimageCube TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimageCube_type);
+uimageBuffer TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable, glsl_type::uimageBuffer_type);
+uimage1DArray TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage1DArray_type);
+uimage2DArray TYPE_WITH_ALT(130, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DArray_type);
+uimageCubeArray TYPE_WITH_ALT(130, 300, 420, 320, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->OES_texture_cube_map_array_enable || yyextra->EXT_texture_cube_map_array_enable, glsl_type::uimageCubeArray_type);
+uimage2DMS TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DMS_type);
+uimage2DMSArray TYPE_WITH_ALT(130, 300, 420, 0, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable, glsl_type::uimage2DMSArray_type);
+image1DShadow KEYWORD(130, 300, 0, 0, IMAGE1DSHADOW);
+image2DShadow KEYWORD(130, 300, 0, 0, IMAGE2DSHADOW);
+image1DArrayShadow KEYWORD(130, 300, 0, 0, IMAGE1DARRAYSHADOW);
+image2DArrayShadow KEYWORD(130, 300, 0, 0, IMAGE2DARRAYSHADOW);
+
+coherent KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, COHERENT);
+volatile KEYWORD_WITH_ALT(110, 100, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, VOLATILE);
+restrict KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->EXT_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, RESTRICT);
+readonly KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, READONLY);
+writeonly KEYWORD_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_image_load_store_enable || yyextra->ARB_shader_storage_buffer_object_enable, WRITEONLY);
+
+atomic_uint TYPE_WITH_ALT(420, 300, 420, 310, yyextra->ARB_shader_atomic_counters_enable, glsl_type::atomic_uint_type);
+
+shared KEYWORD_WITH_ALT(430, 310, 430, 310, yyextra->ARB_compute_shader_enable, SHARED);
+
+struct return STRUCT;
+void return VOID_TOK;
+
+layout {
+ if ((yyextra->is_version(140, 300))
+ || yyextra->ARB_bindless_texture_enable
+ || yyextra->KHR_blend_equation_advanced_enable
+ || yyextra->AMD_conservative_depth_enable
+ || yyextra->ARB_conservative_depth_enable
+ || yyextra->ARB_explicit_attrib_location_enable
+ || yyextra->ARB_explicit_uniform_location_enable
+ || yyextra->ARB_post_depth_coverage_enable
+ || yyextra->has_separate_shader_objects()
+ || yyextra->ARB_uniform_buffer_object_enable
+ || yyextra->ARB_fragment_coord_conventions_enable
+ || yyextra->ARB_shading_language_420pack_enable
+ || yyextra->ARB_compute_shader_enable
+ || yyextra->ARB_tessellation_shader_enable
+ || yyextra->EXT_shader_framebuffer_fetch_non_coherent_enable) {
+ return LAYOUT_TOK;
+ } else {
+ return classify_identifier(yyextra, yytext, yyleng, yylval);
+ }
+ }
+
+\+\+ return INC_OP;
+-- return DEC_OP;
+\<= return LE_OP;
+>= return GE_OP;
+== return EQ_OP;
+!= return NE_OP;
+&& return AND_OP;
+\|\| return OR_OP;
+"^^" return XOR_OP;
+"<<" return LEFT_OP;
+">>" return RIGHT_OP;
+
+\*= return MUL_ASSIGN;
+\/= return DIV_ASSIGN;
+\+= return ADD_ASSIGN;
+\%= return MOD_ASSIGN;
+\<\<= return LEFT_ASSIGN;
+>>= return RIGHT_ASSIGN;
+&= return AND_ASSIGN;
+"^=" return XOR_ASSIGN;
+\|= return OR_ASSIGN;
+-= return SUB_ASSIGN;
+
+[1-9][0-9]*([uU]|[lL]|ul|UL)? {
+ return LITERAL_INTEGER(10);
+ }
+0[xX][0-9a-fA-F]+([uU]|[lL]|ul|UL)? {
+ return LITERAL_INTEGER(16);
+ }
+0[0-7]*([uU]|[lL]|ul|UL)? {
+ return LITERAL_INTEGER(8);
+ }
+
+[0-9]+\.[0-9]+([eE][+-]?[0-9]+)?[fF]? |
+\.[0-9]+([eE][+-]?[0-9]+)?[fF]? |
+[0-9]+\.([eE][+-]?[0-9]+)?[fF]? |
+[0-9]+[eE][+-]?[0-9]+[fF]? {
+ struct _mesa_glsl_parse_state *state = yyextra;
+ char suffix = yytext[strlen(yytext) - 1];
+ if (!state->is_version(120, 300) &&
+ (suffix == 'f' || suffix == 'F')) {
+ _mesa_glsl_warning(yylloc, state,
+ "Float suffixes are invalid in GLSL 1.10");
+ }
+ yylval->real = _mesa_strtof(yytext, NULL);
+ return FLOATCONSTANT;
+ }
+
+[0-9]+\.[0-9]+([eE][+-]?[0-9]+)?(lf|LF) |
+\.[0-9]+([eE][+-]?[0-9]+)?(lf|LF) |
+[0-9]+\.([eE][+-]?[0-9]+)?(lf|LF) |
+[0-9]+[eE][+-]?[0-9]+(lf|LF) {
+ if (!yyextra->is_version(400, 0) &&
+ !yyextra->ARB_gpu_shader_fp64_enable)
+ return ERROR_TOK;
+ yylval->dreal = _mesa_strtod(yytext, NULL);
+ return DOUBLECONSTANT;
+ }
+
+true {
+ yylval->n = 1;
+ return BOOLCONSTANT;
+ }
+false {
+ yylval->n = 0;
+ return BOOLCONSTANT;
+ }
+
+
+ /* Reserved words in GLSL 1.10. */
+asm KEYWORD(110, 100, 0, 0, ASM);
+class KEYWORD(110, 100, 0, 0, CLASS);
+union KEYWORD(110, 100, 0, 0, UNION);
+enum KEYWORD(110, 100, 0, 0, ENUM);
+typedef KEYWORD(110, 100, 0, 0, TYPEDEF);
+template KEYWORD(110, 100, 0, 0, TEMPLATE);
+this KEYWORD(110, 100, 0, 0, THIS);
+packed KEYWORD_WITH_ALT(110, 100, 140, 300, yyextra->ARB_uniform_buffer_object_enable, PACKED_TOK);
+goto KEYWORD(110, 100, 0, 0, GOTO);
+switch KEYWORD(110, 100, 130, 300, SWITCH);
+default KEYWORD(110, 100, 130, 300, DEFAULT);
+inline KEYWORD(110, 100, 0, 0, INLINE_TOK);
+noinline KEYWORD(110, 100, 0, 0, NOINLINE);
+public KEYWORD(110, 100, 0, 0, PUBLIC_TOK);
+static KEYWORD(110, 100, 0, 0, STATIC);
+extern KEYWORD(110, 100, 0, 0, EXTERN);
+external KEYWORD(110, 100, 0, 0, EXTERNAL);
+interface KEYWORD(110, 100, 0, 0, INTERFACE);
+long KEYWORD(110, 100, 0, 0, LONG_TOK);
+short KEYWORD(110, 100, 0, 0, SHORT_TOK);
+double TYPE_WITH_ALT(130, 100, 130, 300, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::double_type);
+half KEYWORD(110, 100, 0, 0, HALF);
+fixed KEYWORD(110, 100, 0, 0, FIXED_TOK);
+unsigned KEYWORD_WITH_ALT(110, 100, 0, 0, yyextra->EXT_gpu_shader4_enable, UNSIGNED);
+input KEYWORD(110, 100, 0, 0, INPUT_TOK);
+output KEYWORD(110, 100, 0, 0, OUTPUT);
+hvec2 KEYWORD(110, 100, 0, 0, HVEC2);
+hvec3 KEYWORD(110, 100, 0, 0, HVEC3);
+hvec4 KEYWORD(110, 100, 0, 0, HVEC4);
+dvec2 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dvec2_type);
+dvec3 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dvec3_type);
+dvec4 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dvec4_type);
+dmat2 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2_type);
+dmat3 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3_type);
+dmat4 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4_type);
+dmat2x2 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2_type);
+dmat2x3 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2x3_type);
+dmat2x4 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat2x4_type);
+dmat3x2 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3x2_type);
+dmat3x3 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3_type);
+dmat3x4 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat3x4_type);
+dmat4x2 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4x2_type);
+dmat4x3 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4x3_type);
+dmat4x4 TYPE_WITH_ALT(110, 100, 400, 0, yyextra->ARB_gpu_shader_fp64_enable, glsl_type::dmat4_type);
+fvec2 KEYWORD(110, 100, 0, 0, FVEC2);
+fvec3 KEYWORD(110, 100, 0, 0, FVEC3);
+fvec4 KEYWORD(110, 100, 0, 0, FVEC4);
+sampler2DRect TYPE_WITH_ALT(110, 100, 0, 0, yyextra->ARB_texture_rectangle_enable, glsl_type::sampler2DRect_type);
+sampler3DRect KEYWORD(110, 100, 0, 0, SAMPLER3DRECT);
+sampler2DRectShadow TYPE_WITH_ALT(110, 100, 0, 0, yyextra->ARB_texture_rectangle_enable, glsl_type::sampler2DRectShadow_type);
+sizeof KEYWORD(110, 100, 0, 0, SIZEOF);
+cast KEYWORD(110, 100, 0, 0, CAST);
+namespace KEYWORD(110, 100, 0, 0, NAMESPACE);
+using KEYWORD(110, 100, 0, 0, USING);
+
+ /* Additional reserved words in GLSL 1.20. */
+lowp KEYWORD(120, 100, 130, 100, LOWP);
+mediump KEYWORD(120, 100, 130, 100, MEDIUMP);
+highp KEYWORD(120, 100, 130, 100, HIGHP);
+precision KEYWORD(120, 100, 130, 100, PRECISION);
+
+ /* Additional reserved words in GLSL 1.30. */
+case KEYWORD(130, 300, 130, 300, CASE);
+common KEYWORD(130, 300, 0, 0, COMMON);
+partition KEYWORD(130, 300, 0, 0, PARTITION);
+active KEYWORD(130, 300, 0, 0, ACTIVE);
+superp KEYWORD(130, 100, 0, 0, SUPERP);
+samplerBuffer TYPE_WITH_ALT(130, 300, 140, 320, yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable || (yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_buffer_object), glsl_type::samplerBuffer_type);
+filter KEYWORD(130, 300, 0, 0, FILTER);
+row_major KEYWORD_WITH_ALT(130, 0, 140, 0, yyextra->ARB_uniform_buffer_object_enable && !yyextra->es_shader, ROW_MAJOR);
+
+ /* Additional reserved words in GLSL 1.40 */
+isampler2DRect TYPE_WITH_ALT(140, 300, 140, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.NV_texture_rectangle && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::isampler2DRect_type);
+usampler2DRect TYPE_WITH_ALT(140, 300, 140, 0, yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.NV_texture_rectangle && yyextra->ctx->Extensions.EXT_texture_integer, glsl_type::usampler2DRect_type);
+isamplerBuffer TYPE_WITH_ALT(140, 300, 140, 320, yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable || (yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_buffer_object && yyextra->ctx->Extensions.EXT_texture_integer), glsl_type::isamplerBuffer_type);
+usamplerBuffer TYPE_WITH_ALT(140, 300, 140, 320, yyextra->EXT_texture_buffer_enable || yyextra->OES_texture_buffer_enable || (yyextra->EXT_gpu_shader4_enable && yyextra->ctx->Extensions.EXT_texture_buffer_object && yyextra->ctx->Extensions.EXT_texture_integer), glsl_type::usamplerBuffer_type);
+
+ /* Additional reserved words in GLSL ES 3.00 */
+resource KEYWORD(420, 300, 0, 0, RESOURCE);
+sample KEYWORD_WITH_ALT(400, 300, 400, 320, yyextra->ARB_gpu_shader5_enable || yyextra->OES_shader_multisample_interpolation_enable, SAMPLE);
+subroutine KEYWORD_WITH_ALT(400, 300, 400, 0, yyextra->ARB_shader_subroutine_enable, SUBROUTINE);
+
+ /* Additional words for ARB_gpu_shader_int64 */
+int64_t TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::int64_t_type);
+i64vec2 TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::i64vec2_type);
+i64vec3 TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::i64vec3_type);
+i64vec4 TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::i64vec4_type);
+
+uint64_t TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::uint64_t_type);
+u64vec2 TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::u64vec2_type);
+u64vec3 TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::u64vec3_type);
+u64vec4 TYPE_WITH_ALT(0, 0, 0, 0, yyextra->ARB_gpu_shader_int64_enable || yyextra->AMD_gpu_shader_int64_enable, glsl_type::u64vec4_type);
+
+[_a-zA-Z][_a-zA-Z0-9]* {
+ struct _mesa_glsl_parse_state *state = yyextra;
+ if (state->es_shader && yyleng > 1024) {
+ _mesa_glsl_error(yylloc, state,
+ "Identifier `%s' exceeds 1024 characters",
+ yytext);
+ }
+ return classify_identifier(state, yytext, yyleng, yylval);
+ }
+
+\. { struct _mesa_glsl_parse_state *state = yyextra;
+ state->is_field = true;
+ return DOT_TOK; }
+
+. { return yytext[0]; }
+
+%%
+
+int
+classify_identifier(struct _mesa_glsl_parse_state *state, const char *name,
+ unsigned name_len, YYSTYPE *output)
+{
+   /* We're not using linear_strdup here: that would make an implicit call
+    * to strlen() to find the length of the string, which flex has already
+    * computed and stored in yyleng.
+    */
+ char *id = (char *) linear_alloc_child(state->linalloc, name_len + 1);
+ memcpy(id, name, name_len + 1);
+ output->identifier = id;
+
+ if (state->is_field) {
+ state->is_field = false;
+ return FIELD_SELECTION;
+ }
+ if (state->symbols->get_variable(name) || state->symbols->get_function(name))
+ return IDENTIFIER;
+ else if (state->symbols->get_type(name))
+ return TYPE_IDENTIFIER;
+ else
+ return NEW_IDENTIFIER;
+}
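+
+/* An illustrative sketch of the field-selection handshake with the "."
+ * rule in the rules section above: for the input "color.rgb", "color" is
+ * returned as IDENTIFIER/NEW_IDENTIFIER, the "." rule returns DOT_TOK and
+ * sets state->is_field, and "rgb" then comes back as FIELD_SELECTION,
+ * clearing the flag again. */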
+
+void
+_mesa_glsl_lexer_ctor(struct _mesa_glsl_parse_state *state, const char *string)
+{
+ yylex_init_extra(state, & state->scanner);
+ yy_scan_string(string, state->scanner);
+}
+
+void
+_mesa_glsl_lexer_dtor(struct _mesa_glsl_parse_state *state)
+{
+ yylex_destroy(state->scanner);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp
new file mode 100644
index 0000000000..1956fd009e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp
@@ -0,0 +1,813 @@
+#include "glsl_optimizer.h"
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "glsl_parser.h"
+#include "ir_optimization.h"
+// FIXME: metal
+// #include "ir_print_metal_visitor.h"
+#include "ir_print_glsl_visitor.h"
+#include "ir_print_visitor.h"
+// FIXME: stats
+// #include "ir_stats.h"
+#include "loop_analysis.h"
+#include "program.h"
+#include "linker.h"
+#include "main/mtypes.h"
+#include "standalone_scaffolding.h"
+#include "builtin_functions.h"
+#include "program/program.h"
+
+static void
+init_gl_program(struct gl_program *prog, bool is_arb_asm, gl_shader_stage stage)
+{
+ prog->RefCount = 1;
+ prog->Format = GL_PROGRAM_FORMAT_ASCII_ARB;
+ prog->is_arb_asm = is_arb_asm;
+ prog->info.stage = stage;
+}
+
+static struct gl_program *
+new_program(UNUSED struct gl_context *ctx, gl_shader_stage stage,
+ UNUSED GLuint id, bool is_arb_asm)
+{
+ struct gl_program *prog = rzalloc(NULL, struct gl_program);
+ init_gl_program(prog, is_arb_asm, stage);
+ return prog;
+}
+
+static void
+initialize_mesa_context(struct gl_context *ctx, glslopt_target api)
+{
+ gl_api mesaAPI;
+ switch(api)
+ {
+ default:
+ case kGlslTargetOpenGL:
+ mesaAPI = API_OPENGL_COMPAT;
+ break;
+ case kGlslTargetOpenGLES20:
+ mesaAPI = API_OPENGLES2;
+ break;
+ case kGlslTargetOpenGLES30:
+ mesaAPI = API_OPENGL_CORE;
+ break;
+ case kGlslTargetMetal:
+ mesaAPI = API_OPENGL_CORE;
+ break;
+ }
+ initialize_context_to_defaults (ctx, mesaAPI);
+ _mesa_glsl_builtin_functions_init_or_ref();
+
+ switch(api)
+ {
+ default:
+ case kGlslTargetOpenGL:
+ ctx->Const.GLSLVersion = 150;
+ break;
+ case kGlslTargetOpenGLES20:
+ ctx->Extensions.OES_standard_derivatives = true;
+ // FIXME: extensions
+ // ctx->Extensions.EXT_shadow_samplers = true;
+ // ctx->Extensions.EXT_frag_depth = true;
+ ctx->Extensions.EXT_shader_framebuffer_fetch = true;
+ break;
+ case kGlslTargetOpenGLES30:
+ ctx->Extensions.ARB_ES3_1_compatibility = true;
+ ctx->Extensions.EXT_shader_framebuffer_fetch = true;
+ break;
+ case kGlslTargetMetal:
+ ctx->Extensions.ARB_ES3_compatibility = true;
+ ctx->Extensions.EXT_shader_framebuffer_fetch = true;
+ break;
+ }
+
+
+	// allow a high number of texcoords
+ ctx->Const.MaxTextureCoordUnits = 16;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 16;
+
+ // For GLES2.0 this would be 1, but we do support GL_EXT_draw_buffers
+ ctx->Const.MaxDrawBuffers = 4;
+
+ ctx->Driver.NewProgram = new_program;
+}
+
+
+struct glslopt_ctx {
+ glslopt_ctx (glslopt_target target) {
+ this->target = target;
+ mem_ctx = ralloc_context (NULL);
+ initialize_mesa_context (&mesa_ctx, target);
+ }
+ ~glslopt_ctx() {
+ ralloc_free (mem_ctx);
+ }
+ struct gl_context mesa_ctx;
+ void* mem_ctx;
+ glslopt_target target;
+};
+
+glslopt_ctx* glslopt_initialize (glslopt_target target)
+{
+ return new glslopt_ctx(target);
+}
+
+void glslopt_cleanup (glslopt_ctx* ctx)
+{
+ delete ctx;
+}
+
+void glslopt_set_max_unroll_iterations (glslopt_ctx* ctx, unsigned iterations)
+{
+ for (int i = 0; i < MESA_SHADER_STAGES; ++i)
+ ctx->mesa_ctx.Const.ShaderCompilerOptions[i].MaxUnrollIterations = iterations;
+}
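+
+// A minimal usage sketch of the public API (illustrative only; it assumes the
+// glslopt_optimize / glslopt_get_status / glslopt_get_output /
+// glslopt_shader_delete entry points declared in glsl_optimizer.h, and
+// fragmentSource is a placeholder for the caller's GLSL string):
+//
+//     glslopt_ctx* gctx = glslopt_initialize(kGlslTargetOpenGLES30);
+//     glslopt_set_max_unroll_iterations(gctx, 16);
+//     glslopt_shader* sh = glslopt_optimize(gctx, kGlslOptShaderFragment,
+//                                           fragmentSource, 0);
+//     if (glslopt_get_status(sh))
+//         puts(glslopt_get_output(sh));
+//     glslopt_shader_delete(sh);
+//     glslopt_cleanup(gctx);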
+
+struct glslopt_shader_var
+{
+ const char* name;
+ glslopt_basic_type type;
+ glslopt_precision prec;
+ int vectorSize;
+ int matrixSize;
+ int arraySize;
+ int location;
+};
+
+struct glslopt_shader
+{
+ static void* operator new(size_t size, void *ctx)
+ {
+ void *node;
+ node = ralloc_size(ctx, size);
+ assert(node != NULL);
+ return node;
+ }
+ static void operator delete(void *node)
+ {
+ ralloc_free(node);
+ }
+
+ glslopt_shader ()
+ : rawOutput(0)
+ , optimizedOutput(0)
+ , status(false)
+ , uniformCount(0)
+ , uniformsSize(0)
+ , inputCount(0)
+ , textureCount(0)
+ , statsMath(0)
+ , statsTex(0)
+ , statsFlow(0)
+ {
+ infoLog = "Shader not compiled yet";
+
+ whole_program = rzalloc (NULL, struct gl_shader_program);
+ assert(whole_program != NULL);
+ whole_program->data = rzalloc(whole_program, struct gl_shader_program_data);
+ assert(whole_program->data != NULL);
+ whole_program->data->InfoLog = ralloc_strdup(whole_program->data, "");
+
+ whole_program->Shaders = reralloc(whole_program, whole_program->Shaders, struct gl_shader *, whole_program->NumShaders + 1);
+ assert(whole_program->Shaders != NULL);
+
+ shader = rzalloc(whole_program, gl_shader);
+ whole_program->Shaders[whole_program->NumShaders] = shader;
+ whole_program->NumShaders++;
+
+ whole_program->data->LinkStatus = LINKING_SUCCESS;
+ }
+
+ ~glslopt_shader()
+ {
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++)
+ ralloc_free(whole_program->_LinkedShaders[i]);
+ ralloc_free(whole_program);
+ ralloc_free(rawOutput);
+ ralloc_free(optimizedOutput);
+ }
+
+ struct gl_shader_program* whole_program;
+ struct gl_shader* shader;
+
+ static const int kMaxShaderUniforms = 1024;
+ static const int kMaxShaderInputs = 128;
+ static const int kMaxShaderTextures = 128;
+ glslopt_shader_var uniforms[kMaxShaderUniforms];
+ glslopt_shader_var inputs[kMaxShaderInputs];
+	glslopt_shader_var textures[kMaxShaderTextures];
+ int uniformCount, uniformsSize;
+ int inputCount;
+ int textureCount;
+ int statsMath, statsTex, statsFlow;
+
+ char* rawOutput;
+ char* optimizedOutput;
+ const char* infoLog;
+ bool status;
+};
+
+static inline void debug_print_ir (const char* name, exec_list* ir, _mesa_glsl_parse_state* state, void* memctx)
+{
+ #if 0
+ printf("**** %s:\n", name);
+// _mesa_print_ir (ir, state);
+ char* foobar = _mesa_print_ir_glsl(ir, state, ralloc_strdup(memctx, ""), kPrintGlslFragment);
+ printf("%s\n", foobar);
+ validate_ir_tree(ir);
+ #endif
+}
+
+
+// FIXME: precision
+// struct precision_ctx
+// {
+// exec_list* root_ir;
+// bool res;
+// };
+
+
+// static void propagate_precision_deref(ir_instruction *ir, void *data)
+// {
+// // variable deref with undefined precision: take from variable itself
+// ir_dereference_variable* der = ir->as_dereference_variable();
+// if (der && der->get_precision() == glsl_precision_undefined && der->var->data.precision != glsl_precision_undefined)
+// {
+// der->set_precision ((glsl_precision)der->var->data.precision);
+// ((precision_ctx*)data)->res = true;
+// }
+
+// // array deref with undefined precision: take from array itself
+// ir_dereference_array* der_arr = ir->as_dereference_array();
+// if (der_arr && der_arr->get_precision() == glsl_precision_undefined && der_arr->array->get_precision() != glsl_precision_undefined)
+// {
+// der_arr->set_precision (der_arr->array->get_precision());
+// ((precision_ctx*)data)->res = true;
+// }
+
+// // swizzle with undefined precision: take from swizzle argument
+// ir_swizzle* swz = ir->as_swizzle();
+// if (swz && swz->get_precision() == glsl_precision_undefined && swz->val->get_precision() != glsl_precision_undefined)
+// {
+// swz->set_precision (swz->val->get_precision());
+// ((precision_ctx*)data)->res = true;
+// }
+
+// }
+
+// static void propagate_precision_expr(ir_instruction *ir, void *data)
+// {
+// ir_expression* expr = ir->as_expression();
+// if (!expr)
+// return;
+// if (expr->get_precision() != glsl_precision_undefined)
+// return;
+
+// glsl_precision prec_params_max = glsl_precision_undefined;
+// for (int i = 0; i < (int)expr->get_num_operands(); ++i)
+// {
+// ir_rvalue* op = expr->operands[i];
+// if (op && op->get_precision() != glsl_precision_undefined)
+// prec_params_max = higher_precision (prec_params_max, op->get_precision());
+// }
+// if (expr->get_precision() != prec_params_max)
+// {
+// expr->set_precision (prec_params_max);
+// ((precision_ctx*)data)->res = true;
+// }
+
+// }
+
+// static void propagate_precision_texture(ir_instruction *ir, void *data)
+// {
+// ir_texture* tex = ir->as_texture();
+// if (!tex)
+// return;
+
+// glsl_precision sampler_prec = tex->sampler->get_precision();
+// if (tex->get_precision() == sampler_prec || sampler_prec == glsl_precision_undefined)
+// return;
+
+// // set precision of ir_texture node to that of the sampler itself
+// tex->set_precision(sampler_prec);
+// ((precision_ctx*)data)->res = true;
+// }
+
+// struct undefined_ass_ctx
+// {
+// ir_variable* var;
+// bool res;
+// };
+
+// static void has_only_undefined_precision_assignments(ir_instruction *ir, void *data)
+// {
+// ir_assignment* ass = ir->as_assignment();
+// if (!ass)
+// return;
+// undefined_ass_ctx* ctx = (undefined_ass_ctx*)data;
+// if (ass->whole_variable_written() != ctx->var)
+// return;
+// glsl_precision prec = ass->rhs->get_precision();
+// if (prec == glsl_precision_undefined)
+// return;
+// ctx->res = false;
+// }
+
+
+// static void propagate_precision_assign(ir_instruction *ir, void *data)
+// {
+// ir_assignment* ass = ir->as_assignment();
+// if (!ass || !ass->lhs || !ass->rhs)
+// return;
+
+// glsl_precision lp = ass->lhs->get_precision();
+// glsl_precision rp = ass->rhs->get_precision();
+
+// // for assignments with LHS having undefined precision, take it from RHS
+// if (rp != glsl_precision_undefined)
+// {
+// ir_variable* lhs_var = ass->lhs->variable_referenced();
+// if (lp == glsl_precision_undefined)
+// {
+// if (lhs_var)
+// lhs_var->data.precision = rp;
+// ass->lhs->set_precision (rp);
+// ((precision_ctx*)data)->res = true;
+// }
+// return;
+// }
+
+// // for assignments where LHS has precision, but RHS is a temporary variable
+// // with undefined precision that's only assigned from other undefined precision
+// // sources -> make the RHS variable take LHS precision
+// if (lp != glsl_precision_undefined && rp == glsl_precision_undefined)
+// {
+// ir_dereference* deref = ass->rhs->as_dereference();
+// if (deref)
+// {
+// ir_variable* rhs_var = deref->variable_referenced();
+// if (rhs_var && rhs_var->data.mode == ir_var_temporary && rhs_var->data.precision == glsl_precision_undefined)
+// {
+// undefined_ass_ctx ctx;
+// ctx.var = rhs_var;
+// // find if we only assign to it from undefined precision sources
+// ctx.res = true;
+// exec_list* root_ir = ((precision_ctx*)data)->root_ir;
+// foreach_in_list(ir_instruction, inst, root_ir)
+// {
+// 				visit_tree (inst, has_only_undefined_precision_assignments, &ctx);
+// }
+// if (ctx.res)
+// {
+// rhs_var->data.precision = lp;
+// ass->rhs->set_precision(lp);
+// ((precision_ctx*)data)->res = true;
+// }
+// }
+// }
+// return;
+// }
+// }
+
+
+// static void propagate_precision_call(ir_instruction *ir, void *data)
+// {
+// ir_call* call = ir->as_call();
+// if (!call)
+// return;
+// if (!call->return_deref)
+// return;
+// if (call->return_deref->get_precision() == glsl_precision_undefined /*&& call->callee->precision == glsl_precision_undefined*/)
+// {
+// glsl_precision prec_params_max = glsl_precision_undefined;
+// foreach_two_lists(formal_node, &call->callee->parameters,
+// actual_node, &call->actual_parameters) {
+// ir_variable* sig_param = (ir_variable*)formal_node;
+// ir_rvalue* param = (ir_rvalue*)actual_node;
+
+// glsl_precision p = (glsl_precision)sig_param->data.precision;
+// if (p == glsl_precision_undefined)
+// p = param->get_precision();
+
+// prec_params_max = higher_precision (prec_params_max, p);
+// }
+// if (call->return_deref->get_precision() != prec_params_max)
+// {
+// call->return_deref->set_precision (prec_params_max);
+// ((precision_ctx*)data)->res = true;
+// }
+// }
+// }
+
+// static bool propagate_precision(exec_list* list, bool assign_high_to_undefined)
+// {
+// bool anyProgress = false;
+// precision_ctx ctx;
+
+// do {
+// ctx.res = false;
+// ctx.root_ir = list;
+// foreach_in_list(ir_instruction, ir, list)
+// {
+// visit_tree (ir, propagate_precision_texture, &ctx);
+// visit_tree (ir, propagate_precision_deref, &ctx);
+// bool hadProgress = ctx.res;
+// ctx.res = false;
+// visit_tree (ir, propagate_precision_assign, &ctx);
+// if (ctx.res)
+// {
+// // assignment precision propagation might have added precision
+// // to some variables; need to propagate dereference precision right
+// // after that too.
+// visit_tree (ir, propagate_precision_deref, &ctx);
+// }
+// ctx.res |= hadProgress;
+// visit_tree (ir, propagate_precision_call, &ctx);
+// visit_tree (ir, propagate_precision_expr, &ctx);
+// }
+// anyProgress |= ctx.res;
+// } while (ctx.res);
+
+// // for globals that have undefined precision, set it to highp
+// if (assign_high_to_undefined)
+// {
+// foreach_in_list(ir_instruction, ir, list)
+// {
+// ir_variable* var = ir->as_variable();
+// if (var)
+// {
+// if (var->data.precision == glsl_precision_undefined)
+// {
+// var->data.precision = glsl_precision_high;
+// anyProgress = true;
+// }
+// }
+// }
+// }
+
+// return anyProgress;
+// }
+
+
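+// Runs the pass pipeline repeatedly until a full sweep reports no progress
+// (or kMaximumPasses is hit). Each do_*/opt_* helper returns true when it
+// changed the IR, which feeds the "progress" flag below.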
+static void do_optimization_passes(exec_list* ir, bool linked, _mesa_glsl_parse_state* state, void* mem_ctx)
+{
+ bool progress;
+	// FIXME: Shouldn't need to bound the number of passes, but cap them so a
+	// pass that keeps reporting progress cannot loop forever.
+	int passes = 0;
+	const int kMaximumPasses = 1000;
+ do {
+ progress = false;
+ ++passes;
+ bool progress2;
+ debug_print_ir ("Initial", ir, state, mem_ctx);
+ if (linked) {
+ progress2 = do_function_inlining(ir); progress |= progress2; if (progress2) debug_print_ir ("After inlining", ir, state, mem_ctx);
+ progress2 = do_dead_functions(ir); progress |= progress2; if (progress2) debug_print_ir ("After dead functions", ir, state, mem_ctx);
+ progress2 = do_structure_splitting(ir); progress |= progress2; if (progress2) debug_print_ir ("After struct splitting", ir, state, mem_ctx);
+ }
+ progress2 = do_if_simplification(ir); progress |= progress2; if (progress2) debug_print_ir ("After if simpl", ir, state, mem_ctx);
+ progress2 = opt_flatten_nested_if_blocks(ir); progress |= progress2; if (progress2) debug_print_ir ("After if flatten", ir, state, mem_ctx);
+ // progress2 = propagate_precision (ir, state->metal_target); progress |= progress2; if (progress2) debug_print_ir ("After prec propagation", ir, state, mem_ctx);
+ progress2 = do_copy_propagation_elements(ir); progress |= progress2; if (progress2) debug_print_ir ("After copy propagation elems", ir, state, mem_ctx);
+
+ if (linked)
+ {
+ progress2 = do_vectorize(ir); progress |= progress2; if (progress2) debug_print_ir ("After vectorize", ir, state, mem_ctx);
+ }
+ if (linked) {
+ progress2 = do_dead_code(ir,false); progress |= progress2; if (progress2) debug_print_ir ("After dead code", ir, state, mem_ctx);
+ } else {
+ progress2 = do_dead_code_unlinked(ir); progress |= progress2; if (progress2) debug_print_ir ("After dead code unlinked", ir, state, mem_ctx);
+ }
+ progress2 = do_dead_code_local(ir); progress |= progress2; if (progress2) debug_print_ir ("After dead code local", ir, state, mem_ctx);
+ // progress2 = propagate_precision (ir, state->metal_target); progress |= progress2; if (progress2) debug_print_ir ("After prec propagation", ir, state, mem_ctx);
+ progress2 = do_tree_grafting(ir); progress |= progress2; if (progress2) debug_print_ir ("After tree grafting", ir, state, mem_ctx);
+ progress2 = do_constant_propagation(ir); progress |= progress2; if (progress2) debug_print_ir ("After const propagation", ir, state, mem_ctx);
+ if (linked) {
+ progress2 = do_constant_variable(ir); progress |= progress2; if (progress2) debug_print_ir ("After const variable", ir, state, mem_ctx);
+ } else {
+ progress2 = do_constant_variable_unlinked(ir); progress |= progress2; if (progress2) debug_print_ir ("After const variable unlinked", ir, state, mem_ctx);
+ }
+ progress2 = do_constant_folding(ir); progress |= progress2; if (progress2) debug_print_ir ("After const folding", ir, state, mem_ctx);
+ progress2 = do_minmax_prune(ir); progress |= progress2; if (progress2) debug_print_ir ("After minmax prune", ir, state, mem_ctx);
+ progress2 = do_rebalance_tree(ir); progress |= progress2; if (progress2) debug_print_ir ("After rebalance tree", ir, state, mem_ctx);
+ progress2 = do_algebraic(ir, state->ctx->Const.NativeIntegers, &state->ctx->Const.ShaderCompilerOptions[state->stage]); progress |= progress2; if (progress2) debug_print_ir ("After algebraic", ir, state, mem_ctx);
+ progress2 = do_lower_jumps(ir); progress |= progress2; if (progress2) debug_print_ir ("After lower jumps", ir, state, mem_ctx);
+ progress2 = do_vec_index_to_swizzle(ir); progress |= progress2; if (progress2) debug_print_ir ("After vec index to swizzle", ir, state, mem_ctx);
+ progress2 = lower_vector_insert(ir, false); progress |= progress2; if (progress2) debug_print_ir ("After lower vector insert", ir, state, mem_ctx);
+ progress2 = optimize_swizzles(ir); progress |= progress2; if (progress2) debug_print_ir ("After optimize swizzles", ir, state, mem_ctx);
+ progress2 = optimize_split_arrays(ir, linked); progress |= progress2; if (progress2) debug_print_ir ("After split arrays", ir, state, mem_ctx);
+ progress2 = optimize_redundant_jumps(ir); progress |= progress2; if (progress2) debug_print_ir ("After redundant jumps", ir, state, mem_ctx);
+
+		// Only run loop analysis and unrolling when linked; otherwise it causes
+		// duplicate loop induction variable problems (see the ast-in.txt test)
+ if (linked)
+ {
+ loop_state *ls = analyze_loop_variables(ir);
+ if (ls->loop_found) {
+ progress2 = unroll_loops(ir, ls, &state->ctx->Const.ShaderCompilerOptions[state->stage]); progress |= progress2; if (progress2) debug_print_ir ("After unroll", ir, state, mem_ctx);
+ }
+ delete ls;
+ }
+ } while (progress && passes < kMaximumPasses);
+
+ // GLSL/ES does not have saturate, so lower it
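+	// (e.g. saturate(x) is rewritten as min(max(x, 0.0), 1.0))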
+ lower_instructions(ir, SAT_TO_CLAMP);
+}
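+
+// A minimal sketch of driving the pass loop above (assuming "ir" is a parsed
+// exec_list and "state" a live _mesa_glsl_parse_state, as in glslopt_optimize
+// below):
+//
+//   do_optimization_passes(ir, /*linked=*/true, state, mem_ctx);
+//   validate_ir_tree(ir);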
+
+// FIXME
+// static void glsl_type_to_optimizer_desc(const glsl_type* type, glsl_precision prec, glslopt_shader_var* out)
+// {
+// out->arraySize = type->array_size();
+
+// // type; use element type when in array
+// if (type->is_array())
+// type = type->element_type();
+
+// if (type->is_float())
+// out->type = kGlslTypeFloat;
+// else if (type->is_integer())
+// out->type = kGlslTypeInt;
+// else if (type->is_boolean())
+// out->type = kGlslTypeBool;
+// else if (type->is_sampler())
+// {
+// if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_2D)
+// {
+// if (type->sampler_shadow)
+// out->type = kGlslTypeTex2DShadow;
+// else if (type->sampler_array)
+// out->type = kGlslTypeTex2DArray;
+// else
+// out->type = kGlslTypeTex2D;
+// }
+// else if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_3D)
+// out->type = kGlslTypeTex3D;
+// else if (type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE)
+// out->type = kGlslTypeTexCube;
+// else
+// out->type = kGlslTypeOther;
+// }
+// else
+// out->type = kGlslTypeOther;
+
+// // sizes
+// out->vectorSize = type->vector_elements;
+// out->matrixSize = type->matrix_columns;
+
+// // precision
+// switch (prec)
+// {
+// case glsl_precision_high: out->prec = kGlslPrecHigh; break;
+// case glsl_precision_medium: out->prec = kGlslPrecMedium; break;
+// case glsl_precision_low: out->prec = kGlslPrecLow; break;
+// default: out->prec = kGlslPrecHigh; break;
+// }
+// }
+
+static void find_shader_variables(glslopt_shader* sh, exec_list* ir)
+{
+ foreach_in_list(ir_instruction, node, ir)
+ {
+ ir_variable* const var = node->as_variable();
+ if (var == NULL)
+ continue;
+ if (var->data.mode == ir_var_shader_in)
+ {
+ if (sh->inputCount >= glslopt_shader::kMaxShaderInputs)
+ continue;
+
+ glslopt_shader_var& v = sh->inputs[sh->inputCount];
+ v.name = ralloc_strdup(sh, var->name);
+ // glsl_type_to_optimizer_desc(var->type, (glsl_precision)var->data.precision, &v);
+ v.location = var->data.explicit_location ? var->data.location : -1;
+ ++sh->inputCount;
+ }
+ if (var->data.mode == ir_var_uniform && !var->type->is_sampler())
+ {
+ if (sh->uniformCount >= glslopt_shader::kMaxShaderUniforms)
+ continue;
+
+ glslopt_shader_var& v = sh->uniforms[sh->uniformCount];
+ v.name = ralloc_strdup(sh, var->name);
+ // glsl_type_to_optimizer_desc(var->type, (glsl_precision)var->data.precision, &v);
+ v.location = var->data.explicit_location ? var->data.location : -1;
+ ++sh->uniformCount;
+ }
+ if (var->data.mode == ir_var_uniform && var->type->is_sampler())
+ {
+ if (sh->textureCount >= glslopt_shader::kMaxShaderTextures)
+ continue;
+
+ glslopt_shader_var& v = sh->textures[sh->textureCount];
+ v.name = ralloc_strdup(sh, var->name);
+ // glsl_type_to_optimizer_desc(var->type, (glsl_precision)var->data.precision, &v);
+ v.location = var->data.explicit_location ? var->data.location : -1;
+ ++sh->textureCount;
+ }
+ }
+}
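+
+// For example, "uniform sampler2D tex;" is reported through the textures
+// array, "uniform vec4 color;" through uniforms, and "in vec2 uv;" through
+// inputs.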
+
+glslopt_shader* glslopt_optimize (glslopt_ctx* ctx, glslopt_shader_type type, const char* shaderSource, unsigned options)
+{
+ glslopt_shader* shader = new (ctx->mem_ctx) glslopt_shader ();
+
+ PrintGlslMode printMode = kPrintGlslVertex;
+ switch (type) {
+ case kGlslOptShaderVertex:
+ shader->shader->Type = GL_VERTEX_SHADER;
+ shader->shader->Stage = MESA_SHADER_VERTEX;
+ printMode = kPrintGlslVertex;
+ break;
+ case kGlslOptShaderFragment:
+ shader->shader->Type = GL_FRAGMENT_SHADER;
+ shader->shader->Stage = MESA_SHADER_FRAGMENT;
+ printMode = kPrintGlslFragment;
+ break;
+ }
+ if (!shader->shader->Type)
+ {
+ shader->infoLog = ralloc_asprintf (shader, "Unknown shader type %d", (int)type);
+ shader->status = false;
+ return shader;
+ }
+
+ _mesa_glsl_parse_state* state = new (shader) _mesa_glsl_parse_state (&ctx->mesa_ctx, shader->shader->Stage, shader);
+ state->error = 0;
+
+ if (!(options & kGlslOptionSkipPreprocessor))
+ {
+ state->error = !!glcpp_preprocess (state, &shaderSource, &state->info_log, add_builtin_defines, state, &ctx->mesa_ctx);
+ if (state->error)
+ {
+ shader->status = !state->error;
+ shader->infoLog = state->info_log;
+ return shader;
+ }
+ }
+
+ _mesa_glsl_lexer_ctor (state, shaderSource);
+ _mesa_glsl_parse (state);
+ _mesa_glsl_lexer_dtor (state);
+
+ exec_list* ir = new (shader) exec_list();
+ shader->shader->ir = ir;
+
+ if (!state->error && !state->translation_unit.is_empty())
+ _mesa_ast_to_hir (ir, state);
+
+ // Un-optimized output
+ if (!state->error) {
+ validate_ir_tree(ir);
+ shader->rawOutput = _mesa_print_ir_glsl(ir, state, ralloc_strdup(shader, ""), printMode);
+ }
+
+ // Lower builtin functions prior to linking.
+ lower_builtins(ir);
+
+	// Retain the symbol table so built-in functions can be resolved when linking
+ shader->shader->symbols = state->symbols;
+
+ struct gl_linked_shader* linked_shader = NULL;
+
+ if (!state->error && !ir->is_empty() && !(options & kGlslOptionNotFullShader))
+ {
+ linked_shader = link_intrastage_shaders(shader,
+ &ctx->mesa_ctx,
+ shader->whole_program,
+ shader->whole_program->Shaders,
+ shader->whole_program->NumShaders,
+ true);
+ if (!linked_shader)
+ {
+ shader->status = false;
+ shader->infoLog = shader->whole_program->data->InfoLog;
+ return shader;
+ }
+ ir = linked_shader->ir;
+
+ debug_print_ir ("==== After link ====", ir, state, shader);
+ }
+
+ // Do optimization post-link
+ if (!state->error && !ir->is_empty())
+ {
+ const bool linked = !(options & kGlslOptionNotFullShader);
+ do_optimization_passes(ir, linked, state, shader);
+ validate_ir_tree(ir);
+ }
+
+ // Final optimized output
+ if (!state->error)
+ {
+ shader->optimizedOutput = _mesa_print_ir_glsl(ir, state, ralloc_strdup(shader, ""), printMode);
+ }
+
+ shader->status = !state->error;
+ shader->infoLog = state->info_log;
+
+ find_shader_variables (shader, ir);
+ // FIXME: stats
+ // if (!state->error)
+ // calculate_shader_stats (ir, &shader->statsMath, &shader->statsTex, &shader->statsFlow);
+
+ ralloc_free (ir);
+ ralloc_free (state);
+
+ if (linked_shader)
+ ralloc_free(linked_shader);
+
+ return shader;
+}
+
+void glslopt_shader_delete (glslopt_shader* shader)
+{
+ delete shader;
+}
+
+bool glslopt_get_status (glslopt_shader* shader)
+{
+ return shader->status;
+}
+
+const char* glslopt_get_output (glslopt_shader* shader)
+{
+ return shader->optimizedOutput;
+}
+
+const char* glslopt_get_raw_output (glslopt_shader* shader)
+{
+ return shader->rawOutput;
+}
+
+const char* glslopt_get_log (glslopt_shader* shader)
+{
+ return shader->infoLog;
+}
+
+int glslopt_shader_get_input_count (glslopt_shader* shader)
+{
+ return shader->inputCount;
+}
+
+int glslopt_shader_get_uniform_count (glslopt_shader* shader)
+{
+ return shader->uniformCount;
+}
+
+int glslopt_shader_get_uniform_total_size (glslopt_shader* shader)
+{
+ return shader->uniformsSize;
+}
+
+int glslopt_shader_get_texture_count (glslopt_shader* shader)
+{
+ return shader->textureCount;
+}
+
+void glslopt_shader_get_input_desc (glslopt_shader* shader, int index, const char** outName, glslopt_basic_type* outType, glslopt_precision* outPrec, int* outVecSize, int* outMatSize, int* outArraySize, int* outLocation)
+{
+ const glslopt_shader_var& v = shader->inputs[index];
+ *outName = v.name;
+ *outType = v.type;
+ *outPrec = v.prec;
+ *outVecSize = v.vectorSize;
+ *outMatSize = v.matrixSize;
+ *outArraySize = v.arraySize;
+ *outLocation = v.location;
+}
+
+void glslopt_shader_get_uniform_desc (glslopt_shader* shader, int index, const char** outName, glslopt_basic_type* outType, glslopt_precision* outPrec, int* outVecSize, int* outMatSize, int* outArraySize, int* outLocation)
+{
+ const glslopt_shader_var& v = shader->uniforms[index];
+ *outName = v.name;
+ *outType = v.type;
+ *outPrec = v.prec;
+ *outVecSize = v.vectorSize;
+ *outMatSize = v.matrixSize;
+ *outArraySize = v.arraySize;
+ *outLocation = v.location;
+}
+
+void glslopt_shader_get_texture_desc (glslopt_shader* shader, int index, const char** outName, glslopt_basic_type* outType, glslopt_precision* outPrec, int* outVecSize, int* outMatSize, int* outArraySize, int* outLocation)
+{
+ const glslopt_shader_var& v = shader->textures[index];
+ *outName = v.name;
+ *outType = v.type;
+ *outPrec = v.prec;
+ *outVecSize = v.vectorSize;
+ *outMatSize = v.matrixSize;
+ *outArraySize = v.arraySize;
+ *outLocation = v.location;
+}
+
+void glslopt_shader_get_stats (glslopt_shader* shader, int* approxMath, int* approxTex, int* approxFlow)
+{
+ *approxMath = shader->statsMath;
+ *approxTex = shader->statsTex;
+ *approxFlow = shader->statsFlow;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.h
new file mode 100644
index 0000000000..68b0a3f018
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_optimizer.h
@@ -0,0 +1,94 @@
+#pragma once
+#ifndef GLSL_OPTIMIZER_H
+#define GLSL_OPTIMIZER_H
+
+/*
+ Main GLSL optimizer interface.
+ See ../../README.md for more instructions.
+
+ General usage:
+
+ ctx = glslopt_initialize();
+ for (lots of shaders) {
+ shader = glslopt_optimize (ctx, shaderType, shaderSource, options);
+ if (glslopt_get_status (shader)) {
+ newSource = glslopt_get_output (shader);
+ } else {
+ errorLog = glslopt_get_log (shader);
+ }
+ glslopt_shader_delete (shader);
+ }
+ glslopt_cleanup (ctx);
+*/
+
+extern "C" {
+
+struct glslopt_shader;
+struct glslopt_ctx;
+
+enum glslopt_shader_type {
+ kGlslOptShaderVertex = 0,
+ kGlslOptShaderFragment,
+};
+
+// Options flags for glsl_optimize
+enum glslopt_options {
+ kGlslOptionSkipPreprocessor = (1<<0), // Skip preprocessing shader source. Saves some time if you know you don't need it.
+ kGlslOptionNotFullShader = (1<<1), // Passed shader is not the full shader source. This makes some optimizations weaker.
+};
+
+// Optimizer target language
+enum glslopt_target {
+ kGlslTargetOpenGL = 0,
+ kGlslTargetOpenGLES20 = 1,
+ kGlslTargetOpenGLES30 = 2,
+ kGlslTargetMetal = 3,
+};
+
+// Type info
+enum glslopt_basic_type {
+ kGlslTypeFloat = 0,
+ kGlslTypeInt,
+ kGlslTypeBool,
+ kGlslTypeTex2D,
+ kGlslTypeTex3D,
+ kGlslTypeTexCube,
+ kGlslTypeTex2DShadow,
+ kGlslTypeTex2DArray,
+ kGlslTypeOther,
+ kGlslTypeCount
+};
+enum glslopt_precision {
+ kGlslPrecHigh = 0,
+ kGlslPrecMedium,
+ kGlslPrecLow,
+ kGlslPrecCount
+};
+
+glslopt_ctx* glslopt_initialize (glslopt_target target);
+void glslopt_cleanup (glslopt_ctx* ctx);
+
+void glslopt_set_max_unroll_iterations (glslopt_ctx* ctx, unsigned iterations);
+
+glslopt_shader* glslopt_optimize (glslopt_ctx* ctx, glslopt_shader_type type, const char* shaderSource, unsigned options);
+bool glslopt_get_status (glslopt_shader* shader);
+const char* glslopt_get_output (glslopt_shader* shader);
+const char* glslopt_get_raw_output (glslopt_shader* shader);
+const char* glslopt_get_log (glslopt_shader* shader);
+void glslopt_shader_delete (glslopt_shader* shader);
+
+int glslopt_shader_get_input_count (glslopt_shader* shader);
+void glslopt_shader_get_input_desc (glslopt_shader* shader, int index, const char** outName, glslopt_basic_type* outType, glslopt_precision* outPrec, int* outVecSize, int* outMatSize, int* outArraySize, int* outLocation);
+int glslopt_shader_get_uniform_count (glslopt_shader* shader);
+int glslopt_shader_get_uniform_total_size (glslopt_shader* shader);
+void glslopt_shader_get_uniform_desc (glslopt_shader* shader, int index, const char** outName, glslopt_basic_type* outType, glslopt_precision* outPrec, int* outVecSize, int* outMatSize, int* outArraySize, int* outLocation);
+int glslopt_shader_get_texture_count (glslopt_shader* shader);
+void glslopt_shader_get_texture_desc (glslopt_shader* shader, int index, const char** outName, glslopt_basic_type* outType, glslopt_precision* outPrec, int* outVecSize, int* outMatSize, int* outArraySize, int* outLocation);
+
+// Get *very* approximate shader stats:
+// Number of math, texture and flow control instructions.
+void glslopt_shader_get_stats (glslopt_shader* shader, int* approxMath, int* approxTex, int* approxFlow);
+
+} // extern "C"
+
+#endif /* GLSL_OPTIMIZER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.cpp
new file mode 100644
index 0000000000..a1132bfb6a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.cpp
@@ -0,0 +1,6208 @@
+/* A Bison parser, made by GNU Bison 3.5. */
+
+/* Bison implementation for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* C LALR(1) parser skeleton written by Richard Stallman, by
+ simplifying the original so-called "semantic" parser. */
+
+/* All symbols defined below should begin with yy or YY, to avoid
+ infringing on user name space. This should be done even for local
+ variables, as they might otherwise be expanded by user macros.
+ There are some unavoidable exceptions within include files to
+ define necessary library symbols; they are noted "INFRINGES ON
+ USER NAME SPACE" below. */
+
+/* Undocumented macros, especially those whose name start with YY_,
+ are private implementation details. Do not rely on them. */
+
+/* Identify Bison output. */
+#define YYBISON 1
+
+/* Bison version. */
+#define YYBISON_VERSION "3.5"
+
+/* Skeleton name. */
+#define YYSKELETON_NAME "yacc.c"
+
+/* Pure parsers. */
+#define YYPURE 1
+
+/* Push parsers. */
+#define YYPUSH 0
+
+/* Pull parsers. */
+#define YYPULL 1
+
+
+/* Substitute the variable and function names. */
+#define yyparse _mesa_glsl_parse
+#define yylex _mesa_glsl_lex
+#define yyerror _mesa_glsl_error
+#define yydebug _mesa_glsl_debug
+#define yynerrs _mesa_glsl_nerrs
+
+/* First part of user prologue. */
+#line 1 "src/compiler/glsl/glsl_parser.yy"
+
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <strings.h>
+#endif
+#include <assert.h>
+
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "compiler/glsl_types.h"
+#include "main/context.h"
+#include "util/u_string.h"
+#include "util/format/u_format.h"
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4065 ) // switch statement contains 'default' but no 'case' labels
+#endif
+
+#undef yyerror
+
+static void yyerror(YYLTYPE *loc, _mesa_glsl_parse_state *st, const char *msg)
+{
+ _mesa_glsl_error(loc, st, "%s", msg);
+}
+
+static int
+_mesa_glsl_lex(YYSTYPE *val, YYLTYPE *loc, _mesa_glsl_parse_state *state)
+{
+ return _mesa_glsl_lexer_lex(val, loc, state->scanner);
+}
+
+static bool match_layout_qualifier(const char *s1, const char *s2,
+ _mesa_glsl_parse_state *state)
+{
+ /* From the GLSL 1.50 spec, section 4.3.8 (Layout Qualifiers):
+ *
+ * "The tokens in any layout-qualifier-id-list ... are not case
+ * sensitive, unless explicitly noted otherwise."
+ *
+ * The text "unless explicitly noted otherwise" appears to be
+ * vacuous--no desktop GLSL spec (up through GLSL 4.40) notes
+ * otherwise.
+ *
+ * However, the GLSL ES 3.00 spec says, in section 4.3.8 (Layout
+ * Qualifiers):
+ *
+ * "As for other identifiers, they are case sensitive."
+ *
+ * So we need to do a case-sensitive or a case-insensitive match,
+ * depending on whether we are compiling for GLSL ES.
+ */
+ if (state->es_shader)
+ return strcmp(s1, s2);
+ else
+ return strcasecmp(s1, s2);
+}
+
+#line 157 "src/compiler/glsl/glsl_parser.cpp"
+
+# ifndef YY_CAST
+# ifdef __cplusplus
+# define YY_CAST(Type, Val) static_cast<Type> (Val)
+# define YY_REINTERPRET_CAST(Type, Val) reinterpret_cast<Type> (Val)
+# else
+# define YY_CAST(Type, Val) ((Type) (Val))
+# define YY_REINTERPRET_CAST(Type, Val) ((Type) (Val))
+# endif
+# endif
+# ifndef YY_NULLPTR
+# if defined __cplusplus
+# if 201103L <= __cplusplus
+# define YY_NULLPTR nullptr
+# else
+# define YY_NULLPTR 0
+# endif
+# else
+# define YY_NULLPTR ((void*)0)
+# endif
+# endif
+
+/* Enabling verbose error messages. */
+#ifdef YYERROR_VERBOSE
+# undef YYERROR_VERBOSE
+# define YYERROR_VERBOSE 1
+#else
+# define YYERROR_VERBOSE 1
+#endif
+
+/* Use api.header.include to #include this header
+ instead of duplicating it here. */
+#ifndef YY__MESA_GLSL_SRC_COMPILER_GLSL_GLSL_PARSER_H_INCLUDED
+# define YY__MESA_GLSL_SRC_COMPILER_GLSL_GLSL_PARSER_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+#if YYDEBUG
+extern int _mesa_glsl_debug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ ATTRIBUTE = 258,
+ CONST_TOK = 259,
+ BASIC_TYPE_TOK = 260,
+ BREAK = 261,
+ BUFFER = 262,
+ CONTINUE = 263,
+ DO = 264,
+ ELSE = 265,
+ FOR = 266,
+ IF = 267,
+ DEMOTE = 268,
+ DISCARD = 269,
+ RETURN = 270,
+ SWITCH = 271,
+ CASE = 272,
+ DEFAULT = 273,
+ CENTROID = 274,
+ IN_TOK = 275,
+ OUT_TOK = 276,
+ INOUT_TOK = 277,
+ UNIFORM = 278,
+ VARYING = 279,
+ SAMPLE = 280,
+ NOPERSPECTIVE = 281,
+ FLAT = 282,
+ SMOOTH = 283,
+ IMAGE1DSHADOW = 284,
+ IMAGE2DSHADOW = 285,
+ IMAGE1DARRAYSHADOW = 286,
+ IMAGE2DARRAYSHADOW = 287,
+ COHERENT = 288,
+ VOLATILE = 289,
+ RESTRICT = 290,
+ READONLY = 291,
+ WRITEONLY = 292,
+ SHARED = 293,
+ STRUCT = 294,
+ VOID_TOK = 295,
+ WHILE = 296,
+ IDENTIFIER = 297,
+ TYPE_IDENTIFIER = 298,
+ NEW_IDENTIFIER = 299,
+ FLOATCONSTANT = 300,
+ DOUBLECONSTANT = 301,
+ INTCONSTANT = 302,
+ UINTCONSTANT = 303,
+ BOOLCONSTANT = 304,
+ INT64CONSTANT = 305,
+ UINT64CONSTANT = 306,
+ FIELD_SELECTION = 307,
+ LEFT_OP = 308,
+ RIGHT_OP = 309,
+ INC_OP = 310,
+ DEC_OP = 311,
+ LE_OP = 312,
+ GE_OP = 313,
+ EQ_OP = 314,
+ NE_OP = 315,
+ AND_OP = 316,
+ OR_OP = 317,
+ XOR_OP = 318,
+ MUL_ASSIGN = 319,
+ DIV_ASSIGN = 320,
+ ADD_ASSIGN = 321,
+ MOD_ASSIGN = 322,
+ LEFT_ASSIGN = 323,
+ RIGHT_ASSIGN = 324,
+ AND_ASSIGN = 325,
+ XOR_ASSIGN = 326,
+ OR_ASSIGN = 327,
+ SUB_ASSIGN = 328,
+ INVARIANT = 329,
+ PRECISE = 330,
+ LOWP = 331,
+ MEDIUMP = 332,
+ HIGHP = 333,
+ SUPERP = 334,
+ PRECISION = 335,
+ VERSION_TOK = 336,
+ EXTENSION = 337,
+ LINE = 338,
+ COLON = 339,
+ EOL = 340,
+ INTERFACE = 341,
+ OUTPUT = 342,
+ PRAGMA_DEBUG_ON = 343,
+ PRAGMA_DEBUG_OFF = 344,
+ PRAGMA_OPTIMIZE_ON = 345,
+ PRAGMA_OPTIMIZE_OFF = 346,
+ PRAGMA_WARNING_ON = 347,
+ PRAGMA_WARNING_OFF = 348,
+ PRAGMA_INVARIANT_ALL = 349,
+ LAYOUT_TOK = 350,
+ DOT_TOK = 351,
+ ASM = 352,
+ CLASS = 353,
+ UNION = 354,
+ ENUM = 355,
+ TYPEDEF = 356,
+ TEMPLATE = 357,
+ THIS = 358,
+ PACKED_TOK = 359,
+ GOTO = 360,
+ INLINE_TOK = 361,
+ NOINLINE = 362,
+ PUBLIC_TOK = 363,
+ STATIC = 364,
+ EXTERN = 365,
+ EXTERNAL = 366,
+ LONG_TOK = 367,
+ SHORT_TOK = 368,
+ HALF = 369,
+ FIXED_TOK = 370,
+ UNSIGNED = 371,
+ INPUT_TOK = 372,
+ HVEC2 = 373,
+ HVEC3 = 374,
+ HVEC4 = 375,
+ FVEC2 = 376,
+ FVEC3 = 377,
+ FVEC4 = 378,
+ SAMPLER3DRECT = 379,
+ SIZEOF = 380,
+ CAST = 381,
+ NAMESPACE = 382,
+ USING = 383,
+ RESOURCE = 384,
+ PATCH = 385,
+ SUBROUTINE = 386,
+ ERROR_TOK = 387,
+ COMMON = 388,
+ PARTITION = 389,
+ ACTIVE = 390,
+ FILTER = 391,
+ ROW_MAJOR = 392,
+ THEN = 393
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+union YYSTYPE
+{
+#line 101 "src/compiler/glsl/glsl_parser.yy"
+
+ int n;
+ int64_t n64;
+ float real;
+ double dreal;
+ const char *identifier;
+
+ struct ast_type_qualifier type_qualifier;
+
+ ast_node *node;
+ ast_type_specifier *type_specifier;
+ ast_array_specifier *array_specifier;
+ ast_fully_specified_type *fully_specified_type;
+ ast_function *function;
+ ast_parameter_declarator *parameter_declarator;
+ ast_function_definition *function_definition;
+ ast_compound_statement *compound_statement;
+ ast_expression *expression;
+ ast_declarator_list *declarator_list;
+ ast_struct_specifier *struct_specifier;
+ ast_declaration *declaration;
+ ast_switch_body *switch_body;
+ ast_case_label *case_label;
+ ast_case_label_list *case_label_list;
+ ast_case_statement *case_statement;
+ ast_case_statement_list *case_statement_list;
+ ast_interface_block *interface_block;
+ ast_subroutine_list *subroutine_list;
+ struct {
+ ast_node *cond;
+ ast_expression *rest;
+ } for_rest_statement;
+
+ struct {
+ ast_node *then_statement;
+ ast_node *else_statement;
+ } selection_rest_statement;
+
+ const glsl_type *type;
+
+#line 389 "src/compiler/glsl/glsl_parser.cpp"
+
+};
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+/* Location type. */
+#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
+typedef struct YYLTYPE YYLTYPE;
+struct YYLTYPE
+{
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+};
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
+#endif
+
+
+
+int _mesa_glsl_parse (struct _mesa_glsl_parse_state *state);
+
+#endif /* !YY__MESA_GLSL_SRC_COMPILER_GLSL_GLSL_PARSER_H_INCLUDED */
+
+
+
+#ifdef short
+# undef short
+#endif
+
+/* On compilers that do not define __PTRDIFF_MAX__ etc., make sure
+ <limits.h> and (if available) <stdint.h> are included
+ so that the code can choose integer types of a good width. */
+
+#ifndef __PTRDIFF_MAX__
+# include <limits.h> /* INFRINGES ON USER NAME SPACE */
+# if defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__
+# include <stdint.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_STDINT_H
+# endif
+#endif
+
+/* Narrow types that promote to a signed type and that can represent a
+ signed or unsigned integer of at least N bits. In tables they can
+ save space and decrease cache pressure. Promoting to a signed type
+ helps avoid bugs in integer arithmetic. */
+
+#ifdef __INT_LEAST8_MAX__
+typedef __INT_LEAST8_TYPE__ yytype_int8;
+#elif defined YY_STDINT_H
+typedef int_least8_t yytype_int8;
+#else
+typedef signed char yytype_int8;
+#endif
+
+#ifdef __INT_LEAST16_MAX__
+typedef __INT_LEAST16_TYPE__ yytype_int16;
+#elif defined YY_STDINT_H
+typedef int_least16_t yytype_int16;
+#else
+typedef short yytype_int16;
+#endif
+
+#if defined __UINT_LEAST8_MAX__ && __UINT_LEAST8_MAX__ <= __INT_MAX__
+typedef __UINT_LEAST8_TYPE__ yytype_uint8;
+#elif (!defined __UINT_LEAST8_MAX__ && defined YY_STDINT_H \
+ && UINT_LEAST8_MAX <= INT_MAX)
+typedef uint_least8_t yytype_uint8;
+#elif !defined __UINT_LEAST8_MAX__ && UCHAR_MAX <= INT_MAX
+typedef unsigned char yytype_uint8;
+#else
+typedef short yytype_uint8;
+#endif
+
+#if defined __UINT_LEAST16_MAX__ && __UINT_LEAST16_MAX__ <= __INT_MAX__
+typedef __UINT_LEAST16_TYPE__ yytype_uint16;
+#elif (!defined __UINT_LEAST16_MAX__ && defined YY_STDINT_H \
+ && UINT_LEAST16_MAX <= INT_MAX)
+typedef uint_least16_t yytype_uint16;
+#elif !defined __UINT_LEAST16_MAX__ && USHRT_MAX <= INT_MAX
+typedef unsigned short yytype_uint16;
+#else
+typedef int yytype_uint16;
+#endif
+
+#ifndef YYPTRDIFF_T
+# if defined __PTRDIFF_TYPE__ && defined __PTRDIFF_MAX__
+# define YYPTRDIFF_T __PTRDIFF_TYPE__
+# define YYPTRDIFF_MAXIMUM __PTRDIFF_MAX__
+# elif defined PTRDIFF_MAX
+# ifndef ptrdiff_t
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# endif
+# define YYPTRDIFF_T ptrdiff_t
+# define YYPTRDIFF_MAXIMUM PTRDIFF_MAX
+# else
+# define YYPTRDIFF_T long
+# define YYPTRDIFF_MAXIMUM LONG_MAX
+# endif
+#endif
+
+#ifndef YYSIZE_T
+# ifdef __SIZE_TYPE__
+# define YYSIZE_T __SIZE_TYPE__
+# elif defined size_t
+# define YYSIZE_T size_t
+# elif defined __STDC_VERSION__ && 199901 <= __STDC_VERSION__
+# include <stddef.h> /* INFRINGES ON USER NAME SPACE */
+# define YYSIZE_T size_t
+# else
+# define YYSIZE_T unsigned
+# endif
+#endif
+
+#define YYSIZE_MAXIMUM \
+ YY_CAST (YYPTRDIFF_T, \
+ (YYPTRDIFF_MAXIMUM < YY_CAST (YYSIZE_T, -1) \
+ ? YYPTRDIFF_MAXIMUM \
+ : YY_CAST (YYSIZE_T, -1)))
+
+#define YYSIZEOF(X) YY_CAST (YYPTRDIFF_T, sizeof (X))
+
+/* Stored state numbers (used for stacks). */
+typedef yytype_int16 yy_state_t;
+
+/* State numbers in computations. */
+typedef int yy_state_fast_t;
+
+#ifndef YY_
+# if defined YYENABLE_NLS && YYENABLE_NLS
+# if ENABLE_NLS
+# include <libintl.h> /* INFRINGES ON USER NAME SPACE */
+# define YY_(Msgid) dgettext ("bison-runtime", Msgid)
+# endif
+# endif
+# ifndef YY_
+# define YY_(Msgid) Msgid
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_PURE
+# if defined __GNUC__ && 2 < __GNUC__ + (96 <= __GNUC_MINOR__)
+# define YY_ATTRIBUTE_PURE __attribute__ ((__pure__))
+# else
+# define YY_ATTRIBUTE_PURE
+# endif
+#endif
+
+#ifndef YY_ATTRIBUTE_UNUSED
+# if defined __GNUC__ && 2 < __GNUC__ + (7 <= __GNUC_MINOR__)
+# define YY_ATTRIBUTE_UNUSED __attribute__ ((__unused__))
+# else
+# define YY_ATTRIBUTE_UNUSED
+# endif
+#endif
+
+/* Suppress unused-variable warnings by "using" E. */
+#if ! defined lint || defined __GNUC__
+# define YYUSE(E) ((void) (E))
+#else
+# define YYUSE(E) /* empty */
+#endif
+
+#if defined __GNUC__ && ! defined __ICC && 407 <= __GNUC__ * 100 + __GNUC_MINOR__
+/* Suppress an incorrect diagnostic about yylval being uninitialized. */
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"") \
+ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"")
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END \
+ _Pragma ("GCC diagnostic pop")
+#else
+# define YY_INITIAL_VALUE(Value) Value
+#endif
+#ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+# define YY_IGNORE_MAYBE_UNINITIALIZED_END
+#endif
+#ifndef YY_INITIAL_VALUE
+# define YY_INITIAL_VALUE(Value) /* Nothing. */
+#endif
+
+#if defined __cplusplus && defined __GNUC__ && ! defined __ICC && 6 <= __GNUC__
+# define YY_IGNORE_USELESS_CAST_BEGIN \
+ _Pragma ("GCC diagnostic push") \
+ _Pragma ("GCC diagnostic ignored \"-Wuseless-cast\"")
+# define YY_IGNORE_USELESS_CAST_END \
+ _Pragma ("GCC diagnostic pop")
+#endif
+#ifndef YY_IGNORE_USELESS_CAST_BEGIN
+# define YY_IGNORE_USELESS_CAST_BEGIN
+# define YY_IGNORE_USELESS_CAST_END
+#endif
+
+
+#define YY_ASSERT(E) ((void) (0 && (E)))
+
+#if ! defined yyoverflow || YYERROR_VERBOSE
+
+/* The parser invokes alloca or malloc; define the necessary symbols. */
+
+# ifdef YYSTACK_USE_ALLOCA
+# if YYSTACK_USE_ALLOCA
+# ifdef __GNUC__
+# define YYSTACK_ALLOC __builtin_alloca
+# elif defined __BUILTIN_VA_ARG_INCR
+# include <alloca.h> /* INFRINGES ON USER NAME SPACE */
+# elif defined _AIX
+# define YYSTACK_ALLOC __alloca
+# elif defined _MSC_VER
+# include <malloc.h> /* INFRINGES ON USER NAME SPACE */
+# define alloca _alloca
+# else
+# define YYSTACK_ALLOC alloca
+# if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+ /* Use EXIT_SUCCESS as a witness for stdlib.h. */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# endif
+# endif
+# endif
+
+# ifdef YYSTACK_ALLOC
+ /* Pacify GCC's 'empty if-body' warning. */
+# define YYSTACK_FREE(Ptr) do { /* empty */; } while (0)
+# ifndef YYSTACK_ALLOC_MAXIMUM
+ /* The OS might guarantee only one guard page at the bottom of the stack,
+ and a page size can be as small as 4096 bytes. So we cannot safely
+ invoke alloca (N) if N exceeds 4096. Use a slightly smaller number
+ to allow for a few compiler-allocated temporary stack slots. */
+# define YYSTACK_ALLOC_MAXIMUM 4032 /* reasonable circa 2006 */
+# endif
+# else
+# define YYSTACK_ALLOC YYMALLOC
+# define YYSTACK_FREE YYFREE
+# ifndef YYSTACK_ALLOC_MAXIMUM
+# define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
+# endif
+# if (defined __cplusplus && ! defined EXIT_SUCCESS \
+ && ! ((defined YYMALLOC || defined malloc) \
+ && (defined YYFREE || defined free)))
+# include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
+# ifndef EXIT_SUCCESS
+# define EXIT_SUCCESS 0
+# endif
+# endif
+# ifndef YYMALLOC
+# define YYMALLOC malloc
+# if ! defined malloc && ! defined EXIT_SUCCESS
+void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# ifndef YYFREE
+# define YYFREE free
+# if ! defined free && ! defined EXIT_SUCCESS
+void free (void *); /* INFRINGES ON USER NAME SPACE */
+# endif
+# endif
+# endif
+#endif /* ! defined yyoverflow || YYERROR_VERBOSE */
+
+
+#if (! defined yyoverflow \
+ && (! defined __cplusplus \
+ || (defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL \
+ && defined YYSTYPE_IS_TRIVIAL && YYSTYPE_IS_TRIVIAL)))
+
+/* A type that is properly aligned for any stack member. */
+union yyalloc
+{
+ yy_state_t yyss_alloc;
+ YYSTYPE yyvs_alloc;
+ YYLTYPE yyls_alloc;
+};
+
+/* The size of the maximum gap between one aligned stack and the next. */
+# define YYSTACK_GAP_MAXIMUM (YYSIZEOF (union yyalloc) - 1)
+
+/* The size of an array large enough to hold all stacks, each with
+ N elements. */
+# define YYSTACK_BYTES(N) \
+ ((N) * (YYSIZEOF (yy_state_t) + YYSIZEOF (YYSTYPE) \
+ + YYSIZEOF (YYLTYPE)) \
+ + 2 * YYSTACK_GAP_MAXIMUM)
+
+# define YYCOPY_NEEDED 1
+
+/* Relocate STACK from its old location to the new one. The
+ local variables YYSIZE and YYSTACKSIZE give the old and new number of
+ elements in the stack, and YYPTR gives the new location of the
+ stack. Advance YYPTR to a properly aligned location for the next
+ stack. */
+# define YYSTACK_RELOCATE(Stack_alloc, Stack) \
+ do \
+ { \
+ YYPTRDIFF_T yynewbytes; \
+ YYCOPY (&yyptr->Stack_alloc, Stack, yysize); \
+ Stack = &yyptr->Stack_alloc; \
+ yynewbytes = yystacksize * YYSIZEOF (*Stack) + YYSTACK_GAP_MAXIMUM; \
+ yyptr += yynewbytes / YYSIZEOF (*yyptr); \
+ } \
+ while (0)
+
+#endif
+
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from SRC to DST. The source and destination do
+ not overlap. */
+# ifndef YYCOPY
+# if defined __GNUC__ && 1 < __GNUC__
+# define YYCOPY(Dst, Src, Count) \
+ __builtin_memcpy (Dst, Src, YY_CAST (YYSIZE_T, (Count)) * sizeof (*(Src)))
+# else
+# define YYCOPY(Dst, Src, Count) \
+ do \
+ { \
+ YYPTRDIFF_T yyi; \
+ for (yyi = 0; yyi < (Count); yyi++) \
+ (Dst)[yyi] = (Src)[yyi]; \
+ } \
+ while (0)
+# endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
+/* YYFINAL -- State number of the termination state. */
+#define YYFINAL 5
+/* YYLAST -- Last index in YYTABLE. */
+#define YYLAST 2514
+
+/* YYNTOKENS -- Number of terminals. */
+#define YYNTOKENS 162
+/* YYNNTS -- Number of nonterminals. */
+#define YYNNTS 111
+/* YYNRULES -- Number of rules. */
+#define YYNRULES 312
+/* YYNSTATES -- Number of states. */
+#define YYNSTATES 475
+
+#define YYUNDEFTOK 2
+#define YYMAXUTOK 393
+
+
+/* YYTRANSLATE(TOKEN-NUM) -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex, with out-of-bounds checking. */
+#define YYTRANSLATE(YYX) \
+ (0 <= (YYX) && (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
+
+/* YYTRANSLATE[TOKEN-NUM] -- Symbol number corresponding to TOKEN-NUM
+ as returned by yylex. */
+static const yytype_uint8 yytranslate[] =
+{
+ 0, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 146, 2, 2, 2, 150, 153, 2,
+ 139, 140, 148, 144, 143, 145, 2, 149, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 157, 159,
+ 151, 158, 152, 156, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 141, 2, 142, 154, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 160, 155, 161, 147, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
+ 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
+ 95, 96, 97, 98, 99, 100, 101, 102, 103, 104,
+ 105, 106, 107, 108, 109, 110, 111, 112, 113, 114,
+ 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
+ 135, 136, 137, 138
+};
+
+#if YYDEBUG
+ /* YYRLINE[YYN] -- Source line where rule number YYN was defined. */
+static const yytype_int16 yyrline[] =
+{
+ 0, 295, 295, 294, 318, 320, 327, 337, 338, 339,
+ 340, 341, 365, 370, 377, 379, 383, 384, 385, 389,
+ 398, 406, 414, 425, 426, 430, 437, 444, 451, 458,
+ 465, 472, 479, 486, 493, 494, 500, 504, 511, 517,
+ 526, 530, 534, 538, 539, 543, 544, 548, 554, 566,
+ 570, 576, 590, 591, 597, 603, 613, 614, 615, 616,
+ 620, 621, 627, 633, 642, 643, 649, 658, 659, 665,
+ 674, 675, 681, 687, 693, 702, 703, 709, 718, 719,
+ 728, 729, 738, 739, 748, 749, 758, 759, 768, 769,
+ 778, 779, 788, 789, 798, 799, 800, 801, 802, 803,
+ 804, 805, 806, 807, 808, 812, 816, 832, 836, 841,
+ 845, 850, 867, 871, 872, 876, 881, 889, 907, 918,
+ 935, 950, 958, 975, 978, 986, 994, 1006, 1018, 1025,
+ 1030, 1035, 1044, 1048, 1049, 1059, 1069, 1079, 1093, 1100,
+ 1111, 1122, 1133, 1144, 1156, 1171, 1178, 1196, 1203, 1204,
+ 1214, 1737, 1902, 1928, 1933, 1938, 1946, 1951, 1960, 1969,
+ 1981, 1986, 1991, 2000, 2005, 2010, 2011, 2012, 2013, 2014,
+ 2015, 2016, 2034, 2042, 2067, 2091, 2105, 2110, 2126, 2151,
+ 2163, 2171, 2176, 2181, 2188, 2193, 2198, 2203, 2208, 2233,
+ 2245, 2250, 2255, 2263, 2268, 2273, 2279, 2284, 2292, 2300,
+ 2306, 2316, 2327, 2328, 2336, 2342, 2348, 2357, 2358, 2359,
+ 2371, 2376, 2381, 2389, 2396, 2413, 2418, 2426, 2464, 2469,
+ 2477, 2483, 2492, 2493, 2497, 2504, 2511, 2518, 2524, 2525,
+ 2529, 2530, 2531, 2532, 2533, 2534, 2535, 2539, 2546, 2545,
+ 2559, 2560, 2564, 2570, 2579, 2589, 2598, 2610, 2616, 2625,
+ 2634, 2639, 2647, 2651, 2669, 2677, 2682, 2690, 2695, 2703,
+ 2711, 2719, 2727, 2735, 2743, 2751, 2758, 2765, 2775, 2776,
+ 2780, 2782, 2788, 2793, 2802, 2808, 2814, 2820, 2826, 2835,
+ 2844, 2845, 2846, 2847, 2848, 2852, 2866, 2870, 2883, 2901,
+ 2920, 2925, 2930, 2935, 2940, 2955, 2958, 2963, 2971, 2976,
+ 2984, 3008, 3015, 3019, 3026, 3030, 3040, 3049, 3059, 3068,
+ 3080, 3102, 3112
+};
+#endif
+
+#if YYDEBUG || YYERROR_VERBOSE || 1
+/* YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM.
+ First, the terminals, then, starting at YYNTOKENS, nonterminals. */
+static const char *const yytname[] =
+{
+ "$end", "error", "$undefined", "ATTRIBUTE", "CONST_TOK",
+ "BASIC_TYPE_TOK", "BREAK", "BUFFER", "CONTINUE", "DO", "ELSE", "FOR",
+ "IF", "DEMOTE", "DISCARD", "RETURN", "SWITCH", "CASE", "DEFAULT",
+ "CENTROID", "IN_TOK", "OUT_TOK", "INOUT_TOK", "UNIFORM", "VARYING",
+ "SAMPLE", "NOPERSPECTIVE", "FLAT", "SMOOTH", "IMAGE1DSHADOW",
+ "IMAGE2DSHADOW", "IMAGE1DARRAYSHADOW", "IMAGE2DARRAYSHADOW", "COHERENT",
+ "VOLATILE", "RESTRICT", "READONLY", "WRITEONLY", "SHARED", "STRUCT",
+ "VOID_TOK", "WHILE", "IDENTIFIER", "TYPE_IDENTIFIER", "NEW_IDENTIFIER",
+ "FLOATCONSTANT", "DOUBLECONSTANT", "INTCONSTANT", "UINTCONSTANT",
+ "BOOLCONSTANT", "INT64CONSTANT", "UINT64CONSTANT", "FIELD_SELECTION",
+ "LEFT_OP", "RIGHT_OP", "INC_OP", "DEC_OP", "LE_OP", "GE_OP", "EQ_OP",
+ "NE_OP", "AND_OP", "OR_OP", "XOR_OP", "MUL_ASSIGN", "DIV_ASSIGN",
+ "ADD_ASSIGN", "MOD_ASSIGN", "LEFT_ASSIGN", "RIGHT_ASSIGN", "AND_ASSIGN",
+ "XOR_ASSIGN", "OR_ASSIGN", "SUB_ASSIGN", "INVARIANT", "PRECISE", "LOWP",
+ "MEDIUMP", "HIGHP", "SUPERP", "PRECISION", "VERSION_TOK", "EXTENSION",
+ "LINE", "COLON", "EOL", "INTERFACE", "OUTPUT", "PRAGMA_DEBUG_ON",
+ "PRAGMA_DEBUG_OFF", "PRAGMA_OPTIMIZE_ON", "PRAGMA_OPTIMIZE_OFF",
+ "PRAGMA_WARNING_ON", "PRAGMA_WARNING_OFF", "PRAGMA_INVARIANT_ALL",
+ "LAYOUT_TOK", "DOT_TOK", "ASM", "CLASS", "UNION", "ENUM", "TYPEDEF",
+ "TEMPLATE", "THIS", "PACKED_TOK", "GOTO", "INLINE_TOK", "NOINLINE",
+ "PUBLIC_TOK", "STATIC", "EXTERN", "EXTERNAL", "LONG_TOK", "SHORT_TOK",
+ "HALF", "FIXED_TOK", "UNSIGNED", "INPUT_TOK", "HVEC2", "HVEC3", "HVEC4",
+ "FVEC2", "FVEC3", "FVEC4", "SAMPLER3DRECT", "SIZEOF", "CAST",
+ "NAMESPACE", "USING", "RESOURCE", "PATCH", "SUBROUTINE", "ERROR_TOK",
+ "COMMON", "PARTITION", "ACTIVE", "FILTER", "ROW_MAJOR", "THEN", "'('",
+ "')'", "'['", "']'", "','", "'+'", "'-'", "'!'", "'~'", "'*'", "'/'",
+ "'%'", "'<'", "'>'", "'&'", "'^'", "'|'", "'?'", "':'", "'='", "';'",
+ "'{'", "'}'", "$accept", "translation_unit", "$@1", "version_statement",
+ "pragma_statement", "extension_statement_list", "any_identifier",
+ "extension_statement", "external_declaration_list",
+ "variable_identifier", "primary_expression", "postfix_expression",
+ "integer_expression", "function_call", "function_call_or_method",
+ "function_call_generic", "function_call_header_no_parameters",
+ "function_call_header_with_parameters", "function_call_header",
+ "function_identifier", "unary_expression", "unary_operator",
+ "multiplicative_expression", "additive_expression", "shift_expression",
+ "relational_expression", "equality_expression", "and_expression",
+ "exclusive_or_expression", "inclusive_or_expression",
+ "logical_and_expression", "logical_xor_expression",
+ "logical_or_expression", "conditional_expression",
+ "assignment_expression", "assignment_operator", "expression",
+ "constant_expression", "declaration", "function_prototype",
+ "function_declarator", "function_header_with_parameters",
+ "function_header", "parameter_declarator", "parameter_declaration",
+ "parameter_qualifier", "parameter_direction_qualifier",
+ "parameter_type_specifier", "init_declarator_list", "single_declaration",
+ "fully_specified_type", "layout_qualifier", "layout_qualifier_id_list",
+ "layout_qualifier_id", "interface_block_layout_qualifier",
+ "subroutine_qualifier", "subroutine_type_list",
+ "interpolation_qualifier", "type_qualifier",
+ "auxiliary_storage_qualifier", "storage_qualifier", "memory_qualifier",
+ "array_specifier", "type_specifier", "type_specifier_nonarray",
+ "basic_type_specifier_nonarray", "precision_qualifier",
+ "struct_specifier", "struct_declaration_list", "struct_declaration",
+ "struct_declarator_list", "struct_declarator", "initializer",
+ "initializer_list", "declaration_statement", "statement",
+ "simple_statement", "compound_statement", "$@2",
+ "statement_no_new_scope", "compound_statement_no_new_scope",
+ "statement_list", "expression_statement", "selection_statement",
+ "selection_rest_statement", "condition", "switch_statement",
+ "switch_body", "case_label", "case_label_list", "case_statement",
+ "case_statement_list", "iteration_statement", "for_init_statement",
+ "conditionopt", "for_rest_statement", "jump_statement",
+ "demote_statement", "external_declaration", "function_definition",
+ "interface_block", "basic_interface_block", "interface_qualifier",
+ "instance_name_opt", "member_list", "member_declaration",
+ "layout_uniform_defaults", "layout_buffer_defaults",
+ "layout_in_defaults", "layout_out_defaults", "layout_defaults", YY_NULLPTR
+};
+#endif
+
+# ifdef YYPRINT
+/* YYTOKNUM[NUM] -- (External) token number corresponding to the
+ (internal) symbol number NUM (which must be that of a token). */
+static const yytype_int16 yytoknum[] =
+{
+ 0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
+ 265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
+ 275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
+ 285, 286, 287, 288, 289, 290, 291, 292, 293, 294,
+ 295, 296, 297, 298, 299, 300, 301, 302, 303, 304,
+ 305, 306, 307, 308, 309, 310, 311, 312, 313, 314,
+ 315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
+ 325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
+ 335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
+ 345, 346, 347, 348, 349, 350, 351, 352, 353, 354,
+ 355, 356, 357, 358, 359, 360, 361, 362, 363, 364,
+ 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
+ 375, 376, 377, 378, 379, 380, 381, 382, 383, 384,
+ 385, 386, 387, 388, 389, 390, 391, 392, 393, 40,
+ 41, 91, 93, 44, 43, 45, 33, 126, 42, 47,
+ 37, 60, 62, 38, 94, 124, 63, 58, 61, 59,
+ 123, 125
+};
+# endif
+
+#define YYPACT_NINF (-292)
+
+#define yypact_value_is_default(Yyn) \
+ ((Yyn) == YYPACT_NINF)
+
+#define YYTABLE_NINF (-294)
+
+#define yytable_value_is_error(Yyn) \
+ 0
+
+ /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
+ STATE-NUM. */
+static const yytype_int16 yypact[] =
+{
+ 21, 64, 115, -292, 5, -292, 58, -292, -292, -292,
+ -292, 50, 154, 1766, -292, -292, 61, -292, -292, -292,
+ 119, -292, 130, 136, -292, 168, -292, -292, -292, -292,
+ -292, -292, -292, -292, -292, -292, -292, -23, -292, -292,
+ 2188, 2188, -292, -292, -292, 167, 132, 144, 147, 153,
+ 170, 171, 175, 124, 261, -292, 134, -292, -292, 1667,
+ -292, -122, 141, 131, 173, -120, -292, 210, 2254, 2317,
+ 2317, 31, 2383, 2317, 2383, -292, 135, -292, 2317, -292,
+ -292, -292, -292, -292, 241, -292, -292, -292, -292, -292,
+ 154, 2125, 126, -292, -292, -292, -292, -292, -292, 2317,
+ 2317, -292, 2317, -292, 2317, 2317, -292, -292, 31, -292,
+ -292, -292, -292, -292, -292, -292, 180, -292, 154, -292,
+ -292, -292, 815, -292, -292, 547, 547, -292, -292, -292,
+ 547, -292, 2, 547, 547, 547, 154, -292, 149, 151,
+ -59, 155, -32, -31, -20, -17, -292, -292, -292, -292,
+ -292, -292, -292, -292, -292, -292, -292, -292, 2383, -292,
+ -292, 1859, 152, -292, 139, 212, 154, 944, -292, 2125,
+ 145, -292, -292, -292, 148, -33, -292, -292, -292, 22,
+ 146, 156, 1294, 163, 172, 160, 162, 1772, 177, 186,
+ -292, -292, -292, -292, -292, -292, -292, 1995, 1995, 1995,
+ -292, -292, -292, -292, -292, 165, -292, -292, -292, 123,
+ -292, -292, -292, 188, 32, 2027, 190, 273, 1995, 120,
+ 13, 137, 15, 143, 159, 179, 181, 246, 247, -56,
+ -292, -292, -67, -292, 189, 195, -292, -292, -292, -292,
+ 497, -292, -292, -292, -292, -292, -292, -292, -292, -292,
+ -292, -292, 31, 154, -292, -292, -292, -57, 1506, -55,
+ -292, -292, -292, -292, -292, -292, -292, -292, 205, -292,
+ 1971, 2125, -292, 135, -63, -292, -292, -292, 1007, -292,
+ 1995, -292, 180, -292, 154, -292, -292, 309, 1581, 1995,
+ -292, -292, -292, -54, 1995, 1917, -292, -292, 44, -292,
+ 1294, -292, -292, 299, 1995, -292, -292, 1995, 213, -292,
+ -292, -292, -292, -292, -292, -292, -292, -292, -292, -292,
+ -292, -292, 1995, -292, 1995, 1995, 1995, 1995, 1995, 1995,
+ 1995, 1995, 1995, 1995, 1995, 1995, 1995, 1995, 1995, 1995,
+ 1995, 1995, 1995, 1995, 1995, -292, -292, -292, -292, 154,
+ 135, 1506, -50, 1506, -292, -292, 1506, -292, -292, 214,
+ 154, 191, 2125, 152, 154, -292, -292, -292, -292, -292,
+ 220, -292, -292, 1917, 46, -292, 71, 218, 154, 224,
+ -292, 656, -292, 223, 218, -292, -292, -292, -292, -292,
+ 120, 120, 13, 13, 137, 137, 137, 137, 15, 15,
+ 143, 159, 179, 181, 246, 247, 25, -292, -292, 152,
+ -292, 1506, -292, -109, -292, -292, -45, 323, -292, -292,
+ 1995, -292, 215, 233, 1294, 216, 219, 1452, -292, -292,
+ 1995, -292, 950, -292, -292, 135, 221, 73, 1995, 1452,
+ 368, -292, -8, -292, 1506, -292, -292, -292, -292, -292,
+ -292, 152, -292, 222, 218, -292, 1294, 1995, 226, -292,
+ -292, 1136, 1294, -1, -292, -292, -292, 28, -292, -292,
+ -292, -292, -292, 1294, -292
+};
+
+ /* YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM.
+ Performed when YYTABLE does not specify something else to do. Zero
+ means the default is an error. */
+static const yytype_int16 yydefact[] =
+{
+ 4, 0, 0, 14, 0, 1, 2, 16, 17, 18,
+ 5, 0, 0, 0, 15, 6, 0, 185, 184, 208,
+ 191, 181, 187, 188, 189, 190, 186, 182, 162, 161,
+ 160, 193, 194, 195, 196, 197, 192, 0, 207, 206,
+ 163, 164, 212, 211, 210, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 183, 156, 284, 282, 3,
+ 281, 0, 0, 114, 123, 0, 133, 138, 168, 170,
+ 167, 0, 165, 166, 169, 145, 202, 204, 171, 205,
+ 20, 280, 111, 286, 0, 309, 310, 311, 312, 283,
+ 0, 0, 0, 191, 187, 188, 190, 23, 24, 163,
+ 164, 143, 168, 173, 165, 169, 144, 172, 0, 7,
+ 8, 9, 10, 12, 13, 11, 0, 209, 0, 22,
+ 21, 108, 0, 285, 112, 123, 123, 129, 130, 131,
+ 123, 115, 0, 123, 123, 123, 0, 109, 16, 18,
+ 139, 0, 191, 187, 188, 190, 175, 287, 301, 303,
+ 305, 307, 176, 174, 146, 177, 294, 178, 168, 180,
+ 288, 0, 203, 179, 0, 0, 0, 0, 215, 0,
+ 0, 155, 154, 153, 150, 0, 148, 152, 158, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 30, 31, 26, 27, 32, 28, 29, 0, 0, 0,
+ 56, 57, 58, 59, 247, 238, 242, 25, 34, 52,
+ 36, 41, 42, 0, 0, 46, 0, 60, 0, 64,
+ 67, 70, 75, 78, 80, 82, 84, 86, 88, 90,
+ 92, 105, 0, 227, 0, 145, 230, 244, 229, 228,
+ 0, 231, 232, 233, 234, 235, 236, 116, 124, 125,
+ 121, 122, 0, 132, 126, 128, 127, 134, 0, 140,
+ 117, 304, 306, 308, 302, 198, 60, 107, 0, 50,
+ 0, 0, 19, 220, 0, 218, 214, 216, 0, 110,
+ 0, 147, 0, 157, 0, 275, 274, 0, 0, 0,
+ 279, 278, 276, 0, 0, 0, 53, 54, 0, 237,
+ 0, 38, 39, 0, 0, 44, 43, 0, 207, 47,
+ 49, 95, 96, 98, 97, 100, 101, 102, 103, 104,
+ 99, 94, 0, 55, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 248, 243, 246, 245, 0,
+ 118, 0, 135, 0, 222, 142, 0, 199, 200, 0,
+ 0, 0, 298, 221, 0, 217, 213, 151, 149, 159,
+ 0, 269, 268, 271, 0, 277, 0, 252, 0, 0,
+ 33, 0, 37, 0, 40, 48, 93, 61, 62, 63,
+ 65, 66, 68, 69, 73, 74, 71, 72, 76, 77,
+ 79, 81, 83, 85, 87, 89, 0, 106, 119, 120,
+ 137, 0, 225, 0, 141, 201, 0, 295, 299, 219,
+ 0, 270, 0, 0, 0, 0, 0, 0, 239, 35,
+ 0, 136, 0, 223, 300, 296, 0, 0, 272, 0,
+ 251, 249, 0, 254, 0, 241, 265, 240, 91, 224,
+ 226, 297, 289, 0, 273, 267, 0, 0, 0, 255,
+ 259, 0, 263, 0, 253, 266, 250, 0, 258, 261,
+ 260, 262, 256, 264, 257
+};
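+
+/* Illustrative note (annotation, not generated by Bison): a default
+ * reduction fires when the lookahead does not select an explicit entry in
+ * yytable. For example, yydefact[0] == 4 above, so state 0 reduces rule 4
+ * by default; rule 4 is an empty production (yyr2[4] == 0 in the yyr2
+ * table further below). A zero entry means the default is an error. */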
+
+ /* YYPGOTO[NTERM-NUM]. */
+static const yytype_int16 yypgoto[] =
+{
+ -292, -292, -292, -292, -292, -292, 14, 9, -292, 53,
+ -292, -292, -292, -292, -292, -292, -292, -292, -292, -292,
+ 157, -292, -107, -104, -97, -89, 42, 55, 45, 48,
+ 56, 52, -292, -136, -152, -292, -143, -237, -5, -2,
+ -292, -292, -292, -292, 271, 236, -292, -292, -292, -292,
+ -90, 1, -292, 116, -292, -292, -292, -292, 317, -38,
+ -292, -9, -135, -13, -292, -292, 197, -292, 230, -137,
+ 40, 37, -268, -292, 114, -153, -291, -292, -292, -36,
+ 343, 105, 118, -292, -292, 34, -292, -292, -53, -292,
+ -51, -292, -292, -292, -292, -292, -292, -292, 350, -292,
+ -46, -292, 338, -292, 51, -292, 352, 355, 356, 361,
+ -292
+};
+
+ /* YYDEFGOTO[NTERM-NUM]. */
+static const yytype_int16 yydefgoto[] =
+{
+ -1, 2, 13, 3, 58, 6, 273, 347, 59, 207,
+ 208, 209, 383, 210, 211, 212, 213, 214, 215, 216,
+ 217, 218, 219, 220, 221, 222, 223, 224, 225, 226,
+ 227, 228, 229, 230, 231, 322, 232, 268, 233, 234,
+ 62, 63, 64, 250, 131, 132, 133, 251, 65, 66,
+ 67, 102, 175, 176, 177, 69, 179, 70, 71, 72,
+ 73, 105, 162, 269, 76, 77, 78, 79, 167, 168,
+ 274, 275, 355, 413, 236, 237, 238, 239, 300, 446,
+ 447, 240, 241, 242, 441, 379, 243, 443, 460, 461,
+ 462, 463, 244, 373, 422, 423, 245, 246, 80, 81,
+ 82, 83, 84, 436, 361, 362, 85, 86, 87, 88,
+ 89
+};
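+
+/* Illustrative note (annotation, not generated by Bison): yypgoto and
+ * yydefgoto drive the goto that follows every reduction. A sketch of the
+ * computation, matching the standard yacc.c skeleton:
+ *
+ *   lhs = yyr1[yyn] - YYNTOKENS;   // nonterminal just reduced to
+ *   i = yypgoto[lhs] + *yyssp;     // *yyssp = state now exposed on top
+ *   yystate = (0 <= i && i <= YYLAST && yycheck[i] == *yyssp)
+ *             ? yytable[i] : yydefgoto[lhs];
+ */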
+
+ /* YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If
+ positive, shift that token. If negative, reduce the rule whose
+ number is the opposite. If YYTABLE_NINF, syntax error. */
+static const yytype_int16 yytable[] =
+{
+ 75, 166, 104, 104, 74, 259, 342, 19, 60, 457,
+ 458, 61, -293, -290, 68, 14, 457, 458, 11, 7,
+ 8, 9, 147, 136, -291, 267, 16, -292, 160, 287,
+ 277, 104, 104, 359, 432, 104, 19, 121, 122, 137,
+ 104, 37, 38, 367, 293, 39, 75, 7, 8, 9,
+ 74, 92, 433, 104, 60, 134, 298, 61, 154, 74,
+ 68, 104, 104, 309, 104, 74, 104, 104, 119, 68,
+ 37, 38, 331, 332, 39, 158, 344, 166, 75, 166,
+ 364, 140, 161, 410, 161, 412, 270, 348, 414, 344,
+ 10, 270, 345, 101, 106, 170, 365, 53, 364, 258,
+ 343, 351, 1, 356, 165, 375, 354, 281, 411, 235,
+ 282, 4, 147, 74, 434, 5, 134, 134, 54, 253,
+ 141, 134, 352, 158, 134, 134, 134, 261, 262, 104,
+ 174, 104, 178, 252, 267, 15, 445, 91, 363, 263,
+ 12, 277, 264, 431, 267, 90, 374, 54, 445, 74,
+ 257, 376, 377, 459, 75, 385, 75, 327, 328, 158,
+ 472, 384, 283, -293, 450, 284, 333, 334, 344, 235,
+ 386, 344, 306, 74, -290, 307, 464, 126, 301, 302,
+ -291, 360, 430, 158, 380, 474, 424, 344, 166, 344,
+ 329, 330, 407, 127, 128, 129, 7, 8, 9, 354,
+ 406, 354, 335, 336, 354, 378, 31, 32, 33, 34,
+ 35, 425, -292, 453, 344, 409, 344, 109, 171, 303,
+ 390, 391, 7, 8, 9, 392, 393, 235, 348, 110,
+ 377, 74, 111, 104, 394, 395, 396, 397, 112, 349,
+ 104, 158, 108, 42, 43, 44, 398, 399, 130, 42,
+ 43, 44, 138, 8, 139, 113, 114, 104, 75, 354,
+ 115, 135, -51, 116, 304, 75, 117, 350, 324, 325,
+ 326, 440, 360, 118, 125, 235, 161, 437, 448, 74,
+ 354, 124, 235, 378, 172, 164, 169, 235, -23, 158,
+ -24, 74, 354, 270, 260, 454, 174, 272, 369, 271,
+ 451, 158, 288, 466, 279, 285, 280, 340, 469, 471,
+ 341, 289, 337, -113, 467, 286, 294, 173, 266, 290,
+ 471, 291, 135, 135, 104, 295, 299, 135, 305, 310,
+ 135, 135, 135, 338, -50, 104, 339, 311, 312, 313,
+ 314, 315, 316, 317, 318, 319, 320, 357, 121, 75,
+ 370, 382, 417, -45, 296, 297, 415, 103, 107, 420,
+ 235, 344, 248, 408, 427, 429, 249, 435, 235, 254,
+ 255, 256, 74, 439, 438, 323, 442, 444, 456, 400,
+ 452, 465, 158, 468, 402, 146, 152, 153, 403, 155,
+ 157, 159, 426, 401, 405, 163, 247, 404, 368, 278,
+ 416, 419, 371, 455, 123, 381, 372, 421, 470, 120,
+ 156, 235, 473, 418, 235, 74, 103, 107, 74, 146,
+ 148, 155, 159, 149, 150, 158, 235, 266, 158, 151,
+ 74, 321, 0, 0, 0, 0, 0, 266, 0, 0,
+ 158, 0, 0, 235, 0, 0, 0, 74, 235, 235,
+ 0, 0, 74, 74, 0, 0, 0, 158, 0, 0,
+ 235, 0, 158, 158, 74, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 158, 146, 0, 0, 0, 0,
+ 0, 387, 388, 389, 266, 266, 266, 266, 266, 266,
+ 266, 266, 266, 266, 266, 266, 266, 266, 266, 266,
+ 17, 18, 19, 180, 20, 181, 182, 0, 183, 184,
+ 185, 186, 187, 188, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 0, 0, 0, 0,
+ 31, 32, 33, 34, 35, 36, 37, 38, 189, 97,
+ 39, 98, 190, 191, 192, 193, 194, 195, 196, 0,
+ 0, 126, 197, 198, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 127, 128, 129,
+ 0, 40, 41, 42, 43, 44, 0, 45, 0, 12,
+ 31, 32, 33, 34, 35, 0, 0, 0, 0, 0,
+ 0, 0, 53, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 54, 0, 0, 0, 0, 0, 0,
+ 0, 0, 130, 42, 43, 44, 0, 55, 56, 0,
+ 0, 0, 0, 0, 0, 0, 199, 0, 0, 0,
+ 0, 200, 201, 202, 203, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 204, 205, 346, 17,
+ 18, 19, 180, 20, 181, 182, 0, 183, 184, 185,
+ 186, 187, 188, 0, 0, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 0, 0, 0, 0, 31,
+ 32, 33, 34, 35, 36, 37, 38, 189, 97, 39,
+ 98, 190, 191, 192, 193, 194, 195, 196, 0, 0,
+ 0, 197, 198, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 40, 41, 42, 43, 44, 0, 45, 0, 12, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 53, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 55, 56, 0, 0,
+ 0, 0, 0, 0, 0, 199, 0, 0, 0, 0,
+ 200, 201, 202, 203, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 204, 205, 428, 17, 18,
+ 19, 180, 20, 181, 182, 0, 183, 184, 185, 186,
+ 187, 188, 0, 0, 21, 22, 23, 24, 25, 26,
+ 27, 28, 29, 30, 0, 0, 0, 0, 31, 32,
+ 33, 34, 35, 36, 37, 38, 189, 97, 39, 98,
+ 190, 191, 192, 193, 194, 195, 196, 0, 0, 0,
+ 197, 198, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 40,
+ 41, 42, 43, 44, 0, 45, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 53, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 54, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 55, 56, 17, 18, 19,
+ 0, 93, 0, 0, 199, 19, 0, 0, 0, 200,
+ 201, 202, 203, 21, 94, 95, 24, 96, 26, 27,
+ 28, 29, 30, 0, 204, 205, 206, 31, 32, 33,
+ 34, 35, 36, 37, 38, 0, 0, 39, 0, 37,
+ 38, 0, 97, 39, 98, 190, 191, 192, 193, 194,
+ 195, 196, 0, 0, 0, 197, 198, 0, 0, 0,
+ 17, 18, 19, 0, 93, 0, 0, 0, 99, 100,
+ 42, 43, 44, 0, 0, 0, 21, 94, 95, 24,
+ 96, 26, 27, 28, 29, 30, 0, 0, 0, 53,
+ 31, 32, 33, 34, 35, 36, 37, 38, 0, 0,
+ 39, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 0, 0, 0, 54, 0, 0, 0,
+ 0, 0, 0, 0, 55, 56, 0, 0, 0, 0,
+ 0, 99, 100, 42, 43, 44, 0, 0, 0, 199,
+ 0, 0, 0, 0, 200, 201, 202, 203, 0, 0,
+ 0, 0, 53, 0, 0, 276, 0, 0, 0, 0,
+ 353, 449, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 54, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 55, 56, 17,
+ 18, 19, 180, 20, 181, 182, 0, 183, 184, 185,
+ 186, 187, 188, 457, 458, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 0, 0, 0, 366, 31,
+ 32, 33, 34, 35, 36, 37, 38, 189, 97, 39,
+ 98, 190, 191, 192, 193, 194, 195, 196, 0, 0,
+ 0, 197, 198, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 40, 41, 42, 43, 44, 0, 45, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 53, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 55, 56, 0, 0,
+ 0, 0, 0, 0, 0, 199, 0, 0, 0, 0,
+ 200, 201, 202, 203, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 204, 205, 17, 18, 19,
+ 180, 20, 181, 182, 0, 183, 184, 185, 186, 187,
+ 188, 0, 0, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 0, 0, 0, 0, 31, 32, 33,
+ 34, 35, 36, 37, 38, 189, 97, 39, 98, 190,
+ 191, 192, 193, 194, 195, 196, 0, 0, 0, 197,
+ 198, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 40, 41,
+ 42, 43, 44, 0, 45, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 53,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 54, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 55, 56, 0, 0, 0, 0,
+ 0, 0, 0, 199, 0, 0, 0, 0, 200, 201,
+ 202, 203, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 204, 205, 17, 18, 19, 180, 20,
+ 181, 182, 0, 183, 184, 185, 186, 187, 188, 0,
+ 0, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ 30, 0, 0, 0, 0, 31, 32, 33, 34, 35,
+ 36, 37, 38, 189, 97, 39, 98, 190, 191, 192,
+ 193, 194, 195, 196, 0, 0, 0, 197, 198, 0,
+ 0, 19, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 40, 41, 42, 43,
+ 44, 0, 45, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 37, 38, 53, 97, 39,
+ 98, 190, 191, 192, 193, 194, 195, 196, 0, 0,
+ 0, 197, 198, 0, 0, 0, 0, 0, 54, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 55, 56, 17, 18, 19, 0, 20, 0,
+ 0, 199, 0, 0, 0, 0, 200, 201, 202, 203,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
+ 0, 204, 122, 0, 31, 32, 33, 34, 35, 36,
+ 37, 38, 54, 97, 39, 98, 190, 191, 192, 193,
+ 194, 195, 196, 0, 0, 0, 197, 198, 0, 0,
+ 0, 0, 0, 0, 0, 199, 0, 0, 0, 0,
+ 200, 201, 202, 203, 0, 40, 41, 42, 43, 44,
+ 0, 45, 0, 0, 0, 0, 353, 0, 0, 0,
+ 17, 18, 19, 0, 20, 0, 53, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 21, 22, 23, 24,
+ 25, 26, 27, 28, 29, 30, 0, 54, 0, 0,
+ 31, 32, 33, 34, 35, 36, 37, 38, 0, 0,
+ 39, 55, 56, 0, 0, 0, 0, 0, 0, 0,
+ 199, 0, 0, 0, 0, 200, 201, 202, 203, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 204, 40, 41, 42, 43, 44, 0, 45, 0, 12,
+ 0, 0, 0, 0, 0, 46, 47, 48, 49, 50,
+ 51, 52, 53, 0, 0, 0, 0, 0, 0, 17,
+ 18, 19, 0, 20, 0, 0, 0, 19, 0, 0,
+ 0, 0, 0, 54, 0, 21, 22, 23, 24, 25,
+ 26, 27, 28, 29, 30, 0, 0, 55, 56, 31,
+ 32, 33, 34, 35, 36, 37, 38, 0, 0, 39,
+ 0, 37, 38, 0, 97, 39, 98, 190, 191, 192,
+ 193, 194, 195, 196, 0, 0, 57, 197, 198, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 40, 41, 42, 43, 44, 0, 45, 0, 0, 0,
+ 0, 0, 0, 0, 46, 47, 48, 49, 50, 51,
+ 52, 53, 0, 0, 19, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 54, 0, 0, 0, 0, 0, 54, 0,
+ 0, 0, 0, 0, 0, 0, 55, 56, 37, 38,
+ 0, 97, 39, 98, 190, 191, 192, 193, 194, 195,
+ 196, 199, 0, 0, 197, 198, 200, 201, 202, 203,
+ 17, 18, 19, 0, 93, 57, 0, 0, 0, 0,
+ 0, 292, 0, 0, 0, 0, 21, 94, 95, 24,
+ 96, 26, 27, 28, 29, 30, 0, 0, 0, 0,
+ 31, 32, 33, 34, 35, 36, 37, 38, 0, 97,
+ 39, 98, 190, 191, 192, 193, 194, 195, 196, 0,
+ 0, 0, 197, 198, 0, 54, 19, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 99, 100, 42, 43, 44, 0, 0, 199, 0,
+ 19, 265, 0, 200, 201, 202, 203, 0, 0, 0,
+ 37, 38, 53, 97, 39, 98, 190, 191, 192, 193,
+ 194, 195, 196, 0, 0, 0, 197, 198, 0, 0,
+ 0, 0, 19, 54, 37, 38, 0, 97, 39, 98,
+ 190, 191, 192, 193, 194, 195, 196, 55, 56, 0,
+ 197, 198, 0, 0, 0, 0, 199, 0, 0, 0,
+ 0, 200, 201, 202, 203, 0, 37, 308, 0, 97,
+ 39, 98, 190, 191, 192, 193, 194, 195, 196, 0,
+ 0, 0, 197, 198, 0, 0, 0, 54, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 199, 54, 0, 358, 0, 200, 201, 202, 203, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 17, 18,
+ 19, 0, 93, 0, 199, 0, 0, 0, 0, 200,
+ 201, 202, 203, 54, 21, 94, 95, 24, 96, 26,
+ 27, 28, 29, 30, 0, 0, 0, 0, 31, 32,
+ 33, 34, 35, 36, 37, 38, 199, 0, 39, 0,
+ 0, 200, 201, 202, 203, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 17, 18, 0, 0, 93, 0, 0, 0, 99,
+ 100, 42, 43, 44, 0, 0, 0, 21, 94, 95,
+ 24, 96, 26, 27, 28, 29, 30, 0, 0, 0,
+ 53, 31, 32, 33, 34, 35, 36, 0, 0, 0,
+ 97, 0, 98, 0, 0, 0, 0, 0, 0, 0,
+ 0, 54, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 55, 56, 17, 18, 0,
+ 0, 142, 99, 100, 42, 43, 44, 0, 0, 0,
+ 0, 0, 0, 21, 143, 144, 24, 145, 26, 27,
+ 28, 29, 30, 53, 0, 0, 0, 31, 32, 33,
+ 34, 35, 36, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 55, 56,
+ 17, 18, 0, 0, 93, 0, 0, 0, 99, 100,
+ 42, 43, 44, 0, 0, 0, 21, 94, 95, 24,
+ 96, 26, 27, 28, 29, 30, 0, 0, 0, 53,
+ 31, 32, 33, 34, 35, 36, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 55, 56, 17, 18, 0, 0,
+ 20, 99, 100, 42, 43, 44, 0, 0, 0, 0,
+ 0, 0, 21, 22, 23, 24, 25, 26, 27, 28,
+ 29, 30, 53, 0, 0, 0, 31, 32, 33, 34,
+ 35, 36, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 55, 56, 0,
+ 0, 0, 0, 0, 0, 0, 0, 99, 100, 42,
+ 43, 44, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 53, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 55, 56
+};
+
+static const yytype_int16 yycheck[] =
+{
+ 13, 91, 40, 41, 13, 140, 62, 5, 13, 17,
+ 18, 13, 44, 44, 13, 6, 17, 18, 4, 42,
+ 43, 44, 68, 143, 44, 161, 12, 44, 74, 182,
+ 167, 69, 70, 270, 143, 73, 5, 159, 160, 159,
+ 78, 39, 40, 280, 187, 43, 59, 42, 43, 44,
+ 59, 37, 161, 91, 59, 64, 199, 59, 71, 68,
+ 59, 99, 100, 215, 102, 74, 104, 105, 59, 68,
+ 39, 40, 57, 58, 43, 74, 143, 167, 91, 169,
+ 143, 67, 141, 351, 141, 353, 141, 240, 356, 143,
+ 85, 141, 159, 40, 41, 108, 159, 95, 143, 158,
+ 156, 158, 81, 158, 90, 159, 258, 140, 158, 122,
+ 143, 47, 158, 122, 159, 0, 125, 126, 116, 132,
+ 67, 130, 257, 122, 133, 134, 135, 159, 159, 167,
+ 116, 169, 118, 132, 270, 85, 427, 160, 273, 159,
+ 82, 278, 159, 411, 280, 84, 289, 116, 439, 158,
+ 136, 294, 295, 161, 167, 307, 169, 144, 145, 158,
+ 161, 304, 140, 44, 432, 143, 151, 152, 143, 182,
+ 322, 143, 140, 182, 44, 143, 444, 4, 55, 56,
+ 44, 271, 157, 182, 140, 157, 140, 143, 278, 143,
+ 53, 54, 344, 20, 21, 22, 42, 43, 44, 351,
+ 343, 353, 59, 60, 356, 295, 33, 34, 35, 36,
+ 37, 140, 44, 140, 143, 350, 143, 85, 38, 96,
+ 327, 328, 42, 43, 44, 329, 330, 240, 381, 85,
+ 373, 240, 85, 271, 331, 332, 333, 334, 85, 252,
+ 278, 240, 45, 76, 77, 78, 335, 336, 75, 76,
+ 77, 78, 42, 43, 44, 85, 85, 295, 271, 411,
+ 85, 64, 139, 139, 141, 278, 5, 253, 148, 149,
+ 150, 424, 362, 139, 143, 288, 141, 420, 430, 288,
+ 432, 140, 295, 373, 104, 44, 160, 300, 139, 288,
+ 139, 300, 444, 141, 139, 438, 282, 85, 284, 160,
+ 435, 300, 139, 456, 159, 159, 158, 61, 461, 462,
+ 63, 139, 153, 140, 457, 159, 139, 137, 161, 159,
+ 473, 159, 125, 126, 362, 139, 161, 130, 140, 139,
+ 133, 134, 135, 154, 139, 373, 155, 64, 65, 66,
+ 67, 68, 69, 70, 71, 72, 73, 142, 159, 362,
+ 41, 52, 161, 140, 197, 198, 142, 40, 41, 139,
+ 373, 143, 126, 349, 140, 142, 130, 44, 381, 133,
+ 134, 135, 381, 140, 159, 218, 160, 158, 10, 337,
+ 159, 159, 381, 157, 339, 68, 69, 70, 340, 72,
+ 73, 74, 378, 338, 342, 78, 125, 341, 282, 169,
+ 360, 364, 288, 439, 61, 300, 288, 373, 461, 59,
+ 72, 424, 463, 362, 427, 424, 99, 100, 427, 102,
+ 68, 104, 105, 68, 68, 424, 439, 270, 427, 68,
+ 439, 158, -1, -1, -1, -1, -1, 280, -1, -1,
+ 439, -1, -1, 456, -1, -1, -1, 456, 461, 462,
+ -1, -1, 461, 462, -1, -1, -1, 456, -1, -1,
+ 473, -1, 461, 462, 473, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 473, 158, -1, -1, -1, -1,
+ -1, 324, 325, 326, 327, 328, 329, 330, 331, 332,
+ 333, 334, 335, 336, 337, 338, 339, 340, 341, 342,
+ 3, 4, 5, 6, 7, 8, 9, -1, 11, 12,
+ 13, 14, 15, 16, -1, -1, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, -1, -1, -1, -1,
+ 33, 34, 35, 36, 37, 38, 39, 40, 41, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, -1,
+ -1, 4, 55, 56, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 20, 21, 22,
+ -1, 74, 75, 76, 77, 78, -1, 80, -1, 82,
+ 33, 34, 35, 36, 37, -1, -1, -1, -1, -1,
+ -1, -1, 95, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 116, -1, -1, -1, -1, -1, -1,
+ -1, -1, 75, 76, 77, 78, -1, 130, 131, -1,
+ -1, -1, -1, -1, -1, -1, 139, -1, -1, -1,
+ -1, 144, 145, 146, 147, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 159, 160, 161, 3,
+ 4, 5, 6, 7, 8, 9, -1, 11, 12, 13,
+ 14, 15, 16, -1, -1, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, -1, -1, -1, -1, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, -1, -1,
+ -1, 55, 56, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 74, 75, 76, 77, 78, -1, 80, -1, 82, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 95, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 116, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 130, 131, -1, -1,
+ -1, -1, -1, -1, -1, 139, -1, -1, -1, -1,
+ 144, 145, 146, 147, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 159, 160, 161, 3, 4,
+ 5, 6, 7, 8, 9, -1, 11, 12, 13, 14,
+ 15, 16, -1, -1, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, -1, -1, -1, -1, 33, 34,
+ 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, -1, -1, -1,
+ 55, 56, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 74,
+ 75, 76, 77, 78, -1, 80, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 95, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 116, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 130, 131, 3, 4, 5,
+ -1, 7, -1, -1, 139, 5, -1, -1, -1, 144,
+ 145, 146, 147, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, -1, 159, 160, 161, 33, 34, 35,
+ 36, 37, 38, 39, 40, -1, -1, 43, -1, 39,
+ 40, -1, 42, 43, 44, 45, 46, 47, 48, 49,
+ 50, 51, -1, -1, -1, 55, 56, -1, -1, -1,
+ 3, 4, 5, -1, 7, -1, -1, -1, 74, 75,
+ 76, 77, 78, -1, -1, -1, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, -1, -1, -1, 95,
+ 33, 34, 35, 36, 37, 38, 39, 40, -1, -1,
+ 43, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 116, -1, -1, -1, -1, -1, 116, -1, -1, -1,
+ -1, -1, -1, -1, 130, 131, -1, -1, -1, -1,
+ -1, 74, 75, 76, 77, 78, -1, -1, -1, 139,
+ -1, -1, -1, -1, 144, 145, 146, 147, -1, -1,
+ -1, -1, 95, -1, -1, 161, -1, -1, -1, -1,
+ 160, 161, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 116, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 130, 131, 3,
+ 4, 5, 6, 7, 8, 9, -1, 11, 12, 13,
+ 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, -1, -1, -1, 161, 33,
+ 34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, -1, -1,
+ -1, 55, 56, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 74, 75, 76, 77, 78, -1, 80, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 95, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 116, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 130, 131, -1, -1,
+ -1, -1, -1, -1, -1, 139, -1, -1, -1, -1,
+ 144, 145, 146, 147, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 159, 160, 3, 4, 5,
+ 6, 7, 8, 9, -1, 11, 12, 13, 14, 15,
+ 16, -1, -1, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, -1, -1, -1, -1, 33, 34, 35,
+ 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
+ 46, 47, 48, 49, 50, 51, -1, -1, -1, 55,
+ 56, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 74, 75,
+ 76, 77, 78, -1, 80, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, 95,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 116, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 130, 131, -1, -1, -1, -1,
+ -1, -1, -1, 139, -1, -1, -1, -1, 144, 145,
+ 146, 147, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 159, 160, 3, 4, 5, 6, 7,
+ 8, 9, -1, 11, 12, 13, 14, 15, 16, -1,
+ -1, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, -1, -1, -1, -1, 33, 34, 35, 36, 37,
+ 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, -1, -1, -1, 55, 56, -1,
+ -1, 5, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 74, 75, 76, 77,
+ 78, -1, 80, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 39, 40, 95, 42, 43,
+ 44, 45, 46, 47, 48, 49, 50, 51, -1, -1,
+ -1, 55, 56, -1, -1, -1, -1, -1, 116, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 130, 131, 3, 4, 5, -1, 7, -1,
+ -1, 139, -1, -1, -1, -1, 144, 145, 146, 147,
+ 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
+ -1, 159, 160, -1, 33, 34, 35, 36, 37, 38,
+ 39, 40, 116, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, -1, -1, -1, 55, 56, -1, -1,
+ -1, -1, -1, -1, -1, 139, -1, -1, -1, -1,
+ 144, 145, 146, 147, -1, 74, 75, 76, 77, 78,
+ -1, 80, -1, -1, -1, -1, 160, -1, -1, -1,
+ 3, 4, 5, -1, 7, -1, 95, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, -1, 116, -1, -1,
+ 33, 34, 35, 36, 37, 38, 39, 40, -1, -1,
+ 43, 130, 131, -1, -1, -1, -1, -1, -1, -1,
+ 139, -1, -1, -1, -1, 144, 145, 146, 147, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 159, 74, 75, 76, 77, 78, -1, 80, -1, 82,
+ -1, -1, -1, -1, -1, 88, 89, 90, 91, 92,
+ 93, 94, 95, -1, -1, -1, -1, -1, -1, 3,
+ 4, 5, -1, 7, -1, -1, -1, 5, -1, -1,
+ -1, -1, -1, 116, -1, 19, 20, 21, 22, 23,
+ 24, 25, 26, 27, 28, -1, -1, 130, 131, 33,
+ 34, 35, 36, 37, 38, 39, 40, -1, -1, 43,
+ -1, 39, 40, -1, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, -1, -1, 159, 55, 56, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 74, 75, 76, 77, 78, -1, 80, -1, -1, -1,
+ -1, -1, -1, -1, 88, 89, 90, 91, 92, 93,
+ 94, 95, -1, -1, 5, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, 116, -1, -1, -1, -1, -1, 116, -1,
+ -1, -1, -1, -1, -1, -1, 130, 131, 39, 40,
+ -1, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ 51, 139, -1, -1, 55, 56, 144, 145, 146, 147,
+ 3, 4, 5, -1, 7, 159, -1, -1, -1, -1,
+ -1, 159, -1, -1, -1, -1, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, -1, -1, -1, -1,
+ 33, 34, 35, 36, 37, 38, 39, 40, -1, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, -1,
+ -1, -1, 55, 56, -1, 116, 5, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 74, 75, 76, 77, 78, -1, -1, 139, -1,
+ 5, 142, -1, 144, 145, 146, 147, -1, -1, -1,
+ 39, 40, 95, 42, 43, 44, 45, 46, 47, 48,
+ 49, 50, 51, -1, -1, -1, 55, 56, -1, -1,
+ -1, -1, 5, 116, 39, 40, -1, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 130, 131, -1,
+ 55, 56, -1, -1, -1, -1, 139, -1, -1, -1,
+ -1, 144, 145, 146, 147, -1, 39, 40, -1, 42,
+ 43, 44, 45, 46, 47, 48, 49, 50, 51, -1,
+ -1, -1, 55, 56, -1, -1, -1, 116, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ 139, 116, -1, 142, -1, 144, 145, 146, 147, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 3, 4,
+ 5, -1, 7, -1, 139, -1, -1, -1, -1, 144,
+ 145, 146, 147, 116, 19, 20, 21, 22, 23, 24,
+ 25, 26, 27, 28, -1, -1, -1, -1, 33, 34,
+ 35, 36, 37, 38, 39, 40, 139, -1, 43, -1,
+ -1, 144, 145, 146, 147, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, 3, 4, -1, -1, 7, -1, -1, -1, 74,
+ 75, 76, 77, 78, -1, -1, -1, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, -1, -1, -1,
+ 95, 33, 34, 35, 36, 37, 38, -1, -1, -1,
+ 42, -1, 44, -1, -1, -1, -1, -1, -1, -1,
+ -1, 116, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 130, 131, 3, 4, -1,
+ -1, 7, 74, 75, 76, 77, 78, -1, -1, -1,
+ -1, -1, -1, 19, 20, 21, 22, 23, 24, 25,
+ 26, 27, 28, 95, -1, -1, -1, 33, 34, 35,
+ 36, 37, 38, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 130, 131,
+ 3, 4, -1, -1, 7, -1, -1, -1, 74, 75,
+ 76, 77, 78, -1, -1, -1, 19, 20, 21, 22,
+ 23, 24, 25, 26, 27, 28, -1, -1, -1, 95,
+ 33, 34, 35, 36, 37, 38, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, 130, 131, 3, 4, -1, -1,
+ 7, 74, 75, 76, 77, 78, -1, -1, -1, -1,
+ -1, -1, 19, 20, 21, 22, 23, 24, 25, 26,
+ 27, 28, 95, -1, -1, -1, 33, 34, 35, 36,
+ 37, 38, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, 130, 131, -1,
+ -1, -1, -1, -1, -1, -1, -1, 74, 75, 76,
+ 77, 78, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, 95, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, 130, 131
+};
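+
+/* Illustrative note (annotation, not generated by Bison): yycheck guards
+ * the compressed yytable. The lookahead dispatch at the yybackup label
+ * below amounts to:
+ *
+ *   yyn = yypact[yystate];                    // row offset, or "default"
+ *   if (yypact_value_is_default (yyn))
+ *     use yydefact[yystate];
+ *   else if (0 <= yyn + yytoken && yyn + yytoken <= YYLAST
+ *            && yycheck[yyn + yytoken] == yytoken)
+ *     act = yytable[yyn + yytoken];           // >0: shift, <0: reduce -act
+ *   else
+ *     use yydefact[yystate];
+ */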
+
+ /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
+ symbol of state STATE-NUM. */
+static const yytype_int16 yystos[] =
+{
+ 0, 81, 163, 165, 47, 0, 167, 42, 43, 44,
+ 85, 168, 82, 164, 169, 85, 168, 3, 4, 5,
+ 7, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 33, 34, 35, 36, 37, 38, 39, 40, 43,
+ 74, 75, 76, 77, 78, 80, 88, 89, 90, 91,
+ 92, 93, 94, 95, 116, 130, 131, 159, 166, 170,
+ 200, 201, 202, 203, 204, 210, 211, 212, 213, 217,
+ 219, 220, 221, 222, 223, 225, 226, 227, 228, 229,
+ 260, 261, 262, 263, 264, 268, 269, 270, 271, 272,
+ 84, 160, 168, 7, 20, 21, 23, 42, 44, 74,
+ 75, 171, 213, 220, 221, 223, 171, 220, 228, 85,
+ 85, 85, 85, 85, 85, 85, 139, 5, 139, 169,
+ 260, 159, 160, 242, 140, 143, 4, 20, 21, 22,
+ 75, 206, 207, 208, 223, 228, 143, 159, 42, 44,
+ 168, 171, 7, 20, 21, 23, 220, 262, 268, 269,
+ 270, 271, 220, 220, 225, 220, 264, 220, 213, 220,
+ 262, 141, 224, 220, 44, 168, 212, 230, 231, 160,
+ 225, 38, 104, 137, 168, 214, 215, 216, 168, 218,
+ 6, 8, 9, 11, 12, 13, 14, 15, 16, 41,
+ 45, 46, 47, 48, 49, 50, 51, 55, 56, 139,
+ 144, 145, 146, 147, 159, 160, 161, 171, 172, 173,
+ 175, 176, 177, 178, 179, 180, 181, 182, 183, 184,
+ 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
+ 195, 196, 198, 200, 201, 225, 236, 237, 238, 239,
+ 243, 244, 245, 248, 254, 258, 259, 206, 207, 207,
+ 205, 209, 213, 225, 207, 207, 207, 168, 158, 224,
+ 139, 159, 159, 159, 159, 142, 182, 195, 199, 225,
+ 141, 160, 85, 168, 232, 233, 161, 231, 230, 159,
+ 158, 140, 143, 140, 143, 159, 159, 237, 139, 139,
+ 159, 159, 159, 198, 139, 139, 182, 182, 198, 161,
+ 240, 55, 56, 96, 141, 140, 140, 143, 40, 196,
+ 139, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 158, 197, 182, 148, 149, 150, 144, 145, 53,
+ 54, 57, 58, 151, 152, 59, 60, 153, 154, 155,
+ 61, 63, 62, 156, 143, 159, 161, 169, 237, 225,
+ 168, 158, 224, 160, 196, 234, 158, 142, 142, 199,
+ 212, 266, 267, 224, 143, 159, 161, 199, 215, 168,
+ 41, 236, 244, 255, 198, 159, 198, 198, 212, 247,
+ 140, 243, 52, 174, 198, 196, 196, 182, 182, 182,
+ 184, 184, 185, 185, 186, 186, 186, 186, 187, 187,
+ 188, 189, 190, 191, 192, 193, 198, 196, 168, 224,
+ 234, 158, 234, 235, 234, 142, 232, 161, 266, 233,
+ 139, 247, 256, 257, 140, 140, 168, 140, 161, 142,
+ 157, 234, 143, 161, 159, 44, 265, 198, 159, 140,
+ 237, 246, 160, 249, 158, 238, 241, 242, 196, 161,
+ 234, 224, 159, 140, 198, 241, 10, 17, 18, 161,
+ 250, 251, 252, 253, 234, 159, 237, 198, 157, 237,
+ 250, 237, 161, 252, 157
+};
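+
+/* Illustrative note (annotation, not generated by Bison): yystos[s] is the
+ * grammar symbol whose shift or goto enters state s. yy_reduce_print below
+ * uses it to name the symbols being popped during a reduction, and the
+ * error-recovery code uses it to destruct the values on the stack. */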
+
+ /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
+static const yytype_int16 yyr1[] =
+{
+ 0, 162, 164, 163, 165, 165, 165, 166, 166, 166,
+ 166, 166, 166, 166, 167, 167, 168, 168, 168, 169,
+ 170, 170, 170, 171, 171, 172, 172, 172, 172, 172,
+ 172, 172, 172, 172, 173, 173, 173, 173, 173, 173,
+ 174, 175, 176, 177, 177, 178, 178, 179, 179, 180,
+ 181, 181, 182, 182, 182, 182, 183, 183, 183, 183,
+ 184, 184, 184, 184, 185, 185, 185, 186, 186, 186,
+ 187, 187, 187, 187, 187, 188, 188, 188, 189, 189,
+ 190, 190, 191, 191, 192, 192, 193, 193, 194, 194,
+ 195, 195, 196, 196, 197, 197, 197, 197, 197, 197,
+ 197, 197, 197, 197, 197, 198, 198, 199, 200, 200,
+ 200, 200, 201, 202, 202, 203, 203, 204, 205, 205,
+ 205, 206, 206, 207, 207, 207, 207, 207, 207, 208,
+ 208, 208, 209, 210, 210, 210, 210, 210, 211, 211,
+ 211, 211, 211, 211, 211, 212, 212, 213, 214, 214,
+ 215, 215, 215, 216, 216, 216, 217, 217, 218, 218,
+ 219, 219, 219, 220, 220, 220, 220, 220, 220, 220,
+ 220, 220, 220, 220, 220, 220, 220, 220, 220, 220,
+ 220, 221, 221, 221, 222, 222, 222, 222, 222, 222,
+ 222, 222, 222, 223, 223, 223, 223, 223, 224, 224,
+ 224, 224, 225, 225, 226, 226, 226, 227, 227, 227,
+ 228, 228, 228, 229, 229, 230, 230, 231, 232, 232,
+ 233, 233, 234, 234, 234, 235, 235, 236, 237, 237,
+ 238, 238, 238, 238, 238, 238, 238, 239, 240, 239,
+ 241, 241, 242, 242, 243, 243, 243, 244, 244, 245,
+ 246, 246, 247, 247, 248, 249, 249, 250, 250, 251,
+ 251, 252, 252, 253, 253, 254, 254, 254, 255, 255,
+ 256, 256, 257, 257, 258, 258, 258, 258, 258, 259,
+ 260, 260, 260, 260, 260, 261, 262, 262, 262, 263,
+ 264, 264, 264, 264, 264, 265, 265, 265, 266, 266,
+ 267, 268, 268, 269, 269, 270, 270, 271, 271, 272,
+ 272, 272, 272
+};
+
+ /* YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. */
+static const yytype_int8 yyr2[] =
+{
+ 0, 2, 0, 4, 0, 3, 4, 2, 2, 2,
+ 2, 2, 2, 2, 0, 2, 1, 1, 1, 5,
+ 1, 2, 2, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 3, 1, 4, 1, 3, 2, 2,
+ 1, 1, 1, 2, 2, 2, 1, 2, 3, 2,
+ 1, 1, 1, 2, 2, 2, 1, 1, 1, 1,
+ 1, 3, 3, 3, 1, 3, 3, 1, 3, 3,
+ 1, 3, 3, 3, 3, 1, 3, 3, 1, 3,
+ 1, 3, 1, 3, 1, 3, 1, 3, 1, 3,
+ 1, 5, 1, 3, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 3, 1, 2, 2,
+ 4, 1, 2, 1, 1, 2, 3, 3, 2, 3,
+ 3, 2, 2, 0, 2, 2, 2, 2, 2, 1,
+ 1, 1, 1, 1, 3, 4, 6, 5, 1, 2,
+ 3, 5, 4, 2, 2, 1, 2, 4, 1, 3,
+ 1, 3, 1, 1, 1, 1, 1, 4, 1, 3,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 2, 3,
+ 3, 4, 1, 2, 1, 1, 1, 1, 1, 2,
+ 1, 1, 1, 5, 4, 1, 2, 3, 1, 3,
+ 1, 2, 1, 3, 4, 1, 3, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 2, 0, 4,
+ 1, 1, 2, 3, 1, 2, 2, 1, 2, 5,
+ 3, 1, 1, 4, 5, 2, 3, 3, 2, 1,
+ 2, 2, 2, 1, 2, 5, 7, 6, 1, 1,
+ 1, 0, 2, 3, 2, 2, 2, 3, 2, 2,
+ 1, 1, 1, 1, 1, 2, 1, 2, 2, 7,
+ 1, 1, 1, 1, 2, 0, 1, 2, 1, 2,
+ 3, 2, 3, 2, 3, 2, 3, 2, 3, 1,
+ 1, 1, 1
+};
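+
+/* Illustrative note (annotation, not generated by Bison): yyr2 sizes each
+ * reduction. At the yyreduce label below, yylen = yyr2[yyn] symbols are
+ * popped from all three stacks, and yyr1[yyn] names the nonterminal pushed
+ * in their place. */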
+
+
+#define yyerrok (yyerrstatus = 0)
+#define yyclearin (yychar = YYEMPTY)
+#define YYEMPTY (-2)
+#define YYEOF 0
+
+#define YYACCEPT goto yyacceptlab
+#define YYABORT goto yyabortlab
+#define YYERROR goto yyerrorlab
+
+
+#define YYRECOVERING() (!!yyerrstatus)
+
+#define YYBACKUP(Token, Value) \
+ do \
+ if (yychar == YYEMPTY) \
+ { \
+ yychar = (Token); \
+ yylval = (Value); \
+ YYPOPSTACK (yylen); \
+ yystate = *yyssp; \
+ goto yybackup; \
+ } \
+ else \
+ { \
+ yyerror (&yylloc, state, YY_("syntax error: cannot back up")); \
+ YYERROR; \
+ } \
+ while (0)
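+
+/* Illustrative note (annotation, not generated by Bison): YYBACKUP lets a
+ * semantic action substitute its own lookahead. It only works when no
+ * lookahead is pending (yychar == YYEMPTY): it installs Token/Value, pops
+ * the yylen symbols of the half-finished reduction, and re-enters yybackup;
+ * otherwise it reports "syntax error: cannot back up" and fails. */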
+
+/* Error token number */
+#define YYTERROR 1
+#define YYERRCODE 256
+
+
+/* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N].
+ If N is 0, then set CURRENT to the empty location which ends
+ the previous symbol: RHS[0] (always defined). */
+
+#ifndef YYLLOC_DEFAULT
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+ do \
+ if (N) \
+ { \
+ (Current).first_line = YYRHSLOC (Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC (Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC (Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC (Rhs, N).last_column; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC (Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC (Rhs, 0).last_column; \
+ } \
+ while (0)
+#endif
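+
+/* Illustrative note (annotation, not generated by Bison): for a
+ * hypothetical three-symbol rule such as `expr: expr '+' expr`,
+ * YYLLOC_DEFAULT with N == 3 takes first_line/first_column from RHS[1]
+ * and last_line/last_column from RHS[3]; with N == 0 it yields the empty
+ * location that ends the previous symbol, RHS[0]. */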
+
+#define YYRHSLOC(Rhs, K) ((Rhs)[K])
+
+
+/* Enable debugging if requested. */
+#if YYDEBUG
+
+# ifndef YYFPRINTF
+# include <stdio.h> /* INFRINGES ON USER NAME SPACE */
+# define YYFPRINTF fprintf
+# endif
+
+# define YYDPRINTF(Args) \
+do { \
+ if (yydebug) \
+ YYFPRINTF Args; \
+} while (0)
+
+
+/* YY_LOCATION_PRINT -- Print the location on the stream.
+ This macro was not part of the original interface: define it only
+ when we know it will not break user code, i.e. when the locations
+ are of the default YYLTYPE shape we know how to print. */
+
+#ifndef YY_LOCATION_PRINT
+# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
+
+/* Print *YYLOCP on YYO. Private, do not rely on its existence. */
+
+YY_ATTRIBUTE_UNUSED
+static int
+yy_location_print_ (FILE *yyo, YYLTYPE const * const yylocp)
+{
+ int res = 0;
+ int end_col = 0 != yylocp->last_column ? yylocp->last_column - 1 : 0;
+ if (0 <= yylocp->first_line)
+ {
+ res += YYFPRINTF (yyo, "%d", yylocp->first_line);
+ if (0 <= yylocp->first_column)
+ res += YYFPRINTF (yyo, ".%d", yylocp->first_column);
+ }
+ if (0 <= yylocp->last_line)
+ {
+ if (yylocp->first_line < yylocp->last_line)
+ {
+ res += YYFPRINTF (yyo, "-%d", yylocp->last_line);
+ if (0 <= end_col)
+ res += YYFPRINTF (yyo, ".%d", end_col);
+ }
+ else if (0 <= end_col && yylocp->first_column < end_col)
+ res += YYFPRINTF (yyo, "-%d", end_col);
+ }
+ return res;
+}
+
+# define YY_LOCATION_PRINT(File, Loc) \
+ yy_location_print_ (File, &(Loc))
+
+# else
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
+# endif
+#endif
+
+
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location) \
+do { \
+ if (yydebug) \
+ { \
+ YYFPRINTF (stderr, "%s ", Title); \
+ yy_symbol_print (stderr, \
+ Type, Value, Location, state); \
+ YYFPRINTF (stderr, "\n"); \
+ } \
+} while (0)
+
+
+/*-----------------------------------.
+| Print this symbol's value on YYO. |
+`-----------------------------------*/
+
+static void
+yy_symbol_value_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, struct _mesa_glsl_parse_state *state)
+{
+ FILE *yyoutput = yyo;
+ YYUSE (yyoutput);
+ YYUSE (yylocationp);
+ YYUSE (state);
+ if (!yyvaluep)
+ return;
+# ifdef YYPRINT
+ if (yytype < YYNTOKENS)
+ YYPRINT (yyo, yytoknum[yytype], *yyvaluep);
+# endif
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+/*---------------------------.
+| Print this symbol on YYO. |
+`---------------------------*/
+
+static void
+yy_symbol_print (FILE *yyo, int yytype, YYSTYPE const * const yyvaluep, YYLTYPE const * const yylocationp, struct _mesa_glsl_parse_state *state)
+{
+ YYFPRINTF (yyo, "%s %s (",
+ yytype < YYNTOKENS ? "token" : "nterm", yytname[yytype]);
+
+ YY_LOCATION_PRINT (yyo, *yylocationp);
+ YYFPRINTF (yyo, ": ");
+ yy_symbol_value_print (yyo, yytype, yyvaluep, yylocationp, state);
+ YYFPRINTF (yyo, ")");
+}
+
+/*------------------------------------------------------------------.
+| yy_stack_print -- Print the state stack from its BOTTOM up to its |
+| TOP (inclusive). |
+`------------------------------------------------------------------*/
+
+static void
+yy_stack_print (yy_state_t *yybottom, yy_state_t *yytop)
+{
+ YYFPRINTF (stderr, "Stack now");
+ for (; yybottom <= yytop; yybottom++)
+ {
+ int yybot = *yybottom;
+ YYFPRINTF (stderr, " %d", yybot);
+ }
+ YYFPRINTF (stderr, "\n");
+}
+
+# define YY_STACK_PRINT(Bottom, Top) \
+do { \
+ if (yydebug) \
+ yy_stack_print ((Bottom), (Top)); \
+} while (0)
+
+
+/*------------------------------------------------.
+| Report that rule YYRULE is about to be reduced. |
+`------------------------------------------------*/
+
+static void
+yy_reduce_print (yy_state_t *yyssp, YYSTYPE *yyvsp, YYLTYPE *yylsp, int yyrule, struct _mesa_glsl_parse_state *state)
+{
+ int yylno = yyrline[yyrule];
+ int yynrhs = yyr2[yyrule];
+ int yyi;
+ YYFPRINTF (stderr, "Reducing stack by rule %d (line %d):\n",
+ yyrule - 1, yylno);
+ /* The symbols being reduced. */
+ for (yyi = 0; yyi < yynrhs; yyi++)
+ {
+ YYFPRINTF (stderr, " $%d = ", yyi + 1);
+ yy_symbol_print (stderr,
+ yystos[yyssp[yyi + 1 - yynrhs]],
+ &yyvsp[(yyi + 1) - (yynrhs)]
+ , &(yylsp[(yyi + 1) - (yynrhs)]) , state);
+ YYFPRINTF (stderr, "\n");
+ }
+}
+
+# define YY_REDUCE_PRINT(Rule) \
+do { \
+ if (yydebug) \
+ yy_reduce_print (yyssp, yyvsp, yylsp, Rule, state); \
+} while (0)
+
+/* Nonzero means print parse trace. It is left uninitialized so that
+ multiple parsers can coexist. */
+int yydebug;
+#else /* !YYDEBUG */
+# define YYDPRINTF(Args)
+# define YY_SYMBOL_PRINT(Title, Type, Value, Location)
+# define YY_STACK_PRINT(Bottom, Top)
+# define YY_REDUCE_PRINT(Rule)
+#endif /* !YYDEBUG */
+
+
+/* YYINITDEPTH -- initial size of the parser's stacks. */
+#ifndef YYINITDEPTH
+# define YYINITDEPTH 200
+#endif
+
+/* YYMAXDEPTH -- maximum size the stacks can grow to (effective only
+ if the built-in stack extension method is used).
+
+ Do not make this value too large; the results are undefined if
+ YYSTACK_ALLOC_MAXIMUM < YYSTACK_BYTES (YYMAXDEPTH),
+ evaluated with infinite-precision integer arithmetic. */
+
+#ifndef YYMAXDEPTH
+# define YYMAXDEPTH 10000
+#endif
+
+
+#if YYERROR_VERBOSE
+
+# ifndef yystrlen
+# if defined __GLIBC__ && defined _STRING_H
+# define yystrlen(S) (YY_CAST (YYPTRDIFF_T, strlen (S)))
+# else
+/* Return the length of YYSTR. */
+static YYPTRDIFF_T
+yystrlen (const char *yystr)
+{
+ YYPTRDIFF_T yylen;
+ for (yylen = 0; yystr[yylen]; yylen++)
+ continue;
+ return yylen;
+}
+# endif
+# endif
+
+# ifndef yystpcpy
+# if defined __GLIBC__ && defined _STRING_H && defined _GNU_SOURCE
+# define yystpcpy stpcpy
+# else
+/* Copy YYSRC to YYDEST, returning the address of the terminating '\0' in
+ YYDEST. */
+static char *
+yystpcpy (char *yydest, const char *yysrc)
+{
+ char *yyd = yydest;
+ const char *yys = yysrc;
+
+ while ((*yyd++ = *yys++) != '\0')
+ continue;
+
+ return yyd - 1;
+}
+# endif
+# endif
+
+# ifndef yytnamerr
+/* Copy to YYRES the contents of YYSTR after stripping away unnecessary
+ quotes and backslashes, so that it's suitable for yyerror. The
+ heuristic is that double-quoting is unnecessary unless the string
+ contains an apostrophe, a comma, or a backslash (other than a
+ doubled backslash). YYSTR is taken from yytname. If YYRES is
+ null, do not copy; instead, return the length of what the result
+ would have been. */
+static YYPTRDIFF_T
+yytnamerr (char *yyres, const char *yystr)
+{
+ if (*yystr == '"')
+ {
+ YYPTRDIFF_T yyn = 0;
+ char const *yyp = yystr;
+
+ for (;;)
+ switch (*++yyp)
+ {
+ case '\'':
+ case ',':
+ goto do_not_strip_quotes;
+
+ case '\\':
+ if (*++yyp != '\\')
+ goto do_not_strip_quotes;
+ else
+ goto append;
+
+ append:
+ default:
+ if (yyres)
+ yyres[yyn] = *yyp;
+ yyn++;
+ break;
+
+ case '"':
+ if (yyres)
+ yyres[yyn] = '\0';
+ return yyn;
+ }
+ do_not_strip_quotes: ;
+ }
+
+ if (yyres)
+ return yystpcpy (yyres, yystr) - yyres;
+ else
+ return yystrlen (yystr);
+}
+# endif
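+
+/* Illustrative note (annotation, not generated by Bison): given the quoted
+ * token name "\"<=\"" from yytname, yytnamerr strips the quotes, writing
+ * `<=` and returning 2; a name containing a comma or an apostrophe, such
+ * as "\"a,b\"", is copied verbatim, quotes included, via the
+ * do_not_strip_quotes path. */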
+
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+ about the unexpected token YYTOKEN for the state stack whose top is
+ YYSSP.
+
+ Return 0 if *YYMSG was successfully written. Return 1 if *YYMSG is
+ not large enough to hold the message. In that case, also set
+ *YYMSG_ALLOC to the required number of bytes. Return 2 if the
+ required number of bytes is too large to store. */
+static int
+yysyntax_error (YYPTRDIFF_T *yymsg_alloc, char **yymsg,
+ yy_state_t *yyssp, int yytoken)
+{
+ enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+ /* Internationalized format string. */
+ const char *yyformat = YY_NULLPTR;
+ /* Arguments of yyformat: reported tokens (one for the "unexpected",
+ one per "expected"). */
+ char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+ /* Actual size of YYARG. */
+ int yycount = 0;
+ /* Cumulated lengths of YYARG. */
+ YYPTRDIFF_T yysize = 0;
+
+ /* There are many possibilities here to consider:
+ - If this state is a consistent state with a default action, then
+ the only way this function was invoked is if the default action
+ is an error action. In that case, don't check for expected
+ tokens because there are none.
+ - The only way there can be no lookahead present (in yychar) is if
+ this state is a consistent state with a default action. Thus,
+ detecting the absence of a lookahead is sufficient to determine
+ that there is no unexpected or expected token to report. In that
+ case, just report a simple "syntax error".
+ - Don't assume there isn't a lookahead just because this state is a
+ consistent state with a default action. There might have been a
+ previous inconsistent state, consistent state with a non-default
+ action, or user semantic action that manipulated yychar.
+ - Of course, the expected token list depends on states to have
+ correct lookahead information, and it depends on the parser not
+ to perform extra reductions after fetching a lookahead from the
+ scanner and before detecting a syntax error. Thus, state merging
+ (from LALR or IELR) and default reductions corrupt the expected
+ token list. However, the list is correct for canonical LR with
+ one exception: it will still contain any token that will not be
+ accepted due to an error action in a later state.
+ */
+ if (yytoken != YYEMPTY)
+ {
+ int yyn = yypact[*yyssp];
+ YYPTRDIFF_T yysize0 = yytnamerr (YY_NULLPTR, yytname[yytoken]);
+ yysize = yysize0;
+ yyarg[yycount++] = yytname[yytoken];
+ if (!yypact_value_is_default (yyn))
+ {
+ /* Start YYX at -YYN if negative to avoid negative indexes in
+ YYCHECK. In other words, skip the first -YYN actions for
+ this state because they are default actions. */
+ int yyxbegin = yyn < 0 ? -yyn : 0;
+ /* Stay within bounds of both yycheck and yytname. */
+ int yychecklim = YYLAST - yyn + 1;
+ int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+ int yyx;
+
+ for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+ if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+ && !yytable_value_is_error (yytable[yyx + yyn]))
+ {
+ if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+ {
+ yycount = 1;
+ yysize = yysize0;
+ break;
+ }
+ yyarg[yycount++] = yytname[yyx];
+ {
+ YYPTRDIFF_T yysize1
+ = yysize + yytnamerr (YY_NULLPTR, yytname[yyx]);
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+ }
+ }
+ }
+
+ switch (yycount)
+ {
+# define YYCASE_(N, S) \
+ case N: \
+ yyformat = S; \
+ break
+ default: /* Avoid compiler warnings. */
+ YYCASE_(0, YY_("syntax error"));
+ YYCASE_(1, YY_("syntax error, unexpected %s"));
+ YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+ YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+ YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+ YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+ }
+
+ {
+ /* Don't count the "%s"s in the final size, but reserve room for
+ the terminator. */
+ YYPTRDIFF_T yysize1 = yysize + (yystrlen (yyformat) - 2 * yycount) + 1;
+ if (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM)
+ yysize = yysize1;
+ else
+ return 2;
+ }
+
+ if (*yymsg_alloc < yysize)
+ {
+ *yymsg_alloc = 2 * yysize;
+ if (! (yysize <= *yymsg_alloc
+ && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+ *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+ return 1;
+ }
+
+ /* Avoid sprintf, as that infringes on the user's name space.
+ Don't have undefined behavior even if the translation
+ produced a string with the wrong number of "%s"s. */
+ {
+ char *yyp = *yymsg;
+ int yyi = 0;
+ while ((*yyp = *yyformat) != '\0')
+ if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+ {
+ yyp += yytnamerr (yyp, yyarg[yyi++]);
+ yyformat += 2;
+ }
+ else
+ {
+ ++yyp;
+ ++yyformat;
+ }
+ }
+ return 0;
+}
+#endif /* YYERROR_VERBOSE */
+
+/*-----------------------------------------------.
+| Release the memory associated with this symbol. |
+`-----------------------------------------------*/
+
+static void
+yydestruct (const char *yymsg, int yytype, YYSTYPE *yyvaluep, YYLTYPE *yylocationp, struct _mesa_glsl_parse_state *state)
+{
+ YYUSE (yyvaluep);
+ YYUSE (yylocationp);
+ YYUSE (state);
+ if (!yymsg)
+ yymsg = "Deleting";
+ YY_SYMBOL_PRINT (yymsg, yytype, yyvaluep, yylocationp);
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ YYUSE (yytype);
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+}
+
+
+
+
+/*----------.
+| yyparse. |
+`----------*/
+
+int
+yyparse (struct _mesa_glsl_parse_state *state)
+{
+/* The lookahead symbol. */
+int yychar;
+
+
+/* The semantic value of the lookahead symbol. */
+/* Default value used for initialization, for pacifying older GCCs
+ or non-GCC compilers. */
+YY_INITIAL_VALUE (static YYSTYPE yyval_default;)
+YYSTYPE yylval YY_INITIAL_VALUE (= yyval_default);
+
+/* Location data for the lookahead symbol. */
+static YYLTYPE yyloc_default
+# if defined YYLTYPE_IS_TRIVIAL && YYLTYPE_IS_TRIVIAL
+ = { 1, 1, 1, 1 }
+# endif
+;
+YYLTYPE yylloc = yyloc_default;
+
+ /* Number of syntax errors so far. */
+ int yynerrs;
+
+ yy_state_fast_t yystate;
+ /* Number of tokens to shift before error messages enabled. */
+ int yyerrstatus;
+
+ /* The stacks and their tools:
+ 'yyss': related to states.
+ 'yyvs': related to semantic values.
+ 'yyls': related to locations.
+
+ Refer to the stacks through separate pointers, to allow yyoverflow
+ to reallocate them elsewhere. */
+
+ /* The state stack. */
+ yy_state_t yyssa[YYINITDEPTH];
+ yy_state_t *yyss;
+ yy_state_t *yyssp;
+
+ /* The semantic value stack. */
+ YYSTYPE yyvsa[YYINITDEPTH];
+ YYSTYPE *yyvs;
+ YYSTYPE *yyvsp;
+
+ /* The location stack. */
+ YYLTYPE yylsa[YYINITDEPTH];
+ YYLTYPE *yyls;
+ YYLTYPE *yylsp;
+
+ /* The locations where the error started and ended. */
+ YYLTYPE yyerror_range[3];
+
+ YYPTRDIFF_T yystacksize;
+
+ int yyn;
+ int yyresult;
+ /* Lookahead token as an internal (translated) token number. */
+ int yytoken = 0;
+ /* The variables used to return semantic value and location from the
+ action routines. */
+ YYSTYPE yyval;
+ YYLTYPE yyloc;
+
+#if YYERROR_VERBOSE
+ /* Buffer for error messages, and its allocated size. */
+ char yymsgbuf[128];
+ char *yymsg = yymsgbuf;
+ YYPTRDIFF_T yymsg_alloc = sizeof yymsgbuf;
+#endif
+
+#define YYPOPSTACK(N) (yyvsp -= (N), yyssp -= (N), yylsp -= (N))
+
+ /* The number of symbols on the RHS of the reduced rule.
+ Keep to zero when no symbol should be popped. */
+ int yylen = 0;
+
+ yyssp = yyss = yyssa;
+ yyvsp = yyvs = yyvsa;
+ yylsp = yyls = yylsa;
+ yystacksize = YYINITDEPTH;
+
+ YYDPRINTF ((stderr, "Starting parse\n"));
+
+ yystate = 0;
+ yyerrstatus = 0;
+ yynerrs = 0;
+ yychar = YYEMPTY; /* Cause a token to be read. */
+
+/* User initialization code. */
+#line 89 "src/compiler/glsl/glsl_parser.yy"
+{
+ yylloc.first_line = 1;
+ yylloc.first_column = 1;
+ yylloc.last_line = 1;
+ yylloc.last_column = 1;
+ yylloc.source = 0;
+ yylloc.path = NULL;
+}
+
+#line 2339 "src/compiler/glsl/glsl_parser.cpp"
+
+ yylsp[0] = yylloc;
+ goto yysetstate;
+
+
+/*------------------------------------------------------------.
+| yynewstate -- push a new state, which is found in yystate. |
+`------------------------------------------------------------*/
+yynewstate:
+ /* In all cases, when you get here, the value and location stacks
+ have just been pushed. So pushing a state here evens the stacks. */
+ yyssp++;
+
+
+/*--------------------------------------------------------------------.
+| yysetstate -- set current state (the top of the stack) to yystate. |
+`--------------------------------------------------------------------*/
+yysetstate:
+ YYDPRINTF ((stderr, "Entering state %d\n", yystate));
+ YY_ASSERT (0 <= yystate && yystate < YYNSTATES);
+ YY_IGNORE_USELESS_CAST_BEGIN
+ *yyssp = YY_CAST (yy_state_t, yystate);
+ YY_IGNORE_USELESS_CAST_END
+
+ if (yyss + yystacksize - 1 <= yyssp)
+#if !defined yyoverflow && !defined YYSTACK_RELOCATE
+ goto yyexhaustedlab;
+#else
+ {
+ /* Get the current used size of the three stacks, in elements. */
+ YYPTRDIFF_T yysize = yyssp - yyss + 1;
+
+# if defined yyoverflow
+ {
+ /* Give user a chance to reallocate the stack. Use copies of
+ these so that the &'s don't force the real ones into
+ memory. */
+ yy_state_t *yyss1 = yyss;
+ YYSTYPE *yyvs1 = yyvs;
+ YYLTYPE *yyls1 = yyls;
+
+ /* Each stack pointer address is followed by the size of the
+ data in use in that stack, in bytes. This used to be a
+ conditional around just the two extra args, but that might
+ be undefined if yyoverflow is a macro. */
+ yyoverflow (YY_("memory exhausted"),
+ &yyss1, yysize * YYSIZEOF (*yyssp),
+ &yyvs1, yysize * YYSIZEOF (*yyvsp),
+ &yyls1, yysize * YYSIZEOF (*yylsp),
+ &yystacksize);
+ yyss = yyss1;
+ yyvs = yyvs1;
+ yyls = yyls1;
+ }
+# else /* defined YYSTACK_RELOCATE */
+ /* Extend the stack our own way. */
+ if (YYMAXDEPTH <= yystacksize)
+ goto yyexhaustedlab;
+ yystacksize *= 2;
+ if (YYMAXDEPTH < yystacksize)
+ yystacksize = YYMAXDEPTH;
+
+ {
+ yy_state_t *yyss1 = yyss;
+ union yyalloc *yyptr =
+ YY_CAST (union yyalloc *,
+ YYSTACK_ALLOC (YY_CAST (YYSIZE_T, YYSTACK_BYTES (yystacksize))));
+ if (! yyptr)
+ goto yyexhaustedlab;
+ YYSTACK_RELOCATE (yyss_alloc, yyss);
+ YYSTACK_RELOCATE (yyvs_alloc, yyvs);
+ YYSTACK_RELOCATE (yyls_alloc, yyls);
+# undef YYSTACK_RELOCATE
+ if (yyss1 != yyssa)
+ YYSTACK_FREE (yyss1);
+ }
+# endif
+
+ yyssp = yyss + yysize - 1;
+ yyvsp = yyvs + yysize - 1;
+ yylsp = yyls + yysize - 1;
+
+ YY_IGNORE_USELESS_CAST_BEGIN
+ YYDPRINTF ((stderr, "Stack size increased to %ld\n",
+ YY_CAST (long, yystacksize)));
+ YY_IGNORE_USELESS_CAST_END
+
+ if (yyss + yystacksize - 1 <= yyssp)
+ YYABORT;
+ }
+#endif /* !defined yyoverflow && !defined YYSTACK_RELOCATE */
+
+ if (yystate == YYFINAL)
+ YYACCEPT;
+
+ goto yybackup;
+
+
+/*-----------.
+| yybackup. |
+`-----------*/
+yybackup:
+ /* Do appropriate processing given the current state. Read a
+ lookahead token if we need one and don't already have one. */
+
+ /* First try to decide what to do without reference to lookahead token. */
+ yyn = yypact[yystate];
+ if (yypact_value_is_default (yyn))
+ goto yydefault;
+
+ /* Not known => get a lookahead token if we don't already have one. */
+
+ /* YYCHAR is either YYEMPTY or YYEOF or a valid lookahead symbol. */
+ if (yychar == YYEMPTY)
+ {
+ YYDPRINTF ((stderr, "Reading a token: "));
+ yychar = yylex (&yylval, &yylloc, state);
+ }
+
+ if (yychar <= YYEOF)
+ {
+ yychar = yytoken = YYEOF;
+ YYDPRINTF ((stderr, "Now at end of input.\n"));
+ }
+ else
+ {
+ yytoken = YYTRANSLATE (yychar);
+ YY_SYMBOL_PRINT ("Next token is", yytoken, &yylval, &yylloc);
+ }
+
+ /* If the proper action on seeing token YYTOKEN is to reduce or to
+ detect an error, take that action. */
+ yyn += yytoken;
+ if (yyn < 0 || YYLAST < yyn || yycheck[yyn] != yytoken)
+ goto yydefault;
+ yyn = yytable[yyn];
+ if (yyn <= 0)
+ {
+ if (yytable_value_is_error (yyn))
+ goto yyerrlab;
+ yyn = -yyn;
+ goto yyreduce;
+ }
+
+ /* Count tokens shifted since error; after three, turn off error
+ status. */
+ if (yyerrstatus)
+ yyerrstatus--;
+
+ /* Shift the lookahead token. */
+ YY_SYMBOL_PRINT ("Shifting", yytoken, &yylval, &yylloc);
+ yystate = yyn;
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+ *++yylsp = yylloc;
+
+ /* Discard the shifted token. */
+ yychar = YYEMPTY;
+ goto yynewstate;
+
+
+/*-----------------------------------------------------------.
+| yydefault -- do the default action for the current state. |
+`-----------------------------------------------------------*/
+yydefault:
+ yyn = yydefact[yystate];
+ if (yyn == 0)
+ goto yyerrlab;
+ goto yyreduce;
+
+
+/*-----------------------------.
+| yyreduce -- do a reduction. |
+`-----------------------------*/
+yyreduce:
+ /* yyn is the number of a rule to reduce with. */
+ yylen = yyr2[yyn];
+
+ /* If YYLEN is nonzero, implement the default value of the action:
+ '$$ = $1'.
+
+ Otherwise, the following line sets YYVAL to garbage.
+ This behavior is undocumented and Bison
+ users should not rely upon it. Assigning to YYVAL
+ unconditionally makes the parser a bit smaller, and it avoids a
+ GCC warning that YYVAL may be used uninitialized. */
+ yyval = yyvsp[1-yylen];
+
+ /* Default location. */
+ YYLLOC_DEFAULT (yyloc, (yylsp - yylen), yylen);
+ yyerror_range[1] = yyloc;
+ YY_REDUCE_PRINT (yyn);
+ switch (yyn)
+ {
+ case 2:
+#line 295 "src/compiler/glsl/glsl_parser.yy"
+ {
+ _mesa_glsl_initialize_types(state);
+ }
+#line 2540 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 3:
+#line 299 "src/compiler/glsl/glsl_parser.yy"
+ {
+ delete state->symbols;
+ state->symbols = new(ralloc_parent(state)) glsl_symbol_table;
+ if (state->es_shader) {
+ if (state->stage == MESA_SHADER_FRAGMENT) {
+ state->symbols->add_default_precision_qualifier("int", ast_precision_medium);
+ } else {
+ state->symbols->add_default_precision_qualifier("float", ast_precision_high);
+ state->symbols->add_default_precision_qualifier("int", ast_precision_high);
+ }
+ state->symbols->add_default_precision_qualifier("sampler2D", ast_precision_low);
+ state->symbols->add_default_precision_qualifier("samplerExternalOES", ast_precision_low);
+ state->symbols->add_default_precision_qualifier("samplerCube", ast_precision_low);
+ state->symbols->add_default_precision_qualifier("atomic_uint", ast_precision_high);
+ }
+ _mesa_glsl_initialize_types(state);
+ }
+#line 2562 "src/compiler/glsl/glsl_parser.cpp"
+ break;
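+
+/* Note (annotation, not generated code): the asymmetry above follows the
+ * GLSL ES specifications, which give fragment shaders a default precision
+ * only for `int` (mediump) and none for `float`, while other stages
+ * default both `float` and `int` to highp; the lowp sampler and highp
+ * atomic_uint defaults are likewise spec-mandated. */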
+
+ case 5:
+#line 321 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->process_version_directive(&(yylsp[-1]), (yyvsp[-1].n), NULL);
+ if (state->error) {
+ YYERROR;
+ }
+ }
+#line 2573 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 6:
+#line 328 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->process_version_directive(&(yylsp[-2]), (yyvsp[-2].n), (yyvsp[-1].identifier));
+ if (state->error) {
+ YYERROR;
+ }
+ }
+#line 2584 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 7:
+#line 337 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = NULL; }
+#line 2590 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 8:
+#line 338 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = NULL; }
+#line 2596 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 9:
+#line 339 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = NULL; }
+#line 2602 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 10:
+#line 340 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = NULL; }
+#line 2608 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 11:
+#line 342 "src/compiler/glsl/glsl_parser.yy"
+ {
+ /* Pragma invariant(all) cannot be used in a fragment shader.
+ *
+ * Page 27 of the GLSL 1.20 spec, Page 53 of the GLSL ES 3.00 spec:
+ *
+ * "It is an error to use this pragma in a fragment shader."
+ */
+ if (state->is_version(120, 300) &&
+ state->stage == MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& (yylsp[-1]), state,
+ "pragma `invariant(all)' cannot be used "
+ "in a fragment shader.");
+ } else if (!state->is_version(120, 100)) {
+ _mesa_glsl_warning(& (yylsp[-1]), state,
+ "pragma `invariant(all)' not supported in %s "
+ "(GLSL ES 1.00 or GLSL 1.20 required)",
+ state->get_version_string());
+ } else {
+ state->all_invariant = true;
+ }
+
+ (yyval.node) = NULL;
+ }
+#line 2636 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 12:
+#line 366 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *mem_ctx = state->linalloc;
+ (yyval.node) = new(mem_ctx) ast_warnings_toggle(true);
+ }
+#line 2645 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 13:
+#line 371 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *mem_ctx = state->linalloc;
+ (yyval.node) = new(mem_ctx) ast_warnings_toggle(false);
+ }
+#line 2654 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 19:
+#line 390 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (!_mesa_glsl_process_extension((yyvsp[-3].identifier), & (yylsp[-3]), (yyvsp[-1].identifier), & (yylsp[-1]), state)) {
+ YYERROR;
+ }
+ }
+#line 2664 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 20:
+#line 399 "src/compiler/glsl/glsl_parser.yy"
+ {
+ /* FINISHME: The NULL test is required because pragmas are set to
+ * FINISHME: NULL. (See production rule for external_declaration.)
+ */
+ if ((yyvsp[0].node) != NULL)
+ state->translation_unit.push_tail(& (yyvsp[0].node)->link);
+ }
+#line 2676 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 21:
+#line 407 "src/compiler/glsl/glsl_parser.yy"
+ {
+ /* FINISHME: The NULL test is required because pragmas are set to
+ * FINISHME: NULL. (See production rule for external_declaration.)
+ */
+ if ((yyvsp[0].node) != NULL)
+ state->translation_unit.push_tail(& (yyvsp[0].node)->link);
+ }
+#line 2688 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 22:
+#line 414 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (!state->allow_extension_directive_midshader) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "#extension directive is not allowed "
+ "in the middle of a shader");
+ YYERROR;
+ }
+ }
+#line 2701 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 25:
+#line 431 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_identifier, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.identifier = (yyvsp[0].identifier);
+ }
+#line 2712 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 26:
+#line 438 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_int_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.int_constant = (yyvsp[0].n);
+ }
+#line 2723 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 27:
+#line 445 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_uint_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.uint_constant = (yyvsp[0].n);
+ }
+#line 2734 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 28:
+#line 452 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_int64_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.int64_constant = (yyvsp[0].n64);
+ }
+#line 2745 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 29:
+#line 459 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_uint64_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.uint64_constant = (yyvsp[0].n64);
+ }
+#line 2756 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 30:
+#line 466 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_float_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.float_constant = (yyvsp[0].real);
+ }
+#line 2767 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 31:
+#line 473 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_double_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.double_constant = (yyvsp[0].dreal);
+ }
+#line 2778 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 32:
+#line 480 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_bool_constant, NULL, NULL, NULL);
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->primary_expression.bool_constant = (yyvsp[0].n);
+ }
+#line 2789 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 33:
+#line 487 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[-1].expression);
+ }
+#line 2797 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 35:
+#line 495 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_array_index, (yyvsp[-3].expression), (yyvsp[-1].expression), NULL);
+ (yyval.expression)->set_location_range((yylsp[-3]), (yylsp[0]));
+ }
+#line 2807 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 36:
+#line 501 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[0].expression);
+ }
+#line 2815 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 37:
+#line 505 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_field_selection, (yyvsp[-2].expression), NULL, NULL);
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ (yyval.expression)->primary_expression.identifier = (yyvsp[0].identifier);
+ }
+#line 2826 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 38:
+#line 512 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_post_inc, (yyvsp[-1].expression), NULL, NULL);
+ (yyval.expression)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 2836 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 39:
+#line 518 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_post_dec, (yyvsp[-1].expression), NULL, NULL);
+ (yyval.expression)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 2846 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 47:
+#line 549 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[-1].expression);
+ (yyval.expression)->set_location((yylsp[-1]));
+ (yyval.expression)->expressions.push_tail(& (yyvsp[0].expression)->link);
+ }
+#line 2856 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 48:
+#line 555 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[-2].expression);
+ (yyval.expression)->set_location((yylsp[-2]));
+ (yyval.expression)->expressions.push_tail(& (yyvsp[0].expression)->link);
+ }
+#line 2866 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 50:
+#line 571 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_function_expression((yyvsp[0].type_specifier));
+ (yyval.expression)->set_location((yylsp[0]));
+ }
+#line 2876 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 51:
+#line 577 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_function_expression((yyvsp[0].expression));
+ (yyval.expression)->set_location((yylsp[0]));
+ }
+#line 2886 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 53:
+#line 592 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_pre_inc, (yyvsp[0].expression), NULL, NULL);
+ (yyval.expression)->set_location((yylsp[-1]));
+ }
+#line 2896 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 54:
+#line 598 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_pre_dec, (yyvsp[0].expression), NULL, NULL);
+ (yyval.expression)->set_location((yylsp[-1]));
+ }
+#line 2906 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 55:
+#line 604 "src/compiler/glsl/glsl_parser.yy"
+ {
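+ /* Unary operator expression: (yyvsp[-1].n) carries the operator enum
+ * (ast_plus, ast_neg, ast_logic_not or ast_bit_not) produced by the
+ * unary_operator cases below.
+ */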
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression((yyvsp[-1].n), (yyvsp[0].expression), NULL, NULL);
+ (yyval.expression)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 2916 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 56:
+#line 613 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_plus; }
+#line 2922 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 57:
+#line 614 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_neg; }
+#line 2928 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 58:
+#line 615 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_logic_not; }
+#line 2934 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 59:
+#line 616 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_bit_not; }
+#line 2940 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 61:
+#line 622 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_mul, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 2950 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 62:
+#line 628 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_div, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 2960 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 63:
+#line 634 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_mod, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 2970 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 65:
+#line 644 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_add, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 2980 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 66:
+#line 650 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_sub, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 2990 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 68:
+#line 660 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_lshift, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3000 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 69:
+#line 666 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_rshift, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3010 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 71:
+#line 676 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_less, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3020 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 72:
+#line 682 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_greater, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3030 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 73:
+#line 688 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_lequal, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3040 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 74:
+#line 694 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_gequal, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3050 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 76:
+#line 704 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_equal, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3060 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 77:
+#line 710 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_nequal, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3070 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 79:
+#line 720 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_bit_and, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3080 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 81:
+#line 730 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_bit_xor, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3090 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 83:
+#line 740 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_bit_or, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3100 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 85:
+#line 750 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_logic_and, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3110 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 87:
+#line 760 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_logic_xor, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3120 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 89:
+#line 770 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression_bin(ast_logic_or, (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3130 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 91:
+#line 780 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression(ast_conditional, (yyvsp[-4].expression), (yyvsp[-2].expression), (yyvsp[0].expression));
+ (yyval.expression)->set_location_range((yylsp[-4]), (yylsp[0]));
+ }
+#line 3140 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 93:
+#line 790 "src/compiler/glsl/glsl_parser.yy"
+ {
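+ /* Assignment expression: (yyvsp[-1].n) is the assignment operator enum
+ * (ast_assign through ast_or_assign) selected by the cases below.
+ */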
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_expression((yyvsp[-1].n), (yyvsp[-2].expression), (yyvsp[0].expression), NULL);
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 3150 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 94:
+#line 798 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_assign; }
+#line 3156 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 95:
+#line 799 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_mul_assign; }
+#line 3162 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 96:
+#line 800 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_div_assign; }
+#line 3168 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 97:
+#line 801 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_mod_assign; }
+#line 3174 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 98:
+#line 802 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_add_assign; }
+#line 3180 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 99:
+#line 803 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_sub_assign; }
+#line 3186 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 100:
+#line 804 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_ls_assign; }
+#line 3192 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 101:
+#line 805 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_rs_assign; }
+#line 3198 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 102:
+#line 806 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_and_assign; }
+#line 3204 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 103:
+#line 807 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_xor_assign; }
+#line 3210 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 104:
+#line 808 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.n) = ast_or_assign; }
+#line 3216 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 105:
+#line 813 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[0].expression);
+ }
+#line 3224 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 106:
+#line 817 "src/compiler/glsl/glsl_parser.yy"
+ {
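+ /* Comma operator: sequences are kept flat. A new ast_sequence node is
+ * created only when the left operand is not already a sequence;
+ * otherwise the right operand is appended to the existing node's list.
+ */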
+ void *ctx = state->linalloc;
+ if ((yyvsp[-2].expression)->oper != ast_sequence) {
+ (yyval.expression) = new(ctx) ast_expression(ast_sequence, NULL, NULL, NULL);
+ (yyval.expression)->set_location_range((yylsp[-2]), (yylsp[0]));
+ (yyval.expression)->expressions.push_tail(& (yyvsp[-2].expression)->link);
+ } else {
+ (yyval.expression) = (yyvsp[-2].expression);
+ }
+
+ (yyval.expression)->expressions.push_tail(& (yyvsp[0].expression)->link);
+ }
+#line 3241 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 108:
+#line 837 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->symbols->pop_scope();
+ (yyval.node) = (yyvsp[-1].function);
+ }
+#line 3250 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 109:
+#line 842 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = (yyvsp[-1].declarator_list);
+ }
+#line 3258 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 110:
+#line 846 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyvsp[-1].type_specifier)->default_precision = (yyvsp[-2].n);
+ (yyval.node) = (yyvsp[-1].type_specifier);
+ }
+#line 3267 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 111:
+#line 851 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_interface_block *block = (ast_interface_block *) (yyvsp[0].node);
+ if (block->layout.has_layout() || block->layout.has_memory()) {
+ if (!block->default_layout.merge_qualifier(& (yylsp[0]), state, block->layout, false)) {
+ YYERROR;
+ }
+ }
+ block->layout = block->default_layout;
+ if (!block->layout.push_to_global(& (yylsp[0]), state)) {
+ YYERROR;
+ }
+ (yyval.node) = (yyvsp[0].node);
+ }
+#line 3285 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 115:
+#line 877 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.function) = (yyvsp[-1].function);
+ (yyval.function)->parameters.push_tail(& (yyvsp[0].parameter_declarator)->link);
+ }
+#line 3294 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 116:
+#line 882 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.function) = (yyvsp[-2].function);
+ (yyval.function)->parameters.push_tail(& (yyvsp[0].parameter_declarator)->link);
+ }
+#line 3303 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 117:
+#line 890 "src/compiler/glsl/glsl_parser.yy"
+ {
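+ /* Function header: record the return type and identifier, register the
+ * name in the symbol table (as a subroutine type or as an ir_function),
+ * then open a new scope for the parameters. The matching pop_scope runs
+ * when the surrounding declaration completes.
+ */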
+ void *ctx = state->linalloc;
+ (yyval.function) = new(ctx) ast_function();
+ (yyval.function)->set_location((yylsp[-1]));
+ (yyval.function)->return_type = (yyvsp[-2].fully_specified_type);
+ (yyval.function)->identifier = (yyvsp[-1].identifier);
+
+ if ((yyvsp[-2].fully_specified_type)->qualifier.is_subroutine_decl()) {
+ /* add type for IDENTIFIER search */
+ state->symbols->add_type((yyvsp[-1].identifier), glsl_type::get_subroutine_instance((yyvsp[-1].identifier)));
+ } else
+ state->symbols->add_function(new(state) ir_function((yyvsp[-1].identifier)));
+ state->symbols->push_scope();
+ }
+#line 3322 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 118:
+#line 908 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.parameter_declarator) = new(ctx) ast_parameter_declarator();
+ (yyval.parameter_declarator)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.parameter_declarator)->type = new(ctx) ast_fully_specified_type();
+ (yyval.parameter_declarator)->type->set_location((yylsp[-1]));
+ (yyval.parameter_declarator)->type->specifier = (yyvsp[-1].type_specifier);
+ (yyval.parameter_declarator)->identifier = (yyvsp[0].identifier);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[0].identifier), ir_var_auto));
+ }
+#line 3337 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 119:
+#line 919 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (state->allow_layout_qualifier_on_function_parameter) {
+ void *ctx = state->linalloc;
+ (yyval.parameter_declarator) = new(ctx) ast_parameter_declarator();
+ (yyval.parameter_declarator)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.parameter_declarator)->type = new(ctx) ast_fully_specified_type();
+ (yyval.parameter_declarator)->type->set_location((yylsp[-1]));
+ (yyval.parameter_declarator)->type->specifier = (yyvsp[-1].type_specifier);
+ (yyval.parameter_declarator)->identifier = (yyvsp[0].identifier);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[0].identifier), ir_var_auto));
+ } else {
+ _mesa_glsl_error(&(yylsp[-2]), state,
+ "is is not allowed on function parameter");
+ YYERROR;
+ }
+ }
+#line 3358 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 120:
+#line 936 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.parameter_declarator) = new(ctx) ast_parameter_declarator();
+ (yyval.parameter_declarator)->set_location_range((yylsp[-2]), (yylsp[0]));
+ (yyval.parameter_declarator)->type = new(ctx) ast_fully_specified_type();
+ (yyval.parameter_declarator)->type->set_location((yylsp[-2]));
+ (yyval.parameter_declarator)->type->specifier = (yyvsp[-2].type_specifier);
+ (yyval.parameter_declarator)->identifier = (yyvsp[-1].identifier);
+ (yyval.parameter_declarator)->array_specifier = (yyvsp[0].array_specifier);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-1].identifier), ir_var_auto));
+ }
+#line 3374 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 121:
+#line 951 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.parameter_declarator) = (yyvsp[0].parameter_declarator);
+ (yyval.parameter_declarator)->type->qualifier = (yyvsp[-1].type_qualifier);
+ if (!(yyval.parameter_declarator)->type->qualifier.push_to_global(& (yylsp[-1]), state)) {
+ YYERROR;
+ }
+ }
+#line 3386 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 122:
+#line 959 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.parameter_declarator) = new(ctx) ast_parameter_declarator();
+ (yyval.parameter_declarator)->set_location((yylsp[0]));
+ (yyval.parameter_declarator)->type = new(ctx) ast_fully_specified_type();
+ (yyval.parameter_declarator)->type->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.parameter_declarator)->type->qualifier = (yyvsp[-1].type_qualifier);
+ if (!(yyval.parameter_declarator)->type->qualifier.push_to_global(& (yylsp[-1]), state)) {
+ YYERROR;
+ }
+ (yyval.parameter_declarator)->type->specifier = (yyvsp[0].type_specifier);
+ }
+#line 3403 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 123:
+#line 975 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ }
+#line 3411 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 124:
+#line 979 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).flags.q.constant)
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate const qualifier");
+
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).flags.q.constant = 1;
+ }
+#line 3423 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 125:
+#line 987 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).flags.q.precise)
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate precise qualifier");
+
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).flags.q.precise = 1;
+ }
+#line 3435 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 126:
+#line 995 "src/compiler/glsl/glsl_parser.yy"
+ {
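+ /* Enforce parameter qualifier ordering: in/out/inout may appear at most
+ * once and, without 420pack or ES 3.1, must follow const or precise
+ * rather than precede them.
+ */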
+ if (((yyvsp[-1].type_qualifier).flags.q.in || (yyvsp[-1].type_qualifier).flags.q.out) && ((yyvsp[0].type_qualifier).flags.q.in || (yyvsp[0].type_qualifier).flags.q.out))
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate in/out/inout qualifier");
+
+ if (!state->has_420pack_or_es31() && (yyvsp[0].type_qualifier).flags.q.constant)
+ _mesa_glsl_error(&(yylsp[-1]), state, "in/out/inout must come after const "
+ "or precise");
+
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 3451 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 127:
+#line 1007 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).precision != ast_precision_none)
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate precision qualifier");
+
+ if (!state->has_420pack_or_es31() &&
+ (yyvsp[0].type_qualifier).flags.i != 0)
+ _mesa_glsl_error(&(yylsp[-1]), state, "precision qualifiers must come last");
+
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).precision = (yyvsp[-1].n);
+ }
+#line 3467 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 128:
+#line 1019 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 3476 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 129:
+#line 1026 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.in = 1;
+ }
+#line 3485 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 130:
+#line 1031 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.out = 1;
+ }
+#line 3494 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 131:
+#line 1036 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.in = 1;
+ (yyval.type_qualifier).flags.q.out = 1;
+ }
+#line 3504 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 134:
+#line 1050 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ decl->set_location((yylsp[0]));
+
+ (yyval.declarator_list) = (yyvsp[-2].declarator_list);
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[0].identifier), ir_var_auto));
+ }
+#line 3518 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 135:
+#line 1060 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-1].identifier), (yyvsp[0].array_specifier), NULL);
+ decl->set_location_range((yylsp[-1]), (yylsp[0]));
+
+ (yyval.declarator_list) = (yyvsp[-3].declarator_list);
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-1].identifier), ir_var_auto));
+ }
+#line 3532 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 136:
+#line 1070 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-3].identifier), (yyvsp[-2].array_specifier), (yyvsp[0].expression));
+ decl->set_location_range((yylsp[-3]), (yylsp[-2]));
+
+ (yyval.declarator_list) = (yyvsp[-5].declarator_list);
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-3].identifier), ir_var_auto));
+ }
+#line 3546 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 137:
+#line 1080 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-2].identifier), NULL, (yyvsp[0].expression));
+ decl->set_location((yylsp[-2]));
+
+ (yyval.declarator_list) = (yyvsp[-4].declarator_list);
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-2].identifier), ir_var_auto));
+ }
+#line 3560 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 138:
+#line 1094 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ /* Empty declaration list is valid. */
+ (yyval.declarator_list) = new(ctx) ast_declarator_list((yyvsp[0].fully_specified_type));
+ (yyval.declarator_list)->set_location((yylsp[0]));
+ }
+#line 3571 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 139:
+#line 1101 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ decl->set_location((yylsp[0]));
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list((yyvsp[-1].fully_specified_type));
+ (yyval.declarator_list)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[0].identifier), ir_var_auto));
+ }
+#line 3586 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 140:
+#line 1112 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-1].identifier), (yyvsp[0].array_specifier), NULL);
+ decl->set_location_range((yylsp[-1]), (yylsp[0]));
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list((yyvsp[-2].fully_specified_type));
+ (yyval.declarator_list)->set_location_range((yylsp[-2]), (yylsp[0]));
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-1].identifier), ir_var_auto));
+ }
+#line 3601 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 141:
+#line 1123 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-3].identifier), (yyvsp[-2].array_specifier), (yyvsp[0].expression));
+ decl->set_location_range((yylsp[-3]), (yylsp[-2]));
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list((yyvsp[-4].fully_specified_type));
+ (yyval.declarator_list)->set_location_range((yylsp[-4]), (yylsp[-2]));
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-3].identifier), ir_var_auto));
+ }
+#line 3616 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 142:
+#line 1134 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-2].identifier), NULL, (yyvsp[0].expression));
+ decl->set_location((yylsp[-2]));
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list((yyvsp[-3].fully_specified_type));
+ (yyval.declarator_list)->set_location_range((yylsp[-3]), (yylsp[-2]));
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, (yyvsp[-2].identifier), ir_var_auto));
+ }
+#line 3631 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 143:
+#line 1145 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ decl->set_location((yylsp[0]));
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list(NULL);
+ (yyval.declarator_list)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.declarator_list)->invariant = true;
+
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ }
+#line 3647 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 144:
+#line 1157 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ decl->set_location((yylsp[0]));
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list(NULL);
+ (yyval.declarator_list)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.declarator_list)->precise = true;
+
+ (yyval.declarator_list)->declarations.push_tail(&decl->link);
+ }
+#line 3663 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 145:
+#line 1172 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.fully_specified_type) = new(ctx) ast_fully_specified_type();
+ (yyval.fully_specified_type)->set_location((yylsp[0]));
+ (yyval.fully_specified_type)->specifier = (yyvsp[0].type_specifier);
+ }
+#line 3674 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 146:
+#line 1179 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.fully_specified_type) = new(ctx) ast_fully_specified_type();
+ (yyval.fully_specified_type)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.fully_specified_type)->qualifier = (yyvsp[-1].type_qualifier);
+ if (!(yyval.fully_specified_type)->qualifier.push_to_global(& (yylsp[-1]), state)) {
+ YYERROR;
+ }
+ (yyval.fully_specified_type)->specifier = (yyvsp[0].type_specifier);
+ if ((yyval.fully_specified_type)->specifier->structure != NULL &&
+ (yyval.fully_specified_type)->specifier->structure->is_declaration) {
+ (yyval.fully_specified_type)->specifier->structure->layout = &(yyval.fully_specified_type)->qualifier;
+ }
+ }
+#line 3693 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 147:
+#line 1197 "src/compiler/glsl/glsl_parser.yy"
+ {
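+ /* layout(...) wrapper: propagate the qualifier list collected inside
+ * the parentheses (one symbol back from the closing token).
+ */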
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ }
+#line 3701 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 149:
+#line 1205 "src/compiler/glsl/glsl_parser.yy"
+ {
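+ /* Comma-separated layout qualifier list: fold the new qualifier into
+ * the accumulated set. merge_qualifier reports any conflict and returns
+ * false, which aborts the parse via YYERROR.
+ */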
+ (yyval.type_qualifier) = (yyvsp[-2].type_qualifier);
+ if (!(yyval.type_qualifier).merge_qualifier(& (yylsp[0]), state, (yyvsp[0].type_qualifier), true)) {
+ YYERROR;
+ }
+ }
+#line 3712 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 150:
+#line 1215 "src/compiler/glsl/glsl_parser.yy"
+ {
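+ /* Parameter-less layout qualifier identifiers. Each extension-specific
+ * block below runs only if no earlier block matched: flags.i doubles as
+ * the "already recognized" sentinel. An identifier that survives every
+ * block falls through to the unrecognized-identifier error at the end.
+ */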
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+
+ /* Layout qualifiers for ARB_fragment_coord_conventions. */
+ if (!(yyval.type_qualifier).flags.i && (state->ARB_fragment_coord_conventions_enable ||
+ state->is_version(150, 0))) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "origin_upper_left", state) == 0) {
+ (yyval.type_qualifier).flags.q.origin_upper_left = 1;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "pixel_center_integer",
+ state) == 0) {
+ (yyval.type_qualifier).flags.q.pixel_center_integer = 1;
+ }
+
+ if ((yyval.type_qualifier).flags.i && state->ARB_fragment_coord_conventions_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "GL_ARB_fragment_coord_conventions layout "
+ "identifier `%s' used", (yyvsp[0].identifier));
+ }
+ }
+
+ /* Layout qualifiers for AMD/ARB_conservative_depth. */
+ if (!(yyval.type_qualifier).flags.i &&
+ (state->AMD_conservative_depth_enable ||
+ state->ARB_conservative_depth_enable ||
+ state->is_version(420, 0))) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "depth_any", state) == 0) {
+ (yyval.type_qualifier).flags.q.depth_type = 1;
+ (yyval.type_qualifier).depth_type = ast_depth_any;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "depth_greater", state) == 0) {
+ (yyval.type_qualifier).flags.q.depth_type = 1;
+ (yyval.type_qualifier).depth_type = ast_depth_greater;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "depth_less", state) == 0) {
+ (yyval.type_qualifier).flags.q.depth_type = 1;
+ (yyval.type_qualifier).depth_type = ast_depth_less;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "depth_unchanged",
+ state) == 0) {
+ (yyval.type_qualifier).flags.q.depth_type = 1;
+ (yyval.type_qualifier).depth_type = ast_depth_unchanged;
+ }
+
+ if ((yyval.type_qualifier).flags.i && state->AMD_conservative_depth_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "GL_AMD_conservative_depth "
+ "layout qualifier `%s' is used", (yyvsp[0].identifier));
+ }
+ if ((yyval.type_qualifier).flags.i && state->ARB_conservative_depth_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "GL_ARB_conservative_depth "
+ "layout qualifier `%s' is used", (yyvsp[0].identifier));
+ }
+ }
+
+ /* See also interface_block_layout_qualifier. */
+ if (!(yyval.type_qualifier).flags.i && state->has_uniform_buffer_objects()) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "std140", state) == 0) {
+ (yyval.type_qualifier).flags.q.std140 = 1;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "shared", state) == 0) {
+ (yyval.type_qualifier).flags.q.shared = 1;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "std430", state) == 0) {
+ (yyval.type_qualifier).flags.q.std430 = 1;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "column_major", state) == 0) {
+ (yyval.type_qualifier).flags.q.column_major = 1;
+ /* "row_major" is a reserved word in GLSL 1.30+. Its token is parsed
+ * below in the interface_block_layout_qualifier rule.
+ *
+ * It is not a reserved word in GLSL ES 3.00, so it's handled here as
+ * an identifier.
+ *
+ * Also, this takes care of alternate capitalizations of
+ * "row_major" (which is necessary because layout qualifiers
+ * are case-insensitive in desktop GLSL).
+ */
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "row_major", state) == 0) {
+ (yyval.type_qualifier).flags.q.row_major = 1;
+ /* "packed" is a reserved word in GLSL, and its token is
+ * parsed below in the interface_block_layout_qualifier rule.
+ * However, we must take care of alternate capitalizations of
+ * "packed", because layout qualifiers are case-insensitive
+ * in desktop GLSL.
+ */
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "packed", state) == 0) {
+ (yyval.type_qualifier).flags.q.packed = 1;
+ }
+
+ if ((yyval.type_qualifier).flags.i && state->ARB_uniform_buffer_object_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "layout qualifier `%s' is used", (yyvsp[0].identifier));
+ }
+ }
+
+ /* Layout qualifiers for GLSL 1.50 geometry shaders. */
+ if (!(yyval.type_qualifier).flags.i) {
+ static const struct {
+ const char *s;
+ GLenum e;
+ } map[] = {
+ { "points", GL_POINTS },
+ { "lines", GL_LINES },
+ { "lines_adjacency", GL_LINES_ADJACENCY },
+ { "line_strip", GL_LINE_STRIP },
+ { "triangles", GL_TRIANGLES },
+ { "triangles_adjacency", GL_TRIANGLES_ADJACENCY },
+ { "triangle_strip", GL_TRIANGLE_STRIP },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier((yyvsp[0].identifier), map[i].s, state) == 0) {
+ (yyval.type_qualifier).flags.q.prim_type = 1;
+ (yyval.type_qualifier).prim_type = map[i].e;
+ break;
+ }
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->has_geometry_shader() &&
+ !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state, "#version 150 layout "
+ "qualifier `%s' used", (yyvsp[0].identifier));
+ }
+ }
+
+ /* Layout qualifiers for ARB_shader_image_load_store. */
+ if (state->has_shader_image_load_store()) {
+ if (!(yyval.type_qualifier).flags.i) {
+ static const struct {
+ const char *name;
+ enum pipe_format format;
+ glsl_base_type base_type;
+ /** Minimum desktop GLSL version required for the image
+ * format. Use 130 if already present in the original
+ * ARB extension.
+ */
+ unsigned required_glsl;
+ /** Minimum GLSL ES version required for the image format. */
+ unsigned required_essl;
+ /* NV_image_formats */
+ bool nv_image_formats;
+ bool ext_qualifiers;
+ } map[] = {
+ { "rgba32f", PIPE_FORMAT_R32G32B32A32_FLOAT, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rgba16f", PIPE_FORMAT_R16G16B16A16_FLOAT, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rg32f", PIPE_FORMAT_R32G32_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rg16f", PIPE_FORMAT_R16G16_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r11f_g11f_b10f", PIPE_FORMAT_R11G11B10_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r32f", PIPE_FORMAT_R32_FLOAT, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "r16f", PIPE_FORMAT_R16_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba32ui", PIPE_FORMAT_R32G32B32A32_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "rgba16ui", PIPE_FORMAT_R16G16B16A16_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "rgb10_a2ui", PIPE_FORMAT_R10G10B10A2_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rgba8ui", PIPE_FORMAT_R8G8B8A8_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "rg32ui", PIPE_FORMAT_R32G32_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rg16ui", PIPE_FORMAT_R16G16_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rg8ui", PIPE_FORMAT_R8G8_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "r32ui", PIPE_FORMAT_R32_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "r16ui", PIPE_FORMAT_R16_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "r8ui", PIPE_FORMAT_R8_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rgba32i", PIPE_FORMAT_R32G32B32A32_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "rgba16i", PIPE_FORMAT_R16G16B16A16_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "rgba8i", PIPE_FORMAT_R8G8B8A8_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "rg32i", PIPE_FORMAT_R32G32_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "rg16i", PIPE_FORMAT_R16G16_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "rg8i", PIPE_FORMAT_R8G8_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "r32i", PIPE_FORMAT_R32_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "r16i", PIPE_FORMAT_R16_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "r8i", PIPE_FORMAT_R8_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "rgba16", PIPE_FORMAT_R16G16B16A16_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgb10_a2", PIPE_FORMAT_R10G10B10A2_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba8", PIPE_FORMAT_R8G8B8A8_UNORM, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rg16", PIPE_FORMAT_R16G16_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rg8", PIPE_FORMAT_R8G8_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r16", PIPE_FORMAT_R16_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r8", PIPE_FORMAT_R8_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba16_snorm", PIPE_FORMAT_R16G16B16A16_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba8_snorm", PIPE_FORMAT_R8G8B8A8_SNORM, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rg16_snorm", PIPE_FORMAT_R16G16_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rg8_snorm", PIPE_FORMAT_R8G8_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r16_snorm", PIPE_FORMAT_R16_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r8_snorm", PIPE_FORMAT_R8_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+
+ /* From GL_EXT_shader_image_load_store: */
+ /* base_type is incorrect but it'll be patched later when we know
+ * the variable type. See ast_to_hir.cpp */
+ { "size1x8", PIPE_FORMAT_R8_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size1x16", PIPE_FORMAT_R16_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size1x32", PIPE_FORMAT_R32_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size2x32", PIPE_FORMAT_R32G32_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size4x32", PIPE_FORMAT_R32G32B32A32_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ };
+
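+ /* Look the identifier up in the format table above, honoring each
+ * format's GLSL/ESSL version minimums, the NV_image_formats flag, and
+ * the ARB/EXT_shader_image_load_store qualifier gates.
+ */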
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if ((state->is_version(map[i].required_glsl,
+ map[i].required_essl) ||
+ (state->NV_image_formats_enable &&
+ map[i].nv_image_formats)) &&
+ match_layout_qualifier((yyvsp[0].identifier), map[i].name, state) == 0) {
+ /* Skip ARB_shader_image_load_store qualifiers if not enabled */
+ if (!map[i].ext_qualifiers && !(state->ARB_shader_image_load_store_enable ||
+ state->is_version(420, 310))) {
+ continue;
+ }
+ /* Skip EXT_shader_image_load_store qualifiers if not enabled */
+ if (map[i].ext_qualifiers && !state->EXT_shader_image_load_store_enable) {
+ continue;
+ }
+ (yyval.type_qualifier).flags.q.explicit_image_format = 1;
+ (yyval.type_qualifier).image_format = map[i].format;
+ (yyval.type_qualifier).image_base_type = map[i].base_type;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!(yyval.type_qualifier).flags.i) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "early_fragment_tests", state) == 0) {
+ /* From section 4.4.1.3 of the GLSL 4.50 specification
+ * (Fragment Shader Inputs):
+ *
+ * "Fragment shaders also allow the following layout
+ * qualifier on in only (not with variable declarations)
+ * layout-qualifier-id
+ * early_fragment_tests
+ * [...]"
+ */
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "early_fragment_tests layout qualifier only "
+ "valid in fragment shaders");
+ }
+
+ (yyval.type_qualifier).flags.q.early_fragment_tests = 1;
+ }
+
+ if (match_layout_qualifier((yyvsp[0].identifier), "inner_coverage", state) == 0) {
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "inner_coverage layout qualifier only "
+ "valid in fragment shaders");
+ }
+
+ if (state->INTEL_conservative_rasterization_enable) {
+ (yyval.type_qualifier).flags.q.inner_coverage = 1;
+ } else {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "inner_coverage layout qualifier present, "
+ "but the INTEL_conservative_rasterization extension "
+ "is not enabled.");
+ }
+ }
+
+ if (match_layout_qualifier((yyvsp[0].identifier), "post_depth_coverage", state) == 0) {
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "post_depth_coverage layout qualifier only "
+ "valid in fragment shaders");
+ }
+
+ if (state->ARB_post_depth_coverage_enable ||
+ state->INTEL_conservative_rasterization_enable) {
+ (yyval.type_qualifier).flags.q.post_depth_coverage = 1;
+ } else {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "post_depth_coverage layout qualifier present, "
+ "but the GL_ARB_post_depth_coverage extension "
+ "is not enabled.");
+ }
+ }
+
+ if ((yyval.type_qualifier).flags.q.post_depth_coverage && (yyval.type_qualifier).flags.q.inner_coverage) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "post_depth_coverage & inner_coverage layout qualifiers "
+ "are mutually exclusive");
+ }
+ }
+
+ const bool pixel_interlock_ordered = match_layout_qualifier((yyvsp[0].identifier),
+ "pixel_interlock_ordered", state) == 0;
+ const bool pixel_interlock_unordered = match_layout_qualifier((yyvsp[0].identifier),
+ "pixel_interlock_unordered", state) == 0;
+ const bool sample_interlock_ordered = match_layout_qualifier((yyvsp[0].identifier),
+ "sample_interlock_ordered", state) == 0;
+ const bool sample_interlock_unordered = match_layout_qualifier((yyvsp[0].identifier),
+ "sample_interlock_unordered", state) == 0;
+
+ if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& (yylsp[0]), state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ } else if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ !state->ARB_fragment_shader_interlock_enable &&
+ !state->NV_fragment_shader_interlock_enable) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "interlock layout qualifier present, but the "
+ "GL_ARB_fragment_shader_interlock or "
+ "GL_NV_fragment_shader_interlock extension is not "
+ "enabled.");
+ } else {
+ (yyval.type_qualifier).flags.q.pixel_interlock_ordered = pixel_interlock_ordered;
+ (yyval.type_qualifier).flags.q.pixel_interlock_unordered = pixel_interlock_unordered;
+ (yyval.type_qualifier).flags.q.sample_interlock_ordered = sample_interlock_ordered;
+ (yyval.type_qualifier).flags.q.sample_interlock_unordered = sample_interlock_unordered;
+ }
+
+ /* Layout qualifiers for tessellation evaluation shaders. */
+ if (!(yyval.type_qualifier).flags.i) {
+ static const struct {
+ const char *s;
+ GLenum e;
+ } map[] = {
+ /* triangles already parsed by gs-specific code */
+ { "quads", GL_QUADS },
+ { "isolines", GL_ISOLINES },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier((yyvsp[0].identifier), map[i].s, state) == 0) {
+ (yyval.type_qualifier).flags.q.prim_type = 1;
+ (yyval.type_qualifier).prim_type = map[i].e;
+ break;
+ }
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "primitive mode qualifier `%s' requires "
+ "GLSL 4.00 or ARB_tessellation_shader", (yyvsp[0].identifier));
+ }
+ }
+ if (!(yyval.type_qualifier).flags.i) {
+ static const struct {
+ const char *s;
+ enum gl_tess_spacing e;
+ } map[] = {
+ { "equal_spacing", TESS_SPACING_EQUAL },
+ { "fractional_odd_spacing", TESS_SPACING_FRACTIONAL_ODD },
+ { "fractional_even_spacing", TESS_SPACING_FRACTIONAL_EVEN },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier((yyvsp[0].identifier), map[i].s, state) == 0) {
+ (yyval.type_qualifier).flags.q.vertex_spacing = 1;
+ (yyval.type_qualifier).vertex_spacing = map[i].e;
+ break;
+ }
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "vertex spacing qualifier `%s' requires "
+ "GLSL 4.00 or ARB_tessellation_shader", (yyvsp[0].identifier));
+ }
+ }
+ if (!(yyval.type_qualifier).flags.i) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "cw", state) == 0) {
+ (yyval.type_qualifier).flags.q.ordering = 1;
+ (yyval.type_qualifier).ordering = GL_CW;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "ccw", state) == 0) {
+ (yyval.type_qualifier).flags.q.ordering = 1;
+ (yyval.type_qualifier).ordering = GL_CCW;
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "ordering qualifier `%s' requires "
+ "GLSL 4.00 or ARB_tessellation_shader", (yyvsp[0].identifier));
+ }
+ }
+ if (!(yyval.type_qualifier).flags.i) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "point_mode", state) == 0) {
+ (yyval.type_qualifier).flags.q.point_mode = 1;
+ (yyval.type_qualifier).point_mode = true;
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "qualifier `point_mode' requires "
+ "GLSL 4.00 or ARB_tessellation_shader");
+ }
+ }
+
+ if (!(yyval.type_qualifier).flags.i) {
+ static const struct {
+ const char *s;
+ uint32_t mask;
+ } map[] = {
+ { "blend_support_multiply", BLEND_MULTIPLY },
+ { "blend_support_screen", BLEND_SCREEN },
+ { "blend_support_overlay", BLEND_OVERLAY },
+ { "blend_support_darken", BLEND_DARKEN },
+ { "blend_support_lighten", BLEND_LIGHTEN },
+ { "blend_support_colordodge", BLEND_COLORDODGE },
+ { "blend_support_colorburn", BLEND_COLORBURN },
+ { "blend_support_hardlight", BLEND_HARDLIGHT },
+ { "blend_support_softlight", BLEND_SOFTLIGHT },
+ { "blend_support_difference", BLEND_DIFFERENCE },
+ { "blend_support_exclusion", BLEND_EXCLUSION },
+ { "blend_support_hsl_hue", BLEND_HSL_HUE },
+ { "blend_support_hsl_saturation", BLEND_HSL_SATURATION },
+ { "blend_support_hsl_color", BLEND_HSL_COLOR },
+ { "blend_support_hsl_luminosity", BLEND_HSL_LUMINOSITY },
+ { "blend_support_all_equations", BLEND_ALL },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier((yyvsp[0].identifier), map[i].s, state) == 0) {
+ (yyval.type_qualifier).flags.q.blend_support = 1;
+ state->fs_blend_support |= map[i].mask;
+ break;
+ }
+ }
+
+ if ((yyval.type_qualifier).flags.i &&
+ !state->KHR_blend_equation_advanced_enable &&
+ !state->is_version(0, 320)) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "advanced blending layout qualifiers require "
+ "ESSL 3.20 or KHR_blend_equation_advanced");
+ }
+
+ if ((yyval.type_qualifier).flags.i && state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "advanced blending layout qualifiers only "
+ "valid in fragment shaders");
+ }
+ }
+
+ /* Layout qualifiers for ARB_compute_variable_group_size. */
+ if (!(yyval.type_qualifier).flags.i) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "local_size_variable", state) == 0) {
+ (yyval.type_qualifier).flags.q.local_size_variable = 1;
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->ARB_compute_variable_group_size_enable) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "qualifier `local_size_variable` requires "
+ "ARB_compute_variable_group_size");
+ }
+ }
+
+ /* Layout qualifiers for ARB_bindless_texture. */
+ if (!(yyval.type_qualifier).flags.i) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "bindless_sampler", state) == 0)
+ (yyval.type_qualifier).flags.q.bindless_sampler = 1;
+ if (match_layout_qualifier((yyvsp[0].identifier), "bound_sampler", state) == 0)
+ (yyval.type_qualifier).flags.q.bound_sampler = 1;
+
+ if (state->has_shader_image_load_store()) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "bindless_image", state) == 0)
+ (yyval.type_qualifier).flags.q.bindless_image = 1;
+ if (match_layout_qualifier((yyvsp[0].identifier), "bound_image", state) == 0)
+ (yyval.type_qualifier).flags.q.bound_image = 1;
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->has_bindless()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "qualifier `%s` requires "
+ "ARB_bindless_texture", (yyvsp[0].identifier));
+ }
+ }
+
+ if (!(yyval.type_qualifier).flags.i &&
+ state->EXT_shader_framebuffer_fetch_non_coherent_enable) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "noncoherent", state) == 0)
+ (yyval.type_qualifier).flags.q.non_coherent = 1;
+ }
+
+ /* Layout qualifiers for NV_compute_shader_derivatives. */
+ if (!(yyval.type_qualifier).flags.i) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "derivative_group_quadsNV", state) == 0) {
+ (yyval.type_qualifier).flags.q.derivative_group = 1;
+ (yyval.type_qualifier).derivative_group = DERIVATIVE_GROUP_QUADS;
+ } else if (match_layout_qualifier((yyvsp[0].identifier), "derivative_group_linearNV", state) == 0) {
+ (yyval.type_qualifier).flags.q.derivative_group = 1;
+ (yyval.type_qualifier).derivative_group = DERIVATIVE_GROUP_LINEAR;
+ }
+
+ if ((yyval.type_qualifier).flags.i) {
+ if (!state->has_compute_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "qualifier `%s' requires "
+ "a compute shader", (yyvsp[0].identifier));
+ }
+
+ if (!state->NV_compute_shader_derivatives_enable) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "qualifier `%s' requires "
+ "NV_compute_shader_derivatives", (yyvsp[0].identifier));
+ }
+
+ if (state->NV_compute_shader_derivatives_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "NV_compute_shader_derivatives layout "
+ "qualifier `%s' used", (yyvsp[0].identifier));
+ }
+ }
+ }
+
+ /* Layout qualifier for NV_viewport_array2. */
+ if (!(yyval.type_qualifier).flags.i && state->stage != MESA_SHADER_FRAGMENT) {
+ if (match_layout_qualifier((yyvsp[0].identifier), "viewport_relative", state) == 0) {
+ (yyval.type_qualifier).flags.q.viewport_relative = 1;
+ }
+
+ if ((yyval.type_qualifier).flags.i && !state->NV_viewport_array2_enable) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "qualifier `%s' requires "
+ "GL_NV_viewport_array2", (yyvsp[0].identifier));
+ }
+
+ if ((yyval.type_qualifier).flags.i && state->NV_viewport_array2_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "GL_NV_viewport_array2 layout "
+ "identifier `%s' used", (yyvsp[0].identifier));
+ }
+ }
+
+ if (!(yyval.type_qualifier).flags.i) {
+ _mesa_glsl_error(& (yylsp[0]), state, "unrecognized layout identifier "
+ "`%s'", (yyvsp[0].identifier));
+ YYERROR;
+ }
+ }
+#line 4239 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 151:
+#line 1738 "src/compiler/glsl/glsl_parser.yy"
+ {
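+ /* Layout qualifiers of the form identifier '=' value: the identifier
+ * is (yyvsp[-2]) and the constant expression is (yyvsp[0]). Identifiers
+ * that match nothing below reach the unrecognized-identifier error.
+ */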
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ void *ctx = state->linalloc;
+
+ if ((yyvsp[0].expression)->oper != ast_int_constant &&
+ (yyvsp[0].expression)->oper != ast_uint_constant &&
+ !state->has_enhanced_layouts()) {
+ _mesa_glsl_error(& (yylsp[-2]), state,
+ "compile-time constant expressions require "
+ "GLSL 4.40 or ARB_enhanced_layouts");
+ }
+
+ if (match_layout_qualifier("align", (yyvsp[-2].identifier), state) == 0) {
+ if (!state->has_enhanced_layouts()) {
+ _mesa_glsl_error(& (yylsp[-2]), state,
+ "align qualifier requires "
+ "GLSL 4.40 or ARB_enhanced_layouts");
+ } else {
+ (yyval.type_qualifier).flags.q.explicit_align = 1;
+ (yyval.type_qualifier).align = (yyvsp[0].expression);
+ }
+ }
+
+ if (match_layout_qualifier("location", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.explicit_location = 1;
+
+ if ((yyval.type_qualifier).flags.q.attribute == 1 &&
+ state->ARB_explicit_attrib_location_warn) {
+ _mesa_glsl_warning(& (yylsp[-2]), state,
+ "GL_ARB_explicit_attrib_location layout "
+ "identifier `%s' used", (yyvsp[-2].identifier));
+ }
+ (yyval.type_qualifier).location = (yyvsp[0].expression);
+ }
+
+ if (match_layout_qualifier("component", (yyvsp[-2].identifier), state) == 0) {
+ if (!state->has_enhanced_layouts()) {
+ _mesa_glsl_error(& (yylsp[-2]), state,
+ "component qualifier requires "
+ "GLSL 4.40 or ARB_enhanced_layouts");
+ } else {
+ (yyval.type_qualifier).flags.q.explicit_component = 1;
+ (yyval.type_qualifier).component = (yyvsp[0].expression);
+ }
+ }
+
+ if (match_layout_qualifier("index", (yyvsp[-2].identifier), state) == 0) {
+ if (state->es_shader && !state->EXT_blend_func_extended_enable) {
+ _mesa_glsl_error(& (yylsp[0]), state, "index layout qualifier requires EXT_blend_func_extended");
+ YYERROR;
+ }
+
+ (yyval.type_qualifier).flags.q.explicit_index = 1;
+ (yyval.type_qualifier).index = (yyvsp[0].expression);
+ }
+
+ if ((state->has_420pack_or_es31() ||
+ state->has_atomic_counters() ||
+ state->has_shader_storage_buffer_objects()) &&
+ match_layout_qualifier("binding", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.explicit_binding = 1;
+ (yyval.type_qualifier).binding = (yyvsp[0].expression);
+ }
+
+ if ((state->has_atomic_counters() ||
+ state->has_enhanced_layouts()) &&
+ match_layout_qualifier("offset", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.explicit_offset = 1;
+ (yyval.type_qualifier).offset = (yyvsp[0].expression);
+ }
+
+ if (match_layout_qualifier("max_vertices", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.max_vertices = 1;
+ (yyval.type_qualifier).max_vertices = new(ctx) ast_layout_expression((yylsp[-2]), (yyvsp[0].expression));
+ if (!state->has_geometry_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "#version 150 max_vertices qualifier "
+ "specified", (yyvsp[0].expression));
+ }
+ }
+
+ if (state->stage == MESA_SHADER_GEOMETRY) {
+ if (match_layout_qualifier("stream", (yyvsp[-2].identifier), state) == 0 &&
+ state->check_explicit_attrib_stream_allowed(& (yylsp[0]))) {
+ (yyval.type_qualifier).flags.q.stream = 1;
+ (yyval.type_qualifier).flags.q.explicit_stream = 1;
+ (yyval.type_qualifier).stream = (yyvsp[0].expression);
+ }
+ }
+
+ if (state->has_enhanced_layouts()) {
+ if (match_layout_qualifier("xfb_buffer", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.xfb_buffer = 1;
+ (yyval.type_qualifier).flags.q.explicit_xfb_buffer = 1;
+ (yyval.type_qualifier).xfb_buffer = (yyvsp[0].expression);
+ }
+
+ if (match_layout_qualifier("xfb_offset", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.explicit_xfb_offset = 1;
+ (yyval.type_qualifier).offset = (yyvsp[0].expression);
+ }
+
+ if (match_layout_qualifier("xfb_stride", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.xfb_stride = 1;
+ (yyval.type_qualifier).flags.q.explicit_xfb_stride = 1;
+ (yyval.type_qualifier).xfb_stride = (yyvsp[0].expression);
+ }
+ }
+
+ static const char * const local_size_qualifiers[3] = {
+ "local_size_x",
+ "local_size_y",
+ "local_size_z",
+ };
+ for (int i = 0; i < 3; i++) {
+ if (match_layout_qualifier(local_size_qualifiers[i], (yyvsp[-2].identifier),
+ state) == 0) {
+ if (!state->has_compute_shader()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "%s qualifier requires GLSL 4.30 or "
+ "GLSL ES 3.10 or ARB_compute_shader",
+ local_size_qualifiers[i]);
+ YYERROR;
+ } else {
+ (yyval.type_qualifier).flags.q.local_size |= (1 << i);
+ (yyval.type_qualifier).local_size[i] = new(ctx) ast_layout_expression((yylsp[-2]), (yyvsp[0].expression));
+ }
+ break;
+ }
+ }
+
+ if (match_layout_qualifier("invocations", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.invocations = 1;
+ (yyval.type_qualifier).invocations = new(ctx) ast_layout_expression((yylsp[-2]), (yyvsp[0].expression));
+ if (!state->is_version(400, 320) &&
+ !state->ARB_gpu_shader5_enable &&
+ !state->OES_geometry_shader_enable &&
+ !state->EXT_geometry_shader_enable) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "GL_ARB_gpu_shader5 invocations "
+ "qualifier specified", (yyvsp[0].expression));
+ }
+ }
+
+ /* Layout qualifiers for tessellation control shaders. */
+ if (match_layout_qualifier("vertices", (yyvsp[-2].identifier), state) == 0) {
+ (yyval.type_qualifier).flags.q.vertices = 1;
+ (yyval.type_qualifier).vertices = new(ctx) ast_layout_expression((yylsp[-2]), (yyvsp[0].expression));
+ if (!state->has_tessellation_shader()) {
+ _mesa_glsl_error(& (yylsp[-2]), state,
+ "vertices qualifier requires GLSL 4.00 or "
+ "ARB_tessellation_shader");
+ }
+ }
+
+ /* If the identifier didn't match any known layout identifiers,
+ * emit an error.
+ */
+ if (!(yyval.type_qualifier).flags.i) {
+ _mesa_glsl_error(& (yylsp[-2]), state, "unrecognized layout identifier "
+ "`%s'", (yyvsp[-2].identifier));
+ YYERROR;
+ }
+ }
+#line 4408 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
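[Editorial aside, not part of the generated file: case 151 above is the semantic action for a single layout(identifier = value) entry. A hypothetical GLSL sketch, each line from a different shader stage and assuming the required version or extension is enabled, of inputs that land in the branches above:]

    layout(location = 2) in vec4 a_position;      // explicit_location (vertex input)
    layout(binding = 1) uniform sampler2D u_tex;  // explicit_binding (GLSL 4.20 / 420pack)
    layout(local_size_x = 64) in;                 // compute shader: local_size[0]
    layout(max_vertices = 3) out;                 // geometry shader: max_vertices
    layout(vertices = 4) out;                     // tessellation control: vertices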
+ case 152:
+#line 1903 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ /* Layout qualifiers for ARB_uniform_buffer_object. */
+ if ((yyval.type_qualifier).flags.q.uniform && !state->has_uniform_buffer_objects()) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "layout qualifier `%s' is used", (yyvsp[0].type_qualifier));
+ } else if ((yyval.type_qualifier).flags.q.uniform && state->ARB_uniform_buffer_object_warn) {
+ _mesa_glsl_warning(& (yylsp[0]), state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "layout qualifier `%s' is used", (yyvsp[0].type_qualifier));
+ }
+ }
+#line 4426 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 153:
+#line 1929 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.row_major = 1;
+ }
+#line 4435 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 154:
+#line 1934 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.packed = 1;
+ }
+#line 4444 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 155:
+#line 1939 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.shared = 1;
+ }
+#line 4453 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 156:
+#line 1947 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.subroutine = 1;
+ }
+#line 4462 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 157:
+#line 1952 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.subroutine = 1;
+ (yyval.type_qualifier).subroutine_list = (yyvsp[-1].subroutine_list);
+ }
+#line 4472 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 158:
+#line 1961 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ decl->set_location((yylsp[0]));
+
+ (yyval.subroutine_list) = new(ctx) ast_subroutine_list();
+ (yyval.subroutine_list)->declarations.push_tail(&decl->link);
+ }
+#line 4485 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 159:
+#line 1970 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ decl->set_location((yylsp[0]));
+
+ (yyval.subroutine_list) = (yyvsp[-2].subroutine_list);
+ (yyval.subroutine_list)->declarations.push_tail(&decl->link);
+ }
+#line 4498 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
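[Editorial aside: cases 156-159 parse the subroutine qualifier and its optional type list. A hypothetical GLSL 4.00 / ARB_shader_subroutine sketch of the three forms:]

    subroutine vec4 shade_t(vec4 c);                       // declares a subroutine type
    subroutine(shade_t) vec4 plain(vec4 c) { return c; }   // case 157: subroutine with a type list
    subroutine uniform shade_t shade;                      // callable subroutine uniform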
+ case 160:
+#line 1982 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.smooth = 1;
+ }
+#line 4507 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 161:
+#line 1987 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.flat = 1;
+ }
+#line 4516 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 162:
+#line 1992 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.noperspective = 1;
+ }
+#line 4525 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 163:
+#line 2001 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.invariant = 1;
+ }
+#line 4534 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 164:
+#line 2006 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.precise = 1;
+ }
+#line 4543 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 171:
+#line 2017 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(&(yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).precision = (yyvsp[0].n);
+ }
+#line 4552 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 172:
+#line 2035 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).flags.q.precise)
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate \"precise\" qualifier");
+
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).flags.q.precise = 1;
+ }
+#line 4564 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 173:
+#line 2043 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).flags.q.invariant)
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate \"invariant\" qualifier");
+
+ if (!state->has_420pack_or_es31() && (yyvsp[0].type_qualifier).flags.q.precise)
+ _mesa_glsl_error(&(yylsp[-1]), state,
+ "\"invariant\" must come after \"precise\"");
+
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).flags.q.invariant = 1;
+
+ /* GLSL ES 3.00 spec, section 4.6.1 "The Invariant Qualifier":
+ *
+ * "Only variables output from a shader can be candidates for invariance.
+ * This includes user-defined output variables and the built-in output
+ * variables. As only outputs can be declared as invariant, an invariant
+ * output from one shader stage will still match an input of a subsequent
+ * stage without the input being declared as invariant."
+ *
+ * On the desktop side, this text first appears in GLSL 4.30.
+ */
+ if (state->is_version(430, 300) && (yyval.type_qualifier).flags.q.in)
+ _mesa_glsl_error(&(yylsp[-1]), state, "invariant qualifiers cannot be used with shader inputs");
+ }
+#line 4593 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
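[Editorial aside: per the GLSL ES 3.00 rule quoted in case 173, only outputs may be invariant. Hypothetical lines, each in its own shader:]

    invariant out vec4 v_color;   // ok: an output may be declared invariant
    invariant in vec4 v_color;    // rejected in GLSL 4.30 / ES 3.00: inputs cannot be invariant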
+ case 174:
+#line 2068 "src/compiler/glsl/glsl_parser.yy"
+ {
+ /* Section 4.3 of the GLSL 1.40 specification states:
+ * "...qualified with one of these interpolation qualifiers"
+ *
+ * GLSL 1.30 claims to allow "one or more", but insists that:
+ * "These interpolation qualifiers may only precede the qualifiers in,
+ * centroid in, out, or centroid out in a declaration."
+ *
+ * ...which means that e.g. smooth can't precede smooth, so there can be
+ * only one after all, and the 1.40 text is a clarification, not a change.
+ */
+ if ((yyvsp[0].type_qualifier).has_interpolation())
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate interpolation qualifier");
+
+ if (!state->has_420pack_or_es31() &&
+ ((yyvsp[0].type_qualifier).flags.q.precise || (yyvsp[0].type_qualifier).flags.q.invariant)) {
+ _mesa_glsl_error(&(yylsp[-1]), state, "interpolation qualifiers must come "
+ "after \"precise\" or \"invariant\"");
+ }
+
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 4621 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 175:
+#line 2092 "src/compiler/glsl/glsl_parser.yy"
+ {
+ /* In the absence of ARB_shading_language_420pack, layout qualifiers may
+ * appear no later than auxiliary storage qualifiers. There is no
+ * particularly clear spec language mandating this, but in all examples
+ * the layout qualifier precedes the storage qualifier.
+ *
+ * We allow combinations of layout with interpolation, invariant or
+ * precise qualifiers since these are useful in ARB_separate_shader_objects.
+ * There is no clear spec guidance on this either.
+ */
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(& (yylsp[-1]), state, (yyvsp[0].type_qualifier), false, (yyvsp[0].type_qualifier).has_layout());
+ }
+#line 4639 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 176:
+#line 2106 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 4648 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 177:
+#line 2111 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).has_auxiliary_storage()) {
+ _mesa_glsl_error(&(yylsp[-1]), state,
+ "duplicate auxiliary storage qualifier (centroid or sample)");
+ }
+
+ if ((!state->has_420pack_or_es31() && !state->EXT_gpu_shader4_enable) &&
+ ((yyvsp[0].type_qualifier).flags.q.precise || (yyvsp[0].type_qualifier).flags.q.invariant ||
+ (yyvsp[0].type_qualifier).has_interpolation() || (yyvsp[0].type_qualifier).has_layout())) {
+ _mesa_glsl_error(&(yylsp[-1]), state, "auxiliary storage qualifiers must come "
+ "just before storage qualifiers");
+ }
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 4668 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 178:
+#line 2127 "src/compiler/glsl/glsl_parser.yy"
+ {
+ /* Section 4.3 of the GLSL 1.20 specification states:
+ * "Variable declarations may have a storage qualifier specified..."
+ * 1.30 clarifies this to "may have one storage qualifier".
+ *
+ * GL_EXT_gpu_shader4 allows "varying out" in fragment shaders.
+ */
+ if ((yyvsp[0].type_qualifier).has_storage() &&
+ (!state->EXT_gpu_shader4_enable ||
+ state->stage != MESA_SHADER_FRAGMENT ||
+ !(yyvsp[-1].type_qualifier).flags.q.varying || !(yyvsp[0].type_qualifier).flags.q.out))
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate storage qualifier");
+
+ if (!state->has_420pack_or_es31() &&
+ ((yyvsp[0].type_qualifier).flags.q.precise || (yyvsp[0].type_qualifier).flags.q.invariant || (yyvsp[0].type_qualifier).has_interpolation() ||
+ (yyvsp[0].type_qualifier).has_layout() || (yyvsp[0].type_qualifier).has_auxiliary_storage())) {
+ _mesa_glsl_error(&(yylsp[-1]), state, "storage qualifiers must come after "
+ "precise, invariant, interpolation, layout and auxiliary "
+ "storage qualifiers");
+ }
+
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 4697 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 179:
+#line 2152 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type_qualifier).precision != ast_precision_none)
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate precision qualifier");
+
+ if (!(state->has_420pack_or_es31()) &&
+ (yyvsp[0].type_qualifier).flags.i != 0)
+ _mesa_glsl_error(&(yylsp[-1]), state, "precision qualifiers must come last");
+
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).precision = (yyvsp[-1].n);
+ }
+#line 4713 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 180:
+#line 2164 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ (yyval.type_qualifier).merge_qualifier(&(yylsp[-1]), state, (yyvsp[0].type_qualifier), false);
+ }
+#line 4722 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 181:
+#line 2172 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.centroid = 1;
+ }
+#line 4731 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 182:
+#line 2177 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.sample = 1;
+ }
+#line 4740 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 183:
+#line 2182 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.patch = 1;
+ }
+#line 4749 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 184:
+#line 2189 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.constant = 1;
+ }
+#line 4758 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 185:
+#line 2194 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.attribute = 1;
+ }
+#line 4767 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 186:
+#line 2199 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.varying = 1;
+ }
+#line 4776 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 187:
+#line 2204 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.in = 1;
+ }
+#line 4785 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 188:
+#line 2209 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.out = 1;
+
+ if (state->stage == MESA_SHADER_GEOMETRY &&
+ state->has_explicit_attrib_stream()) {
+ /* Section 4.3.8.2 (Output Layout Qualifiers) of the GLSL 4.00
+ * spec says:
+ *
+ * "If the block or variable is declared with the stream
+ * identifier, it is associated with the specified stream;
+ * otherwise, it is associated with the current default stream."
+ */
+ (yyval.type_qualifier).flags.q.stream = 1;
+ (yyval.type_qualifier).flags.q.explicit_stream = 0;
+ (yyval.type_qualifier).stream = state->out_qualifier->stream;
+ }
+
+ if (state->has_enhanced_layouts()) {
+ (yyval.type_qualifier).flags.q.xfb_buffer = 1;
+ (yyval.type_qualifier).flags.q.explicit_xfb_buffer = 0;
+ (yyval.type_qualifier).xfb_buffer = state->out_qualifier->xfb_buffer;
+ }
+ }
+#line 4814 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
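[Editorial aside: in a geometry shader with GL_ARB_gpu_shader5 or GLSL 4.00, case 188 makes plain `out` declarations inherit the current default stream, per the spec text quoted above. Hypothetical sketch:]

    layout(stream = 1) out;            // updates the default stream
    out vec4 gs_a;                     // associated with stream 1 via the default
    layout(stream = 0) out vec4 gs_b;  // an explicit stream overrides the default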
+ case 189:
+#line 2234 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.in = 1;
+ (yyval.type_qualifier).flags.q.out = 1;
+
+ if (!state->has_framebuffer_fetch() ||
+ !state->is_version(130, 300) ||
+ state->stage != MESA_SHADER_FRAGMENT)
+ _mesa_glsl_error(&(yylsp[0]), state, "A single interface variable cannot be "
+ "declared as both input and output");
+ }
+#line 4830 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 190:
+#line 2246 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.uniform = 1;
+ }
+#line 4839 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 191:
+#line 2251 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.buffer = 1;
+ }
+#line 4848 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 192:
+#line 2256 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.shared_storage = 1;
+ }
+#line 4857 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 193:
+#line 2264 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.coherent = 1;
+ }
+#line 4866 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 194:
+#line 2269 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q._volatile = 1;
+ }
+#line 4875 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 195:
+#line 2274 "src/compiler/glsl/glsl_parser.yy"
+ {
+ STATIC_ASSERT(sizeof((yyval.type_qualifier).flags.q) <= sizeof((yyval.type_qualifier).flags.i));
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.restrict_flag = 1;
+ }
+#line 4885 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 196:
+#line 2280 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.read_only = 1;
+ }
+#line 4894 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 197:
+#line 2285 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.write_only = 1;
+ }
+#line 4903 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 198:
+#line 2293 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.array_specifier) = new(ctx) ast_array_specifier((yylsp[-1]), new(ctx) ast_expression(
+ ast_unsized_array_dim, NULL,
+ NULL, NULL));
+ (yyval.array_specifier)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 4915 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 199:
+#line 2301 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.array_specifier) = new(ctx) ast_array_specifier((yylsp[-2]), (yyvsp[-1].expression));
+ (yyval.array_specifier)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 4925 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 200:
+#line 2307 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.array_specifier) = (yyvsp[-2].array_specifier);
+
+ if (state->check_arrays_of_arrays_allowed(& (yylsp[-2]))) {
+ (yyval.array_specifier)->add_dimension(new(ctx) ast_expression(ast_unsized_array_dim, NULL,
+ NULL, NULL));
+ }
+ }
+#line 4939 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 201:
+#line 2317 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.array_specifier) = (yyvsp[-3].array_specifier);
+
+ if (state->check_arrays_of_arrays_allowed(& (yylsp[-3]))) {
+ (yyval.array_specifier)->add_dimension((yyvsp[-1].expression));
+ }
+ }
+#line 4951 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
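[Editorial aside: cases 198-201 accumulate array dimensions onto an ast_array_specifier; the multi-dimension forms are gated by check_arrays_of_arrays_allowed. Hypothetical declarations:]

    float a[4];      // single sized dimension (case 199)
    float b[];       // single unsized dimension (case 198)
    float c[3][2];   // arrays of arrays: GLSL 4.30 / ES 3.10 / ARB_arrays_of_arrays (cases 200/201)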
+ case 203:
+#line 2329 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_specifier) = (yyvsp[-1].type_specifier);
+ (yyval.type_specifier)->array_specifier = (yyvsp[0].array_specifier);
+ }
+#line 4960 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 204:
+#line 2337 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.type_specifier) = new(ctx) ast_type_specifier((yyvsp[0].type));
+ (yyval.type_specifier)->set_location((yylsp[0]));
+ }
+#line 4970 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 205:
+#line 2343 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.type_specifier) = new(ctx) ast_type_specifier((yyvsp[0].struct_specifier));
+ (yyval.type_specifier)->set_location((yylsp[0]));
+ }
+#line 4980 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 206:
+#line 2349 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.type_specifier) = new(ctx) ast_type_specifier((yyvsp[0].identifier));
+ (yyval.type_specifier)->set_location((yylsp[0]));
+ }
+#line 4990 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 207:
+#line 2357 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.type) = glsl_type::void_type; }
+#line 4996 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 208:
+#line 2358 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.type) = (yyvsp[0].type); }
+#line 5002 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 209:
+#line 2360 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].type) == glsl_type::int_type) {
+ (yyval.type) = glsl_type::uint_type;
+ } else {
+ _mesa_glsl_error(&(yylsp[-1]), state,
+ "\"unsigned\" is only allowed before \"int\"");
+ }
+ }
+#line 5015 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
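[Editorial aside: case 209 is the legacy `unsigned` keyword path; this parser accepts it only directly before `int`. Hypothetical lines:]

    unsigned int count;   // accepted, typed as uint
    unsigned float f;     // error: "unsigned" is only allowed before "int"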
+ case 210:
+#line 2372 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->check_precision_qualifiers_allowed(&(yylsp[0]));
+ (yyval.n) = ast_precision_high;
+ }
+#line 5024 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 211:
+#line 2377 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->check_precision_qualifiers_allowed(&(yylsp[0]));
+ (yyval.n) = ast_precision_medium;
+ }
+#line 5033 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 212:
+#line 2382 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->check_precision_qualifiers_allowed(&(yylsp[0]));
+ (yyval.n) = ast_precision_low;
+ }
+#line 5042 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
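[Editorial aside: cases 210-212 map the precision keywords, and check_precision_qualifiers_allowed rejects them where the GLSL version forbids them. Hypothetical GLSL ES lines:]

    precision mediump float;   // default-precision statement
    highp vec3 v_pos;          // precision qualifier on a single declaration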
+ case 213:
+#line 2390 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.struct_specifier) = new(ctx) ast_struct_specifier((yyvsp[-3].identifier), (yyvsp[-1].declarator_list));
+ (yyval.struct_specifier)->set_location_range((yylsp[-3]), (yylsp[0]));
+ state->symbols->add_type((yyvsp[-3].identifier), glsl_type::void_type);
+ }
+#line 5053 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 214:
+#line 2397 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+
+ /* All anonymous structs have the same name. This simplifies matching of
+ * globals whose type is an unnamed struct.
+ *
+ * It also avoids a memory leak when the same shader is compiled over and
+ * over again.
+ */
+ (yyval.struct_specifier) = new(ctx) ast_struct_specifier("#anon_struct", (yyvsp[-1].declarator_list));
+
+ (yyval.struct_specifier)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 5071 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 215:
+#line 2414 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.declarator_list) = (yyvsp[0].declarator_list);
+ (yyvsp[0].declarator_list)->link.self_link();
+ }
+#line 5080 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 216:
+#line 2419 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.declarator_list) = (yyvsp[-1].declarator_list);
+ (yyval.declarator_list)->link.insert_before(& (yyvsp[0].declarator_list)->link);
+ }
+#line 5089 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 217:
+#line 2427 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_fully_specified_type *const type = (yyvsp[-2].fully_specified_type);
+ type->set_location((yylsp[-2]));
+
+ if (state->has_bindless()) {
+ ast_type_qualifier input_layout_mask;
+
+ /* Allow qualifiers to be declared for images. */
+ input_layout_mask.flags.i = 0;
+ input_layout_mask.flags.q.coherent = 1;
+ input_layout_mask.flags.q._volatile = 1;
+ input_layout_mask.flags.q.restrict_flag = 1;
+ input_layout_mask.flags.q.read_only = 1;
+ input_layout_mask.flags.q.write_only = 1;
+ input_layout_mask.flags.q.explicit_image_format = 1;
+
+ if ((type->qualifier.flags.i & ~input_layout_mask.flags.i) != 0) {
+ _mesa_glsl_error(&(yylsp[-2]), state,
+ "only precision and image qualifiers may be "
+ "applied to structure members");
+ }
+ } else {
+ if (type->qualifier.flags.i != 0)
+ _mesa_glsl_error(&(yylsp[-2]), state,
+ "only precision qualifiers may be applied to "
+ "structure members");
+ }
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list(type);
+ (yyval.declarator_list)->set_location((yylsp[-1]));
+
+ (yyval.declarator_list)->declarations.push_degenerate_list_at_head(& (yyvsp[-1].declaration)->link);
+ }
+#line 5128 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 218:
+#line 2465 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.declaration) = (yyvsp[0].declaration);
+ (yyvsp[0].declaration)->link.self_link();
+ }
+#line 5137 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 219:
+#line 2470 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.declaration) = (yyvsp[-2].declaration);
+ (yyval.declaration)->link.insert_before(& (yyvsp[0].declaration)->link);
+ }
+#line 5146 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 220:
+#line 2478 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.declaration) = new(ctx) ast_declaration((yyvsp[0].identifier), NULL, NULL);
+ (yyval.declaration)->set_location((yylsp[0]));
+ }
+#line 5156 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 221:
+#line 2484 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.declaration) = new(ctx) ast_declaration((yyvsp[-1].identifier), (yyvsp[0].array_specifier), NULL);
+ (yyval.declaration)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 5166 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
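[Editorial aside: cases 213-221 build struct specifiers and their member declarator lists; per the comment in case 214, every tag-less struct is given the internal name "#anon_struct". Hypothetical declarations:]

    struct Light { vec3 pos; float radius; };   // case 213: named struct, registered as a type
    struct { float x; } anon_instance;          // case 214: internally named "#anon_struct"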
+ case 223:
+#line 2494 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[-1].expression);
+ }
+#line 5174 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 224:
+#line 2498 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.expression) = (yyvsp[-2].expression);
+ }
+#line 5182 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 225:
+#line 2505 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.expression) = new(ctx) ast_aggregate_initializer();
+ (yyval.expression)->set_location((yylsp[0]));
+ (yyval.expression)->expressions.push_tail(& (yyvsp[0].expression)->link);
+ }
+#line 5193 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 226:
+#line 2512 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyvsp[-2].expression)->expressions.push_tail(& (yyvsp[0].expression)->link);
+ }
+#line 5201 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 228:
+#line 2524 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = (ast_node *) (yyvsp[0].compound_statement); }
+#line 5207 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 237:
+#line 2540 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.compound_statement) = new(ctx) ast_compound_statement(true, NULL);
+ (yyval.compound_statement)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 5217 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 238:
+#line 2546 "src/compiler/glsl/glsl_parser.yy"
+ {
+ state->symbols->push_scope();
+ }
+#line 5225 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 239:
+#line 2550 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.compound_statement) = new(ctx) ast_compound_statement(true, (yyvsp[-1].node));
+ (yyval.compound_statement)->set_location_range((yylsp[-3]), (yylsp[0]));
+ state->symbols->pop_scope();
+ }
+#line 5236 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 240:
+#line 2559 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = (ast_node *) (yyvsp[0].compound_statement); }
+#line 5242 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 242:
+#line 2565 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.compound_statement) = new(ctx) ast_compound_statement(false, NULL);
+ (yyval.compound_statement)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 5252 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 243:
+#line 2571 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.compound_statement) = new(ctx) ast_compound_statement(false, (yyvsp[-1].node));
+ (yyval.compound_statement)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 5262 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 244:
+#line 2580 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].node) == NULL) {
+ _mesa_glsl_error(& (yylsp[0]), state, "<nil> statement");
+ assert((yyvsp[0].node) != NULL);
+ }
+
+ (yyval.node) = (yyvsp[0].node);
+ (yyval.node)->link.self_link();
+ }
+#line 5276 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 245:
+#line 2590 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if ((yyvsp[0].node) == NULL) {
+ _mesa_glsl_error(& (yylsp[0]), state, "<nil> statement");
+ assert((yyvsp[0].node) != NULL);
+ }
+ (yyval.node) = (yyvsp[-1].node);
+ (yyval.node)->link.insert_before(& (yyvsp[0].node)->link);
+ }
+#line 5289 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 246:
+#line 2599 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (!state->allow_extension_directive_midshader) {
+ _mesa_glsl_error(& (yylsp[-1]), state,
+ "#extension directive is not allowed "
+ "in the middle of a shader");
+ YYERROR;
+ }
+ }
+#line 5302 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 247:
+#line 2611 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_expression_statement(NULL);
+ (yyval.node)->set_location((yylsp[0]));
+ }
+#line 5312 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 248:
+#line 2617 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_expression_statement((yyvsp[-1].expression));
+ (yyval.node)->set_location((yylsp[-1]));
+ }
+#line 5322 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 249:
+#line 2626 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = new(state->linalloc) ast_selection_statement((yyvsp[-2].expression), (yyvsp[0].selection_rest_statement).then_statement,
+ (yyvsp[0].selection_rest_statement).else_statement);
+ (yyval.node)->set_location_range((yylsp[-4]), (yylsp[0]));
+ }
+#line 5332 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 250:
+#line 2635 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.selection_rest_statement).then_statement = (yyvsp[-2].node);
+ (yyval.selection_rest_statement).else_statement = (yyvsp[0].node);
+ }
+#line 5341 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 251:
+#line 2640 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.selection_rest_statement).then_statement = (yyvsp[0].node);
+ (yyval.selection_rest_statement).else_statement = NULL;
+ }
+#line 5350 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 252:
+#line 2648 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = (ast_node *) (yyvsp[0].expression);
+ }
+#line 5358 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 253:
+#line 2652 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration((yyvsp[-2].identifier), NULL, (yyvsp[0].expression));
+ ast_declarator_list *declarator = new(ctx) ast_declarator_list((yyvsp[-3].fully_specified_type));
+ decl->set_location_range((yylsp[-2]), (yylsp[0]));
+ declarator->set_location((yylsp[-3]));
+
+ declarator->declarations.push_tail(&decl->link);
+ (yyval.node) = declarator;
+ }
+#line 5373 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 254:
+#line 2670 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = new(state->linalloc) ast_switch_statement((yyvsp[-2].expression), (yyvsp[0].switch_body));
+ (yyval.node)->set_location_range((yylsp[-4]), (yylsp[0]));
+ }
+#line 5382 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 255:
+#line 2678 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.switch_body) = new(state->linalloc) ast_switch_body(NULL);
+ (yyval.switch_body)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 5391 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 256:
+#line 2683 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.switch_body) = new(state->linalloc) ast_switch_body((yyvsp[-1].case_statement_list));
+ (yyval.switch_body)->set_location_range((yylsp[-2]), (yylsp[0]));
+ }
+#line 5400 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 257:
+#line 2691 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.case_label) = new(state->linalloc) ast_case_label((yyvsp[-1].expression));
+ (yyval.case_label)->set_location((yylsp[-1]));
+ }
+#line 5409 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 258:
+#line 2696 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.case_label) = new(state->linalloc) ast_case_label(NULL);
+ (yyval.case_label)->set_location((yylsp[0]));
+ }
+#line 5418 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 259:
+#line 2704 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_case_label_list *labels = new(state->linalloc) ast_case_label_list();
+
+ labels->labels.push_tail(& (yyvsp[0].case_label)->link);
+ (yyval.case_label_list) = labels;
+ (yyval.case_label_list)->set_location((yylsp[0]));
+ }
+#line 5430 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 260:
+#line 2712 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.case_label_list) = (yyvsp[-1].case_label_list);
+ (yyval.case_label_list)->labels.push_tail(& (yyvsp[0].case_label)->link);
+ }
+#line 5439 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 261:
+#line 2720 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_case_statement *stmts = new(state->linalloc) ast_case_statement((yyvsp[-1].case_label_list));
+ stmts->set_location((yylsp[0]));
+
+ stmts->stmts.push_tail(& (yyvsp[0].node)->link);
+ (yyval.case_statement) = stmts;
+ }
+#line 5451 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 262:
+#line 2728 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.case_statement) = (yyvsp[-1].case_statement);
+ (yyval.case_statement)->stmts.push_tail(& (yyvsp[0].node)->link);
+ }
+#line 5460 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 263:
+#line 2736 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_case_statement_list *cases = new(state->linalloc) ast_case_statement_list();
+ cases->set_location((yylsp[0]));
+
+ cases->cases.push_tail(& (yyvsp[0].case_statement)->link);
+ (yyval.case_statement_list) = cases;
+ }
+#line 5472 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 264:
+#line 2744 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.case_statement_list) = (yyvsp[-1].case_statement_list);
+ (yyval.case_statement_list)->cases.push_tail(& (yyvsp[0].case_statement)->link);
+ }
+#line 5481 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 265:
+#line 2752 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_iteration_statement(ast_iteration_statement::ast_while,
+ NULL, (yyvsp[-2].node), NULL, (yyvsp[0].node));
+ (yyval.node)->set_location_range((yylsp[-4]), (yylsp[-1]));
+ }
+#line 5492 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 266:
+#line 2759 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_iteration_statement(ast_iteration_statement::ast_do_while,
+ NULL, (yyvsp[-2].expression), NULL, (yyvsp[-5].node));
+ (yyval.node)->set_location_range((yylsp[-6]), (yylsp[-1]));
+ }
+#line 5503 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 267:
+#line 2766 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_iteration_statement(ast_iteration_statement::ast_for,
+ (yyvsp[-3].node), (yyvsp[-2].for_rest_statement).cond, (yyvsp[-2].for_rest_statement).rest, (yyvsp[0].node));
+ (yyval.node)->set_location_range((yylsp[-5]), (yylsp[0]));
+ }
+#line 5514 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 271:
+#line 2782 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = NULL;
+ }
+#line 5522 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 272:
+#line 2789 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.for_rest_statement).cond = (yyvsp[-1].node);
+ (yyval.for_rest_statement).rest = NULL;
+ }
+#line 5531 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 273:
+#line 2794 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.for_rest_statement).cond = (yyvsp[-2].node);
+ (yyval.for_rest_statement).rest = (yyvsp[0].expression);
+ }
+#line 5540 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 274:
+#line 2803 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_jump_statement(ast_jump_statement::ast_continue, NULL);
+ (yyval.node)->set_location((yylsp[-1]));
+ }
+#line 5550 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 275:
+#line 2809 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_jump_statement(ast_jump_statement::ast_break, NULL);
+ (yyval.node)->set_location((yylsp[-1]));
+ }
+#line 5560 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 276:
+#line 2815 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_jump_statement(ast_jump_statement::ast_return, NULL);
+ (yyval.node)->set_location((yylsp[-1]));
+ }
+#line 5570 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 277:
+#line 2821 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_jump_statement(ast_jump_statement::ast_return, (yyvsp[-1].expression));
+ (yyval.node)->set_location_range((yylsp[-2]), (yylsp[-1]));
+ }
+#line 5580 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 278:
+#line 2827 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_jump_statement(ast_jump_statement::ast_discard, NULL);
+ (yyval.node)->set_location((yylsp[-1]));
+ }
+#line 5590 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 279:
+#line 2836 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.node) = new(ctx) ast_demote_statement();
+ (yyval.node)->set_location((yylsp[-1]));
+ }
+#line 5600 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
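[Editorial aside: a hypothetical function covering the selection, switch, iteration, and jump productions above:]

    void statements(float x, int mode) {
       while (x > 0.0) { x -= 1.0; }                       // case 265: ast_while
       do { x += 1.0; } while (x < 4.0);                   // case 266: ast_do_while
       for (int i = 0; i < 4; ++i) { continue; }           // cases 267/274: ast_for, ast_continue
       switch (mode) { case 0: break; default: break; }    // cases 254-264
    }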
+ case 280:
+#line 2844 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = (yyvsp[0].function_definition); }
+#line 5606 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 281:
+#line 2845 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = (yyvsp[0].node); }
+#line 5612 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 282:
+#line 2846 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = (yyvsp[0].node); }
+#line 5618 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 283:
+#line 2847 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = (yyvsp[0].node); }
+#line 5624 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 284:
+#line 2848 "src/compiler/glsl/glsl_parser.yy"
+ { (yyval.node) = NULL; }
+#line 5630 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 285:
+#line 2853 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ (yyval.function_definition) = new(ctx) ast_function_definition();
+ (yyval.function_definition)->set_location_range((yylsp[-1]), (yylsp[0]));
+ (yyval.function_definition)->prototype = (yyvsp[-1].function);
+ (yyval.function_definition)->body = (yyvsp[0].compound_statement);
+
+ state->symbols->pop_scope();
+ }
+#line 5644 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 286:
+#line 2867 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = (yyvsp[0].interface_block);
+ }
+#line 5652 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 287:
+#line 2871 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_interface_block *block = (ast_interface_block *) (yyvsp[0].node);
+
+ if (!(yyvsp[-1].type_qualifier).merge_qualifier(& (yylsp[-1]), state, block->layout, false,
+ block->layout.has_layout())) {
+ YYERROR;
+ }
+
+ block->layout = (yyvsp[-1].type_qualifier);
+
+ (yyval.node) = block;
+ }
+#line 5669 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 288:
+#line 2884 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_interface_block *block = (ast_interface_block *)(yyvsp[0].node);
+
+ if (!block->default_layout.flags.q.buffer) {
+ _mesa_glsl_error(& (yylsp[-1]), state,
+ "memory qualifiers can only be used in the "
+ "declaration of shader storage blocks");
+ }
+ if (!(yyvsp[-1].type_qualifier).merge_qualifier(& (yylsp[-1]), state, block->layout, false)) {
+ YYERROR;
+ }
+ block->layout = (yyvsp[-1].type_qualifier);
+ (yyval.node) = block;
+ }
+#line 5688 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 289:
+#line 2902 "src/compiler/glsl/glsl_parser.yy"
+ {
+ ast_interface_block *const block = (yyvsp[-1].interface_block);
+
+ if ((yyvsp[-6].type_qualifier).flags.q.uniform) {
+ block->default_layout = *state->default_uniform_qualifier;
+ } else if ((yyvsp[-6].type_qualifier).flags.q.buffer) {
+ block->default_layout = *state->default_shader_storage_qualifier;
+ }
+ block->block_name = (yyvsp[-5].identifier);
+ block->declarations.push_degenerate_list_at_head(& (yyvsp[-3].declarator_list)->link);
+
+ _mesa_ast_process_interface_block(& (yylsp[-6]), state, block, (yyvsp[-6].type_qualifier));
+
+ (yyval.interface_block) = block;
+ }
+#line 5708 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
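[Editorial aside: case 289 assembles a complete interface block. Hypothetical uniform and shader-storage blocks that pick up the respective default layouts:]

    uniform Transforms {      // block_name = "Transforms"; default_uniform_qualifier applies
       mat4 mvp;
    } u_xform;                // optional instance name (cases 295-297)
    buffer Particles {        // default_shader_storage_qualifier applies (GLSL 4.30 / SSBO)
       vec4 positions[];
    };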
+ case 290:
+#line 2921 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.in = 1;
+ }
+#line 5717 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 291:
+#line 2926 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.out = 1;
+ }
+#line 5726 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 292:
+#line 2931 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.uniform = 1;
+ }
+#line 5735 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 293:
+#line 2936 "src/compiler/glsl/glsl_parser.yy"
+ {
+ memset(& (yyval.type_qualifier), 0, sizeof((yyval.type_qualifier)));
+ (yyval.type_qualifier).flags.q.buffer = 1;
+ }
+#line 5744 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 294:
+#line 2941 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (!(yyvsp[-1].type_qualifier).flags.q.patch) {
+ _mesa_glsl_error(&(yylsp[-1]), state, "invalid interface qualifier");
+ }
+ if ((yyvsp[0].type_qualifier).has_auxiliary_storage()) {
+ _mesa_glsl_error(&(yylsp[-1]), state, "duplicate patch qualifier");
+ }
+ (yyval.type_qualifier) = (yyvsp[0].type_qualifier);
+ (yyval.type_qualifier).flags.q.patch = 1;
+ }
+#line 5759 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 295:
+#line 2955 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.interface_block) = new(state->linalloc) ast_interface_block(NULL, NULL);
+ }
+#line 5767 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 296:
+#line 2959 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.interface_block) = new(state->linalloc) ast_interface_block((yyvsp[0].identifier), NULL);
+ (yyval.interface_block)->set_location((yylsp[0]));
+ }
+#line 5776 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 297:
+#line 2964 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.interface_block) = new(state->linalloc) ast_interface_block((yyvsp[-1].identifier), (yyvsp[0].array_specifier));
+ (yyval.interface_block)->set_location_range((yylsp[-1]), (yylsp[0]));
+ }
+#line 5785 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 298:
+#line 2972 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.declarator_list) = (yyvsp[0].declarator_list);
+ (yyvsp[0].declarator_list)->link.self_link();
+ }
+#line 5794 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 299:
+#line 2977 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.declarator_list) = (yyvsp[-1].declarator_list);
+ (yyvsp[0].declarator_list)->link.insert_before(& (yyval.declarator_list)->link);
+ }
+#line 5803 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 300:
+#line 2985 "src/compiler/glsl/glsl_parser.yy"
+ {
+ void *ctx = state->linalloc;
+ ast_fully_specified_type *type = (yyvsp[-2].fully_specified_type);
+ type->set_location((yylsp[-2]));
+
+ if (type->qualifier.flags.q.attribute) {
+ _mesa_glsl_error(& (yylsp[-2]), state,
+ "keyword 'attribute' cannot be used with "
+ "interface block member");
+ } else if (type->qualifier.flags.q.varying) {
+ _mesa_glsl_error(& (yylsp[-2]), state,
+ "keyword 'varying' cannot be used with "
+ "interface block member");
+ }
+
+ (yyval.declarator_list) = new(ctx) ast_declarator_list(type);
+ (yyval.declarator_list)->set_location((yylsp[-1]));
+
+ (yyval.declarator_list)->declarations.push_degenerate_list_at_head(& (yyvsp[-1].declaration)->link);
+ }
+#line 5828 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 301:
+#line 3009 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ if (!(yyval.type_qualifier).merge_qualifier(& (yylsp[-1]), state, (yyvsp[0].type_qualifier), false, true)) {
+ YYERROR;
+ }
+ }
+#line 5839 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 303:
+#line 3020 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ if (!(yyval.type_qualifier).merge_qualifier(& (yylsp[-1]), state, (yyvsp[0].type_qualifier), false, true)) {
+ YYERROR;
+ }
+ }
+#line 5850 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 305:
+#line 3031 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ if (!(yyval.type_qualifier).merge_qualifier(& (yylsp[-1]), state, (yyvsp[0].type_qualifier), false, true)) {
+ YYERROR;
+ }
+ if (!(yyval.type_qualifier).validate_in_qualifier(& (yylsp[-1]), state)) {
+ YYERROR;
+ }
+ }
+#line 5864 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 306:
+#line 3041 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (!(yyvsp[-2].type_qualifier).validate_in_qualifier(& (yylsp[-2]), state)) {
+ YYERROR;
+ }
+ }
+#line 5874 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 307:
+#line 3050 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.type_qualifier) = (yyvsp[-1].type_qualifier);
+ if (!(yyval.type_qualifier).merge_qualifier(& (yylsp[-1]), state, (yyvsp[0].type_qualifier), false, true)) {
+ YYERROR;
+ }
+ if (!(yyval.type_qualifier).validate_out_qualifier(& (yylsp[-1]), state)) {
+ YYERROR;
+ }
+ }
+#line 5888 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 308:
+#line 3060 "src/compiler/glsl/glsl_parser.yy"
+ {
+ if (!(yyvsp[-2].type_qualifier).validate_out_qualifier(& (yylsp[-2]), state)) {
+ YYERROR;
+ }
+ }
+#line 5898 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 309:
+#line 3069 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = NULL;
+ if (!state->default_uniform_qualifier->
+ merge_qualifier(& (yylsp[0]), state, (yyvsp[0].type_qualifier), false)) {
+ YYERROR;
+ }
+ if (!state->default_uniform_qualifier->
+ push_to_global(& (yylsp[0]), state)) {
+ YYERROR;
+ }
+ }
+#line 5914 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 310:
+#line 3081 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = NULL;
+ if (!state->default_shader_storage_qualifier->
+ merge_qualifier(& (yylsp[0]), state, (yyvsp[0].type_qualifier), false)) {
+ YYERROR;
+ }
+ if (!state->default_shader_storage_qualifier->
+ push_to_global(& (yylsp[0]), state)) {
+ YYERROR;
+ }
+
+ /* From the GLSL 4.50 spec, section 4.4.5:
+ *
+ * "It is a compile-time error to specify the binding identifier for
+ * the global scope or for block member declarations."
+ */
+ if (state->default_shader_storage_qualifier->flags.q.explicit_binding) {
+ _mesa_glsl_error(& (yylsp[0]), state,
+ "binding qualifier cannot be set for default layout");
+ }
+ }
+#line 5940 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 311:
+#line 3103 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = NULL;
+ if (!(yyvsp[0].type_qualifier).merge_into_in_qualifier(& (yylsp[0]), state, (yyval.node))) {
+ YYERROR;
+ }
+ if (!state->in_qualifier->push_to_global(& (yylsp[0]), state)) {
+ YYERROR;
+ }
+ }
+#line 5954 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
+ case 312:
+#line 3113 "src/compiler/glsl/glsl_parser.yy"
+ {
+ (yyval.node) = NULL;
+ if (!(yyvsp[0].type_qualifier).merge_into_out_qualifier(& (yylsp[0]), state, (yyval.node))) {
+ YYERROR;
+ }
+ if (!state->out_qualifier->push_to_global(& (yylsp[0]), state)) {
+ YYERROR;
+ }
+ }
+#line 5968 "src/compiler/glsl/glsl_parser.cpp"
+ break;
+
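[Editorial aside: cases 311/312 fold stand-alone layout-qualified `in`/`out` declarations into the shader-wide defaults. A hypothetical geometry-shader pair:]

    layout(points) in;                             // merged via merge_into_in_qualifier
    layout(triangle_strip, max_vertices = 4) out;  // merged via merge_into_out_qualifier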
+
+#line 5972 "src/compiler/glsl/glsl_parser.cpp"
+
+ default: break;
+ }
+ /* User semantic actions sometimes alter yychar, and that requires
+ that yytoken be updated with the new translation. We take the
+ approach of translating immediately before every use of yytoken.
+ One alternative is translating here after every semantic action,
+ but that translation would be missed if the semantic action invokes
+ YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+ if it invokes YYBACKUP. In the case of YYABORT or YYACCEPT, an
+ incorrect destructor might then be invoked immediately. In the
+ case of YYERROR or YYBACKUP, subsequent parser actions might lead
+ to an incorrect destructor call or verbose syntax error message
+ before the lookahead is translated. */
+ YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
+
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+
+ *++yyvsp = yyval;
+ *++yylsp = yyloc;
+
+ /* Now 'shift' the result of the reduction. Determine what state
+ that goes to, based on the state we popped back to and the rule
+ number reduced by. */
+ {
+ const int yylhs = yyr1[yyn] - YYNTOKENS;
+ const int yyi = yypgoto[yylhs] + *yyssp;
+ yystate = (0 <= yyi && yyi <= YYLAST && yycheck[yyi] == *yyssp
+ ? yytable[yyi]
+ : yydefgoto[yylhs]);
+ }
+
+ goto yynewstate;
+
+
+/*--------------------------------------.
+| yyerrlab -- here on detecting error. |
+`--------------------------------------*/
+yyerrlab:
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
+ /* If not already recovering from an error, report this error. */
+ if (!yyerrstatus)
+ {
+ ++yynerrs;
+#if ! YYERROR_VERBOSE
+ yyerror (&yylloc, state, YY_("syntax error"));
+#else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+ yyssp, yytoken)
+ {
+ char const *yymsgp = YY_("syntax error");
+ int yysyntax_error_status;
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ if (yysyntax_error_status == 0)
+ yymsgp = yymsg;
+ else if (yysyntax_error_status == 1)
+ {
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+ yymsg = YY_CAST (char *, YYSTACK_ALLOC (YY_CAST (YYSIZE_T, yymsg_alloc)));
+ if (!yymsg)
+ {
+ yymsg = yymsgbuf;
+ yymsg_alloc = sizeof yymsgbuf;
+ yysyntax_error_status = 2;
+ }
+ else
+ {
+ yysyntax_error_status = YYSYNTAX_ERROR;
+ yymsgp = yymsg;
+ }
+ }
+ yyerror (&yylloc, state, yymsgp);
+ if (yysyntax_error_status == 2)
+ goto yyexhaustedlab;
+ }
+# undef YYSYNTAX_ERROR
+#endif
+ }
+
+ yyerror_range[1] = yylloc;
+
+ if (yyerrstatus == 3)
+ {
+ /* If just tried and failed to reuse lookahead token after an
+ error, discard it. */
+
+ if (yychar <= YYEOF)
+ {
+ /* Return failure if at end of input. */
+ if (yychar == YYEOF)
+ YYABORT;
+ }
+ else
+ {
+ yydestruct ("Error: discarding",
+ yytoken, &yylval, &yylloc, state);
+ yychar = YYEMPTY;
+ }
+ }
+
+ /* Else will try to reuse lookahead token after shifting the error
+ token. */
+ goto yyerrlab1;
+
+
+/*---------------------------------------------------.
+| yyerrorlab -- error raised explicitly by YYERROR. |
+`---------------------------------------------------*/
+yyerrorlab:
+ /* Pacify compilers when the user code never invokes YYERROR and the
+ label yyerrorlab therefore never appears in user code. */
+ if (0)
+ YYERROR;
+
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYERROR. */
+ YYPOPSTACK (yylen);
+ yylen = 0;
+ YY_STACK_PRINT (yyss, yyssp);
+ yystate = *yyssp;
+ goto yyerrlab1;
+
+
+/*-------------------------------------------------------------.
+| yyerrlab1 -- common code for both syntax error and YYERROR. |
+`-------------------------------------------------------------*/
+yyerrlab1:
+ yyerrstatus = 3; /* Each real token shifted decrements this. */
+
+ for (;;)
+ {
+ yyn = yypact[yystate];
+ if (!yypact_value_is_default (yyn))
+ {
+ yyn += YYTERROR;
+ if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
+ {
+ yyn = yytable[yyn];
+ if (0 < yyn)
+ break;
+ }
+ }
+
+ /* Pop the current state because it cannot handle the error token. */
+ if (yyssp == yyss)
+ YYABORT;
+
+ yyerror_range[1] = *yylsp;
+ yydestruct ("Error: popping",
+ yystos[yystate], yyvsp, yylsp, state);
+ YYPOPSTACK (1);
+ yystate = *yyssp;
+ YY_STACK_PRINT (yyss, yyssp);
+ }
+
+ YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN
+ *++yyvsp = yylval;
+ YY_IGNORE_MAYBE_UNINITIALIZED_END
+
+ yyerror_range[2] = yylloc;
+ /* Using YYLLOC is tempting, but would change the location of
+ the lookahead. YYLOC is available though. */
+ YYLLOC_DEFAULT (yyloc, yyerror_range, 2);
+ *++yylsp = yyloc;
+
+ /* Shift the error token. */
+ YY_SYMBOL_PRINT ("Shifting", yystos[yyn], yyvsp, yylsp);
+
+ yystate = yyn;
+ goto yynewstate;
+
+
+/*-------------------------------------.
+| yyacceptlab -- YYACCEPT comes here. |
+`-------------------------------------*/
+yyacceptlab:
+ yyresult = 0;
+ goto yyreturn;
+
+
+/*-----------------------------------.
+| yyabortlab -- YYABORT comes here. |
+`-----------------------------------*/
+yyabortlab:
+ yyresult = 1;
+ goto yyreturn;
+
+
+#if !defined yyoverflow || YYERROR_VERBOSE
+/*-------------------------------------------------.
+| yyexhaustedlab -- memory exhaustion comes here. |
+`-------------------------------------------------*/
+yyexhaustedlab:
+ yyerror (&yylloc, state, YY_("memory exhausted"));
+ yyresult = 2;
+ /* Fall through. */
+#endif
+
+
+/*-----------------------------------------------------.
+| yyreturn -- parsing is finished, return the result. |
+`-----------------------------------------------------*/
+yyreturn:
+ if (yychar != YYEMPTY)
+ {
+ /* Make sure we have latest lookahead translation. See comments at
+ user semantic actions for why this is necessary. */
+ yytoken = YYTRANSLATE (yychar);
+ yydestruct ("Cleanup: discarding lookahead",
+ yytoken, &yylval, &yylloc, state);
+ }
+ /* Do not reclaim the symbols of the rule whose action triggered
+ this YYABORT or YYACCEPT. */
+ YYPOPSTACK (yylen);
+ YY_STACK_PRINT (yyss, yyssp);
+ while (yyssp != yyss)
+ {
+ yydestruct ("Cleanup: popping",
+ yystos[*yyssp], yyvsp, yylsp, state);
+ YYPOPSTACK (1);
+ }
+#ifndef yyoverflow
+ if (yyss != yyssa)
+ YYSTACK_FREE (yyss);
+#endif
+#if YYERROR_VERBOSE
+ if (yymsg != yymsgbuf)
+ YYSTACK_FREE (yymsg);
+#endif
+ return yyresult;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.h
new file mode 100644
index 0000000000..2107266571
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.h
@@ -0,0 +1,262 @@
+/* A Bison parser, made by GNU Bison 3.5. */
+
+/* Bison interface for Yacc-like parsers in C
+
+ Copyright (C) 1984, 1989-1990, 2000-2015, 2018-2019 Free Software Foundation,
+ Inc.
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
+
+/* As a special exception, you may create a larger work that contains
+ part or all of the Bison parser skeleton and distribute that work
+ under terms of your choice, so long as that work isn't itself a
+ parser generator using the skeleton or a modified version thereof
+ as a parser skeleton. Alternatively, if you modify or redistribute
+ the parser skeleton itself, you may (at your option) remove this
+ special exception, which will cause the skeleton and the resulting
+ Bison output files to be licensed under the GNU General Public
+ License without this special exception.
+
+ This special exception was added by the Free Software Foundation in
+ version 2.2 of Bison. */
+
+/* Undocumented macros, especially those whose name start with YY_,
+ are private implementation details. Do not rely on them. */
+
+#ifndef YY__MESA_GLSL_SRC_COMPILER_GLSL_GLSL_PARSER_H_INCLUDED
+# define YY__MESA_GLSL_SRC_COMPILER_GLSL_GLSL_PARSER_H_INCLUDED
+/* Debug traces. */
+#ifndef YYDEBUG
+# define YYDEBUG 0
+#endif
+#if YYDEBUG
+extern int _mesa_glsl_debug;
+#endif
+
+/* Token type. */
+#ifndef YYTOKENTYPE
+# define YYTOKENTYPE
+ enum yytokentype
+ {
+ ATTRIBUTE = 258,
+ CONST_TOK = 259,
+ BASIC_TYPE_TOK = 260,
+ BREAK = 261,
+ BUFFER = 262,
+ CONTINUE = 263,
+ DO = 264,
+ ELSE = 265,
+ FOR = 266,
+ IF = 267,
+ DEMOTE = 268,
+ DISCARD = 269,
+ RETURN = 270,
+ SWITCH = 271,
+ CASE = 272,
+ DEFAULT = 273,
+ CENTROID = 274,
+ IN_TOK = 275,
+ OUT_TOK = 276,
+ INOUT_TOK = 277,
+ UNIFORM = 278,
+ VARYING = 279,
+ SAMPLE = 280,
+ NOPERSPECTIVE = 281,
+ FLAT = 282,
+ SMOOTH = 283,
+ IMAGE1DSHADOW = 284,
+ IMAGE2DSHADOW = 285,
+ IMAGE1DARRAYSHADOW = 286,
+ IMAGE2DARRAYSHADOW = 287,
+ COHERENT = 288,
+ VOLATILE = 289,
+ RESTRICT = 290,
+ READONLY = 291,
+ WRITEONLY = 292,
+ SHARED = 293,
+ STRUCT = 294,
+ VOID_TOK = 295,
+ WHILE = 296,
+ IDENTIFIER = 297,
+ TYPE_IDENTIFIER = 298,
+ NEW_IDENTIFIER = 299,
+ FLOATCONSTANT = 300,
+ DOUBLECONSTANT = 301,
+ INTCONSTANT = 302,
+ UINTCONSTANT = 303,
+ BOOLCONSTANT = 304,
+ INT64CONSTANT = 305,
+ UINT64CONSTANT = 306,
+ FIELD_SELECTION = 307,
+ LEFT_OP = 308,
+ RIGHT_OP = 309,
+ INC_OP = 310,
+ DEC_OP = 311,
+ LE_OP = 312,
+ GE_OP = 313,
+ EQ_OP = 314,
+ NE_OP = 315,
+ AND_OP = 316,
+ OR_OP = 317,
+ XOR_OP = 318,
+ MUL_ASSIGN = 319,
+ DIV_ASSIGN = 320,
+ ADD_ASSIGN = 321,
+ MOD_ASSIGN = 322,
+ LEFT_ASSIGN = 323,
+ RIGHT_ASSIGN = 324,
+ AND_ASSIGN = 325,
+ XOR_ASSIGN = 326,
+ OR_ASSIGN = 327,
+ SUB_ASSIGN = 328,
+ INVARIANT = 329,
+ PRECISE = 330,
+ LOWP = 331,
+ MEDIUMP = 332,
+ HIGHP = 333,
+ SUPERP = 334,
+ PRECISION = 335,
+ VERSION_TOK = 336,
+ EXTENSION = 337,
+ LINE = 338,
+ COLON = 339,
+ EOL = 340,
+ INTERFACE = 341,
+ OUTPUT = 342,
+ PRAGMA_DEBUG_ON = 343,
+ PRAGMA_DEBUG_OFF = 344,
+ PRAGMA_OPTIMIZE_ON = 345,
+ PRAGMA_OPTIMIZE_OFF = 346,
+ PRAGMA_WARNING_ON = 347,
+ PRAGMA_WARNING_OFF = 348,
+ PRAGMA_INVARIANT_ALL = 349,
+ LAYOUT_TOK = 350,
+ DOT_TOK = 351,
+ ASM = 352,
+ CLASS = 353,
+ UNION = 354,
+ ENUM = 355,
+ TYPEDEF = 356,
+ TEMPLATE = 357,
+ THIS = 358,
+ PACKED_TOK = 359,
+ GOTO = 360,
+ INLINE_TOK = 361,
+ NOINLINE = 362,
+ PUBLIC_TOK = 363,
+ STATIC = 364,
+ EXTERN = 365,
+ EXTERNAL = 366,
+ LONG_TOK = 367,
+ SHORT_TOK = 368,
+ HALF = 369,
+ FIXED_TOK = 370,
+ UNSIGNED = 371,
+ INPUT_TOK = 372,
+ HVEC2 = 373,
+ HVEC3 = 374,
+ HVEC4 = 375,
+ FVEC2 = 376,
+ FVEC3 = 377,
+ FVEC4 = 378,
+ SAMPLER3DRECT = 379,
+ SIZEOF = 380,
+ CAST = 381,
+ NAMESPACE = 382,
+ USING = 383,
+ RESOURCE = 384,
+ PATCH = 385,
+ SUBROUTINE = 386,
+ ERROR_TOK = 387,
+ COMMON = 388,
+ PARTITION = 389,
+ ACTIVE = 390,
+ FILTER = 391,
+ ROW_MAJOR = 392,
+ THEN = 393
+ };
+#endif
+
+/* Value type. */
+#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
+union YYSTYPE
+{
+#line 101 "src/compiler/glsl/glsl_parser.yy"
+
+ int n;
+ int64_t n64;
+ float real;
+ double dreal;
+ const char *identifier;
+
+ struct ast_type_qualifier type_qualifier;
+
+ ast_node *node;
+ ast_type_specifier *type_specifier;
+ ast_array_specifier *array_specifier;
+ ast_fully_specified_type *fully_specified_type;
+ ast_function *function;
+ ast_parameter_declarator *parameter_declarator;
+ ast_function_definition *function_definition;
+ ast_compound_statement *compound_statement;
+ ast_expression *expression;
+ ast_declarator_list *declarator_list;
+ ast_struct_specifier *struct_specifier;
+ ast_declaration *declaration;
+ ast_switch_body *switch_body;
+ ast_case_label *case_label;
+ ast_case_label_list *case_label_list;
+ ast_case_statement *case_statement;
+ ast_case_statement_list *case_statement_list;
+ ast_interface_block *interface_block;
+ ast_subroutine_list *subroutine_list;
+ struct {
+ ast_node *cond;
+ ast_expression *rest;
+ } for_rest_statement;
+
+ struct {
+ ast_node *then_statement;
+ ast_node *else_statement;
+ } selection_rest_statement;
+
+ const glsl_type *type;
+
+#line 237 "src/compiler/glsl/glsl_parser.h"
+
+};
+typedef union YYSTYPE YYSTYPE;
+# define YYSTYPE_IS_TRIVIAL 1
+# define YYSTYPE_IS_DECLARED 1
+#endif
+
+/* Location type. */
+#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED
+typedef struct YYLTYPE YYLTYPE;
+struct YYLTYPE
+{
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+};
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
+#endif
+
+
+
+int _mesa_glsl_parse (struct _mesa_glsl_parse_state *state);
+
+#endif /* !YY__MESA_GLSL_SRC_COMPILER_GLSL_GLSL_PARSER_H_INCLUDED */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.yy b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.yy
new file mode 100644
index 0000000000..e1f86993bf
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser.yy
@@ -0,0 +1,3122 @@
+%{
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <strings.h>
+#endif
+#include <assert.h>
+
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "compiler/glsl_types.h"
+#include "main/context.h"
+#include "util/u_string.h"
+#include "util/format/u_format.h"
+
+#ifdef _MSC_VER
+#pragma warning( disable : 4065 ) // switch statement contains 'default' but no 'case' labels
+#endif
+
+#undef yyerror
+
+static void yyerror(YYLTYPE *loc, _mesa_glsl_parse_state *st, const char *msg)
+{
+ _mesa_glsl_error(loc, st, "%s", msg);
+}
+
+static int
+_mesa_glsl_lex(YYSTYPE *val, YYLTYPE *loc, _mesa_glsl_parse_state *state)
+{
+ return _mesa_glsl_lexer_lex(val, loc, state->scanner);
+}
+
+static bool match_layout_qualifier(const char *s1, const char *s2,
+ _mesa_glsl_parse_state *state)
+{
+ /* From the GLSL 1.50 spec, section 4.3.8 (Layout Qualifiers):
+ *
+ * "The tokens in any layout-qualifier-id-list ... are not case
+ * sensitive, unless explicitly noted otherwise."
+ *
+ * The text "unless explicitly noted otherwise" appears to be
+ * vacuous--no desktop GLSL spec (up through GLSL 4.40) notes
+ * otherwise.
+ *
+ * However, the GLSL ES 3.00 spec says, in section 4.3.8 (Layout
+ * Qualifiers):
+ *
+ * "As for other identifiers, they are case sensitive."
+ *
+ * So we need to do a case-sensitive or a case-insensitive match,
+ * depending on whether we are compiling for GLSL ES.
+ */
+ if (state->es_shader)
+ return strcmp(s1, s2);
+ else
+ return strcasecmp(s1, s2);
+}
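+
+/* Illustrative note (added commentary, not part of the upstream
+ * source): match_layout_qualifier() returns the strcmp()/strcasecmp()
+ * result directly, so *zero* means the qualifiers matched and call
+ * sites below test "== 0", e.g.:
+ *
+ *    if (match_layout_qualifier($1, "std140", state) == 0)
+ *       $$.flags.q.std140 = 1;
+ *
+ * On desktop GLSL "STD140" would also match; in GLSL ES it would not.
+ */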
+%}
+
+%expect 0
+
+%define api.pure
+%define parse.error verbose
+
+%locations
+%initial-action {
+ @$.first_line = 1;
+ @$.first_column = 1;
+ @$.last_line = 1;
+ @$.last_column = 1;
+ @$.source = 0;
+ @$.path = NULL;
+}
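+
+/* Note (added commentary, hedged): @$.source and @$.path are not
+ * members of the default four-field YYLTYPE that Bison emitted into
+ * glsl_parser.h above; they presumably come from a custom YYLTYPE that
+ * Mesa declares (with YYLTYPE_IS_DECLARED set) in glsl_parser_extras.h,
+ * which is why the generated definition is guarded by
+ * "#if ! defined YYLTYPE && ! defined YYLTYPE_IS_DECLARED".
+ */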
+
+%lex-param {struct _mesa_glsl_parse_state *state}
+%parse-param {struct _mesa_glsl_parse_state *state}
+
+%union {
+ int n;
+ int64_t n64;
+ float real;
+ double dreal;
+ const char *identifier;
+
+ struct ast_type_qualifier type_qualifier;
+
+ ast_node *node;
+ ast_type_specifier *type_specifier;
+ ast_array_specifier *array_specifier;
+ ast_fully_specified_type *fully_specified_type;
+ ast_function *function;
+ ast_parameter_declarator *parameter_declarator;
+ ast_function_definition *function_definition;
+ ast_compound_statement *compound_statement;
+ ast_expression *expression;
+ ast_declarator_list *declarator_list;
+ ast_struct_specifier *struct_specifier;
+ ast_declaration *declaration;
+ ast_switch_body *switch_body;
+ ast_case_label *case_label;
+ ast_case_label_list *case_label_list;
+ ast_case_statement *case_statement;
+ ast_case_statement_list *case_statement_list;
+ ast_interface_block *interface_block;
+ ast_subroutine_list *subroutine_list;
+ struct {
+ ast_node *cond;
+ ast_expression *rest;
+ } for_rest_statement;
+
+ struct {
+ ast_node *then_statement;
+ ast_node *else_statement;
+ } selection_rest_statement;
+
+ const glsl_type *type;
+}
+
+%token ATTRIBUTE CONST_TOK
+%token <type> BASIC_TYPE_TOK
+%token BREAK BUFFER CONTINUE DO ELSE FOR IF DEMOTE DISCARD RETURN SWITCH CASE DEFAULT
+%token CENTROID IN_TOK OUT_TOK INOUT_TOK UNIFORM VARYING SAMPLE
+%token NOPERSPECTIVE FLAT SMOOTH
+%token IMAGE1DSHADOW IMAGE2DSHADOW IMAGE1DARRAYSHADOW IMAGE2DARRAYSHADOW
+%token COHERENT VOLATILE RESTRICT READONLY WRITEONLY
+%token SHARED
+%token STRUCT VOID_TOK WHILE
+%token <identifier> IDENTIFIER TYPE_IDENTIFIER NEW_IDENTIFIER
+%type <identifier> any_identifier
+%type <interface_block> instance_name_opt
+%token <real> FLOATCONSTANT
+%token <dreal> DOUBLECONSTANT
+%token <n> INTCONSTANT UINTCONSTANT BOOLCONSTANT
+%token <n64> INT64CONSTANT UINT64CONSTANT
+%token <identifier> FIELD_SELECTION
+%token LEFT_OP RIGHT_OP
+%token INC_OP DEC_OP LE_OP GE_OP EQ_OP NE_OP
+%token AND_OP OR_OP XOR_OP MUL_ASSIGN DIV_ASSIGN ADD_ASSIGN
+%token MOD_ASSIGN LEFT_ASSIGN RIGHT_ASSIGN AND_ASSIGN XOR_ASSIGN OR_ASSIGN
+%token SUB_ASSIGN
+%token INVARIANT PRECISE
+%token LOWP MEDIUMP HIGHP SUPERP PRECISION
+
+%token VERSION_TOK EXTENSION LINE COLON EOL INTERFACE OUTPUT
+%token PRAGMA_DEBUG_ON PRAGMA_DEBUG_OFF
+%token PRAGMA_OPTIMIZE_ON PRAGMA_OPTIMIZE_OFF
+%token PRAGMA_WARNING_ON PRAGMA_WARNING_OFF
+%token PRAGMA_INVARIANT_ALL
+%token LAYOUT_TOK
+%token DOT_TOK
+ /* Reserved words that are not actually used in the grammar.
+ */
+%token ASM CLASS UNION ENUM TYPEDEF TEMPLATE THIS PACKED_TOK GOTO
+%token INLINE_TOK NOINLINE PUBLIC_TOK STATIC EXTERN EXTERNAL
+%token LONG_TOK SHORT_TOK HALF FIXED_TOK UNSIGNED INPUT_TOK
+%token HVEC2 HVEC3 HVEC4 FVEC2 FVEC3 FVEC4
+%token SAMPLER3DRECT
+%token SIZEOF CAST NAMESPACE USING
+%token RESOURCE PATCH
+%token SUBROUTINE
+
+%token ERROR_TOK
+
+%token COMMON PARTITION ACTIVE FILTER ROW_MAJOR
+
+%type <identifier> variable_identifier
+%type <node> statement
+%type <node> statement_list
+%type <node> simple_statement
+%type <n> precision_qualifier
+%type <type_qualifier> type_qualifier
+%type <type_qualifier> auxiliary_storage_qualifier
+%type <type_qualifier> storage_qualifier
+%type <type_qualifier> interpolation_qualifier
+%type <type_qualifier> layout_qualifier
+%type <type_qualifier> layout_qualifier_id_list layout_qualifier_id
+%type <type_qualifier> interface_block_layout_qualifier
+%type <type_qualifier> memory_qualifier
+%type <type_qualifier> subroutine_qualifier
+%type <subroutine_list> subroutine_type_list
+%type <type_qualifier> interface_qualifier
+%type <type_specifier> type_specifier
+%type <type_specifier> type_specifier_nonarray
+%type <array_specifier> array_specifier
+%type <type> basic_type_specifier_nonarray
+%type <fully_specified_type> fully_specified_type
+%type <function> function_prototype
+%type <function> function_header
+%type <function> function_header_with_parameters
+%type <function> function_declarator
+%type <parameter_declarator> parameter_declarator
+%type <parameter_declarator> parameter_declaration
+%type <type_qualifier> parameter_qualifier
+%type <type_qualifier> parameter_direction_qualifier
+%type <type_specifier> parameter_type_specifier
+%type <function_definition> function_definition
+%type <compound_statement> compound_statement_no_new_scope
+%type <compound_statement> compound_statement
+%type <node> statement_no_new_scope
+%type <node> expression_statement
+%type <expression> expression
+%type <expression> primary_expression
+%type <expression> assignment_expression
+%type <expression> conditional_expression
+%type <expression> logical_or_expression
+%type <expression> logical_xor_expression
+%type <expression> logical_and_expression
+%type <expression> inclusive_or_expression
+%type <expression> exclusive_or_expression
+%type <expression> and_expression
+%type <expression> equality_expression
+%type <expression> relational_expression
+%type <expression> shift_expression
+%type <expression> additive_expression
+%type <expression> multiplicative_expression
+%type <expression> unary_expression
+%type <expression> constant_expression
+%type <expression> integer_expression
+%type <expression> postfix_expression
+%type <expression> function_call_header_with_parameters
+%type <expression> function_call_header_no_parameters
+%type <expression> function_call_header
+%type <expression> function_call_generic
+%type <expression> function_call_or_method
+%type <expression> function_call
+%type <n> assignment_operator
+%type <n> unary_operator
+%type <expression> function_identifier
+%type <node> external_declaration
+%type <node> pragma_statement
+%type <declarator_list> init_declarator_list
+%type <declarator_list> single_declaration
+%type <expression> initializer
+%type <expression> initializer_list
+%type <node> declaration
+%type <node> declaration_statement
+%type <node> jump_statement
+%type <node> demote_statement
+%type <node> interface_block
+%type <interface_block> basic_interface_block
+%type <struct_specifier> struct_specifier
+%type <declarator_list> struct_declaration_list
+%type <declarator_list> struct_declaration
+%type <declaration> struct_declarator
+%type <declaration> struct_declarator_list
+%type <declarator_list> member_list
+%type <declarator_list> member_declaration
+%type <node> selection_statement
+%type <selection_rest_statement> selection_rest_statement
+%type <node> switch_statement
+%type <switch_body> switch_body
+%type <case_label_list> case_label_list
+%type <case_label> case_label
+%type <case_statement> case_statement
+%type <case_statement_list> case_statement_list
+%type <node> iteration_statement
+%type <node> condition
+%type <node> conditionopt
+%type <node> for_init_statement
+%type <for_rest_statement> for_rest_statement
+%type <node> layout_defaults
+%type <type_qualifier> layout_uniform_defaults
+%type <type_qualifier> layout_buffer_defaults
+%type <type_qualifier> layout_in_defaults
+%type <type_qualifier> layout_out_defaults
+
+%right THEN ELSE
+%%
+
+translation_unit:
+ version_statement extension_statement_list
+ {
+ _mesa_glsl_initialize_types(state);
+ }
+ external_declaration_list
+ {
+ delete state->symbols;
+ state->symbols = new(ralloc_parent(state)) glsl_symbol_table;
+ if (state->es_shader) {
+ if (state->stage == MESA_SHADER_FRAGMENT) {
+ state->symbols->add_default_precision_qualifier("int", ast_precision_medium);
+ } else {
+ state->symbols->add_default_precision_qualifier("float", ast_precision_high);
+ state->symbols->add_default_precision_qualifier("int", ast_precision_high);
+ }
+ state->symbols->add_default_precision_qualifier("sampler2D", ast_precision_low);
+ state->symbols->add_default_precision_qualifier("samplerExternalOES", ast_precision_low);
+ state->symbols->add_default_precision_qualifier("samplerCube", ast_precision_low);
+ state->symbols->add_default_precision_qualifier("atomic_uint", ast_precision_high);
+ }
+ _mesa_glsl_initialize_types(state);
+ }
+ ;
+
+version_statement:
+ /* blank - no #version specified: defaults are already set */
+ | VERSION_TOK INTCONSTANT EOL
+ {
+ state->process_version_directive(&@2, $2, NULL);
+ if (state->error) {
+ YYERROR;
+ }
+ }
+ | VERSION_TOK INTCONSTANT any_identifier EOL
+ {
+ state->process_version_directive(&@2, $2, $3);
+ if (state->error) {
+ YYERROR;
+ }
+ }
+ ;
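+
+   // Illustrative example (added commentary, not upstream): for
+   // "#version 300 es", $2 is the integer 300 and $3 is the identifier
+   // "es", so the profile name reaches process_version_directive();
+   // a plain "#version 150" takes the first production and passes NULL.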
+
+pragma_statement:
+ PRAGMA_DEBUG_ON EOL { $$ = NULL; }
+ | PRAGMA_DEBUG_OFF EOL { $$ = NULL; }
+ | PRAGMA_OPTIMIZE_ON EOL { $$ = NULL; }
+ | PRAGMA_OPTIMIZE_OFF EOL { $$ = NULL; }
+ | PRAGMA_INVARIANT_ALL EOL
+ {
+ /* Pragma invariant(all) cannot be used in a fragment shader.
+ *
+ * Page 27 of the GLSL 1.20 spec, Page 53 of the GLSL ES 3.00 spec:
+ *
+ * "It is an error to use this pragma in a fragment shader."
+ */
+ if (state->is_version(120, 300) &&
+ state->stage == MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state,
+ "pragma `invariant(all)' cannot be used "
+ "in a fragment shader.");
+ } else if (!state->is_version(120, 100)) {
+ _mesa_glsl_warning(& @1, state,
+ "pragma `invariant(all)' not supported in %s "
+ "(GLSL ES 1.00 or GLSL 1.20 required)",
+ state->get_version_string());
+ } else {
+ state->all_invariant = true;
+ }
+
+ $$ = NULL;
+ }
+ | PRAGMA_WARNING_ON EOL
+ {
+ void *mem_ctx = state->linalloc;
+ $$ = new(mem_ctx) ast_warnings_toggle(true);
+ }
+ | PRAGMA_WARNING_OFF EOL
+ {
+ void *mem_ctx = state->linalloc;
+ $$ = new(mem_ctx) ast_warnings_toggle(false);
+ }
+ ;
+
+extension_statement_list:
+   /* empty */
+ | extension_statement_list extension_statement
+ ;
+
+any_identifier:
+ IDENTIFIER
+ | TYPE_IDENTIFIER
+ | NEW_IDENTIFIER
+ ;
+
+extension_statement:
+ EXTENSION any_identifier COLON any_identifier EOL
+ {
+ if (!_mesa_glsl_process_extension($2, & @2, $4, & @4, state)) {
+ YYERROR;
+ }
+ }
+ ;
+
+external_declaration_list:
+ external_declaration
+ {
+ /* FINISHME: The NULL test is required because pragmas are set to
+ * FINISHME: NULL. (See production rule for external_declaration.)
+ */
+ if ($1 != NULL)
+ state->translation_unit.push_tail(& $1->link);
+ }
+ | external_declaration_list external_declaration
+ {
+ /* FINISHME: The NULL test is required because pragmas are set to
+ * FINISHME: NULL. (See production rule for external_declaration.)
+ */
+ if ($2 != NULL)
+ state->translation_unit.push_tail(& $2->link);
+ }
+ | external_declaration_list extension_statement {
+ if (!state->allow_extension_directive_midshader) {
+ _mesa_glsl_error(& @2, state,
+ "#extension directive is not allowed "
+ "in the middle of a shader");
+ YYERROR;
+ }
+ }
+ ;
+
+variable_identifier:
+ IDENTIFIER
+ | NEW_IDENTIFIER
+ ;
+
+primary_expression:
+ variable_identifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_identifier, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.identifier = $1;
+ }
+ | INTCONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_int_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.int_constant = $1;
+ }
+ | UINTCONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_uint_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.uint_constant = $1;
+ }
+ | INT64CONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_int64_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.int64_constant = $1;
+ }
+ | UINT64CONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_uint64_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.uint64_constant = $1;
+ }
+ | FLOATCONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_float_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.float_constant = $1;
+ }
+ | DOUBLECONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_double_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.double_constant = $1;
+ }
+ | BOOLCONSTANT
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_bool_constant, NULL, NULL, NULL);
+ $$->set_location(@1);
+ $$->primary_expression.bool_constant = $1;
+ }
+ | '(' expression ')'
+ {
+ $$ = $2;
+ }
+ ;
+
+postfix_expression:
+ primary_expression
+ | postfix_expression '[' integer_expression ']'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_array_index, $1, $3, NULL);
+ $$->set_location_range(@1, @4);
+ }
+ | function_call
+ {
+ $$ = $1;
+ }
+ | postfix_expression DOT_TOK FIELD_SELECTION
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_field_selection, $1, NULL, NULL);
+ $$->set_location_range(@1, @3);
+ $$->primary_expression.identifier = $3;
+ }
+ | postfix_expression INC_OP
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_post_inc, $1, NULL, NULL);
+ $$->set_location_range(@1, @2);
+ }
+ | postfix_expression DEC_OP
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_post_dec, $1, NULL, NULL);
+ $$->set_location_range(@1, @2);
+ }
+ ;
+
+integer_expression:
+ expression
+ ;
+
+function_call:
+ function_call_or_method
+ ;
+
+function_call_or_method:
+ function_call_generic
+ ;
+
+function_call_generic:
+ function_call_header_with_parameters ')'
+ | function_call_header_no_parameters ')'
+ ;
+
+function_call_header_no_parameters:
+ function_call_header VOID_TOK
+ | function_call_header
+ ;
+
+function_call_header_with_parameters:
+ function_call_header assignment_expression
+ {
+ $$ = $1;
+ $$->set_location(@1);
+ $$->expressions.push_tail(& $2->link);
+ }
+ | function_call_header_with_parameters ',' assignment_expression
+ {
+ $$ = $1;
+ $$->set_location(@1);
+ $$->expressions.push_tail(& $3->link);
+ }
+ ;
+
+ // Grammar Note: Constructors look like functions, but lexical
+ // analysis recognized most of them as keywords. They are now
+ // recognized through "type_specifier".
+function_call_header:
+ function_identifier '('
+ ;
+
+function_identifier:
+ type_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_function_expression($1);
+ $$->set_location(@1);
+ }
+ | postfix_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_function_expression($1);
+ $$->set_location(@1);
+ }
+ ;
+
+ // Grammar Note: Constructors look like methods, but lexical
+ // analysis recognized most of them as keywords. They are now
+ // recognized through "type_specifier".
+
+ // Grammar Note: No traditional style type casts.
+unary_expression:
+ postfix_expression
+ | INC_OP unary_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_pre_inc, $2, NULL, NULL);
+ $$->set_location(@1);
+ }
+ | DEC_OP unary_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_pre_dec, $2, NULL, NULL);
+ $$->set_location(@1);
+ }
+ | unary_operator unary_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression($1, $2, NULL, NULL);
+ $$->set_location_range(@1, @2);
+ }
+ ;
+
+ // Grammar Note: No '*' or '&' unary ops. Pointers are not supported.
+unary_operator:
+ '+' { $$ = ast_plus; }
+ | '-' { $$ = ast_neg; }
+ | '!' { $$ = ast_logic_not; }
+ | '~' { $$ = ast_bit_not; }
+ ;
+
+multiplicative_expression:
+ unary_expression
+ | multiplicative_expression '*' unary_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_mul, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | multiplicative_expression '/' unary_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_div, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | multiplicative_expression '%' unary_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_mod, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+additive_expression:
+ multiplicative_expression
+ | additive_expression '+' multiplicative_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_add, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | additive_expression '-' multiplicative_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_sub, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+shift_expression:
+ additive_expression
+ | shift_expression LEFT_OP additive_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_lshift, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | shift_expression RIGHT_OP additive_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_rshift, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+relational_expression:
+ shift_expression
+ | relational_expression '<' shift_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_less, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | relational_expression '>' shift_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_greater, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | relational_expression LE_OP shift_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_lequal, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | relational_expression GE_OP shift_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_gequal, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+equality_expression:
+ relational_expression
+ | equality_expression EQ_OP relational_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_equal, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ | equality_expression NE_OP relational_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_nequal, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+and_expression:
+ equality_expression
+ | and_expression '&' equality_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_bit_and, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+exclusive_or_expression:
+ and_expression
+ | exclusive_or_expression '^' and_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_bit_xor, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+inclusive_or_expression:
+ exclusive_or_expression
+ | inclusive_or_expression '|' exclusive_or_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_bit_or, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+logical_and_expression:
+ inclusive_or_expression
+ | logical_and_expression AND_OP inclusive_or_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_logic_and, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+logical_xor_expression:
+ logical_and_expression
+ | logical_xor_expression XOR_OP logical_and_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_logic_xor, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+logical_or_expression:
+ logical_xor_expression
+ | logical_or_expression OR_OP logical_xor_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_bin(ast_logic_or, $1, $3);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+conditional_expression:
+ logical_or_expression
+ | logical_or_expression '?' expression ':' assignment_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression(ast_conditional, $1, $3, $5);
+ $$->set_location_range(@1, @5);
+ }
+ ;
+
+assignment_expression:
+ conditional_expression
+ | unary_expression assignment_operator assignment_expression
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression($2, $1, $3, NULL);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+assignment_operator:
+ '=' { $$ = ast_assign; }
+ | MUL_ASSIGN { $$ = ast_mul_assign; }
+ | DIV_ASSIGN { $$ = ast_div_assign; }
+ | MOD_ASSIGN { $$ = ast_mod_assign; }
+ | ADD_ASSIGN { $$ = ast_add_assign; }
+ | SUB_ASSIGN { $$ = ast_sub_assign; }
+ | LEFT_ASSIGN { $$ = ast_ls_assign; }
+ | RIGHT_ASSIGN { $$ = ast_rs_assign; }
+ | AND_ASSIGN { $$ = ast_and_assign; }
+ | XOR_ASSIGN { $$ = ast_xor_assign; }
+ | OR_ASSIGN { $$ = ast_or_assign; }
+ ;
+
+expression:
+ assignment_expression
+ {
+ $$ = $1;
+ }
+ | expression ',' assignment_expression
+ {
+ void *ctx = state->linalloc;
+ if ($1->oper != ast_sequence) {
+ $$ = new(ctx) ast_expression(ast_sequence, NULL, NULL, NULL);
+ $$->set_location_range(@1, @3);
+ $$->expressions.push_tail(& $1->link);
+ } else {
+ $$ = $1;
+ }
+
+ $$->expressions.push_tail(& $3->link);
+ }
+ ;
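+
+   // Illustrative example (added commentary, not upstream): the
+   // "oper != ast_sequence" test above flattens the comma operator,
+   // so "a, b, c" yields a single ast_sequence whose expressions list
+   // holds all three operands rather than a nested
+   // sequence-of-sequences.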
+
+constant_expression:
+ conditional_expression
+ ;
+
+declaration:
+ function_prototype ';'
+ {
+ state->symbols->pop_scope();
+ $$ = $1;
+ }
+ | init_declarator_list ';'
+ {
+ $$ = $1;
+ }
+ | PRECISION precision_qualifier type_specifier ';'
+ {
+ $3->default_precision = $2;
+ $$ = $3;
+ }
+ | interface_block
+ {
+ ast_interface_block *block = (ast_interface_block *) $1;
+ if (block->layout.has_layout() || block->layout.has_memory()) {
+ if (!block->default_layout.merge_qualifier(& @1, state, block->layout, false)) {
+ YYERROR;
+ }
+ }
+ block->layout = block->default_layout;
+ if (!block->layout.push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ $$ = $1;
+ }
+ ;
+
+function_prototype:
+ function_declarator ')'
+ ;
+
+function_declarator:
+ function_header
+ | function_header_with_parameters
+ ;
+
+function_header_with_parameters:
+ function_header parameter_declaration
+ {
+ $$ = $1;
+ $$->parameters.push_tail(& $2->link);
+ }
+ | function_header_with_parameters ',' parameter_declaration
+ {
+ $$ = $1;
+ $$->parameters.push_tail(& $3->link);
+ }
+ ;
+
+function_header:
+ fully_specified_type variable_identifier '('
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_function();
+ $$->set_location(@2);
+ $$->return_type = $1;
+ $$->identifier = $2;
+
+ if ($1->qualifier.is_subroutine_decl()) {
+ /* add type for IDENTIFIER search */
+ state->symbols->add_type($2, glsl_type::get_subroutine_instance($2));
+ } else
+ state->symbols->add_function(new(state) ir_function($2));
+ state->symbols->push_scope();
+ }
+ ;
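+
+   // Note (added commentary, hedged): the push_scope() above is
+   // matched by the pop_scope() in the "function_prototype ';'"
+   // production of declaration when this turns out to be a bare
+   // prototype; for a full definition the scope is presumably closed
+   // after the body is parsed (outside this hunk).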
+
+parameter_declarator:
+ type_specifier any_identifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_parameter_declarator();
+ $$->set_location_range(@1, @2);
+ $$->type = new(ctx) ast_fully_specified_type();
+ $$->type->set_location(@1);
+ $$->type->specifier = $1;
+ $$->identifier = $2;
+ state->symbols->add_variable(new(state) ir_variable(NULL, $2, ir_var_auto));
+ }
+ | layout_qualifier type_specifier any_identifier
+ {
+ if (state->allow_layout_qualifier_on_function_parameter) {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_parameter_declarator();
+ $$->set_location_range(@2, @3);
+ $$->type = new(ctx) ast_fully_specified_type();
+ $$->type->set_location(@2);
+ $$->type->specifier = $2;
+ $$->identifier = $3;
+ state->symbols->add_variable(new(state) ir_variable(NULL, $3, ir_var_auto));
+ } else {
+         _mesa_glsl_error(&@1, state,
+                          "layout qualifier is not allowed on a "
+                          "function parameter");
+ YYERROR;
+ }
+ }
+ | type_specifier any_identifier array_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_parameter_declarator();
+ $$->set_location_range(@1, @3);
+ $$->type = new(ctx) ast_fully_specified_type();
+ $$->type->set_location(@1);
+ $$->type->specifier = $1;
+ $$->identifier = $2;
+ $$->array_specifier = $3;
+ state->symbols->add_variable(new(state) ir_variable(NULL, $2, ir_var_auto));
+ }
+ ;
+
+parameter_declaration:
+ parameter_qualifier parameter_declarator
+ {
+ $$ = $2;
+ $$->type->qualifier = $1;
+ if (!$$->type->qualifier.push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ }
+ | parameter_qualifier parameter_type_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_parameter_declarator();
+ $$->set_location(@2);
+ $$->type = new(ctx) ast_fully_specified_type();
+ $$->type->set_location_range(@1, @2);
+ $$->type->qualifier = $1;
+ if (!$$->type->qualifier.push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ $$->type->specifier = $2;
+ }
+ ;
+
+parameter_qualifier:
+ /* empty */
+ {
+ memset(& $$, 0, sizeof($$));
+ }
+ | CONST_TOK parameter_qualifier
+ {
+ if ($2.flags.q.constant)
+ _mesa_glsl_error(&@1, state, "duplicate const qualifier");
+
+ $$ = $2;
+ $$.flags.q.constant = 1;
+ }
+ | PRECISE parameter_qualifier
+ {
+ if ($2.flags.q.precise)
+ _mesa_glsl_error(&@1, state, "duplicate precise qualifier");
+
+ $$ = $2;
+ $$.flags.q.precise = 1;
+ }
+ | parameter_direction_qualifier parameter_qualifier
+ {
+ if (($1.flags.q.in || $1.flags.q.out) && ($2.flags.q.in || $2.flags.q.out))
+ _mesa_glsl_error(&@1, state, "duplicate in/out/inout qualifier");
+
+ if (!state->has_420pack_or_es31() && $2.flags.q.constant)
+ _mesa_glsl_error(&@1, state, "in/out/inout must come after const "
+ "or precise");
+
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+ }
+ | precision_qualifier parameter_qualifier
+ {
+ if ($2.precision != ast_precision_none)
+ _mesa_glsl_error(&@1, state, "duplicate precision qualifier");
+
+ if (!state->has_420pack_or_es31() &&
+ $2.flags.i != 0)
+ _mesa_glsl_error(&@1, state, "precision qualifiers must come last");
+
+ $$ = $2;
+ $$.precision = $1;
+ }
+ | memory_qualifier parameter_qualifier
+ {
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+   }
+   ;
+
+parameter_direction_qualifier:
+ IN_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.in = 1;
+ }
+ | OUT_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.out = 1;
+ }
+ | INOUT_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.in = 1;
+ $$.flags.q.out = 1;
+ }
+ ;
+
+parameter_type_specifier:
+ type_specifier
+ ;
+
+init_declarator_list:
+ single_declaration
+ | init_declarator_list ',' any_identifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($3, NULL, NULL);
+ decl->set_location(@3);
+
+ $$ = $1;
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $3, ir_var_auto));
+ }
+ | init_declarator_list ',' any_identifier array_specifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($3, $4, NULL);
+ decl->set_location_range(@3, @4);
+
+ $$ = $1;
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $3, ir_var_auto));
+ }
+ | init_declarator_list ',' any_identifier array_specifier '=' initializer
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($3, $4, $6);
+ decl->set_location_range(@3, @4);
+
+ $$ = $1;
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $3, ir_var_auto));
+ }
+ | init_declarator_list ',' any_identifier '=' initializer
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($3, NULL, $5);
+ decl->set_location(@3);
+
+ $$ = $1;
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $3, ir_var_auto));
+ }
+ ;
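+
+   // Illustrative example (added commentary, not upstream): a
+   // declaration such as "float a, b[2], c = 0.0;" builds one
+   // ast_declarator_list via single_declaration and then appends one
+   // ast_declaration per comma-separated declarator through the
+   // productions above.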
+
+ // Grammar Note: No 'enum', or 'typedef'.
+single_declaration:
+ fully_specified_type
+ {
+ void *ctx = state->linalloc;
+ /* Empty declaration list is valid. */
+ $$ = new(ctx) ast_declarator_list($1);
+ $$->set_location(@1);
+ }
+ | fully_specified_type any_identifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, NULL, NULL);
+ decl->set_location(@2);
+
+ $$ = new(ctx) ast_declarator_list($1);
+ $$->set_location_range(@1, @2);
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $2, ir_var_auto));
+ }
+ | fully_specified_type any_identifier array_specifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, $3, NULL);
+ decl->set_location_range(@2, @3);
+
+ $$ = new(ctx) ast_declarator_list($1);
+ $$->set_location_range(@1, @3);
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $2, ir_var_auto));
+ }
+ | fully_specified_type any_identifier array_specifier '=' initializer
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, $3, $5);
+ decl->set_location_range(@2, @3);
+
+ $$ = new(ctx) ast_declarator_list($1);
+ $$->set_location_range(@1, @3);
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $2, ir_var_auto));
+ }
+ | fully_specified_type any_identifier '=' initializer
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, NULL, $4);
+ decl->set_location(@2);
+
+ $$ = new(ctx) ast_declarator_list($1);
+ $$->set_location_range(@1, @2);
+ $$->declarations.push_tail(&decl->link);
+ state->symbols->add_variable(new(state) ir_variable(NULL, $2, ir_var_auto));
+ }
+ | INVARIANT variable_identifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, NULL, NULL);
+ decl->set_location(@2);
+
+ $$ = new(ctx) ast_declarator_list(NULL);
+ $$->set_location_range(@1, @2);
+ $$->invariant = true;
+
+ $$->declarations.push_tail(&decl->link);
+ }
+ | PRECISE variable_identifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, NULL, NULL);
+ decl->set_location(@2);
+
+ $$ = new(ctx) ast_declarator_list(NULL);
+ $$->set_location_range(@1, @2);
+ $$->precise = true;
+
+ $$->declarations.push_tail(&decl->link);
+ }
+ ;
+
+fully_specified_type:
+ type_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_fully_specified_type();
+ $$->set_location(@1);
+ $$->specifier = $1;
+ }
+ | type_qualifier type_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_fully_specified_type();
+ $$->set_location_range(@1, @2);
+ $$->qualifier = $1;
+ if (!$$->qualifier.push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ $$->specifier = $2;
+ if ($$->specifier->structure != NULL &&
+ $$->specifier->structure->is_declaration) {
+ $$->specifier->structure->layout = &$$->qualifier;
+ }
+ }
+ ;
+
+layout_qualifier:
+ LAYOUT_TOK '(' layout_qualifier_id_list ')'
+ {
+ $$ = $3;
+ }
+ ;
+
+layout_qualifier_id_list:
+ layout_qualifier_id
+ | layout_qualifier_id_list ',' layout_qualifier_id
+ {
+ $$ = $1;
+ if (!$$.merge_qualifier(& @3, state, $3, true)) {
+ YYERROR;
+ }
+ }
+ ;
+
+layout_qualifier_id:
+ any_identifier
+ {
+ memset(& $$, 0, sizeof($$));
+
+ /* Layout qualifiers for ARB_fragment_coord_conventions. */
+ if (!$$.flags.i && (state->ARB_fragment_coord_conventions_enable ||
+ state->is_version(150, 0))) {
+ if (match_layout_qualifier($1, "origin_upper_left", state) == 0) {
+ $$.flags.q.origin_upper_left = 1;
+ } else if (match_layout_qualifier($1, "pixel_center_integer",
+ state) == 0) {
+ $$.flags.q.pixel_center_integer = 1;
+ }
+
+ if ($$.flags.i && state->ARB_fragment_coord_conventions_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "GL_ARB_fragment_coord_conventions layout "
+ "identifier `%s' used", $1);
+ }
+ }
+
+ /* Layout qualifiers for AMD/ARB_conservative_depth. */
+ if (!$$.flags.i &&
+ (state->AMD_conservative_depth_enable ||
+ state->ARB_conservative_depth_enable ||
+ state->is_version(420, 0))) {
+ if (match_layout_qualifier($1, "depth_any", state) == 0) {
+ $$.flags.q.depth_type = 1;
+ $$.depth_type = ast_depth_any;
+ } else if (match_layout_qualifier($1, "depth_greater", state) == 0) {
+ $$.flags.q.depth_type = 1;
+ $$.depth_type = ast_depth_greater;
+ } else if (match_layout_qualifier($1, "depth_less", state) == 0) {
+ $$.flags.q.depth_type = 1;
+ $$.depth_type = ast_depth_less;
+ } else if (match_layout_qualifier($1, "depth_unchanged",
+ state) == 0) {
+ $$.flags.q.depth_type = 1;
+ $$.depth_type = ast_depth_unchanged;
+ }
+
+ if ($$.flags.i && state->AMD_conservative_depth_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "GL_AMD_conservative_depth "
+ "layout qualifier `%s' is used", $1);
+ }
+ if ($$.flags.i && state->ARB_conservative_depth_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "GL_ARB_conservative_depth "
+ "layout qualifier `%s' is used", $1);
+ }
+ }
+
+ /* See also interface_block_layout_qualifier. */
+ if (!$$.flags.i && state->has_uniform_buffer_objects()) {
+ if (match_layout_qualifier($1, "std140", state) == 0) {
+ $$.flags.q.std140 = 1;
+ } else if (match_layout_qualifier($1, "shared", state) == 0) {
+ $$.flags.q.shared = 1;
+ } else if (match_layout_qualifier($1, "std430", state) == 0) {
+ $$.flags.q.std430 = 1;
+ } else if (match_layout_qualifier($1, "column_major", state) == 0) {
+ $$.flags.q.column_major = 1;
+ /* "row_major" is a reserved word in GLSL 1.30+. Its token is parsed
+ * below in the interface_block_layout_qualifier rule.
+ *
+ * It is not a reserved word in GLSL ES 3.00, so it's handled here as
+ * an identifier.
+ *
+ * Also, this takes care of alternate capitalizations of
+ * "row_major" (which is necessary because layout qualifiers
+ * are case-insensitive in desktop GLSL).
+ */
+ } else if (match_layout_qualifier($1, "row_major", state) == 0) {
+ $$.flags.q.row_major = 1;
+ /* "packed" is a reserved word in GLSL, and its token is
+ * parsed below in the interface_block_layout_qualifier rule.
+ * However, we must take care of alternate capitalizations of
+ * "packed", because layout qualifiers are case-insensitive
+ * in desktop GLSL.
+ */
+ } else if (match_layout_qualifier($1, "packed", state) == 0) {
+ $$.flags.q.packed = 1;
+ }
+
+ if ($$.flags.i && state->ARB_uniform_buffer_object_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "layout qualifier `%s' is used", $1);
+ }
+ }
+
+ /* Layout qualifiers for GLSL 1.50 geometry shaders. */
+ if (!$$.flags.i) {
+ static const struct {
+ const char *s;
+ GLenum e;
+ } map[] = {
+ { "points", GL_POINTS },
+ { "lines", GL_LINES },
+ { "lines_adjacency", GL_LINES_ADJACENCY },
+ { "line_strip", GL_LINE_STRIP },
+ { "triangles", GL_TRIANGLES },
+ { "triangles_adjacency", GL_TRIANGLES_ADJACENCY },
+ { "triangle_strip", GL_TRIANGLE_STRIP },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier($1, map[i].s, state) == 0) {
+ $$.flags.q.prim_type = 1;
+ $$.prim_type = map[i].e;
+ break;
+ }
+ }
+
+ if ($$.flags.i && !state->has_geometry_shader() &&
+ !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& @1, state, "#version 150 layout "
+ "qualifier `%s' used", $1);
+ }
+ }
+
+ /* Layout qualifiers for ARB_shader_image_load_store. */
+ if (state->has_shader_image_load_store()) {
+ if (!$$.flags.i) {
+ static const struct {
+ const char *name;
+ enum pipe_format format;
+ glsl_base_type base_type;
+ /** Minimum desktop GLSL version required for the image
+ * format. Use 130 if already present in the original
+ * ARB extension.
+ */
+ unsigned required_glsl;
+ /** Minimum GLSL ES version required for the image format. */
+ unsigned required_essl;
+ /* NV_image_formats */
+ bool nv_image_formats;
+ bool ext_qualifiers;
+ } map[] = {
+ { "rgba32f", PIPE_FORMAT_R32G32B32A32_FLOAT, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rgba16f", PIPE_FORMAT_R16G16B16A16_FLOAT, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rg32f", PIPE_FORMAT_R32G32_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rg16f", PIPE_FORMAT_R16G16_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r11f_g11f_b10f", PIPE_FORMAT_R11G11B10_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r32f", PIPE_FORMAT_R32_FLOAT, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "r16f", PIPE_FORMAT_R16_FLOAT, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba32ui", PIPE_FORMAT_R32G32B32A32_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "rgba16ui", PIPE_FORMAT_R16G16B16A16_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "rgb10_a2ui", PIPE_FORMAT_R10G10B10A2_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rgba8ui", PIPE_FORMAT_R8G8B8A8_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "rg32ui", PIPE_FORMAT_R32G32_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rg16ui", PIPE_FORMAT_R16G16_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rg8ui", PIPE_FORMAT_R8G8_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "r32ui", PIPE_FORMAT_R32_UINT, GLSL_TYPE_UINT, 130, 310, false, false },
+ { "r16ui", PIPE_FORMAT_R16_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "r8ui", PIPE_FORMAT_R8_UINT, GLSL_TYPE_UINT, 130, 0, true, false },
+ { "rgba32i", PIPE_FORMAT_R32G32B32A32_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "rgba16i", PIPE_FORMAT_R16G16B16A16_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "rgba8i", PIPE_FORMAT_R8G8B8A8_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "rg32i", PIPE_FORMAT_R32G32_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "rg16i", PIPE_FORMAT_R16G16_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "rg8i", PIPE_FORMAT_R8G8_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "r32i", PIPE_FORMAT_R32_SINT, GLSL_TYPE_INT, 130, 310, false, false },
+ { "r16i", PIPE_FORMAT_R16_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "r8i", PIPE_FORMAT_R8_SINT, GLSL_TYPE_INT, 130, 0, true, false },
+ { "rgba16", PIPE_FORMAT_R16G16B16A16_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgb10_a2", PIPE_FORMAT_R10G10B10A2_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba8", PIPE_FORMAT_R8G8B8A8_UNORM, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rg16", PIPE_FORMAT_R16G16_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rg8", PIPE_FORMAT_R8G8_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r16", PIPE_FORMAT_R16_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r8", PIPE_FORMAT_R8_UNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba16_snorm", PIPE_FORMAT_R16G16B16A16_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rgba8_snorm", PIPE_FORMAT_R8G8B8A8_SNORM, GLSL_TYPE_FLOAT, 130, 310, false, false },
+ { "rg16_snorm", PIPE_FORMAT_R16G16_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "rg8_snorm", PIPE_FORMAT_R8G8_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r16_snorm", PIPE_FORMAT_R16_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+ { "r8_snorm", PIPE_FORMAT_R8_SNORM, GLSL_TYPE_FLOAT, 130, 0, true, false },
+
+ /* From GL_EXT_shader_image_load_store: */
+ /* base_type is incorrect but it'll be patched later when we know
+ * the variable type. See ast_to_hir.cpp */
+ { "size1x8", PIPE_FORMAT_R8_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size1x16", PIPE_FORMAT_R16_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size1x32", PIPE_FORMAT_R32_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size2x32", PIPE_FORMAT_R32G32_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ { "size4x32", PIPE_FORMAT_R32G32B32A32_SINT, GLSL_TYPE_VOID, 130, 0, false, true },
+ };
+
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if ((state->is_version(map[i].required_glsl,
+ map[i].required_essl) ||
+ (state->NV_image_formats_enable &&
+ map[i].nv_image_formats)) &&
+ match_layout_qualifier($1, map[i].name, state) == 0) {
+ /* Skip ARB_shader_image_load_store qualifiers if not enabled */
+ if (!map[i].ext_qualifiers && !(state->ARB_shader_image_load_store_enable ||
+ state->is_version(420, 310))) {
+ continue;
+ }
+ /* Skip EXT_shader_image_load_store qualifiers if not enabled */
+ if (map[i].ext_qualifiers && !state->EXT_shader_image_load_store_enable) {
+ continue;
+ }
+ $$.flags.q.explicit_image_format = 1;
+ $$.image_format = map[i].format;
+ $$.image_base_type = map[i].base_type;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!$$.flags.i) {
+ if (match_layout_qualifier($1, "early_fragment_tests", state) == 0) {
+ /* From section 4.4.1.3 of the GLSL 4.50 specification
+ * (Fragment Shader Inputs):
+ *
+ * "Fragment shaders also allow the following layout
+ * qualifier on in only (not with variable declarations)
+ * layout-qualifier-id
+ * early_fragment_tests
+ * [...]"
+ */
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state,
+ "early_fragment_tests layout qualifier only "
+ "valid in fragment shaders");
+ }
+
+ $$.flags.q.early_fragment_tests = 1;
+ }
+
+ if (match_layout_qualifier($1, "inner_coverage", state) == 0) {
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state,
+ "inner_coverage layout qualifier only "
+ "valid in fragment shaders");
+ }
+
+ if (state->INTEL_conservative_rasterization_enable) {
+ $$.flags.q.inner_coverage = 1;
+ } else {
+ _mesa_glsl_error(& @1, state,
+ "inner_coverage layout qualifier present, "
+ "but the INTEL_conservative_rasterization extension "
+ "is not enabled.");
+ }
+ }
+
+ if (match_layout_qualifier($1, "post_depth_coverage", state) == 0) {
+ if (state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state,
+ "post_depth_coverage layout qualifier only "
+ "valid in fragment shaders");
+ }
+
+ if (state->ARB_post_depth_coverage_enable ||
+ state->INTEL_conservative_rasterization_enable) {
+ $$.flags.q.post_depth_coverage = 1;
+ } else {
+ _mesa_glsl_error(& @1, state,
+ "post_depth_coverage layout qualifier present, "
+ "but the GL_ARB_post_depth_coverage extension "
+ "is not enabled.");
+ }
+ }
+
+ if ($$.flags.q.post_depth_coverage && $$.flags.q.inner_coverage) {
+ _mesa_glsl_error(& @1, state,
+ "post_depth_coverage & inner_coverage layout qualifiers "
+ "are mutually exclusive");
+ }
+ }
+
+ const bool pixel_interlock_ordered = match_layout_qualifier($1,
+ "pixel_interlock_ordered", state) == 0;
+ const bool pixel_interlock_unordered = match_layout_qualifier($1,
+ "pixel_interlock_unordered", state) == 0;
+ const bool sample_interlock_ordered = match_layout_qualifier($1,
+ "sample_interlock_ordered", state) == 0;
+ const bool sample_interlock_unordered = match_layout_qualifier($1,
+ "sample_interlock_unordered", state) == 0;
+
+ if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state, "interlock layout qualifiers: "
+ "pixel_interlock_ordered, pixel_interlock_unordered, "
+ "sample_interlock_ordered and sample_interlock_unordered, "
+ "only valid in fragment shader input layout declaration.");
+ } else if (pixel_interlock_ordered + pixel_interlock_unordered +
+ sample_interlock_ordered + sample_interlock_unordered > 0 &&
+ !state->ARB_fragment_shader_interlock_enable &&
+ !state->NV_fragment_shader_interlock_enable) {
+ _mesa_glsl_error(& @1, state,
+ "interlock layout qualifier present, but the "
+ "GL_ARB_fragment_shader_interlock or "
+ "GL_NV_fragment_shader_interlock extension is not "
+ "enabled.");
+ } else {
+ $$.flags.q.pixel_interlock_ordered = pixel_interlock_ordered;
+ $$.flags.q.pixel_interlock_unordered = pixel_interlock_unordered;
+ $$.flags.q.sample_interlock_ordered = sample_interlock_ordered;
+ $$.flags.q.sample_interlock_unordered = sample_interlock_unordered;
+ }
+
+ /* Layout qualifiers for tessellation evaluation shaders. */
+ if (!$$.flags.i) {
+ static const struct {
+ const char *s;
+ GLenum e;
+ } map[] = {
+ /* triangles already parsed by gs-specific code */
+ { "quads", GL_QUADS },
+ { "isolines", GL_ISOLINES },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier($1, map[i].s, state) == 0) {
+ $$.flags.q.prim_type = 1;
+ $$.prim_type = map[i].e;
+ break;
+ }
+ }
+
+ if ($$.flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& @1, state,
+ "primitive mode qualifier `%s' requires "
+ "GLSL 4.00 or ARB_tessellation_shader", $1);
+ }
+ }
+ if (!$$.flags.i) {
+ static const struct {
+ const char *s;
+ enum gl_tess_spacing e;
+ } map[] = {
+ { "equal_spacing", TESS_SPACING_EQUAL },
+ { "fractional_odd_spacing", TESS_SPACING_FRACTIONAL_ODD },
+ { "fractional_even_spacing", TESS_SPACING_FRACTIONAL_EVEN },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier($1, map[i].s, state) == 0) {
+ $$.flags.q.vertex_spacing = 1;
+ $$.vertex_spacing = map[i].e;
+ break;
+ }
+ }
+
+ if ($$.flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& @1, state,
+ "vertex spacing qualifier `%s' requires "
+ "GLSL 4.00 or ARB_tessellation_shader", $1);
+ }
+ }
+ if (!$$.flags.i) {
+ if (match_layout_qualifier($1, "cw", state) == 0) {
+ $$.flags.q.ordering = 1;
+ $$.ordering = GL_CW;
+ } else if (match_layout_qualifier($1, "ccw", state) == 0) {
+ $$.flags.q.ordering = 1;
+ $$.ordering = GL_CCW;
+ }
+
+ if ($$.flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& @1, state,
+ "ordering qualifier `%s' requires "
+ "GLSL 4.00 or ARB_tessellation_shader", $1);
+ }
+ }
+ if (!$$.flags.i) {
+ if (match_layout_qualifier($1, "point_mode", state) == 0) {
+ $$.flags.q.point_mode = 1;
+ $$.point_mode = true;
+ }
+
+ if ($$.flags.i && !state->has_tessellation_shader()) {
+ _mesa_glsl_error(& @1, state,
+ "qualifier `point_mode' requires "
+ "GLSL 4.00 or ARB_tessellation_shader");
+ }
+ }
+
+ if (!$$.flags.i) {
+ static const struct {
+ const char *s;
+ uint32_t mask;
+ } map[] = {
+ { "blend_support_multiply", BLEND_MULTIPLY },
+ { "blend_support_screen", BLEND_SCREEN },
+ { "blend_support_overlay", BLEND_OVERLAY },
+ { "blend_support_darken", BLEND_DARKEN },
+ { "blend_support_lighten", BLEND_LIGHTEN },
+ { "blend_support_colordodge", BLEND_COLORDODGE },
+ { "blend_support_colorburn", BLEND_COLORBURN },
+ { "blend_support_hardlight", BLEND_HARDLIGHT },
+ { "blend_support_softlight", BLEND_SOFTLIGHT },
+ { "blend_support_difference", BLEND_DIFFERENCE },
+ { "blend_support_exclusion", BLEND_EXCLUSION },
+ { "blend_support_hsl_hue", BLEND_HSL_HUE },
+ { "blend_support_hsl_saturation", BLEND_HSL_SATURATION },
+ { "blend_support_hsl_color", BLEND_HSL_COLOR },
+ { "blend_support_hsl_luminosity", BLEND_HSL_LUMINOSITY },
+ { "blend_support_all_equations", BLEND_ALL },
+ };
+ for (unsigned i = 0; i < ARRAY_SIZE(map); i++) {
+ if (match_layout_qualifier($1, map[i].s, state) == 0) {
+ $$.flags.q.blend_support = 1;
+ state->fs_blend_support |= map[i].mask;
+ break;
+ }
+ }
+
+ if ($$.flags.i &&
+ !state->KHR_blend_equation_advanced_enable &&
+ !state->is_version(0, 320)) {
+ _mesa_glsl_error(& @1, state,
+ "advanced blending layout qualifiers require "
+ "ESSL 3.20 or KHR_blend_equation_advanced");
+ }
+
+ if ($$.flags.i && state->stage != MESA_SHADER_FRAGMENT) {
+ _mesa_glsl_error(& @1, state,
+ "advanced blending layout qualifiers only "
+ "valid in fragment shaders");
+ }
+ }
+
+ /* Layout qualifiers for ARB_compute_variable_group_size. */
+ if (!$$.flags.i) {
+ if (match_layout_qualifier($1, "local_size_variable", state) == 0) {
+ $$.flags.q.local_size_variable = 1;
+ }
+
+ if ($$.flags.i && !state->ARB_compute_variable_group_size_enable) {
+ _mesa_glsl_error(& @1, state,
+ "qualifier `local_size_variable` requires "
+ "ARB_compute_variable_group_size");
+ }
+ }
+
+ /* Layout qualifiers for ARB_bindless_texture. */
+ if (!$$.flags.i) {
+ if (match_layout_qualifier($1, "bindless_sampler", state) == 0)
+ $$.flags.q.bindless_sampler = 1;
+ if (match_layout_qualifier($1, "bound_sampler", state) == 0)
+ $$.flags.q.bound_sampler = 1;
+
+ if (state->has_shader_image_load_store()) {
+ if (match_layout_qualifier($1, "bindless_image", state) == 0)
+ $$.flags.q.bindless_image = 1;
+ if (match_layout_qualifier($1, "bound_image", state) == 0)
+ $$.flags.q.bound_image = 1;
+ }
+
+ if ($$.flags.i && !state->has_bindless()) {
+ _mesa_glsl_error(& @1, state,
+ "qualifier `%s` requires "
+ "ARB_bindless_texture", $1);
+ }
+ }
+
+ if (!$$.flags.i &&
+ state->EXT_shader_framebuffer_fetch_non_coherent_enable) {
+ if (match_layout_qualifier($1, "noncoherent", state) == 0)
+ $$.flags.q.non_coherent = 1;
+ }
+
+ // Layout qualifiers for NV_compute_shader_derivatives.
+ if (!$$.flags.i) {
+ if (match_layout_qualifier($1, "derivative_group_quadsNV", state) == 0) {
+ $$.flags.q.derivative_group = 1;
+ $$.derivative_group = DERIVATIVE_GROUP_QUADS;
+ } else if (match_layout_qualifier($1, "derivative_group_linearNV", state) == 0) {
+ $$.flags.q.derivative_group = 1;
+ $$.derivative_group = DERIVATIVE_GROUP_LINEAR;
+ }
+
+ if ($$.flags.i) {
+ if (!state->has_compute_shader()) {
+ _mesa_glsl_error(& @1, state,
+ "qualifier `%s' requires "
+ "a compute shader", $1);
+ }
+
+ if (!state->NV_compute_shader_derivatives_enable) {
+ _mesa_glsl_error(& @1, state,
+ "qualifier `%s' requires "
+ "NV_compute_shader_derivatives", $1);
+ }
+
+ if (state->NV_compute_shader_derivatives_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "NV_compute_shader_derivatives layout "
+ "qualifier `%s' used", $1);
+ }
+ }
+ }
+
+ /* Layout qualifier for NV_viewport_array2. */
+ if (!$$.flags.i && state->stage != MESA_SHADER_FRAGMENT) {
+ if (match_layout_qualifier($1, "viewport_relative", state) == 0) {
+ $$.flags.q.viewport_relative = 1;
+ }
+
+ if ($$.flags.i && !state->NV_viewport_array2_enable) {
+ _mesa_glsl_error(& @1, state,
+ "qualifier `%s' requires "
+ "GL_NV_viewport_array2", $1);
+ }
+
+ if ($$.flags.i && state->NV_viewport_array2_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "GL_NV_viewport_array2 layout "
+ "identifier `%s' used", $1);
+ }
+ }
+
+ if (!$$.flags.i) {
+ _mesa_glsl_error(& @1, state, "unrecognized layout identifier "
+ "`%s'", $1);
+ YYERROR;
+ }
+ }
+ | any_identifier '=' constant_expression
+ {
+ memset(& $$, 0, sizeof($$));
+ void *ctx = state->linalloc;
+
+ if ($3->oper != ast_int_constant &&
+ $3->oper != ast_uint_constant &&
+ !state->has_enhanced_layouts()) {
+ _mesa_glsl_error(& @1, state,
+ "compile-time constant expressions require "
+ "GLSL 4.40 or ARB_enhanced_layouts");
+ }
+
+ if (match_layout_qualifier("align", $1, state) == 0) {
+ if (!state->has_enhanced_layouts()) {
+ _mesa_glsl_error(& @1, state,
+ "align qualifier requires "
+ "GLSL 4.40 or ARB_enhanced_layouts");
+ } else {
+ $$.flags.q.explicit_align = 1;
+ $$.align = $3;
+ }
+ }
+
+ if (match_layout_qualifier("location", $1, state) == 0) {
+ $$.flags.q.explicit_location = 1;
+
+ if ($$.flags.q.attribute == 1 &&
+ state->ARB_explicit_attrib_location_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "GL_ARB_explicit_attrib_location layout "
+ "identifier `%s' used", $1);
+ }
+ $$.location = $3;
+ }
+
+ if (match_layout_qualifier("component", $1, state) == 0) {
+ if (!state->has_enhanced_layouts()) {
+ _mesa_glsl_error(& @1, state,
+ "component qualifier requires "
+ "GLSL 4.40 or ARB_enhanced_layouts");
+ } else {
+ $$.flags.q.explicit_component = 1;
+ $$.component = $3;
+ }
+ }
+
+ if (match_layout_qualifier("index", $1, state) == 0) {
+ if (state->es_shader && !state->EXT_blend_func_extended_enable) {
+ _mesa_glsl_error(& @3, state, "index layout qualifier requires EXT_blend_func_extended");
+ YYERROR;
+ }
+
+ $$.flags.q.explicit_index = 1;
+ $$.index = $3;
+ }
+
+ if ((state->has_420pack_or_es31() ||
+ state->has_atomic_counters() ||
+ state->has_shader_storage_buffer_objects()) &&
+ match_layout_qualifier("binding", $1, state) == 0) {
+ $$.flags.q.explicit_binding = 1;
+ $$.binding = $3;
+ }
+
+ if ((state->has_atomic_counters() ||
+ state->has_enhanced_layouts()) &&
+ match_layout_qualifier("offset", $1, state) == 0) {
+ $$.flags.q.explicit_offset = 1;
+ $$.offset = $3;
+ }
+
+ if (match_layout_qualifier("max_vertices", $1, state) == 0) {
+ $$.flags.q.max_vertices = 1;
+ $$.max_vertices = new(ctx) ast_layout_expression(@1, $3);
+ if (!state->has_geometry_shader()) {
+ _mesa_glsl_error(& @3, state,
+ "#version 150 max_vertices qualifier "
+ "specified");
+ }
+ }
+
+ if (state->stage == MESA_SHADER_GEOMETRY) {
+ if (match_layout_qualifier("stream", $1, state) == 0 &&
+ state->check_explicit_attrib_stream_allowed(& @3)) {
+ $$.flags.q.stream = 1;
+ $$.flags.q.explicit_stream = 1;
+ $$.stream = $3;
+ }
+ }
+
+ if (state->has_enhanced_layouts()) {
+ if (match_layout_qualifier("xfb_buffer", $1, state) == 0) {
+ $$.flags.q.xfb_buffer = 1;
+ $$.flags.q.explicit_xfb_buffer = 1;
+ $$.xfb_buffer = $3;
+ }
+
+ if (match_layout_qualifier("xfb_offset", $1, state) == 0) {
+ $$.flags.q.explicit_xfb_offset = 1;
+ $$.offset = $3;
+ }
+
+ if (match_layout_qualifier("xfb_stride", $1, state) == 0) {
+ $$.flags.q.xfb_stride = 1;
+ $$.flags.q.explicit_xfb_stride = 1;
+ $$.xfb_stride = $3;
+ }
+ }
+
+ static const char * const local_size_qualifiers[3] = {
+ "local_size_x",
+ "local_size_y",
+ "local_size_z",
+ };
+ for (int i = 0; i < 3; i++) {
+ if (match_layout_qualifier(local_size_qualifiers[i], $1,
+ state) == 0) {
+ if (!state->has_compute_shader()) {
+ _mesa_glsl_error(& @3, state,
+ "%s qualifier requires GLSL 4.30 or "
+ "GLSL ES 3.10 or ARB_compute_shader",
+ local_size_qualifiers[i]);
+ YYERROR;
+ } else {
+ $$.flags.q.local_size |= (1 << i);
+ $$.local_size[i] = new(ctx) ast_layout_expression(@1, $3);
+ }
+ break;
+ }
+ }
+
+ if (match_layout_qualifier("invocations", $1, state) == 0) {
+ $$.flags.q.invocations = 1;
+ $$.invocations = new(ctx) ast_layout_expression(@1, $3);
+ if (!state->is_version(400, 320) &&
+ !state->ARB_gpu_shader5_enable &&
+ !state->OES_geometry_shader_enable &&
+ !state->EXT_geometry_shader_enable) {
+ _mesa_glsl_error(& @3, state,
+ "GL_ARB_gpu_shader5 invocations "
+ "qualifier specified");
+ }
+ }
+
+ /* Layout qualifiers for tessellation control shaders. */
+ if (match_layout_qualifier("vertices", $1, state) == 0) {
+ $$.flags.q.vertices = 1;
+ $$.vertices = new(ctx) ast_layout_expression(@1, $3);
+ if (!state->has_tessellation_shader()) {
+ _mesa_glsl_error(& @1, state,
+ "vertices qualifier requires GLSL 4.00 or "
+ "ARB_tessellation_shader");
+ }
+ }
+
+ /* If the identifier didn't match any known layout identifiers,
+ * emit an error.
+ */
+ if (!$$.flags.i) {
+ _mesa_glsl_error(& @1, state, "unrecognized layout identifier "
+ "`%s'", $1);
+ YYERROR;
+ }
+ }
+ | interface_block_layout_qualifier
+ {
+ $$ = $1;
+ /* Layout qualifiers for ARB_uniform_buffer_object. */
+ if ($$.flags.q.uniform && !state->has_uniform_buffer_objects()) {
+ _mesa_glsl_error(& @1, state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "layout qualifier `%s' is used", $1);
+ } else if ($$.flags.q.uniform && state->ARB_uniform_buffer_object_warn) {
+ _mesa_glsl_warning(& @1, state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "layout qualifier `%s' is used", $1);
+ }
+ }
+ ;
+
+/* This is a separate language rule because we parse these as tokens
+ * (due to them being reserved keywords) instead of identifiers like
+ * most qualifiers. See the any_identifier path of
+ * layout_qualifier_id for the others.
+ *
+ * Note that since layout qualifiers are case-insensitive in desktop
+ * GLSL, all of these qualifiers need to be handled as identifiers as
+ * well (by the any_identifier path of layout_qualifier_id).
+ */
+interface_block_layout_qualifier:
+ ROW_MAJOR
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.row_major = 1;
+ }
+ | PACKED_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.packed = 1;
+ }
+ | SHARED
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.shared = 1;
+ }
+ ;
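+
+/* Illustrative GLSL matched by this rule (block and member names are
+ * made up):
+ *
+ *    layout(row_major) uniform MatrixBlock { mat4 mvp; };
+ *    layout(packed) uniform PackedBlock { float f; };
+ */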
+
+subroutine_qualifier:
+ SUBROUTINE
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.subroutine = 1;
+ }
+ | SUBROUTINE '(' subroutine_type_list ')'
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.subroutine = 1;
+ $$.subroutine_list = $3;
+ }
+ ;
+
+subroutine_type_list:
+ any_identifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($1, NULL, NULL);
+ decl->set_location(@1);
+
+ $$ = new(ctx) ast_subroutine_list();
+ $$->declarations.push_tail(&decl->link);
+ }
+ | subroutine_type_list ',' any_identifier
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($3, NULL, NULL);
+ decl->set_location(@3);
+
+ $$ = $1;
+ $$->declarations.push_tail(&decl->link);
+ }
+ ;
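+
+/* Sketch of ARB_shader_subroutine source that exercises these rules
+ * (the type and function names are hypothetical):
+ *
+ *    subroutine vec4 ShadeFunc(vec3 n);
+ *    subroutine(ShadeFunc) vec4 flat_shade(vec3 n) { return vec4(n, 1.0); }
+ *    subroutine uniform ShadeFunc shade;
+ */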
+
+interpolation_qualifier:
+ SMOOTH
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.smooth = 1;
+ }
+ | FLAT
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.flat = 1;
+ }
+ | NOPERSPECTIVE
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.noperspective = 1;
+ }
+ ;
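+
+/* e.g. "flat in int material_id;" or "noperspective out float w;"
+ * (illustrative declarations; the names are made up).
+ */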
+
+type_qualifier:
+ /* Single qualifiers */
+ INVARIANT
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.invariant = 1;
+ }
+ | PRECISE
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.precise = 1;
+ }
+ | auxiliary_storage_qualifier
+ | storage_qualifier
+ | interpolation_qualifier
+ | layout_qualifier
+ | memory_qualifier
+ | subroutine_qualifier
+ | precision_qualifier
+ {
+ memset(&$$, 0, sizeof($$));
+ $$.precision = $1;
+ }
+
+ /* Multiple qualifiers:
+ * In GLSL 4.20, these can be specified in any order. In earlier versions,
+ * they appear in this order (see GLSL 1.50 section 4.7 & comments below):
+ *
+ * invariant interpolation auxiliary storage precision ...or...
+ * layout storage precision
+ *
+ * Each qualifier's rule ensures that the accumulated qualifiers on the right
+ * side don't contain any that must appear on the left hand side.
+ * For example, when processing a storage qualifier, we check that there are
+ * no auxiliary, interpolation, layout, invariant, or precise qualifiers to the right.
+ */
+ | PRECISE type_qualifier
+ {
+ if ($2.flags.q.precise)
+ _mesa_glsl_error(&@1, state, "duplicate \"precise\" qualifier");
+
+ $$ = $2;
+ $$.flags.q.precise = 1;
+ }
+ | INVARIANT type_qualifier
+ {
+ if ($2.flags.q.invariant)
+ _mesa_glsl_error(&@1, state, "duplicate \"invariant\" qualifier");
+
+ if (!state->has_420pack_or_es31() && $2.flags.q.precise)
+ _mesa_glsl_error(&@1, state,
+ "\"invariant\" must come after \"precise\"");
+
+ $$ = $2;
+ $$.flags.q.invariant = 1;
+
+ /* GLSL ES 3.00 spec, section 4.6.1 "The Invariant Qualifier":
+ *
+ * "Only variables output from a shader can be candidates for invariance.
+ * This includes user-defined output variables and the built-in output
+ * variables. As only outputs can be declared as invariant, an invariant
+ * output from one shader stage will still match an input of a subsequent
+ * stage without the input being declared as invariant."
+ *
+ * On the desktop side, this text first appears in GLSL 4.30.
+ */
+ if (state->is_version(430, 300) && $$.flags.q.in)
+ _mesa_glsl_error(&@1, state, "invariant qualifiers cannot be used with shader inputs");
+ }
+ | interpolation_qualifier type_qualifier
+ {
+ /* Section 4.3 of the GLSL 1.40 specification states:
+ * "...qualified with one of these interpolation qualifiers"
+ *
+ * GLSL 1.30 claims to allow "one or more", but insists that:
+ * "These interpolation qualifiers may only precede the qualifiers in,
+ * centroid in, out, or centroid out in a declaration."
+ *
+ * ...which means that e.g. smooth can't precede smooth, so there can be
+ * only one after all, and the 1.40 text is a clarification, not a change.
+ */
+ if ($2.has_interpolation())
+ _mesa_glsl_error(&@1, state, "duplicate interpolation qualifier");
+
+ if (!state->has_420pack_or_es31() &&
+ ($2.flags.q.precise || $2.flags.q.invariant)) {
+ _mesa_glsl_error(&@1, state, "interpolation qualifiers must come "
+ "after \"precise\" or \"invariant\"");
+ }
+
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+ }
+ | layout_qualifier type_qualifier
+ {
+ /* In the absence of ARB_shading_language_420pack, layout qualifiers may
+ * appear no later than auxiliary storage qualifiers. There is no
+ * particularly clear spec language mandating this, but in all examples
+ * the layout qualifier precedes the storage qualifier.
+ *
+ * We allow combinations of layout with interpolation, invariant or
+ * precise qualifiers since these are useful in ARB_separate_shader_objects.
+ * There is no clear spec guidance on this either.
+ */
+ $$ = $1;
+ $$.merge_qualifier(& @1, state, $2, false, $2.has_layout());
+ }
+ | subroutine_qualifier type_qualifier
+ {
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+ }
+ | auxiliary_storage_qualifier type_qualifier
+ {
+ if ($2.has_auxiliary_storage()) {
+ _mesa_glsl_error(&@1, state,
+ "duplicate auxiliary storage qualifier (centroid or sample)");
+ }
+
+ if ((!state->has_420pack_or_es31() && !state->EXT_gpu_shader4_enable) &&
+ ($2.flags.q.precise || $2.flags.q.invariant ||
+ $2.has_interpolation() || $2.has_layout())) {
+ _mesa_glsl_error(&@1, state, "auxiliary storage qualifiers must come "
+ "just before storage qualifiers");
+ }
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+ }
+ | storage_qualifier type_qualifier
+ {
+ /* Section 4.3 of the GLSL 1.20 specification states:
+ * "Variable declarations may have a storage qualifier specified..."
+ * 1.30 clarifies this to "may have one storage qualifier".
+ *
+ * GL_EXT_gpu_shader4 allows "varying out" in fragment shaders.
+ */
+ if ($2.has_storage() &&
+ (!state->EXT_gpu_shader4_enable ||
+ state->stage != MESA_SHADER_FRAGMENT ||
+ !$1.flags.q.varying || !$2.flags.q.out))
+ _mesa_glsl_error(&@1, state, "duplicate storage qualifier");
+
+ if (!state->has_420pack_or_es31() &&
+ ($2.flags.q.precise || $2.flags.q.invariant || $2.has_interpolation() ||
+ $2.has_layout() || $2.has_auxiliary_storage())) {
+ _mesa_glsl_error(&@1, state, "storage qualifiers must come after "
+ "precise, invariant, interpolation, layout and auxiliary "
+ "storage qualifiers");
+ }
+
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+ }
+ | precision_qualifier type_qualifier
+ {
+ if ($2.precision != ast_precision_none)
+ _mesa_glsl_error(&@1, state, "duplicate precision qualifier");
+
+ if (!(state->has_420pack_or_es31()) &&
+ $2.flags.i != 0)
+ _mesa_glsl_error(&@1, state, "precision qualifiers must come last");
+
+ $$ = $2;
+ $$.precision = $1;
+ }
+ | memory_qualifier type_qualifier
+ {
+ $$ = $1;
+ $$.merge_qualifier(&@1, state, $2, false);
+ }
+ ;
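+
+/* A pre-4.20 declaration that satisfies the ordering checks above
+ * would be, for example:
+ *
+ *    invariant centroid out highp vec4 v_color;
+ *
+ * With ARB_shading_language_420pack or ES 3.10 the same qualifiers may
+ * appear in any order. (Illustrative snippet; the name is made up.)
+ */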
+
+auxiliary_storage_qualifier:
+ CENTROID
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.centroid = 1;
+ }
+ | SAMPLE
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.sample = 1;
+ }
+ | PATCH
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.patch = 1;
+ }
+
+storage_qualifier:
+ CONST_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.constant = 1;
+ }
+ | ATTRIBUTE
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.attribute = 1;
+ }
+ | VARYING
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.varying = 1;
+ }
+ | IN_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.in = 1;
+ }
+ | OUT_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.out = 1;
+
+ if (state->stage == MESA_SHADER_GEOMETRY &&
+ state->has_explicit_attrib_stream()) {
+ /* Section 4.3.8.2 (Output Layout Qualifiers) of the GLSL 4.00
+ * spec says:
+ *
+ * "If the block or variable is declared with the stream
+ * identifier, it is associated with the specified stream;
+ * otherwise, it is associated with the current default stream."
+ */
+ $$.flags.q.stream = 1;
+ $$.flags.q.explicit_stream = 0;
+ $$.stream = state->out_qualifier->stream;
+ }
+
+ if (state->has_enhanced_layouts()) {
+ $$.flags.q.xfb_buffer = 1;
+ $$.flags.q.explicit_xfb_buffer = 0;
+ $$.xfb_buffer = state->out_qualifier->xfb_buffer;
+ }
+ }
+ | INOUT_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.in = 1;
+ $$.flags.q.out = 1;
+
+ if (!state->has_framebuffer_fetch() ||
+ !state->is_version(130, 300) ||
+ state->stage != MESA_SHADER_FRAGMENT)
+ _mesa_glsl_error(&@1, state, "A single interface variable cannot be "
+ "declared as both input and output");
+ }
+ | UNIFORM
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.uniform = 1;
+ }
+ | BUFFER
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.buffer = 1;
+ }
+ | SHARED
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.shared_storage = 1;
+ }
+ ;
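+
+/* e.g. "uniform float t;", "out vec4 frag_color;", or -- with
+ * framebuffer fetch support -- "inout vec4 frag_color;" in a fragment
+ * shader (illustrative declarations).
+ */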
+
+memory_qualifier:
+ COHERENT
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.coherent = 1;
+ }
+ | VOLATILE
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q._volatile = 1;
+ }
+ | RESTRICT
+ {
+ STATIC_ASSERT(sizeof($$.flags.q) <= sizeof($$.flags.i));
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.restrict_flag = 1;
+ }
+ | READONLY
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.read_only = 1;
+ }
+ | WRITEONLY
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.write_only = 1;
+ }
+ ;
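+
+/* e.g. "layout(r32f) coherent readonly uniform image2D img;"
+ * (an illustrative image declaration combining memory qualifiers).
+ */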
+
+array_specifier:
+ '[' ']'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_array_specifier(@1, new(ctx) ast_expression(
+ ast_unsized_array_dim, NULL,
+ NULL, NULL));
+ $$->set_location_range(@1, @2);
+ }
+ | '[' constant_expression ']'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_array_specifier(@1, $2);
+ $$->set_location_range(@1, @3);
+ }
+ | array_specifier '[' ']'
+ {
+ void *ctx = state->linalloc;
+ $$ = $1;
+
+ if (state->check_arrays_of_arrays_allowed(& @1)) {
+ $$->add_dimension(new(ctx) ast_expression(ast_unsized_array_dim, NULL,
+ NULL, NULL));
+ }
+ }
+ | array_specifier '[' constant_expression ']'
+ {
+ $$ = $1;
+
+ if (state->check_arrays_of_arrays_allowed(& @1)) {
+ $$->add_dimension($3);
+ }
+ }
+ ;
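+
+/* Matches dimension lists such as "[4]" and "[]", and -- where arrays
+ * of arrays are allowed -- chains like the one in the illustrative
+ * declaration "float a[3][2];".
+ */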
+
+type_specifier:
+ type_specifier_nonarray
+ | type_specifier_nonarray array_specifier
+ {
+ $$ = $1;
+ $$->array_specifier = $2;
+ }
+ ;
+
+type_specifier_nonarray:
+ basic_type_specifier_nonarray
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_type_specifier($1);
+ $$->set_location(@1);
+ }
+ | struct_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_type_specifier($1);
+ $$->set_location(@1);
+ }
+ | TYPE_IDENTIFIER
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_type_specifier($1);
+ $$->set_location(@1);
+ }
+ ;
+
+basic_type_specifier_nonarray:
+ VOID_TOK { $$ = glsl_type::void_type; }
+ | BASIC_TYPE_TOK { $$ = $1; }
+ | UNSIGNED BASIC_TYPE_TOK
+ {
+ if ($2 == glsl_type::int_type) {
+ $$ = glsl_type::uint_type;
+ } else {
+ _mesa_glsl_error(&@1, state,
+ "\"unsigned\" is only allowed before \"int\"");
+ }
+ }
+ ;
+
+precision_qualifier:
+ HIGHP
+ {
+ state->check_precision_qualifiers_allowed(&@1);
+ $$ = ast_precision_high;
+ }
+ | MEDIUMP
+ {
+ state->check_precision_qualifiers_allowed(&@1);
+ $$ = ast_precision_medium;
+ }
+ | LOWP
+ {
+ state->check_precision_qualifiers_allowed(&@1);
+ $$ = ast_precision_low;
+ }
+ ;
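+
+/* e.g. the "highp" in "highp vec4 pos;" or in the global statement
+ * "precision mediump float;"; check_precision_qualifiers_allowed()
+ * rejects these where the GLSL version lacks them (illustrative
+ * snippets).
+ */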
+
+struct_specifier:
+ STRUCT any_identifier '{' struct_declaration_list '}'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_struct_specifier($2, $4);
+ $$->set_location_range(@2, @5);
+ state->symbols->add_type($2, glsl_type::void_type);
+ }
+ | STRUCT '{' struct_declaration_list '}'
+ {
+ void *ctx = state->linalloc;
+
+ /* All anonymous structs have the same name. This simplifies matching of
+ * globals whose type is an unnamed struct.
+ *
+ * It also avoids a memory leak when the same shader is compiled over and
+ * over again.
+ */
+ $$ = new(ctx) ast_struct_specifier("#anon_struct", $3);
+
+ $$->set_location_range(@2, @4);
+ }
+ ;
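+
+/* e.g. "struct Light { vec3 pos; float intensity; };"; an anonymous
+ * variant such as "struct { vec3 pos; } light;" takes the second
+ * production and receives the shared "#anon_struct" name
+ * (illustrative declarations).
+ */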
+
+struct_declaration_list:
+ struct_declaration
+ {
+ $$ = $1;
+ $1->link.self_link();
+ }
+ | struct_declaration_list struct_declaration
+ {
+ $$ = $1;
+ $$->link.insert_before(& $2->link);
+ }
+ ;
+
+struct_declaration:
+ fully_specified_type struct_declarator_list ';'
+ {
+ void *ctx = state->linalloc;
+ ast_fully_specified_type *const type = $1;
+ type->set_location(@1);
+
+ if (state->has_bindless()) {
+ ast_type_qualifier input_layout_mask;
+
+ /* Allow to declare qualifiers for images. */
+ input_layout_mask.flags.i = 0;
+ input_layout_mask.flags.q.coherent = 1;
+ input_layout_mask.flags.q._volatile = 1;
+ input_layout_mask.flags.q.restrict_flag = 1;
+ input_layout_mask.flags.q.read_only = 1;
+ input_layout_mask.flags.q.write_only = 1;
+ input_layout_mask.flags.q.explicit_image_format = 1;
+
+ if ((type->qualifier.flags.i & ~input_layout_mask.flags.i) != 0) {
+ _mesa_glsl_error(&@1, state,
+ "only precision and image qualifiers may be "
+ "applied to structure members");
+ }
+ } else {
+ if (type->qualifier.flags.i != 0)
+ _mesa_glsl_error(&@1, state,
+ "only precision qualifiers may be applied to "
+ "structure members");
+ }
+
+ $$ = new(ctx) ast_declarator_list(type);
+ $$->set_location(@2);
+
+ $$->declarations.push_degenerate_list_at_head(& $2->link);
+ }
+ ;
+
+struct_declarator_list:
+ struct_declarator
+ {
+ $$ = $1;
+ $1->link.self_link();
+ }
+ | struct_declarator_list ',' struct_declarator
+ {
+ $$ = $1;
+ $$->link.insert_before(& $3->link);
+ }
+ ;
+
+struct_declarator:
+ any_identifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_declaration($1, NULL, NULL);
+ $$->set_location(@1);
+ }
+ | any_identifier array_specifier
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_declaration($1, $2, NULL);
+ $$->set_location_range(@1, @2);
+ }
+ ;
+
+initializer:
+ assignment_expression
+ | '{' initializer_list '}'
+ {
+ $$ = $2;
+ }
+ | '{' initializer_list ',' '}'
+ {
+ $$ = $2;
+ }
+ ;
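+
+/* e.g. "vec2 pts[2] = { vec2(0.0), vec2(1.0, 2.0) };" exercises the
+ * brace-enclosed productions, including the optional trailing-comma
+ * form (illustrative snippet).
+ */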
+
+initializer_list:
+ initializer
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_aggregate_initializer();
+ $$->set_location(@1);
+ $$->expressions.push_tail(& $1->link);
+ }
+ | initializer_list ',' initializer
+ {
+ $1->expressions.push_tail(& $3->link);
+ }
+ ;
+
+declaration_statement:
+ declaration
+ ;
+
+ // Grammar Note: labeled statements for SWITCH only; 'goto' is not
+ // supported.
+statement:
+ compound_statement { $$ = (ast_node *) $1; }
+ | simple_statement
+ ;
+
+simple_statement:
+ declaration_statement
+ | expression_statement
+ | selection_statement
+ | switch_statement
+ | iteration_statement
+ | jump_statement
+ | demote_statement
+ ;
+
+compound_statement:
+ '{' '}'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_compound_statement(true, NULL);
+ $$->set_location_range(@1, @2);
+ }
+ | '{'
+ {
+ state->symbols->push_scope();
+ }
+ statement_list '}'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_compound_statement(true, $3);
+ $$->set_location_range(@1, @4);
+ state->symbols->pop_scope();
+ }
+ ;
+
+statement_no_new_scope:
+ compound_statement_no_new_scope { $$ = (ast_node *) $1; }
+ | simple_statement
+ ;
+
+compound_statement_no_new_scope:
+ '{' '}'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_compound_statement(false, NULL);
+ $$->set_location_range(@1, @2);
+ }
+ | '{' statement_list '}'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_compound_statement(false, $2);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+statement_list:
+ statement
+ {
+ if ($1 == NULL) {
+ _mesa_glsl_error(& @1, state, "<nil> statement");
+ assert($1 != NULL);
+ }
+
+ $$ = $1;
+ $$->link.self_link();
+ }
+ | statement_list statement
+ {
+ if ($2 == NULL) {
+ _mesa_glsl_error(& @2, state, "<nil> statement");
+ assert($2 != NULL);
+ }
+ $$ = $1;
+ $$->link.insert_before(& $2->link);
+ }
+ | statement_list extension_statement
+ {
+ if (!state->allow_extension_directive_midshader) {
+ _mesa_glsl_error(& @1, state,
+ "#extension directive is not allowed "
+ "in the middle of a shader");
+ YYERROR;
+ }
+ }
+ ;
+
+expression_statement:
+ ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_statement(NULL);
+ $$->set_location(@1);
+ }
+ | expression ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_expression_statement($1);
+ $$->set_location(@1);
+ }
+ ;
+
+selection_statement:
+ IF '(' expression ')' selection_rest_statement
+ {
+ $$ = new(state->linalloc) ast_selection_statement($3, $5.then_statement,
+ $5.else_statement);
+ $$->set_location_range(@1, @5);
+ }
+ ;
+
+selection_rest_statement:
+ statement ELSE statement
+ {
+ $$.then_statement = $1;
+ $$.else_statement = $3;
+ }
+ | statement %prec THEN
+ {
+ $$.then_statement = $1;
+ $$.else_statement = NULL;
+ }
+ ;
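+
+/* The %prec THEN production resolves the dangling-else ambiguity in
+ * favor of the nearest "if": in the illustrative statement
+ * "if (a) if (b) x(); else y();" the "else" binds to "if (b)".
+ */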
+
+condition:
+ expression
+ {
+ $$ = (ast_node *) $1;
+ }
+ | fully_specified_type any_identifier '=' initializer
+ {
+ void *ctx = state->linalloc;
+ ast_declaration *decl = new(ctx) ast_declaration($2, NULL, $4);
+ ast_declarator_list *declarator = new(ctx) ast_declarator_list($1);
+ decl->set_location_range(@2, @4);
+ declarator->set_location(@1);
+
+ declarator->declarations.push_tail(&decl->link);
+ $$ = declarator;
+ }
+ ;
+
+/*
+ * The switch_statement grammar is based on the syntax described in the
+ * body of the GLSL spec, not in its appendix.
+ */
+switch_statement:
+ SWITCH '(' expression ')' switch_body
+ {
+ $$ = new(state->linalloc) ast_switch_statement($3, $5);
+ $$->set_location_range(@1, @5);
+ }
+ ;
+
+switch_body:
+ '{' '}'
+ {
+ $$ = new(state->linalloc) ast_switch_body(NULL);
+ $$->set_location_range(@1, @2);
+ }
+ | '{' case_statement_list '}'
+ {
+ $$ = new(state->linalloc) ast_switch_body($2);
+ $$->set_location_range(@1, @3);
+ }
+ ;
+
+case_label:
+ CASE expression ':'
+ {
+ $$ = new(state->linalloc) ast_case_label($2);
+ $$->set_location(@2);
+ }
+ | DEFAULT ':'
+ {
+ $$ = new(state->linalloc) ast_case_label(NULL);
+ $$->set_location(@2);
+ }
+ ;
+
+case_label_list:
+ case_label
+ {
+ ast_case_label_list *labels = new(state->linalloc) ast_case_label_list();
+
+ labels->labels.push_tail(& $1->link);
+ $$ = labels;
+ $$->set_location(@1);
+ }
+ | case_label_list case_label
+ {
+ $$ = $1;
+ $$->labels.push_tail(& $2->link);
+ }
+ ;
+
+case_statement:
+ case_label_list statement
+ {
+ ast_case_statement *stmts = new(state->linalloc) ast_case_statement($1);
+ stmts->set_location(@2);
+
+ stmts->stmts.push_tail(& $2->link);
+ $$ = stmts;
+ }
+ | case_statement statement
+ {
+ $$ = $1;
+ $$->stmts.push_tail(& $2->link);
+ }
+ ;
+
+case_statement_list:
+ case_statement
+ {
+ ast_case_statement_list *cases = new(state->linalloc) ast_case_statement_list();
+ cases->set_location(@1);
+
+ cases->cases.push_tail(& $1->link);
+ $$ = cases;
+ }
+ | case_statement_list case_statement
+ {
+ $$ = $1;
+ $$->cases.push_tail(& $2->link);
+ }
+ ;
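+
+/* Sketch of input matched by the switch rules above (names are
+ * hypothetical):
+ *
+ *    switch (mode) {
+ *    case 0:
+ *    case 1:
+ *       color = vec4(1.0);
+ *       break;
+ *    default:
+ *       color = vec4(0.0);
+ *    }
+ */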
+
+iteration_statement:
+ WHILE '(' condition ')' statement_no_new_scope
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_iteration_statement(ast_iteration_statement::ast_while,
+ NULL, $3, NULL, $5);
+ $$->set_location_range(@1, @4);
+ }
+ | DO statement WHILE '(' expression ')' ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_iteration_statement(ast_iteration_statement::ast_do_while,
+ NULL, $5, NULL, $2);
+ $$->set_location_range(@1, @6);
+ }
+ | FOR '(' for_init_statement for_rest_statement ')' statement_no_new_scope
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_iteration_statement(ast_iteration_statement::ast_for,
+ $3, $4.cond, $4.rest, $6);
+ $$->set_location_range(@1, @6);
+ }
+ ;
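+
+/* e.g. "for (int i = 0; i < 4; i++) sum += v[i];" -- the declaration
+ * becomes the for_init_statement while "i < 4" and "i++" land in
+ * for_rest_statement (illustrative loop).
+ */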
+
+for_init_statement:
+ expression_statement
+ | declaration_statement
+ ;
+
+conditionopt:
+ condition
+ | /* empty */
+ {
+ $$ = NULL;
+ }
+ ;
+
+for_rest_statement:
+ conditionopt ';'
+ {
+ $$.cond = $1;
+ $$.rest = NULL;
+ }
+ | conditionopt ';' expression
+ {
+ $$.cond = $1;
+ $$.rest = $3;
+ }
+ ;
+
+ // Grammar Note: 'goto' is not supported.
+jump_statement:
+ CONTINUE ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_jump_statement(ast_jump_statement::ast_continue, NULL);
+ $$->set_location(@1);
+ }
+ | BREAK ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_jump_statement(ast_jump_statement::ast_break, NULL);
+ $$->set_location(@1);
+ }
+ | RETURN ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_jump_statement(ast_jump_statement::ast_return, NULL);
+ $$->set_location(@1);
+ }
+ | RETURN expression ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_jump_statement(ast_jump_statement::ast_return, $2);
+ $$->set_location_range(@1, @2);
+ }
+ | DISCARD ';' // Fragment shader only.
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_jump_statement(ast_jump_statement::ast_discard, NULL);
+ $$->set_location(@1);
+ }
+ ;
+
+demote_statement:
+ DEMOTE ';'
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_demote_statement();
+ $$->set_location(@1);
+ }
+ ;
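+
+/* "demote;" comes from EXT_demote_to_helper_invocation; unlike
+ * "discard" it turns the fragment into a helper invocation but lets
+ * execution continue.
+ */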
+
+external_declaration:
+ function_definition { $$ = $1; }
+ | declaration { $$ = $1; }
+ | pragma_statement { $$ = $1; }
+ | layout_defaults { $$ = $1; }
+ | ';' { $$ = NULL; }
+ ;
+
+function_definition:
+ function_prototype compound_statement_no_new_scope
+ {
+ void *ctx = state->linalloc;
+ $$ = new(ctx) ast_function_definition();
+ $$->set_location_range(@1, @2);
+ $$->prototype = $1;
+ $$->body = $2;
+
+ state->symbols->pop_scope();
+ }
+ ;
+
+/* layout_qualifieropt is packed into this rule */
+interface_block:
+ basic_interface_block
+ {
+ $$ = $1;
+ }
+ | layout_qualifier interface_block
+ {
+ ast_interface_block *block = (ast_interface_block *) $2;
+
+ if (!$1.merge_qualifier(& @1, state, block->layout, false,
+ block->layout.has_layout())) {
+ YYERROR;
+ }
+
+ block->layout = $1;
+
+ $$ = block;
+ }
+ | memory_qualifier interface_block
+ {
+ ast_interface_block *block = (ast_interface_block *)$2;
+
+ if (!block->default_layout.flags.q.buffer) {
+ _mesa_glsl_error(& @1, state,
+ "memory qualifiers can only be used in the "
+ "declaration of shader storage blocks");
+ }
+ if (!$1.merge_qualifier(& @1, state, block->layout, false)) {
+ YYERROR;
+ }
+ block->layout = $1;
+ $$ = block;
+ }
+ ;
+
+basic_interface_block:
+ interface_qualifier NEW_IDENTIFIER '{' member_list '}' instance_name_opt ';'
+ {
+ ast_interface_block *const block = $6;
+
+ if ($1.flags.q.uniform) {
+ block->default_layout = *state->default_uniform_qualifier;
+ } else if ($1.flags.q.buffer) {
+ block->default_layout = *state->default_shader_storage_qualifier;
+ }
+ block->block_name = $2;
+ block->declarations.push_degenerate_list_at_head(& $4->link);
+
+ _mesa_ast_process_interface_block(& @1, state, block, $1);
+
+ $$ = block;
+ }
+ ;
+
+interface_qualifier:
+ IN_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.in = 1;
+ }
+ | OUT_TOK
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.out = 1;
+ }
+ | UNIFORM
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.uniform = 1;
+ }
+ | BUFFER
+ {
+ memset(& $$, 0, sizeof($$));
+ $$.flags.q.buffer = 1;
+ }
+ | auxiliary_storage_qualifier interface_qualifier
+ {
+ if (!$1.flags.q.patch) {
+ _mesa_glsl_error(&@1, state, "invalid interface qualifier");
+ }
+ if ($2.has_auxiliary_storage()) {
+ _mesa_glsl_error(&@1, state, "duplicate patch qualifier");
+ }
+ $$ = $2;
+ $$.flags.q.patch = 1;
+ }
+ ;
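+
+/* e.g. an illustrative shader storage block such as
+ *
+ *    layout(std430) buffer Particles { vec4 positions[]; };
+ *
+ * reaches basic_interface_block with the BUFFER interface qualifier.
+ */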
+
+instance_name_opt:
+ /* empty */
+ {
+ $$ = new(state->linalloc) ast_interface_block(NULL, NULL);
+ }
+ | NEW_IDENTIFIER
+ {
+ $$ = new(state->linalloc) ast_interface_block($1, NULL);
+ $$->set_location(@1);
+ }
+ | NEW_IDENTIFIER array_specifier
+ {
+ $$ = new(state->linalloc) ast_interface_block($1, $2);
+ $$->set_location_range(@1, @2);
+ }
+ ;
+
+member_list:
+ member_declaration
+ {
+ $$ = $1;
+ $1->link.self_link();
+ }
+ | member_declaration member_list
+ {
+ $$ = $1;
+ $2->link.insert_before(& $$->link);
+ }
+ ;
+
+member_declaration:
+ fully_specified_type struct_declarator_list ';'
+ {
+ void *ctx = state->linalloc;
+ ast_fully_specified_type *type = $1;
+ type->set_location(@1);
+
+ if (type->qualifier.flags.q.attribute) {
+ _mesa_glsl_error(& @1, state,
+ "keyword 'attribute' cannot be used with "
+ "interface block member");
+ } else if (type->qualifier.flags.q.varying) {
+ _mesa_glsl_error(& @1, state,
+ "keyword 'varying' cannot be used with "
+ "interface block member");
+ }
+
+ $$ = new(ctx) ast_declarator_list(type);
+ $$->set_location(@2);
+
+ $$->declarations.push_degenerate_list_at_head(& $2->link);
+ }
+ ;
+
+layout_uniform_defaults:
+ layout_qualifier layout_uniform_defaults
+ {
+ $$ = $1;
+ if (!$$.merge_qualifier(& @1, state, $2, false, true)) {
+ YYERROR;
+ }
+ }
+ | layout_qualifier UNIFORM ';'
+ ;
+
+layout_buffer_defaults:
+ layout_qualifier layout_buffer_defaults
+ {
+ $$ = $1;
+ if (!$$.merge_qualifier(& @1, state, $2, false, true)) {
+ YYERROR;
+ }
+ }
+ | layout_qualifier BUFFER ';'
+ ;
+
+layout_in_defaults:
+ layout_qualifier layout_in_defaults
+ {
+ $$ = $1;
+ if (!$$.merge_qualifier(& @1, state, $2, false, true)) {
+ YYERROR;
+ }
+ if (!$$.validate_in_qualifier(& @1, state)) {
+ YYERROR;
+ }
+ }
+ | layout_qualifier IN_TOK ';'
+ {
+ if (!$1.validate_in_qualifier(& @1, state)) {
+ YYERROR;
+ }
+ }
+ ;
+
+layout_out_defaults:
+ layout_qualifier layout_out_defaults
+ {
+ $$ = $1;
+ if (!$$.merge_qualifier(& @1, state, $2, false, true)) {
+ YYERROR;
+ }
+ if (!$$.validate_out_qualifier(& @1, state)) {
+ YYERROR;
+ }
+ }
+ | layout_qualifier OUT_TOK ';'
+ {
+ if (!$1.validate_out_qualifier(& @1, state)) {
+ YYERROR;
+ }
+ }
+ ;
+
+layout_defaults:
+ layout_uniform_defaults
+ {
+ $$ = NULL;
+ if (!state->default_uniform_qualifier->
+ merge_qualifier(& @1, state, $1, false)) {
+ YYERROR;
+ }
+ if (!state->default_uniform_qualifier->
+ push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ }
+ | layout_buffer_defaults
+ {
+ $$ = NULL;
+ if (!state->default_shader_storage_qualifier->
+ merge_qualifier(& @1, state, $1, false)) {
+ YYERROR;
+ }
+ if (!state->default_shader_storage_qualifier->
+ push_to_global(& @1, state)) {
+ YYERROR;
+ }
+
+ /* From the GLSL 4.50 spec, section 4.4.5:
+ *
+ * "It is a compile-time error to specify the binding identifier for
+ * the global scope or for block member declarations."
+ */
+ if (state->default_shader_storage_qualifier->flags.q.explicit_binding) {
+ _mesa_glsl_error(& @1, state,
+ "binding qualifier cannot be set for default layout");
+ }
+ }
+ | layout_in_defaults
+ {
+ $$ = NULL;
+ if (!$1.merge_into_in_qualifier(& @1, state, $$)) {
+ YYERROR;
+ }
+ if (!state->in_qualifier->push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ }
+ | layout_out_defaults
+ {
+ $$ = NULL;
+ if (!$1.merge_into_out_qualifier(& @1, state, $$)) {
+ YYERROR;
+ }
+ if (!state->out_qualifier->push_to_global(& @1, state)) {
+ YYERROR;
+ }
+ }
+ ;
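+
+/* Default-qualifier statements such as "layout(std140) uniform;" or,
+ * in a compute shader, "layout(local_size_x = 64) in;" are handled
+ * here; they update the global defaults instead of declaring anything
+ * (illustrative snippets).
+ */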
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp
new file mode 100644
index 0000000000..25c440d500
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp
@@ -0,0 +1,2411 @@
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <inttypes.h> /* for PRIx64 macro */
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <assert.h>
+
+#include "main/context.h"
+#include "main/debug_output.h"
+#include "main/formats.h"
+#include "main/shaderobj.h"
+#include "util/u_atomic.h" /* for p_atomic_cmpxchg */
+#include "util/ralloc.h"
+#include "util/disk_cache.h"
+#include "util/mesa-sha1.h"
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "glsl_parser.h"
+#include "ir_optimization.h"
+#include "loop_analysis.h"
+#include "builtin_functions.h"
+
+/**
+ * Format a short human-readable description of the given GLSL version.
+ */
+const char *
+glsl_compute_version_string(void *mem_ctx, bool is_es, unsigned version)
+{
+ return ralloc_asprintf(mem_ctx, "GLSL%s %d.%02d", is_es ? " ES" : "",
+ version / 100, version % 100);
+}
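+
+/* For example, glsl_compute_version_string(mem_ctx, true, 310) yields
+ * "GLSL ES 3.10" and (mem_ctx, false, 140) yields "GLSL 1.40".
+ */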
+
+
+static const unsigned known_desktop_glsl_versions[] =
+ { 110, 120, 130, 140, 150, 330, 400, 410, 420, 430, 440, 450, 460 };
+static const unsigned known_desktop_gl_versions[] =
+ { 20, 21, 30, 31, 32, 33, 40, 41, 42, 43, 44, 45, 46 };
+
+
+_mesa_glsl_parse_state::_mesa_glsl_parse_state(struct gl_context *_ctx,
+ gl_shader_stage stage,
+ void *mem_ctx)
+ : ctx(_ctx), cs_input_local_size_specified(false), cs_input_local_size(),
+ switch_state(), warnings_enabled(true)
+{
+ assert(stage < MESA_SHADER_STAGES);
+ this->stage = stage;
+
+ this->scanner = NULL;
+ this->translation_unit.make_empty();
+ this->symbols = new(mem_ctx) glsl_symbol_table;
+
+ this->linalloc = linear_alloc_parent(this, 0);
+
+ this->info_log = ralloc_strdup(mem_ctx, "");
+ this->error = false;
+ this->loop_nesting_ast = NULL;
+
+ this->uses_builtin_functions = false;
+
+ /* Set default language version and extensions */
+ this->language_version = 110;
+ this->forced_language_version = ctx->Const.ForceGLSLVersion;
+ this->zero_init = ctx->Const.GLSLZeroInit;
+ this->gl_version = 20;
+ this->compat_shader = true;
+ this->es_shader = false;
+ this->had_version_string = false;
+ this->ARB_texture_rectangle_enable = true;
+
+ /* OpenGL ES 2.0 has different defaults from desktop GL. */
+ if (ctx->API == API_OPENGLES2) {
+ this->language_version = 100;
+ this->es_shader = true;
+ this->ARB_texture_rectangle_enable = false;
+ }
+
+ this->extensions = &ctx->Extensions;
+
+ this->Const.MaxLights = ctx->Const.MaxLights;
+ this->Const.MaxClipPlanes = ctx->Const.MaxClipPlanes;
+ this->Const.MaxTextureUnits = ctx->Const.MaxTextureUnits;
+ this->Const.MaxTextureCoords = ctx->Const.MaxTextureCoordUnits;
+ this->Const.MaxVertexAttribs = ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs;
+ this->Const.MaxVertexUniformComponents = ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents;
+ this->Const.MaxVertexTextureImageUnits = ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits;
+ this->Const.MaxCombinedTextureImageUnits = ctx->Const.MaxCombinedTextureImageUnits;
+ this->Const.MaxTextureImageUnits = ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
+ this->Const.MaxFragmentUniformComponents = ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents;
+ this->Const.MinProgramTexelOffset = ctx->Const.MinProgramTexelOffset;
+ this->Const.MaxProgramTexelOffset = ctx->Const.MaxProgramTexelOffset;
+
+ this->Const.MaxDrawBuffers = ctx->Const.MaxDrawBuffers;
+
+ this->Const.MaxDualSourceDrawBuffers = ctx->Const.MaxDualSourceDrawBuffers;
+
+ /* 1.50 constants */
+ this->Const.MaxVertexOutputComponents = ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents;
+ this->Const.MaxGeometryInputComponents = ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents;
+ this->Const.MaxGeometryOutputComponents = ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents;
+ this->Const.MaxGeometryShaderInvocations = ctx->Const.MaxGeometryShaderInvocations;
+ this->Const.MaxFragmentInputComponents = ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents;
+ this->Const.MaxGeometryTextureImageUnits = ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits;
+ this->Const.MaxGeometryOutputVertices = ctx->Const.MaxGeometryOutputVertices;
+ this->Const.MaxGeometryTotalOutputComponents = ctx->Const.MaxGeometryTotalOutputComponents;
+ this->Const.MaxGeometryUniformComponents = ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxUniformComponents;
+
+ this->Const.MaxVertexAtomicCounters = ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicCounters;
+ this->Const.MaxTessControlAtomicCounters = ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxAtomicCounters;
+ this->Const.MaxTessEvaluationAtomicCounters = ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxAtomicCounters;
+ this->Const.MaxGeometryAtomicCounters = ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicCounters;
+ this->Const.MaxFragmentAtomicCounters = ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicCounters;
+ this->Const.MaxComputeAtomicCounters = ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters;
+ this->Const.MaxCombinedAtomicCounters = ctx->Const.MaxCombinedAtomicCounters;
+ this->Const.MaxAtomicBufferBindings = ctx->Const.MaxAtomicBufferBindings;
+ this->Const.MaxVertexAtomicCounterBuffers =
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAtomicBuffers;
+ this->Const.MaxTessControlAtomicCounterBuffers =
+ ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxAtomicBuffers;
+ this->Const.MaxTessEvaluationAtomicCounterBuffers =
+ ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxAtomicBuffers;
+ this->Const.MaxGeometryAtomicCounterBuffers =
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxAtomicBuffers;
+ this->Const.MaxFragmentAtomicCounterBuffers =
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxAtomicBuffers;
+ this->Const.MaxComputeAtomicCounterBuffers =
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers;
+ this->Const.MaxCombinedAtomicCounterBuffers =
+ ctx->Const.MaxCombinedAtomicBuffers;
+ this->Const.MaxAtomicCounterBufferSize =
+ ctx->Const.MaxAtomicBufferSize;
+
+ /* ARB_enhanced_layouts constants */
+ this->Const.MaxTransformFeedbackBuffers = ctx->Const.MaxTransformFeedbackBuffers;
+ this->Const.MaxTransformFeedbackInterleavedComponents = ctx->Const.MaxTransformFeedbackInterleavedComponents;
+
+ /* Compute shader constants */
+ for (unsigned i = 0; i < ARRAY_SIZE(this->Const.MaxComputeWorkGroupCount); i++)
+ this->Const.MaxComputeWorkGroupCount[i] = ctx->Const.MaxComputeWorkGroupCount[i];
+ for (unsigned i = 0; i < ARRAY_SIZE(this->Const.MaxComputeWorkGroupSize); i++)
+ this->Const.MaxComputeWorkGroupSize[i] = ctx->Const.MaxComputeWorkGroupSize[i];
+
+ this->Const.MaxComputeTextureImageUnits = ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits;
+ this->Const.MaxComputeUniformComponents = ctx->Const.Program[MESA_SHADER_COMPUTE].MaxUniformComponents;
+
+ this->Const.MaxImageUnits = ctx->Const.MaxImageUnits;
+ this->Const.MaxCombinedShaderOutputResources = ctx->Const.MaxCombinedShaderOutputResources;
+ this->Const.MaxImageSamples = ctx->Const.MaxImageSamples;
+ this->Const.MaxVertexImageUniforms = ctx->Const.Program[MESA_SHADER_VERTEX].MaxImageUniforms;
+ this->Const.MaxTessControlImageUniforms = ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxImageUniforms;
+ this->Const.MaxTessEvaluationImageUniforms = ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxImageUniforms;
+ this->Const.MaxGeometryImageUniforms = ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxImageUniforms;
+ this->Const.MaxFragmentImageUniforms = ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxImageUniforms;
+ this->Const.MaxComputeImageUniforms = ctx->Const.Program[MESA_SHADER_COMPUTE].MaxImageUniforms;
+ this->Const.MaxCombinedImageUniforms = ctx->Const.MaxCombinedImageUniforms;
+
+ /* ARB_viewport_array */
+ this->Const.MaxViewports = ctx->Const.MaxViewports;
+
+ /* tessellation shader constants */
+ this->Const.MaxPatchVertices = ctx->Const.MaxPatchVertices;
+ this->Const.MaxTessGenLevel = ctx->Const.MaxTessGenLevel;
+ this->Const.MaxTessControlInputComponents = ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxInputComponents;
+ this->Const.MaxTessControlOutputComponents = ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxOutputComponents;
+ this->Const.MaxTessControlTextureImageUnits = ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxTextureImageUnits;
+ this->Const.MaxTessEvaluationInputComponents = ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxInputComponents;
+ this->Const.MaxTessEvaluationOutputComponents = ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxOutputComponents;
+ this->Const.MaxTessEvaluationTextureImageUnits = ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxTextureImageUnits;
+ this->Const.MaxTessPatchComponents = ctx->Const.MaxTessPatchComponents;
+ this->Const.MaxTessControlTotalOutputComponents = ctx->Const.MaxTessControlTotalOutputComponents;
+ this->Const.MaxTessControlUniformComponents = ctx->Const.Program[MESA_SHADER_TESS_CTRL].MaxUniformComponents;
+ this->Const.MaxTessEvaluationUniformComponents = ctx->Const.Program[MESA_SHADER_TESS_EVAL].MaxUniformComponents;
+
+ /* GL 4.5 / OES_sample_variables */
+ this->Const.MaxSamples = ctx->Const.MaxSamples;
+
+ this->current_function = NULL;
+ this->toplevel_ir = NULL;
+ this->found_return = false;
+ this->found_begin_interlock = false;
+ this->found_end_interlock = false;
+ this->all_invariant = false;
+ this->user_structures = NULL;
+ this->num_user_structures = 0;
+ this->num_subroutines = 0;
+ this->subroutines = NULL;
+ this->num_subroutine_types = 0;
+ this->subroutine_types = NULL;
+
+ /* supported_versions should be large enough to support the known desktop
+ * GLSL versions plus 4 GLES versions (ES 1.00, ES 3.00, ES 3.10, ES 3.20)
+ */
+ STATIC_ASSERT((ARRAY_SIZE(known_desktop_glsl_versions) + 4) ==
+ ARRAY_SIZE(this->supported_versions));
+
+ /* Populate the list of supported GLSL versions */
+ /* FINISHME: Once the OpenGL 3.0 'forward compatible' context or
+ * the OpenGL 3.2 Core context is supported, this logic will need to
+ * change. Older versions of GLSL are no longer supported
+ * outside the compatibility contexts of 3.x.
+ */
+ this->num_supported_versions = 0;
+ if (_mesa_is_desktop_gl(ctx)) {
+ for (unsigned i = 0; i < ARRAY_SIZE(known_desktop_glsl_versions); i++) {
+ if (known_desktop_glsl_versions[i] <= ctx->Const.GLSLVersion) {
+ this->supported_versions[this->num_supported_versions].ver
+ = known_desktop_glsl_versions[i];
+ this->supported_versions[this->num_supported_versions].gl_ver
+ = known_desktop_gl_versions[i];
+ this->supported_versions[this->num_supported_versions].es = false;
+ this->num_supported_versions++;
+ }
+ }
+ }
+ if (ctx->API == API_OPENGLES2 || ctx->Extensions.ARB_ES2_compatibility) {
+ this->supported_versions[this->num_supported_versions].ver = 100;
+ this->supported_versions[this->num_supported_versions].gl_ver = 20;
+ this->supported_versions[this->num_supported_versions].es = true;
+ this->num_supported_versions++;
+ }
+ if (_mesa_is_gles3(ctx) || ctx->Extensions.ARB_ES3_compatibility) {
+ this->supported_versions[this->num_supported_versions].ver = 300;
+ this->supported_versions[this->num_supported_versions].gl_ver = 30;
+ this->supported_versions[this->num_supported_versions].es = true;
+ this->num_supported_versions++;
+ }
+ if (_mesa_is_gles31(ctx) || ctx->Extensions.ARB_ES3_1_compatibility) {
+ this->supported_versions[this->num_supported_versions].ver = 310;
+ this->supported_versions[this->num_supported_versions].gl_ver = 31;
+ this->supported_versions[this->num_supported_versions].es = true;
+ this->num_supported_versions++;
+ }
+ if ((ctx->API == API_OPENGLES2 && ctx->Version >= 32) ||
+ ctx->Extensions.ARB_ES3_2_compatibility) {
+ this->supported_versions[this->num_supported_versions].ver = 320;
+ this->supported_versions[this->num_supported_versions].gl_ver = 32;
+ this->supported_versions[this->num_supported_versions].es = true;
+ this->num_supported_versions++;
+ }
+
+ /* Create a string for use in error messages to tell the user which GLSL
+ * versions are supported.
+ */
+ char *supported = ralloc_strdup(this, "");
+ for (unsigned i = 0; i < this->num_supported_versions; i++) {
+ unsigned ver = this->supported_versions[i].ver;
+ const char *const prefix = (i == 0)
+ ? ""
+ : ((i == this->num_supported_versions - 1) ? ", and " : ", ");
+ const char *const suffix = (this->supported_versions[i].es) ? " ES" : "";
+
+ ralloc_asprintf_append(& supported, "%s%u.%02u%s",
+ prefix,
+ ver / 100, ver % 100,
+ suffix);
+ }
+
+ this->supported_version_string = supported;
+
+ if (ctx->Const.ForceGLSLExtensionsWarn)
+ _mesa_glsl_process_extension("all", NULL, "warn", NULL, this);
+
+ this->default_uniform_qualifier = new(this) ast_type_qualifier();
+ this->default_uniform_qualifier->flags.q.shared = 1;
+ this->default_uniform_qualifier->flags.q.column_major = 1;
+
+ this->default_shader_storage_qualifier = new(this) ast_type_qualifier();
+ this->default_shader_storage_qualifier->flags.q.shared = 1;
+ this->default_shader_storage_qualifier->flags.q.column_major = 1;
+
+ this->fs_uses_gl_fragcoord = false;
+ this->fs_redeclares_gl_fragcoord = false;
+ this->fs_origin_upper_left = false;
+ this->fs_pixel_center_integer = false;
+ this->fs_redeclares_gl_fragcoord_with_no_layout_qualifiers = false;
+
+ this->gs_input_prim_type_specified = false;
+ this->tcs_output_vertices_specified = false;
+ this->gs_input_size = 0;
+ this->in_qualifier = new(this) ast_type_qualifier();
+ this->out_qualifier = new(this) ast_type_qualifier();
+ this->fs_early_fragment_tests = false;
+ this->fs_inner_coverage = false;
+ this->fs_post_depth_coverage = false;
+ this->fs_pixel_interlock_ordered = false;
+ this->fs_pixel_interlock_unordered = false;
+ this->fs_sample_interlock_ordered = false;
+ this->fs_sample_interlock_unordered = false;
+ this->fs_blend_support = 0;
+ memset(this->atomic_counter_offsets, 0,
+ sizeof(this->atomic_counter_offsets));
+ this->allow_extension_directive_midshader =
+ ctx->Const.AllowGLSLExtensionDirectiveMidShader;
+ this->allow_builtin_variable_redeclaration =
+ ctx->Const.AllowGLSLBuiltinVariableRedeclaration;
+ this->allow_layout_qualifier_on_function_parameter =
+ ctx->Const.AllowLayoutQualifiersOnFunctionParameters;
+
+ this->cs_input_local_size_variable_specified = false;
+
+ /* ARB_bindless_texture */
+ this->bindless_sampler_specified = false;
+ this->bindless_image_specified = false;
+ this->bound_sampler_specified = false;
+ this->bound_image_specified = false;
+}
+
+/**
+ * Determine whether the current GLSL version is sufficiently high to support
+ * a certain feature, and generate an error message if it isn't.
+ *
+ * \param required_glsl_version and \c required_glsl_es_version are
+ * interpreted as they are in _mesa_glsl_parse_state::is_version().
+ *
+ * \param locp is the parser location where the error should be reported.
+ *
+ * \param fmt (and additional arguments) constitute a printf-style error
+ * message to report if the version check fails. Information about the
+ * current and required GLSL versions will be appended. So, for example, if
+ * the GLSL version being compiled is 1.20, and check_version(130, 300, locp,
+ * "foo unsupported") is called, the error message will be "foo unsupported in
+ * GLSL 1.20 (GLSL 1.30 or GLSL 3.00 ES required)".
+ */
+bool
+_mesa_glsl_parse_state::check_version(unsigned required_glsl_version,
+ unsigned required_glsl_es_version,
+ YYLTYPE *locp, const char *fmt, ...)
+{
+ if (this->is_version(required_glsl_version, required_glsl_es_version))
+ return true;
+
+ va_list args;
+ va_start(args, fmt);
+ char *problem = ralloc_vasprintf(this, fmt, args);
+ va_end(args);
+ const char *glsl_version_string
+ = glsl_compute_version_string(this, false, required_glsl_version);
+ const char *glsl_es_version_string
+ = glsl_compute_version_string(this, true, required_glsl_es_version);
+ const char *requirement_string = "";
+ if (required_glsl_version && required_glsl_es_version) {
+ requirement_string = ralloc_asprintf(this, " (%s or %s required)",
+ glsl_version_string,
+ glsl_es_version_string);
+ } else if (required_glsl_version) {
+ requirement_string = ralloc_asprintf(this, " (%s required)",
+ glsl_version_string);
+ } else if (required_glsl_es_version) {
+ requirement_string = ralloc_asprintf(this, " (%s required)",
+ glsl_es_version_string);
+ }
+ _mesa_glsl_error(locp, this, "%s in %s%s",
+ problem, this->get_version_string(),
+ requirement_string);
+
+ return false;
+}
+
+/**
+ * Process a GLSL #version directive.
+ *
+ * \param version is the integer that follows the #version token.
+ *
+ * \param ident is a string identifier that follows the integer, if any is
+ * present. Otherwise NULL.
+ */
+void
+_mesa_glsl_parse_state::process_version_directive(YYLTYPE *locp, int version,
+ const char *ident)
+{
+ bool es_token_present = false;
+ bool compat_token_present = false;
+ if (ident) {
+ if (strcmp(ident, "es") == 0) {
+ es_token_present = true;
+ } else if (version >= 150) {
+ if (strcmp(ident, "core") == 0) {
+ /* Accept the token. There's no need to record that this is
+ * a core profile shader since that's the only profile we support.
+ */
+ } else if (strcmp(ident, "compatibility") == 0) {
+ compat_token_present = true;
+
+ if (this->ctx->API != API_OPENGL_COMPAT) {
+ _mesa_glsl_error(locp, this,
+ "the compatibility profile is not supported");
+ }
+ } else {
+ _mesa_glsl_error(locp, this,
+ "\"%s\" is not a valid shading language profile; "
+ "if present, it must be \"core\" or "
+ "\"compatibility\"", ident);
+ }
+ } else {
+ _mesa_glsl_error(locp, this,
+ "illegal text following version number");
+ }
+ }
+
+ this->es_shader = es_token_present;
+ if (version == 100) {
+ if (es_token_present) {
+ _mesa_glsl_error(locp, this,
+ "GLSL 1.00 ES should be selected using "
+ "`#version 100'");
+ } else {
+ this->es_shader = true;
+ }
+ }
+
+ if (this->es_shader) {
+ this->ARB_texture_rectangle_enable = false;
+ }
+
+ if (this->forced_language_version)
+ this->language_version = this->forced_language_version;
+ else
+ this->language_version = version;
+ this->had_version_string = true;
+
+ this->compat_shader = compat_token_present ||
+ (this->ctx->API == API_OPENGL_COMPAT &&
+ this->language_version == 140) ||
+ (!this->es_shader && this->language_version < 140);
+
+ bool supported = false;
+ for (unsigned i = 0; i < this->num_supported_versions; i++) {
+ if (this->supported_versions[i].ver == this->language_version
+ && this->supported_versions[i].es == this->es_shader) {
+ this->gl_version = this->supported_versions[i].gl_ver;
+ supported = true;
+ break;
+ }
+ }
+
+ if (!supported) {
+ _mesa_glsl_error(locp, this, "%s is not supported. "
+ "Supported versions are: %s",
+ this->get_version_string(),
+ this->supported_version_string);
+
+ /* On exit, the language_version must be set to a valid value.
+ * Later calls to _mesa_glsl_initialize_types will misbehave if
+ * the version is invalid.
+ */
+ switch (this->ctx->API) {
+ case API_OPENGL_COMPAT:
+ case API_OPENGL_CORE:
+ this->language_version = this->ctx->Const.GLSLVersion;
+ break;
+
+ case API_OPENGLES:
+ assert(!"Should not get here.");
+ /* FALLTHROUGH */
+
+ case API_OPENGLES2:
+ this->language_version = 100;
+ break;
+ }
+ }
+}
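+
+/* e.g. "#version 450 core", "#version 150 compatibility" and
+ * "#version 300 es" all pass through here; a bare "#version 100"
+ * implicitly selects the ES profile, as handled above.
+ */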
+
+
+/* This helper function appends the given message to the shader's
+ info log and reports it via GL_ARB_debug_output. Per that extension,
+ 'type' is one of the enum values classifying the message; the message
+ ID is assigned by _mesa_shader_debug(). */
+static void
+_mesa_glsl_msg(const YYLTYPE *locp, _mesa_glsl_parse_state *state,
+ GLenum type, const char *fmt, va_list ap)
+{
+ bool error = (type == MESA_DEBUG_TYPE_ERROR);
+ GLuint msg_id = 0;
+
+ assert(state->info_log != NULL);
+
+ /* Get the offset that the new message will be written to. */
+ int msg_offset = strlen(state->info_log);
+
+ if (locp->path) {
+ ralloc_asprintf_append(&state->info_log, "\"%s\"", locp->path);
+ } else {
+ ralloc_asprintf_append(&state->info_log, "%u", locp->source);
+ }
+ ralloc_asprintf_append(&state->info_log, ":%u(%u): %s: ",
+ locp->first_line, locp->first_column,
+ error ? "error" : "warning");
+
+ ralloc_vasprintf_append(&state->info_log, fmt, ap);
+
+ const char *const msg = &state->info_log[msg_offset];
+ struct gl_context *ctx = state->ctx;
+
+ /* Report the error via GL_ARB_debug_output. */
+ _mesa_shader_debug(ctx, type, &msg_id, msg);
+
+ ralloc_strcat(&state->info_log, "\n");
+}
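+
+/* An illustrative resulting prefix is "0:12(3): error: " -- source
+ * string 0, line 12, column 3 -- when no path is associated with the
+ * location.
+ */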
+
+void
+_mesa_glsl_error(YYLTYPE *locp, _mesa_glsl_parse_state *state,
+ const char *fmt, ...)
+{
+ va_list ap;
+
+ state->error = true;
+
+ va_start(ap, fmt);
+ _mesa_glsl_msg(locp, state, MESA_DEBUG_TYPE_ERROR, fmt, ap);
+ va_end(ap);
+}
+
+
+void
+_mesa_glsl_warning(const YYLTYPE *locp, _mesa_glsl_parse_state *state,
+ const char *fmt, ...)
+{
+ if (state->warnings_enabled) {
+ va_list ap;
+
+ va_start(ap, fmt);
+ _mesa_glsl_msg(locp, state, MESA_DEBUG_TYPE_OTHER, fmt, ap);
+ va_end(ap);
+ }
+}
+
+
+/**
+ * Enum representing the possible behaviors that can be specified in
+ * an #extension directive.
+ */
+enum ext_behavior {
+ extension_disable,
+ extension_enable,
+ extension_require,
+ extension_warn
+};
+
+/**
+ * Element type for _mesa_glsl_supported_extensions
+ */
+struct _mesa_glsl_extension {
+ /**
+ * Name of the extension when referred to in a GLSL extension
+ * statement
+ */
+ const char *name;
+
+ /**
+ * Whether this extension is a part of AEP
+ */
+ bool aep;
+
+ /**
+ * Predicate that checks whether the relevant extension is available for
+ * this context.
+ */
+ bool (*available_pred)(const struct gl_context *,
+ gl_api api, uint8_t version);
+
+ /**
+ * Flag in the _mesa_glsl_parse_state struct that should be set
+ * when this extension is enabled.
+ *
+ * See note in _mesa_glsl_extension::supported_flag about "pointer
+ * to member" types.
+ */
+ bool _mesa_glsl_parse_state::* enable_flag;
+
+ /**
+ * Flag in the _mesa_glsl_parse_state struct that should be set
+ * when the shader requests "warn" behavior for this extension.
+ *
+ * See note in _mesa_glsl_extension::supported_flag about "pointer
+ * to member" types.
+ */
+ bool _mesa_glsl_parse_state::* warn_flag;
+
+
+ bool compatible_with_state(const _mesa_glsl_parse_state *state,
+ gl_api api, uint8_t gl_version) const;
+ void set_flags(_mesa_glsl_parse_state *state, ext_behavior behavior) const;
+};
+
+/** Checks if the context supports a user-facing extension */
+#define EXT(name_str, driver_cap, ...) \
+static UNUSED bool \
+has_##name_str(const struct gl_context *ctx, gl_api api, uint8_t version) \
+{ \
+ return ctx->Extensions.driver_cap && (version >= \
+ _mesa_extension_table[MESA_EXTENSION_##name_str].version[api]); \
+}
+#include "main/extensions_table.h"
+#undef EXT
+
+#define EXT(NAME) \
+ { "GL_" #NAME, false, has_##NAME, \
+ &_mesa_glsl_parse_state::NAME##_enable, \
+ &_mesa_glsl_parse_state::NAME##_warn }
+
+#define EXT_AEP(NAME) \
+ { "GL_" #NAME, true, has_##NAME, \
+ &_mesa_glsl_parse_state::NAME##_enable, \
+ &_mesa_glsl_parse_state::NAME##_warn }
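+
+/* As a sketch, EXT(ARB_gpu_shader5) expands to the table entry
+ *
+ *    { "GL_ARB_gpu_shader5", false, has_ARB_gpu_shader5,
+ *      &_mesa_glsl_parse_state::ARB_gpu_shader5_enable,
+ *      &_mesa_glsl_parse_state::ARB_gpu_shader5_warn }
+ */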
+
+/**
+ * Table of extensions that can be enabled/disabled within a shader,
+ * and the conditions under which they are supported.
+ */
+static const _mesa_glsl_extension _mesa_glsl_supported_extensions[] = {
+ /* ARB extensions go here, sorted alphabetically.
+ */
+ EXT(ARB_ES3_1_compatibility),
+ EXT(ARB_ES3_2_compatibility),
+ EXT(ARB_arrays_of_arrays),
+ EXT(ARB_bindless_texture),
+ EXT(ARB_compatibility),
+ EXT(ARB_compute_shader),
+ EXT(ARB_compute_variable_group_size),
+ EXT(ARB_conservative_depth),
+ EXT(ARB_cull_distance),
+ EXT(ARB_derivative_control),
+ EXT(ARB_draw_buffers),
+ EXT(ARB_draw_instanced),
+ EXT(ARB_enhanced_layouts),
+ EXT(ARB_explicit_attrib_location),
+ EXT(ARB_explicit_uniform_location),
+ EXT(ARB_fragment_coord_conventions),
+ EXT(ARB_fragment_layer_viewport),
+ EXT(ARB_fragment_shader_interlock),
+ EXT(ARB_gpu_shader5),
+ EXT(ARB_gpu_shader_fp64),
+ EXT(ARB_gpu_shader_int64),
+ EXT(ARB_post_depth_coverage),
+ EXT(ARB_sample_shading),
+ EXT(ARB_separate_shader_objects),
+ EXT(ARB_shader_atomic_counter_ops),
+ EXT(ARB_shader_atomic_counters),
+ EXT(ARB_shader_ballot),
+ EXT(ARB_shader_bit_encoding),
+ EXT(ARB_shader_clock),
+ EXT(ARB_shader_draw_parameters),
+ EXT(ARB_shader_group_vote),
+ EXT(ARB_shader_image_load_store),
+ EXT(ARB_shader_image_size),
+ EXT(ARB_shader_precision),
+ EXT(ARB_shader_stencil_export),
+ EXT(ARB_shader_storage_buffer_object),
+ EXT(ARB_shader_subroutine),
+ EXT(ARB_shader_texture_image_samples),
+ EXT(ARB_shader_texture_lod),
+ EXT(ARB_shader_viewport_layer_array),
+ EXT(ARB_shading_language_420pack),
+ EXT(ARB_shading_language_include),
+ EXT(ARB_shading_language_packing),
+ EXT(ARB_tessellation_shader),
+ EXT(ARB_texture_cube_map_array),
+ EXT(ARB_texture_gather),
+ EXT(ARB_texture_multisample),
+ EXT(ARB_texture_query_levels),
+ EXT(ARB_texture_query_lod),
+ EXT(ARB_texture_rectangle),
+ EXT(ARB_uniform_buffer_object),
+ EXT(ARB_vertex_attrib_64bit),
+ EXT(ARB_viewport_array),
+
+ /* KHR extensions go here, sorted alphabetically.
+ */
+ EXT_AEP(KHR_blend_equation_advanced),
+
+ /* OES extensions go here, sorted alphabetically.
+ */
+ EXT(OES_EGL_image_external),
+ EXT(OES_EGL_image_external_essl3),
+ EXT(OES_geometry_point_size),
+ EXT(OES_geometry_shader),
+ EXT(OES_gpu_shader5),
+ EXT(OES_primitive_bounding_box),
+ EXT_AEP(OES_sample_variables),
+ EXT_AEP(OES_shader_image_atomic),
+ EXT(OES_shader_io_blocks),
+ EXT_AEP(OES_shader_multisample_interpolation),
+ EXT(OES_standard_derivatives),
+ EXT(OES_tessellation_point_size),
+ EXT(OES_tessellation_shader),
+ EXT(OES_texture_3D),
+ EXT(OES_texture_buffer),
+ EXT(OES_texture_cube_map_array),
+ EXT_AEP(OES_texture_storage_multisample_2d_array),
+ EXT(OES_viewport_array),
+
+ /* All other extensions go here, sorted alphabetically.
+ */
+ EXT(AMD_conservative_depth),
+ EXT(AMD_gpu_shader_int64),
+ EXT(AMD_shader_stencil_export),
+ EXT(AMD_shader_trinary_minmax),
+ EXT(AMD_texture_texture4),
+ EXT(AMD_vertex_shader_layer),
+ EXT(AMD_vertex_shader_viewport_index),
+ EXT(ANDROID_extension_pack_es31a),
+ EXT(EXT_blend_func_extended),
+ EXT(EXT_clip_cull_distance),
+ EXT(EXT_demote_to_helper_invocation),
+ EXT(EXT_draw_buffers),
+ EXT(EXT_draw_instanced),
+ EXT(EXT_frag_depth),
+ EXT(EXT_geometry_point_size),
+ EXT_AEP(EXT_geometry_shader),
+ EXT(EXT_gpu_shader4),
+ EXT_AEP(EXT_gpu_shader5),
+ EXT_AEP(EXT_primitive_bounding_box),
+ EXT(EXT_separate_shader_objects),
+ EXT(EXT_shader_framebuffer_fetch),
+ EXT(EXT_shader_framebuffer_fetch_non_coherent),
+ EXT(EXT_shader_image_load_formatted),
+ EXT(EXT_shader_image_load_store),
+ EXT(EXT_shader_implicit_conversions),
+ EXT(EXT_shader_integer_mix),
+ EXT_AEP(EXT_shader_io_blocks),
+ EXT(EXT_shader_samples_identical),
+ EXT(EXT_tessellation_point_size),
+ EXT_AEP(EXT_tessellation_shader),
+ EXT(EXT_texture_array),
+ EXT_AEP(EXT_texture_buffer),
+ EXT_AEP(EXT_texture_cube_map_array),
+ EXT(EXT_texture_query_lod),
+ EXT(EXT_texture_shadow_lod),
+ EXT(INTEL_conservative_rasterization),
+ EXT(INTEL_shader_atomic_float_minmax),
+ EXT(INTEL_shader_integer_functions2),
+ EXT(MESA_shader_integer_functions),
+ EXT(NV_compute_shader_derivatives),
+ EXT(NV_fragment_shader_interlock),
+ EXT(NV_image_formats),
+ EXT(NV_shader_atomic_float),
+ EXT(NV_viewport_array2),
+};
+
+#undef EXT
+
+
+/**
+ * Determine whether a given extension is compatible with the target,
+ * API, and extension information in the current parser state.
+ */
+bool _mesa_glsl_extension::compatible_with_state(
+ const _mesa_glsl_parse_state *state, gl_api api, uint8_t gl_version) const
+{
+ return this->available_pred(state->ctx, api, gl_version);
+}
+
+/**
+ * Set the appropriate flags in the parser state to establish the
+ * given behavior for this extension.
+ */
+void _mesa_glsl_extension::set_flags(_mesa_glsl_parse_state *state,
+ ext_behavior behavior) const
+{
+ /* Note: the ->* operator indexes into state by the
+ * offsets this->enable_flag and this->warn_flag. See
+ * _mesa_glsl_extension::supported_flag for more info.
+ */
+ state->*(this->enable_flag) = (behavior != extension_disable);
+ state->*(this->warn_flag) = (behavior == extension_warn);
+}
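+
+/* For example, requesting "warn" behavior for GL_ARB_gpu_shader5 amounts to:
+ *
+ *    state->ARB_gpu_shader5_enable = true;
+ *    state->ARB_gpu_shader5_warn = true;
+ */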
+
+/**
+ * Find an extension by name in _mesa_glsl_supported_extensions. If
+ * the name is not found, return NULL.
+ */
+static const _mesa_glsl_extension *find_extension(const char *name)
+{
+ for (unsigned i = 0; i < ARRAY_SIZE(_mesa_glsl_supported_extensions); ++i) {
+ if (strcmp(name, _mesa_glsl_supported_extensions[i].name) == 0) {
+ return &_mesa_glsl_supported_extensions[i];
+ }
+ }
+ return NULL;
+}
+
+bool
+_mesa_glsl_process_extension(const char *name, YYLTYPE *name_locp,
+ const char *behavior_string, YYLTYPE *behavior_locp,
+ _mesa_glsl_parse_state *state)
+{
+ uint8_t gl_version = state->ctx->Extensions.Version;
+ gl_api api = state->ctx->API;
+ ext_behavior behavior;
+ if (strcmp(behavior_string, "warn") == 0) {
+ behavior = extension_warn;
+ } else if (strcmp(behavior_string, "require") == 0) {
+ behavior = extension_require;
+ } else if (strcmp(behavior_string, "enable") == 0) {
+ behavior = extension_enable;
+ } else if (strcmp(behavior_string, "disable") == 0) {
+ behavior = extension_disable;
+ } else {
+ _mesa_glsl_error(behavior_locp, state,
+ "unknown extension behavior `%s'",
+ behavior_string);
+ return false;
+ }
+
+ /* If we're in a desktop context but with an ES shader, use an ES API enum
+ * to verify extension availability.
+ */
+ if (state->es_shader && api != API_OPENGLES2)
+ api = API_OPENGLES2;
+ /* Use the language-version derived GL version for extension checks, unless
+ * we're using meta, which sets the version to the max.
+ */
+ if (gl_version != 0xff)
+ gl_version = state->gl_version;
+
+ if (strcmp(name, "all") == 0) {
+ if ((behavior == extension_enable) || (behavior == extension_require)) {
+ _mesa_glsl_error(name_locp, state, "cannot %s all extensions",
+ (behavior == extension_enable)
+ ? "enable" : "require");
+ return false;
+ } else {
+ for (unsigned i = 0;
+ i < ARRAY_SIZE(_mesa_glsl_supported_extensions); ++i) {
+ const _mesa_glsl_extension *extension
+ = &_mesa_glsl_supported_extensions[i];
+ if (extension->compatible_with_state(state, api, gl_version)) {
+ _mesa_glsl_supported_extensions[i].set_flags(state, behavior);
+ }
+ }
+ }
+ } else {
+ const _mesa_glsl_extension *extension = find_extension(name);
+ if (extension && extension->compatible_with_state(state, api, gl_version)) {
+ extension->set_flags(state, behavior);
+ if (extension->available_pred == has_ANDROID_extension_pack_es31a) {
+ for (unsigned i = 0;
+ i < ARRAY_SIZE(_mesa_glsl_supported_extensions); ++i) {
+ const _mesa_glsl_extension *extension =
+ &_mesa_glsl_supported_extensions[i];
+
+ if (!extension->aep)
+ continue;
+ /* AEP should not be enabled unless all of its sub-extensions can
+ * also be enabled. This is not the proper layer to do such
+ * error-checking, though.
+ */
+ assert(extension->compatible_with_state(state, api, gl_version));
+ extension->set_flags(state, behavior);
+ }
+ }
+ } else {
+ static const char fmt[] = "extension `%s' unsupported in %s shader";
+
+ if (behavior == extension_require) {
+ _mesa_glsl_error(name_locp, state, fmt,
+ name, _mesa_shader_stage_to_string(state->stage));
+ return false;
+ } else {
+ _mesa_glsl_warning(name_locp, state, fmt,
+ name, _mesa_shader_stage_to_string(state->stage));
+ }
+ }
+ }
+
+ return true;
+}
+
+
+/**
+ * Recurses through <type> and <expr> if <expr> is an aggregate initializer
+ * and sets <expr>'s <constructor_type> field to <type>. Gives later functions
+ * (process_array_constructor, et al) sufficient information to do type
+ * checking.
+ *
+ * Operates on assignments involving an aggregate initializer. E.g.,
+ *
+ * vec4 pos = {1.0, -1.0, 0.0, 1.0};
+ *
+ * or more ridiculously,
+ *
+ * struct S {
+ * vec4 v[2];
+ * };
+ *
+ * struct {
+ * S a[2], b;
+ * int c;
+ * } aggregate = {
+ * {
+ * {
+ * {
+ * {1.0, 2.0, 3.0, 4.0}, // a[0].v[0]
+ * {5.0, 6.0, 7.0, 8.0} // a[0].v[1]
+ * } // a[0].v
+ * }, // a[0]
+ * {
+ * {
+ * {1.0, 2.0, 3.0, 4.0}, // a[1].v[0]
+ * {5.0, 6.0, 7.0, 8.0} // a[1].v[1]
+ * } // a[1].v
+ * } // a[1]
+ * }, // a
+ * {
+ * {
+ * {1.0, 2.0, 3.0, 4.0}, // b.v[0]
+ * {5.0, 6.0, 7.0, 8.0} // b.v[1]
+ * } // b.v
+ * }, // b
+ * 4 // c
+ * };
+ *
+ * This pass is necessary because the right-hand side of <type> e = { ... }
+ * doesn't contain sufficient information to determine if the types match.
+ */
+void
+_mesa_ast_set_aggregate_type(const glsl_type *type,
+ ast_expression *expr)
+{
+ ast_aggregate_initializer *ai = (ast_aggregate_initializer *)expr;
+ ai->constructor_type = type;
+
+ /* If the aggregate is an array, recursively set its elements' types. */
+ if (type->is_array()) {
+ /* Each array element has the type type->fields.array.
+ *
+ * E.g., if <type> is struct S[2] we want to set each element's type to
+ * struct S.
+ */
+ for (exec_node *expr_node = ai->expressions.get_head_raw();
+ !expr_node->is_tail_sentinel();
+ expr_node = expr_node->next) {
+ ast_expression *expr = exec_node_data(ast_expression, expr_node,
+ link);
+
+ if (expr->oper == ast_aggregate)
+ _mesa_ast_set_aggregate_type(type->fields.array, expr);
+ }
+
+ /* If the aggregate is a struct, recursively set its fields' types. */
+ } else if (type->is_struct()) {
+ exec_node *expr_node = ai->expressions.get_head_raw();
+
+ /* Iterate through the struct's fields. */
+ for (unsigned i = 0; !expr_node->is_tail_sentinel() && i < type->length;
+ i++, expr_node = expr_node->next) {
+ ast_expression *expr = exec_node_data(ast_expression, expr_node,
+ link);
+
+ if (expr->oper == ast_aggregate) {
+ _mesa_ast_set_aggregate_type(type->fields.structure[i].type, expr);
+ }
+ }
+ /* If the aggregate is a matrix, set its columns' types. */
+ } else if (type->is_matrix()) {
+ for (exec_node *expr_node = ai->expressions.get_head_raw();
+ !expr_node->is_tail_sentinel();
+ expr_node = expr_node->next) {
+ ast_expression *expr = exec_node_data(ast_expression, expr_node,
+ link);
+
+ if (expr->oper == ast_aggregate)
+ _mesa_ast_set_aggregate_type(type->column_type(), expr);
+ }
+ }
+}
+
+void
+_mesa_ast_process_interface_block(YYLTYPE *locp,
+ _mesa_glsl_parse_state *state,
+ ast_interface_block *const block,
+ const struct ast_type_qualifier &q)
+{
+ if (q.flags.q.buffer) {
+ if (!state->has_shader_storage_buffer_objects()) {
+ _mesa_glsl_error(locp, state,
+ "#version 430 / GL_ARB_shader_storage_buffer_object "
+ "required for defining shader storage blocks");
+ } else if (state->ARB_shader_storage_buffer_object_warn) {
+ _mesa_glsl_warning(locp, state,
+ "#version 430 / GL_ARB_shader_storage_buffer_object "
+ "required for defining shader storage blocks");
+ }
+ } else if (q.flags.q.uniform) {
+ if (!state->has_uniform_buffer_objects()) {
+ _mesa_glsl_error(locp, state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "required for defining uniform blocks");
+ } else if (state->ARB_uniform_buffer_object_warn) {
+ _mesa_glsl_warning(locp, state,
+ "#version 140 / GL_ARB_uniform_buffer_object "
+ "required for defining uniform blocks");
+ }
+ } else {
+ if (!state->has_shader_io_blocks()) {
+ if (state->es_shader) {
+ _mesa_glsl_error(locp, state,
+ "GL_OES_shader_io_blocks or #version 320 "
+ "required for using interface blocks");
+ } else {
+ _mesa_glsl_error(locp, state,
+ "#version 150 required for using "
+ "interface blocks");
+ }
+ }
+ }
+
+ /* From the GLSL 1.50.11 spec, section 4.3.7 ("Interface Blocks"):
+ * "It is illegal to have an input block in a vertex shader
+ * or an output block in a fragment shader"
+ */
+ if ((state->stage == MESA_SHADER_VERTEX) && q.flags.q.in) {
+ _mesa_glsl_error(locp, state,
+ "`in' interface block is not allowed for "
+ "a vertex shader");
+ } else if ((state->stage == MESA_SHADER_FRAGMENT) && q.flags.q.out) {
+ _mesa_glsl_error(locp, state,
+ "`out' interface block is not allowed for "
+ "a fragment shader");
+ }
+
+ /* Since block arrays require names, and both features are added in
+ * the same language versions, we don't have to explicitly
+ * version-check both things.
+ */
+ if (block->instance_name != NULL) {
+ state->check_version(150, 300, locp, "interface blocks with "
+ "an instance name are not allowed");
+ }
+
+ ast_type_qualifier::bitset_t interface_type_mask;
+ struct ast_type_qualifier temp_type_qualifier;
+
+ /* Get a bitmask containing only the in/out/uniform/buffer
+ * flags, allowing us to ignore other irrelevant flags like
+ * interpolation qualifiers.
+ */
+ temp_type_qualifier.flags.i = 0;
+ temp_type_qualifier.flags.q.uniform = true;
+ temp_type_qualifier.flags.q.in = true;
+ temp_type_qualifier.flags.q.out = true;
+ temp_type_qualifier.flags.q.buffer = true;
+ temp_type_qualifier.flags.q.patch = true;
+ interface_type_mask = temp_type_qualifier.flags.i;
+
+ /* Get the block's interface qualifier. The interface_qualifier
+ * production rule guarantees that only one bit will be set (and
+ * it will be in/out/uniform).
+ */
+ ast_type_qualifier::bitset_t block_interface_qualifier = q.flags.i;
+
+ block->default_layout.flags.i |= block_interface_qualifier;
+
+ if (state->stage == MESA_SHADER_GEOMETRY &&
+ state->has_explicit_attrib_stream() &&
+ block->default_layout.flags.q.out) {
+ /* Assign global layout's stream value. */
+ block->default_layout.flags.q.stream = 1;
+ block->default_layout.flags.q.explicit_stream = 0;
+ block->default_layout.stream = state->out_qualifier->stream;
+ }
+
+ if (state->has_enhanced_layouts() && block->default_layout.flags.q.out) {
+ /* Assign global layout's xfb_buffer value. */
+ block->default_layout.flags.q.xfb_buffer = 1;
+ block->default_layout.flags.q.explicit_xfb_buffer = 0;
+ block->default_layout.xfb_buffer = state->out_qualifier->xfb_buffer;
+ }
+
+ foreach_list_typed (ast_declarator_list, member, link, &block->declarations) {
+ ast_type_qualifier& qualifier = member->type->qualifier;
+ if ((qualifier.flags.i & interface_type_mask) == 0) {
+ /* GLSLangSpec.1.50.11, 4.3.7 (Interface Blocks):
+ * "If no optional qualifier is used in a member declaration, the
+ * qualifier of the variable is just in, out, or uniform as declared
+ * by interface-qualifier."
+ */
+ qualifier.flags.i |= block_interface_qualifier;
+ } else if ((qualifier.flags.i & interface_type_mask) !=
+ block_interface_qualifier) {
+ /* GLSLangSpec.1.50.11, 4.3.7 (Interface Blocks):
+ * "If optional qualifiers are used, they can include interpolation
+ * and storage qualifiers and they must declare an input, output,
+ * or uniform variable consistent with the interface qualifier of
+ * the block."
+ */
+ _mesa_glsl_error(locp, state,
+ "uniform/in/out qualifier on "
+ "interface block member does not match "
+ "the interface block");
+ }
+
+ if (!(q.flags.q.in || q.flags.q.out) && qualifier.flags.q.invariant)
+ _mesa_glsl_error(locp, state,
+ "invariant qualifiers can be used only "
+ "in interface block members for shader "
+ "inputs or outputs");
+ }
+}
+
+static void
+_mesa_ast_type_qualifier_print(const struct ast_type_qualifier *q)
+{
+ if (q->is_subroutine_decl())
+ printf("subroutine ");
+
+ if (q->subroutine_list) {
+ printf("subroutine (");
+ q->subroutine_list->print();
+ printf(")");
+ }
+
+ if (q->flags.q.constant)
+ printf("const ");
+
+ if (q->flags.q.invariant)
+ printf("invariant ");
+
+ if (q->flags.q.attribute)
+ printf("attribute ");
+
+ if (q->flags.q.varying)
+ printf("varying ");
+
+ if (q->flags.q.in && q->flags.q.out)
+ printf("inout ");
+ else {
+ if (q->flags.q.in)
+ printf("in ");
+
+ if (q->flags.q.out)
+ printf("out ");
+ }
+
+ if (q->flags.q.centroid)
+ printf("centroid ");
+ if (q->flags.q.sample)
+ printf("sample ");
+ if (q->flags.q.patch)
+ printf("patch ");
+ if (q->flags.q.uniform)
+ printf("uniform ");
+ if (q->flags.q.buffer)
+ printf("buffer ");
+ if (q->flags.q.smooth)
+ printf("smooth ");
+ if (q->flags.q.flat)
+ printf("flat ");
+ if (q->flags.q.noperspective)
+ printf("noperspective ");
+}
+
+
+void
+ast_node::print(void) const
+{
+ printf("unhandled node ");
+}
+
+
+ast_node::ast_node(void)
+{
+ this->location.source = 0;
+ this->location.first_line = 0;
+ this->location.first_column = 0;
+ this->location.last_line = 0;
+ this->location.last_column = 0;
+}
+
+
+static void
+ast_opt_array_dimensions_print(const ast_array_specifier *array_specifier)
+{
+ if (array_specifier)
+ array_specifier->print();
+}
+
+
+void
+ast_compound_statement::print(void) const
+{
+ printf("{\n");
+
+ foreach_list_typed(ast_node, ast, link, &this->statements) {
+ ast->print();
+ }
+
+ printf("}\n");
+}
+
+
+ast_compound_statement::ast_compound_statement(int new_scope,
+ ast_node *statements)
+{
+ this->new_scope = new_scope;
+
+ if (statements != NULL) {
+ this->statements.push_degenerate_list_at_head(&statements->link);
+ }
+}
+
+
+void
+ast_expression::print(void) const
+{
+ switch (oper) {
+ case ast_assign:
+ case ast_mul_assign:
+ case ast_div_assign:
+ case ast_mod_assign:
+ case ast_add_assign:
+ case ast_sub_assign:
+ case ast_ls_assign:
+ case ast_rs_assign:
+ case ast_and_assign:
+ case ast_xor_assign:
+ case ast_or_assign:
+ subexpressions[0]->print();
+ printf("%s ", operator_string(oper));
+ subexpressions[1]->print();
+ break;
+
+ case ast_field_selection:
+ subexpressions[0]->print();
+ printf(". %s ", primary_expression.identifier);
+ break;
+
+ case ast_plus:
+ case ast_neg:
+ case ast_bit_not:
+ case ast_logic_not:
+ case ast_pre_inc:
+ case ast_pre_dec:
+ printf("%s ", operator_string(oper));
+ subexpressions[0]->print();
+ break;
+
+ case ast_post_inc:
+ case ast_post_dec:
+ subexpressions[0]->print();
+ printf("%s ", operator_string(oper));
+ break;
+
+ case ast_conditional:
+ subexpressions[0]->print();
+ printf("? ");
+ subexpressions[1]->print();
+ printf(": ");
+ subexpressions[2]->print();
+ break;
+
+ case ast_array_index:
+ subexpressions[0]->print();
+ printf("[ ");
+ subexpressions[1]->print();
+ printf("] ");
+ break;
+
+ case ast_function_call: {
+ subexpressions[0]->print();
+ printf("( ");
+
+ foreach_list_typed (ast_node, ast, link, &this->expressions) {
+ if (&ast->link != this->expressions.get_head())
+ printf(", ");
+
+ ast->print();
+ }
+
+ printf(") ");
+ break;
+ }
+
+ case ast_identifier:
+ printf("%s ", primary_expression.identifier);
+ break;
+
+ case ast_int_constant:
+ printf("%d ", primary_expression.int_constant);
+ break;
+
+ case ast_uint_constant:
+ printf("%u ", primary_expression.uint_constant);
+ break;
+
+ case ast_float_constant:
+ printf("%f ", primary_expression.float_constant);
+ break;
+
+ case ast_double_constant:
+ printf("%f ", primary_expression.double_constant);
+ break;
+
+ case ast_int64_constant:
+ printf("%" PRId64 " ", primary_expression.int64_constant);
+ break;
+
+ case ast_uint64_constant:
+ printf("%" PRIu64 " ", primary_expression.uint64_constant);
+ break;
+
+ case ast_bool_constant:
+ printf("%s ",
+ primary_expression.bool_constant
+ ? "true" : "false");
+ break;
+
+ case ast_sequence: {
+ printf("( ");
+ foreach_list_typed (ast_node, ast, link, & this->expressions) {
+ if (&ast->link != this->expressions.get_head())
+ printf(", ");
+
+ ast->print();
+ }
+ printf(") ");
+ break;
+ }
+
+ case ast_aggregate: {
+ printf("{ ");
+ foreach_list_typed (ast_node, ast, link, & this->expressions) {
+ if (&ast->link != this->expressions.get_head())
+ printf(", ");
+
+ ast->print();
+ }
+ printf("} ");
+ break;
+ }
+
+ default:
+ assert(0);
+ break;
+ }
+}
+
+ast_expression::ast_expression(int oper,
+ ast_expression *ex0,
+ ast_expression *ex1,
+ ast_expression *ex2) :
+ primary_expression()
+{
+ this->oper = ast_operators(oper);
+ this->subexpressions[0] = ex0;
+ this->subexpressions[1] = ex1;
+ this->subexpressions[2] = ex2;
+ this->non_lvalue_description = NULL;
+ this->is_lhs = false;
+}
+
+
+void
+ast_expression_statement::print(void) const
+{
+ if (expression)
+ expression->print();
+
+ printf("; ");
+}
+
+
+ast_expression_statement::ast_expression_statement(ast_expression *ex) :
+ expression(ex)
+{
+ /* empty */
+}
+
+
+void
+ast_function::print(void) const
+{
+ return_type->print();
+ printf(" %s (", identifier);
+
+ foreach_list_typed(ast_node, ast, link, & this->parameters) {
+ ast->print();
+ }
+
+ printf(")");
+}
+
+
+ast_function::ast_function(void)
+ : return_type(NULL), identifier(NULL), is_definition(false),
+ signature(NULL)
+{
+ /* empty */
+}
+
+
+void
+ast_fully_specified_type::print(void) const
+{
+ _mesa_ast_type_qualifier_print(& qualifier);
+ specifier->print();
+}
+
+
+void
+ast_parameter_declarator::print(void) const
+{
+ type->print();
+ if (identifier)
+ printf("%s ", identifier);
+ ast_opt_array_dimensions_print(array_specifier);
+}
+
+
+void
+ast_function_definition::print(void) const
+{
+ prototype->print();
+ body->print();
+}
+
+
+void
+ast_declaration::print(void) const
+{
+ printf("%s ", identifier);
+ ast_opt_array_dimensions_print(array_specifier);
+
+ if (initializer) {
+ printf("= ");
+ initializer->print();
+ }
+}
+
+
+ast_declaration::ast_declaration(const char *identifier,
+ ast_array_specifier *array_specifier,
+ ast_expression *initializer)
+{
+ this->identifier = identifier;
+ this->array_specifier = array_specifier;
+ this->initializer = initializer;
+}
+
+
+void
+ast_declarator_list::print(void) const
+{
+ assert(type || invariant);
+
+ if (type)
+ type->print();
+ else if (invariant)
+ printf("invariant ");
+ else
+ printf("precise ");
+
+ foreach_list_typed (ast_node, ast, link, & this->declarations) {
+ if (&ast->link != this->declarations.get_head())
+ printf(", ");
+
+ ast->print();
+ }
+
+ printf("; ");
+}
+
+
+ast_declarator_list::ast_declarator_list(ast_fully_specified_type *type)
+{
+ this->type = type;
+ this->invariant = false;
+ this->precise = false;
+}
+
+void
+ast_jump_statement::print(void) const
+{
+ switch (mode) {
+ case ast_continue:
+ printf("continue; ");
+ break;
+ case ast_break:
+ printf("break; ");
+ break;
+ case ast_return:
+ printf("return ");
+ if (opt_return_value)
+ opt_return_value->print();
+
+ printf("; ");
+ break;
+ case ast_discard:
+ printf("discard; ");
+ break;
+ }
+}
+
+
+ast_jump_statement::ast_jump_statement(int mode, ast_expression *return_value)
+ : opt_return_value(NULL)
+{
+ this->mode = ast_jump_modes(mode);
+
+ if (mode == ast_return)
+ opt_return_value = return_value;
+}
+
+
+void
+ast_demote_statement::print(void) const
+{
+ printf("demote; ");
+}
+
+
+void
+ast_selection_statement::print(void) const
+{
+ printf("if ( ");
+ condition->print();
+ printf(") ");
+
+ then_statement->print();
+
+ if (else_statement) {
+ printf("else ");
+ else_statement->print();
+ }
+}
+
+
+ast_selection_statement::ast_selection_statement(ast_expression *condition,
+ ast_node *then_statement,
+ ast_node *else_statement)
+{
+ this->condition = condition;
+ this->then_statement = then_statement;
+ this->else_statement = else_statement;
+}
+
+
+void
+ast_switch_statement::print(void) const
+{
+ printf("switch ( ");
+ test_expression->print();
+ printf(") ");
+
+ body->print();
+}
+
+
+ast_switch_statement::ast_switch_statement(ast_expression *test_expression,
+ ast_node *body)
+{
+ this->test_expression = test_expression;
+ this->body = body;
+}
+
+
+void
+ast_switch_body::print(void) const
+{
+ printf("{\n");
+ if (stmts != NULL) {
+ stmts->print();
+ }
+ printf("}\n");
+}
+
+
+ast_switch_body::ast_switch_body(ast_case_statement_list *stmts)
+{
+ this->stmts = stmts;
+}
+
+
+void ast_case_label::print(void) const
+{
+ if (test_value != NULL) {
+ printf("case ");
+ test_value->print();
+ printf(": ");
+ } else {
+ printf("default: ");
+ }
+}
+
+
+ast_case_label::ast_case_label(ast_expression *test_value)
+{
+ this->test_value = test_value;
+}
+
+
+void ast_case_label_list::print(void) const
+{
+ foreach_list_typed(ast_node, ast, link, & this->labels) {
+ ast->print();
+ }
+ printf("\n");
+}
+
+
+ast_case_label_list::ast_case_label_list(void)
+{
+}
+
+
+void ast_case_statement::print(void) const
+{
+ labels->print();
+ foreach_list_typed(ast_node, ast, link, & this->stmts) {
+ ast->print();
+ printf("\n");
+ }
+}
+
+
+ast_case_statement::ast_case_statement(ast_case_label_list *labels)
+{
+ this->labels = labels;
+}
+
+
+void ast_case_statement_list::print(void) const
+{
+ foreach_list_typed(ast_node, ast, link, & this->cases) {
+ ast->print();
+ }
+}
+
+
+ast_case_statement_list::ast_case_statement_list(void)
+{
+}
+
+
+void
+ast_iteration_statement::print(void) const
+{
+ switch (mode) {
+ case ast_for:
+ printf("for( ");
+ if (init_statement)
+ init_statement->print();
+ printf("; ");
+
+ if (condition)
+ condition->print();
+ printf("; ");
+
+ if (rest_expression)
+ rest_expression->print();
+ printf(") ");
+
+ body->print();
+ break;
+
+ case ast_while:
+ printf("while ( ");
+ if (condition)
+ condition->print();
+ printf(") ");
+ body->print();
+ break;
+
+ case ast_do_while:
+ printf("do ");
+ body->print();
+ printf("while ( ");
+ if (condition)
+ condition->print();
+ printf("); ");
+ break;
+ }
+}
+
+
+ast_iteration_statement::ast_iteration_statement(int mode,
+ ast_node *init,
+ ast_node *condition,
+ ast_expression *rest_expression,
+ ast_node *body)
+{
+ this->mode = ast_iteration_modes(mode);
+ this->init_statement = init;
+ this->condition = condition;
+ this->rest_expression = rest_expression;
+ this->body = body;
+}
+
+
+void
+ast_struct_specifier::print(void) const
+{
+ printf("struct %s { ", name);
+ foreach_list_typed(ast_node, ast, link, &this->declarations) {
+ ast->print();
+ }
+ printf("} ");
+}
+
+
+ast_struct_specifier::ast_struct_specifier(const char *identifier,
+ ast_declarator_list *declarator_list)
+ : name(identifier), layout(NULL), declarations(), is_declaration(true),
+ type(NULL)
+{
+ this->declarations.push_degenerate_list_at_head(&declarator_list->link);
+}
+
+void ast_subroutine_list::print(void) const
+{
+ foreach_list_typed (ast_node, ast, link, & this->declarations) {
+ if (&ast->link != this->declarations.get_head())
+ printf(", ");
+ ast->print();
+ }
+}
+
+static void
+set_shader_inout_layout(struct gl_shader *shader,
+ struct _mesa_glsl_parse_state *state)
+{
+ /* Should have been prevented by the parser. */
+ if (shader->Stage != MESA_SHADER_GEOMETRY &&
+ shader->Stage != MESA_SHADER_TESS_EVAL &&
+ shader->Stage != MESA_SHADER_COMPUTE) {
+ assert(!state->in_qualifier->flags.i);
+ }
+
+ if (shader->Stage != MESA_SHADER_COMPUTE) {
+ /* Should have been prevented by the parser. */
+ assert(!state->cs_input_local_size_specified);
+ assert(!state->cs_input_local_size_variable_specified);
+ assert(state->cs_derivative_group == DERIVATIVE_GROUP_NONE);
+ }
+
+ if (shader->Stage != MESA_SHADER_FRAGMENT) {
+ /* Should have been prevented by the parser. */
+ assert(!state->fs_uses_gl_fragcoord);
+ assert(!state->fs_redeclares_gl_fragcoord);
+ assert(!state->fs_pixel_center_integer);
+ assert(!state->fs_origin_upper_left);
+ assert(!state->fs_early_fragment_tests);
+ assert(!state->fs_inner_coverage);
+ assert(!state->fs_post_depth_coverage);
+ assert(!state->fs_pixel_interlock_ordered);
+ assert(!state->fs_pixel_interlock_unordered);
+ assert(!state->fs_sample_interlock_ordered);
+ assert(!state->fs_sample_interlock_unordered);
+ }
+
+ for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
+ if (state->out_qualifier->out_xfb_stride[i]) {
+ unsigned xfb_stride;
+ if (state->out_qualifier->out_xfb_stride[i]->
+ process_qualifier_constant(state, "xfb_stride", &xfb_stride,
+ true)) {
+ shader->TransformFeedbackBufferStride[i] = xfb_stride;
+ }
+ }
+ }
+
+ switch (shader->Stage) {
+ case MESA_SHADER_TESS_CTRL:
+ shader->info.TessCtrl.VerticesOut = 0;
+ if (state->tcs_output_vertices_specified) {
+ unsigned vertices;
+ if (state->out_qualifier->vertices->
+ process_qualifier_constant(state, "vertices", &vertices,
+ false)) {
+
+ YYLTYPE loc = state->out_qualifier->vertices->get_location();
+ if (vertices > state->Const.MaxPatchVertices) {
+ _mesa_glsl_error(&loc, state, "vertices (%d) exceeds "
+ "GL_MAX_PATCH_VERTICES", vertices);
+ }
+ shader->info.TessCtrl.VerticesOut = vertices;
+ }
+ }
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ shader->info.TessEval.PrimitiveMode = PRIM_UNKNOWN;
+ if (state->in_qualifier->flags.q.prim_type)
+ shader->info.TessEval.PrimitiveMode = state->in_qualifier->prim_type;
+
+ shader->info.TessEval.Spacing = TESS_SPACING_UNSPECIFIED;
+ if (state->in_qualifier->flags.q.vertex_spacing)
+ shader->info.TessEval.Spacing = state->in_qualifier->vertex_spacing;
+
+ shader->info.TessEval.VertexOrder = 0;
+ if (state->in_qualifier->flags.q.ordering)
+ shader->info.TessEval.VertexOrder = state->in_qualifier->ordering;
+
+ shader->info.TessEval.PointMode = -1;
+ if (state->in_qualifier->flags.q.point_mode)
+ shader->info.TessEval.PointMode = state->in_qualifier->point_mode;
+ break;
+ case MESA_SHADER_GEOMETRY:
+ shader->info.Geom.VerticesOut = -1;
+ if (state->out_qualifier->flags.q.max_vertices) {
+ unsigned qual_max_vertices;
+ if (state->out_qualifier->max_vertices->
+ process_qualifier_constant(state, "max_vertices",
+ &qual_max_vertices, true)) {
+
+ if (qual_max_vertices > state->Const.MaxGeometryOutputVertices) {
+ YYLTYPE loc = state->out_qualifier->max_vertices->get_location();
+ _mesa_glsl_error(&loc, state,
+ "maximum output vertices (%d) exceeds "
+ "GL_MAX_GEOMETRY_OUTPUT_VERTICES",
+ qual_max_vertices);
+ }
+ shader->info.Geom.VerticesOut = qual_max_vertices;
+ }
+ }
+
+ if (state->gs_input_prim_type_specified) {
+ shader->info.Geom.InputType = state->in_qualifier->prim_type;
+ } else {
+ shader->info.Geom.InputType = PRIM_UNKNOWN;
+ }
+
+ if (state->out_qualifier->flags.q.prim_type) {
+ shader->info.Geom.OutputType = state->out_qualifier->prim_type;
+ } else {
+ shader->info.Geom.OutputType = PRIM_UNKNOWN;
+ }
+
+ shader->info.Geom.Invocations = 0;
+ if (state->in_qualifier->flags.q.invocations) {
+ unsigned invocations;
+ if (state->in_qualifier->invocations->
+ process_qualifier_constant(state, "invocations",
+ &invocations, false)) {
+
+ YYLTYPE loc = state->in_qualifier->invocations->get_location();
+ if (invocations > state->Const.MaxGeometryShaderInvocations) {
+ _mesa_glsl_error(&loc, state,
+ "invocations (%d) exceeds "
+ "GL_MAX_GEOMETRY_SHADER_INVOCATIONS",
+ invocations);
+ }
+ shader->info.Geom.Invocations = invocations;
+ }
+ }
+ break;
+
+ case MESA_SHADER_COMPUTE:
+ if (state->cs_input_local_size_specified) {
+ for (int i = 0; i < 3; i++)
+ shader->info.Comp.LocalSize[i] = state->cs_input_local_size[i];
+ } else {
+ for (int i = 0; i < 3; i++)
+ shader->info.Comp.LocalSize[i] = 0;
+ }
+
+ shader->info.Comp.LocalSizeVariable =
+ state->cs_input_local_size_variable_specified;
+
+ shader->info.Comp.DerivativeGroup = state->cs_derivative_group;
+
+ if (state->NV_compute_shader_derivatives_enable) {
+ /* We allow multiple cs_input_layout nodes, but do not store them in
+ * a convenient place, so for now we live with an empty-location error.
+ */
+ YYLTYPE loc = {0};
+ if (shader->info.Comp.DerivativeGroup == DERIVATIVE_GROUP_QUADS) {
+ if (shader->info.Comp.LocalSize[0] % 2 != 0) {
+ _mesa_glsl_error(&loc, state, "derivative_group_quadsNV must be used with a "
+ "local group size whose first dimension "
+ "is a multiple of 2\n");
+ }
+ if (shader->info.Comp.LocalSize[1] % 2 != 0) {
+ _mesa_glsl_error(&loc, state, "derivative_group_quadsNV must be used with a "
+ "local group size whose second dimension "
+ "is a multiple of 2\n");
+ }
+ } else if (shader->info.Comp.DerivativeGroup == DERIVATIVE_GROUP_LINEAR) {
+ if ((shader->info.Comp.LocalSize[0] *
+ shader->info.Comp.LocalSize[1] *
+ shader->info.Comp.LocalSize[2]) % 4 != 0) {
+ _mesa_glsl_error(&loc, state, "derivative_group_linearNV must be used with a "
+ "local group size whose total number of invocations "
+ "is a multiple of 4\n");
+ }
+ }
+ }
+
+ break;
+
+ case MESA_SHADER_FRAGMENT:
+ shader->redeclares_gl_fragcoord = state->fs_redeclares_gl_fragcoord;
+ shader->uses_gl_fragcoord = state->fs_uses_gl_fragcoord;
+ shader->pixel_center_integer = state->fs_pixel_center_integer;
+ shader->origin_upper_left = state->fs_origin_upper_left;
+ shader->ARB_fragment_coord_conventions_enable =
+ state->ARB_fragment_coord_conventions_enable;
+ shader->EarlyFragmentTests = state->fs_early_fragment_tests;
+ shader->InnerCoverage = state->fs_inner_coverage;
+ shader->PostDepthCoverage = state->fs_post_depth_coverage;
+ shader->PixelInterlockOrdered = state->fs_pixel_interlock_ordered;
+ shader->PixelInterlockUnordered = state->fs_pixel_interlock_unordered;
+ shader->SampleInterlockOrdered = state->fs_sample_interlock_ordered;
+ shader->SampleInterlockUnordered = state->fs_sample_interlock_unordered;
+ shader->BlendSupport = state->fs_blend_support;
+ break;
+
+ default:
+ /* Nothing to do. */
+ break;
+ }
+
+ shader->bindless_sampler = state->bindless_sampler_specified;
+ shader->bindless_image = state->bindless_image_specified;
+ shader->bound_sampler = state->bound_sampler_specified;
+ shader->bound_image = state->bound_image_specified;
+ shader->redeclares_gl_layer = state->redeclares_gl_layer;
+ shader->layer_viewport_relative = state->layer_viewport_relative;
+}
+
+/* src can be NULL if only the symbols found in the exec_list should be
+ * copied
+ */
+void
+_mesa_glsl_copy_symbols_from_table(struct exec_list *shader_ir,
+ struct glsl_symbol_table *src,
+ struct glsl_symbol_table *dest)
+{
+ foreach_in_list (ir_instruction, ir, shader_ir) {
+ switch (ir->ir_type) {
+ case ir_type_function:
+ dest->add_function((ir_function *) ir);
+ break;
+ case ir_type_variable: {
+ ir_variable *const var = (ir_variable *) ir;
+
+ if (var->data.mode != ir_var_temporary)
+ dest->add_variable(var);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if (src != NULL) {
+ /* Explicitly copy the gl_PerVertex interface definitions because these
+ * are needed to check they are the same during the interstage link.
+ * They can’t necessarily be found via the exec_list because the members
+ * might not be referenced. The GL spec still requires that they match
+ * in that case.
+ */
+ const glsl_type *iface =
+ src->get_interface("gl_PerVertex", ir_var_shader_in);
+ if (iface)
+ dest->add_interface(iface->name, iface, ir_var_shader_in);
+
+ iface = src->get_interface("gl_PerVertex", ir_var_shader_out);
+ if (iface)
+ dest->add_interface(iface->name, iface, ir_var_shader_out);
+ }
+}
+
+extern "C" {
+
+static void
+assign_subroutine_indexes(struct _mesa_glsl_parse_state *state)
+{
+ int j, k;
+ int index = 0;
+
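+ /* Give each subroutine that does not yet have an index the lowest index
+ * not already used by another subroutine.
+ */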
+ for (j = 0; j < state->num_subroutines; j++) {
+ while (state->subroutines[j]->subroutine_index == -1) {
+ for (k = 0; k < state->num_subroutines; k++) {
+ if (state->subroutines[k]->subroutine_index == index)
+ break;
+ else if (k == state->num_subroutines - 1) {
+ state->subroutines[j]->subroutine_index = index;
+ }
+ }
+ index++;
+ }
+ }
+}
+
+void
+add_builtin_defines(struct _mesa_glsl_parse_state *state,
+ void (*add_builtin_define)(struct glcpp_parser *, const char *, int),
+ struct glcpp_parser *data,
+ unsigned version,
+ bool es)
+{
+ unsigned gl_version = state->ctx->Extensions.Version;
+ gl_api api = state->ctx->API;
+
+ if (gl_version != 0xff) {
+ unsigned i;
+ for (i = 0; i < state->num_supported_versions; i++) {
+ if (state->supported_versions[i].ver == version &&
+ state->supported_versions[i].es == es) {
+ gl_version = state->supported_versions[i].gl_ver;
+ break;
+ }
+ }
+
+ if (i == state->num_supported_versions)
+ return;
+ }
+
+ if (es)
+ api = API_OPENGLES2;
+
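+ /* Define a macro for each extension usable in this context, e.g.
+ * "#define GL_ARB_gpu_shader5 1", so shaders can test for support in the
+ * preprocessor.
+ */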
+ for (unsigned i = 0;
+ i < ARRAY_SIZE(_mesa_glsl_supported_extensions); ++i) {
+ const _mesa_glsl_extension *extension
+ = &_mesa_glsl_supported_extensions[i];
+ if (extension->compatible_with_state(state, api, gl_version)) {
+ add_builtin_define(data, extension->name, 1);
+ }
+ }
+}
+
+/* Implements checks that belong to parsing but can't be performed during the parse itself */
+static void
+do_late_parsing_checks(struct _mesa_glsl_parse_state *state)
+{
+ if (state->stage == MESA_SHADER_COMPUTE && !state->has_compute_shader()) {
+ YYLTYPE loc;
+ memset(&loc, 0, sizeof(loc));
+ _mesa_glsl_error(&loc, state, "Compute shaders require "
+ "GLSL 4.30 or GLSL ES 3.10");
+ }
+}
+
+static void
+opt_shader_and_create_symbol_table(struct gl_context *ctx,
+ struct glsl_symbol_table *source_symbols,
+ struct gl_shader *shader)
+{
+ assert(shader->CompileStatus != COMPILE_FAILURE &&
+ !shader->ir->is_empty());
+
+ struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[shader->Stage];
+
+ /* Do some optimization at compile time to reduce shader IR size
+ * and reduce later work if the same shader is linked multiple times
+ */
+ if (ctx->Const.GLSLOptimizeConservatively) {
+ /* Run it just once. */
+ do_common_optimization(shader->ir, false, false, options,
+ ctx->Const.NativeIntegers);
+ } else {
+ /* Repeat it until it stops making changes. */
+ while (do_common_optimization(shader->ir, false, false, options,
+ ctx->Const.NativeIntegers))
+ ;
+ }
+
+ validate_ir_tree(shader->ir);
+
+ enum ir_variable_mode other;
+ switch (shader->Stage) {
+ case MESA_SHADER_VERTEX:
+ other = ir_var_shader_in;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ other = ir_var_shader_out;
+ break;
+ default:
+ /* Something invalid to ensure optimize_dead_builtin_uniforms
+ * doesn't remove anything other than uniforms or constants.
+ */
+ other = ir_var_mode_count;
+ break;
+ }
+
+ optimize_dead_builtin_variables(shader->ir, other);
+
+ validate_ir_tree(shader->ir);
+
+ /* Retain any live IR, but trash the rest. */
+ reparent_ir(shader->ir, shader->ir);
+
+ /* Destroy the symbol table. Create a new symbol table that contains only
+ * the variables and functions that still exist in the IR. The symbol
+ * table will be used later during linking.
+ *
+ * There must NOT be any freed objects still referenced by the symbol
+ * table. That could cause the linker to dereference freed memory.
+ *
+ * We don't have to worry about types or interface-types here because those
+ * are fly-weights that are looked up by glsl_type.
+ */
+ _mesa_glsl_copy_symbols_from_table(shader->ir, source_symbols,
+ shader->symbols);
+}
+
+static bool
+can_skip_compile(struct gl_context *ctx, struct gl_shader *shader,
+ const char *source, bool force_recompile,
+ bool source_has_shader_include)
+{
+ if (!force_recompile) {
+ if (ctx->Cache) {
+ char buf[41];
+ disk_cache_compute_key(ctx->Cache, source, strlen(source),
+ shader->sha1);
+ if (disk_cache_has_key(ctx->Cache, shader->sha1)) {
+ /* We've seen this shader before and know it compiles */
+ if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
+ _mesa_sha1_format(buf, shader->sha1);
+ fprintf(stderr, "deferring compile of shader: %s\n", buf);
+ }
+ shader->CompileStatus = COMPILE_SKIPPED;
+
+ free((void *)shader->FallbackSource);
+
+ /* Copy the pre-processed shader include to the fallback source;
+ * otherwise we have no guarantee the shader include source tree has
+ * not changed.
+ */
+ shader->FallbackSource = source_has_shader_include ?
+ strdup(source) : NULL;
+ return true;
+ }
+ }
+ } else {
+ /* We should only ever end up here if a re-compile has been forced by a
+ * shader cache miss, in which case we can skip the compile if it's
+ * already been done by a previous fallback or the initial compile call.
+ */
+ if (shader->CompileStatus == COMPILE_SUCCESS)
+ return true;
+ }
+
+ return false;
+}
+
+void
+_mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader,
+ bool dump_ast, bool dump_hir, bool force_recompile)
+{
+ const char *source = force_recompile && shader->FallbackSource ?
+ shader->FallbackSource : shader->Source;
+
+ /* Note this will be true for shaders that have #include inside comments;
+ * however, that should be rare enough not to worry about.
+ */
+ bool source_has_shader_include =
+ strstr(source, "#include") != NULL;
+
+ /* If there was no shader include we can check the shader cache and skip
+ * compilation before we run the preprocessor. We never skip compiling
+ * shaders that use ARB_shading_language_include because we would need to
+ * keep duplicate copies of the shader include source tree and paths.
+ */
+ if (!source_has_shader_include &&
+ can_skip_compile(ctx, shader, source, force_recompile, false))
+ return;
+
+ struct _mesa_glsl_parse_state *state =
+ new(shader) _mesa_glsl_parse_state(ctx, shader->Stage, shader);
+
+ if (ctx->Const.GenerateTemporaryNames)
+ (void) p_atomic_cmpxchg(&ir_variable::temporaries_allocate_names,
+ false, true);
+
+ if (!source_has_shader_include || !force_recompile) {
+ state->error = glcpp_preprocess(state, &source, &state->info_log,
+ add_builtin_defines, state, ctx);
+ }
+
+ /* Now that we have run the preprocessor we can check the shader cache and
+ * skip compilation if possible for those shaders that contained a shader
+ * include.
+ */
+ if (source_has_shader_include &&
+ can_skip_compile(ctx, shader, source, force_recompile, true))
+ return;
+
+ if (!state->error) {
+ _mesa_glsl_lexer_ctor(state, source);
+ _mesa_glsl_parse(state);
+ _mesa_glsl_lexer_dtor(state);
+ do_late_parsing_checks(state);
+ }
+
+ if (dump_ast) {
+ foreach_list_typed(ast_node, ast, link, &state->translation_unit) {
+ ast->print();
+ }
+ printf("\n\n");
+ }
+
+ ralloc_free(shader->ir);
+ shader->ir = new(shader) exec_list;
+ if (!state->error && !state->translation_unit.is_empty())
+ _mesa_ast_to_hir(shader->ir, state);
+
+ if (!state->error) {
+ validate_ir_tree(shader->ir);
+
+ /* Print out the unoptimized IR. */
+ if (dump_hir) {
+ _mesa_print_ir(stdout, shader->ir, state);
+ }
+ }
+
+ if (shader->InfoLog)
+ ralloc_free(shader->InfoLog);
+
+ if (!state->error)
+ set_shader_inout_layout(shader, state);
+
+ shader->symbols = new(shader->ir) glsl_symbol_table;
+ shader->CompileStatus = state->error ? COMPILE_FAILURE : COMPILE_SUCCESS;
+ shader->InfoLog = state->info_log;
+ shader->Version = state->language_version;
+ shader->IsES = state->es_shader;
+
+ struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[shader->Stage];
+
+ if (!state->error && !shader->ir->is_empty()) {
+ if (options->LowerPrecision)
+ lower_precision(shader->ir);
+ lower_builtins(shader->ir);
+ assign_subroutine_indexes(state);
+ lower_subroutine(shader->ir, state);
+ opt_shader_and_create_symbol_table(ctx, state->symbols, shader);
+ }
+
+ if (!force_recompile) {
+ free((void *)shader->FallbackSource);
+
+ /* Copy the pre-processed shader include to the fallback source; otherwise
+ * we have no guarantee the shader include source tree has not changed.
+ */
+ shader->FallbackSource = source_has_shader_include ?
+ strdup(source) : NULL;
+ }
+
+ delete state->symbols;
+ ralloc_free(state);
+
+ if (ctx->Cache && shader->CompileStatus == COMPILE_SUCCESS) {
+ char sha1_buf[41];
+ disk_cache_put_key(ctx->Cache, shader->sha1);
+ if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
+ _mesa_sha1_format(sha1_buf, shader->sha1);
+ fprintf(stderr, "marking shader: %s\n", sha1_buf);
+ }
+ }
+}
+
+} /* extern "C" */
+/**
+ * Do the set of common optimizations passes
+ *
+ * \param ir List of instructions to be optimized
+ * \param linked Is the shader linked? This enables
+ * optimizations passes that remove code at
+ * global scope and could cause linking to
+ * fail.
+ * \param uniform_locations_assigned Have locations already been assigned for
+ * uniforms? This prevents the declarations
+ * of unused uniforms from being removed.
+ * The setting of this flag only matters if
+ * \c linked is \c true.
+ * \param options The driver's preferred shader options.
+ * \param native_integers Selects optimizations that depend on the
+ * implementations supporting integers
+ * natively (as opposed to supporting
+ * integers in floating point registers).
+ */
+bool
+do_common_optimization(exec_list *ir, bool linked,
+ bool uniform_locations_assigned,
+ const struct gl_shader_compiler_options *options,
+ bool native_integers)
+{
+ const bool debug = false;
+ bool progress = false;
+
+#define OPT(PASS, ...) do { \
+ if (debug) { \
+ fprintf(stderr, "START GLSL optimization %s\n", #PASS); \
+ const bool opt_progress = PASS(__VA_ARGS__); \
+ progress = opt_progress || progress; \
+ if (opt_progress) \
+ _mesa_print_ir(stderr, ir, NULL); \
+ fprintf(stderr, "GLSL optimization %s: %s progress\n", \
+ #PASS, opt_progress ? "made" : "no"); \
+ } else { \
+ progress = PASS(__VA_ARGS__) || progress; \
+ } \
+ } while (false)
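+
+/* E.g., with debug disabled, OPT(do_if_simplification, ir) expands to just
+ * "progress = do_if_simplification(ir) || progress;".
+ */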
+
+ OPT(lower_instructions, ir, SUB_TO_ADD_NEG);
+
+ if (linked) {
+ OPT(do_function_inlining, ir);
+ OPT(do_dead_functions, ir);
+ OPT(do_structure_splitting, ir);
+ }
+ propagate_invariance(ir);
+ OPT(do_if_simplification, ir);
+ OPT(opt_flatten_nested_if_blocks, ir);
+ OPT(opt_conditional_discard, ir);
+ OPT(do_copy_propagation_elements, ir);
+
+ if (options->OptimizeForAOS && !linked)
+ OPT(opt_flip_matrices, ir);
+
+ if (linked && options->OptimizeForAOS) {
+ OPT(do_vectorize, ir);
+ }
+
+ if (linked)
+ OPT(do_dead_code, ir, uniform_locations_assigned);
+ else
+ OPT(do_dead_code_unlinked, ir);
+ OPT(do_dead_code_local, ir);
+ OPT(do_tree_grafting, ir);
+ OPT(do_constant_propagation, ir);
+ if (linked)
+ OPT(do_constant_variable, ir);
+ else
+ OPT(do_constant_variable_unlinked, ir);
+ OPT(do_constant_folding, ir);
+ OPT(do_minmax_prune, ir);
+ OPT(do_rebalance_tree, ir);
+ OPT(do_algebraic, ir, native_integers, options);
+ OPT(do_lower_jumps, ir, true, true, options->EmitNoMainReturn,
+ options->EmitNoCont, options->EmitNoLoops);
+ OPT(do_vec_index_to_swizzle, ir);
+ OPT(lower_vector_insert, ir, false);
+ OPT(optimize_swizzles, ir);
+
+ /* Some drivers only call do_common_optimization() once rather than in a
+ * loop, and splitting arrays causes each dereference of a constant array
+ * element to get its own copy of the entire array initializer. This IR is
+ * not something that can be generated manually in a shader and is not
+ * accounted for by NIR optimizations, so the result is an exponential
+ * slowdown in compilation speed as a constant array's element count grows.
+ * To avoid that, here we make sure to always clean up the mess that
+ * splitting arrays causes to constant arrays.
+ */
+ bool array_split = optimize_split_arrays(ir, linked);
+ if (array_split)
+ do_constant_propagation(ir);
+ progress |= array_split;
+
+ OPT(optimize_redundant_jumps, ir);
+
+ if (options->MaxUnrollIterations) {
+ loop_state *ls = analyze_loop_variables(ir);
+ if (ls->loop_found) {
+ bool loop_progress = unroll_loops(ir, ls, options);
+ while (loop_progress) {
+ loop_progress = false;
+ loop_progress |= do_constant_propagation(ir);
+ loop_progress |= do_if_simplification(ir);
+
+ /* Some drivers only call do_common_optimization() once rather
+ * than in a loop. So we must call do_lower_jumps() after
+ * unrolling a loop, because for drivers that use LLVM, validation
+ * will fail if a jump is not the last instruction in the block.
+ * For example the following will fail LLVM validation:
+ *
+ * (loop (
+ * ...
+ * break
+ * (assign (x) (var_ref v124) (expression int + (var_ref v124)
+ * (constant int (1)) ) )
+ * ))
+ */
+ loop_progress |= do_lower_jumps(ir, true, true,
+ options->EmitNoMainReturn,
+ options->EmitNoCont,
+ options->EmitNoLoops);
+ }
+ progress |= loop_progress;
+ }
+ delete ls;
+ }
+
+#undef OPT
+
+ return progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h
new file mode 100644
index 0000000000..7ce1d7c15e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h
@@ -0,0 +1,1060 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_PARSER_EXTRAS_H
+#define GLSL_PARSER_EXTRAS_H
+
+/*
+ * Most of the definitions here only apply to C++
+ */
+#ifdef __cplusplus
+
+
+#include <stdlib.h>
+#include "glsl_symbol_table.h"
+
+/* THIS is a macro defined somewhere deep in the Windows MSVC header files.
+ * Undefine it here to avoid collision with the lexer's THIS token.
+ */
+#undef THIS
+
+struct gl_context;
+
+struct glsl_switch_state {
+ /** Temporary variables needed for switch statement. */
+ ir_variable *test_var;
+ ir_variable *is_fallthru_var;
+ ir_variable *is_break_var;
+ class ast_switch_statement *switch_nesting_ast;
+
+ /** Used to set condition if 'default' label should be chosen. */
+ ir_variable *run_default;
+
+ /** Table of constant values already used in case labels */
+ struct hash_table *labels_ht;
+ class ast_case_label *previous_default;
+
+ bool is_switch_innermost; // true if this switch stmt is the innermost construct a break applies to
+};
+
+const char *
+glsl_compute_version_string(void *mem_ctx, bool is_es, unsigned version);
+
+typedef struct YYLTYPE {
+ int first_line;
+ int first_column;
+ int last_line;
+ int last_column;
+ unsigned source;
+ /* Path for ARB_shading_language_include include source */
+ char *path;
+} YYLTYPE;
+# define YYLTYPE_IS_DECLARED 1
+# define YYLTYPE_IS_TRIVIAL 1
+
+extern void _mesa_glsl_error(YYLTYPE *locp, _mesa_glsl_parse_state *state,
+ const char *fmt, ...);
+
+
+struct _mesa_glsl_parse_state {
+ _mesa_glsl_parse_state(struct gl_context *_ctx, gl_shader_stage stage,
+ void *mem_ctx);
+
+ DECLARE_RZALLOC_CXX_OPERATORS(_mesa_glsl_parse_state);
+
+ /**
+ * Generate a string representing the GLSL version currently being compiled
+ * (useful for error messages).
+ */
+ const char *get_version_string()
+ {
+ return glsl_compute_version_string(this, this->es_shader,
+ this->language_version);
+ }
+
+ /**
+ * Determine whether the current GLSL version is sufficiently high to
+ * support a certain feature.
+ *
+ * \param required_glsl_version is the desktop GLSL version that is
+ * required to support the feature, or 0 if no version of desktop GLSL
+ * supports the feature.
+ *
+ * \param required_glsl_es_version is the GLSL ES version that is required
+ * to support the feature, or 0 if no version of GLSL ES supports the
+ * feature.
+ */
+ bool is_version(unsigned required_glsl_version,
+ unsigned required_glsl_es_version) const
+ {
+ unsigned required_version = this->es_shader ?
+ required_glsl_es_version : required_glsl_version;
+ unsigned this_version = this->forced_language_version
+ ? this->forced_language_version : this->language_version;
+ return required_version != 0
+ && this_version >= required_version;
+ }
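+
+ /* E.g., is_version(430, 310) is true when compiling desktop GLSL 4.30
+ * or later, or GLSL ES 3.10 or later.
+ */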
+
+ bool check_version(unsigned required_glsl_version,
+ unsigned required_glsl_es_version,
+ YYLTYPE *locp, const char *fmt, ...) PRINTFLIKE(5, 6);
+
+ bool check_arrays_of_arrays_allowed(YYLTYPE *locp)
+ {
+ if (!(ARB_arrays_of_arrays_enable || is_version(430, 310))) {
+ const char *const requirement = this->es_shader
+ ? "GLSL ES 3.10"
+ : "GL_ARB_arrays_of_arrays or GLSL 4.30";
+ _mesa_glsl_error(locp, this,
+ "%s required for defining arrays of arrays.",
+ requirement);
+ return false;
+ }
+ return true;
+ }
+
+ bool check_precision_qualifiers_allowed(YYLTYPE *locp)
+ {
+ return check_version(130, 100, locp,
+ "precision qualifiers are forbidden");
+ }
+
+ bool check_bitwise_operations_allowed(YYLTYPE *locp)
+ {
+ return EXT_gpu_shader4_enable ||
+ check_version(130, 300, locp, "bit-wise operations are forbidden");
+ }
+
+ bool check_explicit_attrib_stream_allowed(YYLTYPE *locp)
+ {
+ if (!this->has_explicit_attrib_stream()) {
+ const char *const requirement = "GL_ARB_gpu_shader5 extension or GLSL 4.00";
+
+ _mesa_glsl_error(locp, this, "explicit stream requires %s",
+ requirement);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool check_explicit_attrib_location_allowed(YYLTYPE *locp,
+ const ir_variable *var)
+ {
+ if (!this->has_explicit_attrib_location()) {
+ const char *const requirement = this->es_shader
+ ? "GLSL ES 3.00"
+ : "GL_ARB_explicit_attrib_location extension or GLSL 3.30";
+
+ _mesa_glsl_error(locp, this, "%s explicit location requires %s",
+ mode_string(var), requirement);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool check_separate_shader_objects_allowed(YYLTYPE *locp,
+ const ir_variable *var)
+ {
+ if (!this->has_separate_shader_objects()) {
+ const char *const requirement = this->es_shader
+ ? "GL_EXT_separate_shader_objects extension or GLSL ES 3.10"
+ : "GL_ARB_separate_shader_objects extension or GLSL 4.20";
+
+ _mesa_glsl_error(locp, this, "%s explicit location requires %s",
+ mode_string(var), requirement);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool check_explicit_uniform_location_allowed(YYLTYPE *locp,
+ const ir_variable *)
+ {
+ if (!this->has_explicit_attrib_location() ||
+ !this->has_explicit_uniform_location()) {
+ const char *const requirement = this->es_shader
+ ? "GLSL ES 3.10"
+ : "GL_ARB_explicit_uniform_location and either "
+ "GL_ARB_explicit_attrib_location or GLSL 3.30.";
+
+ _mesa_glsl_error(locp, this,
+ "uniform explicit location requires %s",
+ requirement);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool has_atomic_counters() const
+ {
+ return ARB_shader_atomic_counters_enable || is_version(420, 310);
+ }
+
+ bool has_enhanced_layouts() const
+ {
+ return ARB_enhanced_layouts_enable || is_version(440, 0);
+ }
+
+ bool has_explicit_attrib_stream() const
+ {
+ return ARB_gpu_shader5_enable || is_version(400, 0);
+ }
+
+ bool has_explicit_attrib_location() const
+ {
+ return ARB_explicit_attrib_location_enable || is_version(330, 300);
+ }
+
+ bool has_explicit_uniform_location() const
+ {
+ return ARB_explicit_uniform_location_enable || is_version(430, 310);
+ }
+
+ bool has_uniform_buffer_objects() const
+ {
+ return ARB_uniform_buffer_object_enable || is_version(140, 300);
+ }
+
+ bool has_shader_storage_buffer_objects() const
+ {
+ return ARB_shader_storage_buffer_object_enable || is_version(430, 310);
+ }
+
+ bool has_separate_shader_objects() const
+ {
+ return ARB_separate_shader_objects_enable || is_version(410, 310)
+ || EXT_separate_shader_objects_enable;
+ }
+
+ bool has_double() const
+ {
+ return ARB_gpu_shader_fp64_enable || is_version(400, 0);
+ }
+
+ bool has_int64() const
+ {
+ return ARB_gpu_shader_int64_enable ||
+ AMD_gpu_shader_int64_enable;
+ }
+
+ bool has_420pack() const
+ {
+ return ARB_shading_language_420pack_enable || is_version(420, 0);
+ }
+
+ bool has_420pack_or_es31() const
+ {
+ return ARB_shading_language_420pack_enable || is_version(420, 310);
+ }
+
+ bool has_compute_shader() const
+ {
+ return ARB_compute_shader_enable || is_version(430, 310);
+ }
+
+ bool has_shader_io_blocks() const
+ {
+ /* The OES_geometry_shader_specification says:
+ *
+ * "If the OES_geometry_shader extension is enabled, the
+ * OES_shader_io_blocks extension is also implicitly enabled."
+ *
+ * The OES_tessellation_shader extension has similar wording.
+ */
+ return OES_shader_io_blocks_enable ||
+ EXT_shader_io_blocks_enable ||
+ OES_geometry_shader_enable ||
+ EXT_geometry_shader_enable ||
+ OES_tessellation_shader_enable ||
+ EXT_tessellation_shader_enable ||
+ is_version(150, 320);
+ }
+
+ bool has_geometry_shader() const
+ {
+ return OES_geometry_shader_enable || EXT_geometry_shader_enable ||
+ is_version(150, 320);
+ }
+
+ bool has_tessellation_shader() const
+ {
+ return ARB_tessellation_shader_enable ||
+ OES_tessellation_shader_enable ||
+ EXT_tessellation_shader_enable ||
+ is_version(400, 320);
+ }
+
+ bool has_clip_distance() const
+ {
+ return EXT_clip_cull_distance_enable || is_version(130, 0);
+ }
+
+ bool has_cull_distance() const
+ {
+ return EXT_clip_cull_distance_enable ||
+ ARB_cull_distance_enable ||
+ is_version(450, 0);
+ }
+
+ bool has_framebuffer_fetch() const
+ {
+ return EXT_shader_framebuffer_fetch_enable ||
+ EXT_shader_framebuffer_fetch_non_coherent_enable;
+ }
+
+ bool has_texture_cube_map_array() const
+ {
+ return ARB_texture_cube_map_array_enable ||
+ EXT_texture_cube_map_array_enable ||
+ OES_texture_cube_map_array_enable ||
+ is_version(400, 320);
+ }
+
+ bool has_shader_image_load_store() const
+ {
+ return ARB_shader_image_load_store_enable ||
+ EXT_shader_image_load_store_enable ||
+ is_version(420, 310);
+ }
+
+ bool has_bindless() const
+ {
+ return ARB_bindless_texture_enable;
+ }
+
+ bool has_image_load_formatted() const
+ {
+ return EXT_shader_image_load_formatted_enable;
+ }
+
+ bool has_implicit_conversions() const
+ {
+ return EXT_shader_implicit_conversions_enable || is_version(120, 0);
+ }
+
+ bool has_implicit_uint_to_int_conversion() const
+ {
+ return ARB_gpu_shader5_enable ||
+ MESA_shader_integer_functions_enable ||
+ EXT_shader_implicit_conversions_enable ||
+ is_version(400, 0);
+ }
+
+ void process_version_directive(YYLTYPE *locp, int version,
+ const char *ident);
+
+ struct gl_context *const ctx;
+ void *scanner;
+ exec_list translation_unit;
+ glsl_symbol_table *symbols;
+
+ void *linalloc;
+
+ unsigned num_supported_versions;
+ struct {
+ unsigned ver;
+ uint8_t gl_ver;
+ bool es;
+ } supported_versions[17];
+
+ bool es_shader;
+ bool compat_shader;
+ unsigned language_version;
+ unsigned forced_language_version;
+ bool had_version_string;
+ bool zero_init;
+ unsigned gl_version;
+ gl_shader_stage stage;
+
+ /**
+ * Default uniform layout qualifiers tracked during parsing.
+ * Currently affects uniform blocks and uniform buffer variables in
+ * those blocks.
+ */
+ struct ast_type_qualifier *default_uniform_qualifier;
+
+ /**
+ * Default shader storage layout qualifiers tracked during parsing.
+ * Currently affects shader storage blocks and shader storage buffer
+ * variables in those blocks.
+ */
+ struct ast_type_qualifier *default_shader_storage_qualifier;
+
+ /**
+ * Variables to track different cases where a fragment shader redeclares
+ * the built-in variable gl_FragCoord.
+ *
+ * Note: These values are computed at ast_to_hir time rather than at parse
+ * time.
+ */
+ bool fs_redeclares_gl_fragcoord;
+ bool fs_origin_upper_left;
+ bool fs_pixel_center_integer;
+ bool fs_redeclares_gl_fragcoord_with_no_layout_qualifiers;
+
+ /**
+ * True if a geometry shader input primitive type or tessellation control
+ * output vertices were specified using a layout directive.
+ *
+ * Note: these values are computed at ast_to_hir time rather than at parse
+ * time.
+ */
+ bool gs_input_prim_type_specified;
+ bool tcs_output_vertices_specified;
+
+ /**
+ * Input layout qualifiers from GLSL 1.50 (geometry shader controls),
+ * and GLSL 4.00 (tessellation evaluation shader)
+ */
+ struct ast_type_qualifier *in_qualifier;
+
+ /**
+ * True if a compute shader input local size was specified using a layout
+ * directive.
+ *
+ * Note: this value is computed at ast_to_hir time rather than at parse
+ * time.
+ */
+ bool cs_input_local_size_specified;
+
+ /**
+ * If cs_input_local_size_specified is true, the local size that was
+ * specified. Otherwise ignored.
+ */
+ unsigned cs_input_local_size[3];
+
+ /**
+ * True if a compute shader input local variable size was specified using
+ * a layout directive as specified by ARB_compute_variable_group_size.
+ */
+ bool cs_input_local_size_variable_specified;
+
+ /**
+ * Arrangement of invocations used to calculate derivatives in a compute
+ * shader. From NV_compute_shader_derivatives.
+ */
+ enum gl_derivative_group cs_derivative_group;
+
+ /**
+ * True if a shader declares bindless_sampler/bindless_image, or
+ * respectively bound_sampler/bound_image, at global scope as specified
+ * by ARB_bindless_texture.
+ */
+ bool bindless_sampler_specified;
+ bool bindless_image_specified;
+ bool bound_sampler_specified;
+ bool bound_image_specified;
+
+ /**
+ * Output layout qualifiers from GLSL 1.50 (geometry shader controls),
+ * and GLSL 4.00 (tessellation control shader).
+ */
+ struct ast_type_qualifier *out_qualifier;
+
+ /**
+ * Printable list of GLSL versions supported by the current context
+ *
+ * \note
+ * This string should probably be generated per-context instead of per
+ * invocation of the compiler. This should be changed when the method of
+ * tracking supported GLSL versions changes.
+ */
+ const char *supported_version_string;
+
+ /**
+ * Implementation defined limits that affect built-in variables, etc.
+ *
+ * \sa struct gl_constants (in mtypes.h)
+ */
+ struct {
+ /* 1.10 */
+ unsigned MaxLights;
+ unsigned MaxClipPlanes;
+ unsigned MaxTextureUnits;
+ unsigned MaxTextureCoords;
+ unsigned MaxVertexAttribs;
+ unsigned MaxVertexUniformComponents;
+ unsigned MaxVertexTextureImageUnits;
+ unsigned MaxCombinedTextureImageUnits;
+ unsigned MaxTextureImageUnits;
+ unsigned MaxFragmentUniformComponents;
+
+ /* ARB_draw_buffers */
+ unsigned MaxDrawBuffers;
+
+ /* ARB_enhanced_layouts */
+ unsigned MaxTransformFeedbackBuffers;
+ unsigned MaxTransformFeedbackInterleavedComponents;
+
+ /* ARB_blend_func_extended */
+ unsigned MaxDualSourceDrawBuffers;
+
+ /* 3.00 ES */
+ int MinProgramTexelOffset;
+ int MaxProgramTexelOffset;
+
+ /* 1.50 */
+ unsigned MaxVertexOutputComponents;
+ unsigned MaxGeometryInputComponents;
+ unsigned MaxGeometryOutputComponents;
+ unsigned MaxGeometryShaderInvocations;
+ unsigned MaxFragmentInputComponents;
+ unsigned MaxGeometryTextureImageUnits;
+ unsigned MaxGeometryOutputVertices;
+ unsigned MaxGeometryTotalOutputComponents;
+ unsigned MaxGeometryUniformComponents;
+
+ /* ARB_shader_atomic_counters */
+ unsigned MaxVertexAtomicCounters;
+ unsigned MaxTessControlAtomicCounters;
+ unsigned MaxTessEvaluationAtomicCounters;
+ unsigned MaxGeometryAtomicCounters;
+ unsigned MaxFragmentAtomicCounters;
+ unsigned MaxCombinedAtomicCounters;
+ unsigned MaxAtomicBufferBindings;
+
+ /* These are also atomic counter related, but they weren't added
+ * until atomic counters were added to core in GLSL 4.20 and GLSL ES
+ * 3.10.
+ */
+ unsigned MaxVertexAtomicCounterBuffers;
+ unsigned MaxTessControlAtomicCounterBuffers;
+ unsigned MaxTessEvaluationAtomicCounterBuffers;
+ unsigned MaxGeometryAtomicCounterBuffers;
+ unsigned MaxFragmentAtomicCounterBuffers;
+ unsigned MaxCombinedAtomicCounterBuffers;
+ unsigned MaxAtomicCounterBufferSize;
+
+ /* ARB_compute_shader */
+ unsigned MaxComputeAtomicCounterBuffers;
+ unsigned MaxComputeAtomicCounters;
+ unsigned MaxComputeImageUniforms;
+ unsigned MaxComputeTextureImageUnits;
+ unsigned MaxComputeUniformComponents;
+ unsigned MaxComputeWorkGroupCount[3];
+ unsigned MaxComputeWorkGroupSize[3];
+
+ /* ARB_shader_image_load_store */
+ unsigned MaxImageUnits;
+ unsigned MaxCombinedShaderOutputResources;
+ unsigned MaxImageSamples;
+ unsigned MaxVertexImageUniforms;
+ unsigned MaxTessControlImageUniforms;
+ unsigned MaxTessEvaluationImageUniforms;
+ unsigned MaxGeometryImageUniforms;
+ unsigned MaxFragmentImageUniforms;
+ unsigned MaxCombinedImageUniforms;
+
+ /* ARB_viewport_array */
+ unsigned MaxViewports;
+
+ /* ARB_tessellation_shader */
+ unsigned MaxPatchVertices;
+ unsigned MaxTessGenLevel;
+ unsigned MaxTessControlInputComponents;
+ unsigned MaxTessControlOutputComponents;
+ unsigned MaxTessControlTextureImageUnits;
+ unsigned MaxTessEvaluationInputComponents;
+ unsigned MaxTessEvaluationOutputComponents;
+ unsigned MaxTessEvaluationTextureImageUnits;
+ unsigned MaxTessPatchComponents;
+ unsigned MaxTessControlTotalOutputComponents;
+ unsigned MaxTessControlUniformComponents;
+ unsigned MaxTessEvaluationUniformComponents;
+
+ /* GL 4.5 / OES_sample_variables */
+ unsigned MaxSamples;
+ } Const;
+
+ /**
+ * During AST to IR conversion, pointer to current IR function
+ *
+ * Will be \c NULL whenever the AST to IR conversion is not inside a
+ * function definition.
+ */
+ class ir_function_signature *current_function;
+
+ /**
+ * During AST to IR conversion, pointer to the toplevel IR
+ * instruction list being generated.
+ */
+ exec_list *toplevel_ir;
+
+ /** Have we found a return statement in this function? */
+ bool found_return;
+
+ /** Have we found the interlock builtins in this function? */
+ bool found_begin_interlock;
+ bool found_end_interlock;
+
+ /** Was there an error during compilation? */
+ bool error;
+
+ /**
+ * Are all shader inputs / outputs invariant?
+ *
+ * This is set when the 'STDGL invariant(all)' pragma is used.
+ */
+ bool all_invariant;
+
+ /** Loop or switch statement containing the current instructions. */
+ class ast_iteration_statement *loop_nesting_ast;
+
+ struct glsl_switch_state switch_state;
+
+ /** List of structures defined in user code. */
+ const glsl_type **user_structures;
+ unsigned num_user_structures;
+
+ char *info_log;
+
+ /**
+ * Are warnings enabled?
+ *
+ * Emission of warnings is controlled by '#pragma warning(...)'.
+ */
+ bool warnings_enabled;
+
+ /**
+ * \name Enable bits for GLSL extensions
+ */
+ /*@{*/
+ /* ARB extensions go here, sorted alphabetically.
+ */
+ bool ARB_ES3_1_compatibility_enable;
+ bool ARB_ES3_1_compatibility_warn;
+ bool ARB_ES3_2_compatibility_enable;
+ bool ARB_ES3_2_compatibility_warn;
+ bool ARB_arrays_of_arrays_enable;
+ bool ARB_arrays_of_arrays_warn;
+ bool ARB_bindless_texture_enable;
+ bool ARB_bindless_texture_warn;
+ bool ARB_compatibility_enable;
+ bool ARB_compatibility_warn;
+ bool ARB_compute_shader_enable;
+ bool ARB_compute_shader_warn;
+ bool ARB_compute_variable_group_size_enable;
+ bool ARB_compute_variable_group_size_warn;
+ bool ARB_conservative_depth_enable;
+ bool ARB_conservative_depth_warn;
+ bool ARB_cull_distance_enable;
+ bool ARB_cull_distance_warn;
+ bool ARB_derivative_control_enable;
+ bool ARB_derivative_control_warn;
+ bool ARB_draw_buffers_enable;
+ bool ARB_draw_buffers_warn;
+ bool ARB_draw_instanced_enable;
+ bool ARB_draw_instanced_warn;
+ bool ARB_enhanced_layouts_enable;
+ bool ARB_enhanced_layouts_warn;
+ bool ARB_explicit_attrib_location_enable;
+ bool ARB_explicit_attrib_location_warn;
+ bool ARB_explicit_uniform_location_enable;
+ bool ARB_explicit_uniform_location_warn;
+ bool ARB_fragment_coord_conventions_enable;
+ bool ARB_fragment_coord_conventions_warn;
+ bool ARB_fragment_layer_viewport_enable;
+ bool ARB_fragment_layer_viewport_warn;
+ bool ARB_fragment_shader_interlock_enable;
+ bool ARB_fragment_shader_interlock_warn;
+ bool ARB_gpu_shader5_enable;
+ bool ARB_gpu_shader5_warn;
+ bool ARB_gpu_shader_fp64_enable;
+ bool ARB_gpu_shader_fp64_warn;
+ bool ARB_gpu_shader_int64_enable;
+ bool ARB_gpu_shader_int64_warn;
+ bool ARB_post_depth_coverage_enable;
+ bool ARB_post_depth_coverage_warn;
+ bool ARB_sample_shading_enable;
+ bool ARB_sample_shading_warn;
+ bool ARB_separate_shader_objects_enable;
+ bool ARB_separate_shader_objects_warn;
+ bool ARB_shader_atomic_counter_ops_enable;
+ bool ARB_shader_atomic_counter_ops_warn;
+ bool ARB_shader_atomic_counters_enable;
+ bool ARB_shader_atomic_counters_warn;
+ bool ARB_shader_ballot_enable;
+ bool ARB_shader_ballot_warn;
+ bool ARB_shader_bit_encoding_enable;
+ bool ARB_shader_bit_encoding_warn;
+ bool ARB_shader_clock_enable;
+ bool ARB_shader_clock_warn;
+ bool ARB_shader_draw_parameters_enable;
+ bool ARB_shader_draw_parameters_warn;
+ bool ARB_shader_group_vote_enable;
+ bool ARB_shader_group_vote_warn;
+ bool ARB_shader_image_load_store_enable;
+ bool ARB_shader_image_load_store_warn;
+ bool ARB_shader_image_size_enable;
+ bool ARB_shader_image_size_warn;
+ bool ARB_shader_precision_enable;
+ bool ARB_shader_precision_warn;
+ bool ARB_shader_stencil_export_enable;
+ bool ARB_shader_stencil_export_warn;
+ bool ARB_shader_storage_buffer_object_enable;
+ bool ARB_shader_storage_buffer_object_warn;
+ bool ARB_shader_subroutine_enable;
+ bool ARB_shader_subroutine_warn;
+ bool ARB_shader_texture_image_samples_enable;
+ bool ARB_shader_texture_image_samples_warn;
+ bool ARB_shader_texture_lod_enable;
+ bool ARB_shader_texture_lod_warn;
+ bool ARB_shader_viewport_layer_array_enable;
+ bool ARB_shader_viewport_layer_array_warn;
+ bool ARB_shading_language_420pack_enable;
+ bool ARB_shading_language_420pack_warn;
+ bool ARB_shading_language_include_enable;
+ bool ARB_shading_language_include_warn;
+ bool ARB_shading_language_packing_enable;
+ bool ARB_shading_language_packing_warn;
+ bool ARB_tessellation_shader_enable;
+ bool ARB_tessellation_shader_warn;
+ bool ARB_texture_cube_map_array_enable;
+ bool ARB_texture_cube_map_array_warn;
+ bool ARB_texture_gather_enable;
+ bool ARB_texture_gather_warn;
+ bool ARB_texture_multisample_enable;
+ bool ARB_texture_multisample_warn;
+ bool ARB_texture_query_levels_enable;
+ bool ARB_texture_query_levels_warn;
+ bool ARB_texture_query_lod_enable;
+ bool ARB_texture_query_lod_warn;
+ bool ARB_texture_rectangle_enable;
+ bool ARB_texture_rectangle_warn;
+ bool ARB_uniform_buffer_object_enable;
+ bool ARB_uniform_buffer_object_warn;
+ bool ARB_vertex_attrib_64bit_enable;
+ bool ARB_vertex_attrib_64bit_warn;
+ bool ARB_viewport_array_enable;
+ bool ARB_viewport_array_warn;
+
+ /* KHR extensions go here, sorted alphabetically.
+ */
+ bool KHR_blend_equation_advanced_enable;
+ bool KHR_blend_equation_advanced_warn;
+
+ /* OES extensions go here, sorted alphabetically.
+ */
+ bool OES_EGL_image_external_enable;
+ bool OES_EGL_image_external_warn;
+ bool OES_EGL_image_external_essl3_enable;
+ bool OES_EGL_image_external_essl3_warn;
+ bool OES_geometry_point_size_enable;
+ bool OES_geometry_point_size_warn;
+ bool OES_geometry_shader_enable;
+ bool OES_geometry_shader_warn;
+ bool OES_gpu_shader5_enable;
+ bool OES_gpu_shader5_warn;
+ bool OES_primitive_bounding_box_enable;
+ bool OES_primitive_bounding_box_warn;
+ bool OES_sample_variables_enable;
+ bool OES_sample_variables_warn;
+ bool OES_shader_image_atomic_enable;
+ bool OES_shader_image_atomic_warn;
+ bool OES_shader_io_blocks_enable;
+ bool OES_shader_io_blocks_warn;
+ bool OES_shader_multisample_interpolation_enable;
+ bool OES_shader_multisample_interpolation_warn;
+ bool OES_standard_derivatives_enable;
+ bool OES_standard_derivatives_warn;
+ bool OES_tessellation_point_size_enable;
+ bool OES_tessellation_point_size_warn;
+ bool OES_tessellation_shader_enable;
+ bool OES_tessellation_shader_warn;
+ bool OES_texture_3D_enable;
+ bool OES_texture_3D_warn;
+ bool OES_texture_buffer_enable;
+ bool OES_texture_buffer_warn;
+ bool OES_texture_cube_map_array_enable;
+ bool OES_texture_cube_map_array_warn;
+ bool OES_texture_storage_multisample_2d_array_enable;
+ bool OES_texture_storage_multisample_2d_array_warn;
+ bool OES_viewport_array_enable;
+ bool OES_viewport_array_warn;
+
+ /* All other extensions go here, sorted alphabetically.
+ */
+ bool AMD_conservative_depth_enable;
+ bool AMD_conservative_depth_warn;
+ bool AMD_gpu_shader_int64_enable;
+ bool AMD_gpu_shader_int64_warn;
+ bool AMD_shader_stencil_export_enable;
+ bool AMD_shader_stencil_export_warn;
+ bool AMD_shader_trinary_minmax_enable;
+ bool AMD_shader_trinary_minmax_warn;
+ bool AMD_texture_texture4_enable;
+ bool AMD_texture_texture4_warn;
+ bool AMD_vertex_shader_layer_enable;
+ bool AMD_vertex_shader_layer_warn;
+ bool AMD_vertex_shader_viewport_index_enable;
+ bool AMD_vertex_shader_viewport_index_warn;
+ bool ANDROID_extension_pack_es31a_enable;
+ bool ANDROID_extension_pack_es31a_warn;
+ bool EXT_blend_func_extended_enable;
+ bool EXT_blend_func_extended_warn;
+ bool EXT_clip_cull_distance_enable;
+ bool EXT_clip_cull_distance_warn;
+ bool EXT_demote_to_helper_invocation_enable;
+ bool EXT_demote_to_helper_invocation_warn;
+ bool EXT_draw_buffers_enable;
+ bool EXT_draw_buffers_warn;
+ bool EXT_draw_instanced_enable;
+ bool EXT_draw_instanced_warn;
+ bool EXT_frag_depth_enable;
+ bool EXT_frag_depth_warn;
+ bool EXT_geometry_point_size_enable;
+ bool EXT_geometry_point_size_warn;
+ bool EXT_geometry_shader_enable;
+ bool EXT_geometry_shader_warn;
+ bool EXT_gpu_shader4_enable;
+ bool EXT_gpu_shader4_warn;
+ bool EXT_gpu_shader5_enable;
+ bool EXT_gpu_shader5_warn;
+ bool EXT_primitive_bounding_box_enable;
+ bool EXT_primitive_bounding_box_warn;
+ bool EXT_separate_shader_objects_enable;
+ bool EXT_separate_shader_objects_warn;
+ bool EXT_shader_framebuffer_fetch_enable;
+ bool EXT_shader_framebuffer_fetch_warn;
+ bool EXT_shader_framebuffer_fetch_non_coherent_enable;
+ bool EXT_shader_framebuffer_fetch_non_coherent_warn;
+ bool EXT_shader_image_load_formatted_enable;
+ bool EXT_shader_image_load_formatted_warn;
+ bool EXT_shader_image_load_store_enable;
+ bool EXT_shader_image_load_store_warn;
+ bool EXT_shader_implicit_conversions_enable;
+ bool EXT_shader_implicit_conversions_warn;
+ bool EXT_shader_integer_mix_enable;
+ bool EXT_shader_integer_mix_warn;
+ bool EXT_shader_io_blocks_enable;
+ bool EXT_shader_io_blocks_warn;
+ bool EXT_shader_samples_identical_enable;
+ bool EXT_shader_samples_identical_warn;
+ bool EXT_tessellation_point_size_enable;
+ bool EXT_tessellation_point_size_warn;
+ bool EXT_tessellation_shader_enable;
+ bool EXT_tessellation_shader_warn;
+ bool EXT_texture_array_enable;
+ bool EXT_texture_array_warn;
+ bool EXT_texture_buffer_enable;
+ bool EXT_texture_buffer_warn;
+ bool EXT_texture_cube_map_array_enable;
+ bool EXT_texture_cube_map_array_warn;
+ bool EXT_texture_query_lod_enable;
+ bool EXT_texture_query_lod_warn;
+ bool EXT_texture_shadow_lod_enable;
+ bool EXT_texture_shadow_lod_warn;
+ bool INTEL_conservative_rasterization_enable;
+ bool INTEL_conservative_rasterization_warn;
+ bool INTEL_shader_atomic_float_minmax_enable;
+ bool INTEL_shader_atomic_float_minmax_warn;
+ bool INTEL_shader_integer_functions2_enable;
+ bool INTEL_shader_integer_functions2_warn;
+ bool MESA_shader_integer_functions_enable;
+ bool MESA_shader_integer_functions_warn;
+ bool NV_compute_shader_derivatives_enable;
+ bool NV_compute_shader_derivatives_warn;
+ bool NV_fragment_shader_interlock_enable;
+ bool NV_fragment_shader_interlock_warn;
+ bool NV_image_formats_enable;
+ bool NV_image_formats_warn;
+ bool NV_shader_atomic_float_enable;
+ bool NV_shader_atomic_float_warn;
+ bool NV_viewport_array2_enable;
+ bool NV_viewport_array2_warn;
+ /*@}*/
+
+ /** Extensions supported by the OpenGL implementation. */
+ const struct gl_extensions *extensions;
+
+ bool uses_builtin_functions;
+ bool fs_uses_gl_fragcoord;
+
+ /**
+ * For geometry shaders, size of the most recently seen input declaration
+ * that was a sized array, or 0 if no sized input array declarations have
+ * been seen.
+ *
+ * Unused for other shader types.
+ */
+ unsigned gs_input_size;
+
+ bool fs_early_fragment_tests;
+
+ bool fs_inner_coverage;
+
+ bool fs_post_depth_coverage;
+
+ bool fs_pixel_interlock_ordered;
+ bool fs_pixel_interlock_unordered;
+ bool fs_sample_interlock_ordered;
+ bool fs_sample_interlock_unordered;
+
+ unsigned fs_blend_support;
+
+ /**
+ * For tessellation control shaders, size of the most recently seen output
+ * declaration that was a sized array, or 0 if no sized output array
+ * declarations have been seen.
+ *
+ * Unused for other shader types.
+ */
+ unsigned tcs_output_size;
+
+ /** Atomic counter offsets by binding */
+ unsigned atomic_counter_offsets[MAX_COMBINED_ATOMIC_BUFFERS];
+
+ /** Whether gl_Layer output is viewport-relative. */
+ bool redeclares_gl_layer;
+ bool layer_viewport_relative;
+
+ bool allow_extension_directive_midshader;
+ bool allow_builtin_variable_redeclaration;
+ bool allow_layout_qualifier_on_function_parameter;
+
+ /**
+ * Known subroutine type declarations.
+ */
+ int num_subroutine_types;
+ ir_function **subroutine_types;
+
+ /**
+ * Functions that are associated with
+ * subroutine types.
+ */
+ int num_subroutines;
+ ir_function **subroutines;
+
+ /**
+ * Temporary parser storage for field selection:
+ * did the parser just parse a dot?
+ */
+ bool is_field;
+
+ /**
+ * Sizes seen for clip/cull distance arrays,
+ * so we can check the totals aren't too large.
+ */
+ unsigned clip_dist_size, cull_dist_size;
+};
+
+# define YYLLOC_DEFAULT(Current, Rhs, N) \
+do { \
+ if (N) \
+ { \
+ (Current).first_line = YYRHSLOC(Rhs, 1).first_line; \
+ (Current).first_column = YYRHSLOC(Rhs, 1).first_column; \
+ (Current).last_line = YYRHSLOC(Rhs, N).last_line; \
+ (Current).last_column = YYRHSLOC(Rhs, N).last_column; \
+ (Current).path = YYRHSLOC(Rhs, N).path; \
+ } \
+ else \
+ { \
+ (Current).first_line = (Current).last_line = \
+ YYRHSLOC(Rhs, 0).last_line; \
+ (Current).first_column = (Current).last_column = \
+ YYRHSLOC(Rhs, 0).last_column; \
+ (Current).path = YYRHSLOC(Rhs, 0).path; \
+ } \
+ (Current).source = 0; \
+} while (0)
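+
+/* Illustrative note (not part of the upstream header): Bison expands
+ * YYLLOC_DEFAULT after reducing each grammar rule.  For a three-symbol
+ * rule such as "expr: expr '+' expr", N is 3, so the reduced location
+ * spans from the first line/column of YYRHSLOC(Rhs, 1) (the left operand)
+ * to the last line/column of YYRHSLOC(Rhs, N) (the right operand).  The
+ * N == 0 branch handles empty rules by collapsing the location to the
+ * point just after the previous symbol.
+ */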
+
+/**
+ * Emit a warning to the shader log
+ *
+ * \sa _mesa_glsl_error
+ */
+extern void _mesa_glsl_warning(const YYLTYPE *locp,
+ _mesa_glsl_parse_state *state,
+ const char *fmt, ...);
+
+extern void _mesa_glsl_lexer_ctor(struct _mesa_glsl_parse_state *state,
+ const char *string);
+
+extern void _mesa_glsl_lexer_dtor(struct _mesa_glsl_parse_state *state);
+
+union YYSTYPE;
+extern int _mesa_glsl_lexer_lex(union YYSTYPE *yylval, YYLTYPE *yylloc,
+ void *scanner);
+
+extern int _mesa_glsl_parse(struct _mesa_glsl_parse_state *);
+
+/**
+ * Process elements of the #extension directive
+ *
+ * \return
+ * If \c name and \c behavior are valid, \c true is returned. Otherwise
+ * \c false is returned.
+ */
+extern bool _mesa_glsl_process_extension(const char *name, YYLTYPE *name_locp,
+ const char *behavior,
+ YYLTYPE *behavior_locp,
+ _mesa_glsl_parse_state *state);
+
+#endif /* __cplusplus */
+
+
+/*
+ * These definitions apply to C and C++
+ */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct glcpp_parser;
+struct _mesa_glsl_parse_state;
+
+typedef void (*glcpp_extension_iterator)(
+ struct _mesa_glsl_parse_state *state,
+ void (*add_builtin_define)(struct glcpp_parser *, const char *, int),
+ struct glcpp_parser *data,
+ unsigned version,
+ bool es);
+
+extern int glcpp_preprocess(void *ctx, const char **shader, char **info_log,
+ glcpp_extension_iterator extensions,
+ struct _mesa_glsl_parse_state *state,
+ struct gl_context *gl_ctx);
+
+void add_builtin_defines(struct _mesa_glsl_parse_state *state,
+ void (*add_builtin_define)(struct glcpp_parser *, const char *, int),
+ struct glcpp_parser *data,
+ unsigned version,
+ bool es);
+
+extern void
+_mesa_glsl_copy_symbols_from_table(struct exec_list *shader_ir,
+ struct glsl_symbol_table *src,
+ struct glsl_symbol_table *dest);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* GLSL_PARSER_EXTRAS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp
new file mode 100644
index 0000000000..9ae5fd3e55
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp
@@ -0,0 +1,294 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "glsl_symbol_table.h"
+#include "ast.h"
+
+class symbol_table_entry {
+public:
+ DECLARE_LINEAR_ALLOC_CXX_OPERATORS(symbol_table_entry);
+
+ bool add_interface(const glsl_type *i, enum ir_variable_mode mode)
+ {
+ const glsl_type **dest;
+
+ switch (mode) {
+ case ir_var_uniform:
+ dest = &ibu;
+ break;
+ case ir_var_shader_storage:
+ dest = &iss;
+ break;
+ case ir_var_shader_in:
+ dest = &ibi;
+ break;
+ case ir_var_shader_out:
+ dest = &ibo;
+ break;
+ default:
+ assert(!"Unsupported interface variable mode!");
+ return false;
+ }
+
+ if (*dest != NULL) {
+ return false;
+ } else {
+ *dest = i;
+ return true;
+ }
+ }
+
+ const glsl_type *get_interface(enum ir_variable_mode mode)
+ {
+ switch (mode) {
+ case ir_var_uniform:
+ return ibu;
+ case ir_var_shader_storage:
+ return iss;
+ case ir_var_shader_in:
+ return ibi;
+ case ir_var_shader_out:
+ return ibo;
+ default:
+ assert(!"Unsupported interface variable mode!");
+ return NULL;
+ }
+ }
+
+ symbol_table_entry(ir_variable *v) :
+ v(v), f(0), t(0), ibu(0), iss(0), ibi(0), ibo(0), a(0) {}
+ symbol_table_entry(ir_function *f) :
+ v(0), f(f), t(0), ibu(0), iss(0), ibi(0), ibo(0), a(0) {}
+ symbol_table_entry(const glsl_type *t) :
+ v(0), f(0), t(t), ibu(0), iss(0), ibi(0), ibo(0), a(0) {}
+ symbol_table_entry(const glsl_type *t, enum ir_variable_mode mode) :
+ v(0), f(0), t(0), ibu(0), iss(0), ibi(0), ibo(0), a(0)
+ {
+ assert(t->is_interface());
+ add_interface(t, mode);
+ }
+ symbol_table_entry(const class ast_type_specifier *a):
+ v(0), f(0), t(0), ibu(0), iss(0), ibi(0), ibo(0), a(a) {}
+
+ ir_variable *v;
+ ir_function *f;
+ const glsl_type *t;
+ const glsl_type *ibu;
+ const glsl_type *iss;
+ const glsl_type *ibi;
+ const glsl_type *ibo;
+ const class ast_type_specifier *a;
+};
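+
+/* Illustrative note (not part of the upstream sources): an entry keeps one
+ * interface-block slot per variable mode (ibu/iss/ibi/ibo) because GLSL
+ * allows the same block name to be declared with different interface
+ * qualifiers.  A hedged sketch of the lookup pattern, assuming a populated
+ * glsl_symbol_table named "symbols":
+ *
+ *    const glsl_type *in_blk =
+ *       symbols->get_interface("Light", ir_var_shader_in);
+ *    const glsl_type *out_blk =
+ *       symbols->get_interface("Light", ir_var_shader_out);
+ *
+ * Both lookups can succeed independently, since they read separate fields
+ * of the same entry.
+ */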
+
+glsl_symbol_table::glsl_symbol_table()
+{
+ this->separate_function_namespace = false;
+ this->table = _mesa_symbol_table_ctor();
+ this->mem_ctx = ralloc_context(NULL);
+ this->linalloc = linear_alloc_parent(this->mem_ctx, 0);
+}
+
+glsl_symbol_table::~glsl_symbol_table()
+{
+ _mesa_symbol_table_dtor(table);
+ ralloc_free(mem_ctx);
+}
+
+void glsl_symbol_table::push_scope()
+{
+ _mesa_symbol_table_push_scope(table);
+}
+
+void glsl_symbol_table::pop_scope()
+{
+ _mesa_symbol_table_pop_scope(table);
+}
+
+bool glsl_symbol_table::name_declared_this_scope(const char *name)
+{
+ return _mesa_symbol_table_symbol_scope(table, name) == 0;
+}
+
+bool glsl_symbol_table::add_variable(ir_variable *v)
+{
+ assert(v->data.mode != ir_var_temporary);
+
+ if (this->separate_function_namespace) {
+ /* In 1.10, functions and variables have separate namespaces. */
+ symbol_table_entry *existing = get_entry(v->name);
+ if (name_declared_this_scope(v->name)) {
+ /* If there's already an existing function (not a constructor!) in
+ * the current scope, just update the existing entry to include 'v'.
+ */
+ if (existing->v == NULL && existing->t == NULL) {
+ existing->v = v;
+ return true;
+ }
+ } else {
+ /* If not declared at this scope, add a new entry. But if an existing
+ * entry includes a function, propagate that to this block - otherwise
+ * the new variable declaration would shadow the function.
+ */
+ symbol_table_entry *entry = new(linalloc) symbol_table_entry(v);
+ if (existing != NULL)
+ entry->f = existing->f;
+ int added = _mesa_symbol_table_add_symbol(table, v->name, entry);
+ assert(added == 0);
+ (void)added;
+ return true;
+ }
+ return false;
+ }
+
+ /* 1.20+ rules: */
+ symbol_table_entry *entry = new(linalloc) symbol_table_entry(v);
+ return _mesa_symbol_table_add_symbol(table, v->name, entry) == 0;
+}
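+
+/* Illustrative sketch (not part of the upstream sources) of the GLSL 1.10
+ * separate-namespace path above.  Assuming "table" is a glsl_symbol_table
+ * with separate_function_namespace set, a variable may reuse the name of a
+ * function declared in the same scope:
+ *
+ *    table->add_function(f);   // ir_function named "foo"
+ *    table->add_variable(v);   // ir_variable also named "foo"
+ *
+ * The second call finds the existing entry, sees existing->v == NULL and
+ * existing->t == NULL, and stores the variable alongside the function
+ * rather than failing.
+ */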
+
+bool glsl_symbol_table::add_type(const char *name, const glsl_type *t)
+{
+ symbol_table_entry *entry = new(linalloc) symbol_table_entry(t);
+ return _mesa_symbol_table_add_symbol(table, name, entry) == 0;
+}
+
+bool glsl_symbol_table::add_interface(const char *name, const glsl_type *i,
+ enum ir_variable_mode mode)
+{
+ assert(i->is_interface());
+ symbol_table_entry *entry = get_entry(name);
+ if (entry == NULL) {
+ symbol_table_entry *entry =
+ new(linalloc) symbol_table_entry(i, mode);
+ bool add_interface_symbol_result =
+ _mesa_symbol_table_add_symbol(table, name, entry) == 0;
+ assert(add_interface_symbol_result);
+ return add_interface_symbol_result;
+ } else {
+ return entry->add_interface(i, mode);
+ }
+}
+
+bool glsl_symbol_table::add_function(ir_function *f)
+{
+ if (this->separate_function_namespace && name_declared_this_scope(f->name)) {
+ /* In 1.10, functions and variables have separate namespaces. */
+ symbol_table_entry *existing = get_entry(f->name);
+ if ((existing->f == NULL) && (existing->t == NULL)) {
+ existing->f = f;
+ return true;
+ }
+ }
+ symbol_table_entry *entry = new(linalloc) symbol_table_entry(f);
+ return _mesa_symbol_table_add_symbol(table, f->name, entry) == 0;
+}
+
+bool glsl_symbol_table::add_default_precision_qualifier(const char *type_name,
+ int precision)
+{
+ char *name = ralloc_asprintf(mem_ctx, "#default_precision_%s", type_name);
+
+ ast_type_specifier *default_specifier = new(linalloc) ast_type_specifier(name);
+ default_specifier->default_precision = precision;
+
+ symbol_table_entry *entry =
+ new(linalloc) symbol_table_entry(default_specifier);
+
+ if (!get_entry(name))
+ return _mesa_symbol_table_add_symbol(table, name, entry) == 0;
+
+ return _mesa_symbol_table_replace_symbol(table, name, entry) == 0;
+}
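+
+/* Illustrative note (not part of the upstream sources): default precision
+ * qualifiers are stored as specially named type-specifier entries.  For
+ * example, "precision mediump float;" is recorded under the key
+ * "#default_precision_float"; the leading '#' guarantees that no shader
+ * identifier can collide with it.  get_default_precision_qualifier() below
+ * rebuilds the same key to read the value back.
+ */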
+
+void glsl_symbol_table::add_global_function(ir_function *f)
+{
+ symbol_table_entry *entry = new(linalloc) symbol_table_entry(f);
+ int added = _mesa_symbol_table_add_global_symbol(table, f->name, entry);
+ assert(added == 0);
+ (void)added;
+}
+
+ir_variable *glsl_symbol_table::get_variable(const char *name)
+{
+ symbol_table_entry *entry = get_entry(name);
+ return entry != NULL ? entry->v : NULL;
+}
+
+const glsl_type *glsl_symbol_table::get_type(const char *name)
+{
+ symbol_table_entry *entry = get_entry(name);
+ return entry != NULL ? entry->t : NULL;
+}
+
+const glsl_type *glsl_symbol_table::get_interface(const char *name,
+ enum ir_variable_mode mode)
+{
+ symbol_table_entry *entry = get_entry(name);
+ return entry != NULL ? entry->get_interface(mode) : NULL;
+}
+
+ir_function *glsl_symbol_table::get_function(const char *name)
+{
+ symbol_table_entry *entry = get_entry(name);
+ return entry != NULL ? entry->f : NULL;
+}
+
+int glsl_symbol_table::get_default_precision_qualifier(const char *type_name)
+{
+ char *name = ralloc_asprintf(mem_ctx, "#default_precision_%s", type_name);
+ symbol_table_entry *entry = get_entry(name);
+ if (!entry)
+ return ast_precision_none;
+ return entry->a->default_precision;
+}
+
+symbol_table_entry *glsl_symbol_table::get_entry(const char *name)
+{
+ return (symbol_table_entry *)
+ _mesa_symbol_table_find_symbol(table, name);
+}
+
+void
+glsl_symbol_table::disable_variable(const char *name)
+{
+ /* Ideally we would remove the variable's entry from the symbol table, but
+ * that would be difficult. Fortunately, since this is only used for
+ * built-in variables, it won't be possible for the shader to re-introduce
+ * the variable later, so all we really need to do is to make sure that
+ * further attempts to access it using get_variable() will return NULL.
+ */
+ symbol_table_entry *entry = get_entry(name);
+ if (entry != NULL) {
+ entry->v = NULL;
+ }
+}
+
+void
+glsl_symbol_table::replace_variable(const char *name,
+ ir_variable *v)
+{
+ symbol_table_entry *entry = get_entry(name);
+ if (entry != NULL) {
+ entry->v = v;
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h
new file mode 100644
index 0000000000..c8ab690eb5
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h
@@ -0,0 +1,113 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_SYMBOL_TABLE
+#define GLSL_SYMBOL_TABLE
+
+#include <new>
+
+#include "program/symbol_table.h"
+#include "ir.h"
+
+class symbol_table_entry;
+struct glsl_type;
+
+/**
+ * Facade class for _mesa_symbol_table
+ *
+ * Wraps the existing \c _mesa_symbol_table data structure to enforce some
+ * type safety and some symbol table invariants.
+ */
+struct glsl_symbol_table {
+ DECLARE_RALLOC_CXX_OPERATORS(glsl_symbol_table)
+
+ glsl_symbol_table();
+ ~glsl_symbol_table();
+
+ /* In 1.10, functions and variables have separate namespaces. */
+ bool separate_function_namespace;
+
+ void push_scope();
+ void pop_scope();
+
+ /**
+ * Determine whether a name was declared at the current scope
+ */
+ bool name_declared_this_scope(const char *name);
+
+ /**
+ * \name Methods to add symbols to the table
+ *
+ * There is some temptation to rename all these functions to \c add_symbol
+ * or similar. However, this breaks symmetry with the getter functions and
+ * reduces the clarity of the intention of code that uses these methods.
+ */
+ /*@{*/
+ bool add_variable(ir_variable *v);
+ bool add_type(const char *name, const glsl_type *t);
+ bool add_function(ir_function *f);
+ bool add_interface(const char *name, const glsl_type *i,
+ enum ir_variable_mode mode);
+ bool add_default_precision_qualifier(const char *type_name, int precision);
+ /*@}*/
+
+ /**
+ * Add a function at global scope without checking for scoping conflicts.
+ */
+ void add_global_function(ir_function *f);
+
+ /**
+ * \name Methods to get symbols from the table
+ */
+ /*@{*/
+ ir_variable *get_variable(const char *name);
+ const glsl_type *get_type(const char *name);
+ ir_function *get_function(const char *name);
+ const glsl_type *get_interface(const char *name,
+ enum ir_variable_mode mode);
+ int get_default_precision_qualifier(const char *type_name);
+ /*@}*/
+
+ /**
+ * Disable a previously-added variable so that it no longer appears to be
+ * in the symbol table. This is necessary when gl_PerVertex is redeclared,
+ * to ensure that previously-available built-in variables are no longer
+ * available.
+ */
+ void disable_variable(const char *name);
+
+ /**
+ * Replaces the variable in the entry by the new variable.
+ */
+ void replace_variable(const char *name, ir_variable *v);
+
+private:
+ symbol_table_entry *get_entry(const char *name);
+
+ struct _mesa_symbol_table *table;
+ void *mem_ctx;
+ void *linalloc;
+};
+
+#endif /* GLSL_SYMBOL_TABLE */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp
new file mode 100644
index 0000000000..cb499d43b0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "glsl_parser_extras.h"
+#include "ast.h"
+#include "compiler/glsl_types.h"
+
+ir_rvalue *
+_mesa_ast_field_selection_to_hir(const ast_expression *expr,
+ exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ void *ctx = state;
+ ir_rvalue *result = NULL;
+ ir_rvalue *op;
+
+ op = expr->subexpressions[0]->hir(instructions, state);
+
+ /* There are two kinds of field selection. There is the selection of a
+ * specific field from a structure, and there is the selection of a
+ * swizzle / mask from a vector. Which is which is determined entirely
+ * by the base type of the thing to which the field selection operator is
+ * being applied.
+ */
+ YYLTYPE loc = expr->get_location();
+ if (op->type->is_error()) {
+ /* silently propagate the error */
+ } else if (op->type->is_struct() || op->type->is_interface()) {
+ result = new(ctx) ir_dereference_record(op,
+ expr->primary_expression.identifier);
+
+ if (result->type->is_error()) {
+ _mesa_glsl_error(& loc, state, "cannot access field `%s' of "
+ "structure",
+ expr->primary_expression.identifier);
+ }
+ } else if (op->type->is_vector() ||
+ (state->has_420pack() && op->type->is_scalar())) {
+ ir_swizzle *swiz = ir_swizzle::create(op,
+ expr->primary_expression.identifier,
+ op->type->vector_elements);
+ if (swiz != NULL) {
+ result = swiz;
+ } else {
+ /* FINISHME: Logging of error messages should be moved into
+ * FINISHME: ir_swizzle::create. This allows the generation of more
+ * FINISHME: specific error messages.
+ */
+ _mesa_glsl_error(& loc, state, "invalid swizzle / mask `%s'",
+ expr->primary_expression.identifier);
+ }
+ } else {
+ _mesa_glsl_error(& loc, state, "cannot access field `%s' of "
+ "non-structure / non-vector",
+ expr->primary_expression.identifier);
+ }
+
+ return result ? result : ir_rvalue::error_value(ctx);
+}
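+
+/* Illustrative note (not part of the upstream sources): the two cases this
+ * function distinguishes correspond to GLSL source such as
+ *
+ *    s.radius   // record access: "s" is a struct, yielding
+ *               // ir_dereference_record
+ *    v.xyz      // swizzle: "v" is a vector, yielding ir_swizzle
+ *
+ * With GL_ARB_shading_language_420pack, scalars also accept swizzles
+ * (e.g. "f.x"), which is why has_420pack() widens the second case.
+ */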
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/int64.glsl b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/int64.glsl
new file mode 100644
index 0000000000..538f56cff1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/int64.glsl
@@ -0,0 +1,121 @@
+/* Compile with:
+ *
+ * glsl_compiler --version 400 --dump-builder int64.glsl > builtin_int64.h
+ *
+ * Version 4.00+ is required for umulExtended.
+ */
+#version 400
+#extension GL_ARB_gpu_shader_int64: require
+#extension GL_ARB_shading_language_420pack: require
+
+uvec2
+umul64(uvec2 a, uvec2 b)
+{
+ uvec2 result;
+
+ umulExtended(a.x, b.x, result.y, result.x);
+ result.y += a.x * b.y + a.y * b.x;
+
+ return result;
+}
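+
+/* Illustrative note (not part of the upstream sources): writing the inputs
+ * as a = a.y * 2^32 + a.x and b = b.y * 2^32 + b.x, the low 64 bits of the
+ * product are
+ *
+ *    a * b mod 2^64 = a.x * b.x + 2^32 * (a.x * b.y + a.y * b.x)
+ *
+ * umulExtended() produces the full 64-bit a.x * b.x term, and the two
+ * cross terms can only affect the high word, so they are added directly
+ * into result.y.  The a.y * b.y term lies entirely above bit 63 and is
+ * dropped.
+ */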
+
+ivec2
+sign64(ivec2 a)
+{
+ ivec2 result;
+
+ result.y = a.y >> 31;
+ result.x = result.y | int((a.x | a.y) != 0);
+
+ return result;
+}
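+
+/* Illustrative note (not part of the upstream sources): a.y >> 31 is an
+ * arithmetic shift, so result.y becomes -1 (all ones) for negative inputs
+ * and 0 otherwise.  OR-ing that with int((a.x | a.y) != 0), which is 1
+ * exactly when the value is non-zero, leaves result.x as -1, 0 or 1.
+ * Interpreted as a packed 64-bit integer, the pair encodes -1, 0 or +1.
+ */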
+
+uvec4
+udivmod64(uvec2 n, uvec2 d)
+{
+ uvec2 quot = uvec2(0U, 0U);
+ int log2_denom = findMSB(d.y) + 32;
+
+ /* If the upper 32 bits of denom are non-zero, it is impossible for shifts
+ * greater than 32 bits to occur. If the upper 32 bits of the numerator
+ * are zero, it is impossible for (denom << [63, 32]) <= numer unless
+ * denom == 0.
+ */
+ if (d.y == 0 && n.y >= d.x) {
+ log2_denom = findMSB(d.x);
+
+ /* Since the upper 32 bits of denom are zero, log2_denom <= 31 and we
+ * don't have to compare log2_denom inside the loop as is done in the
+ * general case (below).
+ */
+ for (int i = 31; i >= 1; i--) {
+ if (log2_denom <= 31 - i && (d.x << i) <= n.y) {
+ n.y -= d.x << i;
+ quot.y |= 1U << i;
+ }
+ }
+
+ /* log2_denom is always <= 31, so manually peel the last loop
+ * iteration.
+ */
+ if (d.x <= n.y) {
+ n.y -= d.x;
+ quot.y |= 1U;
+ }
+ }
+
+ uint64_t d64 = packUint2x32(d);
+ uint64_t n64 = packUint2x32(n);
+ for (int i = 31; i >= 1; i--) {
+ if (log2_denom <= 63 - i && (d64 << i) <= n64) {
+ n64 -= d64 << i;
+ quot.x |= 1U << i;
+ }
+ }
+
+ /* log2_denom is always <= 63, so manually peel the last loop
+ * iteration.
+ */
+ if (d64 <= n64) {
+ n64 -= d64;
+ quot.x |= 1U;
+ }
+
+ return uvec4(quot, unpackUint2x32(n64));
+}
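+
+/* Illustrative note (not part of the upstream sources): this is classic
+ * shift-and-subtract long division: at each step, if the denominator
+ * shifted left by i still fits under the remaining numerator, it is
+ * subtracted and bit i of the quotient is set.  A small worked example in
+ * plain integers, 11 / 3: 3 << 1 = 6 <= 11, so subtract (remainder 5, set
+ * quotient bit 1); 3 <= 5, so subtract (remainder 2, set quotient bit 0);
+ * result: quotient 3, remainder 2.  The first loop above is a fast path
+ * for denominators that fit in 32 bits, letting the high quotient word be
+ * computed with 32-bit operations only.
+ */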
+
+uvec2
+udiv64(uvec2 n, uvec2 d)
+{
+ return udivmod64(n, d).xy;
+}
+
+ivec2
+idiv64(ivec2 _n, ivec2 _d)
+{
+ const bool negate = (_n.y < 0) != (_d.y < 0);
+ uvec2 n = unpackUint2x32(uint64_t(abs(packInt2x32(_n))));
+ uvec2 d = unpackUint2x32(uint64_t(abs(packInt2x32(_d))));
+
+ uvec2 quot = udivmod64(n, d).xy;
+
+ return negate ? unpackInt2x32(-int64_t(packUint2x32(quot))) : ivec2(quot);
+}
+
+uvec2
+umod64(uvec2 n, uvec2 d)
+{
+ return udivmod64(n, d).zw;
+}
+
+ivec2
+imod64(ivec2 _n, ivec2 _d)
+{
+ const bool negate = (_n.y < 0) != (_d.y < 0);
+ uvec2 n = unpackUint2x32(uint64_t(abs(packInt2x32(_n))));
+ uvec2 d = unpackUint2x32(uint64_t(abs(packInt2x32(_d))));
+
+ uvec2 rem = udivmod64(n, d).zw;
+
+ return negate ? unpackInt2x32(-int64_t(packUint2x32(rem))) : ivec2(rem);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.cpp
new file mode 100644
index 0000000000..9ea8e00d31
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.cpp
@@ -0,0 +1,2237 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <string.h>
+#include "ir.h"
+#include "util/half_float.h"
+#include "compiler/glsl_types.h"
+#include "glsl_parser_extras.h"
+
+
+ir_rvalue::ir_rvalue(enum ir_node_type t)
+ : ir_instruction(t)
+{
+ this->type = glsl_type::error_type;
+}
+
+bool ir_rvalue::is_zero() const
+{
+ return false;
+}
+
+bool ir_rvalue::is_one() const
+{
+ return false;
+}
+
+bool ir_rvalue::is_negative_one() const
+{
+ return false;
+}
+
+/**
+ * Modify the swizzle mask to move one component to another
+ *
+ * \param m IR swizzle to be modified
+ * \param from Component in the RHS that is to be swizzled
+ * \param to Desired swizzle location of \c from
+ */
+static void
+update_rhs_swizzle(ir_swizzle_mask &m, unsigned from, unsigned to)
+{
+ switch (to) {
+ case 0: m.x = from; break;
+ case 1: m.y = from; break;
+ case 2: m.z = from; break;
+ case 3: m.w = from; break;
+ default: assert(!"Should not get here.");
+ }
+}
+
+void
+ir_assignment::set_lhs(ir_rvalue *lhs)
+{
+ void *mem_ctx = this;
+ bool swizzled = false;
+
+ while (lhs != NULL) {
+ ir_swizzle *swiz = lhs->as_swizzle();
+
+ if (swiz == NULL)
+ break;
+
+ unsigned write_mask = 0;
+ ir_swizzle_mask rhs_swiz = { 0, 0, 0, 0, 0, 0 };
+
+ for (unsigned i = 0; i < swiz->mask.num_components; i++) {
+ unsigned c = 0;
+
+ switch (i) {
+ case 0: c = swiz->mask.x; break;
+ case 1: c = swiz->mask.y; break;
+ case 2: c = swiz->mask.z; break;
+ case 3: c = swiz->mask.w; break;
+ default: assert(!"Should not get here.");
+ }
+
+ write_mask |= (((this->write_mask >> i) & 1) << c);
+ update_rhs_swizzle(rhs_swiz, i, c);
+ rhs_swiz.num_components = swiz->val->type->vector_elements;
+ }
+
+ this->write_mask = write_mask;
+ lhs = swiz->val;
+
+ this->rhs = new(mem_ctx) ir_swizzle(this->rhs, rhs_swiz);
+ swizzled = true;
+ }
+
+ if (swizzled) {
+ /* Now, RHS channels line up with the LHS writemask. Collapse it
+ * to just the channels that will be written.
+ */
+ ir_swizzle_mask rhs_swiz = { 0, 0, 0, 0, 0, 0 };
+ int rhs_chan = 0;
+ for (int i = 0; i < 4; i++) {
+ if (write_mask & (1 << i))
+ update_rhs_swizzle(rhs_swiz, i, rhs_chan++);
+ }
+ rhs_swiz.num_components = rhs_chan;
+ this->rhs = new(mem_ctx) ir_swizzle(this->rhs, rhs_swiz);
+ }
+
+ assert((lhs == NULL) || lhs->as_dereference());
+
+ this->lhs = (ir_dereference *) lhs;
+}
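+
+/* Illustrative sketch (not part of the upstream sources) of the transform
+ * performed by set_lhs(), for a hypothetical assignment "v.zy = e" where
+ * e is a vec2:
+ *
+ *    before: (assign (xy) (swiz zy (var_ref v)) (var_ref e))
+ *    after:  (assign (yz) (var_ref v) (swiz yx (var_ref e)))
+ *
+ * The LHS swizzle is folded into the write mask (zy selects channels y and
+ * z of v), and the RHS is re-swizzled so that, reading set mask bits from
+ * low to high, e.y lands in v.y and e.x lands in v.z.
+ */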
+
+ir_variable *
+ir_assignment::whole_variable_written()
+{
+ ir_variable *v = this->lhs->whole_variable_referenced();
+
+ if (v == NULL)
+ return NULL;
+
+ if (v->type->is_scalar())
+ return v;
+
+ if (v->type->is_vector()) {
+ const unsigned mask = (1U << v->type->vector_elements) - 1;
+
+ if (mask != this->write_mask)
+ return NULL;
+ }
+
+ /* Either all the vector components are assigned or the variable is some
+ * composite type (and the whole thing is assigned).
+ */
+ return v;
+}
+
+ir_assignment::ir_assignment(ir_dereference *lhs, ir_rvalue *rhs,
+ ir_rvalue *condition, unsigned write_mask)
+ : ir_instruction(ir_type_assignment)
+{
+ this->condition = condition;
+ this->rhs = rhs;
+ this->lhs = lhs;
+ this->write_mask = write_mask;
+
+ if (lhs->type->is_scalar() || lhs->type->is_vector()) {
+ int lhs_components = 0;
+ for (int i = 0; i < 4; i++) {
+ if (write_mask & (1 << i))
+ lhs_components++;
+ }
+
+ assert(lhs_components == this->rhs->type->vector_elements);
+ }
+}
+
+ir_assignment::ir_assignment(ir_rvalue *lhs, ir_rvalue *rhs,
+ ir_rvalue *condition)
+ : ir_instruction(ir_type_assignment)
+{
+ this->condition = condition;
+ this->rhs = rhs;
+
+ /* If the RHS is a vector type, assume that all components of the vector
+ * type are being written to the LHS. The write mask comes from the RHS
+ * because we can have a case where the LHS is a vec4 and the RHS is a
+ * vec3. In that case, the assignment is:
+ *
+ * (assign (...) (xyz) (var_ref lhs) (var_ref rhs))
+ */
+ if (rhs->type->is_vector())
+ this->write_mask = (1U << rhs->type->vector_elements) - 1;
+ else if (rhs->type->is_scalar())
+ this->write_mask = 1;
+ else
+ this->write_mask = 0;
+
+ this->set_lhs(lhs);
+}
+
+ir_expression::ir_expression(int op, const struct glsl_type *type,
+ ir_rvalue *op0, ir_rvalue *op1,
+ ir_rvalue *op2, ir_rvalue *op3)
+ : ir_rvalue(ir_type_expression)
+{
+ this->type = type;
+ this->operation = ir_expression_operation(op);
+ this->operands[0] = op0;
+ this->operands[1] = op1;
+ this->operands[2] = op2;
+ this->operands[3] = op3;
+ init_num_operands();
+
+#ifndef NDEBUG
+ for (unsigned i = num_operands; i < 4; i++) {
+ assert(this->operands[i] == NULL);
+ }
+
+ for (unsigned i = 0; i < num_operands; i++) {
+ assert(this->operands[i] != NULL);
+ }
+#endif
+}
+
+ir_expression::ir_expression(int op, ir_rvalue *op0)
+ : ir_rvalue(ir_type_expression)
+{
+ this->operation = ir_expression_operation(op);
+ this->operands[0] = op0;
+ this->operands[1] = NULL;
+ this->operands[2] = NULL;
+ this->operands[3] = NULL;
+
+ assert(op <= ir_last_unop);
+ init_num_operands();
+ assert(num_operands == 1);
+ assert(this->operands[0]);
+
+ switch (this->operation) {
+ case ir_unop_bit_not:
+ case ir_unop_logic_not:
+ case ir_unop_neg:
+ case ir_unop_abs:
+ case ir_unop_sign:
+ case ir_unop_rcp:
+ case ir_unop_rsq:
+ case ir_unop_sqrt:
+ case ir_unop_exp:
+ case ir_unop_log:
+ case ir_unop_exp2:
+ case ir_unop_log2:
+ case ir_unop_trunc:
+ case ir_unop_ceil:
+ case ir_unop_floor:
+ case ir_unop_fract:
+ case ir_unop_round_even:
+ case ir_unop_sin:
+ case ir_unop_cos:
+ case ir_unop_dFdx:
+ case ir_unop_dFdx_coarse:
+ case ir_unop_dFdx_fine:
+ case ir_unop_dFdy:
+ case ir_unop_dFdy_coarse:
+ case ir_unop_dFdy_fine:
+ case ir_unop_bitfield_reverse:
+ case ir_unop_interpolate_at_centroid:
+ case ir_unop_clz:
+ case ir_unop_saturate:
+ case ir_unop_atan:
+ this->type = op0->type;
+ break;
+
+ case ir_unop_f2i:
+ case ir_unop_b2i:
+ case ir_unop_u2i:
+ case ir_unop_d2i:
+ case ir_unop_bitcast_f2i:
+ case ir_unop_bit_count:
+ case ir_unop_find_msb:
+ case ir_unop_find_lsb:
+ case ir_unop_subroutine_to_int:
+ case ir_unop_i642i:
+ case ir_unop_u642i:
+ this->type = glsl_type::get_instance(GLSL_TYPE_INT,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_b2f:
+ case ir_unop_i2f:
+ case ir_unop_u2f:
+ case ir_unop_d2f:
+ case ir_unop_f162f:
+ case ir_unop_bitcast_i2f:
+ case ir_unop_bitcast_u2f:
+ case ir_unop_i642f:
+ case ir_unop_u642f:
+ this->type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_f2f16:
+ case ir_unop_f2fmp:
+ case ir_unop_b2f16:
+ this->type = glsl_type::get_instance(GLSL_TYPE_FLOAT16,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_f2b:
+ case ir_unop_i2b:
+ case ir_unop_d2b:
+ case ir_unop_f162b:
+ case ir_unop_i642b:
+ this->type = glsl_type::get_instance(GLSL_TYPE_BOOL,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_f2d:
+ case ir_unop_i2d:
+ case ir_unop_u2d:
+ case ir_unop_i642d:
+ case ir_unop_u642d:
+ this->type = glsl_type::get_instance(GLSL_TYPE_DOUBLE,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_i2u:
+ case ir_unop_f2u:
+ case ir_unop_d2u:
+ case ir_unop_bitcast_f2u:
+ case ir_unop_i642u:
+ case ir_unop_u642u:
+ this->type = glsl_type::get_instance(GLSL_TYPE_UINT,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_i2i64:
+ case ir_unop_u2i64:
+ case ir_unop_b2i64:
+ case ir_unop_f2i64:
+ case ir_unop_d2i64:
+ case ir_unop_u642i64:
+ this->type = glsl_type::get_instance(GLSL_TYPE_INT64,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_i2u64:
+ case ir_unop_u2u64:
+ case ir_unop_f2u64:
+ case ir_unop_d2u64:
+ case ir_unop_i642u64:
+ this->type = glsl_type::get_instance(GLSL_TYPE_UINT64,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_unpack_double_2x32:
+ case ir_unop_unpack_uint_2x32:
+ this->type = glsl_type::uvec2_type;
+ break;
+
+ case ir_unop_unpack_int_2x32:
+ this->type = glsl_type::ivec2_type;
+ break;
+
+ case ir_unop_pack_snorm_2x16:
+ case ir_unop_pack_snorm_4x8:
+ case ir_unop_pack_unorm_2x16:
+ case ir_unop_pack_unorm_4x8:
+ case ir_unop_pack_half_2x16:
+ this->type = glsl_type::uint_type;
+ break;
+
+ case ir_unop_pack_double_2x32:
+ this->type = glsl_type::double_type;
+ break;
+
+ case ir_unop_pack_int_2x32:
+ this->type = glsl_type::int64_t_type;
+ break;
+
+ case ir_unop_pack_uint_2x32:
+ this->type = glsl_type::uint64_t_type;
+ break;
+
+ case ir_unop_unpack_snorm_2x16:
+ case ir_unop_unpack_unorm_2x16:
+ case ir_unop_unpack_half_2x16:
+ this->type = glsl_type::vec2_type;
+ break;
+
+ case ir_unop_unpack_snorm_4x8:
+ case ir_unop_unpack_unorm_4x8:
+ this->type = glsl_type::vec4_type;
+ break;
+
+ case ir_unop_unpack_sampler_2x32:
+ case ir_unop_unpack_image_2x32:
+ this->type = glsl_type::uvec2_type;
+ break;
+
+ case ir_unop_pack_sampler_2x32:
+ case ir_unop_pack_image_2x32:
+ this->type = op0->type;
+ break;
+
+ case ir_unop_frexp_sig:
+ this->type = op0->type;
+ break;
+ case ir_unop_frexp_exp:
+ this->type = glsl_type::get_instance(GLSL_TYPE_INT,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_get_buffer_size:
+ case ir_unop_ssbo_unsized_array_length:
+ this->type = glsl_type::int_type;
+ break;
+
+ case ir_unop_bitcast_i642d:
+ case ir_unop_bitcast_u642d:
+ this->type = glsl_type::get_instance(GLSL_TYPE_DOUBLE,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_unop_bitcast_d2i64:
+ this->type = glsl_type::get_instance(GLSL_TYPE_INT64,
+ op0->type->vector_elements, 1);
+ break;
+ case ir_unop_bitcast_d2u64:
+ this->type = glsl_type::get_instance(GLSL_TYPE_UINT64,
+ op0->type->vector_elements, 1);
+ break;
+
+ default:
+ assert(!"not reached: missing automatic type setup for ir_expression");
+ this->type = op0->type;
+ break;
+ }
+}
+
+ir_expression::ir_expression(int op, ir_rvalue *op0, ir_rvalue *op1)
+ : ir_rvalue(ir_type_expression)
+{
+ this->operation = ir_expression_operation(op);
+ this->operands[0] = op0;
+ this->operands[1] = op1;
+ this->operands[2] = NULL;
+ this->operands[3] = NULL;
+
+ assert(op > ir_last_unop);
+ init_num_operands();
+ assert(num_operands == 2);
+ for (unsigned i = 0; i < num_operands; i++) {
+ assert(this->operands[i] != NULL);
+ }
+
+ switch (this->operation) {
+ case ir_binop_all_equal:
+ case ir_binop_any_nequal:
+ this->type = glsl_type::bool_type;
+ break;
+
+ case ir_binop_add:
+ case ir_binop_sub:
+ case ir_binop_min:
+ case ir_binop_max:
+ case ir_binop_pow:
+ case ir_binop_mul:
+ case ir_binop_div:
+ case ir_binop_mod:
+ case ir_binop_atan2:
+ if (op0->type->is_scalar()) {
+ this->type = op1->type;
+ } else if (op1->type->is_scalar()) {
+ this->type = op0->type;
+ } else {
+ if (this->operation == ir_binop_mul) {
+ this->type = glsl_type::get_mul_type(op0->type, op1->type);
+ } else {
+ assert(op0->type == op1->type);
+ this->type = op0->type;
+ }
+ }
+ break;
+
+ case ir_binop_logic_and:
+ case ir_binop_logic_xor:
+ case ir_binop_logic_or:
+ case ir_binop_bit_and:
+ case ir_binop_bit_xor:
+ case ir_binop_bit_or:
+ assert(!op0->type->is_matrix());
+ assert(!op1->type->is_matrix());
+ if (op0->type->is_scalar()) {
+ this->type = op1->type;
+ } else if (op1->type->is_scalar()) {
+ this->type = op0->type;
+ } else {
+ assert(op0->type->vector_elements == op1->type->vector_elements);
+ this->type = op0->type;
+ }
+ break;
+
+ case ir_binop_equal:
+ case ir_binop_nequal:
+ case ir_binop_gequal:
+ case ir_binop_less:
+ assert(op0->type == op1->type);
+ this->type = glsl_type::get_instance(GLSL_TYPE_BOOL,
+ op0->type->vector_elements, 1);
+ break;
+
+ case ir_binop_dot:
+ this->type = op0->type->get_base_type();
+ break;
+
+ case ir_binop_imul_high:
+ case ir_binop_mul_32x16:
+ case ir_binop_carry:
+ case ir_binop_borrow:
+ case ir_binop_lshift:
+ case ir_binop_rshift:
+ case ir_binop_ldexp:
+ case ir_binop_interpolate_at_offset:
+ case ir_binop_interpolate_at_sample:
+ this->type = op0->type;
+ break;
+
+ case ir_binop_add_sat:
+ case ir_binop_sub_sat:
+ case ir_binop_avg:
+ case ir_binop_avg_round:
+ assert(op0->type == op1->type);
+ this->type = op0->type;
+ break;
+
+ case ir_binop_abs_sub: {
+ enum glsl_base_type base;
+
+ assert(op0->type == op1->type);
+
+ switch (op0->type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ base = GLSL_TYPE_UINT;
+ break;
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ base = GLSL_TYPE_UINT8;
+ break;
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ base = GLSL_TYPE_UINT16;
+ break;
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ base = GLSL_TYPE_UINT64;
+ break;
+ default:
+ unreachable("Invalid base type.");
+ }
+
+ this->type = glsl_type::get_instance(base, op0->type->vector_elements, 1);
+ break;
+ }
+
+ case ir_binop_vector_extract:
+ this->type = op0->type->get_scalar_type();
+ break;
+
+ default:
+ assert(!"not reached: missing automatic type setup for ir_expression");
+ this->type = glsl_type::float_type;
+ }
+}
+
+ir_expression::ir_expression(int op, ir_rvalue *op0, ir_rvalue *op1,
+ ir_rvalue *op2)
+ : ir_rvalue(ir_type_expression)
+{
+ this->operation = ir_expression_operation(op);
+ this->operands[0] = op0;
+ this->operands[1] = op1;
+ this->operands[2] = op2;
+ this->operands[3] = NULL;
+
+ assert(op > ir_last_binop && op <= ir_last_triop);
+ init_num_operands();
+ assert(num_operands == 3);
+ for (unsigned i = 0; i < num_operands; i++) {
+ assert(this->operands[i] != NULL);
+ }
+
+ switch (this->operation) {
+ case ir_triop_fma:
+ case ir_triop_lrp:
+ case ir_triop_bitfield_extract:
+ case ir_triop_vector_insert:
+ this->type = op0->type;
+ break;
+
+ case ir_triop_csel:
+ this->type = op1->type;
+ break;
+
+ default:
+ assert(!"not reached: missing automatic type setup for ir_expression");
+ this->type = glsl_type::float_type;
+ }
+}
+
+/**
+ * This is only here for ir_reader to use for testing purposes. Please use
+ * the precomputed num_operands field if you need the number of operands.
+ */
+unsigned
+ir_expression::get_num_operands(ir_expression_operation op)
+{
+ assert(op <= ir_last_opcode);
+
+ if (op <= ir_last_unop)
+ return 1;
+
+ if (op <= ir_last_binop)
+ return 2;
+
+ if (op <= ir_last_triop)
+ return 3;
+
+ if (op <= ir_last_quadop)
+ return 4;
+
+ unreachable("Could not calculate number of operands");
+}
+
+#include "ir_expression_operation_strings.h"
+
+const char*
+depth_layout_string(ir_depth_layout layout)
+{
+ switch(layout) {
+ case ir_depth_layout_none: return "";
+ case ir_depth_layout_any: return "depth_any";
+ case ir_depth_layout_greater: return "depth_greater";
+ case ir_depth_layout_less: return "depth_less";
+ case ir_depth_layout_unchanged: return "depth_unchanged";
+
+ default:
+ assert(0);
+ return "";
+ }
+}
+
+ir_expression_operation
+ir_expression::get_operator(const char *str)
+{
+ for (int op = 0; op <= int(ir_last_opcode); op++) {
+ if (strcmp(str, ir_expression_operation_strings[op]) == 0)
+ return (ir_expression_operation) op;
+ }
+ return (ir_expression_operation) -1;
+}
+
+ir_variable *
+ir_expression::variable_referenced() const
+{
+ switch (operation) {
+ case ir_binop_vector_extract:
+ case ir_triop_vector_insert:
+ /* We get these for things like a[0] where a is a vector type. In these
+ * cases we want variable_referenced() to return the actual vector
+ * variable this is wrapping.
+ */
+ return operands[0]->variable_referenced();
+ default:
+ return ir_rvalue::variable_referenced();
+ }
+}
+
+ir_constant::ir_constant()
+ : ir_rvalue(ir_type_constant)
+{
+ this->const_elements = NULL;
+}
+
+ir_constant::ir_constant(const struct glsl_type *type,
+ const ir_constant_data *data)
+ : ir_rvalue(ir_type_constant)
+{
+ this->const_elements = NULL;
+
+ assert((type->base_type >= GLSL_TYPE_UINT)
+ && (type->base_type <= GLSL_TYPE_IMAGE));
+
+ this->type = type;
+ memcpy(& this->value, data, sizeof(this->value));
+}
+
+ir_constant::ir_constant(float16_t f16, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_FLOAT16, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.f16[i] = f16.bits;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+      this->value.f16[i] = 0;
+ }
+}
+
+ir_constant::ir_constant(float f, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_FLOAT, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.f[i] = f;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.f[i] = 0;
+ }
+}
+
+ir_constant::ir_constant(double d, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_DOUBLE, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.d[i] = d;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.d[i] = 0.0;
+ }
+}
+
+ir_constant::ir_constant(unsigned int u, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_UINT, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.u[i] = u;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.u[i] = 0;
+ }
+}
+
+ir_constant::ir_constant(int integer, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_INT, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.i[i] = integer;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.i[i] = 0;
+ }
+}
+
+ir_constant::ir_constant(uint64_t u64, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_UINT64, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.u64[i] = u64;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.u64[i] = 0;
+ }
+}
+
+ir_constant::ir_constant(int64_t int64, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_INT64, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.i64[i] = int64;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.i64[i] = 0;
+ }
+}
+
+ir_constant::ir_constant(bool b, unsigned vector_elements)
+ : ir_rvalue(ir_type_constant)
+{
+ assert(vector_elements <= 4);
+ this->type = glsl_type::get_instance(GLSL_TYPE_BOOL, vector_elements, 1);
+ for (unsigned i = 0; i < vector_elements; i++) {
+ this->value.b[i] = b;
+ }
+ for (unsigned i = vector_elements; i < 16; i++) {
+ this->value.b[i] = false;
+ }
+}
+
+ir_constant::ir_constant(const ir_constant *c, unsigned i)
+ : ir_rvalue(ir_type_constant)
+{
+ this->const_elements = NULL;
+ this->type = c->type->get_base_type();
+
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: this->value.u[0] = c->value.u[i]; break;
+ case GLSL_TYPE_INT: this->value.i[0] = c->value.i[i]; break;
+ case GLSL_TYPE_FLOAT: this->value.f[0] = c->value.f[i]; break;
+ case GLSL_TYPE_FLOAT16: this->value.f16[0] = c->value.f16[i]; break;
+ case GLSL_TYPE_BOOL: this->value.b[0] = c->value.b[i]; break;
+ case GLSL_TYPE_DOUBLE: this->value.d[0] = c->value.d[i]; break;
+ default: assert(!"Should not get here."); break;
+ }
+}
+
+ir_constant::ir_constant(const struct glsl_type *type, exec_list *value_list)
+ : ir_rvalue(ir_type_constant)
+{
+ this->const_elements = NULL;
+ this->type = type;
+
+ assert(type->is_scalar() || type->is_vector() || type->is_matrix()
+ || type->is_struct() || type->is_array());
+
+ /* If the constant is a record, the types of each of the entries in
+ * value_list must be a 1-for-1 match with the structure components. Each
+ * entry must also be a constant. Just move the nodes from the value_list
+ * to the list in the ir_constant.
+ */
+ if (type->is_array() || type->is_struct()) {
+ this->const_elements = ralloc_array(this, ir_constant *, type->length);
+ unsigned i = 0;
+ foreach_in_list(ir_constant, value, value_list) {
+ assert(value->as_constant() != NULL);
+
+ this->const_elements[i++] = value;
+ }
+ return;
+ }
+
+ for (unsigned i = 0; i < 16; i++) {
+ this->value.u[i] = 0;
+ }
+
+ ir_constant *value = (ir_constant *) (value_list->get_head_raw());
+
+ /* Constructors with exactly one scalar argument are special for vectors
+ * and matrices. For vectors, the scalar value is replicated to fill all
+ * the components. For matrices, the scalar fills the components of the
+ * diagonal while the rest is filled with 0.
+ */
+ if (value->type->is_scalar() && value->next->is_tail_sentinel()) {
+ if (type->is_matrix()) {
+ /* Matrix - fill diagonal (rest is already set to 0) */
+ for (unsigned i = 0; i < type->matrix_columns; i++) {
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ this->value.f[i * type->vector_elements + i] =
+ value->value.f[0];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ this->value.d[i * type->vector_elements + i] =
+ value->value.d[0];
+ break;
+ case GLSL_TYPE_FLOAT16:
+ this->value.f16[i * type->vector_elements + i] =
+ value->value.f16[0];
+ break;
+ default:
+ assert(!"unexpected matrix base type");
+ }
+ }
+ } else {
+ /* Vector or scalar - fill all components */
+ switch (type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ for (unsigned i = 0; i < type->components(); i++)
+ this->value.u[i] = value->value.u[0];
+ break;
+ case GLSL_TYPE_FLOAT:
+ for (unsigned i = 0; i < type->components(); i++)
+ this->value.f[i] = value->value.f[0];
+ break;
+ case GLSL_TYPE_FLOAT16:
+ for (unsigned i = 0; i < type->components(); i++)
+ this->value.f16[i] = value->value.f16[0];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ for (unsigned i = 0; i < type->components(); i++)
+ this->value.d[i] = value->value.d[0];
+ break;
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ for (unsigned i = 0; i < type->components(); i++)
+ this->value.u64[i] = value->value.u64[0];
+ break;
+ case GLSL_TYPE_BOOL:
+ for (unsigned i = 0; i < type->components(); i++)
+ this->value.b[i] = value->value.b[0];
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ this->value.u64[0] = value->value.u64[0];
+ break;
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+ }
+ return;
+ }
+
+ if (type->is_matrix() && value->type->is_matrix()) {
+ assert(value->next->is_tail_sentinel());
+
+ /* From section 5.4.2 of the GLSL 1.20 spec:
+ * "If a matrix is constructed from a matrix, then each component
+ * (column i, row j) in the result that has a corresponding component
+ * (column i, row j) in the argument will be initialized from there."
+ */
+ unsigned cols = MIN2(type->matrix_columns, value->type->matrix_columns);
+ unsigned rows = MIN2(type->vector_elements, value->type->vector_elements);
+ for (unsigned i = 0; i < cols; i++) {
+ for (unsigned j = 0; j < rows; j++) {
+ const unsigned src = i * value->type->vector_elements + j;
+ const unsigned dst = i * type->vector_elements + j;
+ this->value.f[dst] = value->value.f[src];
+ }
+ }
+
+ /* "All other components will be initialized to the identity matrix." */
+ for (unsigned i = cols; i < type->matrix_columns; i++)
+ this->value.f[i * type->vector_elements + i] = 1.0;
+
+ return;
+ }
+
+ /* Use each component from each entry in the value_list to initialize one
+ * component of the constant being constructed.
+ */
+ unsigned i = 0;
+ for (;;) {
+ assert(value->as_constant() != NULL);
+ assert(!value->is_tail_sentinel());
+
+ for (unsigned j = 0; j < value->type->components(); j++) {
+ switch (type->base_type) {
+ case GLSL_TYPE_UINT:
+ this->value.u[i] = value->get_uint_component(j);
+ break;
+ case GLSL_TYPE_INT:
+ this->value.i[i] = value->get_int_component(j);
+ break;
+ case GLSL_TYPE_FLOAT:
+ this->value.f[i] = value->get_float_component(j);
+ break;
+ case GLSL_TYPE_FLOAT16:
+ this->value.f16[i] = value->get_float16_component(j);
+ break;
+ case GLSL_TYPE_BOOL:
+ this->value.b[i] = value->get_bool_component(j);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ this->value.d[i] = value->get_double_component(j);
+ break;
+ case GLSL_TYPE_UINT64:
+ this->value.u64[i] = value->get_uint64_component(j);
+ break;
+ case GLSL_TYPE_INT64:
+ this->value.i64[i] = value->get_int64_component(j);
+ break;
+ default:
+ /* FINISHME: What to do? Exceptions are not the answer.
+ */
+ break;
+ }
+
+ i++;
+ if (i >= type->components())
+ break;
+ }
+
+ if (i >= type->components())
+ break; /* avoid downcasting a list sentinel */
+ value = (ir_constant *) value->next;
+ }
+}
+
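+/* Build a fully zeroed constant of the given type; for arrays and structs
+ * the elements are allocated and zeroed recursively.
+ */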
+ir_constant *
+ir_constant::zero(void *mem_ctx, const glsl_type *type)
+{
+ assert(type->is_scalar() || type->is_vector() || type->is_matrix()
+ || type->is_struct() || type->is_array());
+
+ ir_constant *c = new(mem_ctx) ir_constant;
+ c->type = type;
+ memset(&c->value, 0, sizeof(c->value));
+
+ if (type->is_array()) {
+ c->const_elements = ralloc_array(c, ir_constant *, type->length);
+
+ for (unsigned i = 0; i < type->length; i++)
+ c->const_elements[i] = ir_constant::zero(c, type->fields.array);
+ }
+
+ if (type->is_struct()) {
+ c->const_elements = ralloc_array(c, ir_constant *, type->length);
+
+ for (unsigned i = 0; i < type->length; i++) {
+ c->const_elements[i] =
+ ir_constant::zero(mem_ctx, type->fields.structure[i].type);
+ }
+ }
+
+ return c;
+}
+
+bool
+ir_constant::get_bool_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return this->value.u[i] != 0;
+ case GLSL_TYPE_INT: return this->value.i[i] != 0;
+ case GLSL_TYPE_FLOAT: return ((int)this->value.f[i]) != 0;
+ case GLSL_TYPE_FLOAT16: return ((int)_mesa_half_to_float(this->value.f16[i])) != 0;
+ case GLSL_TYPE_BOOL: return this->value.b[i];
+ case GLSL_TYPE_DOUBLE: return this->value.d[i] != 0.0;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return this->value.u64[i] != 0;
+ case GLSL_TYPE_INT64: return this->value.i64[i] != 0;
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return false;
+}
+
+float
+ir_constant::get_float_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return (float) this->value.u[i];
+ case GLSL_TYPE_INT: return (float) this->value.i[i];
+ case GLSL_TYPE_FLOAT: return this->value.f[i];
+ case GLSL_TYPE_FLOAT16: return _mesa_half_to_float(this->value.f16[i]);
+ case GLSL_TYPE_BOOL: return this->value.b[i] ? 1.0f : 0.0f;
+ case GLSL_TYPE_DOUBLE: return (float) this->value.d[i];
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return (float) this->value.u64[i];
+ case GLSL_TYPE_INT64: return (float) this->value.i64[i];
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return 0.0;
+}
+
+uint16_t
+ir_constant::get_float16_component(unsigned i) const
+{
+ if (this->type->base_type == GLSL_TYPE_FLOAT16)
+ return this->value.f16[i];
+ else
+ return _mesa_float_to_half(get_float_component(i));
+}
+
+double
+ir_constant::get_double_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return (double) this->value.u[i];
+ case GLSL_TYPE_INT: return (double) this->value.i[i];
+ case GLSL_TYPE_FLOAT: return (double) this->value.f[i];
+ case GLSL_TYPE_FLOAT16: return (double) _mesa_half_to_float(this->value.f16[i]);
+ case GLSL_TYPE_BOOL: return this->value.b[i] ? 1.0 : 0.0;
+ case GLSL_TYPE_DOUBLE: return this->value.d[i];
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return (double) this->value.u64[i];
+ case GLSL_TYPE_INT64: return (double) this->value.i64[i];
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return 0.0;
+}
+
+int
+ir_constant::get_int_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return this->value.u[i];
+ case GLSL_TYPE_INT: return this->value.i[i];
+ case GLSL_TYPE_FLOAT: return (int) this->value.f[i];
+ case GLSL_TYPE_FLOAT16: return (int) _mesa_half_to_float(this->value.f16[i]);
+ case GLSL_TYPE_BOOL: return this->value.b[i] ? 1 : 0;
+ case GLSL_TYPE_DOUBLE: return (int) this->value.d[i];
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return (int) this->value.u64[i];
+ case GLSL_TYPE_INT64: return (int) this->value.i64[i];
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return 0;
+}
+
+unsigned
+ir_constant::get_uint_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return this->value.u[i];
+ case GLSL_TYPE_INT: return this->value.i[i];
+ case GLSL_TYPE_FLOAT: return (unsigned) this->value.f[i];
+ case GLSL_TYPE_FLOAT16: return (unsigned) _mesa_half_to_float(this->value.f16[i]);
+ case GLSL_TYPE_BOOL: return this->value.b[i] ? 1 : 0;
+ case GLSL_TYPE_DOUBLE: return (unsigned) this->value.d[i];
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return (unsigned) this->value.u64[i];
+ case GLSL_TYPE_INT64: return (unsigned) this->value.i64[i];
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return 0;
+}
+
+int64_t
+ir_constant::get_int64_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return this->value.u[i];
+ case GLSL_TYPE_INT: return this->value.i[i];
+ case GLSL_TYPE_FLOAT: return (int64_t) this->value.f[i];
+ case GLSL_TYPE_FLOAT16: return (int64_t) _mesa_half_to_float(this->value.f16[i]);
+ case GLSL_TYPE_BOOL: return this->value.b[i] ? 1 : 0;
+ case GLSL_TYPE_DOUBLE: return (int64_t) this->value.d[i];
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return (int64_t) this->value.u64[i];
+ case GLSL_TYPE_INT64: return this->value.i64[i];
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return 0;
+}
+
+uint64_t
+ir_constant::get_uint64_component(unsigned i) const
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT: return this->value.u[i];
+ case GLSL_TYPE_INT: return this->value.i[i];
+ case GLSL_TYPE_FLOAT: return (uint64_t) this->value.f[i];
+ case GLSL_TYPE_FLOAT16: return (uint64_t) _mesa_half_to_float(this->value.f16[i]);
+ case GLSL_TYPE_BOOL: return this->value.b[i] ? 1 : 0;
+ case GLSL_TYPE_DOUBLE: return (uint64_t) this->value.d[i];
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64: return this->value.u64[i];
+ case GLSL_TYPE_INT64: return (uint64_t) this->value.i64[i];
+ default: assert(!"Should not get here."); break;
+ }
+
+ /* Must return something to make the compiler happy. This is clearly an
+ * error case.
+ */
+ return 0;
+}
+
+ir_constant *
+ir_constant::get_array_element(unsigned i) const
+{
+ assert(this->type->is_array());
+
+ /* From page 35 (page 41 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "Behavior is undefined if a shader subscripts an array with an index
+ * less than 0 or greater than or equal to the size the array was
+ * declared with."
+ *
+    * Most out-of-bounds accesses are removed before we get this far, but a
+    * non-constant array index can occasionally be constant-folded into an
+    * out-of-range value.
+ */
+ if (int(i) < 0)
+ i = 0;
+ else if (i >= this->type->length)
+ i = this->type->length - 1;
+
+ return const_elements[i];
+}
+
+ir_constant *
+ir_constant::get_record_field(int idx)
+{
+ assert(this->type->is_struct());
+ assert(idx >= 0 && (unsigned) idx < this->type->length);
+
+ return const_elements[idx];
+}
+
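+/* Copy every component of 'src' into this constant starting at component
+ * 'offset', converting through the get_*_component() helpers as needed.
+ * Aggregates must have identical types and are cloned element by element.
+ */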
+void
+ir_constant::copy_offset(ir_constant *src, int offset)
+{
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_BOOL: {
+ unsigned int size = src->type->components();
+ assert (size <= this->type->components() - offset);
+ for (unsigned int i=0; i<size; i++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ value.u[i+offset] = src->get_uint_component(i);
+ break;
+ case GLSL_TYPE_INT:
+ value.i[i+offset] = src->get_int_component(i);
+ break;
+ case GLSL_TYPE_FLOAT:
+ value.f[i+offset] = src->get_float_component(i);
+ break;
+ case GLSL_TYPE_FLOAT16:
+ value.f16[i+offset] = src->get_float16_component(i);
+ break;
+ case GLSL_TYPE_BOOL:
+ value.b[i+offset] = src->get_bool_component(i);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ value.d[i+offset] = src->get_double_component(i);
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64:
+ value.u64[i+offset] = src->get_uint64_component(i);
+ break;
+ case GLSL_TYPE_INT64:
+ value.i64[i+offset] = src->get_int64_component(i);
+ break;
+ default: // Shut up the compiler
+ break;
+ }
+ }
+ break;
+ }
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY: {
+ assert (src->type == this->type);
+ for (unsigned i = 0; i < this->type->length; i++) {
+ this->const_elements[i] = src->const_elements[i]->clone(this, NULL);
+ }
+ break;
+ }
+
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+}
+
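+/* Write the components of 'src' through a writemask: bit i of 'mask'
+ * stores the next source component into component i + offset. Scalar
+ * destinations ignore the supplied offset and mask.
+ */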
+void
+ir_constant::copy_masked_offset(ir_constant *src, int offset, unsigned int mask)
+{
+ assert (!type->is_array() && !type->is_struct());
+
+ if (!type->is_vector() && !type->is_matrix()) {
+ offset = 0;
+ mask = 1;
+ }
+
+ int id = 0;
+ for (int i=0; i<4; i++) {
+ if (mask & (1 << i)) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ value.u[i+offset] = src->get_uint_component(id++);
+ break;
+ case GLSL_TYPE_INT:
+ value.i[i+offset] = src->get_int_component(id++);
+ break;
+ case GLSL_TYPE_FLOAT:
+ value.f[i+offset] = src->get_float_component(id++);
+ break;
+ case GLSL_TYPE_FLOAT16:
+ value.f16[i+offset] = src->get_float16_component(id++);
+ break;
+ case GLSL_TYPE_BOOL:
+ value.b[i+offset] = src->get_bool_component(id++);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ value.d[i+offset] = src->get_double_component(id++);
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64:
+ value.u64[i+offset] = src->get_uint64_component(id++);
+ break;
+ case GLSL_TYPE_INT64:
+ value.i64[i+offset] = src->get_int64_component(id++);
+ break;
+ default:
+ assert(!"Should not get here.");
+ return;
+ }
+ }
+ }
+}
+
+bool
+ir_constant::has_value(const ir_constant *c) const
+{
+ if (this->type != c->type)
+ return false;
+
+ if (this->type->is_array() || this->type->is_struct()) {
+ for (unsigned i = 0; i < this->type->length; i++) {
+ if (!this->const_elements[i]->has_value(c->const_elements[i]))
+ return false;
+ }
+ return true;
+ }
+
+ for (unsigned i = 0; i < this->type->components(); i++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if (this->value.u[i] != c->value.u[i])
+ return false;
+ break;
+ case GLSL_TYPE_INT:
+ if (this->value.i[i] != c->value.i[i])
+ return false;
+ break;
+ case GLSL_TYPE_FLOAT:
+ if (this->value.f[i] != c->value.f[i])
+ return false;
+ break;
+ case GLSL_TYPE_FLOAT16:
+         /* Convert to float to make sure NaN and ±0.0 compare correctly */
+ if (_mesa_half_to_float(this->value.f16[i]) !=
+ _mesa_half_to_float(c->value.f16[i]))
+ return false;
+ break;
+ case GLSL_TYPE_BOOL:
+ if (this->value.b[i] != c->value.b[i])
+ return false;
+ break;
+ case GLSL_TYPE_DOUBLE:
+ if (this->value.d[i] != c->value.d[i])
+ return false;
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64:
+ if (this->value.u64[i] != c->value.u64[i])
+ return false;
+ break;
+ case GLSL_TYPE_INT64:
+ if (this->value.i64[i] != c->value.i64[i])
+ return false;
+ break;
+ default:
+ assert(!"Should not get here.");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+ir_constant::is_value(float f, int i) const
+{
+ if (!this->type->is_scalar() && !this->type->is_vector())
+ return false;
+
+ /* Only accept boolean values for 0/1. */
+ if (int(bool(i)) != i && this->type->is_boolean())
+ return false;
+
+ for (unsigned c = 0; c < this->type->vector_elements; c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ if (this->value.f[c] != f)
+ return false;
+ break;
+ case GLSL_TYPE_FLOAT16:
+ if (_mesa_half_to_float(this->value.f16[c]) != f)
+ return false;
+ break;
+ case GLSL_TYPE_INT:
+ if (this->value.i[c] != i)
+ return false;
+ break;
+ case GLSL_TYPE_UINT:
+ if (this->value.u[c] != unsigned(i))
+ return false;
+ break;
+ case GLSL_TYPE_BOOL:
+ if (this->value.b[c] != bool(i))
+ return false;
+ break;
+ case GLSL_TYPE_DOUBLE:
+ if (this->value.d[c] != double(f))
+ return false;
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64:
+ if (this->value.u64[c] != uint64_t(i))
+ return false;
+ break;
+ case GLSL_TYPE_INT64:
+ if (this->value.i64[c] != i)
+ return false;
+ break;
+ default:
+ /* The only other base types are structures, arrays, and samplers.
+ * Samplers cannot be constants, and the others should have been
+ * filtered out above.
+ */
+ assert(!"Should not get here.");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+ir_constant::is_zero() const
+{
+ return is_value(0.0, 0);
+}
+
+bool
+ir_constant::is_one() const
+{
+ return is_value(1.0, 1);
+}
+
+bool
+ir_constant::is_negative_one() const
+{
+ return is_value(-1.0, -1);
+}
+
+bool
+ir_constant::is_uint16_constant() const
+{
+ if (!type->is_integer_32())
+ return false;
+
+ return value.u[0] < (1 << 16);
+}
+
+ir_loop::ir_loop()
+ : ir_instruction(ir_type_loop)
+{
+}
+
+
+ir_dereference_variable::ir_dereference_variable(ir_variable *var)
+ : ir_dereference(ir_type_dereference_variable)
+{
+ assert(var != NULL);
+
+ this->var = var;
+ this->type = var->type;
+}
+
+
+ir_dereference_array::ir_dereference_array(ir_rvalue *value,
+ ir_rvalue *array_index)
+ : ir_dereference(ir_type_dereference_array)
+{
+ this->array_index = array_index;
+ this->set_array(value);
+}
+
+
+ir_dereference_array::ir_dereference_array(ir_variable *var,
+ ir_rvalue *array_index)
+ : ir_dereference(ir_type_dereference_array)
+{
+ void *ctx = ralloc_parent(var);
+
+ this->array_index = array_index;
+ this->set_array(new(ctx) ir_dereference_variable(var));
+}
+
+
+void
+ir_dereference_array::set_array(ir_rvalue *value)
+{
+ assert(value != NULL);
+
+ this->array = value;
+
+ const glsl_type *const vt = this->array->type;
+
+ if (vt->is_array()) {
+ type = vt->fields.array;
+ } else if (vt->is_matrix()) {
+ type = vt->column_type();
+ } else if (vt->is_vector()) {
+ type = vt->get_base_type();
+ }
+}
+
+
+ir_dereference_record::ir_dereference_record(ir_rvalue *value,
+ const char *field)
+ : ir_dereference(ir_type_dereference_record)
+{
+ assert(value != NULL);
+
+ this->record = value;
+ this->type = this->record->type->field_type(field);
+ this->field_idx = this->record->type->field_index(field);
+}
+
+
+ir_dereference_record::ir_dereference_record(ir_variable *var,
+ const char *field)
+ : ir_dereference(ir_type_dereference_record)
+{
+ void *ctx = ralloc_parent(var);
+
+ this->record = new(ctx) ir_dereference_variable(var);
+ this->type = this->record->type->field_type(field);
+ this->field_idx = this->record->type->field_index(field);
+}
+
+bool
+ir_dereference::is_lvalue(const struct _mesa_glsl_parse_state *state) const
+{
+ ir_variable *var = this->variable_referenced();
+
+   /* Every l-value dereference chain eventually ends in a variable.
+ */
+ if ((var == NULL) || var->data.read_only)
+ return false;
+
+ /* From section 4.1.7 of the ARB_bindless_texture spec:
+ *
+ * "Samplers can be used as l-values, so can be assigned into and used as
+ * "out" and "inout" function parameters."
+ *
+ * From section 4.1.X of the ARB_bindless_texture spec:
+ *
+ * "Images can be used as l-values, so can be assigned into and used as
+ * "out" and "inout" function parameters."
+ */
+ if ((!state || state->has_bindless()) &&
+ (this->type->contains_sampler() || this->type->contains_image()))
+ return true;
+
+ /* From section 4.1.7 of the GLSL 4.40 spec:
+ *
+ * "Opaque variables cannot be treated as l-values; hence cannot
+ * be used as out or inout function parameters, nor can they be
+ * assigned into."
+ */
+ if (this->type->contains_opaque())
+ return false;
+
+ return true;
+}
+
+
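+/* This table must stay in the same order as the ir_texture_opcode enum;
+ * opcode_string() and get_opcode() both index it by enum value.
+ */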
+static const char * const tex_opcode_strs[] = { "tex", "txb", "txl", "txd", "txf", "txf_ms", "txs", "lod", "tg4", "query_levels", "texture_samples", "samples_identical" };
+
+const char *ir_texture::opcode_string()
+{
+ assert((unsigned int) op < ARRAY_SIZE(tex_opcode_strs));
+ return tex_opcode_strs[op];
+}
+
+ir_texture_opcode
+ir_texture::get_opcode(const char *str)
+{
+ const int count = sizeof(tex_opcode_strs) / sizeof(tex_opcode_strs[0]);
+ for (int op = 0; op < count; op++) {
+ if (strcmp(str, tex_opcode_strs[op]) == 0)
+ return (ir_texture_opcode) op;
+ }
+ return (ir_texture_opcode) -1;
+}
+
+
+void
+ir_texture::set_sampler(ir_dereference *sampler, const glsl_type *type)
+{
+ assert(sampler != NULL);
+ assert(type != NULL);
+ this->sampler = sampler;
+ this->type = type;
+
+ if (this->op == ir_txs || this->op == ir_query_levels ||
+ this->op == ir_texture_samples) {
+ assert(type->base_type == GLSL_TYPE_INT);
+ } else if (this->op == ir_lod) {
+ assert(type->vector_elements == 2);
+ assert(type->is_float());
+ } else if (this->op == ir_samples_identical) {
+ assert(type == glsl_type::bool_type);
+ assert(sampler->type->is_sampler());
+ assert(sampler->type->sampler_dimensionality == GLSL_SAMPLER_DIM_MS);
+ } else {
+ assert(sampler->type->sampled_type == (int) type->base_type);
+ if (sampler->type->sampler_shadow)
+ assert(type->vector_elements == 4 || type->vector_elements == 1);
+ else
+ assert(type->vector_elements == 4);
+ }
+}
+
+bool
+ir_texture::has_lod(const glsl_type *sampler_type)
+{
+ assert(sampler_type->is_sampler());
+
+ switch (sampler_type->sampler_dimensionality) {
+ case GLSL_SAMPLER_DIM_RECT:
+ case GLSL_SAMPLER_DIM_BUF:
+ case GLSL_SAMPLER_DIM_MS:
+ return false;
+ default:
+ return true;
+ }
+}
+
+void
+ir_swizzle::init_mask(const unsigned *comp, unsigned count)
+{
+ assert((count >= 1) && (count <= 4));
+
+ memset(&this->mask, 0, sizeof(this->mask));
+ this->mask.num_components = count;
+
+ unsigned dup_mask = 0;
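+   /* For each component after the first, set a bit if it repeats an
+    * earlier component; a nonzero mask marks the swizzle as containing
+    * duplicates.
+    */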
+ switch (count) {
+ case 4:
+ assert(comp[3] <= 3);
+ dup_mask |= (1U << comp[3])
+ & ((1U << comp[0]) | (1U << comp[1]) | (1U << comp[2]));
+ this->mask.w = comp[3];
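+      /* fallthrough */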
+
+ case 3:
+ assert(comp[2] <= 3);
+ dup_mask |= (1U << comp[2])
+ & ((1U << comp[0]) | (1U << comp[1]));
+ this->mask.z = comp[2];
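+      /* fallthrough */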
+
+ case 2:
+ assert(comp[1] <= 3);
+ dup_mask |= (1U << comp[1])
+ & ((1U << comp[0]));
+ this->mask.y = comp[1];
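+      /* fallthrough */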
+
+ case 1:
+ assert(comp[0] <= 3);
+ this->mask.x = comp[0];
+ }
+
+ this->mask.has_duplicates = dup_mask != 0;
+
+ /* Based on the number of elements in the swizzle and the base type
+ * (i.e., float, int, unsigned, or bool) of the vector being swizzled,
+ * generate the type of the resulting value.
+ */
+ type = glsl_type::get_instance(val->type->base_type, mask.num_components, 1);
+}
+
+ir_swizzle::ir_swizzle(ir_rvalue *val, unsigned x, unsigned y, unsigned z,
+ unsigned w, unsigned count)
+ : ir_rvalue(ir_type_swizzle), val(val)
+{
+ const unsigned components[4] = { x, y, z, w };
+ this->init_mask(components, count);
+}
+
+ir_swizzle::ir_swizzle(ir_rvalue *val, const unsigned *comp,
+ unsigned count)
+ : ir_rvalue(ir_type_swizzle), val(val)
+{
+ this->init_mask(comp, count);
+}
+
+ir_swizzle::ir_swizzle(ir_rvalue *val, ir_swizzle_mask mask)
+ : ir_rvalue(ir_type_swizzle), val(val), mask(mask)
+{
+ this->type = glsl_type::get_instance(val->type->base_type,
+ mask.num_components, 1);
+}
+
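+/* Base offsets for the three swizzle character families -- xyzw (X),
+ * rgba (R), and stpq (S) -- plus a marker (I) for invalid characters.
+ * The tables below store family base + component index, so subtracting
+ * the base of the first character maps valid same-family swizzles onto
+ * [0,3] and pushes everything else out of range.
+ */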
+#define X 1
+#define R 5
+#define S 9
+#define I 13
+
+ir_swizzle *
+ir_swizzle::create(ir_rvalue *val, const char *str, unsigned vector_length)
+{
+ void *ctx = ralloc_parent(val);
+
+ /* For each possible swizzle character, this table encodes the value in
+ * \c idx_map that represents the 0th element of the vector. For invalid
+ * swizzle characters (e.g., 'k'), a special value is used that will allow
+ * detection of errors.
+ */
+ static const unsigned char base_idx[26] = {
+ /* a b c d e f g h i j k l m */
+ R, R, I, I, I, I, R, I, I, I, I, I, I,
+ /* n o p q r s t u v w x y z */
+ I, I, S, S, R, S, S, I, I, X, X, X, X
+ };
+
+   /* Each valid swizzle character has an entry in the previous table. This
+    * table encodes the base index from the previous table plus the actual
+    * index of the swizzle character. When processing swizzles, the first
+    * character in the string is indexed in the previous table. Each character
+    * in the string is indexed in this table, and the value found there has the
+    * value from the first table subtracted. The result must be in the range
+    * [0,3].
+ *
+ * For example, the string "wzyx" will get X from the first table. Each of
+    * the characters will get X+3, X+2, X+1, and X+0 from this table. After
+ * subtraction, the swizzle values are { 3, 2, 1, 0 }.
+ *
+ * The string "wzrg" will get X from the first table. Each of the characters
+ * will get X+3, X+2, R+0, and R+1 from this table. After subtraction, the
+ * swizzle values are { 3, 2, 4, 5 }. Since 4 and 5 are outside the range
+ * [0,3], the error is detected.
+ */
+ static const unsigned char idx_map[26] = {
+ /* a b c d e f g h i j k l m */
+ R+3, R+2, 0, 0, 0, 0, R+1, 0, 0, 0, 0, 0, 0,
+ /* n o p q r s t u v w x y z */
+ 0, 0, S+2, S+3, R+0, S+0, S+1, 0, 0, X+3, X+0, X+1, X+2
+ };
+
+ int swiz_idx[4] = { 0, 0, 0, 0 };
+ unsigned i;
+
+
+ /* Validate the first character in the swizzle string and look up the base
+ * index value as described above.
+ */
+ if ((str[0] < 'a') || (str[0] > 'z'))
+ return NULL;
+
+ const unsigned base = base_idx[str[0] - 'a'];
+
+
+ for (i = 0; (i < 4) && (str[i] != '\0'); i++) {
+ /* Validate the next character, and, as described above, convert it to a
+ * swizzle index.
+ */
+ if ((str[i] < 'a') || (str[i] > 'z'))
+ return NULL;
+
+ swiz_idx[i] = idx_map[str[i] - 'a'] - base;
+ if ((swiz_idx[i] < 0) || (swiz_idx[i] >= (int) vector_length))
+ return NULL;
+ }
+
+ if (str[i] != '\0')
+ return NULL;
+
+ return new(ctx) ir_swizzle(val, swiz_idx[0], swiz_idx[1], swiz_idx[2],
+ swiz_idx[3], i);
+}
+
+#undef X
+#undef R
+#undef S
+#undef I
+
+ir_variable *
+ir_swizzle::variable_referenced() const
+{
+ return this->val->variable_referenced();
+}
+
+
+bool ir_variable::temporaries_allocate_names = false;
+
+const char ir_variable::tmp_name[] = "compiler_temp";
+
+ir_variable::ir_variable(const struct glsl_type *type, const char *name,
+ ir_variable_mode mode)
+ : ir_instruction(ir_type_variable)
+{
+ this->type = type;
+
+ if (mode == ir_var_temporary && !ir_variable::temporaries_allocate_names)
+ name = NULL;
+
+ /* The ir_variable clone method may call this constructor with name set to
+ * tmp_name.
+ */
+ assert(name != NULL
+ || mode == ir_var_temporary
+ || mode == ir_var_function_in
+ || mode == ir_var_function_out
+ || mode == ir_var_function_inout);
+ assert(name != ir_variable::tmp_name
+ || mode == ir_var_temporary);
+ if (mode == ir_var_temporary
+ && (name == NULL || name == ir_variable::tmp_name)) {
+ this->name = ir_variable::tmp_name;
+ } else if (name == NULL ||
+ strlen(name) < ARRAY_SIZE(this->name_storage)) {
+ strcpy(this->name_storage, name ? name : "");
+ this->name = this->name_storage;
+ } else {
+ this->name = ralloc_strdup(this, name);
+ }
+
+ this->u.max_ifc_array_access = NULL;
+
+ this->data.explicit_location = false;
+ this->data.explicit_index = false;
+ this->data.explicit_binding = false;
+ this->data.explicit_component = false;
+ this->data.has_initializer = false;
+ this->data.is_unmatched_generic_inout = false;
+ this->data.is_xfb_only = false;
+ this->data.explicit_xfb_buffer = false;
+ this->data.explicit_xfb_offset = false;
+ this->data.explicit_xfb_stride = false;
+ this->data.location = -1;
+ this->data.location_frac = 0;
+ this->data.matrix_layout = GLSL_MATRIX_LAYOUT_INHERITED;
+ this->data.from_named_ifc_block = false;
+ this->data.must_be_shader_input = false;
+ this->data.index = 0;
+ this->data.binding = 0;
+ this->data.warn_extension_index = 0;
+ this->constant_value = NULL;
+ this->constant_initializer = NULL;
+ this->data.depth_layout = ir_depth_layout_none;
+ this->data.used = false;
+ this->data.assigned = false;
+ this->data.always_active_io = false;
+ this->data.read_only = false;
+ this->data.centroid = false;
+ this->data.sample = false;
+ this->data.patch = false;
+ this->data.explicit_invariant = false;
+ this->data.invariant = false;
+ this->data.precise = false;
+ this->data.how_declared = ir_var_declared_normally;
+ this->data.mode = mode;
+ this->data.interpolation = INTERP_MODE_NONE;
+ this->data.max_array_access = -1;
+ this->data.offset = 0;
+ this->data.precision = GLSL_PRECISION_NONE;
+ this->data.memory_read_only = false;
+ this->data.memory_write_only = false;
+ this->data.memory_coherent = false;
+ this->data.memory_volatile = false;
+ this->data.memory_restrict = false;
+ this->data.from_ssbo_unsized_array = false;
+ this->data.implicit_sized_array = false;
+ this->data.fb_fetch_output = false;
+ this->data.bindless = false;
+ this->data.bound = false;
+ this->data.image_format = PIPE_FORMAT_NONE;
+ this->data._num_state_slots = 0;
+ this->data.param_index = 0;
+ this->data.stream = 0;
+ this->data.xfb_buffer = -1;
+ this->data.xfb_stride = -1;
+
+ this->interface_type = NULL;
+
+ if (type != NULL) {
+ if (type->is_interface())
+ this->init_interface_type(type);
+ else if (type->without_array()->is_interface())
+ this->init_interface_type(type->without_array());
+ }
+}
+
+
+const char *
+interpolation_string(unsigned interpolation)
+{
+ switch (interpolation) {
+ case INTERP_MODE_NONE: return "no";
+ case INTERP_MODE_SMOOTH: return "smooth";
+ case INTERP_MODE_FLAT: return "flat";
+ case INTERP_MODE_NOPERSPECTIVE: return "noperspective";
+ }
+
+ assert(!"Should not get here.");
+ return "";
+}
+
+const char *const ir_variable::warn_extension_table[] = {
+ "",
+ "GL_ARB_shader_stencil_export",
+ "GL_AMD_shader_stencil_export",
+};
+
+void
+ir_variable::enable_extension_warning(const char *extension)
+{
+ for (unsigned i = 0; i < ARRAY_SIZE(warn_extension_table); i++) {
+ if (strcmp(warn_extension_table[i], extension) == 0) {
+ this->data.warn_extension_index = i;
+ return;
+ }
+ }
+
+ assert(!"Should not get here.");
+ this->data.warn_extension_index = 0;
+}
+
+const char *
+ir_variable::get_extension_warning() const
+{
+ return this->data.warn_extension_index == 0
+ ? NULL : warn_extension_table[this->data.warn_extension_index];
+}
+
+ir_function_signature::ir_function_signature(const glsl_type *return_type,
+ builtin_available_predicate b)
+ : ir_instruction(ir_type_function_signature),
+ return_type(return_type), is_defined(false),
+ return_precision(GLSL_PRECISION_NONE),
+ intrinsic_id(ir_intrinsic_invalid), builtin_avail(b), _function(NULL)
+{
+ this->origin = NULL;
+}
+
+
+bool
+ir_function_signature::is_builtin() const
+{
+ return builtin_avail != NULL;
+}
+
+
+bool
+ir_function_signature::is_builtin_available(const _mesa_glsl_parse_state *state) const
+{
+ /* We can't call the predicate without a state pointer, so just say that
+ * the signature is available. At compile time, we need the filtering,
+ * but also receive a valid state pointer. At link time, we're resolving
+ * imported built-in prototypes to their definitions, which will always
+ * be an exact match. So we can skip the filtering.
+ */
+ if (state == NULL)
+ return true;
+
+ assert(builtin_avail != NULL);
+ return builtin_avail(state);
+}
+
+
+static bool
+modes_match(unsigned a, unsigned b)
+{
+ if (a == b)
+ return true;
+
+ /* Accept "in" vs. "const in" */
+ if ((a == ir_var_const_in && b == ir_var_function_in) ||
+ (b == ir_var_const_in && a == ir_var_function_in))
+ return true;
+
+ return false;
+}
+
+
+const char *
+ir_function_signature::qualifiers_match(exec_list *params)
+{
+ /* check that the qualifiers match. */
+ foreach_two_lists(a_node, &this->parameters, b_node, params) {
+ ir_variable *a = (ir_variable *) a_node;
+ ir_variable *b = (ir_variable *) b_node;
+
+ if (a->data.read_only != b->data.read_only ||
+ !modes_match(a->data.mode, b->data.mode) ||
+ a->data.interpolation != b->data.interpolation ||
+ a->data.centroid != b->data.centroid ||
+ a->data.sample != b->data.sample ||
+ a->data.patch != b->data.patch ||
+ a->data.memory_read_only != b->data.memory_read_only ||
+ a->data.memory_write_only != b->data.memory_write_only ||
+ a->data.memory_coherent != b->data.memory_coherent ||
+ a->data.memory_volatile != b->data.memory_volatile ||
+ a->data.memory_restrict != b->data.memory_restrict) {
+
+ /* parameter a's qualifiers don't match */
+ return a->name;
+ }
+ }
+ return NULL;
+}
+
+
+void
+ir_function_signature::replace_parameters(exec_list *new_params)
+{
+ /* Destroy all of the previous parameter information. If the previous
+ * parameter information comes from the function prototype, it may either
+ * specify incorrect parameter names or not have names at all.
+ */
+ new_params->move_nodes_to(&parameters);
+}
+
+
+ir_function::ir_function(const char *name)
+ : ir_instruction(ir_type_function)
+{
+ this->subroutine_index = -1;
+ this->name = ralloc_strdup(this, name);
+}
+
+
+bool
+ir_function::has_user_signature()
+{
+ foreach_in_list(ir_function_signature, sig, &this->signatures) {
+ if (!sig->is_builtin())
+ return true;
+ }
+ return false;
+}
+
+
+ir_rvalue *
+ir_rvalue::error_value(void *mem_ctx)
+{
+ ir_rvalue *v = new(mem_ctx) ir_rvalue(ir_type_unset);
+
+ v->type = glsl_type::error_type;
+ return v;
+}
+
+
+void
+visit_exec_list(exec_list *list, ir_visitor *visitor)
+{
+ foreach_in_list_safe(ir_instruction, node, list) {
+ node->accept(visitor);
+ }
+}
+
+
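+/* Reparent one IR node to 'new_ctx', along with side allocations that
+ * visit_tree() never reaches: constant values and initializers of
+ * variables, subroutine types of functions, and the elements of
+ * aggregate constants.
+ */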
+static void
+steal_memory(ir_instruction *ir, void *new_ctx)
+{
+ ir_variable *var = ir->as_variable();
+ ir_function *fn = ir->as_function();
+ ir_constant *constant = ir->as_constant();
+ if (var != NULL && var->constant_value != NULL)
+ steal_memory(var->constant_value, ir);
+
+ if (var != NULL && var->constant_initializer != NULL)
+ steal_memory(var->constant_initializer, ir);
+
+ if (fn != NULL && fn->subroutine_types)
+ ralloc_steal(new_ctx, fn->subroutine_types);
+
+ /* The components of aggregate constants are not visited by the normal
+ * visitor, so steal their values by hand.
+ */
+ if (constant != NULL &&
+ (constant->type->is_array() || constant->type->is_struct())) {
+ for (unsigned int i = 0; i < constant->type->length; i++) {
+ steal_memory(constant->const_elements[i], ir);
+ }
+ }
+
+ ralloc_steal(new_ctx, ir);
+}
+
+
+void
+reparent_ir(exec_list *list, void *mem_ctx)
+{
+ foreach_in_list(ir_instruction, node, list) {
+ visit_tree(node, steal_memory, mem_ctx);
+ }
+}
+
+
+static ir_rvalue *
+try_min_one(ir_rvalue *ir)
+{
+ ir_expression *expr = ir->as_expression();
+
+ if (!expr || expr->operation != ir_binop_min)
+ return NULL;
+
+ if (expr->operands[0]->is_one())
+ return expr->operands[1];
+
+ if (expr->operands[1]->is_one())
+ return expr->operands[0];
+
+ return NULL;
+}
+
+static ir_rvalue *
+try_max_zero(ir_rvalue *ir)
+{
+ ir_expression *expr = ir->as_expression();
+
+ if (!expr || expr->operation != ir_binop_max)
+ return NULL;
+
+ if (expr->operands[0]->is_zero())
+ return expr->operands[1];
+
+ if (expr->operands[1]->is_zero())
+ return expr->operands[0];
+
+ return NULL;
+}
+
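+/* If this expression clamps a value to [0,1] -- min(max(x, 0), 1) or
+ * max(min(x, 1), 0) -- return the inner expression so the caller can emit
+ * a saturate; otherwise return NULL.
+ */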
+ir_rvalue *
+ir_rvalue::as_rvalue_to_saturate()
+{
+ ir_expression *expr = this->as_expression();
+
+ if (!expr)
+ return NULL;
+
+ ir_rvalue *max_zero = try_max_zero(expr);
+ if (max_zero) {
+ return try_min_one(max_zero);
+ } else {
+ ir_rvalue *min_one = try_min_one(expr);
+ if (min_one) {
+ return try_max_zero(min_one);
+ }
+ }
+
+ return NULL;
+}
+
+
+unsigned
+vertices_per_prim(GLenum prim)
+{
+ switch (prim) {
+ case GL_POINTS:
+ return 1;
+ case GL_LINES:
+ return 2;
+ case GL_TRIANGLES:
+ return 3;
+ case GL_LINES_ADJACENCY:
+ return 4;
+ case GL_TRIANGLES_ADJACENCY:
+ return 6;
+ default:
+ assert(!"Bad primitive");
+ return 3;
+ }
+}
+
+/**
+ * Generate a string describing the mode of a variable
+ */
+const char *
+mode_string(const ir_variable *var)
+{
+ switch (var->data.mode) {
+ case ir_var_auto:
+ return (var->data.read_only) ? "global constant" : "global variable";
+
+ case ir_var_uniform:
+ return "uniform";
+
+ case ir_var_shader_storage:
+ return "buffer";
+
+ case ir_var_shader_in:
+ return "shader input";
+
+ case ir_var_shader_out:
+ return "shader output";
+
+ case ir_var_function_in:
+ case ir_var_const_in:
+ return "function input";
+
+ case ir_var_function_out:
+ return "function output";
+
+ case ir_var_function_inout:
+ return "function inout";
+
+ case ir_var_system_value:
+ return "shader input";
+
+ case ir_var_temporary:
+ return "compiler temporary";
+
+ case ir_var_mode_count:
+ break;
+ }
+
+ assert(!"Should not get here.");
+ return "invalid variable";
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.h
new file mode 100644
index 0000000000..7a23256fd7
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir.h
@@ -0,0 +1,2589 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_H
+#define IR_H
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "util/ralloc.h"
+#include "util/format/u_format.h"
+#include "util/half_float.h"
+#include "compiler/glsl_types.h"
+#include "list.h"
+#include "ir_visitor.h"
+#include "ir_hierarchical_visitor.h"
+
+#ifdef __cplusplus
+
+/**
+ * \defgroup IR Intermediate representation nodes
+ *
+ * @{
+ */
+
+/**
+ * Class tags
+ *
+ * Each concrete class derived from \c ir_instruction has a value in this
+ * enumerant. The value for the type is stored in \c ir_instruction::ir_type
+ * by the constructor. While using type tags is not very C++, it is extremely
+ * convenient. For example, during debugging you can simply inspect
+ * \c ir_instruction::ir_type to find out the actual type of the object.
+ *
+ * In addition, it is possible to use a switch-statement based on
+ * \c ir_instruction::ir_type to select different behavior for different object
+ * types. For functions that have only slight differences for several object
+ * types, this allows writing very straightforward, readable code.
+ */
+enum ir_node_type {
+ ir_type_dereference_array,
+ ir_type_dereference_record,
+ ir_type_dereference_variable,
+ ir_type_constant,
+ ir_type_expression,
+ ir_type_swizzle,
+ ir_type_texture,
+ ir_type_variable,
+ ir_type_assignment,
+ ir_type_call,
+ ir_type_function,
+ ir_type_function_signature,
+ ir_type_if,
+ ir_type_loop,
+ ir_type_loop_jump,
+ ir_type_return,
+ ir_type_precision,
+ ir_type_typedecl,
+ ir_type_discard,
+ ir_type_demote,
+ ir_type_emit_vertex,
+ ir_type_end_primitive,
+ ir_type_barrier,
+ ir_type_max, /**< maximum ir_type enum number, for validation */
+ ir_type_unset = ir_type_max
+};
+
+
+/**
+ * Base class of all IR instructions
+ */
+class ir_instruction : public exec_node {
+public:
+ enum ir_node_type ir_type;
+
+ /**
+ * GCC 4.7+ and clang warn when deleting an ir_instruction unless
+ * there's a virtual destructor present. Because we almost
+ * universally use ralloc for our memory management of
+ * ir_instructions, the destructor doesn't need to do any work.
+ */
+ virtual ~ir_instruction()
+ {
+ }
+
+ /** ir_print_visitor helper for debugging. */
+ void print(void) const;
+ void fprint(FILE *f) const;
+
+ virtual void accept(ir_visitor *) = 0;
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *) = 0;
+ virtual ir_instruction *clone(void *mem_ctx,
+ struct hash_table *ht) const = 0;
+
+ bool is_rvalue() const
+ {
+ return ir_type == ir_type_dereference_array ||
+ ir_type == ir_type_dereference_record ||
+ ir_type == ir_type_dereference_variable ||
+ ir_type == ir_type_constant ||
+ ir_type == ir_type_expression ||
+ ir_type == ir_type_swizzle ||
+ ir_type == ir_type_texture;
+ }
+
+ bool is_dereference() const
+ {
+ return ir_type == ir_type_dereference_array ||
+ ir_type == ir_type_dereference_record ||
+ ir_type == ir_type_dereference_variable;
+ }
+
+ bool is_jump() const
+ {
+ return ir_type == ir_type_loop_jump ||
+ ir_type == ir_type_return ||
+ ir_type == ir_type_discard;
+ }
+
+ /**
+ * \name IR instruction downcast functions
+ *
+ * These functions either cast the object to a derived class or return
+ * \c NULL if the object's type does not match the specified derived class.
+ * Additional downcast functions will be added as needed.
+ */
+ /*@{*/
+ #define AS_BASE(TYPE) \
+ class ir_##TYPE *as_##TYPE() \
+ { \
+ assume(this != NULL); \
+ return is_##TYPE() ? (ir_##TYPE *) this : NULL; \
+ } \
+ const class ir_##TYPE *as_##TYPE() const \
+ { \
+ assume(this != NULL); \
+ return is_##TYPE() ? (ir_##TYPE *) this : NULL; \
+ }
+
+ AS_BASE(rvalue)
+ AS_BASE(dereference)
+ AS_BASE(jump)
+ #undef AS_BASE
+
+ #define AS_CHILD(TYPE) \
+ class ir_##TYPE * as_##TYPE() \
+ { \
+ assume(this != NULL); \
+ return ir_type == ir_type_##TYPE ? (ir_##TYPE *) this : NULL; \
+ } \
+ const class ir_##TYPE * as_##TYPE() const \
+ { \
+ assume(this != NULL); \
+ return ir_type == ir_type_##TYPE ? (const ir_##TYPE *) this : NULL; \
+ }
+ AS_CHILD(variable)
+ AS_CHILD(function)
+ AS_CHILD(dereference_array)
+ AS_CHILD(dereference_variable)
+ AS_CHILD(dereference_record)
+ AS_CHILD(expression)
+ AS_CHILD(loop)
+ AS_CHILD(assignment)
+ AS_CHILD(call)
+ AS_CHILD(return)
+ AS_CHILD(if)
+ AS_CHILD(swizzle)
+ AS_CHILD(texture)
+ AS_CHILD(constant)
+ AS_CHILD(discard)
+ #undef AS_CHILD
+ /*@}*/
+
+ /**
+ * IR equality method: Return true if the referenced instruction would
+ * return the same value as this one.
+ *
+    * This is intended to be used for CSE and algebraic optimizations, on rvalues
+ * in particular. No support for other instruction types (assignments,
+ * jumps, calls, etc.) is planned.
+ */
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+protected:
+ ir_instruction(enum ir_node_type t)
+ : ir_type(t)
+ {
+ }
+
+private:
+ ir_instruction()
+ {
+ assert(!"Should not get here.");
+ }
+};
+
+
+/**
+ * The base class for all "values"/expression trees.
+ */
+class ir_rvalue : public ir_instruction {
+public:
+ const struct glsl_type *type;
+
+ virtual ir_rvalue *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ ir_rvalue *as_rvalue_to_saturate();
+
+ virtual bool is_lvalue(const struct _mesa_glsl_parse_state * = NULL) const
+ {
+ return false;
+ }
+
+ /**
+ * Get the variable that is ultimately referenced by an r-value
+ */
+ virtual ir_variable *variable_referenced() const
+ {
+ return NULL;
+ }
+
+
+ /**
+ * If an r-value is a reference to a whole variable, get that variable
+ *
+ * \return
+ * Pointer to a variable that is completely dereferenced by the r-value. If
+ * the r-value is not a dereference or the dereference does not access the
+ * entire variable (i.e., it's just one array element, struct field), \c NULL
+ * is returned.
+ */
+ virtual ir_variable *whole_variable_referenced()
+ {
+ return NULL;
+ }
+
+ /**
+ * Determine if an r-value has the value zero
+ *
+ * The base implementation of this function always returns \c false. The
+ * \c ir_constant class over-rides this function to return \c true \b only
+ * for vector and scalar types that have all elements set to the value
+ * zero (or \c false for booleans).
+ *
+ * \sa ir_constant::has_value, ir_rvalue::is_one, ir_rvalue::is_negative_one
+ */
+ virtual bool is_zero() const;
+
+ /**
+ * Determine if an r-value has the value one
+ *
+ * The base implementation of this function always returns \c false. The
+ * \c ir_constant class over-rides this function to return \c true \b only
+ * for vector and scalar types that have all elements set to the value
+ * one (or \c true for booleans).
+ *
+ * \sa ir_constant::has_value, ir_rvalue::is_zero, ir_rvalue::is_negative_one
+ */
+ virtual bool is_one() const;
+
+ /**
+ * Determine if an r-value has the value negative one
+ *
+ * The base implementation of this function always returns \c false. The
+ * \c ir_constant class over-rides this function to return \c true \b only
+ * for vector and scalar types that have all elements set to the value
+ * negative one. For boolean types, the result is always \c false.
+ *
+ * \sa ir_constant::has_value, ir_rvalue::is_zero, ir_rvalue::is_one
+ */
+ virtual bool is_negative_one() const;
+
+ /**
+ * Determine if an r-value is an unsigned integer constant which can be
+ * stored in 16 bits.
+ *
+ * \sa ir_constant::is_uint16_constant.
+ */
+ virtual bool is_uint16_constant() const { return false; }
+
+ /**
+ * Return a generic value of error_type.
+ *
+ * Allocation will be performed with 'mem_ctx' as ralloc owner.
+ */
+ static ir_rvalue *error_value(void *mem_ctx);
+
+protected:
+ ir_rvalue(enum ir_node_type t);
+};
+
+
+/**
+ * Variable storage classes
+ */
+enum ir_variable_mode {
+ ir_var_auto = 0, /**< Function local variables and globals. */
+ ir_var_uniform, /**< Variable declared as a uniform. */
+ ir_var_shader_storage, /**< Variable declared as an ssbo. */
+ ir_var_shader_shared, /**< Variable declared as shared. */
+ ir_var_shader_in,
+ ir_var_shader_out,
+ ir_var_function_in,
+ ir_var_function_out,
+ ir_var_function_inout,
+ ir_var_const_in, /**< "in" param that must be a constant expression */
+ ir_var_system_value, /**< Ex: front-face, instance-id, etc. */
+ ir_var_temporary, /**< Temporary variable generated during compilation. */
+ ir_var_mode_count /**< Number of variable modes */
+};
+
+/**
+ * Enum keeping track of how a variable was declared. For error checking of
+ * the gl_PerVertex redeclaration rules.
+ */
+enum ir_var_declaration_type {
+ /**
+ * Normal declaration (for most variables, this means an explicit
+ * declaration. Exception: temporaries are always implicitly declared, but
+ * they still use ir_var_declared_normally).
+ *
+ * Note: an ir_variable that represents a named interface block uses
+ * ir_var_declared_normally.
+ */
+ ir_var_declared_normally = 0,
+
+ /**
+ * Variable was explicitly declared (or re-declared) in an unnamed
+ * interface block.
+ */
+ ir_var_declared_in_block,
+
+ /**
+ * Variable is an implicitly declared built-in that has not been explicitly
+ * re-declared by the shader.
+ */
+ ir_var_declared_implicitly,
+
+ /**
+ * Variable is implicitly generated by the compiler and should not be
+ * visible via the API.
+ */
+ ir_var_hidden,
+};
+
+/**
+ * \brief Layout qualifiers for gl_FragDepth.
+ *
+ * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be redeclared
+ * with a layout qualifier.
+ */
+enum ir_depth_layout {
+ ir_depth_layout_none, /**< No depth layout is specified. */
+ ir_depth_layout_any,
+ ir_depth_layout_greater,
+ ir_depth_layout_less,
+ ir_depth_layout_unchanged
+};
+
+/**
+ * \brief Convert depth layout qualifier to string.
+ */
+const char*
+depth_layout_string(ir_depth_layout layout);
+
+/**
+ * Description of built-in state associated with a uniform
+ *
+ * \sa ir_variable::state_slots
+ */
+struct ir_state_slot {
+ gl_state_index16 tokens[STATE_LENGTH];
+ int swizzle;
+};
+
+
+/**
+ * Get the string value for an interpolation qualifier
+ *
+ * \return The string that would be used in a shader to specify \c mode.
+ *
+ * This function is used to generate error messages of the form "shader
+ * uses %s interpolation qualifier", so in the case where there is no
+ * interpolation qualifier, it returns "no".
+ *
+ * This function should only be used on a shader input or output variable.
+ */
+const char *interpolation_string(unsigned interpolation);
+
+
+class ir_variable : public ir_instruction {
+public:
+ ir_variable(const struct glsl_type *, const char *, ir_variable_mode);
+
+ virtual ir_variable *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+
+ /**
+ * Determine whether or not a variable is part of a uniform or
+ * shader storage block.
+ */
+ inline bool is_in_buffer_block() const
+ {
+ return (this->data.mode == ir_var_uniform ||
+ this->data.mode == ir_var_shader_storage) &&
+ this->interface_type != NULL;
+ }
+
+ /**
+ * Determine whether or not a variable is part of a shader storage block.
+ */
+ inline bool is_in_shader_storage_block() const
+ {
+ return this->data.mode == ir_var_shader_storage &&
+ this->interface_type != NULL;
+ }
+
+ /**
+ * Determine whether or not a variable is the declaration of an interface
+ * block
+ *
+ * For the first declaration below, there will be an \c ir_variable named
+ * "instance" whose type and whose instance_type will be the same
+ * \c glsl_type. For the second declaration, there will be an \c ir_variable
+ * named "f" whose type is float and whose instance_type is B2.
+ *
+ * "instance" is an interface instance variable, but "f" is not.
+ *
+ * uniform B1 {
+ * float f;
+ * } instance;
+ *
+ * uniform B2 {
+ * float f;
+ * };
+ */
+ inline bool is_interface_instance() const
+ {
+ return this->type->without_array() == this->interface_type;
+ }
+
+ /**
+ * Return whether this variable contains a bindless sampler/image.
+ */
+ inline bool contains_bindless() const
+ {
+ if (!this->type->contains_sampler() && !this->type->contains_image())
+ return false;
+
+ return this->data.bindless || this->data.mode != ir_var_uniform;
+ }
+
+ /**
+ * Set this->interface_type on a newly created variable.
+ */
+ void init_interface_type(const struct glsl_type *type)
+ {
+ assert(this->interface_type == NULL);
+ this->interface_type = type;
+ if (this->is_interface_instance()) {
+ this->u.max_ifc_array_access =
+ ralloc_array(this, int, type->length);
+ for (unsigned i = 0; i < type->length; i++) {
+ this->u.max_ifc_array_access[i] = -1;
+ }
+ }
+ }
+
+ /**
+ * Change this->interface_type on a variable that previously had a
+ * different, but compatible, interface_type. This is used during linking
+ * to set the size of arrays in interface blocks.
+ */
+ void change_interface_type(const struct glsl_type *type)
+ {
+ if (this->u.max_ifc_array_access != NULL) {
+ /* max_ifc_array_access has already been allocated, so make sure the
+ * new interface has the same number of fields as the old one.
+ */
+ assert(this->interface_type->length == type->length);
+ }
+ this->interface_type = type;
+ }
+
+ /**
+ * Change this->interface_type on a variable that previously had a
+ * different, and incompatible, interface_type. This is used during
+ * compilation to handle redeclaration of the built-in gl_PerVertex
+ * interface block.
+ */
+ void reinit_interface_type(const struct glsl_type *type)
+ {
+ if (this->u.max_ifc_array_access != NULL) {
+#ifndef NDEBUG
+ /* Redeclaring gl_PerVertex is only allowed if none of the built-ins
+ * it defines have been accessed yet; so it's safe to throw away the
+ * old max_ifc_array_access pointer, since all of its values are
+ * still -1.
+ */
+ for (unsigned i = 0; i < this->interface_type->length; i++)
+ assert(this->u.max_ifc_array_access[i] == -1);
+#endif
+ ralloc_free(this->u.max_ifc_array_access);
+ this->u.max_ifc_array_access = NULL;
+ }
+ this->interface_type = NULL;
+ init_interface_type(type);
+ }
+
+ const glsl_type *get_interface_type() const
+ {
+ return this->interface_type;
+ }
+
+ enum glsl_interface_packing get_interface_type_packing() const
+ {
+ return this->interface_type->get_interface_packing();
+ }
+ /**
+ * Get the max_ifc_array_access pointer
+ *
+ * A "set" function is not needed because the array is dynmically allocated
+ * as necessary.
+ */
+ inline int *get_max_ifc_array_access()
+ {
+ assert(this->data._num_state_slots == 0);
+ return this->u.max_ifc_array_access;
+ }
+
+ inline unsigned get_num_state_slots() const
+ {
+ assert(!this->is_interface_instance()
+ || this->data._num_state_slots == 0);
+ return this->data._num_state_slots;
+ }
+
+ inline void set_num_state_slots(unsigned n)
+ {
+ assert(!this->is_interface_instance()
+ || n == 0);
+ this->data._num_state_slots = n;
+ }
+
+ inline ir_state_slot *get_state_slots()
+ {
+ return this->is_interface_instance() ? NULL : this->u.state_slots;
+ }
+
+ inline const ir_state_slot *get_state_slots() const
+ {
+ return this->is_interface_instance() ? NULL : this->u.state_slots;
+ }
+
+ inline ir_state_slot *allocate_state_slots(unsigned n)
+ {
+ assert(!this->is_interface_instance());
+
+ this->u.state_slots = ralloc_array(this, ir_state_slot, n);
+ this->data._num_state_slots = 0;
+
+ if (this->u.state_slots != NULL)
+ this->data._num_state_slots = n;
+
+ return this->u.state_slots;
+ }
+
+ inline bool is_interpolation_flat() const
+ {
+ return this->data.interpolation == INTERP_MODE_FLAT ||
+ this->type->contains_integer() ||
+ this->type->contains_double();
+ }
+
+ inline bool is_name_ralloced() const
+ {
+ return this->name != ir_variable::tmp_name &&
+ this->name != this->name_storage;
+ }
+
+ /**
+ * Enable emitting extension warnings for this variable
+ */
+ void enable_extension_warning(const char *extension);
+
+ /**
+ * Get the extension warning string for this variable
+ *
+ * If warnings are not enabled, \c NULL is returned.
+ */
+ const char *get_extension_warning() const;
+
+ /**
+ * Declared type of the variable
+ */
+ const struct glsl_type *type;
+
+ /**
+ * Declared name of the variable
+ */
+ const char *name;
+
+private:
+ /**
+ * If the name length fits into name_storage, it's used, otherwise
+ * the name is ralloc'd. shader-db mining showed that 70% of variables
+ * fit here. This is a win over ralloc, whose ralloc_header alone takes
+ * 20 bytes on 64-bit (28 bytes with DEBUG), and it also skips a malloc.
+ */
+ char name_storage[16];
+
+public:
+ struct ir_variable_data {
+
+ /**
+ * Is the variable read-only?
+ *
+ * This is set for variables declared as \c const, shader inputs,
+ * and uniforms.
+ */
+ unsigned read_only:1;
+ unsigned centroid:1;
+ unsigned sample:1;
+ unsigned patch:1;
+ /**
+ * Was an 'invariant' qualifier explicitly set in the shader?
+ *
+ * This is used to cross validate qualifiers.
+ */
+ unsigned explicit_invariant:1;
+ /**
+ * Is the variable invariant?
+ *
+ * It can happen either by having the 'invariant' qualifier
+ * explicitly set in the shader or by being used in calculations
+ * of other invariant variables.
+ */
+ unsigned invariant:1;
+ unsigned precise:1;
+
+ /**
+ * Has this variable been used for reading or writing?
+ *
+ * Several GLSL semantic checks require knowledge of whether or not a
+ * variable has been used. For example, it is an error to redeclare a
+ * variable as invariant after it has been used.
+ *
+ * This is maintained in the ast_to_hir.cpp path and during linking,
+ * but not in Mesa's fixed function or ARB program paths.
+ */
+ unsigned used:1;
+
+ /**
+ * Has this variable been statically assigned?
+ *
+ * This answers whether the variable was assigned in any path of
+ * the shader during ast_to_hir. This doesn't answer whether it is
+ * still written after dead code removal, nor is it maintained in
+ * non-ast_to_hir.cpp (GLSL parsing) paths.
+ */
+ unsigned assigned:1;
+
+ /**
+ * When separate shader programs are enabled, only input/outputs between
+ * the stages of a multi-stage separate program can be safely removed
+ * from the shader interface. Other inputs/outputs must remain active.
+ */
+ unsigned always_active_io:1;
+
+ /**
+ * Enum indicating how the variable was declared. See
+ * ir_var_declaration_type.
+ *
+ * This is used to detect certain kinds of illegal variable redeclarations.
+ */
+ unsigned how_declared:2;
+
+ /**
+ * Storage class of the variable.
+ *
+ * \sa ir_variable_mode
+ */
+ unsigned mode:4;
+
+ /**
+ * Interpolation mode for shader inputs / outputs
+ *
+ * \sa glsl_interp_mode
+ */
+ unsigned interpolation:2;
+
+ /**
+ * Was the location explicitly set in the shader?
+ *
+ * If the location is explicitly set in the shader, it \b cannot be changed
+ * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
+ * no effect).
+ */
+ unsigned explicit_location:1;
+ unsigned explicit_index:1;
+
+ /**
+ * Was an initial binding explicitly set in the shader?
+ *
+ * If so, constant_value contains an integer ir_constant representing the
+ * initial binding point.
+ */
+ unsigned explicit_binding:1;
+
+ /**
+ * Was an initial component explicitly set in the shader?
+ */
+ unsigned explicit_component:1;
+
+ /**
+ * Does this variable have an initializer?
+ *
+ * This is used by the linker to cross-validate initializers of global
+ * variables.
+ */
+ unsigned has_initializer:1;
+
+ /**
+ * Is this variable a generic output or input that has not yet been matched
+ * up to a variable in another stage of the pipeline?
+ *
+ * This is used by the linker as scratch storage while assigning locations
+ * to generic inputs and outputs.
+ */
+ unsigned is_unmatched_generic_inout:1;
+
+ /**
+ * Is this varying used by transform feedback?
+ *
+ * This is used by the linker to decide if it's safe to pack the varying.
+ */
+ unsigned is_xfb:1;
+
+ /**
+ * Is this varying used only by transform feedback?
+ *
+ * This is used by the linker to decide if it's safe to pack the varying.
+ */
+ unsigned is_xfb_only:1;
+
+ /**
+ * Was a transform feedback buffer set in the shader?
+ */
+ unsigned explicit_xfb_buffer:1;
+
+ /**
+ * Was a transform feedback offset set in the shader?
+ */
+ unsigned explicit_xfb_offset:1;
+
+ /**
+ * Was a transform feedback stride set in the shader?
+ */
+ unsigned explicit_xfb_stride:1;
+
+ /**
+ * If non-zero, then this variable may be packed along with other variables
+ * into a single varying slot, so this offset should be applied when
+ * accessing components. For example, an offset of 1 means that the x
+ * component of this variable is actually stored in component y of the
+ * location specified by \c location.
+ */
+ unsigned location_frac:2;
+
+ /**
+ * Layout of the matrix. Uses glsl_matrix_layout values.
+ */
+ unsigned matrix_layout:2;
+
+ /**
+ * Non-zero if this variable was created by lowering a named interface
+ * block.
+ */
+ unsigned from_named_ifc_block:1;
+
+ /**
+ * Non-zero if the variable must be a shader input. This is useful for
+ * constraints on function parameters.
+ */
+ unsigned must_be_shader_input:1;
+
+ /**
+ * Output index for dual source blending.
+ *
+ * \note
+ * The GLSL spec only allows the values 0 or 1 for the index in \b dual
+ * source blending.
+ */
+ unsigned index:1;
+
+ /**
+ * Precision qualifier.
+ *
+ * In desktop GLSL we do not care about precision qualifiers at all, in
+ * fact, the spec says that precision qualifiers are ignored.
+ *
+ * To make things easy, we make it so that this field is always
+ * GLSL_PRECISION_NONE on desktop shaders. This way all the variables
+ * have the same precision value and the checks we add in the compiler
+ * for this field will never break a desktop shader compile.
+ */
+ unsigned precision:2;
+
+ /**
+ * \brief Layout qualifier for gl_FragDepth.
+ *
+ * This is not equal to \c ir_depth_layout_none if and only if this
+ * variable is \c gl_FragDepth and a layout qualifier is specified.
+ */
+ ir_depth_layout depth_layout:3;
+
+ /**
+ * Memory qualifiers.
+ */
+ unsigned memory_read_only:1; /**< "readonly" qualifier. */
+ unsigned memory_write_only:1; /**< "writeonly" qualifier. */
+ unsigned memory_coherent:1;
+ unsigned memory_volatile:1;
+ unsigned memory_restrict:1;
+
+ /**
+ * ARB_shader_storage_buffer_object
+ */
+ unsigned from_ssbo_unsized_array:1; /**< unsized array buffer variable. */
+
+ unsigned implicit_sized_array:1;
+
+ /**
+ * Whether this is a fragment shader output implicitly initialized with
+ * the previous contents of the specified render target at the
+ * framebuffer location corresponding to this shader invocation.
+ */
+ unsigned fb_fetch_output:1;
+
+ /**
+ * Non-zero if this variable is considered bindless as defined by
+ * ARB_bindless_texture.
+ */
+ unsigned bindless:1;
+
+ /**
+ * Non-zero if this variable is considered bound as defined by
+ * ARB_bindless_texture.
+ */
+ unsigned bound:1;
+
+ /**
+ * Emit a warning if this variable is accessed.
+ */
+ private:
+ uint8_t warn_extension_index;
+
+ public:
+ /**
+ * Image internal format if specified explicitly, otherwise
+ * PIPE_FORMAT_NONE.
+ */
+ enum pipe_format image_format;
+
+ private:
+ /**
+ * Number of state slots used
+ *
+ * \note
+ * This could be stored in as few as 7 bits, if necessary. If it is made
+ * smaller, add an assertion to \c ir_variable::allocate_state_slots to
+ * be safe.
+ */
+ uint16_t _num_state_slots;
+
+ public:
+ /**
+ * Initial binding point for a sampler, atomic, or UBO.
+ *
+ * For array types, this represents the binding point for the first element.
+ */
+ uint16_t binding;
+
+ /**
+ * Storage location of the base of this variable
+ *
+ * The precise meaning of this field depends on the nature of the variable.
+ *
+ * - Vertex shader input: one of the values from \c gl_vert_attrib.
+ * - Vertex shader output: one of the values from \c gl_varying_slot.
+ * - Geometry shader input: one of the values from \c gl_varying_slot.
+ * - Geometry shader output: one of the values from \c gl_varying_slot.
+ * - Fragment shader input: one of the values from \c gl_varying_slot.
+ * - Fragment shader output: one of the values from \c gl_frag_result.
+ * - Uniforms: Per-stage uniform slot number for default uniform block.
+ * - Uniforms: Index within the uniform block definition for UBO members.
+ * - Non-UBO Uniforms: explicit location until linking then reused to
+ * store uniform slot number.
+ * - Other: This field is not currently used.
+ *
+ * If the variable is a uniform, shader input, or shader output, and the
+ * slot has not been assigned, the value will be -1.
+ */
+ int location;
+
+ /**
+ * for glsl->tgsi/mesa IR we need to store the index into the
+ * parameters for uniforms, initially the code overloaded location
+ * but this causes problems with indirect samplers and AoA.
+ * This is assigned in _mesa_generate_parameters_list_for_uniforms.
+ */
+ int param_index;
+
+ /**
+ * Vertex stream output identifier.
+ *
+ * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
+ * stream of the i-th component.
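+ *
+ * For example, a packed output whose components i = 0..3 go to streams
+ * 0, 1, 2 and 3 in turn would store
+ * (1u << 31) | (3 << 6) | (2 << 4) | (1 << 2) | 0 == 0x800000e4.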
+ */
+ unsigned stream;
+
+ /**
+ * Atomic, transform feedback or block member offset.
+ */
+ unsigned offset;
+
+ /**
+ * Highest element accessed with a constant expression array index
+ *
+ * Not used for non-array variables. A value of -1 means never accessed.
+ */
+ int max_array_access;
+
+ /**
+ * Transform feedback buffer.
+ */
+ unsigned xfb_buffer;
+
+ /**
+ * Transform feedback stride.
+ */
+ unsigned xfb_stride;
+
+ /**
+ * Allow (only) ir_variable direct access private members.
+ */
+ friend class ir_variable;
+ } data;
+
+ /**
+ * Value assigned in the initializer of a variable declared "const"
+ */
+ ir_constant *constant_value;
+
+ /**
+ * Constant expression assigned in the initializer of the variable
+ *
+ * \warning
+ * This field and \c ::constant_value are distinct. Even if the two fields
+ * refer to constants with the same value, they must point to separate
+ * objects.
+ */
+ ir_constant *constant_initializer;
+
+private:
+ static const char *const warn_extension_table[];
+
+ union {
+ /**
+ * For variables which satisfy the is_interface_instance() predicate,
+ * this points to an array of integers such that if the ith member of
+ * the interface block is an array, max_ifc_array_access[i] is the
+ * maximum array element of that member that has been accessed. If the
+ * ith member of the interface block is not an array,
+ * max_ifc_array_access[i] is unused.
+ *
+ * For variables whose type is not an interface block, this pointer is
+ * NULL.
+ */
+ int *max_ifc_array_access;
+
+ /**
+ * Built-in state that backs this uniform
+ *
+ * Once set at variable creation, \c state_slots must remain invariant.
+ *
+ * If the variable is not a uniform, \c _num_state_slots will be zero
+ * and \c state_slots will be \c NULL.
+ */
+ ir_state_slot *state_slots;
+ } u;
+
+ /**
+ * For variables that are in an interface block or are an instance of an
+ * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
+ *
+ * \sa ir_variable::location
+ */
+ const glsl_type *interface_type;
+
+ /**
+ * Name used for anonymous compiler temporaries
+ */
+ static const char tmp_name[];
+
+public:
+ /**
+ * Should the construct keep names for ir_var_temporary variables?
+ *
+ * When this global is false, names passed to the constructor for
+ * \c ir_var_temporary variables will be dropped. Instead, the variable will
+ * be named "compiler_temp". This name will be in static storage.
+ *
+ * \warning
+ * \b NEVER change the mode of an \c ir_var_temporary.
+ *
+ * \warning
+ * This variable is \b not thread-safe. It is global, \b not
+ * per-context. It begins life false. A context can, at some point, make
+ * it true. From that point on, it will be true forever. This should be
+ * okay since it will only be set true while debugging.
+ */
+ static bool temporaries_allocate_names;
+};
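+
+/* A minimal construction sketch (an illustration, assuming a ralloc context
+ * `mem_ctx`; glsl_type's convenience statics and INTERP_MODE_FLAT are used
+ * elsewhere in this compiler):
+ *
+ *    ir_variable *var =
+ *       new(mem_ctx) ir_variable(glsl_type::vec4_type, "color",
+ *                                ir_var_shader_in);
+ *    var->data.interpolation = INTERP_MODE_FLAT;
+ */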
+
+/**
+ * A function that returns whether a built-in function is available in the
+ * current shading language (based on version, ES or desktop, and extensions).
+ */
+typedef bool (*builtin_available_predicate)(const _mesa_glsl_parse_state *);
+
+#define MAKE_INTRINSIC_FOR_TYPE(op, t) \
+ ir_intrinsic_generic_ ## op - ir_intrinsic_generic_load + ir_intrinsic_ ## t ## _ ## load
+
+#define MAP_INTRINSIC_TO_TYPE(i, t) \
+ ir_intrinsic_id(int(i) - int(ir_intrinsic_generic_load) + int(ir_intrinsic_ ## t ## _ ## load))
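+
+/* For example, because the typed blocks below mirror the generic block,
+ * MAP_INTRINSIC_TO_TYPE(ir_intrinsic_generic_atomic_add, ssbo) evaluates to
+ * ir_intrinsic_ssbo_atomic_add: both macros rebase an id by the offset
+ * between the generic and the typed "load" entries.
+ */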
+
+enum ir_intrinsic_id {
+ ir_intrinsic_invalid = 0,
+
+ /**
+ * \name Generic intrinsics
+ *
+ * Each of these intrinsics has a specific version for shared variables and
+ * SSBOs.
+ */
+ /*@{*/
+ ir_intrinsic_generic_load,
+ ir_intrinsic_generic_store,
+ ir_intrinsic_generic_atomic_add,
+ ir_intrinsic_generic_atomic_and,
+ ir_intrinsic_generic_atomic_or,
+ ir_intrinsic_generic_atomic_xor,
+ ir_intrinsic_generic_atomic_min,
+ ir_intrinsic_generic_atomic_max,
+ ir_intrinsic_generic_atomic_exchange,
+ ir_intrinsic_generic_atomic_comp_swap,
+ /*@}*/
+
+ ir_intrinsic_atomic_counter_read,
+ ir_intrinsic_atomic_counter_increment,
+ ir_intrinsic_atomic_counter_predecrement,
+ ir_intrinsic_atomic_counter_add,
+ ir_intrinsic_atomic_counter_and,
+ ir_intrinsic_atomic_counter_or,
+ ir_intrinsic_atomic_counter_xor,
+ ir_intrinsic_atomic_counter_min,
+ ir_intrinsic_atomic_counter_max,
+ ir_intrinsic_atomic_counter_exchange,
+ ir_intrinsic_atomic_counter_comp_swap,
+
+ ir_intrinsic_image_load,
+ ir_intrinsic_image_store,
+ ir_intrinsic_image_atomic_add,
+ ir_intrinsic_image_atomic_and,
+ ir_intrinsic_image_atomic_or,
+ ir_intrinsic_image_atomic_xor,
+ ir_intrinsic_image_atomic_min,
+ ir_intrinsic_image_atomic_max,
+ ir_intrinsic_image_atomic_exchange,
+ ir_intrinsic_image_atomic_comp_swap,
+ ir_intrinsic_image_size,
+ ir_intrinsic_image_samples,
+ ir_intrinsic_image_atomic_inc_wrap,
+ ir_intrinsic_image_atomic_dec_wrap,
+
+ ir_intrinsic_ssbo_load,
+ ir_intrinsic_ssbo_store = MAKE_INTRINSIC_FOR_TYPE(store, ssbo),
+ ir_intrinsic_ssbo_atomic_add = MAKE_INTRINSIC_FOR_TYPE(atomic_add, ssbo),
+ ir_intrinsic_ssbo_atomic_and = MAKE_INTRINSIC_FOR_TYPE(atomic_and, ssbo),
+ ir_intrinsic_ssbo_atomic_or = MAKE_INTRINSIC_FOR_TYPE(atomic_or, ssbo),
+ ir_intrinsic_ssbo_atomic_xor = MAKE_INTRINSIC_FOR_TYPE(atomic_xor, ssbo),
+ ir_intrinsic_ssbo_atomic_min = MAKE_INTRINSIC_FOR_TYPE(atomic_min, ssbo),
+ ir_intrinsic_ssbo_atomic_max = MAKE_INTRINSIC_FOR_TYPE(atomic_max, ssbo),
+ ir_intrinsic_ssbo_atomic_exchange = MAKE_INTRINSIC_FOR_TYPE(atomic_exchange, ssbo),
+ ir_intrinsic_ssbo_atomic_comp_swap = MAKE_INTRINSIC_FOR_TYPE(atomic_comp_swap, ssbo),
+
+ ir_intrinsic_memory_barrier,
+ ir_intrinsic_shader_clock,
+ ir_intrinsic_group_memory_barrier,
+ ir_intrinsic_memory_barrier_atomic_counter,
+ ir_intrinsic_memory_barrier_buffer,
+ ir_intrinsic_memory_barrier_image,
+ ir_intrinsic_memory_barrier_shared,
+ ir_intrinsic_begin_invocation_interlock,
+ ir_intrinsic_end_invocation_interlock,
+
+ ir_intrinsic_vote_all,
+ ir_intrinsic_vote_any,
+ ir_intrinsic_vote_eq,
+ ir_intrinsic_ballot,
+ ir_intrinsic_read_invocation,
+ ir_intrinsic_read_first_invocation,
+
+ ir_intrinsic_helper_invocation,
+
+ ir_intrinsic_shared_load,
+ ir_intrinsic_shared_store = MAKE_INTRINSIC_FOR_TYPE(store, shared),
+ ir_intrinsic_shared_atomic_add = MAKE_INTRINSIC_FOR_TYPE(atomic_add, shared),
+ ir_intrinsic_shared_atomic_and = MAKE_INTRINSIC_FOR_TYPE(atomic_and, shared),
+ ir_intrinsic_shared_atomic_or = MAKE_INTRINSIC_FOR_TYPE(atomic_or, shared),
+ ir_intrinsic_shared_atomic_xor = MAKE_INTRINSIC_FOR_TYPE(atomic_xor, shared),
+ ir_intrinsic_shared_atomic_min = MAKE_INTRINSIC_FOR_TYPE(atomic_min, shared),
+ ir_intrinsic_shared_atomic_max = MAKE_INTRINSIC_FOR_TYPE(atomic_max, shared),
+ ir_intrinsic_shared_atomic_exchange = MAKE_INTRINSIC_FOR_TYPE(atomic_exchange, shared),
+ ir_intrinsic_shared_atomic_comp_swap = MAKE_INTRINSIC_FOR_TYPE(atomic_comp_swap, shared),
+};
+
+/*@{*/
+/**
+ * The representation of a function instance; may be the full definition or
+ * simply a prototype.
+ */
+class ir_function_signature : public ir_instruction {
+ /* An ir_function_signature will be part of the list of signatures in
+ * an ir_function.
+ */
+public:
+ ir_function_signature(const glsl_type *return_type,
+ builtin_available_predicate builtin_avail = NULL);
+
+ virtual ir_function_signature *clone(void *mem_ctx,
+ struct hash_table *ht) const;
+ ir_function_signature *clone_prototype(void *mem_ctx,
+ struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ /**
+ * Attempt to evaluate this function as a constant expression,
+ * given a list of the actual parameters and the variable context.
+ * Returns NULL for non-built-ins.
+ */
+ ir_constant *constant_expression_value(void *mem_ctx,
+ exec_list *actual_parameters,
+ struct hash_table *variable_context);
+
+ /**
+ * Get the name of the function for which this is a signature
+ */
+ const char *function_name() const;
+
+ /**
+ * Get a handle to the function for which this is a signature
+ *
+ * There is no setter function; this function returns a \c const pointer,
+ * and \c ir_function_signature::_function is private for a reason. The
+ * only way to make a connection between a function and function signature
+ * is via \c ir_function::add_signature. This helps ensure that certain
+ * invariants (i.e., a function signature is in the list of signatures for
+ * its \c _function) are met.
+ *
+ * \sa ir_function::add_signature
+ */
+ inline const class ir_function *function() const
+ {
+ return this->_function;
+ }
+
+ /**
+ * Check whether the qualifiers match between this signature's parameters
+ * and the supplied parameter list. If not, returns the name of the first
+ * parameter with mismatched qualifiers (for use in error messages).
+ */
+ const char *qualifiers_match(exec_list *params);
+
+ /**
+ * Replace the current parameter list with the given one. This is useful
+ * if the current information came from a prototype, and either has invalid
+ * or missing parameter names.
+ */
+ void replace_parameters(exec_list *new_params);
+
+ /**
+ * Function return type.
+ *
+ * \note The precision qualifier is stored separately in return_precision.
+ */
+ const struct glsl_type *return_type;
+
+ /**
+ * List of ir_variable of function parameters.
+ *
+ * This represents the storage. The parameters passed in a particular
+ * call will be in ir_call::actual_parameters.
+ */
+ struct exec_list parameters;
+
+ /** Whether or not this function has a body (which may be empty). */
+ unsigned is_defined:1;
+
+ /*
+ * Precision qualifier for the return type.
+ *
+ * See the comment for ir_variable_data::precision for more details.
+ */
+ unsigned return_precision:2;
+
+ /** Whether or not this function signature is a built-in. */
+ bool is_builtin() const;
+
+ /**
+ * Whether or not this function is an intrinsic to be implemented
+ * by the driver.
+ */
+ inline bool is_intrinsic() const
+ {
+ return intrinsic_id != ir_intrinsic_invalid;
+ }
+
+ /** Identifier for this intrinsic. */
+ enum ir_intrinsic_id intrinsic_id;
+
+ /** Whether or not a built-in is available for this shader. */
+ bool is_builtin_available(const _mesa_glsl_parse_state *state) const;
+
+ /** Body of instructions in the function. */
+ struct exec_list body;
+
+private:
+ /**
+ * A function pointer to a predicate that answers whether a built-in
+ * function is available in the current shader. NULL if not a built-in.
+ */
+ builtin_available_predicate builtin_avail;
+
+ /** Function of which this signature is one overload. */
+ class ir_function *_function;
+
+ /** Function signature of which this one is a prototype clone */
+ const ir_function_signature *origin;
+
+ friend class ir_function;
+
+ /**
+ * Helper function to run a list of instructions for constant
+ * expression evaluation.
+ *
+ * The hash table represents the values of the visible variables.
+ * There are no scoping issues because the table is indexed on
+ * ir_variable pointers, not variable names.
+ *
+ * Returns false if the expression is not constant, true otherwise,
+ * and the value in *result if result is non-NULL.
+ */
+ bool constant_expression_evaluate_expression_list(void *mem_ctx,
+ const struct exec_list &body,
+ struct hash_table *variable_context,
+ ir_constant **result);
+};
+
+
+/**
+ * Header for tracking multiple overloaded functions with the same name.
+ * Contains a list of ir_function_signatures representing each of the
+ * actual functions.
+ */
+class ir_function : public ir_instruction {
+public:
+ ir_function(const char *name);
+
+ virtual ir_function *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ void add_signature(ir_function_signature *sig)
+ {
+ sig->_function = this;
+ this->signatures.push_tail(sig);
+ }
+
+ /**
+ * Find a signature that matches a set of actual parameters, taking implicit
+ * conversions into account. Also flags whether the match was exact.
+ */
+ ir_function_signature *matching_signature(_mesa_glsl_parse_state *state,
+ const exec_list *actual_param,
+ bool allow_builtins,
+ bool *match_is_exact);
+
+ /**
+ * Find a signature that matches a set of actual parameters, taking implicit
+ * conversions into account.
+ */
+ ir_function_signature *matching_signature(_mesa_glsl_parse_state *state,
+ const exec_list *actual_param,
+ bool allow_builtins);
+
+ /**
+ * Find a signature that exactly matches a set of actual parameters without
+ * any implicit type conversions.
+ */
+ ir_function_signature *exact_matching_signature(_mesa_glsl_parse_state *state,
+ const exec_list *actual_ps);
+
+ /**
+ * Name of the function.
+ */
+ const char *name;
+
+ /** Whether or not this function has a signature that isn't a built-in. */
+ bool has_user_signature();
+
+ /**
+ * List of ir_function_signature for each overloaded function with this name.
+ */
+ struct exec_list signatures;
+
+ /**
+ * Is this function a subroutine type declaration?
+ * e.g. subroutine void type1(float arg1);
+ */
+ bool is_subroutine;
+
+ /**
+ * Is this function associated with a subroutine type?
+ * e.g. subroutine (type1, type2) function_name { function_body };
+ * would have num_subroutine_types 2,
+ * and pointers to the type1 and type2 types.
+ */
+ int num_subroutine_types;
+ const struct glsl_type **subroutine_types;
+
+ int subroutine_index;
+};
+
+inline const char *ir_function_signature::function_name() const
+{
+ return this->_function->name;
+}
+/*@}*/
+
+
+/**
+ * IR instruction representing high-level if-statements
+ */
+class ir_if : public ir_instruction {
+public:
+ ir_if(ir_rvalue *condition)
+ : ir_instruction(ir_type_if), condition(condition)
+ {
+ }
+
+ virtual ir_if *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ ir_rvalue *condition;
+ /** List of ir_instruction for the body of the then branch */
+ exec_list then_instructions;
+ /** List of ir_instruction for the body of the else branch */
+ exec_list else_instructions;
+};
+
+
+/**
+ * IR instruction representing a high-level loop structure.
+ */
+class ir_loop : public ir_instruction {
+public:
+ ir_loop();
+
+ virtual ir_loop *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ /** List of ir_instruction that make up the body of the loop. */
+ exec_list body_instructions;
+};
+
+
+class ir_assignment : public ir_instruction {
+public:
+ ir_assignment(ir_rvalue *lhs, ir_rvalue *rhs, ir_rvalue *condition = NULL);
+
+ /**
+ * Construct an assignment with an explicit write mask
+ *
+ * \note
+ * Since a write mask is supplied, the LHS must already be a bare
+ * \c ir_dereference. There cannot be any swizzles in the LHS.
+ */
+ ir_assignment(ir_dereference *lhs, ir_rvalue *rhs, ir_rvalue *condition,
+ unsigned write_mask);
+
+ virtual ir_assignment *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ /**
+ * Get a whole variable written by an assignment
+ *
+ * If the LHS of the assignment writes a whole variable, the variable is
+ * returned. Otherwise \c NULL is returned. Examples of whole-variable
+ * assignment are:
+ *
+ * - Assigning to a scalar
+ * - Assigning to all components of a vector
+ * - Whole array (or matrix) assignment
+ * - Whole structure assignment
+ */
+ ir_variable *whole_variable_written();
+
+ /**
+ * Set the LHS of an assignment
+ */
+ void set_lhs(ir_rvalue *lhs);
+
+ /**
+ * Left-hand side of the assignment.
+ *
+ * This should be treated as read only. If you need to set the LHS of an
+ * assignment, use \c ir_assignment::set_lhs.
+ */
+ ir_dereference *lhs;
+
+ /**
+ * Value being assigned
+ */
+ ir_rvalue *rhs;
+
+ /**
+ * Optional condition for the assignment.
+ */
+ ir_rvalue *condition;
+
+
+ /**
+ * Component mask written
+ *
+ * For non-vector types in the LHS, this field will be zero. For vector
+ * types, a bit will be set for each component that is written. Note that
+ * for \c vec2 and \c vec3 types only the lower bits will ever be set.
+ *
+ * A partially-set write mask means that each enabled channel gets
+ * the value from a consecutive channel of the rhs. For example,
+ * to write just .xyw of gl_FragColor with color:
+ *
+ * (assign (constant bool (1)) (xyw)
+ * (var_ref gl_FragColor)
+ * (swiz xyw (var_ref color)))
+ */
+ unsigned write_mask:4;
+};
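+
+/* A usage sketch for the write-mask constructor (assumes `mem_ctx`, a vec4
+ * variable `dst_var` and a vec4 rvalue `src`):
+ *
+ *    ir_dereference_variable *lhs =
+ *       new(mem_ctx) ir_dereference_variable(dst_var);
+ *    ir_rvalue *rhs = ir_swizzle::create(src, "xyw", 4);
+ *    // Mask 0xb (binary 1011) enables components x, y and w; each enabled
+ *    // channel takes the next consecutive channel of the rhs.
+ *    ir_assignment *assign =
+ *       new(mem_ctx) ir_assignment(lhs, rhs, NULL, 0xb);
+ */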
+
+#include "ir_expression_operation.h"
+
+extern const char *const ir_expression_operation_strings[ir_last_opcode + 1];
+extern const char *const ir_expression_operation_enum_strings[ir_last_opcode + 1];
+
+class ir_expression : public ir_rvalue {
+public:
+ ir_expression(int op, const struct glsl_type *type,
+ ir_rvalue *op0, ir_rvalue *op1 = NULL,
+ ir_rvalue *op2 = NULL, ir_rvalue *op3 = NULL);
+
+ /**
+ * Constructor for unary operation expressions
+ */
+ ir_expression(int op, ir_rvalue *);
+
+ /**
+ * Constructor for binary operation expressions
+ */
+ ir_expression(int op, ir_rvalue *op0, ir_rvalue *op1);
+
+ /**
+ * Constructor for ternary operation expressions
+ */
+ ir_expression(int op, ir_rvalue *op0, ir_rvalue *op1, ir_rvalue *op2);
+
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+ virtual ir_expression *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ /**
+ * Attempt to constant-fold the expression
+ *
+ * The "variable_context" hash table links ir_variable * to ir_constant *
+ * that represent the variables' values. \c NULL represents an empty
+ * context.
+ *
+ * If the expression cannot be constant folded, this method will return
+ * \c NULL.
+ */
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ /**
+ * This is only here for ir_reader to use for testing purposes. Please use
+ * the precomputed num_operands field if you need the number of operands.
+ */
+ static unsigned get_num_operands(ir_expression_operation);
+
+ /**
+ * Return whether the expression operates on vectors horizontally.
+ */
+ bool is_horizontal() const
+ {
+ return operation == ir_binop_all_equal ||
+ operation == ir_binop_any_nequal ||
+ operation == ir_binop_dot ||
+ operation == ir_binop_vector_extract ||
+ operation == ir_triop_vector_insert ||
+ operation == ir_binop_ubo_load ||
+ operation == ir_quadop_vector;
+ }
+
+ /**
+ * Do a reverse-lookup to translate the given string into an operator.
+ */
+ static ir_expression_operation get_operator(const char *);
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ virtual ir_variable *variable_referenced() const;
+
+ /**
+ * Determine the number of operands used by an expression
+ */
+ void init_num_operands()
+ {
+ if (operation == ir_quadop_vector) {
+ num_operands = this->type->vector_elements;
+ } else {
+ num_operands = get_num_operands(operation);
+ }
+ }
+
+ ir_expression_operation operation;
+ ir_rvalue *operands[4];
+ uint8_t num_operands;
+};
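+
+/* A construction sketch (assumes `mem_ctx` and two float rvalues `a` and
+ * `b`; ir_binop_add comes from ir_expression_operation.h):
+ *
+ *    ir_expression *sum =
+ *       new(mem_ctx) ir_expression(ir_binop_add, glsl_type::float_type,
+ *                                  a, b);
+ */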
+
+
+/**
+ * HIR instruction representing a high-level function call, containing a list
+ * of parameters and returning a value in the supplied temporary.
+ */
+class ir_call : public ir_instruction {
+public:
+ ir_call(ir_function_signature *callee,
+ ir_dereference_variable *return_deref,
+ exec_list *actual_parameters)
+ : ir_instruction(ir_type_call), return_deref(return_deref), callee(callee), sub_var(NULL), array_idx(NULL)
+ {
+ assert(callee->return_type != NULL);
+ actual_parameters->move_nodes_to(& this->actual_parameters);
+ }
+
+ ir_call(ir_function_signature *callee,
+ ir_dereference_variable *return_deref,
+ exec_list *actual_parameters,
+ ir_variable *var, ir_rvalue *array_idx)
+ : ir_instruction(ir_type_call), return_deref(return_deref), callee(callee), sub_var(var), array_idx(array_idx)
+ {
+ assert(callee->return_type != NULL);
+ actual_parameters->move_nodes_to(& this->actual_parameters);
+ }
+
+ virtual ir_call *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ /**
+ * Get the name of the function being called.
+ */
+ const char *callee_name() const
+ {
+ return callee->function_name();
+ }
+
+ /**
+ * Generates an inline version of the function before @ir,
+ * storing the return value in return_deref.
+ */
+ void generate_inline(ir_instruction *ir);
+
+ /**
+ * Storage for the function's return value.
+ * This must be NULL if the return type is void.
+ */
+ ir_dereference_variable *return_deref;
+
+ /**
+ * The specific function signature being called.
+ */
+ ir_function_signature *callee;
+
+ /* List of ir_rvalue of parameters passed in this call. */
+ exec_list actual_parameters;
+
+ /*
+ * ARB_shader_subroutine support -
+ * the subroutine uniform variable and array index
+ * rvalue to be used in the lowering pass later.
+ */
+ ir_variable *sub_var;
+ ir_rvalue *array_idx;
+};
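+
+/* A call-construction sketch (assumes `mem_ctx`, a non-void signature
+ * `sig`, a matching temporary `ret_var` and an argument rvalue `arg0`):
+ *
+ *    exec_list params;
+ *    params.push_tail(arg0);   // actual arguments, in declaration order
+ *    ir_dereference_variable *ret =
+ *       new(mem_ctx) ir_dereference_variable(ret_var);
+ *    ir_call *call = new(mem_ctx) ir_call(sig, ret, &params);
+ */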
+
+
+/**
+ * \name Jump-like IR instructions.
+ *
+ * These include \c break, \c continue, \c return, and \c discard.
+ */
+/*@{*/
+class ir_jump : public ir_instruction {
+protected:
+ ir_jump(enum ir_node_type t)
+ : ir_instruction(t)
+ {
+ }
+};
+
+class ir_return : public ir_jump {
+public:
+ ir_return()
+ : ir_jump(ir_type_return), value(NULL)
+ {
+ }
+
+ ir_return(ir_rvalue *value)
+ : ir_jump(ir_type_return), value(value)
+ {
+ }
+
+ virtual ir_return *clone(void *mem_ctx, struct hash_table *) const;
+
+ ir_rvalue *get_value() const
+ {
+ return value;
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ ir_rvalue *value;
+};
+
+
+/**
+ * Jump instructions used inside loops
+ *
+ * These include \c break and \c continue. The \c break within a loop is
+ * different from the \c break within a switch-statement.
+ *
+ * \sa ir_switch_jump
+ */
+class ir_loop_jump : public ir_jump {
+public:
+ enum jump_mode {
+ jump_break,
+ jump_continue
+ };
+
+ ir_loop_jump(jump_mode mode)
+ : ir_jump(ir_type_loop_jump)
+ {
+ this->mode = mode;
+ }
+
+ virtual ir_loop_jump *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ bool is_break() const
+ {
+ return mode == jump_break;
+ }
+
+ bool is_continue() const
+ {
+ return mode == jump_continue;
+ }
+
+ /** Mode selector for the jump instruction. */
+ enum jump_mode mode;
+};
+
+/**
+ * IR instruction representing discard statements.
+ */
+class ir_discard : public ir_jump {
+public:
+ ir_discard()
+ : ir_jump(ir_type_discard)
+ {
+ this->condition = NULL;
+ }
+
+ ir_discard(ir_rvalue *cond)
+ : ir_jump(ir_type_discard)
+ {
+ this->condition = cond;
+ }
+
+ virtual ir_discard *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ ir_rvalue *condition;
+};
+/*@}*/
+
+
+/**
+ * IR instruction representing demote statements from
+ * GL_EXT_demote_to_helper_invocation.
+ */
+class ir_demote : public ir_instruction {
+public:
+ ir_demote()
+ : ir_instruction(ir_type_demote)
+ {
+ }
+
+ virtual ir_demote *clone(void *mem_ctx, struct hash_table *ht) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+};
+
+
+/**
+ * Texture sampling opcodes used in ir_texture
+ */
+enum ir_texture_opcode {
+ ir_tex, /**< Regular texture look-up */
+ ir_txb, /**< Texture look-up with LOD bias */
+ ir_txl, /**< Texture look-up with explicit LOD */
+ ir_txd, /**< Texture look-up with partial derivatives */
+ ir_txf, /**< Texel fetch with explicit LOD */
+ ir_txf_ms, /**< Multisample texture fetch */
+ ir_txs, /**< Texture size */
+ ir_lod, /**< Texture lod query */
+ ir_tg4, /**< Texture gather */
+ ir_query_levels, /**< Texture levels query */
+ ir_texture_samples, /**< Texture samples query */
+ ir_samples_identical, /**< Query whether all samples are definitely identical. */
+};
+
+
+/**
+ * IR instruction to sample a texture
+ *
+ * The specific form of the IR instruction depends on the \c mode value
+ * selected from \c ir_texture_opcodes. In the printed IR, these will
+ * appear as:
+ *
+ * Texel offset (0 or an expression)
+ * | Projection divisor
+ * | | Shadow comparator
+ * | | |
+ * v v v
+ * (tex <type> <sampler> <coordinate> 0 1 ( ))
+ * (txb <type> <sampler> <coordinate> 0 1 ( ) <bias>)
+ * (txl <type> <sampler> <coordinate> 0 1 ( ) <lod>)
+ * (txd <type> <sampler> <coordinate> 0 1 ( ) (dPdx dPdy))
+ * (txf <type> <sampler> <coordinate> 0 <lod>)
+ * (txf_ms
+ * <type> <sampler> <coordinate> <sample_index>)
+ * (txs <type> <sampler> <lod>)
+ * (lod <type> <sampler> <coordinate>)
+ * (tg4 <type> <sampler> <coordinate> <offset> <component>)
+ * (query_levels <type> <sampler>)
+ * (samples_identical <sampler> <coordinate>)
+ */
+class ir_texture : public ir_rvalue {
+public:
+ ir_texture(enum ir_texture_opcode op)
+ : ir_rvalue(ir_type_texture),
+ op(op), sampler(NULL), coordinate(NULL), projector(NULL),
+ shadow_comparator(NULL), offset(NULL)
+ {
+ memset(&lod_info, 0, sizeof(lod_info));
+ }
+
+ virtual ir_texture *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+ /**
+ * Return a string representing the ir_texture_opcode.
+ */
+ const char *opcode_string();
+
+ /** Set the sampler and type. */
+ void set_sampler(ir_dereference *sampler, const glsl_type *type);
+
+ static bool has_lod(const glsl_type *sampler_type);
+
+ /**
+ * Do a reverse-lookup to translate a string into an ir_texture_opcode.
+ */
+ static ir_texture_opcode get_opcode(const char *);
+
+ enum ir_texture_opcode op;
+
+ /** Sampler to use for the texture access. */
+ ir_dereference *sampler;
+
+ /** Texture coordinate to sample */
+ ir_rvalue *coordinate;
+
+ /**
+ * Value used for projective divide.
+ *
+ * If there is no projective divide (the common case), this will be
+ * \c NULL. Optimization passes should check for this to point to a constant
+ * of 1.0 and replace that with \c NULL.
+ */
+ ir_rvalue *projector;
+
+ /**
+ * Coordinate used for comparison on shadow look-ups.
+ *
+ * If there is no shadow comparison, this will be \c NULL. For the
+ * \c ir_txf opcode, this *must* be \c NULL.
+ */
+ ir_rvalue *shadow_comparator;
+
+ /** Texel offset. */
+ ir_rvalue *offset;
+
+ union {
+ ir_rvalue *lod; /**< Floating point LOD */
+ ir_rvalue *bias; /**< Floating point LOD bias */
+ ir_rvalue *sample_index; /**< MSAA sample index */
+ ir_rvalue *component; /**< Gather component selector */
+ struct {
+ ir_rvalue *dPdx; /**< Partial derivative of coordinate wrt X */
+ ir_rvalue *dPdy; /**< Partial derivative of coordinate wrt Y */
+ } grad;
+ } lod_info;
+};
+
+
+struct ir_swizzle_mask {
+ unsigned x:2;
+ unsigned y:2;
+ unsigned z:2;
+ unsigned w:2;
+
+ /**
+ * Number of components in the swizzle.
+ */
+ unsigned num_components:3;
+
+ /**
+ * Does the swizzle contain duplicate components?
+ *
+ * L-value swizzles cannot contain duplicate components.
+ */
+ unsigned has_duplicates:1;
+};
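+
+/* Sketch of the encoding: for the swizzle ".yzx" of a vec3, init_mask would
+ * produce { x = 1, y = 2, z = 0, num_components = 3, has_duplicates = 0 };
+ * each 2-bit field names the source component feeding that position.
+ */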
+
+
+class ir_swizzle : public ir_rvalue {
+public:
+ ir_swizzle(ir_rvalue *, unsigned x, unsigned y, unsigned z, unsigned w,
+ unsigned count);
+
+ ir_swizzle(ir_rvalue *val, const unsigned *components, unsigned count);
+
+ ir_swizzle(ir_rvalue *val, ir_swizzle_mask mask);
+
+ virtual ir_swizzle *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ /**
+ * Construct an ir_swizzle from the textual representation. Can fail.
+ */
+ static ir_swizzle *create(ir_rvalue *, const char *, unsigned vector_length);
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+ bool is_lvalue(const struct _mesa_glsl_parse_state *state) const
+ {
+ return val->is_lvalue(state) && !mask.has_duplicates;
+ }
+
+ /**
+ * Get the variable that is ultimately referenced by an r-value
+ */
+ virtual ir_variable *variable_referenced() const;
+
+ ir_rvalue *val;
+ ir_swizzle_mask mask;
+
+private:
+ /**
+ * Initialize the mask component of a swizzle
+ *
+ * This is used by the \c ir_swizzle constructors.
+ */
+ void init_mask(const unsigned *components, unsigned count);
+};
+
+
+class ir_dereference : public ir_rvalue {
+public:
+ virtual ir_dereference *clone(void *mem_ctx, struct hash_table *) const = 0;
+
+ bool is_lvalue(const struct _mesa_glsl_parse_state *state) const;
+
+ /**
+ * Get the variable that is ultimately referenced by an r-value
+ */
+ virtual ir_variable *variable_referenced() const = 0;
+
+ /**
+ * Get the precision. This can either come from the eventual variable that
+ * is dereferenced, or from a record member.
+ */
+ virtual int precision() const = 0;
+
+protected:
+ ir_dereference(enum ir_node_type t)
+ : ir_rvalue(t)
+ {
+ }
+};
+
+
+class ir_dereference_variable : public ir_dereference {
+public:
+ ir_dereference_variable(ir_variable *var);
+
+ virtual ir_dereference_variable *clone(void *mem_ctx,
+ struct hash_table *) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+ /**
+ * Get the variable that is ultimately referenced by an r-value
+ */
+ virtual ir_variable *variable_referenced() const
+ {
+ return this->var;
+ }
+
+ virtual int precision() const
+ {
+ return this->var->data.precision;
+ }
+
+ virtual ir_variable *whole_variable_referenced()
+ {
+ /* ir_dereference_variable objects always dereference the entire
+ * variable. However, if this dereference is dereferenced by anything
+ * else, the complete dereference chain is not a whole-variable
+ * dereference. This method should only be called on the top most
+ * ir_rvalue in a dereference chain.
+ */
+ return this->var;
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ /**
+ * Object being dereferenced.
+ */
+ ir_variable *var;
+};
+
+
+class ir_dereference_array : public ir_dereference {
+public:
+ ir_dereference_array(ir_rvalue *value, ir_rvalue *array_index);
+
+ ir_dereference_array(ir_variable *var, ir_rvalue *array_index);
+
+ virtual ir_dereference_array *clone(void *mem_ctx,
+ struct hash_table *) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+ /**
+ * Get the variable that is ultimately referenced by an r-value
+ */
+ virtual ir_variable *variable_referenced() const
+ {
+ return this->array->variable_referenced();
+ }
+
+ virtual int precision() const
+ {
+ ir_dereference *deref = this->array->as_dereference();
+
+ if (deref == NULL)
+ return GLSL_PRECISION_NONE;
+ else
+ return deref->precision();
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ ir_rvalue *array;
+ ir_rvalue *array_index;
+
+private:
+ void set_array(ir_rvalue *value);
+};
+
+
+class ir_dereference_record : public ir_dereference {
+public:
+ ir_dereference_record(ir_rvalue *value, const char *field);
+
+ ir_dereference_record(ir_variable *var, const char *field);
+
+ virtual ir_dereference_record *clone(void *mem_ctx,
+ struct hash_table *) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ /**
+ * Get the variable that is ultimately referenced by an r-value
+ */
+ virtual ir_variable *variable_referenced() const
+ {
+ return this->record->variable_referenced();
+ }
+
+ virtual int precision() const
+ {
+ glsl_struct_field *field = record->type->fields.structure + field_idx;
+
+ return field->precision;
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ ir_rvalue *record;
+ int field_idx;
+};
+
+
+/**
+ * Data stored in an ir_constant
+ */
+union ir_constant_data {
+ unsigned u[16];
+ int i[16];
+ float f[16];
+ bool b[16];
+ double d[16];
+ uint16_t f16[16];
+ uint64_t u64[16];
+ int64_t i64[16];
+};
+
+
+class ir_constant : public ir_rvalue {
+public:
+ ir_constant(const struct glsl_type *type, const ir_constant_data *data);
+ ir_constant(bool b, unsigned vector_elements=1);
+ ir_constant(unsigned int u, unsigned vector_elements=1);
+ ir_constant(int i, unsigned vector_elements=1);
+ ir_constant(float16_t f16, unsigned vector_elements=1);
+ ir_constant(float f, unsigned vector_elements=1);
+ ir_constant(double d, unsigned vector_elements=1);
+ ir_constant(uint64_t u64, unsigned vector_elements=1);
+ ir_constant(int64_t i64, unsigned vector_elements=1);
+
+ /**
+ * Construct an ir_constant from a list of ir_constant values
+ */
+ ir_constant(const struct glsl_type *type, exec_list *values);
+
+ /**
+ * Construct an ir_constant from a scalar component of another ir_constant
+ *
+ * The new \c ir_constant inherits the type of the component from the
+ * source constant.
+ *
+ * \note
+ * In the case of a matrix constant, the new constant is a scalar, \b not
+ * a vector.
+ */
+ ir_constant(const ir_constant *c, unsigned i);
+
+ /**
+ * Return a new ir_constant of the specified type containing all zeros.
+ */
+ static ir_constant *zero(void *mem_ctx, const glsl_type *type);
+
+ virtual ir_constant *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual ir_constant *constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context = NULL);
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ virtual bool equals(const ir_instruction *ir,
+ enum ir_node_type ignore = ir_type_unset) const;
+
+ /**
+ * Get a particular component of a constant as a specific type
+ *
+ * This is useful, for example, to get a value from an integer constant
+ * as a float or bool. This appears frequently when constructors are
+ * called with all constant parameters.
+ */
+ /*@{*/
+ bool get_bool_component(unsigned i) const;
+ float get_float_component(unsigned i) const;
+ uint16_t get_float16_component(unsigned i) const;
+ double get_double_component(unsigned i) const;
+ int get_int_component(unsigned i) const;
+ unsigned get_uint_component(unsigned i) const;
+ int64_t get_int64_component(unsigned i) const;
+ uint64_t get_uint64_component(unsigned i) const;
+ /*@}*/
+
+ ir_constant *get_array_element(unsigned i) const;
+
+ ir_constant *get_record_field(int idx);
+
+ /**
+ * Copy the values from another constant at a given offset.
+ *
+ * The offset is ignored for array or struct copies, it's only for
+ * scalars or vectors into vectors or matrices.
+ *
+ * With identical types on both sides and zero offset it's clone()
+ * without creating a new object.
+ */
+
+ void copy_offset(ir_constant *src, int offset);
+
+ /**
+ * Copy the values from another constant at a given offset and
+ * following an assign-like mask.
+ *
+ * The mask is ignored for scalars.
+ *
+ * Note that this function only handles what assign can handle,
+ * i.e. at most a vector as source and a column of a matrix as
+ * destination.
+ */
+
+ void copy_masked_offset(ir_constant *src, int offset, unsigned int mask);
+
+ /**
+ * Determine whether a constant has the same value as another constant
+ *
+ * \sa ir_constant::is_zero, ir_constant::is_one,
+ * ir_constant::is_negative_one
+ */
+ bool has_value(const ir_constant *) const;
+
+ /**
+ * Return true if this ir_constant represents the given value.
+ *
+ * For vectors, this checks that each component is the given value.
+ */
+ virtual bool is_value(float f, int i) const;
+ virtual bool is_zero() const;
+ virtual bool is_one() const;
+ virtual bool is_negative_one() const;
+
+ /**
+ * Return true for constants that could be stored as 16-bit unsigned values.
+ *
+ * Note that this will return true even for signed integer ir_constants, as
+ * long as the value is non-negative and fits in 16-bits.
+ */
+ virtual bool is_uint16_constant() const;
+
+ /**
+ * Value of the constant.
+ *
+ * The field used to back the values supplied by the constant is determined
+ * by the type associated with the \c ir_instruction. Constants may be
+ * scalars, vectors, or matrices.
+ */
+ union ir_constant_data value;
+
+ /* Array elements and structure fields */
+ ir_constant **const_elements;
+
+private:
+ /**
+ * Parameterless constructor only used by the clone method
+ */
+ ir_constant(void);
+};
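+
+/* Construction sketches (assume a ralloc context `mem_ctx`):
+ *
+ *    // A vec4 with 1.0f in every component.
+ *    ir_constant *ones = new(mem_ctx) ir_constant(1.0f, 4);
+ *
+ *    // An all-zero constant of an arbitrary type.
+ *    ir_constant *z = ir_constant::zero(mem_ctx, glsl_type::vec4_type);
+ */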
+
+class ir_precision_statement : public ir_instruction {
+public:
+ ir_precision_statement(const char *statement_to_store)
+ : ir_instruction(ir_type_precision)
+ {
+ ir_type = ir_type_precision;
+ precision_statement = statement_to_store;
+ }
+
+ virtual ir_precision_statement *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ /**
+ * Precision statement
+ */
+ const char *precision_statement;
+};
+
+class ir_typedecl_statement : public ir_instruction {
+public:
+ ir_typedecl_statement(const glsl_type* type_decl)
+ : ir_instruction(ir_type_typedecl)
+ {
+ this->ir_type = ir_type_typedecl;
+ this->type_decl = type_decl;
+ }
+
+ virtual ir_typedecl_statement *clone(void *mem_ctx, struct hash_table *) const;
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ const glsl_type* type_decl;
+};
+
+/**
+ * IR instruction to emit a vertex in a geometry shader.
+ */
+class ir_emit_vertex : public ir_instruction {
+public:
+ ir_emit_vertex(ir_rvalue *stream)
+ : ir_instruction(ir_type_emit_vertex),
+ stream(stream)
+ {
+ assert(stream);
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_emit_vertex *clone(void *mem_ctx, struct hash_table *ht) const
+ {
+ return new(mem_ctx) ir_emit_vertex(this->stream->clone(mem_ctx, ht));
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ int stream_id() const
+ {
+ return stream->as_constant()->value.i[0];
+ }
+
+ ir_rvalue *stream;
+};
+
+/**
+ * IR instruction to complete the current primitive and start a new one in a
+ * geometry shader.
+ */
+class ir_end_primitive : public ir_instruction {
+public:
+ ir_end_primitive(ir_rvalue *stream)
+ : ir_instruction(ir_type_end_primitive),
+ stream(stream)
+ {
+ assert(stream);
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_end_primitive *clone(void *mem_ctx, struct hash_table *ht) const
+ {
+ return new(mem_ctx) ir_end_primitive(this->stream->clone(mem_ctx, ht));
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+
+ int stream_id() const
+ {
+ return stream->as_constant()->value.i[0];
+ }
+
+ ir_rvalue *stream;
+};
+
+/**
+ * IR instruction for tessellation control and compute shader barrier.
+ */
+class ir_barrier : public ir_instruction {
+public:
+ ir_barrier()
+ : ir_instruction(ir_type_barrier)
+ {
+ }
+
+ virtual void accept(ir_visitor *v)
+ {
+ v->visit(this);
+ }
+
+ virtual ir_barrier *clone(void *mem_ctx, struct hash_table *) const
+ {
+ return new(mem_ctx) ir_barrier();
+ }
+
+ virtual ir_visitor_status accept(ir_hierarchical_visitor *);
+};
+
+/*@}*/
+
+/**
+ * Apply a visitor to each IR node in a list
+ */
+void
+visit_exec_list(exec_list *list, ir_visitor *visitor);
+
+/**
+ * Validate invariants on each IR node in a list
+ */
+void validate_ir_tree(exec_list *instructions);
+
+struct _mesa_glsl_parse_state;
+struct gl_shader_program;
+
+/**
+ * Detect whether an unlinked shader contains static recursion
+ *
+ * If the list of instructions is determined to contain static recursion,
+ * \c _mesa_glsl_error will be called to emit error messages for each function
+ * that is in the recursion cycle.
+ */
+void
+detect_recursion_unlinked(struct _mesa_glsl_parse_state *state,
+ exec_list *instructions);
+
+/**
+ * Detect whether a linked shader contains static recursion
+ *
+ * If the list of instructions is determined to contain static recursion,
+ * \c link_error_printf will be called to emit error messages for each function
+ * that is in the recursion cycle. In addition,
+ * \c gl_shader_program::LinkStatus will be set to false.
+ */
+void
+detect_recursion_linked(struct gl_shader_program *prog,
+ exec_list *instructions);
+
+/**
+ * Make a clone of each IR instruction in a list
+ *
+ * \param in List of IR instructions that are to be cloned
+ * \param out List to hold the cloned instructions
+ */
+void
+clone_ir_list(void *mem_ctx, exec_list *out, const exec_list *in);
+
+extern void
+_mesa_glsl_initialize_variables(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+extern void
+reparent_ir(exec_list *list, void *mem_ctx);
+
+extern void
+do_set_program_inouts(exec_list *instructions, struct gl_program *prog,
+ gl_shader_stage shader_stage);
+
+extern char *
+prototype_string(const glsl_type *return_type, const char *name,
+ exec_list *parameters);
+
+const char *
+mode_string(const ir_variable *var);
+
+/**
+ * Built-in / reserved GL variables names start with "gl_"
+ */
+static inline bool
+is_gl_identifier(const char *s)
+{
+ return s && s[0] == 'g' && s[1] == 'l' && s[2] == '_';
+}
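+
+/* e.g. is_gl_identifier("gl_Position") is true, while
+ * is_gl_identifier("pos") and is_gl_identifier(NULL) are false.
+ */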
+
+extern "C" {
+#endif /* __cplusplus */
+
+extern void _mesa_print_ir(FILE *f, struct exec_list *instructions,
+ struct _mesa_glsl_parse_state *state);
+
+extern void
+fprint_ir(FILE *f, const void *instruction);
+
+extern const struct gl_builtin_uniform_desc *
+_mesa_glsl_get_builtin_uniform_desc(const char *name);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+unsigned
+vertices_per_prim(GLenum prim);
+
+#endif /* IR_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp
new file mode 100644
index 0000000000..0c18c7e0ec
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp
@@ -0,0 +1,207 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_array_refcount.cpp
+ *
+ * Provides a visitor which tracks which variables, and which elements of
+ * array variables, are referenced.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_array_refcount.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+ir_array_refcount_visitor::ir_array_refcount_visitor()
+ : last_array_deref(0), derefs(0), num_derefs(0), derefs_size(0)
+{
+ this->mem_ctx = ralloc_context(NULL);
+ this->ht = _mesa_pointer_hash_table_create(NULL);
+}
+
+static void
+free_entry(struct hash_entry *entry)
+{
+ ir_array_refcount_entry *ivre = (ir_array_refcount_entry *) entry->data;
+ delete ivre;
+}
+
+ir_array_refcount_visitor::~ir_array_refcount_visitor()
+{
+ ralloc_free(this->mem_ctx);
+ _mesa_hash_table_destroy(this->ht, free_entry);
+}
+
+ir_array_refcount_entry::ir_array_refcount_entry(ir_variable *var)
+ : var(var), is_referenced(false)
+{
+ num_bits = MAX2(1, var->type->arrays_of_arrays_size());
+ bits = new BITSET_WORD[BITSET_WORDS(num_bits)];
+ memset(bits, 0, BITSET_WORDS(num_bits) * sizeof(bits[0]));
+
+ /* Count the "depth" of the arrays-of-arrays. */
+ array_depth = 0;
+ for (const glsl_type *type = var->type;
+ type->is_array();
+ type = type->fields.array) {
+ array_depth++;
+ }
+}
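+
+/* Editor's note (illustrative, not upstream code): for a variable declared
+ * as `vec4 x[3][4]`, arrays_of_arrays_size() is 3 * 4 = 12, so num_bits is
+ * 12 and the bitset fits in a single BITSET_WORD; the loop above walks
+ * fields.array twice, giving array_depth = 2.
+ */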
+
+
+ir_array_refcount_entry::~ir_array_refcount_entry()
+{
+ delete [] bits;
+}
+
+ir_array_refcount_entry *
+ir_array_refcount_visitor::get_variable_entry(ir_variable *var)
+{
+ assert(var);
+
+ struct hash_entry *e = _mesa_hash_table_search(this->ht, var);
+ if (e)
+ return (ir_array_refcount_entry *)e->data;
+
+ ir_array_refcount_entry *entry = new ir_array_refcount_entry(var);
+ _mesa_hash_table_insert(this->ht, var, entry);
+
+ return entry;
+}
+
+
+array_deref_range *
+ir_array_refcount_visitor::get_array_deref()
+{
+ if ((num_derefs + 1) * sizeof(array_deref_range) > derefs_size) {
+ void *ptr = reralloc_size(mem_ctx, derefs, derefs_size + 4096);
+
+ if (ptr == NULL)
+ return NULL;
+
+ derefs_size += 4096;
+ derefs = (array_deref_range *)ptr;
+ }
+
+ array_deref_range *d = &derefs[num_derefs];
+ num_derefs++;
+
+ return d;
+}
+
+ir_visitor_status
+ir_array_refcount_visitor::visit_enter(ir_dereference_array *ir)
+{
+   /* It could also be a vector or a matrix. Individual elements of vectors
+    * and matrices are not tracked, so bail.
+    */
+ if (!ir->array->type->is_array())
+ return visit_continue;
+
+ /* If this array dereference is a child of an array dereference that was
+ * already visited, just continue on. Otherwise, for an arrays-of-arrays
+ * dereference like x[1][2][3][4], we'd process the [1][2][3][4] sequence,
+ * the [1][2][3] sequence, the [1][2] sequence, and the [1] sequence. This
+ * ensures that we only process the full sequence.
+ */
+ if (last_array_deref && last_array_deref->array == ir) {
+ last_array_deref = ir;
+ return visit_continue;
+ }
+
+ last_array_deref = ir;
+
+ num_derefs = 0;
+
+ ir_rvalue *rv = ir;
+ while (rv->ir_type == ir_type_dereference_array) {
+ ir_dereference_array *const deref = rv->as_dereference_array();
+
+ assert(deref != NULL);
+ assert(deref->array->type->is_array());
+
+ ir_rvalue *const array = deref->array;
+ const ir_constant *const idx = deref->array_index->as_constant();
+ array_deref_range *const dr = get_array_deref();
+
+ dr->size = array->type->array_size();
+
+ if (idx != NULL) {
+ dr->index = idx->get_int_component(0);
+ } else {
+ /* An unsized array can occur at the end of an SSBO. We can't track
+ * accesses to such an array, so bail.
+ */
+ if (array->type->array_size() == 0)
+ return visit_continue;
+
+ dr->index = dr->size;
+ }
+
+ rv = array;
+ }
+
+ ir_dereference_variable *const var_deref = rv->as_dereference_variable();
+
+ /* If the array being dereferenced is not a variable, bail. At the very
+ * least, ir_constant and ir_dereference_record are possible.
+ */
+ if (var_deref == NULL)
+ return visit_continue;
+
+ ir_array_refcount_entry *const entry =
+ this->get_variable_entry(var_deref->var);
+
+ if (entry == NULL)
+ return visit_stop;
+
+ link_util_mark_array_elements_referenced(derefs, num_derefs,
+ entry->array_depth,
+ entry->bits);
+
+ return visit_continue;
+}
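+
+/* Editor's note (illustrative, not upstream code): for a dereference chain
+ * `x[1][i][3]` where `x` is declared `float x[4][5][6]`, the loop above
+ * walks from the outermost deref inward and records:
+ *
+ *    derefs[0] = { index 3, size 6 }
+ *    derefs[1] = { index 5, size 5 }   (i is non-constant, so index == size)
+ *    derefs[2] = { index 1, size 4 }
+ *
+ * link_util_mark_array_elements_referenced() then expands this into the
+ * per-element bits of the entry.
+ */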
+
+
+ir_visitor_status
+ir_array_refcount_visitor::visit(ir_dereference_variable *ir)
+{
+ ir_variable *const var = ir->variable_referenced();
+ ir_array_refcount_entry *entry = this->get_variable_entry(var);
+
+ entry->is_referenced = true;
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+ir_array_refcount_visitor::visit_enter(ir_function_signature *ir)
+{
+   /* We don't want to descend into the function parameters and have them
+    * counted as unreferenced (and then dead-code eliminated), so just
+    * accept the body here.
+    */
+ visit_list_elements(this, &ir->body);
+ return visit_continue_with_parent;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.h
new file mode 100644
index 0000000000..4a9d9017a0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_array_refcount.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_array_refcount.h
+ *
+ * Provides a visitor which tracks which variables, and which elements of
+ * array variables, are referenced.
+ */
+
+#ifndef GLSL_IR_ARRAY_REFCOUNT_H
+#define GLSL_IR_ARRAY_REFCOUNT_H
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "linker_util.h"
+#include "compiler/glsl_types.h"
+#include "util/bitset.h"
+
+class ir_array_refcount_entry
+{
+public:
+ ir_array_refcount_entry(ir_variable *var);
+ ~ir_array_refcount_entry();
+
+ ir_variable *var; /* The key: the variable's pointer. */
+
+ /** Has the variable been referenced? */
+ bool is_referenced;
+
+ /** Count of nested arrays in the type. */
+ unsigned array_depth;
+
+ /** Set of bit-flags to note which array elements have been accessed. */
+ BITSET_WORD *bits;
+
+ /** Has a linearized array index been referenced? */
+ bool is_linearized_index_referenced(unsigned linearized_index) const
+ {
+ assert(bits != 0);
+      assert(linearized_index < num_bits);
+
+ return BITSET_TEST(bits, linearized_index);
+ }
+
+private:
+
+ /**
+ * Total number of bits referenced by \c bits.
+ *
+ * Also the total number of array(s-of-arrays) elements of \c var.
+ */
+ unsigned num_bits;
+
+ friend class array_refcount_test;
+};
+
+class ir_array_refcount_visitor : public ir_hierarchical_visitor {
+public:
+ ir_array_refcount_visitor(void);
+ ~ir_array_refcount_visitor(void);
+
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ virtual ir_visitor_status visit_enter(ir_function_signature *);
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+
+ /**
+ * Find variable in the hash table, and insert it if not present
+ */
+ ir_array_refcount_entry *get_variable_entry(ir_variable *var);
+
+ /**
+ * Hash table mapping ir_variable to ir_array_refcount_entry.
+ */
+ struct hash_table *ht;
+
+ void *mem_ctx;
+
+private:
+ /** Get an array_deref_range element from private tracking. */
+ array_deref_range *get_array_deref();
+
+ /**
+ * Last ir_dereference_array that was visited
+ *
+ * Used to prevent some redundant calculations.
+ *
+ * \sa ::visit_enter(ir_dereference_array *)
+ */
+ ir_dereference_array *last_array_deref;
+
+ /**
+ * \name array_deref_range tracking
+ */
+ /*@{*/
+ /** Currently allocated block of derefs. */
+ array_deref_range *derefs;
+
+ /** Number of derefs used in current processing. */
+ unsigned num_derefs;
+
+ /** Size of the derefs buffer in bytes. */
+ unsigned derefs_size;
+ /*@}*/
+};
+
+#endif /* GLSL_IR_ARRAY_REFCOUNT_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp
new file mode 100644
index 0000000000..15481aa47f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_basic_block.cpp
+ *
+ * Basic block analysis of instruction streams.
+ */
+
+#include "ir.h"
+#include "ir_basic_block.h"
+
+/**
+ * Calls a user function for every basic block in the instruction stream.
+ *
+ * Basic block analysis is pretty easy in our IR thanks to the lack of
+ * unstructured control flow. We've got:
+ *
+ * ir_loop (for () {}, while () {}, do {} while ())
+ * ir_loop_jump (break, continue)
+ * ir_if () {}
+ * ir_return
+ * ir_call()
+ *
+ * Note that the basic blocks passed to the callback don't encompass all
+ * operations performed by the program -- for example, if conditions
+ * aren't included, nor are the assignments that will be generated
+ * for ir_call parameters.
+ */
+void call_for_basic_blocks(exec_list *instructions,
+ void (*callback)(ir_instruction *first,
+ ir_instruction *last,
+ void *data),
+ void *data)
+{
+ ir_instruction *leader = NULL;
+ ir_instruction *last = NULL;
+
+ foreach_in_list(ir_instruction, ir, instructions) {
+ ir_if *ir_if;
+ ir_loop *ir_loop;
+ ir_function *ir_function;
+
+ if (!leader)
+ leader = ir;
+
+ if ((ir_if = ir->as_if())) {
+ callback(leader, ir, data);
+ leader = NULL;
+
+ call_for_basic_blocks(&ir_if->then_instructions, callback, data);
+ call_for_basic_blocks(&ir_if->else_instructions, callback, data);
+ } else if ((ir_loop = ir->as_loop())) {
+ callback(leader, ir, data);
+ leader = NULL;
+ call_for_basic_blocks(&ir_loop->body_instructions, callback, data);
+ } else if (ir->as_jump() || ir->as_call()) {
+ callback(leader, ir, data);
+ leader = NULL;
+ } else if ((ir_function = ir->as_function())) {
+ /* A function definition doesn't interrupt our basic block
+ * since execution doesn't go into it. We should process the
+ * bodies of its signatures for BBs, though.
+ *
+ * Note that we miss an opportunity for producing more
+ * maximal BBs between the instructions that precede main()
+ * and the body of main(). Perhaps those instructions ought
+ * to live inside of main().
+ */
+ foreach_in_list(ir_function_signature, ir_sig, &ir_function->signatures) {
+ call_for_basic_blocks(&ir_sig->body, callback, data);
+ }
+ }
+ last = ir;
+ }
+ if (leader) {
+ callback(leader, last, data);
+ }
+}
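+
+/* Editor's sketch (illustrative, not upstream code): a minimal caller that
+ * counts basic blocks.  The function names below are hypothetical.
+ */
+static void
+count_basic_block(ir_instruction *first, ir_instruction *last, void *data)
+{
+   (void) first;
+   (void) last;
+   (*(unsigned *) data)++;
+}
+
+static unsigned
+count_basic_blocks(exec_list *instructions)
+{
+   unsigned num_bbs = 0;
+   call_for_basic_blocks(instructions, count_basic_block, &num_bbs);
+   return num_bbs;
+}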
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.h
new file mode 100644
index 0000000000..3d32e4e110
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_basic_block.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_IR_BASIC_BLOCK_H
+#define GLSL_IR_BASIC_BLOCK_H
+
+void call_for_basic_blocks(exec_list *instructions,
+ void (*callback)(ir_instruction *first,
+ ir_instruction *last,
+ void *data),
+ void *data);
+
+#endif /* GLSL_IR_BASIC_BLOCK_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.cpp
new file mode 100644
index 0000000000..416d8c71ed
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.cpp
@@ -0,0 +1,648 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "ir_builder.h"
+#include "program/prog_instruction.h"
+
+using namespace ir_builder;
+
+namespace ir_builder {
+
+void
+ir_factory::emit(ir_instruction *ir)
+{
+ instructions->push_tail(ir);
+}
+
+ir_variable *
+ir_factory::make_temp(const glsl_type *type, const char *name)
+{
+ ir_variable *var;
+
+ var = new(mem_ctx) ir_variable(type, name, ir_var_temporary);
+ emit(var);
+
+ return var;
+}
+
+ir_assignment *
+assign(deref lhs, operand rhs, operand condition, int writemask)
+{
+ void *mem_ctx = ralloc_parent(lhs.val);
+
+ ir_assignment *assign = new(mem_ctx) ir_assignment(lhs.val,
+ rhs.val,
+ condition.val,
+ writemask);
+
+ return assign;
+}
+
+ir_assignment *
+assign(deref lhs, operand rhs)
+{
+ return assign(lhs, rhs, (1 << lhs.val->type->vector_elements) - 1);
+}
+
+ir_assignment *
+assign(deref lhs, operand rhs, int writemask)
+{
+ return assign(lhs, rhs, (ir_rvalue *) NULL, writemask);
+}
+
+ir_assignment *
+assign(deref lhs, operand rhs, operand condition)
+{
+ return assign(lhs, rhs, condition, (1 << lhs.val->type->vector_elements) - 1);
+}
+
+ir_return *
+ret(operand retval)
+{
+ void *mem_ctx = ralloc_parent(retval.val);
+ return new(mem_ctx) ir_return(retval.val);
+}
+
+ir_swizzle *
+swizzle(operand a, int swizzle, int components)
+{
+ void *mem_ctx = ralloc_parent(a.val);
+
+ return new(mem_ctx) ir_swizzle(a.val,
+ GET_SWZ(swizzle, 0),
+ GET_SWZ(swizzle, 1),
+ GET_SWZ(swizzle, 2),
+ GET_SWZ(swizzle, 3),
+ components);
+}
+
+ir_swizzle *
+swizzle_for_size(operand a, unsigned components)
+{
+ void *mem_ctx = ralloc_parent(a.val);
+
+ if (a.val->type->vector_elements < components)
+ components = a.val->type->vector_elements;
+
+ unsigned s[4] = { 0, 1, 2, 3 };
+ for (int i = components; i < 4; i++)
+ s[i] = components - 1;
+
+ return new(mem_ctx) ir_swizzle(a.val, s, components);
+}
+
+ir_swizzle *
+swizzle_xxxx(operand a)
+{
+ return swizzle(a, SWIZZLE_XXXX, 4);
+}
+
+ir_swizzle *
+swizzle_yyyy(operand a)
+{
+ return swizzle(a, SWIZZLE_YYYY, 4);
+}
+
+ir_swizzle *
+swizzle_zzzz(operand a)
+{
+ return swizzle(a, SWIZZLE_ZZZZ, 4);
+}
+
+ir_swizzle *
+swizzle_wwww(operand a)
+{
+ return swizzle(a, SWIZZLE_WWWW, 4);
+}
+
+ir_swizzle *
+swizzle_x(operand a)
+{
+ return swizzle(a, SWIZZLE_XXXX, 1);
+}
+
+ir_swizzle *
+swizzle_y(operand a)
+{
+ return swizzle(a, SWIZZLE_YYYY, 1);
+}
+
+ir_swizzle *
+swizzle_z(operand a)
+{
+ return swizzle(a, SWIZZLE_ZZZZ, 1);
+}
+
+ir_swizzle *
+swizzle_w(operand a)
+{
+ return swizzle(a, SWIZZLE_WWWW, 1);
+}
+
+ir_swizzle *
+swizzle_xy(operand a)
+{
+ return swizzle(a, SWIZZLE_XYZW, 2);
+}
+
+ir_swizzle *
+swizzle_xyz(operand a)
+{
+ return swizzle(a, SWIZZLE_XYZW, 3);
+}
+
+ir_swizzle *
+swizzle_xyzw(operand a)
+{
+ return swizzle(a, SWIZZLE_XYZW, 4);
+}
+
+ir_expression *
+expr(ir_expression_operation op, operand a)
+{
+ void *mem_ctx = ralloc_parent(a.val);
+
+ return new(mem_ctx) ir_expression(op, a.val);
+}
+
+ir_expression *
+expr(ir_expression_operation op, operand a, operand b)
+{
+ void *mem_ctx = ralloc_parent(a.val);
+
+ return new(mem_ctx) ir_expression(op, a.val, b.val);
+}
+
+ir_expression *
+expr(ir_expression_operation op, operand a, operand b, operand c)
+{
+ void *mem_ctx = ralloc_parent(a.val);
+
+ return new(mem_ctx) ir_expression(op, a.val, b.val, c.val);
+}
+
+ir_expression *add(operand a, operand b)
+{
+ return expr(ir_binop_add, a, b);
+}
+
+ir_expression *sub(operand a, operand b)
+{
+ return expr(ir_binop_sub, a, b);
+}
+
+ir_expression *min2(operand a, operand b)
+{
+ return expr(ir_binop_min, a, b);
+}
+
+ir_expression *max2(operand a, operand b)
+{
+ return expr(ir_binop_max, a, b);
+}
+
+ir_expression *mul(operand a, operand b)
+{
+ return expr(ir_binop_mul, a, b);
+}
+
+ir_expression *imul_high(operand a, operand b)
+{
+ return expr(ir_binop_imul_high, a, b);
+}
+
+ir_expression *div(operand a, operand b)
+{
+ return expr(ir_binop_div, a, b);
+}
+
+ir_expression *carry(operand a, operand b)
+{
+ return expr(ir_binop_carry, a, b);
+}
+
+ir_expression *borrow(operand a, operand b)
+{
+ return expr(ir_binop_borrow, a, b);
+}
+
+ir_expression *trunc(operand a)
+{
+ return expr(ir_unop_trunc, a);
+}
+
+ir_expression *round_even(operand a)
+{
+ return expr(ir_unop_round_even, a);
+}
+
+ir_expression *fract(operand a)
+{
+ return expr(ir_unop_fract, a);
+}
+
+/* dot for vectors, mul for scalars */
+ir_expression *dot(operand a, operand b)
+{
+ assert(a.val->type == b.val->type);
+
+ if (a.val->type->vector_elements == 1)
+ return expr(ir_binop_mul, a, b);
+
+ return expr(ir_binop_dot, a, b);
+}
+
+ir_expression*
+clamp(operand a, operand b, operand c)
+{
+ return expr(ir_binop_min, expr(ir_binop_max, a, b), c);
+}
+
+ir_expression *
+saturate(operand a)
+{
+ return expr(ir_unop_saturate, a);
+}
+
+ir_expression *
+abs(operand a)
+{
+ return expr(ir_unop_abs, a);
+}
+
+ir_expression *
+neg(operand a)
+{
+ return expr(ir_unop_neg, a);
+}
+
+ir_expression *
+sin(operand a)
+{
+ return expr(ir_unop_sin, a);
+}
+
+ir_expression *
+cos(operand a)
+{
+ return expr(ir_unop_cos, a);
+}
+
+ir_expression *
+exp(operand a)
+{
+ return expr(ir_unop_exp, a);
+}
+
+ir_expression *
+rcp(operand a)
+{
+ return expr(ir_unop_rcp, a);
+}
+
+ir_expression *
+rsq(operand a)
+{
+ return expr(ir_unop_rsq, a);
+}
+
+ir_expression *
+sqrt(operand a)
+{
+ return expr(ir_unop_sqrt, a);
+}
+
+ir_expression *
+log(operand a)
+{
+ return expr(ir_unop_log, a);
+}
+
+ir_expression *
+sign(operand a)
+{
+ return expr(ir_unop_sign, a);
+}
+
+ir_expression *
+subr_to_int(operand a)
+{
+ return expr(ir_unop_subroutine_to_int, a);
+}
+
+ir_expression*
+equal(operand a, operand b)
+{
+ return expr(ir_binop_equal, a, b);
+}
+
+ir_expression*
+nequal(operand a, operand b)
+{
+ return expr(ir_binop_nequal, a, b);
+}
+
+ir_expression*
+less(operand a, operand b)
+{
+ return expr(ir_binop_less, a, b);
+}
+
+ir_expression*
+greater(operand a, operand b)
+{
+ return expr(ir_binop_less, b, a);
+}
+
+ir_expression*
+lequal(operand a, operand b)
+{
+ return expr(ir_binop_gequal, b, a);
+}
+
+ir_expression*
+gequal(operand a, operand b)
+{
+ return expr(ir_binop_gequal, a, b);
+}
+
+ir_expression*
+logic_not(operand a)
+{
+ return expr(ir_unop_logic_not, a);
+}
+
+ir_expression*
+logic_and(operand a, operand b)
+{
+ return expr(ir_binop_logic_and, a, b);
+}
+
+ir_expression*
+logic_or(operand a, operand b)
+{
+ return expr(ir_binop_logic_or, a, b);
+}
+
+ir_expression*
+bit_not(operand a)
+{
+ return expr(ir_unop_bit_not, a);
+}
+
+ir_expression*
+bit_and(operand a, operand b)
+{
+ return expr(ir_binop_bit_and, a, b);
+}
+
+ir_expression*
+bit_or(operand a, operand b)
+{
+ return expr(ir_binop_bit_or, a, b);
+}
+
+ir_expression*
+bit_xor(operand a, operand b)
+{
+ return expr(ir_binop_bit_xor, a, b);
+}
+
+ir_expression*
+lshift(operand a, operand b)
+{
+ return expr(ir_binop_lshift, a, b);
+}
+
+ir_expression*
+rshift(operand a, operand b)
+{
+ return expr(ir_binop_rshift, a, b);
+}
+
+ir_expression*
+f2i(operand a)
+{
+ return expr(ir_unop_f2i, a);
+}
+
+ir_expression*
+bitcast_f2i(operand a)
+{
+ return expr(ir_unop_bitcast_f2i, a);
+}
+
+ir_expression*
+i2f(operand a)
+{
+ return expr(ir_unop_i2f, a);
+}
+
+ir_expression*
+bitcast_i2f(operand a)
+{
+ return expr(ir_unop_bitcast_i2f, a);
+}
+
+ir_expression*
+i2u(operand a)
+{
+ return expr(ir_unop_i2u, a);
+}
+
+ir_expression*
+u2i(operand a)
+{
+ return expr(ir_unop_u2i, a);
+}
+
+ir_expression*
+f2u(operand a)
+{
+ return expr(ir_unop_f2u, a);
+}
+
+ir_expression*
+bitcast_f2u(operand a)
+{
+ return expr(ir_unop_bitcast_f2u, a);
+}
+
+ir_expression*
+u2f(operand a)
+{
+ return expr(ir_unop_u2f, a);
+}
+
+ir_expression*
+bitcast_u2f(operand a)
+{
+ return expr(ir_unop_bitcast_u2f, a);
+}
+
+ir_expression*
+i2b(operand a)
+{
+ return expr(ir_unop_i2b, a);
+}
+
+ir_expression*
+b2i(operand a)
+{
+ return expr(ir_unop_b2i, a);
+}
+
+ir_expression *
+f2b(operand a)
+{
+ return expr(ir_unop_f2b, a);
+}
+
+ir_expression *
+b2f(operand a)
+{
+ return expr(ir_unop_b2f, a);
+}
+
+ir_expression*
+bitcast_d2i64(operand a)
+{
+ return expr(ir_unop_bitcast_d2i64, a);
+}
+
+ir_expression*
+bitcast_d2u64(operand a)
+{
+ return expr(ir_unop_bitcast_d2u64, a);
+}
+
+ir_expression*
+bitcast_i642d(operand a)
+{
+ return expr(ir_unop_bitcast_i642d, a);
+}
+
+ir_expression*
+bitcast_u642d(operand a)
+{
+ return expr(ir_unop_bitcast_u642d, a);
+}
+
+ir_expression *
+interpolate_at_centroid(operand a)
+{
+ return expr(ir_unop_interpolate_at_centroid, a);
+}
+
+ir_expression *
+interpolate_at_offset(operand a, operand b)
+{
+ return expr(ir_binop_interpolate_at_offset, a, b);
+}
+
+ir_expression *
+interpolate_at_sample(operand a, operand b)
+{
+ return expr(ir_binop_interpolate_at_sample, a, b);
+}
+
+ir_expression *
+f2d(operand a)
+{
+ return expr(ir_unop_f2d, a);
+}
+
+ir_expression *
+i2d(operand a)
+{
+ return expr(ir_unop_i2d, a);
+}
+
+ir_expression *
+u2d(operand a)
+{
+ return expr(ir_unop_u2d, a);
+}
+
+ir_expression *
+fma(operand a, operand b, operand c)
+{
+ return expr(ir_triop_fma, a, b, c);
+}
+
+ir_expression *
+lrp(operand x, operand y, operand a)
+{
+ return expr(ir_triop_lrp, x, y, a);
+}
+
+ir_expression *
+csel(operand a, operand b, operand c)
+{
+ return expr(ir_triop_csel, a, b, c);
+}
+
+ir_expression *
+bitfield_extract(operand a, operand b, operand c)
+{
+ return expr(ir_triop_bitfield_extract, a, b, c);
+}
+
+ir_expression *
+bitfield_insert(operand a, operand b, operand c, operand d)
+{
+ void *mem_ctx = ralloc_parent(a.val);
+ return new(mem_ctx) ir_expression(ir_quadop_bitfield_insert,
+ a.val->type, a.val, b.val, c.val, d.val);
+}
+
+ir_if*
+if_tree(operand condition,
+ ir_instruction *then_branch)
+{
+ assert(then_branch != NULL);
+
+ void *mem_ctx = ralloc_parent(condition.val);
+
+ ir_if *result = new(mem_ctx) ir_if(condition.val);
+ result->then_instructions.push_tail(then_branch);
+ return result;
+}
+
+ir_if*
+if_tree(operand condition,
+ ir_instruction *then_branch,
+ ir_instruction *else_branch)
+{
+ assert(then_branch != NULL);
+ assert(else_branch != NULL);
+
+ void *mem_ctx = ralloc_parent(condition.val);
+
+ ir_if *result = new(mem_ctx) ir_if(condition.val);
+ result->then_instructions.push_tail(then_branch);
+ result->else_instructions.push_tail(else_branch);
+ return result;
+}
+
+} /* namespace ir_builder */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.h
new file mode 100644
index 0000000000..9309039f9d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder.h
@@ -0,0 +1,243 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef IR_BUILDER_H
+#define IR_BUILDER_H
+
+#include "ir.h"
+
+namespace ir_builder {
+
+#ifndef WRITEMASK_X
+enum writemask {
+ WRITEMASK_X = 0x1,
+ WRITEMASK_Y = 0x2,
+ WRITEMASK_Z = 0x4,
+ WRITEMASK_W = 0x8,
+};
+#endif
+
+/**
+ * This little class exists to let the helper expression generators
+ * take either an ir_rvalue * or an ir_variable * to be automatically
+ * dereferenced, while still providing compile-time type checking.
+ *
+ * You don't have to explicitly call the constructor -- C++ will see
+ * that you passed an ir_variable, and silently call the
+ * operand(ir_variable *var) constructor behind your back.
+ */
+class operand {
+public:
+ operand(ir_rvalue *val)
+ : val(val)
+ {
+ }
+
+ operand(ir_variable *var)
+ {
+ void *mem_ctx = ralloc_parent(var);
+ val = new(mem_ctx) ir_dereference_variable(var);
+ }
+
+ ir_rvalue *val;
+};
+
+/** Automatic generator for ir_dereference_variable on assignment LHS.
+ *
+ * \sa operand
+ */
+class deref {
+public:
+ deref(ir_dereference *val)
+ : val(val)
+ {
+ }
+
+ deref(ir_variable *var)
+ {
+ void *mem_ctx = ralloc_parent(var);
+ val = new(mem_ctx) ir_dereference_variable(var);
+ }
+
+
+ ir_dereference *val;
+};
+
+class ir_factory {
+public:
+ ir_factory(exec_list *instructions = NULL, void *mem_ctx = NULL)
+ : instructions(instructions),
+ mem_ctx(mem_ctx)
+ {
+ return;
+ }
+
+ void emit(ir_instruction *ir);
+ ir_variable *make_temp(const glsl_type *type, const char *name);
+
+ ir_constant*
+ constant(float f)
+ {
+ return new(mem_ctx) ir_constant(f);
+ }
+
+ ir_constant*
+ constant(int i)
+ {
+ return new(mem_ctx) ir_constant(i);
+ }
+
+ ir_constant*
+ constant(unsigned u)
+ {
+ return new(mem_ctx) ir_constant(u);
+ }
+
+ ir_constant*
+ constant(bool b)
+ {
+ return new(mem_ctx) ir_constant(b);
+ }
+
+ exec_list *instructions;
+ void *mem_ctx;
+};
+
+ir_assignment *assign(deref lhs, operand rhs);
+ir_assignment *assign(deref lhs, operand rhs, int writemask);
+ir_assignment *assign(deref lhs, operand rhs, operand condition);
+ir_assignment *assign(deref lhs, operand rhs, operand condition, int writemask);
+
+ir_return *ret(operand retval);
+
+ir_expression *expr(ir_expression_operation op, operand a);
+ir_expression *expr(ir_expression_operation op, operand a, operand b);
+ir_expression *expr(ir_expression_operation op, operand a, operand b, operand c);
+ir_expression *add(operand a, operand b);
+ir_expression *sub(operand a, operand b);
+ir_expression *mul(operand a, operand b);
+ir_expression *imul_high(operand a, operand b);
+ir_expression *div(operand a, operand b);
+ir_expression *carry(operand a, operand b);
+ir_expression *borrow(operand a, operand b);
+ir_expression *trunc(operand a);
+ir_expression *round_even(operand a);
+ir_expression *fract(operand a);
+ir_expression *dot(operand a, operand b);
+ir_expression *clamp(operand a, operand b, operand c);
+ir_expression *saturate(operand a);
+ir_expression *abs(operand a);
+ir_expression *neg(operand a);
+ir_expression *sin(operand a);
+ir_expression *cos(operand a);
+ir_expression *exp(operand a);
+ir_expression *rcp(operand a);
+ir_expression *rsq(operand a);
+ir_expression *sqrt(operand a);
+ir_expression *log(operand a);
+ir_expression *sign(operand a);
+
+ir_expression *subr_to_int(operand a);
+ir_expression *equal(operand a, operand b);
+ir_expression *nequal(operand a, operand b);
+ir_expression *less(operand a, operand b);
+ir_expression *greater(operand a, operand b);
+ir_expression *lequal(operand a, operand b);
+ir_expression *gequal(operand a, operand b);
+
+ir_expression *logic_not(operand a);
+ir_expression *logic_and(operand a, operand b);
+ir_expression *logic_or(operand a, operand b);
+
+ir_expression *bit_not(operand a);
+ir_expression *bit_or(operand a, operand b);
+ir_expression *bit_and(operand a, operand b);
+ir_expression *bit_xor(operand a, operand b);
+ir_expression *lshift(operand a, operand b);
+ir_expression *rshift(operand a, operand b);
+
+ir_expression *f2i(operand a);
+ir_expression *bitcast_f2i(operand a);
+ir_expression *i2f(operand a);
+ir_expression *bitcast_i2f(operand a);
+ir_expression *f2u(operand a);
+ir_expression *bitcast_f2u(operand a);
+ir_expression *u2f(operand a);
+ir_expression *bitcast_u2f(operand a);
+ir_expression *i2u(operand a);
+ir_expression *u2i(operand a);
+ir_expression *b2i(operand a);
+ir_expression *i2b(operand a);
+ir_expression *f2b(operand a);
+ir_expression *b2f(operand a);
+
+ir_expression *f2d(operand a);
+ir_expression *i2d(operand a);
+ir_expression *u2d(operand a);
+
+ir_expression *bitcast_d2i64(operand a);
+ir_expression *bitcast_d2u64(operand a);
+
+ir_expression *bitcast_i642d(operand a);
+ir_expression *bitcast_u642d(operand a);
+
+ir_expression *min2(operand a, operand b);
+ir_expression *max2(operand a, operand b);
+
+ir_expression *interpolate_at_centroid(operand a);
+ir_expression *interpolate_at_offset(operand a, operand b);
+ir_expression *interpolate_at_sample(operand a, operand b);
+
+ir_expression *fma(operand a, operand b, operand c);
+ir_expression *lrp(operand x, operand y, operand a);
+ir_expression *csel(operand a, operand b, operand c);
+ir_expression *bitfield_extract(operand a, operand b, operand c);
+ir_expression *bitfield_insert(operand a, operand b, operand c, operand d);
+
+ir_swizzle *swizzle(operand a, int swizzle, int components);
+/**
+ * Swizzle away later components, but preserve the ordering.
+ */
+ir_swizzle *swizzle_for_size(operand a, unsigned components);
+
+ir_swizzle *swizzle_xxxx(operand a);
+ir_swizzle *swizzle_yyyy(operand a);
+ir_swizzle *swizzle_zzzz(operand a);
+ir_swizzle *swizzle_wwww(operand a);
+ir_swizzle *swizzle_x(operand a);
+ir_swizzle *swizzle_y(operand a);
+ir_swizzle *swizzle_z(operand a);
+ir_swizzle *swizzle_w(operand a);
+ir_swizzle *swizzle_xy(operand a);
+ir_swizzle *swizzle_xyz(operand a);
+ir_swizzle *swizzle_xyzw(operand a);
+
+ir_if *if_tree(operand condition,
+ ir_instruction *then_branch);
+ir_if *if_tree(operand condition,
+ ir_instruction *then_branch,
+ ir_instruction *else_branch);
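+
+/* Editor's sketch (illustrative, not upstream code): typical use of the
+ * helpers above.  The operand/deref wrappers let plain ir_variable pointers
+ * be passed straight to the expression helpers; the function below and its
+ * arguments are hypothetical.
+ */
+static inline void
+emit_scaled_sum(exec_list *instructions, void *mem_ctx,
+                ir_variable *a, ir_variable *b)
+{
+   ir_factory body(instructions, mem_ctx);
+
+   /* sum = (a + b) * 0.5 */
+   ir_variable *sum = body.make_temp(glsl_type::vec4_type, "sum");
+   body.emit(assign(sum, mul(add(a, b), body.constant(0.5f))));
+}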
+
+} /* namespace ir_builder */
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp
new file mode 100644
index 0000000000..5ab50a5841
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp
@@ -0,0 +1,778 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h> /* for PRIx64 macro */
+#include "ir.h"
+#include "ir_hierarchical_visitor.h"
+#include "ir_builder_print_visitor.h"
+#include "compiler/glsl_types.h"
+#include "glsl_parser_extras.h"
+#include "main/macros.h"
+#include "util/hash_table.h"
+#include "util/u_string.h"
+
+class ir_builder_print_visitor : public ir_hierarchical_visitor {
+public:
+ ir_builder_print_visitor(FILE *f);
+ virtual ~ir_builder_print_visitor();
+
+ void indent(void);
+
+ virtual ir_visitor_status visit(class ir_variable *);
+ virtual ir_visitor_status visit(class ir_dereference_variable *);
+ virtual ir_visitor_status visit(class ir_constant *);
+ virtual ir_visitor_status visit(class ir_loop_jump *);
+
+ virtual ir_visitor_status visit_enter(class ir_if *);
+
+ virtual ir_visitor_status visit_enter(class ir_loop *);
+ virtual ir_visitor_status visit_leave(class ir_loop *);
+
+ virtual ir_visitor_status visit_enter(class ir_function_signature *);
+ virtual ir_visitor_status visit_leave(class ir_function_signature *);
+
+ virtual ir_visitor_status visit_enter(class ir_expression *);
+
+ virtual ir_visitor_status visit_enter(class ir_assignment *);
+ virtual ir_visitor_status visit_leave(class ir_assignment *);
+
+ virtual ir_visitor_status visit_leave(class ir_call *);
+ virtual ir_visitor_status visit_leave(class ir_swizzle *);
+ virtual ir_visitor_status visit_leave(class ir_return *);
+
+ virtual ir_visitor_status visit_enter(ir_texture *ir);
+
+private:
+ void print_with_indent(const char *fmt, ...);
+ void print_without_indent(const char *fmt, ...);
+
+ void print_without_declaration(const ir_rvalue *ir);
+ void print_without_declaration(const ir_constant *ir);
+ void print_without_declaration(const ir_dereference_variable *ir);
+ void print_without_declaration(const ir_swizzle *ir);
+ void print_without_declaration(const ir_expression *ir);
+
+ unsigned next_ir_index;
+
+ /**
+ * Mapping from ir_instruction * -> index used in the generated C code
+ * variable name.
+ */
+ hash_table *index_map;
+
+ FILE *f;
+
+ int indentation;
+};
+
+/* An operand is "simple" if it can be compactly printed on one line.
+ */
+static bool
+is_simple_operand(const ir_rvalue *ir, unsigned depth = 1)
+{
+ if (depth == 0)
+ return false;
+
+ switch (ir->ir_type) {
+ case ir_type_dereference_variable:
+ return true;
+
+ case ir_type_constant: {
+ if (ir->type == glsl_type::uint_type ||
+ ir->type == glsl_type::int_type ||
+ ir->type == glsl_type::float_type ||
+ ir->type == glsl_type::bool_type)
+ return true;
+
+ const ir_constant *const c = (ir_constant *) ir;
+ ir_constant_data all_zero;
+ memset(&all_zero, 0, sizeof(all_zero));
+
+ return memcmp(&c->value, &all_zero, sizeof(all_zero)) == 0;
+ }
+
+ case ir_type_swizzle: {
+ const ir_swizzle *swiz = (ir_swizzle *) ir;
+ return swiz->mask.num_components == 1 &&
+ is_simple_operand(swiz->val, depth);
+ }
+
+ case ir_type_expression: {
+ const ir_expression *expr = (ir_expression *) ir;
+
+ for (unsigned i = 0; i < expr->num_operands; i++) {
+ if (!is_simple_operand(expr->operands[i], depth - 1))
+ return false;
+ }
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+void
+_mesa_print_builder_for_ir(FILE *f, exec_list *instructions)
+{
+ ir_builder_print_visitor v(f);
+ v.run(instructions);
+}
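+
+/* Editor's note (illustrative): typical invocation, e.g. from a debugging
+ * hook; `shader->ir` is a hypothetical instruction list.
+ *
+ *    _mesa_print_builder_for_ir(stderr, shader->ir);
+ */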
+
+ir_builder_print_visitor::ir_builder_print_visitor(FILE *f)
+ : next_ir_index(1), f(f), indentation(0)
+{
+ index_map = _mesa_pointer_hash_table_create(NULL);
+}
+
+ir_builder_print_visitor::~ir_builder_print_visitor()
+{
+ _mesa_hash_table_destroy(index_map, NULL);
+}
+
+void ir_builder_print_visitor::indent(void)
+{
+ for (int i = 0; i < indentation; i++)
+ fprintf(f, " ");
+}
+
+void
+ir_builder_print_visitor::print_with_indent(const char *fmt, ...)
+{
+ va_list ap;
+
+ indent();
+
+ va_start(ap, fmt);
+ vfprintf(f, fmt, ap);
+ va_end(ap);
+}
+
+void
+ir_builder_print_visitor::print_without_indent(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(f, fmt, ap);
+ va_end(ap);
+}
+
+void
+ir_builder_print_visitor::print_without_declaration(const ir_rvalue *ir)
+{
+ switch (ir->ir_type) {
+ case ir_type_dereference_variable:
+ print_without_declaration((ir_dereference_variable *) ir);
+ break;
+ case ir_type_constant:
+ print_without_declaration((ir_constant *) ir);
+ break;
+ case ir_type_swizzle:
+ print_without_declaration((ir_swizzle *) ir);
+ break;
+ case ir_type_expression:
+ print_without_declaration((ir_expression *) ir);
+ break;
+ default:
+ unreachable("Invalid IR type.");
+ }
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit(ir_variable *ir)
+{
+ const unsigned my_index = next_ir_index++;
+
+ _mesa_hash_table_insert(index_map, ir, (void *)(uintptr_t) my_index);
+
+ const char *mode_str;
+ switch (ir->data.mode) {
+ case ir_var_auto: mode_str = "ir_var_auto"; break;
+ case ir_var_uniform: mode_str = "ir_var_uniform"; break;
+ case ir_var_shader_storage: mode_str = "ir_var_shader_storage"; break;
+ case ir_var_shader_shared: mode_str = "ir_var_shader_shared"; break;
+ case ir_var_shader_in: mode_str = "ir_var_shader_in"; break;
+ case ir_var_shader_out: mode_str = "ir_var_shader_out"; break;
+ case ir_var_function_in: mode_str = "ir_var_function_in"; break;
+ case ir_var_function_out: mode_str = "ir_var_function_out"; break;
+ case ir_var_function_inout: mode_str = "ir_var_function_inout"; break;
+ case ir_var_const_in: mode_str = "ir_var_const_in"; break;
+ case ir_var_system_value: mode_str = "ir_var_system_value"; break;
+ case ir_var_temporary: mode_str = "ir_var_temporary"; break;
+ default:
+ unreachable("Invalid variable mode");
+ }
+
+ if (ir->data.mode == ir_var_temporary) {
+ print_with_indent("ir_variable *const r%04X = body.make_temp(glsl_type::%s_type, \"%s\");\n",
+ my_index,
+ ir->type->name,
+ ir->name);
+ } else {
+ print_with_indent("ir_variable *const r%04X = new(mem_ctx) ir_variable(glsl_type::%s_type, \"%s\", %s);\n",
+ my_index,
+ ir->type->name,
+ ir->name,
+ mode_str);
+
+ switch (ir->data.mode) {
+ case ir_var_function_in:
+ case ir_var_function_out:
+ case ir_var_function_inout:
+ case ir_var_const_in:
+ print_with_indent("sig_parameters.push_tail(r%04X);\n", my_index);
+ break;
+ default:
+ print_with_indent("body.emit(r%04X);\n", my_index);
+ break;
+ }
+ }
+
+ return visit_continue;
+}
+
+void
+ir_builder_print_visitor::print_without_declaration(const ir_dereference_variable *ir)
+{
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->var);
+
+ print_without_indent("r%04X", (unsigned)(uintptr_t) he->data);
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit(ir_dereference_variable *ir)
+{
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->var);
+
+ if (he != NULL)
+ _mesa_hash_table_insert(index_map, ir, he->data);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_enter(ir_function_signature *ir)
+{
+ if (!ir->is_defined)
+ return visit_continue_with_parent;
+
+ print_with_indent("ir_function_signature *\n"
+ "%s(void *mem_ctx, builtin_available_predicate avail)\n"
+ "{\n",
+ ir->function_name());
+ indentation++;
+ print_with_indent("ir_function_signature *const sig =\n");
+ print_with_indent(" new(mem_ctx) ir_function_signature(glsl_type::%s_type, avail);\n",
+ ir->return_type->name);
+
+ print_with_indent("ir_factory body(&sig->body, mem_ctx);\n");
+ print_with_indent("sig->is_defined = true;\n\n");
+
+ if (!ir->parameters.is_empty())
+ print_with_indent("exec_list sig_parameters;\n\n");
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_leave(ir_function_signature *ir)
+{
+ if (!ir->parameters.is_empty())
+ print_with_indent("sig->replace_parameters(&sig_parameters);\n");
+
+ print_with_indent("return sig;\n");
+ indentation--;
+ print_with_indent("}\n");
+ return visit_continue;
+}
+
+void
+ir_builder_print_visitor::print_without_declaration(const ir_constant *ir)
+{
+ if (ir->type->is_scalar()) {
+ switch (ir->type->base_type) {
+ case GLSL_TYPE_UINT:
+ print_without_indent("body.constant(%uu)", ir->value.u[0]);
+ return;
+ case GLSL_TYPE_INT:
+ print_without_indent("body.constant(int(%d))", ir->value.i[0]);
+ return;
+ case GLSL_TYPE_FLOAT:
+ print_without_indent("body.constant(%ff)", ir->value.f[0]);
+ return;
+ case GLSL_TYPE_BOOL:
+ print_without_indent("body.constant(%s)",
+ ir->value.i[0] != 0 ? "true" : "false");
+ return;
+ default:
+ break;
+ }
+ }
+
+ ir_constant_data all_zero;
+ memset(&all_zero, 0, sizeof(all_zero));
+
+ if (memcmp(&ir->value, &all_zero, sizeof(all_zero)) == 0) {
+ print_without_indent("ir_constant::zero(mem_ctx, glsl_type::%s_type)",
+ ir->type->name);
+ }
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit(ir_constant *ir)
+{
+ const unsigned my_index = next_ir_index++;
+
+ _mesa_hash_table_insert(index_map, ir, (void *)(uintptr_t) my_index);
+
+ if (ir->type == glsl_type::uint_type ||
+ ir->type == glsl_type::int_type ||
+ ir->type == glsl_type::float_type ||
+ ir->type == glsl_type::bool_type) {
+ print_with_indent("ir_constant *const r%04X = ", my_index);
+ print_without_declaration(ir);
+ print_without_indent(";\n");
+ return visit_continue;
+ }
+
+ ir_constant_data all_zero;
+ memset(&all_zero, 0, sizeof(all_zero));
+
+ if (memcmp(&ir->value, &all_zero, sizeof(all_zero)) == 0) {
+ print_with_indent("ir_constant *const r%04X = ", my_index);
+ print_without_declaration(ir);
+ print_without_indent(";\n");
+ } else {
+ print_with_indent("ir_constant_data r%04X_data;\n", my_index);
+ print_with_indent("memset(&r%04X_data, 0, sizeof(ir_constant_data));\n",
+ my_index);
+ for (unsigned i = 0; i < 16; i++) {
+ switch (ir->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if (ir->value.u[i] != 0)
+ print_with_indent("r%04X_data.u[%u] = %u;\n",
+ my_index, i, ir->value.u[i]);
+ break;
+ case GLSL_TYPE_INT:
+ if (ir->value.i[i] != 0)
+ print_with_indent("r%04X_data.i[%u] = %i;\n",
+ my_index, i, ir->value.i[i]);
+ break;
+ case GLSL_TYPE_FLOAT:
+ if (ir->value.u[i] != 0)
+ print_with_indent("r%04X_data.u[%u] = 0x%08x; /* %f */\n",
+ my_index,
+ i,
+ ir->value.u[i],
+ ir->value.f[i]);
+ break;
+ case GLSL_TYPE_DOUBLE: {
+ uint64_t v;
+
+ STATIC_ASSERT(sizeof(double) == sizeof(uint64_t));
+
+ memcpy(&v, &ir->value.d[i], sizeof(v));
+ if (v != 0)
+ print_with_indent("r%04X_data.u64[%u] = 0x%016" PRIx64 "; /* %g */\n",
+ my_index, i, v, ir->value.d[i]);
+ break;
+ }
+ case GLSL_TYPE_UINT64:
+ if (ir->value.u64[i] != 0)
+ print_with_indent("r%04X_data.u64[%u] = %" PRIu64 ";\n",
+ my_index,
+ i,
+ ir->value.u64[i]);
+ break;
+ case GLSL_TYPE_INT64:
+ if (ir->value.i64[i] != 0)
+ print_with_indent("r%04X_data.i64[%u] = %" PRId64 ";\n",
+ my_index,
+ i,
+ ir->value.i64[i]);
+ break;
+ case GLSL_TYPE_BOOL:
+ if (ir->value.u[i] != 0)
+ print_with_indent("r%04X_data.u[%u] = 1;\n", my_index, i);
+ break;
+ default:
+ unreachable("Invalid constant type");
+ }
+ }
+
+ print_with_indent("ir_constant *const r%04X = new(mem_ctx) ir_constant(glsl_type::%s_type, &r%04X_data);\n",
+ my_index,
+ ir->type->name,
+ my_index);
+ }
+
+ return visit_continue;
+}
+
+void
+ir_builder_print_visitor::print_without_declaration(const ir_swizzle *ir)
+{
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->val);
+
+ if (ir->mask.num_components == 1) {
+ static const char swiz[4] = { 'x', 'y', 'z', 'w' };
+
+ if (is_simple_operand(ir->val)) {
+ print_without_indent("swizzle_%c(", swiz[ir->mask.x]);
+ print_without_declaration(ir->val);
+ print_without_indent(")");
+ } else {
+ assert(he);
+ print_without_indent("swizzle_%c(r%04X)",
+ swiz[ir->mask.x],
+ (unsigned)(uintptr_t) he->data);
+ }
+ } else {
+ static const char swiz[4] = { 'X', 'Y', 'Z', 'W' };
+
+ assert(he);
+ print_without_indent("swizzle(r%04X, MAKE_SWIZZLE4(SWIZZLE_%c, SWIZZLE_%c, SWIZZLE_%c, SWIZZLE_%c), %u)",
+ (unsigned)(uintptr_t) he->data,
+ swiz[ir->mask.x],
+ swiz[ir->mask.y],
+ swiz[ir->mask.z],
+ swiz[ir->mask.w],
+ ir->mask.num_components);
+ }
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_leave(ir_swizzle *ir)
+{
+ const unsigned my_index = next_ir_index++;
+
+ _mesa_hash_table_insert(index_map, ir, (void *)(uintptr_t) my_index);
+
+ print_with_indent("ir_swizzle *const r%04X = ", my_index);
+ print_without_declaration(ir);
+ print_without_indent(";\n");
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_enter(ir_assignment *ir)
+{
+ ir_expression *const rhs_expr = ir->rhs->as_expression();
+
+ if (!is_simple_operand(ir->rhs) && rhs_expr == NULL)
+ return visit_continue;
+
+ if (rhs_expr != NULL) {
+ const unsigned num_op = rhs_expr->num_operands;
+
+ for (unsigned i = 0; i < num_op; i++) {
+ if (is_simple_operand(rhs_expr->operands[i]))
+ continue;
+
+ rhs_expr->operands[i]->accept(this);
+ }
+ }
+
+ ir_visitor_status s;
+
+ this->in_assignee = true;
+ s = ir->lhs->accept(this);
+ this->in_assignee = false;
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ assert(ir->condition == NULL);
+
+ const struct hash_entry *const he_lhs =
+ _mesa_hash_table_search(index_map, ir->lhs);
+
+ print_with_indent("body.emit(assign(r%04X, ",
+ (unsigned)(uintptr_t) he_lhs->data);
+ print_without_declaration(ir->rhs);
+ print_without_indent(", 0x%02x));\n\n", ir->write_mask);
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_leave(ir_assignment *ir)
+{
+ const struct hash_entry *const he_lhs =
+ _mesa_hash_table_search(index_map, ir->lhs);
+
+ const struct hash_entry *const he_rhs =
+ _mesa_hash_table_search(index_map, ir->rhs);
+
+ assert(ir->condition == NULL);
+ assert(ir->lhs && ir->rhs);
+
+ print_with_indent("body.emit(assign(r%04X, r%04X, 0x%02x));\n\n",
+ (unsigned)(uintptr_t) he_lhs->data,
+ (unsigned)(uintptr_t) he_rhs->data,
+ ir->write_mask);
+
+ return visit_continue;
+}
+
+void
+ir_builder_print_visitor::print_without_declaration(const ir_expression *ir)
+{
+ const unsigned num_op = ir->num_operands;
+
+ static const char *const arity[] = {
+ "", "unop", "binop", "triop", "quadop"
+ };
+
+ switch (ir->operation) {
+ case ir_unop_neg:
+ case ir_binop_add:
+ case ir_binop_sub:
+ case ir_binop_mul:
+ case ir_binop_imul_high:
+ case ir_binop_less:
+ case ir_binop_gequal:
+ case ir_binop_equal:
+ case ir_binop_nequal:
+ case ir_binop_lshift:
+ case ir_binop_rshift:
+ case ir_binop_bit_and:
+ case ir_binop_bit_xor:
+ case ir_binop_bit_or:
+ case ir_binop_logic_and:
+ case ir_binop_logic_xor:
+ case ir_binop_logic_or:
+ print_without_indent("%s(",
+ ir_expression_operation_enum_strings[ir->operation]);
+ break;
+ default:
+ print_without_indent("expr(ir_%s_%s, ",
+ arity[num_op],
+ ir_expression_operation_enum_strings[ir->operation]);
+ break;
+ }
+
+ for (unsigned i = 0; i < num_op; i++) {
+ if (is_simple_operand(ir->operands[i]))
+ print_without_declaration(ir->operands[i]);
+ else {
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->operands[i]);
+
+ print_without_indent("r%04X", (unsigned)(uintptr_t) he->data);
+ }
+
+ if (i < num_op - 1)
+ print_without_indent(", ");
+ }
+
+ print_without_indent(")");
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_enter(ir_expression *ir)
+{
+ const unsigned num_op = ir->num_operands;
+
+ for (unsigned i = 0; i < num_op; i++) {
+ if (is_simple_operand(ir->operands[i]))
+ continue;
+
+ ir->operands[i]->accept(this);
+ }
+
+ const unsigned my_index = next_ir_index++;
+
+ _mesa_hash_table_insert(index_map, ir, (void *)(uintptr_t) my_index);
+
+ print_with_indent("ir_expression *const r%04X = ", my_index);
+ print_without_declaration(ir);
+ print_without_indent(";\n");
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_enter(ir_if *ir)
+{
+ const unsigned my_index = next_ir_index++;
+
+ print_with_indent("/* IF CONDITION */\n");
+
+ ir_visitor_status s = ir->condition->accept(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->condition);
+
+ print_with_indent("ir_if *f%04X = new(mem_ctx) ir_if(operand(r%04X).val);\n",
+ my_index,
+ (unsigned)(uintptr_t) he->data);
+ print_with_indent("exec_list *const f%04X_parent_instructions = body.instructions;\n\n",
+ my_index);
+
+ indentation++;
+ print_with_indent("/* THEN INSTRUCTIONS */\n");
+ print_with_indent("body.instructions = &f%04X->then_instructions;\n\n",
+ my_index);
+
+ if (s != visit_continue_with_parent) {
+ s = visit_list_elements(this, &ir->then_instructions);
+ if (s == visit_stop)
+ return s;
+ }
+
+ print_without_indent("\n");
+
+ if (!ir->else_instructions.is_empty()) {
+ print_with_indent("/* ELSE INSTRUCTIONS */\n");
+ print_with_indent("body.instructions = &f%04X->else_instructions;\n\n",
+ my_index);
+
+ if (s != visit_continue_with_parent) {
+ s = visit_list_elements(this, &ir->else_instructions);
+ if (s == visit_stop)
+ return s;
+ }
+
+ print_without_indent("\n");
+ }
+
+ indentation--;
+
+ print_with_indent("body.instructions = f%04X_parent_instructions;\n",
+ my_index);
+ print_with_indent("body.emit(f%04X);\n\n",
+ my_index);
+ print_with_indent("/* END IF */\n\n");
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_leave(ir_return *ir)
+{
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->value);
+
+ print_with_indent("body.emit(ret(r%04X));\n\n",
+ (unsigned)(uintptr_t) he->data);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_enter(ir_texture *ir)
+{
+   print_with_indent("\nUnsupported IR encountered: texture functions are not supported. Exiting.\n");
+
+ return visit_stop;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_leave(ir_call *ir)
+{
+ const unsigned my_index = next_ir_index++;
+
+ print_without_indent("\n");
+ print_with_indent("/* CALL %s */\n", ir->callee_name());
+ print_with_indent("exec_list r%04X_parameters;\n", my_index);
+
+ foreach_in_list(ir_dereference_variable, param, &ir->actual_parameters) {
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, param);
+
+ print_with_indent("r%04X_parameters.push_tail(operand(r%04X).val);\n",
+ my_index,
+ (unsigned)(uintptr_t) he->data);
+ }
+
+ char return_deref_string[32];
+ if (ir->return_deref) {
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir->return_deref);
+
+ snprintf(return_deref_string, sizeof(return_deref_string),
+ "operand(r%04X).val", (unsigned)(uintptr_t) he->data);
+ } else {
+ strcpy(return_deref_string, "NULL");
+ }
+
+ print_with_indent("body.emit(new(mem_ctx) ir_call(shader->symbols->get_function(\"%s\"),\n",
+ ir->callee_name());
+   print_with_indent("                               %s, &r%04X_parameters));\n\n",
+ return_deref_string,
+ my_index);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_enter(ir_loop *ir)
+{
+ const unsigned my_index = next_ir_index++;
+
+ _mesa_hash_table_insert(index_map, ir, (void *)(uintptr_t) my_index);
+
+ print_with_indent("/* LOOP BEGIN */\n");
+ print_with_indent("ir_loop *f%04X = new(mem_ctx) ir_loop();\n", my_index);
+ print_with_indent("exec_list *const f%04X_parent_instructions = body.instructions;\n\n",
+ my_index);
+
+ indentation++;
+
+ print_with_indent("body.instructions = &f%04X->body_instructions;\n\n",
+ my_index);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit_leave(ir_loop *ir)
+{
+ const struct hash_entry *const he =
+ _mesa_hash_table_search(index_map, ir);
+
+ indentation--;
+
+ print_with_indent("/* LOOP END */\n\n");
+ print_with_indent("body.instructions = f%04X_parent_instructions;\n",
+ (unsigned)(uintptr_t) he->data);
+ print_with_indent("body.emit(f%04X);\n\n",
+ (unsigned)(uintptr_t) he->data);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_builder_print_visitor::visit(ir_loop_jump *ir)
+{
+ print_with_indent("body.emit(new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_%s));\n\n",
+ ir->is_break() ? "break" : "continue");
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h
new file mode 100644
index 0000000000..bd540151df
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h
@@ -0,0 +1,31 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_BUILDER_PRINT_VISITOR_H
+#define IR_BUILDER_PRINT_VISITOR_H
+
+extern void
+_mesa_print_builder_for_ir(FILE *f, exec_list *instructions);
+
+#endif /* IR_BUILDER_PRINT_VISITOR_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_clone.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_clone.cpp
new file mode 100644
index 0000000000..0355b9e916
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_clone.cpp
@@ -0,0 +1,455 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <string.h>
+#include "util/compiler.h"
+#include "ir.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+ir_rvalue *
+ir_rvalue::clone(void *mem_ctx, struct hash_table *) const
+{
+ /* The only possible instantiation is the generic error value. */
+ return error_value(mem_ctx);
+}
+
+/**
+ * Duplicate an IR variable
+ */
+ir_variable *
+ir_variable::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_variable *var = new(mem_ctx) ir_variable(this->type, this->name,
+ (ir_variable_mode) this->data.mode);
+
+ var->data.max_array_access = this->data.max_array_access;
+ if (this->is_interface_instance()) {
+ var->u.max_ifc_array_access =
+ rzalloc_array(var, int, this->interface_type->length);
+ memcpy(var->u.max_ifc_array_access, this->u.max_ifc_array_access,
+ this->interface_type->length * sizeof(unsigned));
+ }
+
+ memcpy(&var->data, &this->data, sizeof(var->data));
+
+ if (this->get_state_slots()) {
+ ir_state_slot *s = var->allocate_state_slots(this->get_num_state_slots());
+ memcpy(s, this->get_state_slots(),
+ sizeof(s[0]) * var->get_num_state_slots());
+ }
+
+ if (this->constant_value)
+ var->constant_value = this->constant_value->clone(mem_ctx, ht);
+
+ if (this->constant_initializer)
+ var->constant_initializer =
+ this->constant_initializer->clone(mem_ctx, ht);
+
+ var->interface_type = this->interface_type;
+
+ if (ht)
+ _mesa_hash_table_insert(ht, (void *)const_cast<ir_variable *>(this), var);
+
+ return var;
+}
+
+ir_swizzle *
+ir_swizzle::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ return new(mem_ctx) ir_swizzle(this->val->clone(mem_ctx, ht), this->mask);
+}
+
+ir_return *
+ir_return::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_rvalue *new_value = NULL;
+
+ if (this->value)
+ new_value = this->value->clone(mem_ctx, ht);
+
+ return new(mem_ctx) ir_return(new_value);
+}
+
+ir_discard *
+ir_discard::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_rvalue *new_condition = NULL;
+
+ if (this->condition != NULL)
+ new_condition = this->condition->clone(mem_ctx, ht);
+
+ return new(mem_ctx) ir_discard(new_condition);
+}
+
+ir_demote *
+ir_demote::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ return new(mem_ctx) ir_demote();
+}
+
+ir_loop_jump *
+ir_loop_jump::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ (void)ht;
+
+ return new(mem_ctx) ir_loop_jump(this->mode);
+}
+
+ir_if *
+ir_if::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_if *new_if = new(mem_ctx) ir_if(this->condition->clone(mem_ctx, ht));
+
+ foreach_in_list(ir_instruction, ir, &this->then_instructions) {
+ new_if->then_instructions.push_tail(ir->clone(mem_ctx, ht));
+ }
+
+ foreach_in_list(ir_instruction, ir, &this->else_instructions) {
+ new_if->else_instructions.push_tail(ir->clone(mem_ctx, ht));
+ }
+
+ return new_if;
+}
+
+ir_loop *
+ir_loop::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_loop *new_loop = new(mem_ctx) ir_loop();
+
+ foreach_in_list(ir_instruction, ir, &this->body_instructions) {
+ new_loop->body_instructions.push_tail(ir->clone(mem_ctx, ht));
+ }
+
+ return new_loop;
+}
+
+ir_call *
+ir_call::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_dereference_variable *new_return_ref = NULL;
+ if (this->return_deref != NULL)
+ new_return_ref = this->return_deref->clone(mem_ctx, ht);
+
+ exec_list new_parameters;
+
+ foreach_in_list(ir_instruction, ir, &this->actual_parameters) {
+ new_parameters.push_tail(ir->clone(mem_ctx, ht));
+ }
+
+ return new(mem_ctx) ir_call(this->callee, new_return_ref, &new_parameters);
+}
+
+ir_expression *
+ir_expression::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_rvalue *op[ARRAY_SIZE(this->operands)] = { NULL, };
+ unsigned int i;
+
+ for (i = 0; i < num_operands; i++) {
+ op[i] = this->operands[i]->clone(mem_ctx, ht);
+ }
+
+ return new(mem_ctx) ir_expression(this->operation, this->type,
+ op[0], op[1], op[2], op[3]);
+}
+
+ir_dereference_variable *
+ir_dereference_variable::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_variable *new_var;
+
+ if (ht) {
+ hash_entry *entry = _mesa_hash_table_search(ht, this->var);
+ new_var = entry ? (ir_variable *) entry->data : this->var;
+ } else {
+ new_var = this->var;
+ }
+
+ return new(mem_ctx) ir_dereference_variable(new_var);
+}
+
+ir_dereference_array *
+ir_dereference_array::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ return new(mem_ctx) ir_dereference_array(this->array->clone(mem_ctx, ht),
+ this->array_index->clone(mem_ctx,
+ ht));
+}
+
+ir_dereference_record *
+ir_dereference_record::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ assert(this->field_idx >= 0);
+ const char *field_name =
+ this->record->type->fields.structure[this->field_idx].name;
+ return new(mem_ctx) ir_dereference_record(this->record->clone(mem_ctx, ht),
+ field_name);
+}
+
+ir_texture *
+ir_texture::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_texture *new_tex = new(mem_ctx) ir_texture(this->op);
+ new_tex->type = this->type;
+
+ new_tex->sampler = this->sampler->clone(mem_ctx, ht);
+ if (this->coordinate)
+ new_tex->coordinate = this->coordinate->clone(mem_ctx, ht);
+ if (this->projector)
+ new_tex->projector = this->projector->clone(mem_ctx, ht);
+ if (this->shadow_comparator) {
+ new_tex->shadow_comparator = this->shadow_comparator->clone(mem_ctx, ht);
+ }
+
+ if (this->offset != NULL)
+ new_tex->offset = this->offset->clone(mem_ctx, ht);
+
+ switch (this->op) {
+ case ir_tex:
+ case ir_lod:
+ case ir_query_levels:
+ case ir_texture_samples:
+ case ir_samples_identical:
+ break;
+ case ir_txb:
+ new_tex->lod_info.bias = this->lod_info.bias->clone(mem_ctx, ht);
+ break;
+ case ir_txl:
+ case ir_txf:
+ case ir_txs:
+ new_tex->lod_info.lod = this->lod_info.lod->clone(mem_ctx, ht);
+ break;
+ case ir_txf_ms:
+ new_tex->lod_info.sample_index = this->lod_info.sample_index->clone(mem_ctx, ht);
+ break;
+ case ir_txd:
+ new_tex->lod_info.grad.dPdx = this->lod_info.grad.dPdx->clone(mem_ctx, ht);
+ new_tex->lod_info.grad.dPdy = this->lod_info.grad.dPdy->clone(mem_ctx, ht);
+ break;
+ case ir_tg4:
+ new_tex->lod_info.component = this->lod_info.component->clone(mem_ctx, ht);
+ break;
+ }
+
+ return new_tex;
+}
+
+ir_assignment *
+ir_assignment::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_rvalue *new_condition = NULL;
+
+ if (this->condition)
+ new_condition = this->condition->clone(mem_ctx, ht);
+
+ ir_assignment *cloned =
+ new(mem_ctx) ir_assignment(this->lhs->clone(mem_ctx, ht),
+ this->rhs->clone(mem_ctx, ht),
+ new_condition);
+ cloned->write_mask = this->write_mask;
+ return cloned;
+}
+
+ir_function *
+ir_function::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_function *copy = new(mem_ctx) ir_function(this->name);
+
+ copy->is_subroutine = this->is_subroutine;
+ copy->subroutine_index = this->subroutine_index;
+ copy->num_subroutine_types = this->num_subroutine_types;
+ copy->subroutine_types = ralloc_array(mem_ctx, const struct glsl_type *, copy->num_subroutine_types);
+ for (int i = 0; i < copy->num_subroutine_types; i++)
+ copy->subroutine_types[i] = this->subroutine_types[i];
+
+ foreach_in_list(const ir_function_signature, sig, &this->signatures) {
+ ir_function_signature *sig_copy = sig->clone(mem_ctx, ht);
+ copy->add_signature(sig_copy);
+
+ if (ht != NULL) {
+ _mesa_hash_table_insert(ht,
+ (void *)const_cast<ir_function_signature *>(sig), sig_copy);
+ }
+ }
+
+ return copy;
+}
+
+ir_function_signature *
+ir_function_signature::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_function_signature *copy = this->clone_prototype(mem_ctx, ht);
+
+ copy->is_defined = this->is_defined;
+
+ /* Clone the instruction list.
+ */
+ foreach_in_list(const ir_instruction, inst, &this->body) {
+ ir_instruction *const inst_copy = inst->clone(mem_ctx, ht);
+ copy->body.push_tail(inst_copy);
+ }
+
+ return copy;
+}
+
+ir_function_signature *
+ir_function_signature::clone_prototype(void *mem_ctx, struct hash_table *ht) const
+{
+ ir_function_signature *copy =
+ new(mem_ctx) ir_function_signature(this->return_type);
+
+ copy->is_defined = false;
+ copy->builtin_avail = this->builtin_avail;
+ copy->origin = this;
+
+ /* Clone the parameter list, but NOT the body.
+ */
+ foreach_in_list(const ir_variable, param, &this->parameters) {
+ assert(const_cast<ir_variable *>(param)->as_variable() != NULL);
+
+ ir_variable *const param_copy = param->clone(mem_ctx, ht);
+ copy->parameters.push_tail(param_copy);
+ }
+
+ return copy;
+}
+
+ir_constant *
+ir_constant::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ (void)ht;
+
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ return new(mem_ctx) ir_constant(this->type, &this->value);
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_ARRAY: {
+ ir_constant *c = new(mem_ctx) ir_constant;
+
+ c->type = this->type;
+ c->const_elements = ralloc_array(c, ir_constant *, this->type->length);
+ for (unsigned i = 0; i < this->type->length; i++) {
+ c->const_elements[i] = this->const_elements[i]->clone(mem_ctx, NULL);
+ }
+ return c;
+ }
+
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_SUBROUTINE:
+ case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_FUNCTION:
+ assert(!"Should not get here.");
+ break;
+ }
+
+ return NULL;
+}
+
+ir_precision_statement *
+ir_precision_statement::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ return new(mem_ctx) ir_precision_statement(this->precision_statement);
+}
+
+ir_typedecl_statement *
+ir_typedecl_statement::clone(void *mem_ctx, struct hash_table *ht) const
+{
+ return new(mem_ctx) ir_typedecl_statement(this->type_decl);
+}
+
+class fixup_ir_call_visitor : public ir_hierarchical_visitor {
+public:
+ fixup_ir_call_visitor(struct hash_table *ht)
+ {
+ this->ht = ht;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_call *ir)
+ {
+ /* Try to find the function signature referenced by the ir_call in the
+ * table. If it is found, replace it with the value from the table.
+ */
+ ir_function_signature *sig;
+ hash_entry *entry = _mesa_hash_table_search(this->ht, ir->callee);
+
+ if (entry != NULL) {
+ sig = (ir_function_signature *) entry->data;
+ ir->callee = sig;
+ }
+
+ /* Since this may be used before function call parameters are flattened,
+ * the children also need to be processed.
+ */
+ return visit_continue;
+ }
+
+private:
+ struct hash_table *ht;
+};
+
+
+static void
+fixup_function_calls(struct hash_table *ht, exec_list *instructions)
+{
+ fixup_ir_call_visitor v(ht);
+ v.run(instructions);
+}
+
+
+void
+clone_ir_list(void *mem_ctx, exec_list *out, const exec_list *in)
+{
+ struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
+
+ foreach_in_list(const ir_instruction, original, in) {
+ ir_instruction *copy = original->clone(mem_ctx, ht);
+
+ out->push_tail(copy);
+ }
+
+ /* Make a pass over the cloned tree to fix up ir_call nodes to point to the
+ * cloned ir_function_signature nodes. This cannot be done automatically
+ * during cloning because the ir_call might be a forward reference (i.e.,
+ * the function signature that it references may not have been cloned yet).
+ */
+ fixup_function_calls(ht, out);
+
+ _mesa_hash_table_destroy(ht, NULL);
+}
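+
+/* Illustrative only (not part of the upstream file): a minimal sketch of how
+ * a caller might duplicate an instruction stream; the names are hypothetical.
+ */
+#if 0
+static void
+clone_example(void *mem_ctx, exec_list *original_instructions)
+{
+   exec_list copy;
+
+   /* Deep-copies every instruction, then re-points ir_call nodes at the
+    * cloned function signatures via fixup_function_calls().
+    */
+   clone_ir_list(mem_ctx, &copy, original_instructions);
+}
+#endif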
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp
new file mode 100644
index 0000000000..de7d4f6c42
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp
@@ -0,0 +1,1153 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_constant_expression.cpp
+ * Evaluate and process constant valued expressions
+ *
+ * In GLSL, constant valued expressions are used in several places. These
+ * must be processed and evaluated very early in the compilation process.
+ *
+ * * Sizes of arrays
+ * * Initializers for uniforms
+ * * Initializers for \c const variables
+ */
+
+#include <math.h>
+#include "util/rounding.h" /* for _mesa_roundeven */
+#include "util/half_float.h"
+#include "ir.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+#include "util/u_math.h"
+
+static float
+dot_f(ir_constant *op0, ir_constant *op1)
+{
+ assert(op0->type->is_float() && op1->type->is_float());
+
+ float result = 0;
+ for (unsigned c = 0; c < op0->type->components(); c++)
+ result += op0->value.f[c] * op1->value.f[c];
+
+ return result;
+}
+
+static double
+dot_d(ir_constant *op0, ir_constant *op1)
+{
+ assert(op0->type->is_double() && op1->type->is_double());
+
+ double result = 0;
+ for (unsigned c = 0; c < op0->type->components(); c++)
+ result += op0->value.d[c] * op1->value.d[c];
+
+ return result;
+}
+
+/* This method is the only one supported by gcc. Unions in particular
+ * are iffy, and read-through-converted-pointer is killed by strict
+ * aliasing. OTOH, the compiler sees through the memcpy, so the
+ * resulting asm is reasonable.
+ */
+static float
+bitcast_u2f(unsigned int u)
+{
+ static_assert(sizeof(float) == sizeof(unsigned int),
+ "float and unsigned int size mismatch");
+ float f;
+ memcpy(&f, &u, sizeof(f));
+ return f;
+}
+
+static unsigned int
+bitcast_f2u(float f)
+{
+ static_assert(sizeof(float) == sizeof(unsigned int),
+ "float and unsigned int size mismatch");
+ unsigned int u;
+ memcpy(&u, &f, sizeof(f));
+ return u;
+}
+
+static double
+bitcast_u642d(uint64_t u)
+{
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "double and uint64_t size mismatch");
+ double d;
+ memcpy(&d, &u, sizeof(d));
+ return d;
+}
+
+static double
+bitcast_i642d(int64_t i)
+{
+ static_assert(sizeof(double) == sizeof(int64_t),
+ "double and int64_t size mismatch");
+ double d;
+ memcpy(&d, &i, sizeof(d));
+ return d;
+}
+
+static uint64_t
+bitcast_d2u64(double d)
+{
+ static_assert(sizeof(double) == sizeof(uint64_t),
+ "double and uint64_t size mismatch");
+ uint64_t u;
+ memcpy(&u, &d, sizeof(d));
+ return u;
+}
+
+static int64_t
+bitcast_d2i64(double d)
+{
+ static_assert(sizeof(double) == sizeof(int64_t),
+ "double and int64_t size mismatch");
+ int64_t i;
+ memcpy(&i, &d, sizeof(d));
+ return i;
+}
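+
+/* Illustrative only (not part of the upstream file): the expected values
+ * below follow from the IEEE-754 encodings, e.g. 1.0f is 0x3f800000.
+ */
+#if 0
+static void
+bitcast_examples(void)
+{
+   assert(bitcast_u2f(0x3f800000u) == 1.0f);
+   assert(bitcast_f2u(-2.0f) == 0xc0000000u);
+   assert(bitcast_d2u64(1.0) == 0x3ff0000000000000ull);
+}
+#endif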
+
+/**
+ * Evaluate one component of a floating-point 4x8 packing function.
+ */
+typedef uint8_t
+(*pack_1x8_func_t)(float);
+
+/**
+ * Evaluate one component of a floating-point 2x16 packing function.
+ */
+typedef uint16_t
+(*pack_1x16_func_t)(float);
+
+/**
+ * Evaluate one component of a floating-point 4x8 unpacking function.
+ */
+typedef float
+(*unpack_1x8_func_t)(uint8_t);
+
+/**
+ * Evaluate one component of a floating-point 2x16 unpacking function.
+ */
+typedef float
+(*unpack_1x16_func_t)(uint16_t);
+
+/**
+ * Evaluate a 2x16 floating-point packing function.
+ */
+static uint32_t
+pack_2x16(pack_1x16_func_t pack_1x16,
+ float x, float y)
+{
+ /* From section 8.4 of the GLSL ES 3.00 spec:
+ *
+ * packSnorm2x16
+ * -------------
+ * The first component of the vector will be written to the least
+ * significant bits of the output; the last component will be written to
+ * the most significant bits.
+ *
+ * The specifications for the other packing functions contain similar
+ * language.
+ */
+ uint32_t u = 0;
+ u |= ((uint32_t) pack_1x16(x) << 0);
+ u |= ((uint32_t) pack_1x16(y) << 16);
+ return u;
+}
+
+/**
+ * Evaluate a 4x8 floating-point packing function.
+ */
+static uint32_t
+pack_4x8(pack_1x8_func_t pack_1x8,
+ float x, float y, float z, float w)
+{
+ /* From section 8.4 of the GLSL 4.30 spec:
+ *
+ * packSnorm4x8
+ * ------------
+ * The first component of the vector will be written to the least
+ * significant bits of the output; the last component will be written to
+ * the most significant bits.
+ *
+ * The specifications for the other packing functions contain similar
+ * language.
+ */
+ uint32_t u = 0;
+ u |= ((uint32_t) pack_1x8(x) << 0);
+ u |= ((uint32_t) pack_1x8(y) << 8);
+ u |= ((uint32_t) pack_1x8(z) << 16);
+ u |= ((uint32_t) pack_1x8(w) << 24);
+ return u;
+}
+
+/**
+ * Evaluate a 2x16 floating-point unpacking function.
+ */
+static void
+unpack_2x16(unpack_1x16_func_t unpack_1x16,
+ uint32_t u,
+ float *x, float *y)
+{
+ /* From section 8.4 of the GLSL ES 3.00 spec:
+ *
+ * unpackSnorm2x16
+ * ---------------
+ * The first component of the returned vector will be extracted from
+ * the least significant bits of the input; the last component will be
+ * extracted from the most significant bits.
+ *
+ * The specifications for the other unpacking functions contain similar
+ * language.
+ */
+ *x = unpack_1x16((uint16_t) (u & 0xffff));
+ *y = unpack_1x16((uint16_t) (u >> 16));
+}
+
+/**
+ * Evaluate a 4x8 floating-point unpacking function.
+ */
+static void
+unpack_4x8(unpack_1x8_func_t unpack_1x8, uint32_t u,
+ float *x, float *y, float *z, float *w)
+{
+ /* From section 8.4 of the GLSL 4.30 spec:
+ *
+ * unpackSnorm4x8
+ * --------------
+ * The first component of the returned vector will be extracted from
+ * the least significant bits of the input; the last component will be
+ * extracted from the most significant bits.
+ *
+ * The specifications for the other unpacking functions contain similar
+ * language.
+ */
+ *x = unpack_1x8((uint8_t) (u & 0xff));
+ *y = unpack_1x8((uint8_t) (u >> 8));
+ *z = unpack_1x8((uint8_t) (u >> 16));
+ *w = unpack_1x8((uint8_t) (u >> 24));
+}
+
+/**
+ * Evaluate one component of packSnorm4x8.
+ */
+static uint8_t
+pack_snorm_1x8(float x)
+{
+ /* From section 8.4 of the GLSL 4.30 spec:
+ *
+ * packSnorm4x8
+ * ------------
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packSnorm4x8: round(clamp(c, -1, +1) * 127.0)
+ */
+ return (uint8_t)
+ _mesa_lroundevenf(CLAMP(x, -1.0f, +1.0f) * 127.0f);
+}
+
+/**
+ * Evaluate one component of packSnorm2x16.
+ */
+static uint16_t
+pack_snorm_1x16(float x)
+{
+ /* From section 8.4 of the GLSL ES 3.00 spec:
+ *
+ * packSnorm2x16
+ * -------------
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packSnorm2x16: round(clamp(c, -1, +1) * 32767.0)
+ */
+ return (uint16_t)
+ _mesa_lroundevenf(CLAMP(x, -1.0f, +1.0f) * 32767.0f);
+}
+
+/**
+ * Evaluate one component of unpackSnorm4x8.
+ */
+static float
+unpack_snorm_1x8(uint8_t u)
+{
+ /* From section 8.4 of the GLSL 4.30 spec:
+ *
+ * unpackSnorm4x8
+ * --------------
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackSnorm4x8: clamp(f / 127.0, -1, +1)
+ */
+ return CLAMP((int8_t) u / 127.0f, -1.0f, +1.0f);
+}
+
+/**
+ * Evaluate one component of unpackSnorm2x16.
+ */
+static float
+unpack_snorm_1x16(uint16_t u)
+{
+ /* From section 8.4 of the GLSL ES 3.00 spec:
+ *
+ * unpackSnorm2x16
+ * ---------------
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackSnorm2x16: clamp(f / 32767.0, -1, +1)
+ */
+ return CLAMP((int16_t) u / 32767.0f, -1.0f, +1.0f);
+}
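+
+/* Illustrative only (not part of the upstream file): a round-trip through the
+ * snorm helpers above. packSnorm2x16 on (1.0, -1.0) puts 0x7fff in the low
+ * half and 0x8001 in the high half, per the LSB-first rule quoted above.
+ */
+#if 0
+static void
+snorm_pack_example(void)
+{
+   const uint32_t u = pack_2x16(pack_snorm_1x16, 1.0f, -1.0f);
+   assert(u == 0x80017fffu);
+
+   float x, y;
+   unpack_2x16(unpack_snorm_1x16, u, &x, &y);
+   assert(x == 1.0f && y == -1.0f);
+}
+#endif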
+
+/**
+ * Evaluate one component of packUnorm4x8.
+ */
+static uint8_t
+pack_unorm_1x8(float x)
+{
+ /* From section 8.4 of the GLSL 4.30 spec:
+ *
+ * packUnorm4x8
+ * ------------
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packUnorm4x8: round(clamp(c, 0, +1) * 255.0)
+ */
+ return (uint8_t) (int) _mesa_roundevenf(CLAMP(x, 0.0f, 1.0f) * 255.0f);
+}
+
+/**
+ * Evaluate one component of packUnorm2x16.
+ */
+static uint16_t
+pack_unorm_1x16(float x)
+{
+ /* From section 8.4 of the GLSL ES 3.00 spec:
+ *
+ * packUnorm2x16
+ * -------------
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packUnorm2x16: round(clamp(c, 0, +1) * 65535.0)
+ */
+ return (uint16_t) (int)
+ _mesa_roundevenf(CLAMP(x, 0.0f, 1.0f) * 65535.0f);
+}
+
+/**
+ * Evaluate one component of unpackUnorm4x8.
+ */
+static float
+unpack_unorm_1x8(uint8_t u)
+{
+ /* From section 8.4 of the GLSL 4.30 spec:
+ *
+ * unpackUnorm4x8
+ * --------------
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackUnorm4x8: f / 255.0
+ */
+ return (float) u / 255.0f;
+}
+
+/**
+ * Evaluate one component of unpackUnorm2x16.
+ */
+static float
+unpack_unorm_1x16(uint16_t u)
+{
+ /* From section 8.4 of the GLSL ES 3.00 spec:
+ *
+ * unpackUnorm2x16
+ * ---------------
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackUnorm2x16: f / 65535.0
+ */
+ return (float) u / 65535.0f;
+}
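+
+/* Illustrative only (not part of the upstream file): packUnorm4x8 on
+ * (0.0, 1.0, 0.5, 0.25). Note round(0.5 * 255) = round(127.5) rounds to
+ * even, giving 128 (0x80).
+ */
+#if 0
+static void
+unorm_pack_example(void)
+{
+   const uint32_t u = pack_4x8(pack_unorm_1x8, 0.0f, 1.0f, 0.5f, 0.25f);
+   assert(u == 0x4080ff00u);
+}
+#endif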
+
+/**
+ * Evaluate one component of packHalf2x16.
+ */
+static uint16_t
+pack_half_1x16(float x)
+{
+ return _mesa_float_to_half(x);
+}
+
+/**
+ * Evaluate one component of unpackHalf2x16.
+ */
+static float
+unpack_half_1x16(uint16_t u)
+{
+ return _mesa_half_to_float(u);
+}
+
+static int32_t
+iadd_saturate(int32_t a, int32_t b)
+{
+ return CLAMP(int64_t(a) + int64_t(b), INT32_MIN, INT32_MAX);
+}
+
+static int64_t
+iadd64_saturate(int64_t a, int64_t b)
+{
+ if (a < 0 && b < INT64_MIN - a)
+ return INT64_MIN;
+
+ if (a > 0 && b > INT64_MAX - a)
+ return INT64_MAX;
+
+ return a + b;
+}
+
+static int32_t
+isub_saturate(int32_t a, int32_t b)
+{
+ return CLAMP(int64_t(a) - int64_t(b), INT32_MIN, INT32_MAX);
+}
+
+static int64_t
+isub64_saturate(int64_t a, int64_t b)
+{
+ if (b > 0 && a < INT64_MIN + b)
+ return INT64_MIN;
+
+ if (b < 0 && a > INT64_MAX + b)
+ return INT64_MAX;
+
+ return a - b;
+}
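+
+/* Illustrative only (not part of the upstream file): saturating arithmetic
+ * clamps at the type limits instead of wrapping.
+ */
+#if 0
+static void
+saturate_examples(void)
+{
+   assert(iadd_saturate(INT32_MAX, 1) == INT32_MAX);
+   assert(isub_saturate(INT32_MIN, 1) == INT32_MIN);
+   assert(iadd64_saturate(INT64_MIN, -1) == INT64_MIN);
+}
+#endif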
+
+static uint64_t
+pack_2x32(uint32_t a, uint32_t b)
+{
+ uint64_t v = a;
+ v |= (uint64_t)b << 32;
+ return v;
+}
+
+static void
+unpack_2x32(uint64_t p, uint32_t *a, uint32_t *b)
+{
+ *a = p & 0xffffffff;
+ *b = (p >> 32);
+}
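+
+/* Illustrative only (not part of the upstream file): pack_2x32 stores its
+ * first argument in the low word.
+ */
+#if 0
+static void
+pack_2x32_example(void)
+{
+   uint32_t lo, hi;
+   unpack_2x32(pack_2x32(0x12345678u, 0x9abcdef0u), &lo, &hi);
+   assert(lo == 0x12345678u && hi == 0x9abcdef0u);
+}
+#endif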
+
+/**
+ * Get the constant that is ultimately referenced by an r-value, in a constant
+ * expression evaluation context.
+ *
+ * The offset is used when the reference is to a specific column of a matrix.
+ */
+static bool
+constant_referenced(const ir_dereference *deref,
+ struct hash_table *variable_context,
+ ir_constant *&store, int &offset)
+{
+ store = NULL;
+ offset = 0;
+
+ if (variable_context == NULL)
+ return false;
+
+ switch (deref->ir_type) {
+ case ir_type_dereference_array: {
+ const ir_dereference_array *const da =
+ (const ir_dereference_array *) deref;
+
+ ir_constant *const index_c =
+ da->array_index->constant_expression_value(variable_context);
+
+ if (!index_c || !index_c->type->is_scalar() ||
+ !index_c->type->is_integer_32())
+ break;
+
+ const int index = index_c->type->base_type == GLSL_TYPE_INT ?
+ index_c->get_int_component(0) :
+ index_c->get_uint_component(0);
+
+ ir_constant *substore;
+ int suboffset;
+
+ const ir_dereference *const deref = da->array->as_dereference();
+ if (!deref)
+ break;
+
+ if (!constant_referenced(deref, variable_context, substore, suboffset))
+ break;
+
+ const glsl_type *const vt = da->array->type;
+ if (vt->is_array()) {
+ store = substore->get_array_element(index);
+ offset = 0;
+ } else if (vt->is_matrix()) {
+ store = substore;
+ offset = index * vt->vector_elements;
+ } else if (vt->is_vector()) {
+ store = substore;
+ offset = suboffset + index;
+ }
+
+ break;
+ }
+
+ case ir_type_dereference_record: {
+ const ir_dereference_record *const dr =
+ (const ir_dereference_record *) deref;
+
+ const ir_dereference *const deref = dr->record->as_dereference();
+ if (!deref)
+ break;
+
+ ir_constant *substore;
+ int suboffset;
+
+ if (!constant_referenced(deref, variable_context, substore, suboffset))
+ break;
+
+      /* The sub-offset from the inner dereference is dropped on the floor
+       * here, so it had better be zero.
+       */
+ assert(suboffset == 0);
+
+ store = substore->get_record_field(dr->field_idx);
+ break;
+ }
+
+ case ir_type_dereference_variable: {
+ const ir_dereference_variable *const dv =
+ (const ir_dereference_variable *) deref;
+
+ hash_entry *entry = _mesa_hash_table_search(variable_context, dv->var);
+ if (entry)
+ store = (ir_constant *) entry->data;
+ break;
+ }
+
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+
+ return store != NULL;
+}
+
+
+ir_constant *
+ir_rvalue::constant_expression_value(void *, struct hash_table *)
+{
+ assert(this->type->is_error());
+ return NULL;
+}
+
+static uint32_t
+bitfield_reverse(uint32_t v)
+{
+ /* http://graphics.stanford.edu/~seander/bithacks.html#BitReverseObvious */
+ uint32_t r = v; // r will be reversed bits of v; first get LSB of v
+ int s = sizeof(v) * CHAR_BIT - 1; // extra shift needed at end
+
+ for (v >>= 1; v; v >>= 1) {
+ r <<= 1;
+ r |= v & 1;
+ s--;
+ }
+ r <<= s; // shift when v's highest bits are zero
+
+ return r;
+}
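+
+/* Illustrative only (not part of the upstream file): bit 0 of the input
+ * becomes bit 31 of the result.
+ */
+#if 0
+static void
+bitfield_reverse_examples(void)
+{
+   assert(bitfield_reverse(0x00000001u) == 0x80000000u);
+   assert(bitfield_reverse(0xf0000000u) == 0x0000000fu);
+}
+#endif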
+
+static int
+find_msb_uint(uint32_t v)
+{
+ int count = 0;
+
+ /* If v == 0, then the loop will terminate when count == 32. In that case
+ * 31-count will produce the -1 result required by GLSL findMSB().
+ */
+ while (((v & (1u << 31)) == 0) && count != 32) {
+ count++;
+ v <<= 1;
+ }
+
+ return 31 - count;
+}
+
+static int
+find_msb_int(int32_t v)
+{
+   /* If v is negative, findMSB() returns the position of the most significant
+ * zero bit.
+ */
+ return find_msb_uint(v < 0 ? ~v : v);
+}
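+
+/* Illustrative only (not part of the upstream file): the GLSL findMSB()
+ * corner cases handled above.
+ */
+#if 0
+static void
+find_msb_examples(void)
+{
+   assert(find_msb_uint(0) == -1);            /* no bits set */
+   assert(find_msb_uint(0x80000000u) == 31);
+   assert(find_msb_int(-1) == -1);            /* all bits set: no 0 bit */
+   assert(find_msb_int(-2) == 0);             /* only bit 0 is 0 */
+}
+#endif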
+
+static float
+ldexpf_flush_subnormal(float x, int exp)
+{
+ const float result = ldexpf(x, exp);
+
+ /* Flush subnormal values to zero. */
+ return !isnormal(result) ? copysignf(0.0f, x) : result;
+}
+
+static double
+ldexp_flush_subnormal(double x, int exp)
+{
+ const double result = ldexp(x, exp);
+
+ /* Flush subnormal values to zero. */
+ return !isnormal(result) ? copysign(0.0, x) : result;
+}
+
+static uint32_t
+bitfield_extract_uint(uint32_t value, int offset, int bits)
+{
+ if (bits == 0)
+ return 0;
+ else if (offset < 0 || bits < 0)
+ return 0; /* Undefined, per spec. */
+ else if (offset + bits > 32)
+ return 0; /* Undefined, per spec. */
+ else {
+ value <<= 32 - bits - offset;
+ value >>= 32 - bits;
+ return value;
+ }
+}
+
+static int32_t
+bitfield_extract_int(int32_t value, int offset, int bits)
+{
+ if (bits == 0)
+ return 0;
+ else if (offset < 0 || bits < 0)
+ return 0; /* Undefined, per spec. */
+ else if (offset + bits > 32)
+ return 0; /* Undefined, per spec. */
+ else {
+ value <<= 32 - bits - offset;
+ value >>= 32 - bits;
+ return value;
+ }
+}
+
+static uint32_t
+bitfield_insert(uint32_t base, uint32_t insert, int offset, int bits)
+{
+ if (bits == 0)
+ return base;
+ else if (offset < 0 || bits < 0)
+ return 0; /* Undefined, per spec. */
+ else if (offset + bits > 32)
+ return 0; /* Undefined, per spec. */
+ else {
+ unsigned insert_mask = ((1ull << bits) - 1) << offset;
+
+ insert <<= offset;
+ insert &= insert_mask;
+ base &= ~insert_mask;
+
+ return base | insert;
+ }
+}
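+
+/* Illustrative only (not part of the upstream file): extracting and
+ * re-inserting a 4-bit field at offset 8.
+ */
+#if 0
+static void
+bitfield_examples(void)
+{
+   assert(bitfield_extract_uint(0xabcd1234u, 8, 4) == 0x2u);
+   assert(bitfield_insert(0xabcd1234u, 0xfu, 8, 4) == 0xabcd1f34u);
+}
+#endif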
+
+ir_constant *
+ir_expression::constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context)
+{
+ assert(mem_ctx);
+
+ if (this->type->is_error())
+ return NULL;
+
+ ir_constant *op[ARRAY_SIZE(this->operands)] = { NULL, };
+ ir_constant_data data;
+
+ memset(&data, 0, sizeof(data));
+
+ for (unsigned operand = 0; operand < this->num_operands; operand++) {
+ op[operand] =
+ this->operands[operand]->constant_expression_value(mem_ctx,
+ variable_context);
+ if (!op[operand])
+ return NULL;
+ }
+
+ for (unsigned operand = 0; operand < this->num_operands; operand++) {
+ if (op[operand]->type->base_type == GLSL_TYPE_FLOAT16) {
+ const struct glsl_type *float_type =
+ glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ op[operand]->type->vector_elements,
+ op[operand]->type->matrix_columns,
+ op[operand]->type->explicit_stride,
+ op[operand]->type->interface_row_major);
+
+ ir_constant_data f;
+ for (unsigned i = 0; i < ARRAY_SIZE(f.f); i++)
+ f.f[i] = _mesa_half_to_float(op[operand]->value.f16[i]);
+
+ op[operand] = new(mem_ctx) ir_constant(float_type, &f);
+ }
+ }
+
+ if (op[1] != NULL)
+ switch (this->operation) {
+ case ir_binop_lshift:
+ case ir_binop_rshift:
+ case ir_binop_ldexp:
+ case ir_binop_interpolate_at_offset:
+ case ir_binop_interpolate_at_sample:
+ case ir_binop_vector_extract:
+ case ir_triop_csel:
+ case ir_triop_bitfield_extract:
+ break;
+
+ default:
+ assert(op[0]->type->base_type == op[1]->type->base_type);
+ break;
+ }
+
+ bool op0_scalar = op[0]->type->is_scalar();
+ bool op1_scalar = op[1] != NULL && op[1]->type->is_scalar();
+
+ /* When iterating over a vector or matrix's components, we want to increase
+ * the loop counter. However, for scalars, we want to stay at 0.
+ */
+ unsigned c0_inc = op0_scalar ? 0 : 1;
+ unsigned c1_inc = op1_scalar ? 0 : 1;
+ unsigned components;
+ if (op1_scalar || !op[1]) {
+ components = op[0]->type->components();
+ } else {
+ components = op[1]->type->components();
+ }
+
+ /* Handle array operations here, rather than below. */
+ if (op[0]->type->is_array()) {
+ assert(op[1] != NULL && op[1]->type->is_array());
+ switch (this->operation) {
+ case ir_binop_all_equal:
+ return new(mem_ctx) ir_constant(op[0]->has_value(op[1]));
+ case ir_binop_any_nequal:
+ return new(mem_ctx) ir_constant(!op[0]->has_value(op[1]));
+ default:
+ break;
+ }
+ return NULL;
+ }
+
+#include "ir_expression_operation_constant.h"
+
+ if (this->type->base_type == GLSL_TYPE_FLOAT16) {
+ ir_constant_data f;
+ for (unsigned i = 0; i < ARRAY_SIZE(f.f16); i++)
+ f.f16[i] = _mesa_float_to_half(data.f[i]);
+
+ return new(mem_ctx) ir_constant(this->type, &f);
+ }
+
+
+ return new(mem_ctx) ir_constant(this->type, &data);
+}
+
+
+ir_constant *
+ir_texture::constant_expression_value(void *, struct hash_table *)
+{
+ /* texture lookups aren't constant expressions */
+ return NULL;
+}
+
+
+ir_constant *
+ir_swizzle::constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context)
+{
+ assert(mem_ctx);
+
+ ir_constant *v = this->val->constant_expression_value(mem_ctx,
+ variable_context);
+
+ if (v != NULL) {
+ ir_constant_data data = { { 0 } };
+
+ const unsigned swiz_idx[4] = {
+ this->mask.x, this->mask.y, this->mask.z, this->mask.w
+ };
+
+ for (unsigned i = 0; i < this->mask.num_components; i++) {
+ switch (v->type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT: data.u[i] = v->value.u[swiz_idx[i]]; break;
+ case GLSL_TYPE_FLOAT: data.f[i] = v->value.f[swiz_idx[i]]; break;
+ case GLSL_TYPE_FLOAT16: data.f16[i] = v->value.f16[swiz_idx[i]]; break;
+ case GLSL_TYPE_BOOL: data.b[i] = v->value.b[swiz_idx[i]]; break;
+ case GLSL_TYPE_DOUBLE:data.d[i] = v->value.d[swiz_idx[i]]; break;
+ case GLSL_TYPE_UINT64:data.u64[i] = v->value.u64[swiz_idx[i]]; break;
+ case GLSL_TYPE_INT64: data.i64[i] = v->value.i64[swiz_idx[i]]; break;
+ default: assert(!"Should not get here."); break;
+ }
+ }
+
+ return new(mem_ctx) ir_constant(this->type, &data);
+ }
+ return NULL;
+}
+
+
+ir_constant *
+ir_dereference_variable::constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context)
+{
+ assert(var);
+ assert(mem_ctx);
+
+ /* Give priority to the context hashtable, if it exists */
+ if (variable_context) {
+ hash_entry *entry = _mesa_hash_table_search(variable_context, var);
+
+ if(entry)
+ return (ir_constant *) entry->data;
+ }
+
+ /* The constant_value of a uniform variable is its initializer,
+ * not the lifetime constant value of the uniform.
+ */
+ if (var->data.mode == ir_var_uniform)
+ return NULL;
+
+ if (!var->constant_value)
+ return NULL;
+
+ return var->constant_value->clone(mem_ctx, NULL);
+}
+
+
+ir_constant *
+ir_dereference_array::constant_expression_value(void *mem_ctx,
+ struct hash_table *variable_context)
+{
+ assert(mem_ctx);
+
+ ir_constant *array = this->array->constant_expression_value(mem_ctx, variable_context);
+ ir_constant *idx = this->array_index->constant_expression_value(mem_ctx, variable_context);
+
+ if ((array != NULL) && (idx != NULL)) {
+ if (array->type->is_matrix()) {
+ /* Array access of a matrix results in a vector.
+ */
+ const unsigned column = idx->value.u[0];
+
+ const glsl_type *const column_type = array->type->column_type();
+
+ /* Offset in the constant matrix to the first element of the column
+ * to be extracted.
+ */
+ const unsigned mat_idx = column * column_type->vector_elements;
+
+ ir_constant_data data = { { 0 } };
+
+ switch (column_type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ for (unsigned i = 0; i < column_type->vector_elements; i++)
+ data.u[i] = array->value.u[mat_idx + i];
+
+ break;
+
+ case GLSL_TYPE_FLOAT:
+ for (unsigned i = 0; i < column_type->vector_elements; i++)
+ data.f[i] = array->value.f[mat_idx + i];
+
+ break;
+
+ case GLSL_TYPE_DOUBLE:
+ for (unsigned i = 0; i < column_type->vector_elements; i++)
+ data.d[i] = array->value.d[mat_idx + i];
+
+ break;
+
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+
+ return new(mem_ctx) ir_constant(column_type, &data);
+ } else if (array->type->is_vector()) {
+ const unsigned component = idx->value.u[0];
+
+ return new(mem_ctx) ir_constant(array, component);
+ } else if (array->type->is_array()) {
+ const unsigned index = idx->value.u[0];
+ return array->get_array_element(index)->clone(mem_ctx, NULL);
+ }
+ }
+ return NULL;
+}
+
+
+ir_constant *
+ir_dereference_record::constant_expression_value(void *mem_ctx,
+ struct hash_table *)
+{
+ assert(mem_ctx);
+
+ ir_constant *v = this->record->constant_expression_value(mem_ctx);
+
+ return (v != NULL) ? v->get_record_field(this->field_idx) : NULL;
+}
+
+
+ir_constant *
+ir_assignment::constant_expression_value(void *, struct hash_table *)
+{
+ /* FINISHME: Handle CEs involving assignment (return RHS) */
+ return NULL;
+}
+
+
+ir_constant *
+ir_constant::constant_expression_value(void *, struct hash_table *)
+{
+ return this;
+}
+
+
+ir_constant *
+ir_call::constant_expression_value(void *mem_ctx, struct hash_table *variable_context)
+{
+ assert(mem_ctx);
+
+ return this->callee->constant_expression_value(mem_ctx,
+ &this->actual_parameters,
+ variable_context);
+}
+
+
+bool ir_function_signature::constant_expression_evaluate_expression_list(void *mem_ctx,
+ const struct exec_list &body,
+ struct hash_table *variable_context,
+ ir_constant **result)
+{
+ assert(mem_ctx);
+
+ foreach_in_list(ir_instruction, inst, &body) {
+ switch(inst->ir_type) {
+
+ /* (declare () type symbol) */
+ case ir_type_variable: {
+ ir_variable *var = inst->as_variable();
+ _mesa_hash_table_insert(variable_context, var, ir_constant::zero(this, var->type));
+ break;
+ }
+
+ /* (assign [condition] (write-mask) (ref) (value)) */
+ case ir_type_assignment: {
+ ir_assignment *asg = inst->as_assignment();
+ if (asg->condition) {
+ ir_constant *cond =
+ asg->condition->constant_expression_value(mem_ctx,
+ variable_context);
+ if (!cond)
+ return false;
+ if (!cond->get_bool_component(0))
+ break;
+ }
+
+ ir_constant *store = NULL;
+ int offset = 0;
+
+ if (!constant_referenced(asg->lhs, variable_context, store, offset))
+ return false;
+
+ ir_constant *value =
+ asg->rhs->constant_expression_value(mem_ctx, variable_context);
+
+ if (!value)
+ return false;
+
+ store->copy_masked_offset(value, offset, asg->write_mask);
+ break;
+ }
+
+ /* (return (expression)) */
+ case ir_type_return:
+ assert (result);
+ *result =
+ inst->as_return()->value->constant_expression_value(mem_ctx,
+ variable_context);
+ return *result != NULL;
+
+ /* (call name (ref) (params))*/
+ case ir_type_call: {
+ ir_call *call = inst->as_call();
+
+ /* Just say no to void functions in constant expressions. We
+ * don't need them at that point.
+ */
+
+ if (!call->return_deref)
+ return false;
+
+ ir_constant *store = NULL;
+ int offset = 0;
+
+ if (!constant_referenced(call->return_deref, variable_context,
+ store, offset))
+ return false;
+
+ ir_constant *value =
+ call->constant_expression_value(mem_ctx, variable_context);
+
+ if(!value)
+ return false;
+
+ store->copy_offset(value, offset);
+ break;
+ }
+
+ /* (if condition (then-instructions) (else-instructions)) */
+ case ir_type_if: {
+ ir_if *iif = inst->as_if();
+
+ ir_constant *cond =
+ iif->condition->constant_expression_value(mem_ctx,
+ variable_context);
+ if (!cond || !cond->type->is_boolean())
+ return false;
+
+ exec_list &branch = cond->get_bool_component(0) ? iif->then_instructions : iif->else_instructions;
+
+ *result = NULL;
+ if (!constant_expression_evaluate_expression_list(mem_ctx, branch,
+ variable_context,
+ result))
+ return false;
+
+ /* If there was a return in the branch chosen, drop out now. */
+ if (*result)
+ return true;
+
+ break;
+ }
+
+ /* Every other expression type, we drop out. */
+ default:
+ return false;
+ }
+ }
+
+ /* Reaching the end of the block is not an error condition */
+ if (result)
+ *result = NULL;
+
+ return true;
+}
+
+ir_constant *
+ir_function_signature::constant_expression_value(void *mem_ctx,
+ exec_list *actual_parameters,
+ struct hash_table *variable_context)
+{
+ assert(mem_ctx);
+
+ const glsl_type *type = this->return_type;
+ if (type == glsl_type::void_type)
+ return NULL;
+
+ /* From the GLSL 1.20 spec, page 23:
+ * "Function calls to user-defined functions (non-built-in functions)
+ * cannot be used to form constant expressions."
+ */
+ if (!this->is_builtin())
+ return NULL;
+
+ /*
+ * Of the builtin functions, only the texture lookups and the noise
+ * ones must not be used in constant expressions. Texture instructions
+ * include special ir_texture opcodes which can't be constant-folded (see
+    * ir_texture::constant_expression_value). Noise functions, however, have
+    * to be special-cased here.
+ */
+ if (strcmp(this->function_name(), "noise1") == 0 ||
+ strcmp(this->function_name(), "noise2") == 0 ||
+ strcmp(this->function_name(), "noise3") == 0 ||
+ strcmp(this->function_name(), "noise4") == 0)
+ return NULL;
+
+ /* Initialize the table of dereferencable names with the function
+ * parameters. Verify their const-ness on the way.
+ *
+ * We expect the correctness of the number of parameters to have
+ * been checked earlier.
+ */
+ hash_table *deref_hash = _mesa_pointer_hash_table_create(NULL);
+
+ /* If "origin" is non-NULL, then the function body is there. So we
+ * have to use the variable objects from the object with the body,
+ * but the parameter instanciation on the current object.
+ */
+ const exec_node *parameter_info = origin ? origin->parameters.get_head_raw() : parameters.get_head_raw();
+
+ foreach_in_list(ir_rvalue, n, actual_parameters) {
+ ir_constant *constant =
+ n->constant_expression_value(mem_ctx, variable_context);
+ if (constant == NULL) {
+ _mesa_hash_table_destroy(deref_hash, NULL);
+ return NULL;
+ }
+
+
+ ir_variable *var = (ir_variable *)parameter_info;
+ _mesa_hash_table_insert(deref_hash, var, constant);
+
+ parameter_info = parameter_info->next;
+ }
+
+ ir_constant *result = NULL;
+
+ /* Now run the builtin function until something non-constant
+ * happens or we get the result.
+ */
+ if (constant_expression_evaluate_expression_list(mem_ctx, origin ? origin->body : body, deref_hash, &result) &&
+ result)
+ result = result->clone(mem_ctx, NULL);
+
+ _mesa_hash_table_destroy(deref_hash, NULL);
+
+ return result;
+}
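+
+/* Illustrative only (not part of the upstream file): a sketch of how a
+ * folding pass might use the machinery above; try_fold is a hypothetical
+ * name.
+ */
+#if 0
+static ir_rvalue *
+try_fold(void *mem_ctx, ir_rvalue *rv)
+{
+   /* constant_expression_value() returns NULL when rv is not a constant
+    * expression (e.g. it reads a uniform or contains a texture lookup);
+    * in that case the original rvalue is kept.
+    */
+   ir_constant *folded = rv->constant_expression_value(mem_ctx);
+   return folded != NULL ? folded : rv;
+}
+#endif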
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_equals.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_equals.cpp
new file mode 100644
index 0000000000..f7359e2390
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_equals.cpp
@@ -0,0 +1,211 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+
+/**
+ * Helper for checking equality when one instruction might be NULL, since you
+ * can't access a's vtable in that case.
+ */
+static bool
+possibly_null_equals(const ir_instruction *a, const ir_instruction *b,
+ enum ir_node_type ignore)
+{
+ if (!a || !b)
+ return !a && !b;
+
+ return a->equals(b, ignore);
+}
+
+/**
+ * The base equality function: Return not equal for anything we don't know
+ * about.
+ */
+bool
+ir_instruction::equals(const ir_instruction *, enum ir_node_type) const
+{
+ return false;
+}
+
+bool
+ir_constant::equals(const ir_instruction *ir, enum ir_node_type) const
+{
+ const ir_constant *other = ir->as_constant();
+ if (!other)
+ return false;
+
+ if (type != other->type)
+ return false;
+
+ for (unsigned i = 0; i < type->components(); i++) {
+ if (type->is_double()) {
+ if (value.d[i] != other->value.d[i])
+ return false;
+ } else {
+ if (value.u[i] != other->value.u[i])
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool
+ir_dereference_variable::equals(const ir_instruction *ir,
+ enum ir_node_type) const
+{
+ const ir_dereference_variable *other = ir->as_dereference_variable();
+ if (!other)
+ return false;
+
+ return var == other->var;
+}
+
+bool
+ir_dereference_array::equals(const ir_instruction *ir,
+ enum ir_node_type ignore) const
+{
+ const ir_dereference_array *other = ir->as_dereference_array();
+ if (!other)
+ return false;
+
+ if (type != other->type)
+ return false;
+
+ if (!array->equals(other->array, ignore))
+ return false;
+
+ if (!array_index->equals(other->array_index, ignore))
+ return false;
+
+ return true;
+}
+
+bool
+ir_swizzle::equals(const ir_instruction *ir,
+ enum ir_node_type ignore) const
+{
+ const ir_swizzle *other = ir->as_swizzle();
+ if (!other)
+ return false;
+
+ if (type != other->type)
+ return false;
+
+ if (ignore != ir_type_swizzle) {
+ if (mask.x != other->mask.x ||
+ mask.y != other->mask.y ||
+ mask.z != other->mask.z ||
+ mask.w != other->mask.w) {
+ return false;
+ }
+ }
+
+ return val->equals(other->val, ignore);
+}
+
+bool
+ir_texture::equals(const ir_instruction *ir, enum ir_node_type ignore) const
+{
+ const ir_texture *other = ir->as_texture();
+ if (!other)
+ return false;
+
+ if (type != other->type)
+ return false;
+
+ if (op != other->op)
+ return false;
+
+ if (!possibly_null_equals(coordinate, other->coordinate, ignore))
+ return false;
+
+ if (!possibly_null_equals(projector, other->projector, ignore))
+ return false;
+
+ if (!possibly_null_equals(shadow_comparator, other->shadow_comparator, ignore))
+ return false;
+
+ if (!possibly_null_equals(offset, other->offset, ignore))
+ return false;
+
+ if (!sampler->equals(other->sampler, ignore))
+ return false;
+
+ switch (op) {
+ case ir_tex:
+ case ir_lod:
+ case ir_query_levels:
+ case ir_texture_samples:
+ case ir_samples_identical:
+ break;
+ case ir_txb:
+ if (!lod_info.bias->equals(other->lod_info.bias, ignore))
+ return false;
+ break;
+ case ir_txl:
+ case ir_txf:
+ case ir_txs:
+ if (!lod_info.lod->equals(other->lod_info.lod, ignore))
+ return false;
+ break;
+ case ir_txd:
+ if (!lod_info.grad.dPdx->equals(other->lod_info.grad.dPdx, ignore) ||
+ !lod_info.grad.dPdy->equals(other->lod_info.grad.dPdy, ignore))
+ return false;
+ break;
+ case ir_txf_ms:
+ if (!lod_info.sample_index->equals(other->lod_info.sample_index, ignore))
+ return false;
+ break;
+ case ir_tg4:
+ if (!lod_info.component->equals(other->lod_info.component, ignore))
+ return false;
+ break;
+ default:
+ assert(!"Unrecognized texture op");
+ }
+
+ return true;
+}
+
+bool
+ir_expression::equals(const ir_instruction *ir, enum ir_node_type ignore) const
+{
+ const ir_expression *other = ir->as_expression();
+ if (!other)
+ return false;
+
+ if (type != other->type)
+ return false;
+
+ if (operation != other->operation)
+ return false;
+
+ for (unsigned i = 0; i < num_operands; i++) {
+ if (!operands[i]->equals(other->operands[i], ignore))
+ return false;
+ }
+
+ return true;
+}
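+
+/* Illustrative only (not part of the upstream file): the `ignore` parameter
+ * lets a pass compare values modulo one node type, e.g. matching expressions
+ * while ignoring swizzle masks.
+ */
+#if 0
+static bool
+equal_ignoring_swizzle(const ir_rvalue *a, const ir_rvalue *b)
+{
+   return a->equals(b, ir_type_swizzle);
+}
+#endif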
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp
new file mode 100644
index 0000000000..e4ca850d2f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_expression_flattening.cpp
+ *
+ * Takes the leaves of expression trees and makes them dereferences of
+ * assignments of the leaves to temporaries, according to a predicate.
+ *
+ * This is used for breaking down matrix operations, where it's easier to
+ * create a temporary and work on each of its vector components individually.
+ */
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_expression_flattening.h"
+
+class ir_expression_flattening_visitor : public ir_rvalue_visitor {
+public:
+ ir_expression_flattening_visitor(bool (*predicate)(ir_instruction *ir))
+ {
+ this->predicate = predicate;
+ }
+
+ virtual ~ir_expression_flattening_visitor()
+ {
+ /* empty */
+ }
+
+ void handle_rvalue(ir_rvalue **rvalue);
+ bool (*predicate)(ir_instruction *ir);
+};
+
+void
+do_expression_flattening(exec_list *instructions,
+ bool (*predicate)(ir_instruction *ir))
+{
+ ir_expression_flattening_visitor v(predicate);
+
+ foreach_in_list(ir_instruction, ir, instructions) {
+ ir->accept(&v);
+ }
+}
+
+void
+ir_expression_flattening_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ ir_variable *var;
+ ir_assignment *assign;
+ ir_rvalue *ir = *rvalue;
+
+ if (!ir || !this->predicate(ir))
+ return;
+
+ void *ctx = ralloc_parent(ir);
+
+ var = new(ctx) ir_variable(ir->type, "flattening_tmp", ir_var_temporary);
+ base_ir->insert_before(var);
+
+ assign = new(ctx) ir_assignment(new(ctx) ir_dereference_variable(var), ir);
+ base_ir->insert_before(assign);
+
+ *rvalue = new(ctx) ir_dereference_variable(var);
+}
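+
+/* Illustrative only (not part of the upstream file): a predicate that
+ * flattens every expression leaf. Real callers pass something narrower,
+ * such as "is this a matrix-typed expression".
+ */
+#if 0
+static bool
+flatten_everything(ir_instruction *ir)
+{
+   return ir->as_expression() != NULL;
+}
+
+static void
+flatten_example(exec_list *instructions)
+{
+   do_expression_flattening(instructions, flatten_everything);
+}
+#endif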
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h
new file mode 100644
index 0000000000..e75bdfd842
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file ir_expression_flattening.h
+ *
+ * Takes the leaves of expression trees and makes them dereferences of
+ * assignments of the leaves to temporaries, according to a predicate.
+ *
+ * This is used for automatic function inlining, where we want to take
+ * an expression containing a call and move the call out to its own
+ * assignment so that we can inline it at the appropriate place in the
+ * instruction stream.
+ */
+
+#ifndef GLSL_IR_EXPRESSION_FLATTENING_H
+#define GLSL_IR_EXPRESSION_FLATTENING_H
+
+void do_expression_flattening(exec_list *instructions,
+ bool (*predicate)(ir_instruction *ir));
+
+#endif /* GLSL_IR_EXPRESSION_FLATTENING_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.h
new file mode 100644
index 0000000000..112d782fb0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+enum ir_expression_operation {
+ ir_unop_bit_not,
+ ir_unop_logic_not,
+ ir_unop_neg,
+ ir_unop_abs,
+ ir_unop_sign,
+ ir_unop_rcp,
+ ir_unop_rsq,
+ ir_unop_sqrt,
+ ir_unop_exp,
+ ir_unop_log,
+ ir_unop_exp2,
+ ir_unop_log2,
+ ir_unop_f2i,
+ ir_unop_f2u,
+ ir_unop_i2f,
+ ir_unop_f2b,
+ ir_unop_b2f,
+ ir_unop_b2f16,
+ ir_unop_i2b,
+ ir_unop_b2i,
+ ir_unop_u2f,
+ ir_unop_i2u,
+ ir_unop_u2i,
+ ir_unop_d2f,
+ ir_unop_f2d,
+ ir_unop_f2f16,
+ ir_unop_f2fmp,
+ ir_unop_f162f,
+ ir_unop_d2i,
+ ir_unop_i2d,
+ ir_unop_d2u,
+ ir_unop_u2d,
+ ir_unop_d2b,
+ ir_unop_f162b,
+ ir_unop_bitcast_i2f,
+ ir_unop_bitcast_f2i,
+ ir_unop_bitcast_u2f,
+ ir_unop_bitcast_f2u,
+ ir_unop_bitcast_u642d,
+ ir_unop_bitcast_i642d,
+ ir_unop_bitcast_d2u64,
+ ir_unop_bitcast_d2i64,
+ ir_unop_i642i,
+ ir_unop_u642i,
+ ir_unop_i642u,
+ ir_unop_u642u,
+ ir_unop_i642b,
+ ir_unop_i642f,
+ ir_unop_u642f,
+ ir_unop_i642d,
+ ir_unop_u642d,
+ ir_unop_i2i64,
+ ir_unop_u2i64,
+ ir_unop_b2i64,
+ ir_unop_f2i64,
+ ir_unop_d2i64,
+ ir_unop_i2u64,
+ ir_unop_u2u64,
+ ir_unop_f2u64,
+ ir_unop_d2u64,
+ ir_unop_u642i64,
+ ir_unop_i642u64,
+ ir_unop_trunc,
+ ir_unop_ceil,
+ ir_unop_floor,
+ ir_unop_fract,
+ ir_unop_round_even,
+ ir_unop_sin,
+ ir_unop_cos,
+ ir_unop_atan,
+ ir_unop_dFdx,
+ ir_unop_dFdx_coarse,
+ ir_unop_dFdx_fine,
+ ir_unop_dFdy,
+ ir_unop_dFdy_coarse,
+ ir_unop_dFdy_fine,
+ ir_unop_pack_snorm_2x16,
+ ir_unop_pack_snorm_4x8,
+ ir_unop_pack_unorm_2x16,
+ ir_unop_pack_unorm_4x8,
+ ir_unop_pack_half_2x16,
+ ir_unop_unpack_snorm_2x16,
+ ir_unop_unpack_snorm_4x8,
+ ir_unop_unpack_unorm_2x16,
+ ir_unop_unpack_unorm_4x8,
+ ir_unop_unpack_half_2x16,
+ ir_unop_bitfield_reverse,
+ ir_unop_bit_count,
+ ir_unop_find_msb,
+ ir_unop_find_lsb,
+ ir_unop_clz,
+ ir_unop_saturate,
+ ir_unop_pack_double_2x32,
+ ir_unop_unpack_double_2x32,
+ ir_unop_pack_sampler_2x32,
+ ir_unop_pack_image_2x32,
+ ir_unop_unpack_sampler_2x32,
+ ir_unop_unpack_image_2x32,
+ ir_unop_frexp_sig,
+ ir_unop_frexp_exp,
+ ir_unop_subroutine_to_int,
+ ir_unop_interpolate_at_centroid,
+ ir_unop_get_buffer_size,
+ ir_unop_ssbo_unsized_array_length,
+ ir_unop_pack_int_2x32,
+ ir_unop_pack_uint_2x32,
+ ir_unop_unpack_int_2x32,
+ ir_unop_unpack_uint_2x32,
+ ir_binop_add,
+ ir_binop_sub,
+ ir_binop_add_sat,
+ ir_binop_sub_sat,
+ ir_binop_abs_sub,
+ ir_binop_avg,
+ ir_binop_avg_round,
+ ir_binop_mul,
+ ir_binop_mul_32x16,
+ ir_binop_imul_high,
+ ir_binop_div,
+ ir_binop_carry,
+ ir_binop_borrow,
+ ir_binop_mod,
+ ir_binop_less,
+ ir_binop_gequal,
+ ir_binop_equal,
+ ir_binop_nequal,
+ ir_binop_all_equal,
+ ir_binop_any_nequal,
+ ir_binop_lshift,
+ ir_binop_rshift,
+ ir_binop_bit_and,
+ ir_binop_bit_xor,
+ ir_binop_bit_or,
+ ir_binop_logic_and,
+ ir_binop_logic_xor,
+ ir_binop_logic_or,
+ ir_binop_dot,
+ ir_binop_min,
+ ir_binop_max,
+ ir_binop_pow,
+ ir_binop_ubo_load,
+ ir_binop_ldexp,
+ ir_binop_vector_extract,
+ ir_binop_interpolate_at_offset,
+ ir_binop_interpolate_at_sample,
+ ir_binop_atan2,
+ ir_triop_fma,
+ ir_triop_lrp,
+ ir_triop_csel,
+ ir_triop_bitfield_extract,
+ ir_triop_vector_insert,
+ ir_quadop_bitfield_insert,
+ ir_quadop_vector,
+
+ /* Sentinels marking the last of each kind of operation. */
+ ir_last_unop = ir_unop_unpack_uint_2x32,
+ ir_last_binop = ir_binop_atan2,
+ ir_last_triop = ir_triop_vector_insert,
+ ir_last_quadop = ir_quadop_vector,
+ ir_last_opcode = ir_quadop_vector
+};
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.py b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.py
new file mode 100644
index 0000000000..160626e6ef
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation.py
@@ -0,0 +1,818 @@
+#
+# Copyright (C) 2015 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import mako.template
+import sys
+
+class type(object):
+ def __init__(self, c_type, union_field, glsl_type):
+ self.c_type = c_type
+ self.union_field = union_field
+ self.glsl_type = glsl_type
+
+
+class type_signature_iter(object):
+   """Basic iterator for a set of type signatures. Various kinds of sequences
+   of types come in, and an iteration of (result type, operand types) tuples
+   comes out.
+
+ """
+
+   def __init__(self, dest_type, source_types, num_operands):
+      """Initialize an iterator from a result type, a sequence of input types
+      and a number of operands. All of the operands have the same type. If
+      dest_type is None, the result type of the operation is the same as the
+      input type; otherwise the result type is dest_type.
+
+      """
+      self.dest_type = dest_type
+      self.source_types = source_types
+      self.num_operands = num_operands
+      self.i = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.i < len(self.source_types):
+ i = self.i
+ self.i += 1
+
+ if self.dest_type is None:
+ dest_type = self.source_types[i]
+ else:
+ dest_type = self.dest_type
+
+ return (dest_type, self.num_operands * (self.source_types[i],))
+ else:
+ raise StopIteration()
+
+ next = __next__
+
+
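+# A minimal usage sketch of the iterator above: iterating
+# type_signature_iter(None, (float_type, double_type), 2) yields
+# (float_type, (float_type, float_type)) followed by
+# (double_type, (double_type, double_type)).
+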
+uint_type = type("unsigned", "u", "GLSL_TYPE_UINT")
+int_type = type("int", "i", "GLSL_TYPE_INT")
+uint64_type = type("uint64_t", "u64", "GLSL_TYPE_UINT64")
+int64_type = type("int64_t", "i64", "GLSL_TYPE_INT64")
+float_type = type("float", "f", "GLSL_TYPE_FLOAT")
+double_type = type("double", "d", "GLSL_TYPE_DOUBLE")
+bool_type = type("bool", "b", "GLSL_TYPE_BOOL")
+
+all_types = (uint_type, int_type, float_type, double_type, uint64_type, int64_type, bool_type)
+numeric_types = (uint_type, int_type, float_type, double_type, uint64_type, int64_type)
+signed_numeric_types = (int_type, float_type, double_type, int64_type)
+integer_types = (uint_type, int_type, uint64_type, int64_type)
+real_types = (float_type, double_type)
+
+# This template is for operations that can have operands of several different
+# types, where each type may or may not have a different C expression.
+# This is used by most operations.
+constant_template_common = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[c] = ${op.get_c_expression(src_types)};
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;""")
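+
+# As a concrete illustration, rendering the template above for ir_unop_bit_not
+# produces the per-component switch that appears in the generated
+# ir_expression_operation_constant.h later in this tree, e.g.
+# "data.u[c] = ~ op[0]->value.u[c];" for GLSL_TYPE_UINT.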
+
+# This template is for binary operations that can operate on some combination
+# of scalar and vector operands.
+constant_template_vector_scalar = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ % if "mixed" in op.flags:
+ % for i in range(op.num_operands):
+ assert(op[${i}]->type->base_type == ${op.source_types[0].glsl_type} ||
+ % for src_type in op.source_types[1:-1]:
+ op[${i}]->type->base_type == ${src_type.glsl_type} ||
+ % endfor
+ op[${i}]->type->base_type == ${op.source_types[-1].glsl_type});
+ % endfor
+ % else:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ % endif
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[c] = ${op.get_c_expression(src_types, ("c0", "c1", "c2"))};
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;""")
+
+# This template is for multiplication. It is unique because it has to support
+# matrix * vector and matrix * matrix operations, and those are just different.
+constant_template_mul = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ /* Check for equal types, or unequal types involving scalars */
+ if ((op[0]->type == op[1]->type && !op[0]->type->is_matrix())
+ || op0_scalar || op1_scalar) {
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[c] = ${op.get_c_expression(src_types, ("c0", "c1", "c2"))};
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ }
+ } else {
+ assert(op[0]->type->is_matrix() || op[1]->type->is_matrix());
+
+ /* Multiply an N-by-M matrix with an M-by-P matrix. Since either
+ * matrix can be a GLSL vector, either N or P can be 1.
+ *
+ * For vec*mat, the vector is treated as a row vector. This
+ * means the vector is a 1-row x M-column matrix.
+ *
+ * For mat*vec, the vector is treated as a column vector. Since
+ * matrix_columns is 1 for vectors, this just works.
+ */
+ const unsigned n = op[0]->type->is_vector()
+ ? 1 : op[0]->type->vector_elements;
+ const unsigned m = op[1]->type->vector_elements;
+ const unsigned p = op[1]->type->matrix_columns;
+ for (unsigned j = 0; j < p; j++) {
+ for (unsigned i = 0; i < n; i++) {
+ for (unsigned k = 0; k < m; k++) {
+ if (op[0]->type->is_double())
+ data.d[i+n*j] += op[0]->value.d[i+n*k]*op[1]->value.d[k+m*j];
+ else
+ data.f[i+n*j] += op[0]->value.f[i+n*k]*op[1]->value.f[k+m*j];
+ }
+ }
+ }
+ }
+ break;""")
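+
+# For example, in the matrix branch above, a mat2 * vec2 multiply sees
+# n = 2 (rows of op[0]), m = 2 and p = 1, producing a 2-component result.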
+
+# This template is for operations that are horizontal and either have only a
+# single type or the implementation for all types is identical. That is, the
+# operation consumes a vector and produces a scalar.
+constant_template_horizontal_single_implementation = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ data.${op.dest_type.union_field}[0] = ${op.c_expression['default']};
+ break;""")
+
+# This template is for operations that are horizontal and do not assign the
+# result. The various unpack operations are examples.
+constant_template_horizontal_nonassignment = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ ${op.c_expression['default']};
+ break;""")
+
+# This template is for binary operations that are horizontal. That is, the
+# operation consumes a vector and produces a scalar.
+constant_template_horizontal = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ switch (op[0]->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[0] = ${op.get_c_expression(src_types)};
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ break;""")
+
+# This template is for ir_binop_vector_extract.
+constant_template_vector_extract = mako.template.Template("""\
+ case ${op.get_enum_name()}: {
+ const int c = CLAMP(op[1]->value.i[0], 0,
+ (int) op[0]->type->vector_elements - 1);
+
+ switch (op[0]->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[0] = op[0]->value.${src_types[0].union_field}[c];
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ break;
+ }""")
+
+# This template is for ir_triop_vector_insert.
+constant_template_vector_insert = mako.template.Template("""\
+ case ${op.get_enum_name()}: {
+ const unsigned idx = op[2]->value.u[0];
+
+ memcpy(&data, &op[0]->value, sizeof(data));
+
+ switch (this->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[idx] = op[1]->value.${src_types[0].union_field}[0];
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ break;
+ }""")
+
+# This template is for ir_quadop_vector.
+constant_template_vector = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ for (unsigned c = 0; c < this->type->vector_elements; c++) {
+ switch (this->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[c] = op[c]->value.${src_types[0].union_field}[0];
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;""")
+
+# This template is for ir_triop_lrp.
+constant_template_lrp = mako.template.Template("""\
+ case ${op.get_enum_name()}: {
+ assert(op[0]->type->is_float() || op[0]->type->is_double());
+ assert(op[1]->type->is_float() || op[1]->type->is_double());
+ assert(op[2]->type->is_float() || op[2]->type->is_double());
+
+ unsigned c2_inc = op[2]->type->is_scalar() ? 0 : 1;
+ for (unsigned c = 0, c2 = 0; c < components; c2 += c2_inc, c++) {
+ switch (this->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[0].glsl_type}:
+ data.${dst_type.union_field}[c] = ${op.get_c_expression(src_types, ("c", "c", "c2"))};
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+ }""")
+
+# This template is for ir_triop_csel. This expression is really unique
+# because not all of the operands are the same type, and the second operand
+# determines the type of the expression (instead of the first).
+constant_template_csel = mako.template.Template("""\
+ case ${op.get_enum_name()}:
+ for (unsigned c = 0; c < components; c++) {
+ switch (this->type->base_type) {
+ % for dst_type, src_types in op.signatures():
+ case ${src_types[1].glsl_type}:
+ data.${dst_type.union_field}[c] = ${op.get_c_expression(src_types)};
+ break;
+ % endfor
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;""")
+
+
+vector_scalar_operation = "vector-scalar"
+horizontal_operation = "horizontal"
+types_identical_operation = "identical"
+non_assign_operation = "nonassign"
+mixed_type_operation = "mixed"
+
+class operation(object):
+ def __init__(self, name, num_operands, printable_name = None, source_types = None, dest_type = None, c_expression = None, flags = None, all_signatures = None):
+ self.name = name
+ self.num_operands = num_operands
+
+ if printable_name is None:
+ self.printable_name = name
+ else:
+ self.printable_name = printable_name
+
+ self.all_signatures = all_signatures
+
+ if source_types is None:
+ self.source_types = tuple()
+ else:
+ self.source_types = source_types
+
+ self.dest_type = dest_type
+
+ if c_expression is None:
+ self.c_expression = None
+ elif isinstance(c_expression, str):
+ self.c_expression = {'default': c_expression}
+ else:
+ self.c_expression = c_expression
+
+ if flags is None:
+ self.flags = frozenset()
+ elif isinstance(flags, str):
+ self.flags = frozenset([flags])
+ else:
+ self.flags = frozenset(flags)
+
+
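+   # Naming sketch: operation("bit_not", 1) maps to "ir_unop_bit_not" and
+   # operation("add", 2) maps to "ir_binop_add".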
+ def get_enum_name(self):
+ return "ir_{0}op_{1}".format(("un", "bin", "tri", "quad")[self.num_operands-1], self.name)
+
+
+ def get_template(self):
+ if self.c_expression is None:
+ return None
+
+ if horizontal_operation in self.flags:
+ if non_assign_operation in self.flags:
+ return constant_template_horizontal_nonassignment.render(op=self)
+ elif types_identical_operation in self.flags:
+ return constant_template_horizontal_single_implementation.render(op=self)
+ else:
+ return constant_template_horizontal.render(op=self)
+
+ if self.num_operands == 2:
+ if self.name == "mul":
+ return constant_template_mul.render(op=self)
+ elif self.name == "vector_extract":
+ return constant_template_vector_extract.render(op=self)
+ elif vector_scalar_operation in self.flags:
+ return constant_template_vector_scalar.render(op=self)
+ elif self.num_operands == 3:
+ if self.name == "vector_insert":
+ return constant_template_vector_insert.render(op=self)
+ elif self.name == "lrp":
+ return constant_template_lrp.render(op=self)
+ elif self.name == "csel":
+ return constant_template_csel.render(op=self)
+ elif self.num_operands == 4:
+ if self.name == "vector":
+ return constant_template_vector.render(op=self)
+
+ return constant_template_common.render(op=self)
+
+
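+   # For example, with c_expression {'default': "{src0} + {src1}"} and
+   # types = (float_type, float_type), the method below returns
+   # "op[0]->value.f[c] + op[1]->value.f[c]".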
+ def get_c_expression(self, types, indices=("c", "c", "c")):
+ src0 = "op[0]->value.{0}[{1}]".format(types[0].union_field, indices[0])
+ src1 = "op[1]->value.{0}[{1}]".format(types[1].union_field, indices[1]) if len(types) >= 2 else "ERROR"
+ src2 = "op[2]->value.{0}[{1}]".format(types[2].union_field, indices[2]) if len(types) >= 3 else "ERROR"
+ src3 = "op[3]->value.{0}[c]".format(types[3].union_field) if len(types) >= 4 else "ERROR"
+
+ expr = self.c_expression[types[0].union_field] if types[0].union_field in self.c_expression else self.c_expression['default']
+
+ return expr.format(src0=src0,
+ src1=src1,
+ src2=src2,
+ src3=src3)
+
+
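+   # For example, an operation with dest_type=int_type and
+   # source_types=(float_type,) yields the single signature
+   # (int_type, (float_type,)).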
+ def signatures(self):
+ if self.all_signatures is not None:
+ return self.all_signatures
+ else:
+ return type_signature_iter(self.dest_type, self.source_types, self.num_operands)
+
+
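+# Note: the table below is grouped by operand count (unary, then binary,
+# ternary, quaternary). The sentinel values emitted for the "enum" target
+# (ir_last_unop and friends) assume this grouping.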
+ir_expression_operation = [
+ operation("bit_not", 1, printable_name="~", source_types=integer_types, c_expression="~ {src0}"),
+ operation("logic_not", 1, printable_name="!", source_types=(bool_type,), c_expression="!{src0}"),
+ operation("neg", 1, source_types=numeric_types, c_expression={'u': "-((int) {src0})", 'u64': "-((int64_t) {src0})", 'default': "-{src0}"}),
+ operation("abs", 1, source_types=signed_numeric_types, c_expression={'i': "{src0} < 0 ? -{src0} : {src0}", 'f': "fabsf({src0})", 'd': "fabs({src0})", 'i64': "{src0} < 0 ? -{src0} : {src0}"}),
+ operation("sign", 1, source_types=signed_numeric_types, c_expression={'i': "({src0} > 0) - ({src0} < 0)", 'f': "float(({src0} > 0.0F) - ({src0} < 0.0F))", 'd': "double(({src0} > 0.0) - ({src0} < 0.0))", 'i64': "({src0} > 0) - ({src0} < 0)"}),
+ operation("rcp", 1, source_types=real_types, c_expression={'f': "1.0F / {src0}", 'd': "1.0 / {src0}"}),
+ operation("rsq", 1, source_types=real_types, c_expression={'f': "1.0F / sqrtf({src0})", 'd': "1.0 / sqrt({src0})"}),
+ operation("sqrt", 1, source_types=real_types, c_expression={'f': "sqrtf({src0})", 'd': "sqrt({src0})"}),
+ operation("exp", 1, source_types=(float_type,), c_expression="expf({src0})"), # Log base e on gentype
+ operation("log", 1, source_types=(float_type,), c_expression="logf({src0})"), # Natural log on gentype
+ operation("exp2", 1, source_types=(float_type,), c_expression="exp2f({src0})"),
+ operation("log2", 1, source_types=(float_type,), c_expression="log2f({src0})"),
+
+ # Float-to-integer conversion.
+ operation("f2i", 1, source_types=(float_type,), dest_type=int_type, c_expression="(int) {src0}"),
+ # Float-to-unsigned conversion.
+ operation("f2u", 1, source_types=(float_type,), dest_type=uint_type, c_expression="(unsigned) {src0}"),
+ # Integer-to-float conversion.
+ operation("i2f", 1, source_types=(int_type,), dest_type=float_type, c_expression="(float) {src0}"),
+ # Float-to-boolean conversion
+ operation("f2b", 1, source_types=(float_type,), dest_type=bool_type, c_expression="{src0} != 0.0F ? true : false"),
+ # Boolean-to-float conversion
+ operation("b2f", 1, source_types=(bool_type,), dest_type=float_type, c_expression="{src0} ? 1.0F : 0.0F"),
+ # Boolean-to-float16 conversion
+ operation("b2f16", 1, source_types=(bool_type,), dest_type=float_type, c_expression="{src0} ? 1.0F : 0.0F"),
+ # int-to-boolean conversion
+ operation("i2b", 1, source_types=(uint_type, int_type), dest_type=bool_type, c_expression="{src0} ? true : false"),
+ # Boolean-to-int conversion
+ operation("b2i", 1, source_types=(bool_type,), dest_type=int_type, c_expression="{src0} ? 1 : 0"),
+ # Unsigned-to-float conversion.
+ operation("u2f", 1, source_types=(uint_type,), dest_type=float_type, c_expression="(float) {src0}"),
+ # Integer-to-unsigned conversion.
+ operation("i2u", 1, source_types=(int_type,), dest_type=uint_type, c_expression="{src0}"),
+ # Unsigned-to-integer conversion.
+ operation("u2i", 1, source_types=(uint_type,), dest_type=int_type, c_expression="{src0}"),
+ # Double-to-float conversion.
+ operation("d2f", 1, source_types=(double_type,), dest_type=float_type, c_expression="{src0}"),
+ # Float-to-double conversion.
+ operation("f2d", 1, source_types=(float_type,), dest_type=double_type, c_expression="{src0}"),
+ # Half-float conversions. These all operate on and return float types,
+ # since the framework expands half to full float before calling in. We
+ # still have to handle them here so that we can constant propagate through
+ # them, but they are no-ops.
+ operation("f2f16", 1, source_types=(float_type,), dest_type=float_type, c_expression="{src0}"),
+ operation("f2fmp", 1, source_types=(float_type,), dest_type=float_type, c_expression="{src0}"),
+ operation("f162f", 1, source_types=(float_type,), dest_type=float_type, c_expression="{src0}"),
+ # Double-to-integer conversion.
+ operation("d2i", 1, source_types=(double_type,), dest_type=int_type, c_expression="{src0}"),
+ # Integer-to-double conversion.
+ operation("i2d", 1, source_types=(int_type,), dest_type=double_type, c_expression="{src0}"),
+ # Double-to-unsigned conversion.
+ operation("d2u", 1, source_types=(double_type,), dest_type=uint_type, c_expression="{src0}"),
+ # Unsigned-to-double conversion.
+ operation("u2d", 1, source_types=(uint_type,), dest_type=double_type, c_expression="{src0}"),
+ # Double-to-boolean conversion.
+ operation("d2b", 1, source_types=(double_type,), dest_type=bool_type, c_expression="{src0} != 0.0"),
+ # Float16-to-boolean conversion.
+ operation("f162b", 1, source_types=(float_type,), dest_type=bool_type, c_expression="{src0} != 0.0"),
+   # Bit-identical int-to-float "conversion"
+   operation("bitcast_i2f", 1, source_types=(int_type,), dest_type=float_type, c_expression="bitcast_u2f({src0})"),
+   # Bit-identical float-to-int "conversion"
+   operation("bitcast_f2i", 1, source_types=(float_type,), dest_type=int_type, c_expression="bitcast_f2u({src0})"),
+   # Bit-identical uint-to-float "conversion"
+   operation("bitcast_u2f", 1, source_types=(uint_type,), dest_type=float_type, c_expression="bitcast_u2f({src0})"),
+   # Bit-identical float-to-uint "conversion"
+   operation("bitcast_f2u", 1, source_types=(float_type,), dest_type=uint_type, c_expression="bitcast_f2u({src0})"),
+ # Bit-identical u64-to-double "conversion"
+ operation("bitcast_u642d", 1, source_types=(uint64_type,), dest_type=double_type, c_expression="bitcast_u642d({src0})"),
+ # Bit-identical i64-to-double "conversion"
+ operation("bitcast_i642d", 1, source_types=(int64_type,), dest_type=double_type, c_expression="bitcast_i642d({src0})"),
+   # Bit-identical double-to-u64 "conversion"
+ operation("bitcast_d2u64", 1, source_types=(double_type,), dest_type=uint64_type, c_expression="bitcast_d2u64({src0})"),
+ # Bit-identical double-to-i64 "conversion"
+ operation("bitcast_d2i64", 1, source_types=(double_type,), dest_type=int64_type, c_expression="bitcast_d2i64({src0})"),
+ # i64-to-i32 conversion
+ operation("i642i", 1, source_types=(int64_type,), dest_type=int_type, c_expression="{src0}"),
+ # ui64-to-i32 conversion
+ operation("u642i", 1, source_types=(uint64_type,), dest_type=int_type, c_expression="{src0}"),
+ operation("i642u", 1, source_types=(int64_type,), dest_type=uint_type, c_expression="{src0}"),
+ operation("u642u", 1, source_types=(uint64_type,), dest_type=uint_type, c_expression="{src0}"),
+ operation("i642b", 1, source_types=(int64_type,), dest_type=bool_type, c_expression="{src0} != 0"),
+ operation("i642f", 1, source_types=(int64_type,), dest_type=float_type, c_expression="{src0}"),
+ operation("u642f", 1, source_types=(uint64_type,), dest_type=float_type, c_expression="{src0}"),
+ operation("i642d", 1, source_types=(int64_type,), dest_type=double_type, c_expression="{src0}"),
+ operation("u642d", 1, source_types=(uint64_type,), dest_type=double_type, c_expression="{src0}"),
+ operation("i2i64", 1, source_types=(int_type,), dest_type=int64_type, c_expression="{src0}"),
+ operation("u2i64", 1, source_types=(uint_type,), dest_type=int64_type, c_expression="{src0}"),
+ operation("b2i64", 1, source_types=(bool_type,), dest_type=int64_type, c_expression="{src0}"),
+ operation("f2i64", 1, source_types=(float_type,), dest_type=int64_type, c_expression="{src0}"),
+ operation("d2i64", 1, source_types=(double_type,), dest_type=int64_type, c_expression="{src0}"),
+ operation("i2u64", 1, source_types=(int_type,), dest_type=uint64_type, c_expression="{src0}"),
+ operation("u2u64", 1, source_types=(uint_type,), dest_type=uint64_type, c_expression="{src0}"),
+ operation("f2u64", 1, source_types=(float_type,), dest_type=uint64_type, c_expression="{src0}"),
+ operation("d2u64", 1, source_types=(double_type,), dest_type=uint64_type, c_expression="{src0}"),
+ operation("u642i64", 1, source_types=(uint64_type,), dest_type=int64_type, c_expression="{src0}"),
+ operation("i642u64", 1, source_types=(int64_type,), dest_type=uint64_type, c_expression="{src0}"),
+
+
+ # Unary floating-point rounding operations.
+ operation("trunc", 1, source_types=real_types, c_expression={'f': "truncf({src0})", 'd': "trunc({src0})"}),
+ operation("ceil", 1, source_types=real_types, c_expression={'f': "ceilf({src0})", 'd': "ceil({src0})"}),
+ operation("floor", 1, source_types=real_types, c_expression={'f': "floorf({src0})", 'd': "floor({src0})"}),
+ operation("fract", 1, source_types=real_types, c_expression={'f': "{src0} - floorf({src0})", 'd': "{src0} - floor({src0})"}),
+ operation("round_even", 1, source_types=real_types, c_expression={'f': "_mesa_roundevenf({src0})", 'd': "_mesa_roundeven({src0})"}),
+
+ # Trigonometric operations.
+ operation("sin", 1, source_types=(float_type,), c_expression="sinf({src0})"),
+ operation("cos", 1, source_types=(float_type,), c_expression="cosf({src0})"),
+ operation("atan", 1, source_types=(float_type,), c_expression="atan({src0})"),
+
+ # Partial derivatives.
+ operation("dFdx", 1, source_types=(float_type,), c_expression="0.0f"),
+ operation("dFdx_coarse", 1, printable_name="dFdxCoarse", source_types=(float_type,), c_expression="0.0f"),
+ operation("dFdx_fine", 1, printable_name="dFdxFine", source_types=(float_type,), c_expression="0.0f"),
+ operation("dFdy", 1, source_types=(float_type,), c_expression="0.0f"),
+ operation("dFdy_coarse", 1, printable_name="dFdyCoarse", source_types=(float_type,), c_expression="0.0f"),
+ operation("dFdy_fine", 1, printable_name="dFdyFine", source_types=(float_type,), c_expression="0.0f"),
+
+ # Floating point pack and unpack operations.
+ operation("pack_snorm_2x16", 1, printable_name="packSnorm2x16", source_types=(float_type,), dest_type=uint_type, c_expression="pack_2x16(pack_snorm_1x16, op[0]->value.f[0], op[0]->value.f[1])", flags=horizontal_operation),
+ operation("pack_snorm_4x8", 1, printable_name="packSnorm4x8", source_types=(float_type,), dest_type=uint_type, c_expression="pack_4x8(pack_snorm_1x8, op[0]->value.f[0], op[0]->value.f[1], op[0]->value.f[2], op[0]->value.f[3])", flags=horizontal_operation),
+ operation("pack_unorm_2x16", 1, printable_name="packUnorm2x16", source_types=(float_type,), dest_type=uint_type, c_expression="pack_2x16(pack_unorm_1x16, op[0]->value.f[0], op[0]->value.f[1])", flags=horizontal_operation),
+ operation("pack_unorm_4x8", 1, printable_name="packUnorm4x8", source_types=(float_type,), dest_type=uint_type, c_expression="pack_4x8(pack_unorm_1x8, op[0]->value.f[0], op[0]->value.f[1], op[0]->value.f[2], op[0]->value.f[3])", flags=horizontal_operation),
+ operation("pack_half_2x16", 1, printable_name="packHalf2x16", source_types=(float_type,), dest_type=uint_type, c_expression="pack_2x16(pack_half_1x16, op[0]->value.f[0], op[0]->value.f[1])", flags=horizontal_operation),
+ operation("unpack_snorm_2x16", 1, printable_name="unpackSnorm2x16", source_types=(uint_type,), dest_type=float_type, c_expression="unpack_2x16(unpack_snorm_1x16, op[0]->value.u[0], &data.f[0], &data.f[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_snorm_4x8", 1, printable_name="unpackSnorm4x8", source_types=(uint_type,), dest_type=float_type, c_expression="unpack_4x8(unpack_snorm_1x8, op[0]->value.u[0], &data.f[0], &data.f[1], &data.f[2], &data.f[3])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_unorm_2x16", 1, printable_name="unpackUnorm2x16", source_types=(uint_type,), dest_type=float_type, c_expression="unpack_2x16(unpack_unorm_1x16, op[0]->value.u[0], &data.f[0], &data.f[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_unorm_4x8", 1, printable_name="unpackUnorm4x8", source_types=(uint_type,), dest_type=float_type, c_expression="unpack_4x8(unpack_unorm_1x8, op[0]->value.u[0], &data.f[0], &data.f[1], &data.f[2], &data.f[3])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_half_2x16", 1, printable_name="unpackHalf2x16", source_types=(uint_type,), dest_type=float_type, c_expression="unpack_2x16(unpack_half_1x16, op[0]->value.u[0], &data.f[0], &data.f[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+
+ # Bit operations, part of ARB_gpu_shader5.
+ operation("bitfield_reverse", 1, source_types=(uint_type, int_type), c_expression="bitfield_reverse({src0})"),
+ operation("bit_count", 1, source_types=(uint_type, int_type), dest_type=int_type, c_expression="util_bitcount({src0})"),
+ operation("find_msb", 1, source_types=(uint_type, int_type), dest_type=int_type, c_expression={'u': "find_msb_uint({src0})", 'i': "find_msb_int({src0})"}),
+ operation("find_lsb", 1, source_types=(uint_type, int_type), dest_type=int_type, c_expression="find_msb_uint({src0} & -{src0})"),
+ operation("clz", 1, source_types=(uint_type,), dest_type=uint_type, c_expression="(unsigned)(31 - find_msb_uint({src0}))"),
+
+ operation("saturate", 1, printable_name="sat", source_types=(float_type,), c_expression="CLAMP({src0}, 0.0f, 1.0f)"),
+
+ # Double packing, part of ARB_gpu_shader_fp64.
+ operation("pack_double_2x32", 1, printable_name="packDouble2x32", source_types=(uint_type,), dest_type=double_type, c_expression="data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_double_2x32", 1, printable_name="unpackDouble2x32", source_types=(double_type,), dest_type=uint_type, c_expression="unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+
+ # Sampler/Image packing, part of ARB_bindless_texture.
+ operation("pack_sampler_2x32", 1, printable_name="packSampler2x32", source_types=(uint_type,), dest_type=uint64_type, c_expression="data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("pack_image_2x32", 1, printable_name="packImage2x32", source_types=(uint_type,), dest_type=uint64_type, c_expression="data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_sampler_2x32", 1, printable_name="unpackSampler2x32", source_types=(uint64_type,), dest_type=uint_type, c_expression="unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_image_2x32", 1, printable_name="unpackImage2x32", source_types=(uint64_type,), dest_type=uint_type, c_expression="unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+
+ operation("frexp_sig", 1),
+ operation("frexp_exp", 1),
+
+ operation("subroutine_to_int", 1),
+
+ # Interpolate fs input at centroid
+ #
+ # operand0 is the fs input.
+ operation("interpolate_at_centroid", 1),
+
+ # Ask the driver for the total size of a buffer block.
+ # operand0 is the ir_constant buffer block index in the linked shader.
+ operation("get_buffer_size", 1),
+
+ # Calculate length of an unsized array inside a buffer block.
+ # This opcode is going to be replaced in a lowering pass inside
+ # the linker.
+ #
+ # operand0 is the unsized array's ir_value for the calculation
+ # of its length.
+ operation("ssbo_unsized_array_length", 1),
+
+ # 64-bit integer packing ops.
+ operation("pack_int_2x32", 1, printable_name="packInt2x32", source_types=(int_type,), dest_type=int64_type, c_expression="data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("pack_uint_2x32", 1, printable_name="packUint2x32", source_types=(uint_type,), dest_type=uint64_type, c_expression="data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_int_2x32", 1, printable_name="unpackInt2x32", source_types=(int64_type,), dest_type=int_type, c_expression="unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+ operation("unpack_uint_2x32", 1, printable_name="unpackUint2x32", source_types=(uint64_type,), dest_type=uint_type, c_expression="unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1])", flags=frozenset((horizontal_operation, non_assign_operation))),
+
+ operation("add", 2, printable_name="+", source_types=numeric_types, c_expression="{src0} + {src1}", flags=vector_scalar_operation),
+ operation("sub", 2, printable_name="-", source_types=numeric_types, c_expression="{src0} - {src1}", flags=vector_scalar_operation),
+ operation("add_sat", 2, printable_name="add_sat", source_types=integer_types, c_expression={
+ 'u': "({src0} + {src1}) < {src0} ? UINT32_MAX : ({src0} + {src1})",
+ 'i': "iadd_saturate({src0}, {src1})",
+ 'u64': "({src0} + {src1}) < {src0} ? UINT64_MAX : ({src0} + {src1})",
+ 'i64': "iadd64_saturate({src0}, {src1})"
+ }),
+ operation("sub_sat", 2, printable_name="sub_sat", source_types=integer_types, c_expression={
+ 'u': "({src1} > {src0}) ? 0 : {src0} - {src1}",
+ 'i': "isub_saturate({src0}, {src1})",
+ 'u64': "({src1} > {src0}) ? 0 : {src0} - {src1}",
+ 'i64': "isub64_saturate({src0}, {src1})"
+ }),
+ operation("abs_sub", 2, printable_name="abs_sub", source_types=integer_types, c_expression={
+ 'u': "({src1} > {src0}) ? {src1} - {src0} : {src0} - {src1}",
+ 'i': "({src1} > {src0}) ? (unsigned){src1} - (unsigned){src0} : (unsigned){src0} - (unsigned){src1}",
+ 'u64': "({src1} > {src0}) ? {src1} - {src0} : {src0} - {src1}",
+ 'i64': "({src1} > {src0}) ? (uint64_t){src1} - (uint64_t){src0} : (uint64_t){src0} - (uint64_t){src1}",
+ }),
+ operation("avg", 2, printable_name="average", source_types=integer_types, c_expression="({src0} >> 1) + ({src1} >> 1) + (({src0} & {src1}) & 1)"),
+ operation("avg_round", 2, printable_name="average_rounded", source_types=integer_types, c_expression="({src0} >> 1) + ({src1} >> 1) + (({src0} | {src1}) & 1)"),
+
+ # "Floating-point or low 32-bit integer multiply."
+ operation("mul", 2, printable_name="*", source_types=numeric_types, c_expression="{src0} * {src1}"),
+ operation("mul_32x16", 2, printable_name="*", source_types=(uint_type, int_type), c_expression={
+      'u': "{src0} * (uint16_t){src1}",
+      'i': "{src0} * (int16_t){src1}"
+ }),
+ operation("imul_high", 2), # Calculates the high 32-bits of a 64-bit multiply.
+ operation("div", 2, printable_name="/", source_types=numeric_types, c_expression={'u': "{src1} == 0 ? 0 : {src0} / {src1}", 'i': "{src1} == 0 ? 0 : {src0} / {src1}", 'u64': "{src1} == 0 ? 0 : {src0} / {src1}", 'i64': "{src1} == 0 ? 0 : {src0} / {src1}", 'default': "{src0} / {src1}"}, flags=vector_scalar_operation),
+
+ # Returns the carry resulting from the addition of the two arguments.
+ operation("carry", 2),
+
+ # Returns the borrow resulting from the subtraction of the second argument
+ # from the first argument.
+ operation("borrow", 2),
+
+ # Either (vector % vector) or (vector % scalar)
+ #
+ # We don't use fmod because it rounds toward zero; GLSL specifies the use
+ # of floor.
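+   # For example, the floor-based formula gives mod(-1.0, 4.0) =
+   # -1.0 - 4.0 * floorf(-0.25) = 3.0, whereas fmod would return -1.0.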
+ operation("mod", 2, printable_name="%", source_types=numeric_types, c_expression={'u': "{src1} == 0 ? 0 : {src0} % {src1}", 'i': "{src1} == 0 ? 0 : {src0} % {src1}", 'f': "{src0} - {src1} * floorf({src0} / {src1})", 'd': "{src0} - {src1} * floor({src0} / {src1})", 'u64': "{src1} == 0 ? 0 : {src0} % {src1}", 'i64': "{src1} == 0 ? 0 : {src0} % {src1}"}, flags=vector_scalar_operation),
+
+ # Binary comparison operators which return a boolean vector.
+ # The type of both operands must be equal.
+ operation("less", 2, printable_name="<", source_types=numeric_types, dest_type=bool_type, c_expression="{src0} < {src1}"),
+ operation("gequal", 2, printable_name=">=", source_types=numeric_types, dest_type=bool_type, c_expression="{src0} >= {src1}"),
+ operation("equal", 2, printable_name="==", source_types=all_types, dest_type=bool_type, c_expression="{src0} == {src1}"),
+ operation("nequal", 2, printable_name="!=", source_types=all_types, dest_type=bool_type, c_expression="{src0} != {src1}"),
+
+ # Returns single boolean for whether all components of operands[0]
+ # equal the components of operands[1].
+ operation("all_equal", 2, source_types=all_types, dest_type=bool_type, c_expression="op[0]->has_value(op[1])", flags=frozenset((horizontal_operation, types_identical_operation))),
+
+ # Returns single boolean for whether any component of operands[0]
+ # is not equal to the corresponding component of operands[1].
+ operation("any_nequal", 2, source_types=all_types, dest_type=bool_type, c_expression="!op[0]->has_value(op[1])", flags=frozenset((horizontal_operation, types_identical_operation))),
+
+ # Bit-wise binary operations.
+ operation("lshift", 2, printable_name="<<", source_types=integer_types, c_expression="{src0} << {src1}", flags=frozenset((vector_scalar_operation, mixed_type_operation))),
+ operation("rshift", 2, printable_name=">>", source_types=integer_types, c_expression="{src0} >> {src1}", flags=frozenset((vector_scalar_operation, mixed_type_operation))),
+ operation("bit_and", 2, printable_name="&", source_types=integer_types, c_expression="{src0} & {src1}", flags=vector_scalar_operation),
+ operation("bit_xor", 2, printable_name="^", source_types=integer_types, c_expression="{src0} ^ {src1}", flags=vector_scalar_operation),
+ operation("bit_or", 2, printable_name="|", source_types=integer_types, c_expression="{src0} | {src1}", flags=vector_scalar_operation),
+
+ operation("logic_and", 2, printable_name="&&", source_types=(bool_type,), c_expression="{src0} && {src1}"),
+ operation("logic_xor", 2, printable_name="^^", source_types=(bool_type,), c_expression="{src0} != {src1}"),
+ operation("logic_or", 2, printable_name="||", source_types=(bool_type,), c_expression="{src0} || {src1}"),
+
+ operation("dot", 2, source_types=real_types, c_expression={'f': "dot_f(op[0], op[1])", 'd': "dot_d(op[0], op[1])"}, flags=horizontal_operation),
+ operation("min", 2, source_types=numeric_types, c_expression="MIN2({src0}, {src1})", flags=vector_scalar_operation),
+ operation("max", 2, source_types=numeric_types, c_expression="MAX2({src0}, {src1})", flags=vector_scalar_operation),
+
+ operation("pow", 2, source_types=(float_type,), c_expression="powf({src0}, {src1})"),
+
+ # Load a value the size of a given GLSL type from a uniform block.
+ #
+ # operand0 is the ir_constant uniform block index in the linked shader.
+ # operand1 is a byte offset within the uniform block.
+ operation("ubo_load", 2),
+
+ # Multiplies a number by two to a power, part of ARB_gpu_shader5.
+ operation("ldexp", 2,
+ all_signatures=((float_type, (float_type, int_type)),
+ (double_type, (double_type, int_type))),
+ c_expression={'f': "ldexpf_flush_subnormal({src0}, {src1})",
+ 'd': "ldexp_flush_subnormal({src0}, {src1})"}),
+
+ # Extract a scalar from a vector
+ #
+ # operand0 is the vector
+ # operand1 is the index of the field to read from operand0
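+   # The c_expression below is a placeholder: it is never rendered, but it
+   # must be non-None so that get_template() emits the dedicated
+   # vector-extract template.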
+ operation("vector_extract", 2, source_types=all_types, c_expression="anything-except-None"),
+
+ # Interpolate fs input at offset
+ #
+ # operand0 is the fs input
+ # operand1 is the offset from the pixel center
+ operation("interpolate_at_offset", 2),
+
+ # Interpolate fs input at sample position
+ #
+ # operand0 is the fs input
+ # operand1 is the sample ID
+ operation("interpolate_at_sample", 2),
+
+ operation("atan2", 2, source_types=(float_type,), c_expression="atan2({src0}, {src1})"),
+
+ # Fused floating-point multiply-add, part of ARB_gpu_shader5.
+ operation("fma", 3, source_types=real_types, c_expression="{src0} * {src1} + {src2}"),
+
+ operation("lrp", 3, source_types=real_types, c_expression={'f': "{src0} * (1.0f - {src2}) + ({src1} * {src2})", 'd': "{src0} * (1.0 - {src2}) + ({src1} * {src2})"}),
+
+ # Conditional Select
+ #
+ # A vector conditional select instruction (like ?:, but operating per-
+ # component on vectors).
+ #
+ # See also lower_instructions_visitor::ldexp_to_arith
+ operation("csel", 3,
+ all_signatures=zip(all_types, zip(len(all_types) * (bool_type,), all_types, all_types)),
+ c_expression="{src0} ? {src1} : {src2}"),
+
+ operation("bitfield_extract", 3,
+ all_signatures=((int_type, (uint_type, int_type, int_type)),
+ (int_type, (int_type, int_type, int_type))),
+ c_expression={'u': "bitfield_extract_uint({src0}, {src1}, {src2})",
+ 'i': "bitfield_extract_int({src0}, {src1}, {src2})"}),
+
+ # Generate a value with one field of a vector changed
+ #
+ # operand0 is the vector
+ # operand1 is the value to write into the vector result
+ # operand2 is the index in operand0 to be modified
+ operation("vector_insert", 3, source_types=all_types, c_expression="anything-except-None"),
+
+ operation("bitfield_insert", 4,
+ all_signatures=((uint_type, (uint_type, uint_type, int_type, int_type)),
+ (int_type, (int_type, int_type, int_type, int_type))),
+ c_expression="bitfield_insert({src0}, {src1}, {src2}, {src3})"),
+
+ operation("vector", 4, source_types=all_types, c_expression="anything-except-None"),
+]
+
+
+if __name__ == "__main__":
+ copyright = """/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+"""
+ enum_template = mako.template.Template(copyright + """
+enum ir_expression_operation {
+% for item in values:
+ ${item.get_enum_name()},
+% endfor
+
+ /* Sentinels marking the last of each kind of operation. */
+% for item in lasts:
+ ir_last_${("un", "bin", "tri", "quad")[item.num_operands - 1]}op = ${item.get_enum_name()},
+% endfor
+ ir_last_opcode = ir_quadop_${lasts[3].name}
+};""")
+
+ strings_template = mako.template.Template(copyright + """
+const char *const ir_expression_operation_strings[] = {
+% for item in values:
+ "${item.printable_name}",
+% endfor
+};
+
+const char *const ir_expression_operation_enum_strings[] = {
+% for item in values:
+ "${item.name}",
+% endfor
+};""")
+
+ constant_template = mako.template.Template("""\
+ switch (this->operation) {
+% for op in values:
+ % if op.c_expression is not None:
+${op.get_template()}
+
+ % endif
+% endfor
+ default:
+ /* FINISHME: Should handle all expression types. */
+ return NULL;
+ }
+""")
+
+ if sys.argv[1] == "enum":
+ lasts = [None, None, None, None]
+ for item in reversed(ir_expression_operation):
+ i = item.num_operands - 1
+ if lasts[i] is None:
+ lasts[i] = item
+
+ print(enum_template.render(values=ir_expression_operation,
+ lasts=lasts))
+ elif sys.argv[1] == "strings":
+ print(strings_template.render(values=ir_expression_operation))
+ elif sys.argv[1] == "constant":
+ print(constant_template.render(values=ir_expression_operation))
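+   # Sketch of typical usage (the script writes to stdout; the redirections
+   # are illustrative, e.g. when regenerating the checked-in headers):
+   #   python ir_expression_operation.py enum     > ir_expression_operation.h
+   #   python ir_expression_operation.py constant > ir_expression_operation_constant.h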
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h
new file mode 100644
index 0000000000..ef5c4e7927
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h
@@ -0,0 +1,2087 @@
+ switch (this->operation) {
+ case ir_unop_bit_not:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = ~ op[0]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = ~ op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = ~ op[0]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = ~ op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_logic_not:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.b[c] = !op[0]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_neg:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = -((int) op[0]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = -op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = -op[0]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = -op[0]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = -((int64_t) op[0]->value.u64[c]);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = -op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_abs:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c] < 0 ? -op[0]->value.i[c] : op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = fabsf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = fabs(op[0]->value.d[c]);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c] < 0 ? -op[0]->value.i64[c] : op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_sign:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i[c] = (op[0]->value.i[c] > 0) - (op[0]->value.i[c] < 0);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = float((op[0]->value.f[c] > 0.0F) - (op[0]->value.f[c] < 0.0F));
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = double((op[0]->value.d[c] > 0.0) - (op[0]->value.d[c] < 0.0));
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = (op[0]->value.i64[c] > 0) - (op[0]->value.i64[c] < 0);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_rcp:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 1.0F / op[0]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = 1.0 / op[0]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_rsq:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 1.0F / sqrtf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = 1.0 / sqrt(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_sqrt:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = sqrtf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = sqrt(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_exp:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = expf(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_log:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = logf(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_exp2:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = exp2f(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_log2:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = log2f(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.i[c] = (int) op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2u:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[c] = (unsigned) op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i2f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.f[c] = (float) op[0]->value.i[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2b:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] != 0.0F ? true : false;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_b2f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.f[c] = op[0]->value.b[c] ? 1.0F : 0.0F;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_b2f16:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.f[c] = op[0]->value.b[c] ? 1.0F : 0.0F;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i2b:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] ? true : false;
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] ? true : false;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_b2i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.i[c] = op[0]->value.b[c] ? 1 : 0;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u2f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.f[c] = (float) op[0]->value.u[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i2u:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.u[c] = op[0]->value.i[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u2i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.i[c] = op[0]->value.u[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_d2f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.f[c] = op[0]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.d[c] = op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2f16:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2fmp:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f162f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_d2i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.i[c] = op[0]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i2d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.d[c] = op[0]->value.i[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_d2u:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.u[c] = op[0]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u2d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.d[c] = op[0]->value.u[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_d2b:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.b[c] = op[0]->value.d[c] != 0.0;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f162b:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] != 0.0;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_i2f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.f[c] = bitcast_u2f(op[0]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_f2i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.i[c] = bitcast_f2u(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_u2f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.f[c] = bitcast_u2f(op[0]->value.u[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_f2u:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[c] = bitcast_f2u(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_u642d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT64:
+ data.d[c] = bitcast_u642d(op[0]->value.u64[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_i642d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.d[c] = bitcast_i642d(op[0]->value.i64[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_d2u64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.u64[c] = bitcast_d2u64(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bitcast_d2i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.i64[c] = bitcast_d2i64(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i642i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.i[c] = op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u642i:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT64:
+ data.i[c] = op[0]->value.u64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i642u:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.u[c] = op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u642u:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT64:
+ data.u[c] = op[0]->value.u64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i642b:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.b[c] = op[0]->value.i64[c] != 0;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i642f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.f[c] = op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u642f:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT64:
+ data.f[c] = op[0]->value.u64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i642d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.d[c] = op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u642d:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT64:
+ data.d[c] = op[0]->value.u64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i2i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.i64[c] = op[0]->value.i[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u2i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.i64[c] = op[0]->value.u[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_b2i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.i64[c] = op[0]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.i64[c] = op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_d2i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.i64[c] = op[0]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i2u64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT:
+ data.u64[c] = op[0]->value.i[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u2u64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u64[c] = op[0]->value.u[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_f2u64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u64[c] = op[0]->value.f[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_d2u64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_DOUBLE:
+ data.u64[c] = op[0]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_u642i64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT64:
+ data.i64[c] = op[0]->value.u64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_i642u64:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_INT64:
+ data.u64[c] = op[0]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_trunc:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = truncf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = trunc(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_ceil:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = ceilf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = ceil(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_floor:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = floorf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = floor(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_fract:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c] - floorf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c] - floor(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_round_even:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = _mesa_roundevenf(op[0]->value.f[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = _mesa_roundeven(op[0]->value.d[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_sin:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = sinf(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_cos:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = cosf(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_atan:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = atanf(op[0]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
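+ /* The operand is a compile-time constant, so it is uniform across the
+ * primitive and every screen-space derivative folds to zero.
+ */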
+ case ir_unop_dFdx:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 0.0f;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_dFdx_coarse:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 0.0f;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_dFdx_fine:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 0.0f;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_dFdy:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 0.0f;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_dFdy_coarse:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 0.0f;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_dFdy_fine:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = 0.0f;
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_pack_snorm_2x16:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[0] = pack_2x16(pack_snorm_1x16, op[0]->value.f[0], op[0]->value.f[1]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+
+ case ir_unop_pack_snorm_4x8:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[0] = pack_4x8(pack_snorm_1x8, op[0]->value.f[0], op[0]->value.f[1], op[0]->value.f[2], op[0]->value.f[3]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+
+ case ir_unop_pack_unorm_2x16:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[0] = pack_2x16(pack_unorm_1x16, op[0]->value.f[0], op[0]->value.f[1]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+
+ case ir_unop_pack_unorm_4x8:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[0] = pack_4x8(pack_unorm_1x8, op[0]->value.f[0], op[0]->value.f[1], op[0]->value.f[2], op[0]->value.f[3]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+
+ case ir_unop_pack_half_2x16:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.u[0] = pack_2x16(pack_half_1x16, op[0]->value.f[0], op[0]->value.f[1]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+
+ case ir_unop_unpack_snorm_2x16:
+ unpack_2x16(unpack_snorm_1x16, op[0]->value.u[0], &data.f[0], &data.f[1]);
+ break;
+
+ case ir_unop_unpack_snorm_4x8:
+ unpack_4x8(unpack_snorm_1x8, op[0]->value.u[0], &data.f[0], &data.f[1], &data.f[2], &data.f[3]);
+ break;
+
+ case ir_unop_unpack_unorm_2x16:
+ unpack_2x16(unpack_unorm_1x16, op[0]->value.u[0], &data.f[0], &data.f[1]);
+ break;
+
+ case ir_unop_unpack_unorm_4x8:
+ unpack_4x8(unpack_unorm_1x8, op[0]->value.u[0], &data.f[0], &data.f[1], &data.f[2], &data.f[3]);
+ break;
+
+ case ir_unop_unpack_half_2x16:
+ unpack_2x16(unpack_half_1x16, op[0]->value.u[0], &data.f[0], &data.f[1]);
+ break;
+
+ case ir_unop_bitfield_reverse:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = bitfield_reverse(op[0]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = bitfield_reverse(op[0]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_bit_count:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.i[c] = util_bitcount(op[0]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = util_bitcount(op[0]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_find_msb:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.i[c] = find_msb_uint(op[0]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = find_msb_int(op[0]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_find_lsb:
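+ /* x & -x isolates the lowest set bit, so the MSB of that value is the
+ * LSB of the original. find_msb_uint is expected to return -1 for a
+ * zero input, matching findLSB(0) in GLSL.
+ */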
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.i[c] = find_msb_uint(op[0]->value.u[c] & -op[0]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = find_msb_uint(op[0]->value.i[c] & -op[0]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_clz:
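+ /* The leading-zero count of a 32-bit value is 31 minus the index of
+ * its most significant set bit; find_msb_uint's -1 for a zero input
+ * yields the expected count of 32.
+ */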
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = (unsigned)(31 - find_msb_uint(op[0]->value.u[c]));
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_saturate:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = CLAMP(op[0]->value.f[c], 0.0f, 1.0f);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_unop_pack_double_2x32:
+ data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1]);
+ break;
+
+ case ir_unop_unpack_double_2x32:
+ unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1]);
+ break;
+
+ case ir_unop_pack_sampler_2x32:
+ data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1]);
+ break;
+
+ case ir_unop_pack_image_2x32:
+ data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1]);
+ break;
+
+ case ir_unop_unpack_sampler_2x32:
+ unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1]);
+ break;
+
+ case ir_unop_unpack_image_2x32:
+ unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1]);
+ break;
+
+ case ir_unop_pack_int_2x32:
+ data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1]);
+ break;
+
+ case ir_unop_pack_uint_2x32:
+ data.u64[0] = pack_2x32(op[0]->value.u[0], op[0]->value.u[1]);
+ break;
+
+ case ir_unop_unpack_int_2x32:
+ unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1]);
+ break;
+
+ case ir_unop_unpack_uint_2x32:
+ unpack_2x32(op[0]->value.u64[0], &data.u[0], &data.u[1]);
+ break;
+
+ case ir_binop_add:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] + op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] + op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] + op[1]->value.f[c1];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c0] + op[1]->value.d[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] + op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] + op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_sub:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] - op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] - op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] - op[1]->value.f[c1];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c0] - op[1]->value.d[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] - op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] - op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_add_sat:
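+ /* Unsigned wrap-around is detected by checking whether the sum is
+ * smaller than an addend; signed saturation is delegated to the
+ * iadd_saturate helpers.
+ */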
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = (op[0]->value.u[c] + op[1]->value.u[c]) < op[0]->value.u[c] ? UINT32_MAX : (op[0]->value.u[c] + op[1]->value.u[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = iadd_saturate(op[0]->value.i[c], op[1]->value.i[c]);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = (op[0]->value.u64[c] + op[1]->value.u64[c]) < op[0]->value.u64[c] ? UINT64_MAX : (op[0]->value.u64[c] + op[1]->value.u64[c]);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = iadd64_saturate(op[0]->value.i64[c], op[1]->value.i64[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_sub_sat:
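+ /* Unsigned subtraction clamps to zero when the subtrahend is larger;
+ * signed saturation is delegated to the isub_saturate helpers.
+ */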
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = (op[1]->value.u[c] > op[0]->value.u[c]) ? 0 : op[0]->value.u[c] - op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = isub_saturate(op[0]->value.i[c], op[1]->value.i[c]);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = (op[1]->value.u64[c] > op[0]->value.u64[c]) ? 0 : op[0]->value.u64[c] - op[1]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = isub64_saturate(op[0]->value.i64[c], op[1]->value.i64[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_abs_sub:
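+ /* The signed cases subtract in the unsigned domain so that the
+ * difference of widely separated values cannot overflow.
+ */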
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = (op[1]->value.u[c] > op[0]->value.u[c]) ? op[1]->value.u[c] - op[0]->value.u[c] : op[0]->value.u[c] - op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = (op[1]->value.i[c] > op[0]->value.i[c]) ? (unsigned)op[1]->value.i[c] - (unsigned)op[0]->value.i[c] : (unsigned)op[0]->value.i[c] - (unsigned)op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = (op[1]->value.u64[c] > op[0]->value.u64[c]) ? op[1]->value.u64[c] - op[0]->value.u64[c] : op[0]->value.u64[c] - op[1]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = (op[1]->value.i64[c] > op[0]->value.i64[c]) ? (uint64_t)op[1]->value.i64[c] - (uint64_t)op[0]->value.i64[c] : (uint64_t)op[0]->value.i64[c] - (uint64_t)op[1]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_avg:
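+ /* (a >> 1) + (b >> 1) never overflows; the (a & b & 1) term restores
+ * the low bit lost when both operands are odd, giving the floor of the
+ * true average.
+ */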
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = (op[0]->value.u[c] >> 1) + (op[1]->value.u[c] >> 1) + ((op[0]->value.u[c] & op[1]->value.u[c]) & 1);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = (op[0]->value.i[c] >> 1) + (op[1]->value.i[c] >> 1) + ((op[0]->value.i[c] & op[1]->value.i[c]) & 1);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = (op[0]->value.u64[c] >> 1) + (op[1]->value.u64[c] >> 1) + ((op[0]->value.u64[c] & op[1]->value.u64[c]) & 1);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = (op[0]->value.i64[c] >> 1) + (op[1]->value.i64[c] >> 1) + ((op[0]->value.i64[c] & op[1]->value.i64[c]) & 1);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_avg_round:
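+ /* As above, but (a | b) & 1 adds one whenever either operand is odd,
+ * giving the average rounded up instead of down.
+ */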
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = (op[0]->value.u[c] >> 1) + (op[1]->value.u[c] >> 1) + ((op[0]->value.u[c] | op[1]->value.u[c]) & 1);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = (op[0]->value.i[c] >> 1) + (op[1]->value.i[c] >> 1) + ((op[0]->value.i[c] | op[1]->value.i[c]) & 1);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = (op[0]->value.u64[c] >> 1) + (op[1]->value.u64[c] >> 1) + ((op[0]->value.u64[c] | op[1]->value.u64[c]) & 1);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = (op[0]->value.i64[c] >> 1) + (op[1]->value.i64[c] >> 1) + ((op[0]->value.i64[c] | op[1]->value.i64[c]) & 1);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_mul:
+ /* Check for equal types, or unequal types involving scalars */
+ if ((op[0]->type == op[1]->type && !op[0]->type->is_matrix())
+ || op0_scalar || op1_scalar) {
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] * op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] * op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] * op[1]->value.f[c1];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c0] * op[1]->value.d[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] * op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] * op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ } else {
+ assert(op[0]->type->is_matrix() || op[1]->type->is_matrix());
+
+ /* Multiply an N-by-M matrix with an M-by-P matrix. Since either
+ * matrix can be a GLSL vector, either N or P can be 1.
+ *
+ * For vec*mat, the vector is treated as a row vector. This
+ * means the vector is a 1-row x M-column matrix.
+ *
+ * For mat*vec, the vector is treated as a column vector. Since
+ * matrix_columns is 1 for vectors, this just works.
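+ *
+ * For example, mat3 * vec3 gives n == 3, m == 3 and p == 1, which
+ * yields a 3-component column vector.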
+ */
+ const unsigned n = op[0]->type->is_vector()
+ ? 1 : op[0]->type->vector_elements;
+ const unsigned m = op[1]->type->vector_elements;
+ const unsigned p = op[1]->type->matrix_columns;
+ for (unsigned j = 0; j < p; j++) {
+ for (unsigned i = 0; i < n; i++) {
+ for (unsigned k = 0; k < m; k++) {
+ if (op[0]->type->is_double())
+ data.d[i+n*j] += op[0]->value.d[i+n*k]*op[1]->value.d[k+m*j];
+ else
+ data.f[i+n*j] += op[0]->value.f[i+n*k]*op[1]->value.f[k+m*j];
+ }
+ }
+ }
+ }
+ break;
+
+ case ir_binop_mul_32x16:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c] * (uint16_t)op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c] * (int16_t)op[1]->value.i[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_div:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
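+ /* Integer division by zero is undefined in GLSL, so it is folded to
+ * zero here rather than performing a host division that would trap.
+ */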
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[1]->value.u[c1] == 0 ? 0 : op[0]->value.u[c0] / op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[1]->value.i[c1] == 0 ? 0 : op[0]->value.i[c0] / op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] / op[1]->value.f[c1];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c0] / op[1]->value.d[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[1]->value.u64[c1] == 0 ? 0 : op[0]->value.u64[c0] / op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[1]->value.i64[c1] == 0 ? 0 : op[0]->value.i64[c0] / op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_mod:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
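+ /* Integer mod-by-zero likewise folds to zero. The floating-point
+ * cases implement GLSL's definition mod(x, y) = x - y * floor(x / y)
+ * directly instead of calling fmod().
+ */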
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[1]->value.u[c1] == 0 ? 0 : op[0]->value.u[c0] % op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[1]->value.i[c1] == 0 ? 0 : op[0]->value.i[c0] % op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c0] - op[1]->value.f[c1] * floorf(op[0]->value.f[c0] / op[1]->value.f[c1]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c0] - op[1]->value.d[c1] * floor(op[0]->value.d[c0] / op[1]->value.d[c1]);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[1]->value.u64[c1] == 0 ? 0 : op[0]->value.u64[c0] % op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[1]->value.i64[c1] == 0 ? 0 : op[0]->value.i64[c0] % op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_less:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] < op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] < op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] < op[1]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.b[c] = op[0]->value.d[c] < op[1]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.b[c] = op[0]->value.u64[c] < op[1]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.b[c] = op[0]->value.i64[c] < op[1]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_gequal:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] >= op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] >= op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] >= op[1]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.b[c] = op[0]->value.d[c] >= op[1]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.b[c] = op[0]->value.u64[c] >= op[1]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.b[c] = op[0]->value.i64[c] >= op[1]->value.i64[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_equal:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] == op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] == op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] == op[1]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.b[c] = op[0]->value.d[c] == op[1]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.b[c] = op[0]->value.u64[c] == op[1]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.b[c] = op[0]->value.i64[c] == op[1]->value.i64[c];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] == op[1]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_nequal:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.b[c] = op[0]->value.u[c] != op[1]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.b[c] = op[0]->value.i[c] != op[1]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.b[c] = op[0]->value.f[c] != op[1]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.b[c] = op[0]->value.d[c] != op[1]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.b[c] = op[0]->value.u64[c] != op[1]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.b[c] = op[0]->value.i64[c] != op[1]->value.i64[c];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] != op[1]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_all_equal:
+ data.b[0] = op[0]->has_value(op[1]);
+ break;
+
+ case ir_binop_any_nequal:
+ data.b[0] = !op[0]->has_value(op[1]);
+ break;
+
+ case ir_binop_lshift:
+ assert(op[0]->type->base_type == GLSL_TYPE_UINT ||
+ op[0]->type->base_type == GLSL_TYPE_INT ||
+ op[0]->type->base_type == GLSL_TYPE_UINT64 ||
+ op[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(op[1]->type->base_type == GLSL_TYPE_UINT ||
+ op[1]->type->base_type == GLSL_TYPE_INT ||
+ op[1]->type->base_type == GLSL_TYPE_UINT64 ||
+ op[1]->type->base_type == GLSL_TYPE_INT64);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] << op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] << op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] << op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] << op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_rshift:
+ assert(op[0]->type->base_type == GLSL_TYPE_UINT ||
+ op[0]->type->base_type == GLSL_TYPE_INT ||
+ op[0]->type->base_type == GLSL_TYPE_UINT64 ||
+ op[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(op[1]->type->base_type == GLSL_TYPE_UINT ||
+ op[1]->type->base_type == GLSL_TYPE_INT ||
+ op[1]->type->base_type == GLSL_TYPE_UINT64 ||
+ op[1]->type->base_type == GLSL_TYPE_INT64);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] >> op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] >> op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] >> op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] >> op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_bit_and:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] & op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] & op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] & op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] & op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_bit_xor:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] ^ op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] ^ op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] ^ op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] ^ op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_bit_or:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.u[c0] | op[1]->value.u[c1];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.i[c0] | op[1]->value.i[c1];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.u64[c0] | op[1]->value.u64[c1];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.i64[c0] | op[1]->value.i64[c1];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_logic_and:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] && op[1]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_logic_xor:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] != op[1]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_logic_or:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] || op[1]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_dot:
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[0] = dot_f(op[0], op[1]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[0] = dot_d(op[0], op[1]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+
+ case ir_binop_min:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = MIN2(op[0]->value.u[c0], op[1]->value.u[c1]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = MIN2(op[0]->value.i[c0], op[1]->value.i[c1]);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = MIN2(op[0]->value.f[c0], op[1]->value.f[c1]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = MIN2(op[0]->value.d[c0], op[1]->value.d[c1]);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = MIN2(op[0]->value.u64[c0], op[1]->value.u64[c1]);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = MIN2(op[0]->value.i64[c0], op[1]->value.i64[c1]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_max:
+ assert(op[0]->type == op[1]->type || op0_scalar || op1_scalar);
+ for (unsigned c = 0, c0 = 0, c1 = 0;
+ c < components;
+ c0 += c0_inc, c1 += c1_inc, c++) {
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = MAX2(op[0]->value.u[c0], op[1]->value.u[c1]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = MAX2(op[0]->value.i[c0], op[1]->value.i[c1]);
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = MAX2(op[0]->value.f[c0], op[1]->value.f[c1]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = MAX2(op[0]->value.d[c0], op[1]->value.d[c1]);
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = MAX2(op[0]->value.u64[c0], op[1]->value.u64[c1]);
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = MAX2(op[0]->value.i64[c0], op[1]->value.i64[c1]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_pow:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = powf(op[0]->value.f[c], op[1]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_ldexp:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = ldexpf_flush_subnormal(op[0]->value.f[c], op[1]->value.i[c]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = ldexp_flush_subnormal(op[0]->value.d[c], op[1]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_binop_vector_extract: {
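+ /* An out-of-range constant index is clamped into the valid component
+ * range so that folding never reads outside the value union.
+ */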
+ const int c = CLAMP(op[1]->value.i[0], 0,
+ (int) op[0]->type->vector_elements - 1);
+
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[0] = op[0]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[0] = op[0]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[0] = op[0]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[0] = op[0]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[0] = op[0]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[0] = op[0]->value.i64[c];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[0] = op[0]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+ }
+
+ case ir_binop_atan2:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = atan2f(op[0]->value.f[c], op[1]->value.f[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_triop_fma:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c] * op[1]->value.f[c] + op[2]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c] * op[1]->value.d[c] + op[2]->value.d[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_triop_lrp: {
+ assert(op[0]->type->is_float() || op[0]->type->is_double());
+ assert(op[1]->type->is_float() || op[1]->type->is_double());
+ assert(op[2]->type->is_float() || op[2]->type->is_double());
+
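+ /* The interpolant may be a scalar applied to every component, in
+ * which case its component index never advances.
+ */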
+ unsigned c2_inc = op[2]->type->is_scalar() ? 0 : 1;
+ for (unsigned c = 0, c2 = 0; c < components; c2 += c2_inc, c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.f[c] * (1.0f - op[2]->value.f[c2]) + (op[1]->value.f[c] * op[2]->value.f[c2]);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.d[c] * (1.0 - op[2]->value.d[c2]) + (op[1]->value.d[c] * op[2]->value.d[c2]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+ }
+
+ case ir_triop_csel:
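+ /* Component-wise select: each lane of the boolean condition chooses
+ * the corresponding lane of the second or third operand.
+ */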
+ for (unsigned c = 0; c < components; c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[0]->value.b[c] ? op[1]->value.u[c] : op[2]->value.u[c];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[0]->value.b[c] ? op[1]->value.i[c] : op[2]->value.i[c];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[0]->value.b[c] ? op[1]->value.f[c] : op[2]->value.f[c];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[0]->value.b[c] ? op[1]->value.d[c] : op[2]->value.d[c];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[0]->value.b[c] ? op[1]->value.u64[c] : op[2]->value.u64[c];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[0]->value.b[c] ? op[1]->value.i64[c] : op[2]->value.i64[c];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[0]->value.b[c] ? op[1]->value.b[c] : op[2]->value.b[c];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_triop_bitfield_extract:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.i[c] = bitfield_extract_uint(op[0]->value.u[c], op[1]->value.i[c], op[2]->value.i[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = bitfield_extract_int(op[0]->value.i[c], op[1]->value.i[c], op[2]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_triop_vector_insert: {
+ const unsigned idx = op[2]->value.u[0];
+
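+ /* Start from a copy of the whole source vector, then overwrite the
+ * single selected component.
+ */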
+ memcpy(&data, &op[0]->value, sizeof(data));
+
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[idx] = op[1]->value.u[0];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[idx] = op[1]->value.i[0];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[idx] = op[1]->value.f[0];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[idx] = op[1]->value.d[0];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[idx] = op[1]->value.u64[0];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[idx] = op[1]->value.i64[0];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[idx] = op[1]->value.b[0];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ break;
+ }
+
+ case ir_quadop_bitfield_insert:
+ for (unsigned c = 0; c < op[0]->type->components(); c++) {
+ switch (op[0]->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = bitfield_insert(op[0]->value.u[c], op[1]->value.u[c], op[2]->value.i[c], op[3]->value.i[c]);
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = bitfield_insert(op[0]->value.i[c], op[1]->value.i[c], op[2]->value.i[c], op[3]->value.i[c]);
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ case ir_quadop_vector:
+ for (unsigned c = 0; c < this->type->vector_elements; c++) {
+ switch (this->type->base_type) {
+ case GLSL_TYPE_UINT:
+ data.u[c] = op[c]->value.u[0];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[c] = op[c]->value.i[0];
+ break;
+ case GLSL_TYPE_FLOAT:
+ data.f[c] = op[c]->value.f[0];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[c] = op[c]->value.d[0];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[c] = op[c]->value.u64[0];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[c] = op[c]->value.i64[0];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[c] = op[c]->value.b[0];
+ break;
+ default:
+ unreachable("invalid type");
+ }
+ }
+ break;
+
+ default:
+ /* FINISHME: Should handle all expression types. */
+ return NULL;
+ }
+
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h
new file mode 100644
index 0000000000..a78aba201d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+const char *const ir_expression_operation_strings[] = {
+ "~",
+ "!",
+ "neg",
+ "abs",
+ "sign",
+ "rcp",
+ "rsq",
+ "sqrt",
+ "exp",
+ "log",
+ "exp2",
+ "log2",
+ "f2i",
+ "f2u",
+ "i2f",
+ "f2b",
+ "b2f",
+ "b2f16",
+ "i2b",
+ "b2i",
+ "u2f",
+ "i2u",
+ "u2i",
+ "d2f",
+ "f2d",
+ "f2f16",
+ "f2fmp",
+ "f162f",
+ "d2i",
+ "i2d",
+ "d2u",
+ "u2d",
+ "d2b",
+ "f162b",
+ "bitcast_i2f",
+ "bitcast_f2i",
+ "bitcast_u2f",
+ "bitcast_f2u",
+ "bitcast_u642d",
+ "bitcast_i642d",
+ "bitcast_d2u64",
+ "bitcast_d2i64",
+ "i642i",
+ "u642i",
+ "i642u",
+ "u642u",
+ "i642b",
+ "i642f",
+ "u642f",
+ "i642d",
+ "u642d",
+ "i2i64",
+ "u2i64",
+ "b2i64",
+ "f2i64",
+ "d2i64",
+ "i2u64",
+ "u2u64",
+ "f2u64",
+ "d2u64",
+ "u642i64",
+ "i642u64",
+ "trunc",
+ "ceil",
+ "floor",
+ "fract",
+ "round_even",
+ "sin",
+ "cos",
+ "atan",
+ "dFdx",
+ "dFdxCoarse",
+ "dFdxFine",
+ "dFdy",
+ "dFdyCoarse",
+ "dFdyFine",
+ "packSnorm2x16",
+ "packSnorm4x8",
+ "packUnorm2x16",
+ "packUnorm4x8",
+ "packHalf2x16",
+ "unpackSnorm2x16",
+ "unpackSnorm4x8",
+ "unpackUnorm2x16",
+ "unpackUnorm4x8",
+ "unpackHalf2x16",
+ "bitfield_reverse",
+ "bit_count",
+ "find_msb",
+ "find_lsb",
+ "clz",
+ "sat",
+ "packDouble2x32",
+ "unpackDouble2x32",
+ "packSampler2x32",
+ "packImage2x32",
+ "unpackSampler2x32",
+ "unpackImage2x32",
+ "frexp_sig",
+ "frexp_exp",
+ "subroutine_to_int",
+ "interpolate_at_centroid",
+ "get_buffer_size",
+ "ssbo_unsized_array_length",
+ "packInt2x32",
+ "packUint2x32",
+ "unpackInt2x32",
+ "unpackUint2x32",
+ "+",
+ "-",
+ "add_sat",
+ "sub_sat",
+ "abs_sub",
+ "average",
+ "average_rounded",
+ "*",
+ "*",
+ "imul_high",
+ "/",
+ "carry",
+ "borrow",
+ "%",
+ "<",
+ ">=",
+ "==",
+ "!=",
+ "all_equal",
+ "any_nequal",
+ "<<",
+ ">>",
+ "&",
+ "^",
+ "|",
+ "&&",
+ "^^",
+ "||",
+ "dot",
+ "min",
+ "max",
+ "pow",
+ "ubo_load",
+ "ldexp",
+ "vector_extract",
+ "interpolate_at_offset",
+ "interpolate_at_sample",
+ "atan2",
+ "fma",
+ "lrp",
+ "csel",
+ "bitfield_extract",
+ "vector_insert",
+ "bitfield_insert",
+ "vector",
+};
+
+const char *const ir_expression_operation_enum_strings[] = {
+ "bit_not",
+ "logic_not",
+ "neg",
+ "abs",
+ "sign",
+ "rcp",
+ "rsq",
+ "sqrt",
+ "exp",
+ "log",
+ "exp2",
+ "log2",
+ "f2i",
+ "f2u",
+ "i2f",
+ "f2b",
+ "b2f",
+ "b2f16",
+ "i2b",
+ "b2i",
+ "u2f",
+ "i2u",
+ "u2i",
+ "d2f",
+ "f2d",
+ "f2f16",
+ "f2fmp",
+ "f162f",
+ "d2i",
+ "i2d",
+ "d2u",
+ "u2d",
+ "d2b",
+ "f162b",
+ "bitcast_i2f",
+ "bitcast_f2i",
+ "bitcast_u2f",
+ "bitcast_f2u",
+ "bitcast_u642d",
+ "bitcast_i642d",
+ "bitcast_d2u64",
+ "bitcast_d2i64",
+ "i642i",
+ "u642i",
+ "i642u",
+ "u642u",
+ "i642b",
+ "i642f",
+ "u642f",
+ "i642d",
+ "u642d",
+ "i2i64",
+ "u2i64",
+ "b2i64",
+ "f2i64",
+ "d2i64",
+ "i2u64",
+ "u2u64",
+ "f2u64",
+ "d2u64",
+ "u642i64",
+ "i642u64",
+ "trunc",
+ "ceil",
+ "floor",
+ "fract",
+ "round_even",
+ "sin",
+ "cos",
+ "atan",
+ "dFdx",
+ "dFdx_coarse",
+ "dFdx_fine",
+ "dFdy",
+ "dFdy_coarse",
+ "dFdy_fine",
+ "pack_snorm_2x16",
+ "pack_snorm_4x8",
+ "pack_unorm_2x16",
+ "pack_unorm_4x8",
+ "pack_half_2x16",
+ "unpack_snorm_2x16",
+ "unpack_snorm_4x8",
+ "unpack_unorm_2x16",
+ "unpack_unorm_4x8",
+ "unpack_half_2x16",
+ "bitfield_reverse",
+ "bit_count",
+ "find_msb",
+ "find_lsb",
+ "clz",
+ "saturate",
+ "pack_double_2x32",
+ "unpack_double_2x32",
+ "pack_sampler_2x32",
+ "pack_image_2x32",
+ "unpack_sampler_2x32",
+ "unpack_image_2x32",
+ "frexp_sig",
+ "frexp_exp",
+ "subroutine_to_int",
+ "interpolate_at_centroid",
+ "get_buffer_size",
+ "ssbo_unsized_array_length",
+ "pack_int_2x32",
+ "pack_uint_2x32",
+ "unpack_int_2x32",
+ "unpack_uint_2x32",
+ "add",
+ "sub",
+ "add_sat",
+ "sub_sat",
+ "abs_sub",
+ "avg",
+ "avg_round",
+ "mul",
+ "mul_32x16",
+ "imul_high",
+ "div",
+ "carry",
+ "borrow",
+ "mod",
+ "less",
+ "gequal",
+ "equal",
+ "nequal",
+ "all_equal",
+ "any_nequal",
+ "lshift",
+ "rshift",
+ "bit_and",
+ "bit_xor",
+ "bit_or",
+ "logic_and",
+ "logic_xor",
+ "logic_or",
+ "dot",
+ "min",
+ "max",
+ "pow",
+ "ubo_load",
+ "ldexp",
+ "vector_extract",
+ "interpolate_at_offset",
+ "interpolate_at_sample",
+ "atan2",
+ "fma",
+ "lrp",
+ "csel",
+ "bitfield_extract",
+ "vector_insert",
+ "bitfield_insert",
+ "vector",
+};
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function.cpp
new file mode 100644
index 0000000000..97262f0f4b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function.cpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "glsl_parser_extras.h"
+#include "main/errors.h"
+
+typedef enum {
+ PARAMETER_LIST_NO_MATCH,
+ PARAMETER_LIST_EXACT_MATCH,
+ PARAMETER_LIST_INEXACT_MATCH /**< Match requires implicit conversion. */
+} parameter_list_match_t;
+
+/**
+ * \brief Check if two parameter lists match.
+ *
+ * \param list_a Parameters of the function definition.
+ * \param list_b Actual parameters passed to the function.
+ * \see matching_signature()
+ */
+static parameter_list_match_t
+parameter_lists_match(_mesa_glsl_parse_state *state,
+ const exec_list *list_a, const exec_list *list_b)
+{
+ const exec_node *node_a = list_a->get_head_raw();
+ const exec_node *node_b = list_b->get_head_raw();
+
+ /* This is set to true if there is an inexact match requiring an implicit
+ * conversion. */
+ bool inexact_match = false;
+
+ for (/* empty */
+ ; !node_a->is_tail_sentinel()
+ ; node_a = node_a->next, node_b = node_b->next) {
+ /* If all of the parameters from the other parameter list have been
+ * exhausted, the lists have different length and, by definition,
+ * do not match.
+ */
+ if (node_b->is_tail_sentinel())
+ return PARAMETER_LIST_NO_MATCH;
+
+
+ const ir_variable *const param = (ir_variable *) node_a;
+ const ir_rvalue *const actual = (ir_rvalue *) node_b;
+
+ if (param->type == actual->type)
+ continue;
+
+ /* Try to find an implicit conversion from actual to param. */
+ inexact_match = true;
+ switch ((enum ir_variable_mode)(param->data.mode)) {
+ case ir_var_auto:
+ case ir_var_uniform:
+ case ir_var_shader_storage:
+ case ir_var_temporary:
+ /* These are all error conditions. It is invalid for a parameter to
+ * a function to be declared as auto (not in, out, or inout) or
+ * as uniform.
+ */
+ assert(0);
+ return PARAMETER_LIST_NO_MATCH;
+
+ case ir_var_const_in:
+ case ir_var_function_in:
+ if (!actual->type->can_implicitly_convert_to(param->type, state))
+ return PARAMETER_LIST_NO_MATCH;
+ break;
+
+ case ir_var_function_out:
+ if (!param->type->can_implicitly_convert_to(actual->type, state))
+ return PARAMETER_LIST_NO_MATCH;
+ break;
+
+ case ir_var_function_inout:
+ /* Since there are no bi-directional automatic conversions (e.g.,
+ * there is int -> float but no float -> int), inout parameters must
+ * be exact matches.
+ */
+ return PARAMETER_LIST_NO_MATCH;
+
+ default:
+ assert(false);
+ return PARAMETER_LIST_NO_MATCH;
+ }
+ }
+
+ /* If all of the parameters from the other parameter list have been
+ * exhausted, the lists have different length and, by definition, do not
+ * match.
+ */
+ if (!node_b->is_tail_sentinel())
+ return PARAMETER_LIST_NO_MATCH;
+
+ if (inexact_match)
+ return PARAMETER_LIST_INEXACT_MATCH;
+ else
+ return PARAMETER_LIST_EXACT_MATCH;
+}
+
+
+/* Classes of parameter match, sorted (mostly) best matches first.
+ * See is_better_parameter_match() below for the exceptions.
+ */
+typedef enum {
+ PARAMETER_EXACT_MATCH,
+ PARAMETER_FLOAT_TO_DOUBLE,
+ PARAMETER_INT_TO_FLOAT,
+ PARAMETER_INT_TO_DOUBLE,
+ PARAMETER_OTHER_CONVERSION,
+} parameter_match_t;
+
+
+static parameter_match_t
+get_parameter_match_type(const ir_variable *param,
+ const ir_rvalue *actual)
+{
+ const glsl_type *from_type;
+ const glsl_type *to_type;
+
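+ /* For out parameters the implicit conversion runs in the opposite
+ * direction: the value is converted from the formal parameter's type
+ * back to the actual argument's type on return.
+ */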
+ if (param->data.mode == ir_var_function_out) {
+ from_type = param->type;
+ to_type = actual->type;
+ } else {
+ from_type = actual->type;
+ to_type = param->type;
+ }
+
+ if (from_type == to_type)
+ return PARAMETER_EXACT_MATCH;
+
+ if (to_type->is_double()) {
+ if (from_type->is_float())
+ return PARAMETER_FLOAT_TO_DOUBLE;
+ return PARAMETER_INT_TO_DOUBLE;
+ }
+
+ if (to_type->is_float())
+ return PARAMETER_INT_TO_FLOAT;
+
+ /* int -> uint and any other oddball conversions */
+ return PARAMETER_OTHER_CONVERSION;
+}
+
+
+static bool
+is_better_parameter_match(parameter_match_t a_match,
+ parameter_match_t b_match)
+{
+ /* From section 6.1 of the GLSL 4.00 spec (and the ARB_gpu_shader5 spec):
+ *
+ * 1. An exact match is better than a match involving any implicit
+ * conversion.
+ *
+ * 2. A match involving an implicit conversion from float to double
+ * is better than a match involving any other implicit conversion.
+ *
+ * [XXX: Not in GLSL 4.0: Only in ARB_gpu_shader5:
+ * 3. A match involving an implicit conversion from either int or uint
+ * to float is better than a match involving an implicit conversion
+ * from either int or uint to double.]
+ *
+ * If none of the rules above apply to a particular pair of conversions,
+ * neither conversion is considered better than the other.
+ *
+ * --
+ *
+ * Notably, the int->uint conversion is *not* considered to be better
+ * or worse than int/uint->float or int/uint->double.
+ */
+
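+ /* The enum is ordered so that a smaller value is a better match; the
+ * test below keeps PARAMETER_OTHER_CONVERSION unordered with respect
+ * to the int/uint-to-float/double conversions.
+ */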
+ if (a_match >= PARAMETER_INT_TO_FLOAT && b_match == PARAMETER_OTHER_CONVERSION)
+ return false;
+
+ return a_match < b_match;
+}
+
+
+static bool
+is_best_inexact_overload(const exec_list *actual_parameters,
+ ir_function_signature **matches,
+ int num_matches,
+ ir_function_signature *sig)
+{
+ /* From section 6.1 of the GLSL 4.00 spec (and the ARB_gpu_shader5 spec):
+ *
+ * "A function definition A is considered a better
+ * match than function definition B if:
+ *
+ * * for at least one function argument, the conversion for that argument
+ * in A is better than the corresponding conversion in B; and
+ *
+ * * there is no function argument for which the conversion in B is better
+ * than the corresponding conversion in A.
+ *
+ * If a single function definition is considered a better match than every
+ * other matching function definition, it will be used. Otherwise, a
+ * semantic error occurs and the shader will fail to compile."
+ */
+ for (ir_function_signature **other = matches;
+ other < matches + num_matches; other++) {
+ if (*other == sig)
+ continue;
+
+ const exec_node *node_a = sig->parameters.get_head_raw();
+ const exec_node *node_b = (*other)->parameters.get_head_raw();
+ const exec_node *node_p = actual_parameters->get_head_raw();
+
+ bool better_for_some_parameter = false;
+
+ for (/* empty */
+ ; !node_a->is_tail_sentinel()
+ ; node_a = node_a->next,
+ node_b = node_b->next,
+ node_p = node_p->next) {
+ parameter_match_t a_match = get_parameter_match_type(
+ (const ir_variable *)node_a,
+ (const ir_rvalue *)node_p);
+ parameter_match_t b_match = get_parameter_match_type(
+ (const ir_variable *)node_b,
+ (const ir_rvalue *)node_p);
+
+ if (is_better_parameter_match(a_match, b_match))
+ better_for_some_parameter = true;
+
+ if (is_better_parameter_match(b_match, a_match))
+ return false; /* B is better for this parameter */
+ }
+
+ if (!better_for_some_parameter)
+ return false; /* A must be better than B for some parameter */
+
+ }
+
+ return true;
+}
+
+
+static ir_function_signature *
+choose_best_inexact_overload(_mesa_glsl_parse_state *state,
+ const exec_list *actual_parameters,
+ ir_function_signature **matches,
+ int num_matches)
+{
+ if (num_matches == 0)
+ return NULL;
+
+ if (num_matches == 1)
+ return *matches;
+
+ /* Without GLSL 4.0, ARB_gpu_shader5, or MESA_shader_integer_functions,
+ * there is no overload resolution among multiple inexact matches. Note
+ * that state may be NULL here if called from the linker; in that case we
+ * assume everything supported in any GLSL version is available.
+ */
+ if (!state || state->is_version(400, 0) || state->ARB_gpu_shader5_enable ||
+ state->MESA_shader_integer_functions_enable ||
+ state->EXT_shader_implicit_conversions_enable) {
+ for (ir_function_signature **sig = matches; sig < matches + num_matches; sig++) {
+ if (is_best_inexact_overload(actual_parameters, matches, num_matches, *sig))
+ return *sig;
+ }
+ }
+
+ return NULL; /* no best candidate */
+}
+
+
+ir_function_signature *
+ir_function::matching_signature(_mesa_glsl_parse_state *state,
+ const exec_list *actual_parameters,
+ bool allow_builtins)
+{
+ bool is_exact;
+ return matching_signature(state, actual_parameters, allow_builtins,
+ &is_exact);
+}
+
+ir_function_signature *
+ir_function::matching_signature(_mesa_glsl_parse_state *state,
+ const exec_list *actual_parameters,
+ bool allow_builtins,
+ bool *is_exact)
+{
+ ir_function_signature **inexact_matches = NULL;
+ ir_function_signature **inexact_matches_temp;
+ ir_function_signature *match = NULL;
+ int num_inexact_matches = 0;
+
+ /* From page 42 (page 49 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "If an exact match is found, the other signatures are ignored, and
+ * the exact match is used. Otherwise, if no exact match is found, then
+ * the implicit conversions in Section 4.1.10 "Implicit Conversions" will
+ * be applied to the calling arguments if this can make their types match
+ * a signature. In this case, it is a semantic error if there are
+ * multiple ways to apply these conversions to the actual arguments of a
+ * call such that the call can be made to match multiple signatures."
+ */
+ foreach_in_list(ir_function_signature, sig, &this->signatures) {
+ /* Skip over any built-ins that aren't available in this shader. */
+ if (sig->is_builtin() && (!allow_builtins ||
+ !sig->is_builtin_available(state)))
+ continue;
+
+ switch (parameter_lists_match(state, & sig->parameters, actual_parameters)) {
+ case PARAMETER_LIST_EXACT_MATCH:
+ *is_exact = true;
+ free(inexact_matches);
+ return sig;
+ case PARAMETER_LIST_INEXACT_MATCH:
+ inexact_matches_temp = (ir_function_signature **)
+ realloc(inexact_matches,
+ sizeof(*inexact_matches) *
+ (num_inexact_matches + 1));
+ if (inexact_matches_temp == NULL) {
+ _mesa_error_no_memory(__func__);
+ free(inexact_matches);
+ return NULL;
+ }
+ inexact_matches = inexact_matches_temp;
+ inexact_matches[num_inexact_matches++] = sig;
+ continue;
+ case PARAMETER_LIST_NO_MATCH:
+ continue;
+ default:
+ assert(false);
+ return NULL;
+ }
+ }
+
+ /* There is no exact match (we would have returned it by now). If there
+ * are multiple inexact matches, the call is ambiguous, which is an error.
+ *
+ * FINISHME: Report a decent error. Returning NULL will likely result in
+ * FINISHME: a "no matching signature" error; it should report that the
+ * FINISHME: call is ambiguous. But reporting errors from here is hard.
+ */
+ *is_exact = false;
+
+ match = choose_best_inexact_overload(state, actual_parameters,
+ inexact_matches, num_inexact_matches);
+
+ free(inexact_matches);
+ return match;
+}
+
+
+static bool
+parameter_lists_match_exact(const exec_list *list_a, const exec_list *list_b)
+{
+ const exec_node *node_a = list_a->get_head_raw();
+ const exec_node *node_b = list_b->get_head_raw();
+
+ for (/* empty */
+ ; !node_a->is_tail_sentinel() && !node_b->is_tail_sentinel()
+ ; node_a = node_a->next, node_b = node_b->next) {
+ ir_variable *a = (ir_variable *) node_a;
+ ir_variable *b = (ir_variable *) node_b;
+
+ /* If the types of the parameters do not match, the parameter lists
+ * are different.
+ */
+ if (a->type != b->type)
+ return false;
+ }
+
+ /* Unless both lists are exhausted, they differ in length and, by
+ * definition, do not match.
+ */
+ return (node_a->is_tail_sentinel() == node_b->is_tail_sentinel());
+}
+
+ir_function_signature *
+ir_function::exact_matching_signature(_mesa_glsl_parse_state *state,
+ const exec_list *actual_parameters)
+{
+ foreach_in_list(ir_function_signature, sig, &this->signatures) {
+ /* Skip over any built-ins that aren't available in this shader. */
+ if (sig->is_builtin() && !sig->is_builtin_available(state))
+ continue;
+
+ if (parameter_lists_match_exact(&sig->parameters, actual_parameters))
+ return sig;
+ }
+ return NULL;
+}
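
To make the two-phase lookup above concrete, here is a minimal, self-contained C++ sketch of the same rule (all types and names are hypothetical, not part of this file): an exact type match wins outright; otherwise every signature reachable through implicit conversions is collected, and the call succeeds only when that set is unambiguous. The tie-breaking that choose_best_inexact_overload() performs for multiple candidates is deliberately omitted here.

#include <cstddef>
#include <optional>
#include <string>
#include <vector>

enum class match { exact, inexact, none };

/* Compare one signature's parameter types against the argument types,
 * modelling int -> float as the only implicit conversion. */
static match params_match(const std::vector<std::string> &sig,
                          const std::vector<std::string> &args)
{
   if (sig.size() != args.size())
      return match::none;
   bool exact = true;
   for (std::size_t i = 0; i < sig.size(); i++) {
      if (sig[i] == args[i])
         continue;
      if (sig[i] == "float" && args[i] == "int") {
         exact = false;  /* usable only via an implicit conversion */
         continue;
      }
      return match::none;
   }
   return exact ? match::exact : match::inexact;
}

/* Return the index of the matching signature, if any. */
static std::optional<std::size_t>
find_signature(const std::vector<std::vector<std::string>> &sigs,
               const std::vector<std::string> &args)
{
   std::vector<std::size_t> inexact;
   for (std::size_t i = 0; i < sigs.size(); i++) {
      switch (params_match(sigs[i], args)) {
      case match::exact:   return i;             /* exact match wins */
      case match::inexact: inexact.push_back(i); break;
      case match::none:    break;
      }
   }
   /* As in matching_signature() above, several inexact candidates make
    * the call ambiguous; this toy model simply reports no match. */
   if (inexact.size() == 1)
      return inexact[0];
   return std::nullopt;
}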
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp
new file mode 100644
index 0000000000..3b1d15f80f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_function_can_inline.cpp
+ *
+ * Determines if we can inline a function call using ir_function_inlining.cpp.
+ *
+ * The primary restriction is that the function may only return as its last
+ * instruction.  lower_jumps.cpp can lower return statements that appear
+ * earlier in the function to other control flow in order to satisfy this
+ * restriction.
+ */
+
+#include "ir.h"
+
+class ir_function_can_inline_visitor : public ir_hierarchical_visitor {
+public:
+ ir_function_can_inline_visitor()
+ {
+ this->num_returns = 0;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_return *);
+
+ int num_returns;
+};
+
+ir_visitor_status
+ir_function_can_inline_visitor::visit_enter(ir_return *ir)
+{
+ (void) ir;
+ this->num_returns++;
+ return visit_continue;
+}
+
+bool
+can_inline(ir_call *call)
+{
+ ir_function_can_inline_visitor v;
+ const ir_function_signature *callee = call->callee;
+ if (!callee->is_defined)
+ return false;
+
+ v.run((exec_list *) &callee->body);
+
+ /* If the function is empty (no last instruction) or does not end with a
+ * return statement, we need to count the implicit return.
+ */
+ ir_instruction *last = (ir_instruction *)callee->body.get_tail();
+ if (last == NULL || !last->as_return())
+ v.num_returns++;
+
+ return v.num_returns == 1;
+}
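
The criterion implemented above can be restated as: a body is inlinable when it contains exactly one return, counting the implicit return at the end of a body whose last instruction is not an explicit return. A flat (non-recursive) sketch of that rule, with hypothetical types:

#include <vector>

enum class inst { assign, if_stmt, ret };   /* toy instruction tags */

static bool can_inline_toy(const std::vector<inst> &body)
{
   int num_returns = 0;
   for (inst i : body)
      if (i == inst::ret)
         num_returns++;

   /* An empty body, or one that does not end in a return, has an
    * implicit return that must be counted as well. */
   if (body.empty() || body.back() != inst::ret)
      num_returns++;

   return num_returns == 1;
}

Unlike the visitor above, this sketch does not descend into nested control flow; the real pass must count returns anywhere in the body, which is why it uses ir_hierarchical_visitor rather than a flat loop.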
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp
new file mode 100644
index 0000000000..4a774f666c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp
@@ -0,0 +1,360 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_function_detect_recursion.cpp
+ * Determine whether a shader contains static recursion.
+ *
+ * Consider the (possibly disjoint) graph of function calls in a shader. If a
+ * program contains recursion, this graph will contain a cycle. If a function
+ * is part of a cycle, it will have a caller and it will have a callee (it
+ * calls another function).
+ *
+ * To detect recursion, the function call graph is constructed. The graph is
+ * repeatedly reduced by removing any function that either has no callees
+ * (leaf functions) or has no caller. Eventually the only functions that
+ * remain will be the functions in the cycles.
+ *
+ * The GLSL spec is a bit wishy-washy about recursion.
+ *
+ * From page 39 (page 45 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "Behavior is undefined if recursion is used. Recursion means having any
+ * function appearing more than once at any one time in the run-time stack
+ * of function calls. That is, a function may not call itself either
+ * directly or indirectly. Compilers may give diagnostic messages when
+ * this is detectable at compile time, but not all such cases can be
+ * detected at compile time."
+ *
+ * From page 79 (page 85 of the PDF):
+ *
+ * "22) Should recursion be supported?
+ *
+ * DISCUSSION: Probably not necessary, but another example of limiting
+ * the language based on how it would directly map to hardware. One
+ * thought is that recursion would benefit ray tracing shaders. On the
+ * other hand, many recursion operations can also be implemented with the
+ * user managing the recursion through arrays. RenderMan doesn't support
+ * recursion. This could be added at a later date, if it proved to be
+ * necessary.
+ *
+ * RESOLVED on September 10, 2002: Implementations are not required to
+ * support recursion.
+ *
+ * CLOSED on September 10, 2002."
+ *
+ * From page 79 (page 85 of the PDF):
+ *
+ * "56) Is it an error for an implementation to support recursion if the
+ * specification says recursion is not supported?
+ *
+ * ADDED on September 10, 2002.
+ *
+ * DISCUSSION: This issues is related to Issue (22). If we say that
+ * recursion (or some other piece of functionality) is not supported, is
+ * it an error for an implementation to support it? Perhaps the
+ * specification should remain silent on these kind of things so that they
+ * could be gracefully added later as an extension or as part of the
+ * standard.
+ *
+ * RESOLUTION: Languages, in general, have programs that are not
+ * well-formed in ways a compiler cannot detect. Portability is only
+ * ensured for well-formed programs. Detecting recursion is an example of
+ * this. The language will say a well-formed program may not recurse, but
+ * compilers are not forced to detect that recursion may happen.
+ *
+ * CLOSED: November 29, 2002."
+ *
+ * In GLSL 1.10 the behavior of recursion is undefined. Compilers don't have
+ * to reject shaders (at compile-time or link-time) that contain recursion.
+ * Instead they could work, or crash, or kill a kitten.
+ *
+ * From page 44 (page 50 of the PDF) of the GLSL 1.20 spec:
+ *
+ * "Recursion is not allowed, not even statically. Static recursion is
+ * present if the static function call graph of the program contains
+ * cycles."
+ *
+ * This language clears things up a bit, but it still leaves a lot of
+ * questions unanswered.
+ *
+ * - Is the error generated at compile-time or link-time?
+ *
+ * - Is it an error to have a recursive function that is never statically
+ * called by main or any function called directly or indirectly by main?
+ * Technically speaking, such a function is not in the "static function
+ * call graph of the program" at all.
+ *
+ * \bug
+ * If a shader has multiple cycles, this algorithm may erroneously complain
+ * about functions that aren't in any cycle, but are in the part of the call
+ * tree that connects them. For example, if the call graph consists of a
+ * cycle between A and B, and a cycle between D and E, and B also calls C
+ * which calls D, then this algorithm will report C as a function which "has
+ * static recursion" even though it is not part of any cycle.
+ *
+ * A better algorithm for cycle detection that doesn't have this drawback can
+ * be found here:
+ *
+ * http://en.wikipedia.org/wiki/Tarjan%E2%80%99s_strongly_connected_components_algorithm
+ *
+ * \author Ian Romanick <ian.d.romanick@intel.com>
+ */
+#include "ir.h"
+#include "glsl_parser_extras.h"
+#include "linker.h"
+#include "util/hash_table.h"
+#include "program.h"
+
+namespace {
+
+struct call_node : public exec_node {
+ class function *func;
+};
+
+class function {
+public:
+ function(ir_function_signature *sig)
+ : sig(sig)
+ {
+ /* empty */
+ }
+
+ DECLARE_RALLOC_CXX_OPERATORS(function)
+
+ ir_function_signature *sig;
+
+ /** List of functions called by this function. */
+ exec_list callees;
+
+ /** List of functions that call this function. */
+ exec_list callers;
+};
+
+class has_recursion_visitor : public ir_hierarchical_visitor {
+public:
+ has_recursion_visitor()
+ : current(NULL)
+ {
+ progress = false;
+ this->mem_ctx = ralloc_context(NULL);
+ this->function_hash = _mesa_pointer_hash_table_create(NULL);
+ }
+
+ ~has_recursion_visitor()
+ {
+ _mesa_hash_table_destroy(this->function_hash, NULL);
+ ralloc_free(this->mem_ctx);
+ }
+
+ function *get_function(ir_function_signature *sig)
+ {
+ function *f;
+ hash_entry *entry = _mesa_hash_table_search(this->function_hash, sig);
+ if (entry == NULL) {
+ f = new(mem_ctx) function(sig);
+ _mesa_hash_table_insert(this->function_hash, sig, f);
+ } else {
+ f = (function *) entry->data;
+ }
+
+ return f;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_function_signature *sig)
+ {
+ this->current = this->get_function(sig);
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_function_signature *sig)
+ {
+ (void) sig;
+ this->current = NULL;
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_call *call)
+ {
+ /* At global scope this->current will be NULL. Since there is no way to
+ * call global scope, it can never be part of a cycle. Don't bother
+ * adding calls from global scope to the graph.
+ */
+ if (this->current == NULL)
+ return visit_continue;
+
+ function *const target = this->get_function(call->callee);
+
+ /* Create a link from the caller to the callee.
+ */
+ call_node *node = new(mem_ctx) call_node;
+ node->func = target;
+ this->current->callees.push_tail(node);
+
+ /* Create a link from the callee to the caller.
+ */
+ node = new(mem_ctx) call_node;
+ node->func = this->current;
+ target->callers.push_tail(node);
+ return visit_continue;
+ }
+
+ function *current;
+ struct hash_table *function_hash;
+ void *mem_ctx;
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+static void
+destroy_links(exec_list *list, function *f)
+{
+ foreach_in_list_safe(call_node, node, list) {
+      /* If this is the right function, remove it.  Note that the loop cannot
+       * terminate early: there can be multiple links to a function if it
+       * calls, or is called by, the same function multiple times.
+ */
+ if (node->func == f)
+ node->remove();
+ }
+}
+
+
+/**
+ * Remove a function if it has no incoming or no outgoing links
+ */
+static void
+remove_unlinked_functions(const void *key, void *data, void *closure)
+{
+ has_recursion_visitor *visitor = (has_recursion_visitor *) closure;
+ function *f = (function *) data;
+
+ if (f->callers.is_empty() || f->callees.is_empty()) {
+ while (!f->callers.is_empty()) {
+ struct call_node *n = (struct call_node *) f->callers.pop_head();
+ destroy_links(& n->func->callees, f);
+ }
+
+ while (!f->callees.is_empty()) {
+ struct call_node *n = (struct call_node *) f->callees.pop_head();
+ destroy_links(& n->func->callers, f);
+ }
+
+ hash_entry *entry = _mesa_hash_table_search(visitor->function_hash, key);
+ _mesa_hash_table_remove(visitor->function_hash, entry);
+ visitor->progress = true;
+ }
+}
+
+
+static void
+emit_errors_unlinked(const void *key, void *data, void *closure)
+{
+ struct _mesa_glsl_parse_state *state =
+ (struct _mesa_glsl_parse_state *) closure;
+ function *f = (function *) data;
+ YYLTYPE loc;
+
+ (void) key;
+
+ char *proto = prototype_string(f->sig->return_type,
+ f->sig->function_name(),
+ &f->sig->parameters);
+
+ memset(&loc, 0, sizeof(loc));
+ _mesa_glsl_error(&loc, state,
+ "function `%s' has static recursion",
+ proto);
+ ralloc_free(proto);
+}
+
+
+static void
+emit_errors_linked(const void *key, void *data, void *closure)
+{
+ struct gl_shader_program *prog =
+ (struct gl_shader_program *) closure;
+ function *f = (function *) data;
+
+ (void) key;
+
+ char *proto = prototype_string(f->sig->return_type,
+ f->sig->function_name(),
+ &f->sig->parameters);
+
+ linker_error(prog, "function `%s' has static recursion.\n", proto);
+ ralloc_free(proto);
+}
+
+
+void
+detect_recursion_unlinked(struct _mesa_glsl_parse_state *state,
+ exec_list *instructions)
+{
+ has_recursion_visitor v;
+
+ /* Collect all of the information about which functions call which other
+ * functions.
+ */
+ v.run(instructions);
+
+ /* Remove from the set all of the functions that either have no caller or
+ * call no other functions. Repeat until no functions are removed.
+ */
+ do {
+ v.progress = false;
+ hash_table_call_foreach(v.function_hash, remove_unlinked_functions, & v);
+ } while (v.progress);
+
+
+ /* At this point any functions still in the hash must be part of a cycle.
+ */
+ hash_table_call_foreach(v.function_hash, emit_errors_unlinked, state);
+}
+
+
+void
+detect_recursion_linked(struct gl_shader_program *prog,
+ exec_list *instructions)
+{
+ has_recursion_visitor v;
+
+ /* Collect all of the information about which functions call which other
+ * functions.
+ */
+ v.run(instructions);
+
+ /* Remove from the set all of the functions that either have no caller or
+ * call no other functions. Repeat until no functions are removed.
+ */
+ do {
+ v.progress = false;
+ hash_table_call_foreach(v.function_hash, remove_unlinked_functions, & v);
+ } while (v.progress);
+
+
+ /* At this point any functions still in the hash must be part of a cycle.
+ */
+ hash_table_call_foreach(v.function_hash, emit_errors_linked, prog);
+}
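
The reduction described in the file comment can be stated independently of the IR. The following self-contained sketch (hypothetical container types; the real code keeps a hash table of function nodes with caller/callee lists) repeatedly deletes every function with no callers or no callees; whatever survives lies on, or between, call cycles:

#include <iterator>
#include <map>
#include <set>
#include <string>

/* caller -> set of callees */
using call_graph = std::map<std::string, std::set<std::string>>;

static std::set<std::string> functions_in_cycles(call_graph g)
{
   bool progress;
   do {
      progress = false;

      /* Which functions are called by someone still in the graph? */
      std::set<std::string> called;
      for (const auto &[fn, callees] : g)
         for (const auto &c : callees)
            called.insert(c);

      /* Remove every leaf (no callees) and every uncalled function. */
      for (auto it = g.begin(); it != g.end();) {
         if (it->second.empty() || !called.count(it->first)) {
            it = g.erase(it);
            progress = true;
         } else {
            ++it;
         }
      }

      /* Drop edges to removed functions so new leaves can appear. */
      for (auto &[fn, callees] : g)
         for (auto it = callees.begin(); it != callees.end();)
            it = g.count(*it) ? std::next(it) : callees.erase(it);
   } while (progress);

   std::set<std::string> result;
   for (const auto &[fn, callees] : g)
      result.insert(fn);
   return result;
}

As the \bug note explains, functions that merely connect two cycles also survive this reduction; Tarjan's strongly-connected-components algorithm would avoid that false positive.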
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_inlining.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_inlining.h
new file mode 100644
index 0000000000..2af33fac66
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_function_inlining.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_function_inlining.h
+ *
+ * Replaces calls to functions with the body of the function.
+ */
+
+#ifndef GLSL_IR_FUNCTION_INLINING_H
+#define GLSL_IR_FUNCTION_INLINING_H
+
+bool can_inline(ir_call *call);
+
+#endif /* GLSL_IR_FUNCTION_INLINING_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp
new file mode 100644
index 0000000000..793290bbc0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp
@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "ir_hierarchical_visitor.h"
+
+ir_hierarchical_visitor::ir_hierarchical_visitor()
+{
+ this->base_ir = NULL;
+ this->callback_enter = NULL;
+ this->callback_leave = NULL;
+ this->data_enter = NULL;
+ this->data_leave = NULL;
+ this->in_assignee = false;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_rvalue *ir)
+{
+ call_enter_leave_callbacks(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_variable *ir)
+{
+ call_enter_leave_callbacks(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_constant *ir)
+{
+ call_enter_leave_callbacks(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_loop_jump *ir)
+{
+ call_enter_leave_callbacks(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_precision_statement *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_typedecl_statement *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_dereference_variable *ir)
+{
+ call_enter_leave_callbacks(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit(ir_barrier *ir)
+{
+ call_enter_leave_callbacks(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_loop *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_loop *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_function_signature *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_function_signature *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_function *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_function *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_expression *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_expression *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_texture *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_texture *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_swizzle *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_swizzle *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_dereference_array *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_dereference_array *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_dereference_record *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_dereference_record *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_assignment *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_assignment *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_call *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_call *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_return *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_return *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_discard *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_discard *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_demote *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_demote *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_if *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_if *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_emit_vertex *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_emit_vertex *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_enter(ir_end_primitive *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_hierarchical_visitor::visit_leave(ir_end_primitive *ir)
+{
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+
+ return visit_continue;
+}
+
+void
+ir_hierarchical_visitor::run(exec_list *instructions)
+{
+ visit_list_elements(this, instructions);
+}
+
+void
+ir_hierarchical_visitor::call_enter_leave_callbacks(class ir_instruction *ir)
+{
+ if (this->callback_enter != NULL)
+ this->callback_enter(ir, this->data_enter);
+ if (this->callback_leave != NULL)
+ this->callback_leave(ir, this->data_leave);
+}
+
+void
+visit_tree(ir_instruction *ir,
+ void (*callback_enter)(class ir_instruction *ir, void *data),
+ void *data_enter,
+ void (*callback_leave)(class ir_instruction *ir, void *data),
+ void *data_leave)
+{
+ ir_hierarchical_visitor v;
+
+ v.callback_enter = callback_enter;
+ v.callback_leave = callback_leave;
+ v.data_enter = data_enter;
+ v.data_leave = data_leave;
+
+ ir->accept(&v);
+}
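
Because the leave callback and its data pointer default to NULL in the header, visit_tree() can be driven with just an enter callback. A small usage sketch (the counting callback and helper are hypothetical):

#include "ir.h"
#include "ir_hierarchical_visitor.h"

static void count_node(ir_instruction *ir, void *data)
{
   (void) ir;
   (*(unsigned *) data)++;
}

static unsigned count_nodes(ir_instruction *root)
{
   unsigned n = 0;
   visit_tree(root, count_node, &n);   /* leave callback defaults to NULL */
   return n;
}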
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h
new file mode 100644
index 0000000000..8f9717442c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h
@@ -0,0 +1,218 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_HIERARCHICAL_VISITOR_H
+#define IR_HIERARCHICAL_VISITOR_H
+
+/**
+ * Enumeration values returned by visit methods to guide processing
+ */
+enum ir_visitor_status {
+ visit_continue, /**< Continue visiting as normal. */
+ visit_continue_with_parent, /**< Don't visit siblings, continue w/parent. */
+ visit_stop /**< Stop visiting immediately. */
+};
+
+
+#ifdef __cplusplus
+/**
+ * Base class of hierarchical visitors of IR instruction trees
+ *
+ * Hierarchical visitors differ from traditional visitors in a couple of
+ * important ways. Rather than having a single \c visit method for each
+ * subclass in the composite, there are three kinds of visit methods.
+ * Leaf-node classes have a traditional \c visit method. Internal-node
+ * classes have a \c visit_enter method, which is invoked just before
+ * processing child nodes, and a \c visit_leave method which is invoked just
+ * after processing child nodes.
+ *
+ * In addition, each visit method and the \c accept methods in the composite
+ * have a return value which guides the navigation. Any of the visit methods
+ * can choose to continue visiting the tree as normal (by returning \c
+ * visit_continue), terminate visiting any further nodes immediately (by
+ * returning \c visit_stop), or stop visiting sibling nodes (by returning \c
+ * visit_continue_with_parent).
+ *
+ * These two changes combine to allow navigation of children to be implemented
+ * in the composite's \c accept method. The \c accept method for a leaf-node
+ * class will simply call the \c visit method, as usual, and pass its return
+ * value on. The \c accept method for internal-node classes will call the \c
+ * visit_enter method, call the \c accept method of each child node, and,
+ * finally, call the \c visit_leave method. If any of these return a value
+ * other than \c visit_continue, the correct action must be taken.
+ *
+ * The final benefit is that the hierarchical visitor base class need not be
+ * abstract. Default implementations of every \c visit, \c visit_enter, and
+ * \c visit_leave method can be provided. By default each of these methods
+ * simply returns \c visit_continue. This allows a significant reduction in
+ * derived class code.
+ *
+ * For more information about hierarchical visitors, see:
+ *
+ * http://c2.com/cgi/wiki?HierarchicalVisitorPattern
+ * http://c2.com/cgi/wiki?HierarchicalVisitorDiscussion
+ */
+
+class ir_hierarchical_visitor {
+public:
+ ir_hierarchical_visitor();
+
+ /**
+ * \name Visit methods for leaf-node classes
+ */
+ /*@{*/
+ virtual ir_visitor_status visit(class ir_rvalue *);
+ virtual ir_visitor_status visit(class ir_variable *);
+ virtual ir_visitor_status visit(class ir_constant *);
+ virtual ir_visitor_status visit(class ir_loop_jump *);
+ virtual ir_visitor_status visit(class ir_precision_statement *);
+ virtual ir_visitor_status visit(class ir_typedecl_statement *);
+ virtual ir_visitor_status visit(class ir_barrier *);
+
+ /**
+ * ir_dereference_variable isn't technically a leaf, but it is treated as a
+ * leaf here for a couple reasons. By not automatically visiting the one
+ * child ir_variable node from the ir_dereference_variable, ir_variable
+ * nodes can always be handled as variable declarations. Code that used
+ * non-hierarchical visitors had to set an "in a dereference" flag to
+ * determine how to handle an ir_variable. By forcing the visitor to
+ * handle the ir_variable within the ir_dereference_variable visitor, this
+ * kludge can be avoided.
+ *
+ * In addition, I can envision no use for having separate enter and leave
+ * methods. Anything that could be done in the enter and leave methods
+    * could just as well be done in the visit method.
+ */
+ virtual ir_visitor_status visit(class ir_dereference_variable *);
+ /*@}*/
+
+ /**
+ * \name Visit methods for internal-node classes
+ */
+ /*@{*/
+ virtual ir_visitor_status visit_enter(class ir_loop *);
+ virtual ir_visitor_status visit_leave(class ir_loop *);
+ virtual ir_visitor_status visit_enter(class ir_function_signature *);
+ virtual ir_visitor_status visit_leave(class ir_function_signature *);
+ virtual ir_visitor_status visit_enter(class ir_function *);
+ virtual ir_visitor_status visit_leave(class ir_function *);
+ virtual ir_visitor_status visit_enter(class ir_expression *);
+ virtual ir_visitor_status visit_leave(class ir_expression *);
+ virtual ir_visitor_status visit_enter(class ir_texture *);
+ virtual ir_visitor_status visit_leave(class ir_texture *);
+ virtual ir_visitor_status visit_enter(class ir_swizzle *);
+ virtual ir_visitor_status visit_leave(class ir_swizzle *);
+ virtual ir_visitor_status visit_enter(class ir_dereference_array *);
+ virtual ir_visitor_status visit_leave(class ir_dereference_array *);
+ virtual ir_visitor_status visit_enter(class ir_dereference_record *);
+ virtual ir_visitor_status visit_leave(class ir_dereference_record *);
+ virtual ir_visitor_status visit_enter(class ir_assignment *);
+ virtual ir_visitor_status visit_leave(class ir_assignment *);
+ virtual ir_visitor_status visit_enter(class ir_call *);
+ virtual ir_visitor_status visit_leave(class ir_call *);
+ virtual ir_visitor_status visit_enter(class ir_return *);
+ virtual ir_visitor_status visit_leave(class ir_return *);
+ virtual ir_visitor_status visit_enter(class ir_discard *);
+ virtual ir_visitor_status visit_leave(class ir_discard *);
+ virtual ir_visitor_status visit_enter(class ir_demote *);
+ virtual ir_visitor_status visit_leave(class ir_demote *);
+ virtual ir_visitor_status visit_enter(class ir_if *);
+ virtual ir_visitor_status visit_leave(class ir_if *);
+ virtual ir_visitor_status visit_enter(class ir_emit_vertex *);
+ virtual ir_visitor_status visit_leave(class ir_emit_vertex *);
+ virtual ir_visitor_status visit_enter(class ir_end_primitive *);
+ virtual ir_visitor_status visit_leave(class ir_end_primitive *);
+ /*@}*/
+
+
+ /**
+ * Utility function to process a linked list of instructions with a visitor
+ */
+ void run(struct exec_list *instructions);
+
+ /**
+ * Utility function to call both the leave and enter callback functions.
+ * This is used for leaf nodes.
+ */
+ void call_enter_leave_callbacks(class ir_instruction *ir);
+
+ /* Some visitors may need to insert new variable declarations and
+ * assignments for portions of a subtree, which means they need a
+ * pointer to the current instruction in the stream, not just their
+ * node in the tree rooted at that instruction.
+ *
+ * This is implemented by visit_list_elements -- if the visitor is
+ * not called by it, nothing good will happen.
+ */
+ class ir_instruction *base_ir;
+
+ /**
+ * Callback function that is invoked on entry to each node visited.
+ *
+ * \warning
+ * Visitor classes derived from \c ir_hierarchical_visitor \b may \b not
+ * invoke this function. This can be used, for example, to cause the
+ * callback to be invoked on every node type except one.
+ */
+ void (*callback_enter)(class ir_instruction *ir, void *data);
+
+ /**
+ * Callback function that is invoked on exit of each node visited.
+ *
+ * \warning
+ * Visitor classes derived from \c ir_hierarchical_visitor \b may \b not
+ * invoke this function. This can be used, for example, to cause the
+ * callback to be invoked on every node type except one.
+ */
+ void (*callback_leave)(class ir_instruction *ir, void *data);
+
+ /**
+ * Extra data parameter passed to the per-node callback_enter function
+ */
+ void *data_enter;
+
+ /**
+ * Extra data parameter passed to the per-node callback_leave function
+ */
+ void *data_leave;
+
+ /**
+ * Currently in the LHS of an assignment?
+ *
+ * This is set and cleared by the \c ir_assignment::accept method.
+ */
+ bool in_assignee;
+};
+
+void visit_tree(ir_instruction *ir,
+ void (*callback_enter)(class ir_instruction *ir, void *data),
+ void *data_enter,
+ void (*callback_leave)(class ir_instruction *ir, void *data) = NULL,
+ void *data_leave = NULL);
+
+ir_visitor_status visit_list_elements(ir_hierarchical_visitor *v, exec_list *l,
+ bool statement_list = true);
+#endif /* __cplusplus */
+
+#endif /* IR_HIERARCHICAL_VISITOR_H */
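
As the class comment says, a derived visitor overrides only the methods it cares about; every other method defaults to returning visit_continue. A minimal sketch of that usage (the counting visitor is hypothetical, but it follows the same shape as the can_inline visitor earlier in this patch):

#include "ir.h"
#include "ir_hierarchical_visitor.h"

class call_counter_visitor : public ir_hierarchical_visitor {
public:
   call_counter_visitor() : num_calls(0) {}

   virtual ir_visitor_status visit_enter(ir_call *ir)
   {
      (void) ir;
      num_calls++;
      /* Skip the call's children (return deref, actual parameters). */
      return visit_continue_with_parent;
   }

   unsigned num_calls;
};

static unsigned count_calls(exec_list *instructions)
{
   call_counter_visitor v;
   v.run(instructions);
   return v.num_calls;
}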
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp
new file mode 100644
index 0000000000..cfccb2fe9f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp
@@ -0,0 +1,466 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+
+/**
+ * \file ir_hv_accept.cpp
+ * Implementations of all hierarchical visitor accept methods for IR
+ * instructions.
+ */
+
+/**
+ * Process a list of nodes using a hierarchical visitor.
+ *
+ * If statement_list is true (the default), this is a list of statements, so
+ * v->base_ir will be set to point to each statement just before iterating
+ * over it, and restored after iteration is complete. If statement_list is
+ * false, this is a list that appears inside a statement (e.g. a parameter
+ * list), so v->base_ir will be left alone.
+ *
+ * \warning
+ * This function will operate correctly if a node being processed is removed
+ * from the list. However, if nodes are added to the list after the node being
+ * processed, some of the added nodes may not be processed.
+ */
+ir_visitor_status
+visit_list_elements(ir_hierarchical_visitor *v, exec_list *l,
+ bool statement_list)
+{
+ ir_instruction *prev_base_ir = v->base_ir;
+
+ foreach_in_list_safe(ir_instruction, ir, l) {
+ if (statement_list)
+ v->base_ir = ir;
+ ir_visitor_status s = ir->accept(v);
+
+ if (s != visit_continue)
+ return s;
+ }
+ if (statement_list)
+ v->base_ir = prev_base_ir;
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+ir_rvalue::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+
+ir_visitor_status
+ir_variable::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+
+ir_visitor_status
+ir_loop::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = visit_list_elements(v, &this->body_instructions);
+ if (s == visit_stop)
+ return s;
+
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_loop_jump::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+
+ir_visitor_status
+ir_function_signature::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = visit_list_elements(v, &this->parameters);
+ if (s == visit_stop)
+ return s;
+
+ s = visit_list_elements(v, &this->body);
+ return (s == visit_stop) ? s : v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_function::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = visit_list_elements(v, &this->signatures, false);
+ return (s == visit_stop) ? s : v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_expression::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ for (unsigned i = 0; i < this->num_operands; i++) {
+ switch (this->operands[i]->accept(v)) {
+ case visit_continue:
+ break;
+
+ case visit_continue_with_parent:
+ // I wish for Java's labeled break-statement here.
+ goto done;
+
+ case visit_stop:
+ return visit_stop;
+ }
+ }
+
+done:
+ return v->visit_leave(this);
+}
+
+ir_visitor_status
+ir_texture::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->sampler->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ if (this->coordinate) {
+ s = this->coordinate->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ if (this->projector) {
+ s = this->projector->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ if (this->shadow_comparator) {
+ s = this->shadow_comparator->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ if (this->offset) {
+ s = this->offset->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ switch (this->op) {
+ case ir_tex:
+ case ir_lod:
+ case ir_query_levels:
+ case ir_texture_samples:
+ case ir_samples_identical:
+ break;
+ case ir_txb:
+ s = this->lod_info.bias->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ break;
+ case ir_txl:
+ case ir_txf:
+ case ir_txs:
+ s = this->lod_info.lod->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ break;
+ case ir_txf_ms:
+ s = this->lod_info.sample_index->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ break;
+ case ir_txd:
+ s = this->lod_info.grad.dPdx->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->lod_info.grad.dPdy->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ break;
+ case ir_tg4:
+ s = this->lod_info.component->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ break;
+ }
+
+ assert(s == visit_continue);
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_swizzle::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->val->accept(v);
+ return (s == visit_stop) ? s : v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_dereference_variable::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+
+ir_visitor_status
+ir_dereference_array::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ /* The array index is not the target of the assignment, so clear the
+ * 'in_assignee' flag. Restore it after returning from the array index.
+ */
+ const bool was_in_assignee = v->in_assignee;
+ v->in_assignee = false;
+ s = this->array_index->accept(v);
+ v->in_assignee = was_in_assignee;
+
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->array->accept(v);
+ return (s == visit_stop) ? s : v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_dereference_record::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->record->accept(v);
+ return (s == visit_stop) ? s : v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_assignment::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ v->in_assignee = true;
+ s = this->lhs->accept(v);
+ v->in_assignee = false;
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->rhs->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ if (this->condition)
+ s = this->condition->accept(v);
+
+ return (s == visit_stop) ? s : v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_constant::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+
+ir_visitor_status
+ir_call::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ if (this->return_deref != NULL) {
+ v->in_assignee = true;
+ s = this->return_deref->accept(v);
+ v->in_assignee = false;
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ s = visit_list_elements(v, &this->actual_parameters, false);
+ if (s == visit_stop)
+ return s;
+
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_return::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ ir_rvalue *val = this->get_value();
+ if (val) {
+ s = val->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_discard::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ if (this->condition != NULL) {
+ s = this->condition->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+ }
+
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_demote::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_if::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->condition->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ if (s != visit_continue_with_parent) {
+ s = visit_list_elements(v, &this->then_instructions);
+ if (s == visit_stop)
+ return s;
+ }
+
+ if (s != visit_continue_with_parent) {
+ s = visit_list_elements(v, &this->else_instructions);
+ if (s == visit_stop)
+ return s;
+ }
+
+ return v->visit_leave(this);
+}
+
+ir_visitor_status
+ir_precision_statement::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+ir_visitor_status
+ir_typedecl_statement::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
+
+ir_visitor_status
+ir_emit_vertex::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->stream->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ assert(s == visit_continue);
+ return v->visit_leave(this);
+}
+
+
+ir_visitor_status
+ir_end_primitive::accept(ir_hierarchical_visitor *v)
+{
+ ir_visitor_status s = v->visit_enter(this);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ s = this->stream->accept(v);
+ if (s != visit_continue)
+ return (s == visit_continue_with_parent) ? visit_continue : s;
+
+ assert(s == visit_continue);
+ return v->visit_leave(this);
+}
+
+ir_visitor_status
+ir_barrier::accept(ir_hierarchical_visitor *v)
+{
+ return v->visit(this);
+}
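
Every internal node above repeats the same status translation: visit_continue_with_parent returned by a child is absorbed (mapped to visit_continue) so traversal resumes at the parent, while visit_stop propagates unchanged. Written once as a hedged helper (hypothetical; the real code inlines the ternary at every site):

#include "ir_hierarchical_visitor.h"

/* Map a child's status to what the parent's accept() should return:
 * continue_with_parent is consumed here, stop keeps propagating.
 * A caller writes: if (s != visit_continue) return absorb_status(s); */
static ir_visitor_status absorb_status(ir_visitor_status s)
{
   return (s == visit_continue_with_parent) ? visit_continue : s;
}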
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_optimization.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_optimization.h
new file mode 100644
index 0000000000..23365df10a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_optimization.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_optimization.h
+ *
+ * Prototypes for optimization passes to be called by the compiler and drivers.
+ */
+
+#ifndef GLSL_IR_OPTIMIZATION_H
+#define GLSL_IR_OPTIMIZATION_H
+
+struct gl_linked_shader;
+struct gl_shader_program;
+
+/* Operations for lower_instructions() */
+#define SUB_TO_ADD_NEG 0x01
+#define FDIV_TO_MUL_RCP 0x02
+#define EXP_TO_EXP2 0x04
+#define POW_TO_EXP2 0x08
+#define LOG_TO_LOG2 0x10
+#define MOD_TO_FLOOR 0x20
+#define INT_DIV_TO_MUL_RCP 0x40
+#define LDEXP_TO_ARITH 0x80
+#define CARRY_TO_ARITH 0x100
+#define BORROW_TO_ARITH 0x200
+#define SAT_TO_CLAMP 0x400
+#define DOPS_TO_DFRAC 0x800
+#define DFREXP_DLDEXP_TO_ARITH 0x1000
+#define BIT_COUNT_TO_MATH 0x02000
+#define EXTRACT_TO_SHIFTS 0x04000
+#define INSERT_TO_SHIFTS 0x08000
+#define REVERSE_TO_SHIFTS 0x10000
+#define FIND_LSB_TO_FLOAT_CAST 0x20000
+#define FIND_MSB_TO_FLOAT_CAST 0x40000
+#define IMUL_HIGH_TO_MUL 0x80000
+#define DDIV_TO_MUL_RCP 0x100000
+#define DIV_TO_MUL_RCP (FDIV_TO_MUL_RCP | DDIV_TO_MUL_RCP)
+#define SQRT_TO_ABS_SQRT 0x200000
+#define MUL64_TO_MUL_AND_MUL_HIGH 0x400000
+
+/* Operations for lower_64bit_integer_instructions() */
+#define MUL64 (1U << 0)
+#define SIGN64 (1U << 1)
+#define DIV64 (1U << 2)
+#define MOD64 (1U << 3)
+
+/**
+ * \see class lower_packing_builtins_visitor
+ */
+enum lower_packing_builtins_op {
+ LOWER_PACK_UNPACK_NONE = 0x0000,
+
+ LOWER_PACK_SNORM_2x16 = 0x0001,
+ LOWER_UNPACK_SNORM_2x16 = 0x0002,
+
+ LOWER_PACK_UNORM_2x16 = 0x0004,
+ LOWER_UNPACK_UNORM_2x16 = 0x0008,
+
+ LOWER_PACK_HALF_2x16 = 0x0010,
+ LOWER_UNPACK_HALF_2x16 = 0x0020,
+
+ LOWER_PACK_SNORM_4x8 = 0x0040,
+ LOWER_UNPACK_SNORM_4x8 = 0x0080,
+
+ LOWER_PACK_UNORM_4x8 = 0x0100,
+ LOWER_UNPACK_UNORM_4x8 = 0x0200,
+
+ LOWER_PACK_USE_BFI = 0x0400,
+ LOWER_PACK_USE_BFE = 0x0800,
+};
+
+bool do_common_optimization(exec_list *ir, bool linked,
+ bool uniform_locations_assigned,
+ const struct gl_shader_compiler_options *options,
+ bool native_integers);
+
+bool ir_constant_fold(ir_rvalue **rvalue);
+
+bool do_rebalance_tree(exec_list *instructions);
+bool do_algebraic(exec_list *instructions, bool native_integers,
+ const struct gl_shader_compiler_options *options);
+bool opt_conditional_discard(exec_list *instructions);
+bool do_constant_folding(exec_list *instructions);
+bool do_constant_variable(exec_list *instructions);
+bool do_constant_variable_unlinked(exec_list *instructions);
+bool do_copy_propagation_elements(exec_list *instructions);
+bool do_constant_propagation(exec_list *instructions);
+void do_dead_builtin_varyings(struct gl_context *ctx,
+ gl_linked_shader *producer,
+ gl_linked_shader *consumer,
+ unsigned num_tfeedback_decls,
+ class tfeedback_decl *tfeedback_decls);
+bool do_dead_code(exec_list *instructions, bool uniform_locations_assigned);
+bool do_dead_code_local(exec_list *instructions);
+bool do_dead_code_unlinked(exec_list *instructions);
+bool do_dead_functions(exec_list *instructions);
+bool opt_flip_matrices(exec_list *instructions);
+bool do_function_inlining(exec_list *instructions);
+bool do_lower_jumps(exec_list *instructions, bool pull_out_jumps = true, bool lower_sub_return = true, bool lower_main_return = false, bool lower_continue = false, bool lower_break = false);
+bool do_lower_texture_projection(exec_list *instructions);
+bool do_if_simplification(exec_list *instructions);
+bool opt_flatten_nested_if_blocks(exec_list *instructions);
+bool do_discard_simplification(exec_list *instructions);
+bool lower_if_to_cond_assign(gl_shader_stage stage, exec_list *instructions,
+ unsigned max_depth = 0, unsigned min_branch_cost = 0);
+bool do_mat_op_to_vec(exec_list *instructions);
+bool do_minmax_prune(exec_list *instructions);
+bool do_structure_splitting(exec_list *instructions);
+bool optimize_swizzles(exec_list *instructions);
+bool do_vectorize(exec_list *instructions);
+bool do_tree_grafting(exec_list *instructions);
+bool do_vec_index_to_cond_assign(exec_list *instructions);
+bool do_vec_index_to_swizzle(exec_list *instructions);
+bool lower_discard(exec_list *instructions);
+void lower_discard_flow(exec_list *instructions);
+bool lower_instructions(exec_list *instructions, unsigned what_to_lower);
+bool lower_variable_index_to_cond_assign(gl_shader_stage stage,
+ exec_list *instructions, bool lower_input, bool lower_output,
+ bool lower_temp, bool lower_uniform);
+bool lower_quadop_vector(exec_list *instructions, bool dont_lower_swz);
+bool lower_const_arrays_to_uniforms(exec_list *instructions, unsigned stage, unsigned max_uniform_components);
+bool lower_clip_cull_distance(struct gl_shader_program *prog,
+ gl_linked_shader *shader);
+ir_variable * lower_xfb_varying(void *mem_ctx,
+ gl_linked_shader *shader,
+ const char *old_var_name);
+void lower_output_reads(unsigned stage, exec_list *instructions);
+bool lower_packing_builtins(exec_list *instructions, int op_mask);
+void lower_shared_reference(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_linked_shader *shader);
+void lower_ubo_reference(struct gl_linked_shader *shader,
+ bool clamp_block_indices, bool use_std430_as_default);
+void lower_packed_varyings(void *mem_ctx,
+ unsigned locations_used,
+ const uint8_t *components,
+ ir_variable_mode mode,
+ unsigned gs_input_vertices,
+ gl_linked_shader *shader,
+ bool disable_varying_packing,
+ bool disable_xfb_packing,
+ bool xfb_enabled);
+bool lower_vector_insert(exec_list *instructions, bool lower_nonconstant_index);
+bool lower_vector_derefs(gl_linked_shader *shader);
+void lower_named_interface_blocks(void *mem_ctx, gl_linked_shader *shader);
+bool optimize_redundant_jumps(exec_list *instructions);
+bool optimize_split_arrays(exec_list *instructions, bool linked);
+bool lower_offset_arrays(exec_list *instructions);
+void optimize_dead_builtin_variables(exec_list *instructions,
+ enum ir_variable_mode other);
+bool lower_tess_level(gl_linked_shader *shader);
+
+bool lower_vertex_id(gl_linked_shader *shader);
+bool lower_cs_derived(gl_linked_shader *shader);
+bool lower_blend_equation_advanced(gl_linked_shader *shader, bool coherent);
+
+bool lower_builtins(exec_list *instructions);
+bool lower_subroutine(exec_list *instructions, struct _mesa_glsl_parse_state *state);
+void propagate_invariance(exec_list *instructions);
+
+namespace ir_builder { class ir_factory; };
+
+ir_variable *compare_index_block(ir_builder::ir_factory &body,
+ ir_variable *index,
+ unsigned base, unsigned components);
+
+bool lower_64bit_integer_instructions(exec_list *instructions,
+ unsigned what_to_lower);
+
+bool lower_precision(exec_list *instructions);
+
+#endif /* GLSL_IR_OPTIMIZATION_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp
new file mode 100644
index 0000000000..d15bfd0e56
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp
@@ -0,0 +1,1978 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir_print_glsl_visitor.h"
+#include "ir_visitor.h"
+#include "glsl_types.h"
+#include "glsl_parser_extras.h"
+#include "ir_unused_structs.h"
+#include "loop_analysis.h"
+#include "util/hash_table.h"
+#include <math.h>
+#include <limits>
+
+
+static void print_type(string_buffer& buffer, const glsl_type *t, bool arraySize);
+static void print_type_post(string_buffer& buffer, const glsl_type *t, bool arraySize);
+
+// FIXME: precision
+static inline const char* get_precision_string (unsigned p)
+{
+ switch (p) {
+ case GLSL_PRECISION_HIGH:
+ return "highp ";
+ case GLSL_PRECISION_MEDIUM:
+ return "mediump ";
+ case GLSL_PRECISION_LOW:
+ return "lowp ";
+ case GLSL_PRECISION_NONE:
+ return "";
+ }
+ assert(!"Should not get here.");
+ return "";
+}
+
+static const int tex_sampler_type_count = 7;
+// [glsl_sampler_dim]
+static const char* tex_sampler_dim_name[tex_sampler_type_count] = {
+ "1D", "2D", "3D", "Cube", "Rect", "Buf", "2D", /* samplerExternal uses texture2D */
+};
+static int tex_sampler_dim_size[tex_sampler_type_count] = {
+ 1, 2, 3, 3, 2, 2, 2,
+};
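+// Illustrative (assuming the glsl_sampler_dim enum order 1D, 2D, 3D, Cube,
+// Rect, Buf, External): GLSL_SAMPLER_DIM_CUBE indexes "Cube" with a
+// 3-component coordinate, so a pre-1.30 cube lookup prints as
+// "textureCube (sampler, vec3)".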
+
+struct ga_entry : public exec_node
+{
+ ga_entry(ir_instruction* ir)
+ {
+ assert(ir);
+ this->ir = ir;
+ }
+ ir_instruction* ir;
+};
+
+
+struct global_print_tracker {
+ global_print_tracker () {
+ mem_ctx = ralloc_context(0);
+ var_counter = 0;
+ var_hash = _mesa_hash_table_create(nullptr, _mesa_hash_pointer, _mesa_key_pointer_equal);
+ main_function_done = false;
+ }
+
+ ~global_print_tracker() {
+ _mesa_hash_table_destroy (var_hash, nullptr);
+ ralloc_free(mem_ctx);
+ }
+
+ unsigned var_counter;
+ hash_table* var_hash;
+	exec_list global_assignments;
+ void* mem_ctx;
+ bool main_function_done;
+};
+
+class ir_print_glsl_visitor : public ir_visitor {
+public:
+ ir_print_glsl_visitor(string_buffer& buf, global_print_tracker* globals_, PrintGlslMode mode_, bool use_precision_, const _mesa_glsl_parse_state* state_)
+ : buffer(buf)
+ , loopstate(NULL)
+ , inside_loop_body(false)
+ , skipped_this_ir(false)
+ , previous_skipped(false)
+ , uses_texlod_impl(0)
+ , uses_texlodproj_impl(0)
+ {
+ indentation = 0;
+ expression_depth = 0;
+ globals = globals_;
+ mode = mode_;
+ use_precision = use_precision_;
+ state = state_;
+ }
+
+ virtual ~ir_print_glsl_visitor()
+ {
+ }
+
+
+ void indent(void);
+ void newline_indent();
+ void end_statement_line();
+ void newline_deindent();
+ void print_var_name (ir_variable* v);
+ void print_precision (ir_instruction* ir, const glsl_type* type);
+
+ virtual void visit(ir_variable *);
+ virtual void visit(ir_function_signature *);
+ virtual void visit(ir_function *);
+ virtual void visit(ir_expression *);
+ virtual void visit(ir_texture *);
+ virtual void visit(ir_swizzle *);
+ virtual void visit(ir_dereference_variable *);
+ virtual void visit(ir_dereference_array *);
+ virtual void visit(ir_dereference_record *);
+ virtual void visit(ir_assignment *);
+ virtual void visit(ir_constant *);
+ virtual void visit(ir_call *);
+ virtual void visit(ir_return *);
+ virtual void visit(ir_discard *);
+ virtual void visit(class ir_demote *);
+ virtual void visit(ir_if *);
+ virtual void visit(ir_loop *);
+ virtual void visit(ir_loop_jump *);
+ virtual void visit(ir_precision_statement *);
+ virtual void visit(ir_typedecl_statement *);
+ virtual void visit(ir_emit_vertex *);
+ virtual void visit(ir_end_primitive *);
+ virtual void visit(class ir_barrier *);
+
+ void emit_assignment_part (ir_dereference* lhs, ir_rvalue* rhs, unsigned write_mask, ir_rvalue* dstIndex);
+ bool can_emit_canonical_for (loop_variable_state *ls);
+ bool emit_canonical_for (ir_loop* ir);
+ bool try_print_array_assignment (ir_dereference* lhs, ir_rvalue* rhs);
+
+ int indentation;
+ int expression_depth;
+ string_buffer& buffer;
+ global_print_tracker* globals;
+ const _mesa_glsl_parse_state* state;
+ PrintGlslMode mode;
+ loop_state* loopstate;
+ bool use_precision;
+ bool inside_loop_body;
+ bool skipped_this_ir;
+ bool previous_skipped;
+	int uses_texlod_impl;     // one bit per (precision, sampler dimension) pair, at bit (dim + prec*8); set when a sampler of that precision/dimension needs the GLES2 texture LOD workaround.
+	int uses_texlodproj_impl; // same layout, for projected texture lookups.
+};
+
+static void print_texlod_workarounds(int usage_bitfield, int usage_proj_bitfield, string_buffer &str)
+{
+ static const char *precStrings[3] = {"lowp", "mediump", "highp"};
+ static const char *precNameStrings[3] = { "low_", "medium_", "high_" };
+ // Print out the texlod workarounds
+ for (int prec = 0; prec < 3; prec++)
+ {
+ const char *precString = precStrings[prec];
+ const char *precName = precNameStrings[prec];
+
+ for (int dim = 0; dim < tex_sampler_type_count; dim++)
+ {
+ int mask = 1 << (dim + (prec * 8));
+ if (usage_bitfield & mask)
+ {
+ str.asprintf_append("%s vec4 impl_%stexture%sLodEXT(%s sampler%s sampler, highp vec%d coord, mediump float lod)\n", precString, precName, tex_sampler_dim_name[dim], precString, tex_sampler_dim_name[dim], tex_sampler_dim_size[dim]);
+ str.asprintf_append("{\n");
+ str.asprintf_append("#if defined(GL_EXT_shader_texture_lod)\n");
+ str.asprintf_append("\treturn texture%sLodEXT(sampler, coord, lod);\n", tex_sampler_dim_name[dim]);
+ str.asprintf_append("#else\n");
+ str.asprintf_append("\treturn texture%s(sampler, coord, lod);\n", tex_sampler_dim_name[dim]);
+ str.asprintf_append("#endif\n");
+ str.asprintf_append("}\n\n");
+ }
+ if (usage_proj_bitfield & mask)
+ {
+ // 2D projected read also has a vec4 UV variant
+ if (dim == GLSL_SAMPLER_DIM_2D)
+ {
+ str.asprintf_append("%s vec4 impl_%stexture2DProjLodEXT(%s sampler2D sampler, highp vec4 coord, mediump float lod)\n", precString, precName, precString);
+ str.asprintf_append("{\n");
+ str.asprintf_append("#if defined(GL_EXT_shader_texture_lod)\n");
+ str.asprintf_append("\treturn texture%sProjLodEXT(sampler, coord, lod);\n", tex_sampler_dim_name[dim]);
+ str.asprintf_append("#else\n");
+ str.asprintf_append("\treturn texture%sProj(sampler, coord, lod);\n", tex_sampler_dim_name[dim]);
+ str.asprintf_append("#endif\n");
+ str.asprintf_append("}\n\n");
+ }
+ str.asprintf_append("%s vec4 impl_%stexture%sProjLodEXT(%s sampler%s sampler, highp vec%d coord, mediump float lod)\n", precString, precName, tex_sampler_dim_name[dim], precString, tex_sampler_dim_name[dim], tex_sampler_dim_size[dim] + 1);
+ str.asprintf_append("{\n");
+ str.asprintf_append("#if defined(GL_EXT_shader_texture_lod)\n");
+ str.asprintf_append("\treturn texture%sProjLodEXT(sampler, coord, lod);\n", tex_sampler_dim_name[dim]);
+ str.asprintf_append("#else\n");
+ str.asprintf_append("\treturn texture%sProj(sampler, coord, lod);\n", tex_sampler_dim_name[dim]);
+ str.asprintf_append("#endif\n");
+ str.asprintf_append("}\n\n");
+ }
+ }
+ }
+}
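+// For illustration, a mediump 2D bit in usage_bitfield makes this emit roughly:
+//
+//   mediump vec4 impl_medium_texture2DLodEXT(mediump sampler2D sampler, highp vec2 coord, mediump float lod)
+//   {
+//   #if defined(GL_EXT_shader_texture_lod)
+//       return texture2DLodEXT(sampler, coord, lod);
+//   #else
+//       return texture2D(sampler, coord, lod);
+//   #endif
+//   }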
+
+
+char*
+_mesa_print_ir_glsl(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state,
+ char* buffer, PrintGlslMode mode)
+{
+ string_buffer str(buffer);
+ string_buffer body(buffer);
+
+ // print version & extensions
+ if (state) {
+ if (state->had_version_string)
+ {
+ str.asprintf_append ("#version %i", state->language_version);
+ if (state->es_shader && state->language_version >= 300)
+ str.asprintf_append (" es");
+ str.asprintf_append ("\n");
+ }
+ if (state->ARB_shader_texture_lod_enable)
+ str.asprintf_append ("#extension GL_ARB_shader_texture_lod : enable\n");
+ if (state->ARB_draw_instanced_enable)
+ str.asprintf_append ("#extension GL_ARB_draw_instanced : enable\n");
+ if (state->ARB_explicit_attrib_location_enable)
+ str.asprintf_append ("#extension GL_ARB_explicit_attrib_location : enable\n");
+ if (state->EXT_gpu_shader4_enable)
+ str.asprintf_append ("#extension GL_EXT_gpu_shader4 : enable\n");
+ // FIXME
+ // if (state->EXT_shader_texture_lod_enable)
+ // str.asprintf_append ("#extension GL_EXT_shader_texture_lod : enable\n");
+ if (state->OES_standard_derivatives_enable)
+ str.asprintf_append ("#extension GL_OES_standard_derivatives : enable\n");
+ // FIXME
+ // if (state->EXT_shadow_samplers_enable)
+ // str.asprintf_append ("#extension GL_EXT_shadow_samplers : enable\n");
+ if (state->EXT_frag_depth_enable)
+ str.asprintf_append ("#extension GL_EXT_frag_depth : enable\n");
+ if (state->es_shader && state->language_version < 300)
+ {
+ if (state->EXT_draw_buffers_enable)
+ str.asprintf_append ("#extension GL_EXT_draw_buffers : enable\n");
+ // FIXME
+ // if (state->EXT_draw_instanced_enable)
+ // str.asprintf_append ("#extension GL_EXT_draw_instanced : enable\n");
+ }
+ if (state->EXT_shader_framebuffer_fetch_enable)
+ str.asprintf_append ("#extension GL_EXT_shader_framebuffer_fetch : enable\n");
+ if (state->ARB_shader_bit_encoding_enable)
+ str.asprintf_append("#extension GL_ARB_shader_bit_encoding : enable\n");
+ if (state->EXT_texture_array_enable)
+ str.asprintf_append ("#extension GL_EXT_texture_array : enable\n");
+ if (state->KHR_blend_equation_advanced_enable)
+ str.asprintf_append ("#extension GL_KHR_blend_equation_advanced : enable\n");
+ if (state->EXT_blend_func_extended_enable)
+ str.asprintf_append ("#extension GL_EXT_blend_func_extended : enable\n");
+ if (state->OES_EGL_image_external_enable)
+ str.asprintf_append ("#extension GL_OES_EGL_image_external : enable\n");
+ if (state->OES_EGL_image_external_essl3_enable)
+ str.asprintf_append ("#extension GL_OES_EGL_image_external_essl3 : enable\n");
+ if (state->ARB_shader_storage_buffer_object_enable)
+ str.asprintf_append ("#extension GL_ARB_shader_storage_buffer_object : enable\n");
+
+
+ // TODO: support other blend specifiers besides "all"
+ if (state->fs_blend_support == BLEND_ALL)
+ str.asprintf_append ("layout(blend_support_all_equations) out;\n");
+ }
+
+ // remove unused struct declarations
+ do_remove_unused_typedecls(instructions);
+
+ global_print_tracker gtracker;
+ int uses_texlod_impl = 0;
+ int uses_texlodproj_impl = 0;
+
+ loop_state* ls = analyze_loop_variables(instructions);
+	// FIXME: set_loop_controls has been merged into unroll_loops
+ // if (ls->loop_found)
+ // set_loop_controls(instructions, ls);
+
+ foreach_in_list(ir_instruction, ir, instructions)
+ {
+ if (ir->ir_type == ir_type_variable) {
+ ir_variable *var = static_cast<ir_variable*>(ir);
+ if ((strstr(var->name, "gl_") == var->name)
+ && !var->data.invariant)
+ continue;
+ }
+
+ ir_print_glsl_visitor v (body, &gtracker, mode, state->es_shader, state);
+ v.loopstate = ls;
+
+ ir->accept(&v);
+ if (ir->ir_type != ir_type_function && !v.skipped_this_ir)
+ body.asprintf_append (";\n");
+
+ uses_texlod_impl |= v.uses_texlod_impl;
+ uses_texlodproj_impl |= v.uses_texlodproj_impl;
+ }
+
+ delete ls;
+
+ print_texlod_workarounds(uses_texlod_impl, uses_texlodproj_impl, str);
+
+ // Add the optimized glsl code
+ str.asprintf_append("%s", body.c_str());
+
+ return ralloc_strdup(buffer, str.c_str());
+}
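+// Minimal usage sketch (hypothetical names; assumes `instructions` and `state`
+// come from a successful compile, and that the `buffer` argument is a ralloc'd
+// string serving as the allocation context for the returned text):
+//
+//   char* buf = ralloc_strdup(NULL, "");
+//   char* glsl = _mesa_print_ir_glsl(instructions, state, buf, kPrintGlslFragment);
+//   printf("%s\n", glsl);
+//   ralloc_free(buf);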
+
+
+void ir_print_glsl_visitor::indent(void)
+{
+ if (previous_skipped)
+ return;
+ for (int i = 0; i < indentation; i++)
+ buffer.asprintf_append (" ");
+}
+
+void ir_print_glsl_visitor::end_statement_line()
+{
+ if (!skipped_this_ir)
+ buffer.asprintf_append(";\n");
+ previous_skipped = skipped_this_ir;
+ skipped_this_ir = false;
+}
+
+void ir_print_glsl_visitor::newline_indent()
+{
+ if (expression_depth % 4 == 0)
+ {
+ ++indentation;
+ buffer.asprintf_append ("\n");
+ indent();
+ }
+}
+void ir_print_glsl_visitor::newline_deindent()
+{
+ if (expression_depth % 4 == 0)
+ {
+ --indentation;
+ buffer.asprintf_append ("\n");
+ indent();
+ }
+}
+
+
+void ir_print_glsl_visitor::print_var_name (ir_variable* v)
+{
+ long id = 0;
+ const hash_entry *entry = _mesa_hash_table_search(globals->var_hash, v);
+ if (entry)
+ {
+ id = (long)entry->data;
+ }
+ else if (v->data.mode == ir_var_temporary)
+ {
+ id = ++globals->var_counter;
+ _mesa_hash_table_insert (globals->var_hash, v, (void*)id);
+ }
+ if (id)
+ {
+ if (v->data.mode == ir_var_temporary)
+ buffer.asprintf_append ("tmpvar_%d", (int)id);
+ else
+ buffer.asprintf_append ("%s_%d", v->name, (int)id);
+ }
+ else
+ {
+ buffer.asprintf_append ("%s", v->name);
+ }
+}
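+// Examples: a compiler temporary prints as "tmpvar_1"; a user variable that
+// was assigned an id (see visit(ir_variable)) prints as "foo_2"; anything
+// without an id keeps its original name.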
+
+void ir_print_glsl_visitor::print_precision (ir_instruction* ir, const glsl_type* type)
+{
+ if (!this->use_precision)
+ return;
+ if (type &&
+ !type->is_float() &&
+ !type->is_sampler() &&
+ !type->is_integer() &&
+ (!type->is_array() || !type->without_array()->is_float()) &&
+ (!type->is_array() || !type->without_array()->is_integer())
+ )
+ {
+ return;
+ }
+
+ ir_variable* var = ir->as_variable();
+ if (var) {
+ buffer.asprintf_append ("%s", get_precision_string(var->data.precision));
+ }
+
+ // FIXME
+ // glsl_precision prec = precision_from_ir(ir);
+
+ // // In fragment shader, default float precision is undefined.
+ // // We must thus always print it, when there was no default precision
+ // // and for whatever reason our type ended up having undefined precision.
+ // if (prec == glsl_precision_undefined &&
+ // type && type->is_float() &&
+ // this->state->stage == MESA_SHADER_FRAGMENT &&
+ // !this->state->had_float_precision)
+ // {
+ // prec = glsl_precision_high;
+ // }
+ // if (type && type->is_integer())
+ // {
+ // if (prec == glsl_precision_undefined && type && type->is_integer())
+ // {
+ // // Default to highp on integers
+ // prec = glsl_precision_high;
+ // }
+ // }
+
+ // // skip precision for samplers that end up being lowp (default anyway) or undefined;
+ // // except always emit it for shadowmap samplers (some drivers don't implement
+ // // default EXT_shadow_samplers precision) and 3D textures (they always require precision)
+ // if (type && type->is_sampler() && !type->sampler_shadow && !(type->sampler_dimensionality > GLSL_SAMPLER_DIM_2D))
+ // {
+ // if (prec == glsl_precision_low || prec == glsl_precision_undefined)
+ // return;
+ // }
+
+ // if (prec == glsl_precision_high || prec == glsl_precision_undefined)
+ // {
+ // if (ir->ir_type == ir_type_function_signature)
+ // return;
+ // }
+ // buffer.asprintf_append ("%s", get_precision_string(prec));
+}
+
+
+static void print_type(string_buffer& buffer, const glsl_type *t, bool arraySize)
+{
+ if (t->base_type == GLSL_TYPE_ARRAY) {
+ print_type(buffer, t->fields.array, true);
+ if (arraySize)
+ buffer.asprintf_append ("[%u]", t->length);
+ } else if ((t->base_type == GLSL_TYPE_STRUCT)
+ && (strncmp("gl_", t->name, 3) != 0)) {
+ buffer.asprintf_append ("%s", t->name);
+ } else {
+ buffer.asprintf_append ("%s", t->name);
+ }
+}
+
+static void print_type_post(string_buffer& buffer, const glsl_type *t, bool arraySize)
+{
+ if (t->base_type == GLSL_TYPE_ARRAY) {
+ if (!arraySize) {
+ if (t->length) {
+ buffer.asprintf_append ("[%u]", t->length);
+ } else {
+ buffer.asprintf_append ("[]");
+ }
+ }
+ }
+}
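+// print_type/print_type_post split a declaration around the variable name to
+// get C-style array syntax: for "float a[4]", print_type emits "float", the
+// caller emits " a", and print_type_post emits "[4]".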
+
+
+void ir_print_glsl_visitor::visit(ir_variable *ir)
+{
+	// Variables that are declared as interface blocks, or as members of one, are printed by the block declaration instead.
+ if (ir->is_in_buffer_block()) {
+ skipped_this_ir = true;
+ return;
+ }
+
+ const char *const cent = (ir->data.centroid) ? "centroid " : "";
+ const char *const inv = (ir->data.invariant) ? "invariant " : "";
+ const char *const mode[3][ir_var_mode_count] =
+ {
+ { "", "uniform ", "", "", "in ", "out ", "in ", "out ", "inout ", "", "", "" },
+ { "", "uniform ", "", "", "attribute ", "varying ", "in ", "out ", "inout ", "", "", "" },
+ { "", "uniform ", "", "", "varying ", "out ", "in ", "out ", "inout ", "", "", "" },
+ };
+
+ const char *const interp[] = { "", "smooth ", "flat ", "noperspective " };
+
+ bool supports_explicit_location = this->state->language_version >= 300 ||
+ this->state->ARB_explicit_attrib_location_enable;
+ if (supports_explicit_location && ir->data.explicit_location)
+ {
+ const int binding_base = (this->state->stage == MESA_SHADER_VERTEX ? (int)VERT_ATTRIB_GENERIC0 : (int)FRAG_RESULT_DATA0);
+ const int location = ir->data.location - binding_base;
+ if (ir->data.explicit_index) {
+ const int index = ir->data.index;
+ buffer.asprintf_append ("layout(location=%d, index=%d) ", location, index);
+ } else {
+ buffer.asprintf_append ("layout(location=%d) ", location);
+ }
+ }
+
+ int decormode = this->mode;
+ // GLSL 1.30 and up use "in" and "out" for everything
+ if (this->state->language_version >= 130)
+ {
+ decormode = 0;
+ }
+
+	// give an id to any variable defined in a function that is not a uniform
+	if (this->mode == kPrintGlslNone && ir->data.mode != ir_var_uniform)
+ {
+ const hash_entry *entry = _mesa_hash_table_search (globals->var_hash, ir);
+ if (!entry)
+ {
+ long id = ++globals->var_counter;
+ _mesa_hash_table_insert (globals->var_hash, ir, (void*)id);
+ }
+ }
+
+ // if this is a loop induction variable, do not print it
+ // (will be printed inside loop body)
+ if (!inside_loop_body)
+ {
+ // FIXME
+ // loop_variable_state* inductor_state = loopstate->get_for_inductor(ir);
+ // if (inductor_state && inductor_state->private_induction_variable_count == 1 &&
+ // can_emit_canonical_for(inductor_state))
+ // {
+ // skipped_this_ir = true;
+ // return;
+ // }
+ }
+
+ // keep invariant declaration for builtin variables
+ if (strstr(ir->name, "gl_") == ir->name) {
+ buffer.asprintf_append ("%s", inv);
+ print_var_name (ir);
+ return;
+ }
+
+ buffer.asprintf_append ("%s%s%s%s",
+ cent, inv, interp[ir->data.interpolation], mode[decormode][ir->data.mode]);
+ print_precision (ir, ir->type);
+ print_type(buffer, ir->type, false);
+ buffer.asprintf_append (" ");
+ print_var_name (ir);
+ print_type_post(buffer, ir->type, false);
+
+ // FIXME: inout is a metal thing?
+ if (ir->constant_value &&
+ ir->data.mode != ir_var_shader_in &&
+ ir->data.mode != ir_var_shader_out &&
+ // ir->data.mode != ir_var_shader_inout &&
+ ir->data.mode != ir_var_function_in &&
+ ir->data.mode != ir_var_function_out) // &&
+ // ir->data.mode != ir_var_function_inout)
+ {
+ buffer.asprintf_append (" = ");
+ visit (ir->constant_value);
+ }
+}
+
+
+void ir_print_glsl_visitor::visit(ir_function_signature *ir)
+{
+ print_precision (ir, ir->return_type);
+ print_type(buffer, ir->return_type, true);
+ buffer.asprintf_append (" %s (", ir->function_name());
+
+ if (!ir->parameters.is_empty())
+ {
+ buffer.asprintf_append ("\n");
+
+ indentation++; previous_skipped = false;
+ bool first = true;
+ foreach_in_list(ir_variable, inst, &ir->parameters) {
+ if (!first)
+ buffer.asprintf_append (",\n");
+ indent();
+ inst->accept(this);
+ first = false;
+ }
+ indentation--;
+
+ buffer.asprintf_append ("\n");
+ indent();
+ }
+
+ if (ir->body.is_empty())
+ {
+ buffer.asprintf_append (");\n");
+ return;
+ }
+
+ buffer.asprintf_append (")\n");
+
+ indent();
+ buffer.asprintf_append ("{\n");
+ indentation++; previous_skipped = false;
+
+	// insert postponed global assignments
+ if (strcmp(ir->function()->name, "main") == 0)
+ {
+ assert (!globals->main_function_done);
+ globals->main_function_done = true;
+		foreach_in_list(ga_entry, node, &globals->global_assignments)
+ {
+ ir_instruction* as = node->ir;
+ as->accept(this);
+ buffer.asprintf_append(";\n");
+ }
+ }
+
+ foreach_in_list(ir_instruction, inst, &ir->body) {
+ indent();
+ inst->accept(this);
+ end_statement_line();
+ }
+ indentation--;
+ indent();
+ buffer.asprintf_append ("}\n");
+}
+
+void ir_print_glsl_visitor::visit(ir_function *ir)
+{
+ bool found_non_builtin_proto = false;
+
+ foreach_in_list(ir_function_signature, sig, &ir->signatures) {
+ if (!sig->is_builtin())
+ found_non_builtin_proto = true;
+ }
+ if (!found_non_builtin_proto)
+ return;
+
+ PrintGlslMode oldMode = this->mode;
+ this->mode = kPrintGlslNone;
+
+ foreach_in_list(ir_function_signature, sig, &ir->signatures) {
+ indent();
+ sig->accept(this);
+ buffer.asprintf_append ("\n");
+ }
+
+ this->mode = oldMode;
+
+ indent();
+}
+
+static const char* operator_glsl_str(ir_expression_operation op, const glsl_type* type) {
+ switch (op) {
+ case ir_unop_bit_not:
+ return "~";
+ case ir_unop_logic_not:
+ return "!";
+ case ir_unop_neg:
+ return "-";
+ case ir_unop_abs:
+ return "abs";
+ case ir_unop_sign:
+ return "sign";
+ case ir_unop_rsq:
+ return "inversesqrt";
+ case ir_unop_sqrt:
+ return "sqrt";
+ case ir_unop_exp:
+ return "exp";
+ case ir_unop_log:
+ return "log";
+ case ir_unop_exp2:
+ return "exp2";
+ case ir_unop_log2:
+ return "log2";
+ case ir_unop_trunc:
+ return "trunc";
+ case ir_unop_ceil:
+ return "ceil";
+ case ir_unop_floor:
+ return "floor";
+ case ir_unop_fract:
+ return "fract";
+ case ir_unop_round_even:
+ return "roundEven";
+ case ir_unop_sin:
+ return "sin";
+ case ir_unop_cos:
+ return "cos";
+ case ir_unop_atan:
+ return "atan";
+ case ir_unop_dFdx:
+ return "dFdx";
+ case ir_unop_dFdx_coarse:
+ return "dFdxCoarse";
+ case ir_unop_dFdx_fine:
+ return "dFdxFine";
+ case ir_unop_dFdy:
+ return "dFdy";
+ case ir_unop_dFdy_coarse:
+ return "dFdyCoarse";
+ case ir_unop_dFdy_fine:
+ return "dFdyFine";
+ case ir_unop_pack_snorm_2x16:
+ return "packSnorm2x16";
+ case ir_unop_pack_snorm_4x8:
+ return "packSnorm4x8";
+ case ir_unop_pack_unorm_2x16:
+ return "packUnorm2x16";
+ case ir_unop_pack_unorm_4x8:
+ return "packUnorm4x8";
+ case ir_unop_pack_half_2x16:
+ return "packHalf2x16";
+ case ir_unop_unpack_snorm_2x16:
+ return "unpackSnorm2x16";
+ case ir_unop_unpack_snorm_4x8:
+ return "unpackSnorm4x8";
+ case ir_unop_unpack_unorm_2x16:
+ return "unpackUnorm2x16";
+ case ir_unop_unpack_unorm_4x8:
+ return "unpackUnorm4x8";
+ case ir_unop_unpack_half_2x16:
+ return "unpackHalf2x16";
+ case ir_unop_bitfield_reverse:
+ return "bitfieldReverse";
+ case ir_unop_bit_count:
+ return "bitCount";
+ case ir_unop_find_msb:
+ return "findMSB";
+ case ir_unop_find_lsb:
+ return "findLSB";
+ case ir_unop_saturate:
+ return "saturate";
+ case ir_unop_pack_double_2x32:
+ return "packDouble2x32";
+ case ir_unop_unpack_double_2x32:
+ return "unpackDouble2x32";
+ case ir_unop_pack_sampler_2x32:
+ return "packSampler2x32";
+ case ir_unop_pack_image_2x32:
+ return "packImage2x32";
+ case ir_unop_unpack_sampler_2x32:
+ return "unpackSampler2x32";
+ case ir_unop_unpack_image_2x32:
+ return "unpackImage2x32";
+ case ir_unop_interpolate_at_centroid:
+ return "interpolateAtCentroid";
+ case ir_unop_pack_int_2x32:
+ return "packInt2x32";
+ case ir_unop_pack_uint_2x32:
+ return "packUint2x32";
+ case ir_unop_unpack_int_2x32:
+ return "unpackInt2x32";
+ case ir_unop_unpack_uint_2x32:
+ return "unpackUint2x32";
+ case ir_binop_add:
+ return "+";
+ case ir_binop_sub:
+ return "-";
+ case ir_binop_mul:
+ return "*";
+ case ir_binop_div:
+ return "/";
+ case ir_binop_mod:
+ if (type->is_integer())
+ return "%";
+ else
+ return "mod";
+ case ir_binop_less:
+ if (type->is_vector())
+ return "lessThan";
+ else
+ return "<";
+ case ir_binop_gequal:
+ if (type->is_vector())
+ return "greaterThanEqual";
+ else
+ return ">=";
+ case ir_binop_equal:
+ if (type->is_vector())
+ return "equal";
+ else
+ return "==";
+ case ir_binop_nequal:
+ if (type->is_vector())
+ return "notEqual";
+ else
+ return "!=";
+ case ir_binop_all_equal:
+ return "==";
+ case ir_binop_any_nequal:
+ return "!=";
+ case ir_binop_lshift:
+ return "<<";
+ case ir_binop_rshift:
+ return ">>";
+ case ir_binop_bit_and:
+ return "&";
+ case ir_binop_bit_xor:
+ return "^";
+ case ir_binop_bit_or:
+ return "|";
+ case ir_binop_logic_and:
+ return "&&";
+ case ir_binop_logic_xor:
+ return "^^";
+ case ir_binop_logic_or:
+ return "||";
+ case ir_binop_dot:
+ return "dot";
+ case ir_binop_min:
+ return "min";
+ case ir_binop_max:
+ return "max";
+ case ir_binop_pow:
+ return "pow";
+ case ir_binop_interpolate_at_offset:
+ return "interpolateAtOffset";
+ case ir_binop_interpolate_at_sample:
+ return "interpolateAtSample";
+ case ir_binop_atan2:
+ return "atan";
+ case ir_triop_fma:
+ return "fma";
+ case ir_triop_lrp:
+ return "mix";
+ default:
+ unreachable("Unexpected operator in operator_glsl_str");
+ return "UNIMPLEMENTED";
+ }
+}
+
+static bool is_binop_func_like(ir_expression_operation op, const glsl_type* type)
+{
+ if (op == ir_binop_mod && !type->is_integer()) {
+ return true;
+ } else if ((op >= ir_binop_dot && op <= ir_binop_pow) || op == ir_binop_atan2) {
+ return true;
+ } else if (type->is_vector() && (op >= ir_binop_less && op <= ir_binop_nequal)) {
+ return true;
+ }
+ return false;
+}
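+// Examples of the split: integer mod prints infix as "(a % b)" while float mod
+// prints as a call, "mod (a, b)"; scalar comparisons print as "(a < b)" while
+// vector ones print as "lessThan (a, b)".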
+
+void ir_print_glsl_visitor::visit(ir_expression *ir)
+{
+ ++this->expression_depth;
+ newline_indent();
+
+ if (ir->num_operands == 1) {
+ if (ir->operation >= ir_unop_f2i && ir->operation <= ir_unop_u2i) {
+ print_type(buffer, ir->type, true);
+ buffer.asprintf_append ("(");
+ } else if (ir->operation == ir_unop_rcp) {
+ buffer.asprintf_append ("(1.0/(");
+ } else {
+ buffer.asprintf_append ("%s(", operator_glsl_str(ir->operation, ir->type));
+ }
+ if (ir->operands[0])
+ ir->operands[0]->accept(this);
+ buffer.asprintf_append (")");
+ if (ir->operation == ir_unop_rcp) {
+ buffer.asprintf_append (")");
+ }
+ }
+ else if (ir->operation == ir_triop_csel)
+ {
+ buffer.asprintf_append ("mix(");
+ ir->operands[2]->accept(this);
+ buffer.asprintf_append (", ");
+ ir->operands[1]->accept(this);
+ if (ir->operands[1]->type->is_scalar())
+ buffer.asprintf_append (", bool(");
+ else
+ buffer.asprintf_append (", bvec%d(", ir->operands[1]->type->vector_elements);
+ ir->operands[0]->accept(this);
+ buffer.asprintf_append ("))");
+ }
+ else if (ir->operation == ir_binop_vector_extract)
+ {
+ // a[b]
+
+ if (ir->operands[0])
+ ir->operands[0]->accept(this);
+ buffer.asprintf_append ("[");
+ if (ir->operands[1])
+ ir->operands[1]->accept(this);
+ buffer.asprintf_append ("]");
+ }
+ else if (is_binop_func_like(ir->operation, ir->type))
+ {
+ if (ir->operation == ir_binop_mod)
+ {
+ buffer.asprintf_append ("(");
+ print_type(buffer, ir->type, true);
+ buffer.asprintf_append ("(");
+ }
+ buffer.asprintf_append ("%s (", operator_glsl_str(ir->operation, ir->type));
+
+ if (ir->operands[0])
+ ir->operands[0]->accept(this);
+ buffer.asprintf_append (", ");
+ if (ir->operands[1])
+ ir->operands[1]->accept(this);
+ buffer.asprintf_append (")");
+ if (ir->operation == ir_binop_mod)
+ buffer.asprintf_append ("))");
+ }
+ else if (ir->num_operands == 2)
+ {
+ buffer.asprintf_append ("(");
+ if (ir->operands[0])
+ ir->operands[0]->accept(this);
+
+ buffer.asprintf_append (" %s ", operator_glsl_str(ir->operation, ir->type));
+
+ if (ir->operands[1])
+ ir->operands[1]->accept(this);
+ buffer.asprintf_append (")");
+ }
+ else
+ {
+ // ternary op
+ buffer.asprintf_append ("%s (", operator_glsl_str(ir->operation, ir->type));
+ if (ir->operands[0])
+ ir->operands[0]->accept(this);
+ buffer.asprintf_append (", ");
+ if (ir->operands[1])
+ ir->operands[1]->accept(this);
+ buffer.asprintf_append (", ");
+ if (ir->operands[2])
+ ir->operands[2]->accept(this);
+ buffer.asprintf_append (")");
+ }
+
+ newline_deindent();
+ --this->expression_depth;
+}
+
+void ir_print_glsl_visitor::visit(ir_texture *ir)
+{
+ glsl_sampler_dim sampler_dim = (glsl_sampler_dim)ir->sampler->type->sampler_dimensionality;
+ const bool is_shadow = ir->sampler->type->sampler_shadow;
+ const bool is_array = ir->sampler->type->sampler_array;
+
+ if (ir->op == ir_txs)
+ {
+ buffer.asprintf_append("textureSize (");
+ ir->sampler->accept(this);
+ if (ir_texture::has_lod(ir->sampler->type))
+ {
+ buffer.asprintf_append(", ");
+ ir->lod_info.lod->accept(this);
+ }
+ buffer.asprintf_append(")");
+ return;
+ }
+
+ const glsl_type* uv_type = ir->coordinate->type;
+ const int uv_dim = uv_type->vector_elements;
+ int sampler_uv_dim = tex_sampler_dim_size[sampler_dim];
+ if (is_shadow)
+ sampler_uv_dim += 1;
+ if (is_array)
+ sampler_uv_dim += 1;
+ const bool is_proj = ((ir->op == ir_tex || ir->op == ir_txb || ir->op == ir_txl || ir->op == ir_txd) && uv_dim > sampler_uv_dim);
+ const bool is_lod = (ir->op == ir_txl);
+
+ // FIXME precision/lod
+ // if (is_lod && state->es_shader && state->language_version < 300 && state->stage == MESA_SHADER_FRAGMENT)
+ // {
+ // // Special workaround for GLES 2.0 LOD samplers to prevent a lot of debug spew.
+ // const glsl_precision prec = ir->sampler->get_precision();
+ // const char *precString = "";
+ // // Sampler bitfield is 7 bits, so use 0-7 for lowp, 8-15 for mediump and 16-23 for highp.
+ // int position = (int)sampler_dim;
+ // switch (prec)
+ // {
+ // case glsl_precision_high:
+ // position += 16;
+ // precString = "_high_";
+ // break;
+ // case glsl_precision_medium:
+ // position += 8;
+ // precString = "_medium_";
+ // break;
+ // case glsl_precision_low:
+ // default:
+ // precString = "_low_";
+ // break;
+ // }
+ // buffer.asprintf_append("impl%s", precString);
+ // if (is_proj)
+ // uses_texlodproj_impl |= (1 << position);
+ // else
+ // uses_texlod_impl |= (1 << position);
+ // }
+
+
+ // texture function name
+	// ACS: shadow lookups and lookups with dimensionality included in the name were deprecated in GLSL 1.30
+ if(state->language_version<130)
+ {
+ buffer.asprintf_append ("%s", is_shadow ? "shadow" : "texture");
+ buffer.asprintf_append ("%s", tex_sampler_dim_name[sampler_dim]);
+ }
+ else
+ {
+ if (ir->op == ir_txf || ir->op == ir_txf_ms)
+ buffer.asprintf_append ("texelFetch");
+ else
+ buffer.asprintf_append ("texture");
+ }
+
+ if (is_array && state->EXT_texture_array_enable)
+ buffer.asprintf_append ("Array");
+
+ if (is_proj)
+ buffer.asprintf_append ("Proj");
+ if (ir->op == ir_txl)
+ buffer.asprintf_append ("Lod");
+ if (ir->op == ir_txd)
+ buffer.asprintf_append ("Grad");
+ if (ir->offset != NULL)
+ buffer.asprintf_append ("Offset");
+
+ if (state->es_shader)
+ {
+ // FIXME extension
+ // if ( (is_shadow && state->EXT_shadow_samplers_enable) ||
+ // (ir->op == ir_txl && state->EXT_shader_texture_lod_enable) )
+ // {
+ // buffer.asprintf_append ("EXT");
+ // }
+ }
+
+ if(ir->op == ir_txd)
+ {
+ // FIXME extension
+ // if(state->es_shader && state->EXT_shader_texture_lod_enable)
+ // buffer.asprintf_append ("EXT");
+ // else if(!state->es_shader && state->ARB_shader_texture_lod_enable)
+ // buffer.asprintf_append ("ARB");
+ }
+
+ buffer.asprintf_append (" (");
+
+ // sampler
+ ir->sampler->accept(this);
+ buffer.asprintf_append (", ");
+
+ // texture coordinate
+ ir->coordinate->accept(this);
+
+ // lod
+ if (ir->op == ir_txl || ir->op == ir_txf)
+ {
+ buffer.asprintf_append (", ");
+ ir->lod_info.lod->accept(this);
+ }
+
+ // sample index
+ if (ir->op == ir_txf_ms)
+ {
+ buffer.asprintf_append (", ");
+ ir->lod_info.sample_index->accept(this);
+ }
+
+ // grad
+ if (ir->op == ir_txd)
+ {
+ buffer.asprintf_append (", ");
+ ir->lod_info.grad.dPdx->accept(this);
+ buffer.asprintf_append (", ");
+ ir->lod_info.grad.dPdy->accept(this);
+ }
+
+ // texel offset
+ if (ir->offset != NULL)
+ {
+ buffer.asprintf_append (", ");
+ ir->offset->accept(this);
+ }
+
+ // lod bias
+ if (ir->op == ir_txb)
+ {
+ buffer.asprintf_append (", ");
+ ir->lod_info.bias->accept(this);
+ }
+
+ /*
+
+
+ if (ir->op != ir_txf) {
+ if (ir->projector)
+ ir->projector->accept(this);
+ else
+ buffer.asprintf_append ("1");
+
+ if (ir->shadow_comparitor) {
+ buffer.asprintf_append (" ");
+ ir->shadow_comparitor->accept(this);
+ } else {
+ buffer.asprintf_append (" ()");
+ }
+ }
+
+ buffer.asprintf_append (" ");
+ switch (ir->op)
+ {
+ case ir_tex:
+ break;
+ case ir_txb:
+ ir->lod_info.bias->accept(this);
+ break;
+ case ir_txl:
+ case ir_txf:
+ ir->lod_info.lod->accept(this);
+ break;
+ case ir_txd:
+ buffer.asprintf_append ("(");
+ ir->lod_info.grad.dPdx->accept(this);
+ buffer.asprintf_append (" ");
+ ir->lod_info.grad.dPdy->accept(this);
+ buffer.asprintf_append (")");
+ break;
+ };
+ */
+ buffer.asprintf_append (")");
+}
+
+
+void ir_print_glsl_visitor::visit(ir_swizzle *ir)
+{
+ const unsigned swiz[4] = {
+ ir->mask.x,
+ ir->mask.y,
+ ir->mask.z,
+ ir->mask.w,
+ };
+
+ if (ir->val->type == glsl_type::float_type || ir->val->type == glsl_type::int_type || ir->val->type == glsl_type::uint_type)
+ {
+ if (ir->mask.num_components != 1)
+ {
+ print_type(buffer, ir->type, true);
+ buffer.asprintf_append ("(");
+ }
+ }
+
+ ir->val->accept(this);
+
+ if (ir->val->type == glsl_type::float_type || ir->val->type == glsl_type::int_type || ir->val->type == glsl_type::uint_type)
+ {
+ if (ir->mask.num_components != 1)
+ {
+ buffer.asprintf_append (")");
+ }
+ return;
+ }
+
+ // Swizzling scalar types is not allowed so just return now.
+ if (ir->val->type->vector_elements == 1)
+ return;
+
+ buffer.asprintf_append (".");
+ for (unsigned i = 0; i < ir->mask.num_components; i++) {
+ buffer.asprintf_append ("%c", "xyzw"[swiz[i]]);
+ }
+}
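+// Examples: a multi-component swizzle of a scalar such as f.xxx has no direct
+// GLSL spelling, so it prints as the constructor "vec3(f)"; a normal vector
+// swizzle prints as "v.xyz".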
+
+
+void ir_print_glsl_visitor::visit(ir_dereference_variable *ir)
+{
+ ir_variable *var = ir->variable_referenced();
+ print_var_name (var);
+}
+
+
+void ir_print_glsl_visitor::visit(ir_dereference_array *ir)
+{
+ ir->array->accept(this);
+ buffer.asprintf_append ("[");
+ ir->array_index->accept(this);
+ buffer.asprintf_append ("]");
+}
+
+
+void ir_print_glsl_visitor::visit(ir_dereference_record *ir)
+{
+ ir->record->accept(this);
+ const char *field_name = ir->record->type->fields.structure[ir->field_idx].name;
+ buffer.asprintf_append (".%s", field_name);
+}
+
+
+bool ir_print_glsl_visitor::try_print_array_assignment (ir_dereference* lhs, ir_rvalue* rhs)
+{
+ if (this->state->language_version >= 120)
+ return false;
+ ir_dereference_variable* rhsarr = rhs->as_dereference_variable();
+ if (rhsarr == NULL)
+ return false;
+ const glsl_type* lhstype = lhs->type;
+ const glsl_type* rhstype = rhsarr->type;
+ if (!lhstype->is_array() || !rhstype->is_array())
+ return false;
+ if (lhstype->array_size() != rhstype->array_size())
+ return false;
+ if (lhstype->base_type != rhstype->base_type)
+ return false;
+
+ const unsigned size = rhstype->array_size();
+ for (unsigned i = 0; i < size; i++)
+ {
+ lhs->accept(this);
+ buffer.asprintf_append ("[%d]=", i);
+ rhs->accept(this);
+ buffer.asprintf_append ("[%d]", i);
+ if (i != size-1)
+ buffer.asprintf_append (";");
+ }
+ return true;
+}
+
+void ir_print_glsl_visitor::emit_assignment_part (ir_dereference* lhs, ir_rvalue* rhs, unsigned write_mask, ir_rvalue* dstIndex)
+{
+ lhs->accept(this);
+
+ if (dstIndex)
+ {
+ // if dst index is a constant, then emit a swizzle
+ ir_constant* dstConst = dstIndex->as_constant();
+ if (dstConst)
+ {
+ const char* comps = "xyzw";
+ char comp = comps[dstConst->get_int_component(0)];
+ buffer.asprintf_append (".%c", comp);
+ }
+ else
+ {
+ buffer.asprintf_append ("[");
+ dstIndex->accept(this);
+ buffer.asprintf_append ("]");
+ }
+ }
+
+ char mask[5];
+ unsigned j = 0;
+ const glsl_type* lhsType = lhs->type;
+ const glsl_type* rhsType = rhs->type;
+ if (!dstIndex && lhsType->matrix_columns <= 1 && lhsType->vector_elements > 1 && write_mask != (1<<lhsType->vector_elements)-1)
+ {
+ for (unsigned i = 0; i < 4; i++) {
+ if ((write_mask & (1 << i)) != 0) {
+ mask[j] = "xyzw"[i];
+ j++;
+ }
+ }
+ lhsType = glsl_type::get_instance(lhsType->base_type, j, 1);
+ }
+ mask[j] = '\0';
+ bool hasWriteMask = false;
+ if (mask[0])
+ {
+ buffer.asprintf_append (".%s", mask);
+ hasWriteMask = true;
+ }
+
+ buffer.asprintf_append (" = ");
+
+ bool typeMismatch = !dstIndex && (lhsType != rhsType);
+ const bool addSwizzle = hasWriteMask && typeMismatch;
+ if (typeMismatch)
+ {
+ if (!addSwizzle)
+ print_type(buffer, lhsType, true);
+ buffer.asprintf_append ("(");
+ }
+
+ rhs->accept(this);
+
+ if (typeMismatch)
+ {
+ buffer.asprintf_append (")");
+ if (addSwizzle)
+ buffer.asprintf_append (".%s", mask);
+ }
+}
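+// Examples: a write_mask of 0b0101 on a vec4 lhs prints "v.xz = ..." and the
+// masked lhs type becomes vec2 for the mismatch check. On a type mismatch the
+// rhs is wrapped: "v = vec4(rhs)" when there is no write mask, or
+// "v.xz = (rhs).xz" when there is one.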
+
+
+// Try to print (X = X + const) as (X += const), mostly to satisfy
+// OpenGL ES 2.0 loop syntax restrictions.
+static bool try_print_increment (ir_print_glsl_visitor* vis, ir_assignment* ir)
+{
+ if (ir->condition)
+ return false;
+
+ // Needs to be + on rhs
+ ir_expression* rhsOp = ir->rhs->as_expression();
+ if (!rhsOp || rhsOp->operation != ir_binop_add)
+ return false;
+
+ // Needs to write to whole variable
+ ir_variable* lhsVar = ir->whole_variable_written();
+ if (lhsVar == NULL)
+ return false;
+
+ // Types must match
+ if (ir->lhs->type != ir->rhs->type)
+ return false;
+
+ // Type must be scalar
+ if (!ir->lhs->type->is_scalar())
+ return false;
+
+ // rhs0 must be variable deref, same one as lhs
+ ir_dereference_variable* rhsDeref = rhsOp->operands[0]->as_dereference_variable();
+ if (rhsDeref == NULL)
+ return false;
+ if (lhsVar != rhsDeref->var)
+ return false;
+
+ // rhs1 must be a constant
+ ir_constant* rhsConst = rhsOp->operands[1]->as_constant();
+ if (!rhsConst)
+ return false;
+
+ // print variable name
+ ir->lhs->accept (vis);
+
+ // print ++ or +=const
+ if (ir->lhs->type->base_type <= GLSL_TYPE_INT && rhsConst->is_one())
+ {
+ vis->buffer.asprintf_append ("++");
+ }
+ else
+ {
+ vis->buffer.asprintf_append(" += ");
+ rhsConst->accept (vis);
+ }
+
+ return true;
+}
+
+
+void ir_print_glsl_visitor::visit(ir_assignment *ir)
+{
+ // if this is a loop induction variable initial assignment, and we aren't inside loop body:
+ // do not print it (will be printed when inside loop body)
+ if (!inside_loop_body)
+ {
+ ir_variable* whole_var = ir->whole_variable_written();
+ if (!ir->condition && whole_var)
+ {
+ // FIXME
+ // loop_variable_state* inductor_state = loopstate->get_for_inductor(whole_var);
+ // if (inductor_state && inductor_state->private_induction_variable_count == 1 &&
+ // can_emit_canonical_for(inductor_state))
+ // {
+ // skipped_this_ir = true;
+ // return;
+ // }
+ }
+ }
+
+ // assignments in global scope are postponed to main function
+ if (this->mode != kPrintGlslNone)
+ {
+ // FIXME: This assertion gets tripped when encountering const variable
+ // initializations which occur after the main() function definition.
+ // assert (!this->globals->main_function_done);
+		this->globals->global_assignments.push_tail (new(this->globals->mem_ctx) ga_entry(ir));
+ buffer.asprintf_append ("//"); // for the ; that will follow (ugly, I know)
+ return;
+ }
+
+ // if RHS is ir_triop_vector_insert, then we have to do some special dance. If source expression is:
+ // dst = vector_insert (a, b, idx)
+ // then emit it like:
+ // dst = a;
+ // dst.idx = b;
+ ir_expression* rhsOp = ir->rhs->as_expression();
+ if (rhsOp && rhsOp->operation == ir_triop_vector_insert)
+ {
+ // skip assignment if lhs and rhs would be the same
+ bool skip_assign = false;
+ ir_dereference_variable* lhsDeref = ir->lhs->as_dereference_variable();
+ ir_dereference_variable* rhsDeref = rhsOp->operands[0]->as_dereference_variable();
+ if (lhsDeref && rhsDeref)
+ {
+ if (lhsDeref->var == rhsDeref->var)
+ skip_assign = true;
+ }
+
+ if (!skip_assign)
+ {
+ emit_assignment_part(ir->lhs, rhsOp->operands[0], ir->write_mask, NULL);
+ buffer.asprintf_append ("; ");
+ }
+ emit_assignment_part(ir->lhs, rhsOp->operands[1], ir->write_mask, rhsOp->operands[2]);
+ return;
+ }
+
+ if (try_print_increment (this, ir))
+ return;
+
+ if (try_print_array_assignment (ir->lhs, ir->rhs))
+ return;
+
+	if (ir->condition)
+	{
+		buffer.asprintf_append ("if (");
+		ir->condition->accept(this);
+		buffer.asprintf_append (") ");
+	}
+
+ emit_assignment_part (ir->lhs, ir->rhs, ir->write_mask, NULL);
+}
+
+
+#ifdef _MSC_VER
+#define isnan(x) _isnan(x)
+#define isinf(x) (!_finite(x))
+#endif
+
+#define fpcheck(x) (isnan(x) || isinf(x))
+
+void print_float (string_buffer& buffer, float f)
+{
+ // Kind of roundabout way, but this is to satisfy two things:
+ // * MSVC and gcc-based compilers differ a bit in how they treat float
+	//   width/precision specifiers. Want to match for tests.
+	// * GLSL (early versions at least) requires floats to have ".0" or
+ // exponential notation.
+ char tmp[64];
+ snprintf(tmp, 64, "%.7g", f);
+
+ char* posE = NULL;
+ posE = strchr(tmp, 'e');
+ if (!posE)
+ posE = strchr(tmp, 'E');
+
+	// snprintf formats infinity as "inf"/"-inf" (which the ".0" fixup below
+	// would turn into "inf.0"), and that isn't valid GLSL. GLSL has no
+	// infinity constant, so print an equivalent expression instead.
+ if (f == std::numeric_limits<float>::infinity())
+ strcpy(tmp, "(1.0/0.0)");
+
+ if (f == -std::numeric_limits<float>::infinity())
+ strcpy(tmp, "(-1.0/0.0)");
+
+ // Do similar thing for NaN
+ if (isnan(f))
+ strcpy(tmp, "(0.0/0.0)");
+
+ #if _MSC_VER
+ // While gcc would print something like 1.0e+07, MSVC will print 1.0e+007 -
+ // only for exponential notation, it seems, will add one extra useless zero. Let's try to remove
+ // that so compiler output matches.
+ if (posE != NULL)
+ {
+ if((posE[1] == '+' || posE[1] == '-') && posE[2] == '0')
+ {
+ char* p = posE+2;
+ while (p[0])
+ {
+ p[0] = p[1];
+ ++p;
+ }
+ }
+ }
+ #endif
+
+ buffer.asprintf_append ("%s", tmp);
+
+ // need to append ".0"?
+ if (!strchr(tmp,'.') && (posE == NULL))
+ buffer.asprintf_append(".0");
+}
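+// Example outputs: 1.0f -> "1.0", 0.5f -> "0.5", 1e7f -> "1e+07" (no ".0"
+// appended once an exponent is present), +infinity -> "(1.0/0.0)",
+// NaN -> "(0.0/0.0)".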
+
+void ir_print_glsl_visitor::visit(ir_constant *ir)
+{
+ const glsl_type* type = ir->type;
+
+ if (type == glsl_type::float_type)
+ {
+ if (fpcheck(ir->value.f[0]))
+ {
+			// Non-printable float. If we have bit conversions, we're fine; otherwise do hand-wavey things in print_float().
+ if ((state->es_shader && (state->language_version >= 300))
+ || (state->language_version >= 330)
+ || (state->ARB_shader_bit_encoding_enable))
+ {
+ buffer.asprintf_append("uintBitsToFloat(%uu)", ir->value.u[0]);
+ return;
+ }
+ }
+
+ print_float (buffer, ir->value.f[0]);
+ return;
+ }
+ else if (type == glsl_type::int_type)
+ {
+ // Need special handling for INT_MIN
+ if (ir->value.u[0] == 0x80000000)
+ buffer.asprintf_append("int(0x%X)", ir->value.i[0]);
+ else
+ buffer.asprintf_append ("%d", ir->value.i[0]);
+ return;
+ }
+ else if (type == glsl_type::uint_type)
+ {
+		// ES 2.0 doesn't support uints, and neither does GLSL < 1.30
+ if ((state->es_shader && (state->language_version < 300))
+ || (state->language_version < 130))
+ buffer.asprintf_append("%u", ir->value.u[0]);
+ else
+ {
+ // Old Adreno drivers try to be smart with '0u' and treat that as 'const int'. Sigh.
+ if (ir->value.u[0] == 0)
+ buffer.asprintf_append("uint(0)");
+ else
+ buffer.asprintf_append("%uu", ir->value.u[0]);
+ }
+ return;
+ }
+
+ const glsl_type *const base_type = ir->type->get_base_type();
+
+ print_type(buffer, type, true);
+ buffer.asprintf_append ("(");
+
+ if (ir->type->is_array()) {
+ for (unsigned i = 0; i < ir->type->length; i++)
+ {
+ if (i != 0)
+ buffer.asprintf_append (", ");
+ ir->get_array_element(i)->accept(this);
+ }
+ } else if (ir->type->is_struct()) {
+ for (unsigned i = 0; i < ir->type->length; i++) {
+ if (i > 0)
+ buffer.asprintf_append (", ");
+ ir->const_elements[i]->accept(this);
+ }
+
+	} else {
+ bool first = true;
+ for (unsigned i = 0; i < ir->type->components(); i++) {
+ if (!first)
+ buffer.asprintf_append (", ");
+ first = false;
+ switch (base_type->base_type) {
+ case GLSL_TYPE_UINT:
+ {
+				// ES 2.0 doesn't support uints, and neither does GLSL < 1.30
+ if ((state->es_shader && (state->language_version < 300))
+ || (state->language_version < 130))
+ buffer.asprintf_append("%u", ir->value.u[i]);
+ else
+ buffer.asprintf_append("%uu", ir->value.u[i]);
+ break;
+ }
+ case GLSL_TYPE_INT:
+ {
+ // Need special handling for INT_MIN
+ if (ir->value.u[i] == 0x80000000)
+ buffer.asprintf_append("int(0x%X)", ir->value.i[i]);
+ else
+ buffer.asprintf_append("%d", ir->value.i[i]);
+ break;
+ }
+ case GLSL_TYPE_FLOAT: print_float(buffer, ir->value.f[i]); break;
+ case GLSL_TYPE_BOOL: buffer.asprintf_append ("%d", ir->value.b[i]); break;
+ default: assert(0);
+ }
+ }
+ }
+ buffer.asprintf_append (")");
+}
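+// Examples: ints print as "-3" (INT_MIN as "int(0x80000000)"); uints as "7u",
+// with zero as "uint(0)" per the workaround above; composites go through
+// constructors, e.g. "vec3(0.0, 1.0, 2.0)".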
+
+
+void
+ir_print_glsl_visitor::visit(ir_call *ir)
+{
+ // calls in global scope are postponed to main function
+ if (this->mode != kPrintGlslNone)
+ {
+ assert (!this->globals->main_function_done);
+		this->globals->global_assignments.push_tail (new(this->globals->mem_ctx) ga_entry(ir));
+ buffer.asprintf_append ("//"); // for the ; that will follow (ugly, I know)
+ return;
+ }
+
+ if (ir->return_deref)
+ {
+ visit(ir->return_deref);
+ buffer.asprintf_append (" = ");
+ }
+
+ buffer.asprintf_append ("%s (", ir->callee_name());
+ bool first = true;
+ foreach_in_list(ir_instruction, inst, &ir->actual_parameters) {
+ if (!first)
+ buffer.asprintf_append (", ");
+ inst->accept(this);
+ first = false;
+ }
+ buffer.asprintf_append (")");
+}
+
+
+void
+ir_print_glsl_visitor::visit(ir_return *ir)
+{
+ buffer.asprintf_append ("return");
+
+ ir_rvalue *const value = ir->get_value();
+ if (value) {
+ buffer.asprintf_append (" ");
+ value->accept(this);
+ }
+}
+
+
+void
+ir_print_glsl_visitor::visit(ir_discard *ir)
+{
+ buffer.asprintf_append ("discard");
+
+ if (ir->condition != NULL) {
+ buffer.asprintf_append (" TODO ");
+ ir->condition->accept(this);
+ }
+}
+
+void
+ir_print_glsl_visitor::visit(ir_demote *ir)
+{
+ buffer.asprintf_append ("discard-TODO");
+}
+
+void
+ir_print_glsl_visitor::visit(ir_if *ir)
+{
+ buffer.asprintf_append ("if (");
+ ir->condition->accept(this);
+
+ buffer.asprintf_append (") {\n");
+ indentation++; previous_skipped = false;
+
+
+ foreach_in_list(ir_instruction, inst, &ir->then_instructions) {
+ indent();
+ inst->accept(this);
+ end_statement_line();
+ }
+
+ indentation--;
+ indent();
+ buffer.asprintf_append ("}");
+
+ if (!ir->else_instructions.is_empty())
+ {
+ buffer.asprintf_append (" else {\n");
+ indentation++; previous_skipped = false;
+
+ foreach_in_list(ir_instruction, inst, &ir->else_instructions) {
+ indent();
+ inst->accept(this);
+ end_statement_line();
+ }
+ indentation--;
+ indent();
+ buffer.asprintf_append ("}");
+ }
+}
+
+bool ir_print_glsl_visitor::can_emit_canonical_for (loop_variable_state *ls)
+{
+ if (ls == NULL)
+ return false;
+
+ if (ls->induction_variables.is_empty())
+ return false;
+
+ if (ls->terminators.is_empty())
+ return false;
+
+ // only support for loops with one terminator condition
+ int terminatorCount = ls->terminators.length();
+ if (terminatorCount != 1)
+ return false;
+
+ return true;
+}
+
+bool ir_print_glsl_visitor::emit_canonical_for (ir_loop* ir)
+{
+ loop_variable_state* const ls = this->loopstate->get(ir);
+
+ if (!can_emit_canonical_for(ls))
+ return false;
+
+ hash_table* terminator_hash = _mesa_hash_table_create(nullptr, _mesa_hash_pointer, _mesa_key_pointer_equal);
+ hash_table* induction_hash = _mesa_hash_table_create(nullptr, _mesa_hash_pointer, _mesa_key_pointer_equal);
+
+ buffer.asprintf_append("for (");
+ inside_loop_body = true;
+
+ // emit loop induction variable declarations.
+ // only for loops with single induction variable, to avoid cases of different types of them
+ // FIXME
+ // if (ls->private_induction_variable_count == 1)
+ // {
+ // foreach_in_list(loop_variable, indvar, &ls->induction_variables)
+ // {
+ // if (!this->loopstate->get_for_inductor(indvar->var))
+ // continue;
+
+ // ir_variable* var = indvar->var;
+ // print_precision (var, var->type);
+ // print_type(buffer, var->type, false);
+ // buffer.asprintf_append (" ");
+ // print_var_name (var);
+ // print_type_post(buffer, var->type, false);
+ // if (indvar->initial_value)
+ // {
+ // buffer.asprintf_append (" = ");
+ // // if the var is an array add the proper initializer
+ // if(var->type->is_vector())
+ // {
+ // print_type(buffer, var->type, false);
+ // buffer.asprintf_append ("(");
+ // }
+ // indvar->initial_value->accept(this);
+ // if(var->type->is_vector())
+ // {
+ // buffer.asprintf_append (")");
+ // }
+ // }
+ // }
+ // }
+ buffer.asprintf_append("; ");
+
+ // emit loop terminating conditions
+ foreach_in_list(loop_terminator, term, &ls->terminators)
+ {
+ _mesa_hash_table_insert(terminator_hash, term->ir, term);
+
+ // IR has conditions in the form of "if (x) break",
+		// whereas a for loop needs them negated, in the form of
+		// "keep looping while (!x)".
+ // See if we can print them using syntax that reads nice.
+ bool handled = false;
+ ir_expression* term_expr = term->ir->condition->as_expression();
+ if (term_expr)
+ {
+ // Binary comparison conditions
+ const char* termOp = NULL;
+ switch (term_expr->operation)
+ {
+ case ir_binop_less: termOp = ">="; break;
+ case ir_binop_gequal: termOp = "<"; break;
+ case ir_binop_equal: termOp = "!="; break;
+ case ir_binop_nequal: termOp = "=="; break;
+ default: break;
+ }
+ if (termOp != NULL)
+ {
+ term_expr->operands[0]->accept(this);
+ buffer.asprintf_append(" %s ", termOp);
+ term_expr->operands[1]->accept(this);
+ handled = true;
+ }
+
+ // Unary logic not
+ if (!handled && term_expr->operation == ir_unop_logic_not)
+ {
+ term_expr->operands[0]->accept(this);
+ handled = true;
+ }
+ }
+
+ // More complex condition, print as "!(x)"
+ if (!handled)
+ {
+ buffer.asprintf_append("!(");
+ term->ir->condition->accept(this);
+ buffer.asprintf_append(")");
+ }
+ }
+ buffer.asprintf_append("; ");
+
+ // emit loop induction variable updates
+ bool first = true;
+ foreach_in_list(loop_variable, indvar, &ls->induction_variables)
+ {
+ _mesa_hash_table_insert(induction_hash, indvar->first_assignment, indvar);
+ if (!first)
+ buffer.asprintf_append(", ");
+ visit(indvar->first_assignment);
+ first = false;
+ }
+ buffer.asprintf_append(") {\n");
+
+ inside_loop_body = false;
+
+ // emit loop body
+ indentation++; previous_skipped = false;
+ foreach_in_list(ir_instruction, inst, &ir->body_instructions) {
+
+ // skip termination & induction statements,
+ // they are part of "for" clause
+ if (_mesa_hash_table_search(terminator_hash, inst))
+ continue;
+ if (_mesa_hash_table_search(induction_hash, inst))
+ continue;
+
+ indent();
+ inst->accept(this);
+ end_statement_line();
+ }
+ indentation--;
+
+ indent();
+ buffer.asprintf_append("}");
+
+ _mesa_hash_table_destroy (terminator_hash, nullptr);
+ _mesa_hash_table_destroy (induction_hash, nullptr);
+
+ return true;
+}
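+// Illustrative transformation (note the induction-variable declaration above
+// is currently FIXME'd out, so the init clause stays empty): an IR loop like
+//
+//   loop { if (i >= 4) break; ...body...; i = i + 1; }
+//
+// prints as
+//
+//   for (; i < 4; i++) {
+//     ...body...
+//   }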
+
+
+void
+ir_print_glsl_visitor::visit(ir_loop *ir)
+{
+ if (emit_canonical_for(ir))
+ return;
+
+ buffer.asprintf_append ("while (true) {\n");
+ indentation++; previous_skipped = false;
+ foreach_in_list(ir_instruction, inst, &ir->body_instructions) {
+ indent();
+ inst->accept(this);
+ end_statement_line();
+ }
+ indentation--;
+ indent();
+ buffer.asprintf_append ("}");
+}
+
+
+void
+ir_print_glsl_visitor::visit(ir_loop_jump *ir)
+{
+ buffer.asprintf_append ("%s", ir->is_break() ? "break" : "continue");
+}
+
+void
+ir_print_glsl_visitor::visit(ir_precision_statement *ir)
+{
+ buffer.asprintf_append ("%s", ir->precision_statement);
+}
+
+static const char*
+interface_packing_string(enum glsl_interface_packing packing)
+{
+ switch (packing) {
+ case GLSL_INTERFACE_PACKING_STD140:
+ return "std140";
+ case GLSL_INTERFACE_PACKING_SHARED:
+ return "shared";
+ case GLSL_INTERFACE_PACKING_PACKED:
+ return "packed";
+ case GLSL_INTERFACE_PACKING_STD430:
+ return "std430";
+ default:
+ unreachable("Unexpected interface packing");
+ return "UNKNOWN";
+ }
+}
+
+static const char*
+interface_variable_mode_string(enum ir_variable_mode mode)
+{
+ switch (mode) {
+ case ir_var_uniform:
+ return "uniform";
+ case ir_var_shader_storage:
+ return "buffer";
+ default:
+ unreachable("Unexpected interface variable mode");
+ return "UNKOWN";
+ }
+}
+
+void
+ir_print_glsl_visitor::visit(ir_typedecl_statement *ir)
+{
+ const glsl_type *const s = ir->type_decl;
+
+ ir_variable* interface_var = NULL;
+
+ if (s->is_struct()) {
+ buffer.asprintf_append ("struct %s {\n", s->name);
+ } else if (s->is_interface()) {
+ const char* packing = interface_packing_string(s->get_interface_packing());
+
+ // Find a variable defined by this interface, as it holds some necessary data.
+ exec_node* n = ir;
+ while ((n = n->get_next())) {
+ ir_variable* v = ((ir_instruction *)n)->as_variable();
+ if (v != NULL && v->get_interface_type() == ir->type_decl) {
+ interface_var = v;
+ break;
+ }
+ }
+ const char* mode = interface_variable_mode_string((enum ir_variable_mode)interface_var->data.mode);
+ if (interface_var->data.explicit_binding) {
+ uint16_t binding = interface_var->data.binding;
+ buffer.asprintf_append ("layout(%s, binding=%" PRIu16 ") %s %s {\n", packing, binding, mode, s->name);
+ } else {
+ buffer.asprintf_append ("layout(%s) %s %s {\n", packing, mode, s->name);
+ }
+
+ }
+
+ for (unsigned j = 0; j < s->length; j++) {
+ buffer.asprintf_append (" ");
+ // FIXME: precision
+ // if (state->es_shader)
+ // buffer.asprintf_append ("%s", get_precision_string(s->fields.structure[j].precision));
+ print_type(buffer, s->fields.structure[j].type, false);
+ buffer.asprintf_append (" %s", s->fields.structure[j].name);
+ print_type_post(buffer, s->fields.structure[j].type, false);
+ buffer.asprintf_append (";\n");
+ }
+ buffer.asprintf_append ("}");
+
+ if (interface_var && interface_var->is_interface_instance()) {
+ buffer.asprintf_append(" ");
+ print_var_name(interface_var);
+ }
+}
+
+void
+ir_print_glsl_visitor::visit(ir_emit_vertex *ir)
+{
+ buffer.asprintf_append ("emit-vertex-TODO");
+}
+
+void
+ir_print_glsl_visitor::visit(ir_end_primitive *ir)
+{
+ buffer.asprintf_append ("end-primitive-TODO");
+}
+
+void
+ir_print_glsl_visitor::visit(ir_barrier *ir)
+{
+ buffer.asprintf_append ("discard-TODO");
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h
new file mode 100644
index 0000000000..827cf2876c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h
@@ -0,0 +1,105 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#pragma once
+#ifndef IR_PRINT_GLSL_VISITOR_H
+#define IR_PRINT_GLSL_VISITOR_H
+
+#include "ir.h"
+
+enum PrintGlslMode {
+ kPrintGlslNone = 0,
+ kPrintGlslVertex,
+ kPrintGlslFragment,
+};
+
+extern char* _mesa_print_ir_glsl(exec_list *instructions,
+ struct _mesa_glsl_parse_state *state,
+ char* buf, PrintGlslMode mode);
+
+
+
+class string_buffer
+{
+public:
+ string_buffer(void* mem_ctx)
+ {
+ m_Capacity = 512;
+ m_Ptr = (char*)ralloc_size(mem_ctx, m_Capacity);
+ m_Size = 0;
+ m_Ptr[0] = 0;
+ }
+
+ ~string_buffer()
+ {
+ ralloc_free(m_Ptr);
+ }
+
+ bool empty() const { return m_Size == 0; }
+
+ const char* c_str() const { return m_Ptr; }
+
+ void asprintf_append(const char *fmt, ...) PRINTFLIKE(2, 3)
+ {
+ va_list args;
+ va_start(args, fmt);
+ vasprintf_append(fmt, args);
+ va_end(args);
+ }
+
+ void vasprintf_append(const char *fmt, va_list args)
+ {
+ assert (m_Ptr != NULL);
+ vasprintf_rewrite_tail (&m_Size, fmt, args);
+ }
+
+ void vasprintf_rewrite_tail (size_t *start, const char *fmt, va_list args)
+ {
+ assert (m_Ptr != NULL);
+
+ size_t new_length = printf_length(fmt, args);
+ size_t needed_length = m_Size + new_length + 1;
+
+ if (m_Capacity < needed_length)
+ {
+ m_Capacity = MAX2 (m_Capacity + m_Capacity/2, needed_length);
+ m_Ptr = (char*)reralloc_size(ralloc_parent(m_Ptr), m_Ptr, m_Capacity);
+ }
+
+ vsnprintf(m_Ptr + m_Size, new_length+1, fmt, args);
+ m_Size += new_length;
+ assert (m_Capacity >= m_Size);
+ }
+
+private:
+ char* m_Ptr;
+ size_t m_Size;
+ size_t m_Capacity;
+};
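+
+/* Usage sketch (illustrative, not part of the vendored header). Storage
+ * comes from the given ralloc context and grows geometrically (capacity
+ * times 3/2, or exactly what an append needs), so repeated appends are
+ * amortized linear:
+ *
+ *   void *ctx = ralloc_context(NULL);
+ *   {
+ *      string_buffer buf(ctx);
+ *      buf.asprintf_append("vec%u v%d;", 3u, 7);
+ *      assert(strcmp(buf.c_str(), "vec3 v7;") == 0);
+ *   }  // ~string_buffer frees the character storage
+ *   ralloc_free(ctx);
+ */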
+
+
+extern void print_float (string_buffer& buffer, float f);
+
+
+#endif /* IR_PRINT_GLSL_VISITOR_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp
new file mode 100644
index 0000000000..d621c26700
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp
@@ -0,0 +1,675 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <inttypes.h> /* for PRIx64 macro */
+#include "ir_print_visitor.h"
+#include "compiler/glsl_types.h"
+#include "glsl_parser_extras.h"
+#include "main/macros.h"
+#include "util/hash_table.h"
+#include "util/u_string.h"
+#include "util/half_float.h"
+
+static void print_type(FILE *f, const glsl_type *t);
+
+void
+ir_instruction::print(void) const
+{
+ this->fprint(stdout);
+}
+
+void
+ir_instruction::fprint(FILE *f) const
+{
+ ir_instruction *deconsted = const_cast<ir_instruction *>(this);
+
+ ir_print_visitor v(f);
+ deconsted->accept(&v);
+}
+
+extern "C" {
+void
+_mesa_print_ir(FILE *f, exec_list *instructions,
+ struct _mesa_glsl_parse_state *state)
+{
+ if (state) {
+ for (unsigned i = 0; i < state->num_user_structures; i++) {
+ const glsl_type *const s = state->user_structures[i];
+
+ fprintf(f, "(structure (%s) (%s@%p) (%u) (\n",
+ s->name, s->name, (void *) s, s->length);
+
+ for (unsigned j = 0; j < s->length; j++) {
+ fprintf(f, "\t((");
+ print_type(f, s->fields.structure[j].type);
+ fprintf(f, ")(%s))\n", s->fields.structure[j].name);
+ }
+
+ fprintf(f, ")\n");
+ }
+ }
+
+ fprintf(f, "(\n");
+ foreach_in_list(ir_instruction, ir, instructions) {
+ ir->fprint(f);
+ if (ir->ir_type != ir_type_function)
+ fprintf(f, "\n");
+ }
+ fprintf(f, ")\n");
+}
+
+void
+fprint_ir(FILE *f, const void *instruction)
+{
+ const ir_instruction *ir = (const ir_instruction *)instruction;
+ ir->fprint(f);
+}
+
+} /* extern "C" */
+
+ir_print_visitor::ir_print_visitor(FILE *f)
+ : f(f)
+{
+ indentation = 0;
+ printable_names = _mesa_pointer_hash_table_create(NULL);
+ symbols = _mesa_symbol_table_ctor();
+ mem_ctx = ralloc_context(NULL);
+}
+
+ir_print_visitor::~ir_print_visitor()
+{
+ _mesa_hash_table_destroy(printable_names, NULL);
+ _mesa_symbol_table_dtor(symbols);
+ ralloc_free(mem_ctx);
+}
+
+void ir_print_visitor::indent(void)
+{
+ for (int i = 0; i < indentation; i++)
+ fprintf(f, " ");
+}
+
+const char *
+ir_print_visitor::unique_name(ir_variable *var)
+{
+ /* var->name can be NULL in function prototypes when a type is given for a
+ * parameter but no name is given. In that case, just return an empty
+ * string. Don't worry about tracking the generated name in the printable
+ * names hash because this is the only scope where it can ever appear.
+ */
+ if (var->name == NULL) {
+ static unsigned arg = 1;
+ return ralloc_asprintf(this->mem_ctx, "parameter@%u", arg++);
+ }
+
+ /* Do we already have a name for this variable? */
+ struct hash_entry * entry =
+ _mesa_hash_table_search(this->printable_names, var);
+
+ if (entry != NULL) {
+ return (const char *) entry->data;
+ }
+
+ /* If there's no conflict, just use the original name */
+ const char* name = NULL;
+ if (_mesa_symbol_table_find_symbol(this->symbols, var->name) == NULL) {
+ name = var->name;
+ } else {
+ static unsigned i = 1;
+ name = ralloc_asprintf(this->mem_ctx, "%s@%u", var->name, ++i);
+ }
+ _mesa_hash_table_insert(this->printable_names, var, (void *) name);
+ _mesa_symbol_table_add_symbol(this->symbols, name, var);
+ return name;
+}
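+
+/* Illustrative: printing two distinct ir_variables that are both named
+ * "tmp" yields "tmp" for the first and "tmp@2" for the second; the
+ * symbol-table lookup above detects the clash once the first name has
+ * been registered.
+ */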
+
+static void
+print_type(FILE *f, const glsl_type *t)
+{
+ if (t->is_array()) {
+ fprintf(f, "(array ");
+ print_type(f, t->fields.array);
+ fprintf(f, " %u)", t->length);
+ } else if (t->is_struct() && !is_gl_identifier(t->name)) {
+ fprintf(f, "%s@%p", t->name, (void *) t);
+ } else {
+ fprintf(f, "%s", t->name);
+ }
+}
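+
+/* Illustrative output: "float", "(array vec4 3)", or "MyStruct@0x..."
+ * for user-defined structs, so same-named struct types stay
+ * distinguishable by address.
+ */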
+
+void ir_print_visitor::visit(ir_rvalue *)
+{
+ fprintf(f, "error");
+}
+
+void ir_print_visitor::visit(ir_variable *ir)
+{
+ fprintf(f, "(declare ");
+
+ char binding[32] = {0};
+ if (ir->data.binding)
+ snprintf(binding, sizeof(binding), "binding=%i ", ir->data.binding);
+
+ char loc[32] = {0};
+ if (ir->data.location != -1)
+ snprintf(loc, sizeof(loc), "location=%i ", ir->data.location);
+
+ char component[32] = {0};
+ if (ir->data.explicit_component || ir->data.location_frac != 0)
+ snprintf(component, sizeof(component), "component=%i ",
+ ir->data.location_frac);
+
+ char stream[32] = {0};
+ if (ir->data.stream & (1u << 31)) {
+ if (ir->data.stream & ~(1u << 31)) {
+ snprintf(stream, sizeof(stream), "stream(%u,%u,%u,%u) ",
+ ir->data.stream & 3, (ir->data.stream >> 2) & 3,
+ (ir->data.stream >> 4) & 3, (ir->data.stream >> 6) & 3);
+ }
+ } else if (ir->data.stream) {
+ snprintf(stream, sizeof(stream), "stream%u ", ir->data.stream);
+ }
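+   /* Illustrative: stream == (1u << 31) | 0x1b prints "stream(3,2,1,0) ";
+    * bit 31 marks the packed per-component encoding, two bits per
+    * component, lowest bits first. A plain index prints e.g. "stream1 ". */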
+
+ char image_format[32] = {0};
+ if (ir->data.image_format) {
+ snprintf(image_format, sizeof(image_format), "format=%x ",
+ ir->data.image_format);
+ }
+
+ const char *const cent = (ir->data.centroid) ? "centroid " : "";
+ const char *const samp = (ir->data.sample) ? "sample " : "";
+ const char *const patc = (ir->data.patch) ? "patch " : "";
+ const char *const inv = (ir->data.invariant) ? "invariant " : "";
+ const char *const explicit_inv = (ir->data.explicit_invariant) ? "explicit_invariant " : "";
+ const char *const prec = (ir->data.precise) ? "precise " : "";
+ const char *const bindless = (ir->data.bindless) ? "bindless " : "";
+ const char *const bound = (ir->data.bound) ? "bound " : "";
+ const char *const memory_read_only = (ir->data.memory_read_only) ? "readonly " : "";
+ const char *const memory_write_only = (ir->data.memory_write_only) ? "writeonly " : "";
+ const char *const memory_coherent = (ir->data.memory_coherent) ? "coherent " : "";
+ const char *const memory_volatile = (ir->data.memory_volatile) ? "volatile " : "";
+ const char *const memory_restrict = (ir->data.memory_restrict) ? "restrict " : "";
+ const char *const mode[] = { "", "uniform ", "shader_storage ",
+ "shader_shared ", "shader_in ", "shader_out ",
+ "in ", "out ", "inout ",
+ "const_in ", "sys ", "temporary " };
+ STATIC_ASSERT(ARRAY_SIZE(mode) == ir_var_mode_count);
+ const char *const interp[] = { "", "smooth", "flat", "noperspective", "explicit" };
+ STATIC_ASSERT(ARRAY_SIZE(interp) == INTERP_MODE_COUNT);
+
+ fprintf(f, "(%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s) ",
+ binding, loc, component, cent, bindless, bound,
+ image_format, memory_read_only, memory_write_only,
+ memory_coherent, memory_volatile, memory_restrict,
+ samp, patc, inv, explicit_inv, prec, mode[ir->data.mode],
+ stream,
+ interp[ir->data.interpolation]);
+
+ print_type(f, ir->type);
+ fprintf(f, " %s)", unique_name(ir));
+}
+
+
+void ir_print_visitor::visit(ir_function_signature *ir)
+{
+ _mesa_symbol_table_push_scope(symbols);
+ fprintf(f, "(signature ");
+ indentation++;
+
+ print_type(f, ir->return_type);
+ fprintf(f, "\n");
+ indent();
+
+ fprintf(f, "(parameters\n");
+ indentation++;
+
+ foreach_in_list(ir_variable, inst, &ir->parameters) {
+ indent();
+ inst->accept(this);
+ fprintf(f, "\n");
+ }
+ indentation--;
+
+ indent();
+ fprintf(f, ")\n");
+
+ indent();
+
+ fprintf(f, "(\n");
+ indentation++;
+
+ foreach_in_list(ir_instruction, inst, &ir->body) {
+ indent();
+ inst->accept(this);
+ fprintf(f, "\n");
+ }
+ indentation--;
+ indent();
+ fprintf(f, "))\n");
+ indentation--;
+ _mesa_symbol_table_pop_scope(symbols);
+}
+
+
+void ir_print_visitor::visit(ir_function *ir)
+{
+ fprintf(f, "(%s function %s\n", ir->is_subroutine ? "subroutine" : "", ir->name);
+ indentation++;
+ foreach_in_list(ir_function_signature, sig, &ir->signatures) {
+ indent();
+ sig->accept(this);
+ fprintf(f, "\n");
+ }
+ indentation--;
+ indent();
+ fprintf(f, ")\n\n");
+}
+
+
+void ir_print_visitor::visit(ir_expression *ir)
+{
+ fprintf(f, "(expression ");
+
+ print_type(f, ir->type);
+
+ fprintf(f, " %s ", ir_expression_operation_strings[ir->operation]);
+
+ for (unsigned i = 0; i < ir->num_operands; i++) {
+ ir->operands[i]->accept(this);
+ }
+
+ fprintf(f, ") ");
+}
+
+
+void ir_print_visitor::visit(ir_texture *ir)
+{
+ fprintf(f, "(%s ", ir->opcode_string());
+
+ if (ir->op == ir_samples_identical) {
+ ir->sampler->accept(this);
+ fprintf(f, " ");
+ ir->coordinate->accept(this);
+ fprintf(f, ")");
+ return;
+ }
+
+ print_type(f, ir->type);
+ fprintf(f, " ");
+
+ ir->sampler->accept(this);
+ fprintf(f, " ");
+
+ if (ir->op != ir_txs && ir->op != ir_query_levels &&
+ ir->op != ir_texture_samples) {
+ ir->coordinate->accept(this);
+
+ fprintf(f, " ");
+
+ if (ir->offset != NULL) {
+ ir->offset->accept(this);
+ } else {
+ fprintf(f, "0");
+ }
+
+ fprintf(f, " ");
+ }
+
+ if (ir->op != ir_txf && ir->op != ir_txf_ms &&
+ ir->op != ir_txs && ir->op != ir_tg4 &&
+ ir->op != ir_query_levels && ir->op != ir_texture_samples) {
+ if (ir->projector)
+ ir->projector->accept(this);
+ else
+ fprintf(f, "1");
+
+ if (ir->shadow_comparator) {
+ fprintf(f, " ");
+ ir->shadow_comparator->accept(this);
+ } else {
+ fprintf(f, " ()");
+ }
+ }
+
+ fprintf(f, " ");
+ switch (ir->op)
+ {
+ case ir_tex:
+ case ir_lod:
+ case ir_query_levels:
+ case ir_texture_samples:
+ break;
+ case ir_txb:
+ ir->lod_info.bias->accept(this);
+ break;
+ case ir_txl:
+ case ir_txf:
+ case ir_txs:
+ ir->lod_info.lod->accept(this);
+ break;
+ case ir_txf_ms:
+ ir->lod_info.sample_index->accept(this);
+ break;
+ case ir_txd:
+ fprintf(f, "(");
+ ir->lod_info.grad.dPdx->accept(this);
+ fprintf(f, " ");
+ ir->lod_info.grad.dPdy->accept(this);
+ fprintf(f, ")");
+ break;
+ case ir_tg4:
+ ir->lod_info.component->accept(this);
+ break;
+ case ir_samples_identical:
+ unreachable("ir_samples_identical was already handled");
+ };
+ fprintf(f, ")");
+}
+
+
+void ir_print_visitor::visit(ir_swizzle *ir)
+{
+ const unsigned swiz[4] = {
+ ir->mask.x,
+ ir->mask.y,
+ ir->mask.z,
+ ir->mask.w,
+ };
+
+ fprintf(f, "(swiz ");
+ for (unsigned i = 0; i < ir->mask.num_components; i++) {
+ fprintf(f, "%c", "xyzw"[swiz[i]]);
+ }
+ fprintf(f, " ");
+ ir->val->accept(this);
+ fprintf(f, ")");
+}
+
+
+void ir_print_visitor::visit(ir_dereference_variable *ir)
+{
+ ir_variable *var = ir->variable_referenced();
+ fprintf(f, "(var_ref %s) ", unique_name(var));
+}
+
+
+void ir_print_visitor::visit(ir_dereference_array *ir)
+{
+ fprintf(f, "(array_ref ");
+ ir->array->accept(this);
+ ir->array_index->accept(this);
+ fprintf(f, ") ");
+}
+
+
+void ir_print_visitor::visit(ir_dereference_record *ir)
+{
+ fprintf(f, "(record_ref ");
+ ir->record->accept(this);
+
+ const char *field_name =
+ ir->record->type->fields.structure[ir->field_idx].name;
+ fprintf(f, " %s) ", field_name);
+}
+
+
+void ir_print_visitor::visit(ir_assignment *ir)
+{
+ fprintf(f, "(assign ");
+
+ if (ir->condition)
+ ir->condition->accept(this);
+
+ char mask[5];
+ unsigned j = 0;
+
+ for (unsigned i = 0; i < 4; i++) {
+ if ((ir->write_mask & (1 << i)) != 0) {
+ mask[j] = "xyzw"[i];
+ j++;
+ }
+ }
+ mask[j] = '\0';
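+   /* e.g. a write_mask of 0x5 (bits 0 and 2 set) prints as "xz" */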
+
+ fprintf(f, " (%s) ", mask);
+
+ ir->lhs->accept(this);
+
+ fprintf(f, " ");
+
+ ir->rhs->accept(this);
+ fprintf(f, ") ");
+}
+
+static void
+print_float_constant(FILE *f, float val)
+{
+ if (val == 0.0f)
+ /* 0.0 == -0.0, so print with %f to get the proper sign. */
+ fprintf(f, "%f", val);
+ else if (fabs(val) < 0.000001f)
+ fprintf(f, "%a", val);
+ else if (fabs(val) > 1000000.0f)
+ fprintf(f, "%e", val);
+ else
+ fprintf(f, "%f", val);
+}
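+
+/* Illustrative outputs of the rules above:
+ *   0.0f  -> "0.000000"      (%f preserves the sign of -0.0)
+ *   1e-7f -> hex float (%a), so tiny values round-trip exactly
+ *   5e+8f -> "5.000000e+08"  (%e)
+ *   1.5f  -> "1.500000"      (%f)
+ */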
+
+void ir_print_visitor::visit(ir_constant *ir)
+{
+ fprintf(f, "(constant ");
+ print_type(f, ir->type);
+ fprintf(f, " (");
+
+ if (ir->type->is_array()) {
+ for (unsigned i = 0; i < ir->type->length; i++)
+ ir->get_array_element(i)->accept(this);
+ } else if (ir->type->is_struct()) {
+ for (unsigned i = 0; i < ir->type->length; i++) {
+ fprintf(f, "(%s ", ir->type->fields.structure[i].name);
+ ir->get_record_field(i)->accept(this);
+ fprintf(f, ")");
+ }
+ } else {
+ for (unsigned i = 0; i < ir->type->components(); i++) {
+ if (i != 0)
+ fprintf(f, " ");
+ switch (ir->type->base_type) {
+ case GLSL_TYPE_UINT: fprintf(f, "%u", ir->value.u[i]); break;
+ case GLSL_TYPE_INT: fprintf(f, "%d", ir->value.i[i]); break;
+ case GLSL_TYPE_FLOAT:
+ print_float_constant(f, ir->value.f[i]);
+ break;
+ case GLSL_TYPE_FLOAT16:
+ print_float_constant(f, _mesa_half_to_float(ir->value.f16[i]));
+ break;
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_UINT64:
+ fprintf(f, "%" PRIu64, ir->value.u64[i]);
+ break;
+ case GLSL_TYPE_INT64: fprintf(f, "%" PRIi64, ir->value.i64[i]); break;
+ case GLSL_TYPE_BOOL: fprintf(f, "%d", ir->value.b[i]); break;
+ case GLSL_TYPE_DOUBLE:
+ if (ir->value.d[i] == 0.0)
+ /* 0.0 == -0.0, so print with %f to get the proper sign. */
+ fprintf(f, "%.1f", ir->value.d[i]);
+ else if (fabs(ir->value.d[i]) < 0.000001)
+ fprintf(f, "%a", ir->value.d[i]);
+ else if (fabs(ir->value.d[i]) > 1000000.0)
+ fprintf(f, "%e", ir->value.d[i]);
+ else
+ fprintf(f, "%f", ir->value.d[i]);
+ break;
+ default:
+ unreachable("Invalid constant type");
+ }
+ }
+ }
+ fprintf(f, ")) ");
+}
+
+
+void
+ir_print_visitor::visit(ir_call *ir)
+{
+ fprintf(f, "(call %s ", ir->callee_name());
+ if (ir->return_deref)
+ ir->return_deref->accept(this);
+ fprintf(f, " (");
+ foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
+ param->accept(this);
+ }
+ fprintf(f, "))\n");
+}
+
+
+void
+ir_print_visitor::visit(ir_return *ir)
+{
+ fprintf(f, "(return");
+
+ ir_rvalue *const value = ir->get_value();
+ if (value) {
+ fprintf(f, " ");
+ value->accept(this);
+ }
+
+ fprintf(f, ")");
+}
+
+
+void
+ir_print_visitor::visit(ir_discard *ir)
+{
+   fprintf(f, "(discard");
+
+ if (ir->condition != NULL) {
+ fprintf(f, " ");
+ ir->condition->accept(this);
+ }
+
+ fprintf(f, ")");
+}
+
+
+void
+ir_print_visitor::visit(ir_demote *ir)
+{
+ fprintf(f, "(demote)");
+}
+
+
+void
+ir_print_visitor::visit(ir_if *ir)
+{
+ fprintf(f, "(if ");
+ ir->condition->accept(this);
+
+ fprintf(f, "(\n");
+ indentation++;
+
+ foreach_in_list(ir_instruction, inst, &ir->then_instructions) {
+ indent();
+ inst->accept(this);
+ fprintf(f, "\n");
+ }
+
+ indentation--;
+ indent();
+ fprintf(f, ")\n");
+
+ indent();
+ if (!ir->else_instructions.is_empty()) {
+ fprintf(f, "(\n");
+ indentation++;
+
+ foreach_in_list(ir_instruction, inst, &ir->else_instructions) {
+ indent();
+ inst->accept(this);
+ fprintf(f, "\n");
+ }
+ indentation--;
+ indent();
+ fprintf(f, "))\n");
+ } else {
+ fprintf(f, "())\n");
+ }
+}
+
+
+void
+ir_print_visitor::visit(ir_loop *ir)
+{
+ fprintf(f, "(loop (\n");
+ indentation++;
+
+ foreach_in_list(ir_instruction, inst, &ir->body_instructions) {
+ indent();
+ inst->accept(this);
+ fprintf(f, "\n");
+ }
+ indentation--;
+ indent();
+ fprintf(f, "))\n");
+}
+
+
+void
+ir_print_visitor::visit(ir_loop_jump *ir)
+{
+ fprintf(f, "%s", ir->is_break() ? "break" : "continue");
+}
+
+void
+ir_print_visitor::visit(ir_precision_statement *ir)
+{
+ //printf("%s", ir->precision_statement);
+}
+
+void
+ir_print_visitor::visit(ir_typedecl_statement *)
+{
+}
+
+void
+ir_print_visitor::visit(ir_emit_vertex *ir)
+{
+ fprintf(f, "(emit-vertex ");
+ ir->stream->accept(this);
+ fprintf(f, ")\n");
+}
+
+void
+ir_print_visitor::visit(ir_end_primitive *ir)
+{
+ fprintf(f, "(end-primitive ");
+ ir->stream->accept(this);
+ fprintf(f, ")\n");
+}
+
+void
+ir_print_visitor::visit(ir_barrier *)
+{
+ fprintf(f, "(barrier)\n");
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.h
new file mode 100644
index 0000000000..718c709683
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_print_visitor.h
@@ -0,0 +1,96 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_PRINT_VISITOR_H
+#define IR_PRINT_VISITOR_H
+
+#include "ir.h"
+#include "ir_visitor.h"
+
+#include "program/symbol_table.h"
+
+/**
+ * Visitor that prints an IR instruction tree in S-expression form
+ */
+class ir_print_visitor : public ir_visitor {
+public:
+ ir_print_visitor(FILE *f);
+ virtual ~ir_print_visitor();
+
+ void indent(void);
+
+ /**
+ * \name Visit methods
+ *
+ * As typical for the visitor pattern, there must be one \c visit method for
+ * each concrete subclass of \c ir_instruction. Virtual base classes within
+ * the hierarchy should not have \c visit methods.
+ */
+ /*@{*/
+ virtual void visit(ir_rvalue *);
+ virtual void visit(ir_variable *);
+ virtual void visit(ir_function_signature *);
+ virtual void visit(ir_function *);
+ virtual void visit(ir_expression *);
+ virtual void visit(ir_texture *);
+ virtual void visit(ir_swizzle *);
+ virtual void visit(ir_dereference_variable *);
+ virtual void visit(ir_dereference_array *);
+ virtual void visit(ir_dereference_record *);
+ virtual void visit(ir_assignment *);
+ virtual void visit(ir_constant *);
+ virtual void visit(ir_call *);
+ virtual void visit(ir_return *);
+ virtual void visit(ir_discard *);
+ virtual void visit(ir_demote *);
+ virtual void visit(ir_if *);
+ virtual void visit(ir_loop *);
+ virtual void visit(ir_loop_jump *);
+ virtual void visit(ir_precision_statement *);
+ virtual void visit(ir_typedecl_statement *);
+ virtual void visit(ir_emit_vertex *);
+ virtual void visit(ir_end_primitive *);
+ virtual void visit(ir_barrier *);
+ /*@}*/
+
+private:
+ /**
+ * Fetch/generate a unique name for ir_variable.
+ *
+ * GLSL IR permits multiple ir_variables to share the same name. This works
+ * fine until we try to print it, when we really need a unique one.
+ */
+ const char *unique_name(ir_variable *var);
+
+ /** A mapping from ir_variable * -> unique printable names. */
+ hash_table *printable_names;
+ _mesa_symbol_table *symbols;
+
+ void *mem_ctx;
+ FILE *f;
+
+ int indentation;
+};
+
+#endif /* IR_PRINT_VISITOR_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.cpp
new file mode 100644
index 0000000000..d4f0e58b15
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.cpp
@@ -0,0 +1,1169 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir_reader.h"
+#include "glsl_parser_extras.h"
+#include "compiler/glsl_types.h"
+#include "s_expression.h"
+
+static const bool debug = false;
+
+namespace {
+
+class ir_reader {
+public:
+ ir_reader(_mesa_glsl_parse_state *);
+
+ void read(exec_list *instructions, const char *src, bool scan_for_protos);
+
+private:
+ void *mem_ctx;
+ _mesa_glsl_parse_state *state;
+
+ void ir_read_error(s_expression *, const char *fmt, ...);
+
+ const glsl_type *read_type(s_expression *);
+
+ void scan_for_prototypes(exec_list *, s_expression *);
+ ir_function *read_function(s_expression *, bool skip_body);
+ void read_function_sig(ir_function *, s_expression *, bool skip_body);
+
+ void read_instructions(exec_list *, s_expression *, ir_loop *);
+ ir_instruction *read_instruction(s_expression *, ir_loop *);
+ ir_variable *read_declaration(s_expression *);
+ ir_if *read_if(s_expression *, ir_loop *);
+ ir_loop *read_loop(s_expression *);
+ ir_call *read_call(s_expression *);
+ ir_return *read_return(s_expression *);
+ ir_rvalue *read_rvalue(s_expression *);
+ ir_assignment *read_assignment(s_expression *);
+ ir_expression *read_expression(s_expression *);
+ ir_swizzle *read_swizzle(s_expression *);
+ ir_constant *read_constant(s_expression *);
+ ir_texture *read_texture(s_expression *);
+ ir_emit_vertex *read_emit_vertex(s_expression *);
+ ir_end_primitive *read_end_primitive(s_expression *);
+ ir_barrier *read_barrier(s_expression *);
+
+ ir_dereference *read_dereference(s_expression *);
+ ir_dereference_variable *read_var_ref(s_expression *);
+};
+
+} /* anonymous namespace */
+
+ir_reader::ir_reader(_mesa_glsl_parse_state *state) : state(state)
+{
+ this->mem_ctx = state;
+}
+
+void
+_mesa_glsl_read_ir(_mesa_glsl_parse_state *state, exec_list *instructions,
+ const char *src, bool scan_for_protos)
+{
+ ir_reader r(state);
+ r.read(instructions, src, scan_for_protos);
+}
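+
+/* Illustrative input, assembled from the patterns used by this reader
+ * (not taken from upstream documentation):
+ *
+ *   (function identity
+ *     (signature float
+ *       (parameters (declare (in) float x))
+ *       ((return (var_ref x)))))
+ */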
+
+void
+ir_reader::read(exec_list *instructions, const char *src, bool scan_for_protos)
+{
+ void *sx_mem_ctx = ralloc_context(NULL);
+ s_expression *expr = s_expression::read_expression(sx_mem_ctx, src);
+ if (expr == NULL) {
+ ir_read_error(NULL, "couldn't parse S-Expression.");
+ return;
+ }
+
+ if (scan_for_protos) {
+ scan_for_prototypes(instructions, expr);
+ if (state->error)
+ return;
+ }
+
+ read_instructions(instructions, expr, NULL);
+ ralloc_free(sx_mem_ctx);
+
+ if (debug)
+ validate_ir_tree(instructions);
+}
+
+void
+ir_reader::ir_read_error(s_expression *expr, const char *fmt, ...)
+{
+ va_list ap;
+
+ state->error = true;
+
+ if (state->current_function != NULL)
+ ralloc_asprintf_append(&state->info_log, "In function %s:\n",
+ state->current_function->function_name());
+ ralloc_strcat(&state->info_log, "error: ");
+
+ va_start(ap, fmt);
+ ralloc_vasprintf_append(&state->info_log, fmt, ap);
+ va_end(ap);
+ ralloc_strcat(&state->info_log, "\n");
+
+ if (expr != NULL) {
+ ralloc_strcat(&state->info_log, "...in this context:\n ");
+ expr->print();
+ ralloc_strcat(&state->info_log, "\n\n");
+ }
+}
+
+const glsl_type *
+ir_reader::read_type(s_expression *expr)
+{
+ s_expression *s_base_type;
+ s_int *s_size;
+
+ s_pattern pat[] = { "array", s_base_type, s_size };
+ if (MATCH(expr, pat)) {
+ const glsl_type *base_type = read_type(s_base_type);
+ if (base_type == NULL) {
+ ir_read_error(NULL, "when reading base type of array type");
+ return NULL;
+ }
+
+ return glsl_type::get_array_instance(base_type, s_size->value());
+ }
+
+ s_symbol *type_sym = SX_AS_SYMBOL(expr);
+ if (type_sym == NULL) {
+ ir_read_error(expr, "expected <type>");
+ return NULL;
+ }
+
+ const glsl_type *type = state->symbols->get_type(type_sym->value());
+ if (type == NULL)
+ ir_read_error(expr, "invalid type: %s", type_sym->value());
+
+ return type;
+}
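+
+/* Illustrative forms accepted above: a bare type symbol such as "vec4",
+ * or a nested array expression such as (array (array float 4) 3), which
+ * reads as a 3-element array whose elements are float[4].
+ */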
+
+
+void
+ir_reader::scan_for_prototypes(exec_list *instructions, s_expression *expr)
+{
+ s_list *list = SX_AS_LIST(expr);
+ if (list == NULL) {
+ ir_read_error(expr, "Expected (<instruction> ...); found an atom.");
+ return;
+ }
+
+ foreach_in_list(s_list, sub, &list->subexpressions) {
+ if (!sub->is_list())
+ continue; // not a (function ...); ignore it.
+
+ s_symbol *tag = SX_AS_SYMBOL(sub->subexpressions.get_head());
+ if (tag == NULL || strcmp(tag->value(), "function") != 0)
+ continue; // not a (function ...); ignore it.
+
+ ir_function *f = read_function(sub, true);
+ if (f == NULL)
+ return;
+ instructions->push_tail(f);
+ }
+}
+
+ir_function *
+ir_reader::read_function(s_expression *expr, bool skip_body)
+{
+ bool added = false;
+ s_symbol *name;
+
+ s_pattern pat[] = { "function", name };
+ if (!PARTIAL_MATCH(expr, pat)) {
+ ir_read_error(expr, "Expected (function <name> (signature ...) ...)");
+ return NULL;
+ }
+
+ ir_function *f = state->symbols->get_function(name->value());
+ if (f == NULL) {
+ f = new(mem_ctx) ir_function(name->value());
+ added = state->symbols->add_function(f);
+ assert(added);
+ }
+
+ /* Skip over "function" tag and function name (which are guaranteed to be
+ * present by the above PARTIAL_MATCH call).
+ */
+ exec_node *node = ((s_list *) expr)->subexpressions.get_head_raw()->next->next;
+ for (/* nothing */; !node->is_tail_sentinel(); node = node->next) {
+ s_expression *s_sig = (s_expression *) node;
+ read_function_sig(f, s_sig, skip_body);
+ }
+ return added ? f : NULL;
+}
+
+static bool
+always_available(const _mesa_glsl_parse_state *)
+{
+ return true;
+}
+
+void
+ir_reader::read_function_sig(ir_function *f, s_expression *expr, bool skip_body)
+{
+ s_expression *type_expr;
+ s_list *paramlist;
+ s_list *body_list;
+
+ s_pattern pat[] = { "signature", type_expr, paramlist, body_list };
+ if (!MATCH(expr, pat)) {
+ ir_read_error(expr, "Expected (signature <type> (parameters ...) "
+ "(<instruction> ...))");
+ return;
+ }
+
+ const glsl_type *return_type = read_type(type_expr);
+ if (return_type == NULL)
+ return;
+
+ s_symbol *paramtag = SX_AS_SYMBOL(paramlist->subexpressions.get_head());
+ if (paramtag == NULL || strcmp(paramtag->value(), "parameters") != 0) {
+ ir_read_error(paramlist, "Expected (parameters ...)");
+ return;
+ }
+
+ // Read the parameters list into a temporary place.
+ exec_list hir_parameters;
+ state->symbols->push_scope();
+
+ /* Skip over the "parameters" tag. */
+ exec_node *node = paramlist->subexpressions.get_head_raw()->next;
+ for (/* nothing */; !node->is_tail_sentinel(); node = node->next) {
+ ir_variable *var = read_declaration((s_expression *) node);
+ if (var == NULL)
+ return;
+
+ hir_parameters.push_tail(var);
+ }
+
+ ir_function_signature *sig =
+ f->exact_matching_signature(state, &hir_parameters);
+ if (sig == NULL && skip_body) {
+ /* If scanning for prototypes, generate a new signature. */
+ /* ir_reader doesn't know what languages support a given built-in, so
+ * just say that they're always available. For now, other mechanisms
+ * guarantee the right built-ins are available.
+ */
+ sig = new(mem_ctx) ir_function_signature(return_type, always_available);
+ f->add_signature(sig);
+ } else if (sig != NULL) {
+ const char *badvar = sig->qualifiers_match(&hir_parameters);
+ if (badvar != NULL) {
+ ir_read_error(expr, "function `%s' parameter `%s' qualifiers "
+ "don't match prototype", f->name, badvar);
+ return;
+ }
+
+ if (sig->return_type != return_type) {
+ ir_read_error(expr, "function `%s' return type doesn't "
+ "match prototype", f->name);
+ return;
+ }
+ } else {
+ /* No prototype for this body exists - skip it. */
+ state->symbols->pop_scope();
+ return;
+ }
+ assert(sig != NULL);
+
+ sig->replace_parameters(&hir_parameters);
+
+ if (!skip_body && !body_list->subexpressions.is_empty()) {
+ if (sig->is_defined) {
+ ir_read_error(expr, "function %s redefined", f->name);
+ return;
+ }
+ state->current_function = sig;
+ read_instructions(&sig->body, body_list, NULL);
+ state->current_function = NULL;
+ sig->is_defined = true;
+ }
+
+ state->symbols->pop_scope();
+}
+
+void
+ir_reader::read_instructions(exec_list *instructions, s_expression *expr,
+ ir_loop *loop_ctx)
+{
+ // Read in a list of instructions
+ s_list *list = SX_AS_LIST(expr);
+ if (list == NULL) {
+ ir_read_error(expr, "Expected (<instruction> ...); found an atom.");
+ return;
+ }
+
+ foreach_in_list(s_expression, sub, &list->subexpressions) {
+ ir_instruction *ir = read_instruction(sub, loop_ctx);
+ if (ir != NULL) {
+ /* Global variable declarations should be moved to the top, before
+ * any functions that might use them. Functions are added to the
+ * instruction stream when scanning for prototypes, so without this
+ * hack, they always appear before variable declarations.
+ */
+ if (state->current_function == NULL && ir->as_variable() != NULL)
+ instructions->push_head(ir);
+ else
+ instructions->push_tail(ir);
+ }
+ }
+}
+
+
+ir_instruction *
+ir_reader::read_instruction(s_expression *expr, ir_loop *loop_ctx)
+{
+ s_symbol *symbol = SX_AS_SYMBOL(expr);
+ if (symbol != NULL) {
+ if (strcmp(symbol->value(), "break") == 0 && loop_ctx != NULL)
+ return new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break);
+ if (strcmp(symbol->value(), "continue") == 0 && loop_ctx != NULL)
+ return new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_continue);
+ }
+
+ s_list *list = SX_AS_LIST(expr);
+ if (list == NULL || list->subexpressions.is_empty()) {
+      ir_read_error(expr, "Invalid instruction.");
+ return NULL;
+ }
+
+ s_symbol *tag = SX_AS_SYMBOL(list->subexpressions.get_head());
+ if (tag == NULL) {
+ ir_read_error(expr, "expected instruction tag");
+ return NULL;
+ }
+
+ ir_instruction *inst = NULL;
+ if (strcmp(tag->value(), "declare") == 0) {
+ inst = read_declaration(list);
+ } else if (strcmp(tag->value(), "assign") == 0) {
+ inst = read_assignment(list);
+ } else if (strcmp(tag->value(), "if") == 0) {
+ inst = read_if(list, loop_ctx);
+ } else if (strcmp(tag->value(), "loop") == 0) {
+ inst = read_loop(list);
+ } else if (strcmp(tag->value(), "call") == 0) {
+ inst = read_call(list);
+ } else if (strcmp(tag->value(), "return") == 0) {
+ inst = read_return(list);
+ } else if (strcmp(tag->value(), "function") == 0) {
+ inst = read_function(list, false);
+ } else if (strcmp(tag->value(), "emit-vertex") == 0) {
+ inst = read_emit_vertex(list);
+ } else if (strcmp(tag->value(), "end-primitive") == 0) {
+ inst = read_end_primitive(list);
+ } else if (strcmp(tag->value(), "barrier") == 0) {
+ inst = read_barrier(list);
+ } else {
+ inst = read_rvalue(list);
+ if (inst == NULL)
+ ir_read_error(NULL, "when reading instruction");
+ }
+ return inst;
+}
+
+ir_variable *
+ir_reader::read_declaration(s_expression *expr)
+{
+ s_list *s_quals;
+ s_expression *s_type;
+ s_symbol *s_name;
+
+ s_pattern pat[] = { "declare", s_quals, s_type, s_name };
+ if (!MATCH(expr, pat)) {
+ ir_read_error(expr, "expected (declare (<qualifiers>) <type> <name>)");
+ return NULL;
+ }
+
+ const glsl_type *type = read_type(s_type);
+ if (type == NULL)
+ return NULL;
+
+ ir_variable *var = new(mem_ctx) ir_variable(type, s_name->value(),
+ ir_var_auto);
+
+ foreach_in_list(s_symbol, qualifier, &s_quals->subexpressions) {
+ if (!qualifier->is_symbol()) {
+ ir_read_error(expr, "qualifier list must contain only symbols");
+ return NULL;
+ }
+
+ // FINISHME: Check for duplicate/conflicting qualifiers.
+ if (strcmp(qualifier->value(), "centroid") == 0) {
+ var->data.centroid = 1;
+ } else if (strcmp(qualifier->value(), "sample") == 0) {
+ var->data.sample = 1;
+ } else if (strcmp(qualifier->value(), "patch") == 0) {
+ var->data.patch = 1;
+ } else if (strcmp(qualifier->value(), "explicit_invariant") == 0) {
+ var->data.explicit_invariant = true;
+ } else if (strcmp(qualifier->value(), "invariant") == 0) {
+ var->data.invariant = true;
+ } else if (strcmp(qualifier->value(), "uniform") == 0) {
+ var->data.mode = ir_var_uniform;
+ } else if (strcmp(qualifier->value(), "shader_storage") == 0) {
+ var->data.mode = ir_var_shader_storage;
+ } else if (strcmp(qualifier->value(), "auto") == 0) {
+ var->data.mode = ir_var_auto;
+ } else if (strcmp(qualifier->value(), "in") == 0) {
+ var->data.mode = ir_var_function_in;
+ } else if (strcmp(qualifier->value(), "shader_in") == 0) {
+ var->data.mode = ir_var_shader_in;
+ } else if (strcmp(qualifier->value(), "const_in") == 0) {
+ var->data.mode = ir_var_const_in;
+ } else if (strcmp(qualifier->value(), "out") == 0) {
+ var->data.mode = ir_var_function_out;
+ } else if (strcmp(qualifier->value(), "shader_out") == 0) {
+ var->data.mode = ir_var_shader_out;
+ } else if (strcmp(qualifier->value(), "inout") == 0) {
+ var->data.mode = ir_var_function_inout;
+ } else if (strcmp(qualifier->value(), "temporary") == 0) {
+ var->data.mode = ir_var_temporary;
+ } else if (strcmp(qualifier->value(), "stream1") == 0) {
+ var->data.stream = 1;
+ } else if (strcmp(qualifier->value(), "stream2") == 0) {
+ var->data.stream = 2;
+ } else if (strcmp(qualifier->value(), "stream3") == 0) {
+ var->data.stream = 3;
+ } else if (strcmp(qualifier->value(), "smooth") == 0) {
+ var->data.interpolation = INTERP_MODE_SMOOTH;
+ } else if (strcmp(qualifier->value(), "flat") == 0) {
+ var->data.interpolation = INTERP_MODE_FLAT;
+ } else if (strcmp(qualifier->value(), "noperspective") == 0) {
+ var->data.interpolation = INTERP_MODE_NOPERSPECTIVE;
+ } else {
+ ir_read_error(expr, "unknown qualifier: %s", qualifier->value());
+ return NULL;
+ }
+ }
+
+ // Add the variable to the symbol table
+ state->symbols->add_variable(var);
+
+ return var;
+}
+
+
+ir_if *
+ir_reader::read_if(s_expression *expr, ir_loop *loop_ctx)
+{
+ s_expression *s_cond;
+ s_expression *s_then;
+ s_expression *s_else;
+
+ s_pattern pat[] = { "if", s_cond, s_then, s_else };
+ if (!MATCH(expr, pat)) {
+ ir_read_error(expr, "expected (if <condition> (<then>...) (<else>...))");
+ return NULL;
+ }
+
+ ir_rvalue *condition = read_rvalue(s_cond);
+ if (condition == NULL) {
+ ir_read_error(NULL, "when reading condition of (if ...)");
+ return NULL;
+ }
+
+ ir_if *iff = new(mem_ctx) ir_if(condition);
+
+ read_instructions(&iff->then_instructions, s_then, loop_ctx);
+ read_instructions(&iff->else_instructions, s_else, loop_ctx);
+ if (state->error) {
+ delete iff;
+ iff = NULL;
+ }
+ return iff;
+}
+
+
+ir_loop *
+ir_reader::read_loop(s_expression *expr)
+{
+ s_expression *s_body;
+
+ s_pattern loop_pat[] = { "loop", s_body };
+ if (!MATCH(expr, loop_pat)) {
+ ir_read_error(expr, "expected (loop <body>)");
+ return NULL;
+ }
+
+ ir_loop *loop = new(mem_ctx) ir_loop;
+
+ read_instructions(&loop->body_instructions, s_body, loop);
+ if (state->error) {
+ delete loop;
+ loop = NULL;
+ }
+ return loop;
+}
+
+
+ir_return *
+ir_reader::read_return(s_expression *expr)
+{
+ s_expression *s_retval;
+
+ s_pattern return_value_pat[] = { "return", s_retval};
+ s_pattern return_void_pat[] = { "return" };
+ if (MATCH(expr, return_value_pat)) {
+ ir_rvalue *retval = read_rvalue(s_retval);
+ if (retval == NULL) {
+ ir_read_error(NULL, "when reading return value");
+ return NULL;
+ }
+ return new(mem_ctx) ir_return(retval);
+ } else if (MATCH(expr, return_void_pat)) {
+ return new(mem_ctx) ir_return;
+ } else {
+ ir_read_error(expr, "expected (return <rvalue>) or (return)");
+ return NULL;
+ }
+}
+
+
+ir_rvalue *
+ir_reader::read_rvalue(s_expression *expr)
+{
+ s_list *list = SX_AS_LIST(expr);
+ if (list == NULL || list->subexpressions.is_empty())
+ return NULL;
+
+ s_symbol *tag = SX_AS_SYMBOL(list->subexpressions.get_head());
+ if (tag == NULL) {
+ ir_read_error(expr, "expected rvalue tag");
+ return NULL;
+ }
+
+ ir_rvalue *rvalue = read_dereference(list);
+ if (rvalue != NULL || state->error)
+ return rvalue;
+ else if (strcmp(tag->value(), "swiz") == 0) {
+ rvalue = read_swizzle(list);
+ } else if (strcmp(tag->value(), "expression") == 0) {
+ rvalue = read_expression(list);
+ } else if (strcmp(tag->value(), "constant") == 0) {
+ rvalue = read_constant(list);
+ } else {
+ rvalue = read_texture(list);
+ if (rvalue == NULL && !state->error)
+ ir_read_error(expr, "unrecognized rvalue tag: %s", tag->value());
+ }
+
+ return rvalue;
+}
+
+ir_assignment *
+ir_reader::read_assignment(s_expression *expr)
+{
+ s_expression *cond_expr = NULL;
+ s_expression *lhs_expr, *rhs_expr;
+ s_list *mask_list;
+
+ s_pattern pat4[] = { "assign", mask_list, lhs_expr, rhs_expr };
+ s_pattern pat5[] = { "assign", cond_expr, mask_list, lhs_expr, rhs_expr };
+ if (!MATCH(expr, pat4) && !MATCH(expr, pat5)) {
+ ir_read_error(expr, "expected (assign [<condition>] (<write mask>) "
+ "<lhs> <rhs>)");
+ return NULL;
+ }
+
+ ir_rvalue *condition = NULL;
+ if (cond_expr != NULL) {
+ condition = read_rvalue(cond_expr);
+ if (condition == NULL) {
+ ir_read_error(NULL, "when reading condition of assignment");
+ return NULL;
+ }
+ }
+
+ unsigned mask = 0;
+
+ s_symbol *mask_symbol;
+ s_pattern mask_pat[] = { mask_symbol };
+ if (MATCH(mask_list, mask_pat)) {
+ const char *mask_str = mask_symbol->value();
+ unsigned mask_length = strlen(mask_str);
+ if (mask_length > 4) {
+ ir_read_error(expr, "invalid write mask: %s", mask_str);
+ return NULL;
+ }
+
+ const unsigned idx_map[] = { 3, 0, 1, 2 }; /* w=bit 3, x=0, y=1, z=2 */
+
+ for (unsigned i = 0; i < mask_length; i++) {
+ if (mask_str[i] < 'w' || mask_str[i] > 'z') {
+ ir_read_error(expr, "write mask contains invalid character: %c",
+ mask_str[i]);
+ return NULL;
+ }
+ mask |= 1 << idx_map[mask_str[i] - 'w'];
+ }
+ } else if (!mask_list->subexpressions.is_empty()) {
+ ir_read_error(mask_list, "expected () or (<write mask>)");
+ return NULL;
+ }
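+
+   /* Illustrative: the mask "(xw)" sets bits 0 and 3, giving mask == 0x9;
+    * an empty mask "()" leaves mask == 0, which the check below permits
+    * only for non-scalar, non-vector left-hand sides (structs, arrays).
+    */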
+
+ ir_dereference *lhs = read_dereference(lhs_expr);
+ if (lhs == NULL) {
+ ir_read_error(NULL, "when reading left-hand side of assignment");
+ return NULL;
+ }
+
+ ir_rvalue *rhs = read_rvalue(rhs_expr);
+ if (rhs == NULL) {
+ ir_read_error(NULL, "when reading right-hand side of assignment");
+ return NULL;
+ }
+
+ if (mask == 0 && (lhs->type->is_vector() || lhs->type->is_scalar())) {
+ ir_read_error(expr, "non-zero write mask required.");
+ return NULL;
+ }
+
+ return new(mem_ctx) ir_assignment(lhs, rhs, condition, mask);
+}
+
+ir_call *
+ir_reader::read_call(s_expression *expr)
+{
+ s_symbol *name;
+ s_list *params;
+ s_list *s_return = NULL;
+
+ ir_dereference_variable *return_deref = NULL;
+
+ s_pattern void_pat[] = { "call", name, params };
+ s_pattern non_void_pat[] = { "call", name, s_return, params };
+ if (MATCH(expr, non_void_pat)) {
+ return_deref = read_var_ref(s_return);
+ if (return_deref == NULL) {
+ ir_read_error(s_return, "when reading a call's return storage");
+ return NULL;
+ }
+ } else if (!MATCH(expr, void_pat)) {
+ ir_read_error(expr, "expected (call <name> [<deref>] (<param> ...))");
+ return NULL;
+ }
+
+ exec_list parameters;
+
+ foreach_in_list(s_expression, e, &params->subexpressions) {
+ ir_rvalue *param = read_rvalue(e);
+ if (param == NULL) {
+ ir_read_error(e, "when reading parameter to function call");
+ return NULL;
+ }
+ parameters.push_tail(param);
+ }
+
+ ir_function *f = state->symbols->get_function(name->value());
+ if (f == NULL) {
+ ir_read_error(expr, "found call to undefined function %s",
+ name->value());
+ return NULL;
+ }
+
+ ir_function_signature *callee =
+ f->matching_signature(state, &parameters, true);
+ if (callee == NULL) {
+ ir_read_error(expr, "couldn't find matching signature for function "
+ "%s", name->value());
+ return NULL;
+ }
+
+ if (callee->return_type == glsl_type::void_type && return_deref) {
+ ir_read_error(expr, "call has return value storage but void type");
+ return NULL;
+ } else if (callee->return_type != glsl_type::void_type && !return_deref) {
+ ir_read_error(expr, "call has non-void type but no return value storage");
+ return NULL;
+ }
+
+ return new(mem_ctx) ir_call(callee, return_deref, &parameters);
+}
+
+ir_expression *
+ir_reader::read_expression(s_expression *expr)
+{
+ s_expression *s_type;
+ s_symbol *s_op;
+ s_expression *s_arg[4] = {NULL};
+
+ s_pattern pat[] = { "expression", s_type, s_op, s_arg[0] };
+ if (!PARTIAL_MATCH(expr, pat)) {
+ ir_read_error(expr, "expected (expression <type> <operator> "
+ "<operand> [<operand>] [<operand>] [<operand>])");
+ return NULL;
+ }
+ s_arg[1] = (s_expression *) s_arg[0]->next; // may be tail sentinel
+ s_arg[2] = (s_expression *) s_arg[1]->next; // may be tail sentinel or NULL
+ if (s_arg[2])
+ s_arg[3] = (s_expression *) s_arg[2]->next; // may be tail sentinel or NULL
+
+ const glsl_type *type = read_type(s_type);
+ if (type == NULL)
+ return NULL;
+
+ /* Read the operator */
+ ir_expression_operation op = ir_expression::get_operator(s_op->value());
+ if (op == (ir_expression_operation) -1) {
+ ir_read_error(expr, "invalid operator: %s", s_op->value());
+ return NULL;
+ }
+
+ /* Skip "expression" <type> <operation> by subtracting 3. */
+ int num_operands = (int) ((s_list *) expr)->subexpressions.length() - 3;
+
+ int expected_operands = ir_expression::get_num_operands(op);
+ if (num_operands != expected_operands) {
+ ir_read_error(expr, "found %d expression operands, expected %d",
+ num_operands, expected_operands);
+ return NULL;
+ }
+
+ ir_rvalue *arg[4] = {NULL};
+ for (int i = 0; i < num_operands; i++) {
+ arg[i] = read_rvalue(s_arg[i]);
+ if (arg[i] == NULL) {
+ ir_read_error(NULL, "when reading operand #%d of %s", i, s_op->value());
+ return NULL;
+ }
+ }
+
+ return new(mem_ctx) ir_expression(op, type, arg[0], arg[1], arg[2], arg[3]);
+}
+
+ir_swizzle *
+ir_reader::read_swizzle(s_expression *expr)
+{
+ s_symbol *swiz;
+ s_expression *sub;
+
+ s_pattern pat[] = { "swiz", swiz, sub };
+ if (!MATCH(expr, pat)) {
+ ir_read_error(expr, "expected (swiz <swizzle> <rvalue>)");
+ return NULL;
+ }
+
+ if (strlen(swiz->value()) > 4) {
+ ir_read_error(expr, "expected a valid swizzle; found %s", swiz->value());
+ return NULL;
+ }
+
+ ir_rvalue *rvalue = read_rvalue(sub);
+ if (rvalue == NULL)
+ return NULL;
+
+ ir_swizzle *ir = ir_swizzle::create(rvalue, swiz->value(),
+ rvalue->type->vector_elements);
+ if (ir == NULL)
+ ir_read_error(expr, "invalid swizzle");
+
+ return ir;
+}
+
+ir_constant *
+ir_reader::read_constant(s_expression *expr)
+{
+ s_expression *type_expr;
+ s_list *values;
+
+ s_pattern pat[] = { "constant", type_expr, values };
+ if (!MATCH(expr, pat)) {
+ ir_read_error(expr, "expected (constant <type> (...))");
+ return NULL;
+ }
+
+ const glsl_type *type = read_type(type_expr);
+ if (type == NULL)
+ return NULL;
+
+ if (values == NULL) {
+ ir_read_error(expr, "expected (constant <type> (...))");
+ return NULL;
+ }
+
+ if (type->is_array()) {
+ unsigned elements_supplied = 0;
+ exec_list elements;
+ foreach_in_list(s_expression, elt, &values->subexpressions) {
+ ir_constant *ir_elt = read_constant(elt);
+ if (ir_elt == NULL)
+ return NULL;
+ elements.push_tail(ir_elt);
+ elements_supplied++;
+ }
+
+ if (elements_supplied != type->length) {
+ ir_read_error(values, "expected exactly %u array elements, "
+ "given %u", type->length, elements_supplied);
+ return NULL;
+ }
+ return new(mem_ctx) ir_constant(type, &elements);
+ }
+
+ ir_constant_data data = { { 0 } };
+
+ // Read in list of values (at most 16).
+ unsigned k = 0;
+ foreach_in_list(s_expression, expr, &values->subexpressions) {
+ if (k >= 16) {
+ ir_read_error(values, "expected at most 16 numbers");
+ return NULL;
+ }
+
+ if (type->is_float()) {
+ s_number *value = SX_AS_NUMBER(expr);
+ if (value == NULL) {
+ ir_read_error(values, "expected numbers");
+ return NULL;
+ }
+ data.f[k] = value->fvalue();
+ } else {
+ s_int *value = SX_AS_INT(expr);
+ if (value == NULL) {
+ ir_read_error(values, "expected integers");
+ return NULL;
+ }
+
+ switch (type->base_type) {
+ case GLSL_TYPE_UINT: {
+ data.u[k] = value->value();
+ break;
+ }
+ case GLSL_TYPE_INT: {
+ data.i[k] = value->value();
+ break;
+ }
+ case GLSL_TYPE_BOOL: {
+ data.b[k] = value->value();
+ break;
+ }
+ default:
+ ir_read_error(values, "unsupported constant type");
+ return NULL;
+ }
+ }
+ ++k;
+ }
+ if (k != type->components()) {
+ ir_read_error(values, "expected %u constant values, found %u",
+ type->components(), k);
+ return NULL;
+ }
+
+ return new(mem_ctx) ir_constant(type, &data);
+}
+
+ir_dereference_variable *
+ir_reader::read_var_ref(s_expression *expr)
+{
+ s_symbol *s_var;
+ s_pattern var_pat[] = { "var_ref", s_var };
+
+ if (MATCH(expr, var_pat)) {
+ ir_variable *var = state->symbols->get_variable(s_var->value());
+ if (var == NULL) {
+ ir_read_error(expr, "undeclared variable: %s", s_var->value());
+ return NULL;
+ }
+ return new(mem_ctx) ir_dereference_variable(var);
+ }
+ return NULL;
+}
+
+ir_dereference *
+ir_reader::read_dereference(s_expression *expr)
+{
+ s_expression *s_subject;
+ s_expression *s_index;
+ s_symbol *s_field;
+
+ s_pattern array_pat[] = { "array_ref", s_subject, s_index };
+ s_pattern record_pat[] = { "record_ref", s_subject, s_field };
+
+ ir_dereference_variable *var_ref = read_var_ref(expr);
+ if (var_ref != NULL) {
+ return var_ref;
+ } else if (MATCH(expr, array_pat)) {
+ ir_rvalue *subject = read_rvalue(s_subject);
+ if (subject == NULL) {
+ ir_read_error(NULL, "when reading the subject of an array_ref");
+ return NULL;
+ }
+
+ ir_rvalue *idx = read_rvalue(s_index);
+ if (idx == NULL) {
+ ir_read_error(NULL, "when reading the index of an array_ref");
+ return NULL;
+ }
+ return new(mem_ctx) ir_dereference_array(subject, idx);
+ } else if (MATCH(expr, record_pat)) {
+ ir_rvalue *subject = read_rvalue(s_subject);
+ if (subject == NULL) {
+ ir_read_error(NULL, "when reading the subject of a record_ref");
+ return NULL;
+ }
+ return new(mem_ctx) ir_dereference_record(subject, s_field->value());
+ }
+ return NULL;
+}
+
+ir_texture *
+ir_reader::read_texture(s_expression *expr)
+{
+ s_symbol *tag = NULL;
+ s_expression *s_type = NULL;
+ s_expression *s_sampler = NULL;
+ s_expression *s_coord = NULL;
+ s_expression *s_offset = NULL;
+ s_expression *s_proj = NULL;
+ s_list *s_shadow = NULL;
+ s_expression *s_lod = NULL;
+ s_expression *s_sample_index = NULL;
+ s_expression *s_component = NULL;
+
+ ir_texture_opcode op = ir_tex; /* silence warning */
+
+ s_pattern tex_pattern[] =
+ { "tex", s_type, s_sampler, s_coord, s_offset, s_proj, s_shadow };
+ s_pattern lod_pattern[] =
+ { "lod", s_type, s_sampler, s_coord };
+ s_pattern txf_pattern[] =
+ { "txf", s_type, s_sampler, s_coord, s_offset, s_lod };
+ s_pattern txf_ms_pattern[] =
+ { "txf_ms", s_type, s_sampler, s_coord, s_sample_index };
+ s_pattern txs_pattern[] =
+ { "txs", s_type, s_sampler, s_lod };
+ s_pattern tg4_pattern[] =
+ { "tg4", s_type, s_sampler, s_coord, s_offset, s_component };
+ s_pattern query_levels_pattern[] =
+ { "query_levels", s_type, s_sampler };
+ s_pattern texture_samples_pattern[] =
+ { "samples", s_type, s_sampler };
+ s_pattern other_pattern[] =
+ { tag, s_type, s_sampler, s_coord, s_offset, s_proj, s_shadow, s_lod };
+
+ if (MATCH(expr, lod_pattern)) {
+ op = ir_lod;
+ } else if (MATCH(expr, tex_pattern)) {
+ op = ir_tex;
+ } else if (MATCH(expr, txf_pattern)) {
+ op = ir_txf;
+ } else if (MATCH(expr, txf_ms_pattern)) {
+ op = ir_txf_ms;
+ } else if (MATCH(expr, txs_pattern)) {
+ op = ir_txs;
+ } else if (MATCH(expr, tg4_pattern)) {
+ op = ir_tg4;
+ } else if (MATCH(expr, query_levels_pattern)) {
+ op = ir_query_levels;
+ } else if (MATCH(expr, texture_samples_pattern)) {
+ op = ir_texture_samples;
+ } else if (MATCH(expr, other_pattern)) {
+ op = ir_texture::get_opcode(tag->value());
+ if (op == (ir_texture_opcode) -1)
+ return NULL;
+ } else {
+ ir_read_error(NULL, "unexpected texture pattern %s", tag->value());
+ return NULL;
+ }
+
+ ir_texture *tex = new(mem_ctx) ir_texture(op);
+
+ // Read return type
+ const glsl_type *type = read_type(s_type);
+ if (type == NULL) {
+ ir_read_error(NULL, "when reading type in (%s ...)",
+ tex->opcode_string());
+ return NULL;
+ }
+
+ // Read sampler (must be a deref)
+ ir_dereference *sampler = read_dereference(s_sampler);
+ if (sampler == NULL) {
+ ir_read_error(NULL, "when reading sampler in (%s ...)",
+ tex->opcode_string());
+ return NULL;
+ }
+ tex->set_sampler(sampler, type);
+
+ if (op != ir_txs) {
+ // Read coordinate (any rvalue)
+ tex->coordinate = read_rvalue(s_coord);
+ if (tex->coordinate == NULL) {
+ ir_read_error(NULL, "when reading coordinate in (%s ...)",
+ tex->opcode_string());
+ return NULL;
+ }
+
+ if (op != ir_txf_ms && op != ir_lod) {
+ // Read texel offset - either 0 or an rvalue.
+ s_int *si_offset = SX_AS_INT(s_offset);
+ if (si_offset == NULL || si_offset->value() != 0) {
+ tex->offset = read_rvalue(s_offset);
+ if (tex->offset == NULL) {
+ ir_read_error(s_offset, "expected 0 or an expression");
+ return NULL;
+ }
+ }
+ }
+ }
+
+ if (op != ir_txf && op != ir_txf_ms &&
+ op != ir_txs && op != ir_lod && op != ir_tg4 &&
+ op != ir_query_levels && op != ir_texture_samples) {
+ s_int *proj_as_int = SX_AS_INT(s_proj);
+ if (proj_as_int && proj_as_int->value() == 1) {
+ tex->projector = NULL;
+ } else {
+ tex->projector = read_rvalue(s_proj);
+ if (tex->projector == NULL) {
+            ir_read_error(NULL, "when reading projective divide in (%s ...)",
+ tex->opcode_string());
+ return NULL;
+ }
+ }
+
+ if (s_shadow->subexpressions.is_empty()) {
+ tex->shadow_comparator = NULL;
+ } else {
+ tex->shadow_comparator = read_rvalue(s_shadow);
+ if (tex->shadow_comparator == NULL) {
+            ir_read_error(NULL, "when reading shadow comparator in (%s ...)",
+ tex->opcode_string());
+ return NULL;
+ }
+ }
+ }
+
+ switch (op) {
+ case ir_txb:
+ tex->lod_info.bias = read_rvalue(s_lod);
+ if (tex->lod_info.bias == NULL) {
+ ir_read_error(NULL, "when reading LOD bias in (txb ...)");
+ return NULL;
+ }
+ break;
+ case ir_txl:
+ case ir_txf:
+ case ir_txs:
+ tex->lod_info.lod = read_rvalue(s_lod);
+ if (tex->lod_info.lod == NULL) {
+ ir_read_error(NULL, "when reading LOD in (%s ...)",
+ tex->opcode_string());
+ return NULL;
+ }
+ break;
+ case ir_txf_ms:
+ tex->lod_info.sample_index = read_rvalue(s_sample_index);
+ if (tex->lod_info.sample_index == NULL) {
+ ir_read_error(NULL, "when reading sample_index in (txf_ms ...)");
+ return NULL;
+ }
+ break;
+ case ir_txd: {
+ s_expression *s_dx, *s_dy;
+ s_pattern dxdy_pat[] = { s_dx, s_dy };
+ if (!MATCH(s_lod, dxdy_pat)) {
+ ir_read_error(s_lod, "expected (dPdx dPdy) in (txd ...)");
+ return NULL;
+ }
+ tex->lod_info.grad.dPdx = read_rvalue(s_dx);
+ if (tex->lod_info.grad.dPdx == NULL) {
+ ir_read_error(NULL, "when reading dPdx in (txd ...)");
+ return NULL;
+ }
+ tex->lod_info.grad.dPdy = read_rvalue(s_dy);
+ if (tex->lod_info.grad.dPdy == NULL) {
+ ir_read_error(NULL, "when reading dPdy in (txd ...)");
+ return NULL;
+ }
+ break;
+ }
+ case ir_tg4:
+ tex->lod_info.component = read_rvalue(s_component);
+ if (tex->lod_info.component == NULL) {
+ ir_read_error(NULL, "when reading component in (tg4 ...)");
+ return NULL;
+ }
+ break;
+ default:
+ // tex and lod don't have any extra parameters.
+ break;
+ };
+ return tex;
+}
+
+ir_emit_vertex *
+ir_reader::read_emit_vertex(s_expression *expr)
+{
+ s_expression *s_stream = NULL;
+
+ s_pattern pat[] = { "emit-vertex", s_stream };
+
+ if (MATCH(expr, pat)) {
+ ir_rvalue *stream = read_dereference(s_stream);
+ if (stream == NULL) {
+ ir_read_error(NULL, "when reading stream info in emit-vertex");
+ return NULL;
+ }
+ return new(mem_ctx) ir_emit_vertex(stream);
+ }
+ ir_read_error(NULL, "when reading emit-vertex");
+ return NULL;
+}
+
+ir_end_primitive *
+ir_reader::read_end_primitive(s_expression *expr)
+{
+ s_expression *s_stream = NULL;
+
+ s_pattern pat[] = { "end-primitive", s_stream };
+
+ if (MATCH(expr, pat)) {
+ ir_rvalue *stream = read_dereference(s_stream);
+ if (stream == NULL) {
+ ir_read_error(NULL, "when reading stream info in end-primitive");
+ return NULL;
+ }
+ return new(mem_ctx) ir_end_primitive(stream);
+ }
+ ir_read_error(NULL, "when reading end-primitive");
+ return NULL;
+}
+
+ir_barrier *
+ir_reader::read_barrier(s_expression *expr)
+{
+ s_pattern pat[] = { "barrier" };
+
+ if (MATCH(expr, pat)) {
+ return new(mem_ctx) ir_barrier();
+ }
+ ir_read_error(NULL, "when reading barrier");
+ return NULL;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.h
new file mode 100644
index 0000000000..89b0392a6c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_reader.h
@@ -0,0 +1,33 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_READER_H
+#define IR_READER_H
+
+#include "ir.h"
+
+void _mesa_glsl_read_ir(_mesa_glsl_parse_state *state, exec_list *instructions,
+ const char *src, bool scan_for_prototypes);
+
+#endif /* IR_READER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp
new file mode 100644
index 0000000000..72dd6201ec
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp
@@ -0,0 +1,316 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_rvalue_visitor.cpp
+ *
+ * Generic class implementing the common pattern of visiting each
+ * ir_rvalue * in the tree and possibly replacing that node with one of
+ * a different class.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "compiler/glsl_types.h"
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_expression *ir)
+{
+ unsigned int operand;
+
+ for (operand = 0; operand < ir->num_operands; operand++) {
+ handle_rvalue(&ir->operands[operand]);
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_texture *ir)
+{
+ handle_rvalue(&ir->coordinate);
+ handle_rvalue(&ir->projector);
+ handle_rvalue(&ir->shadow_comparator);
+ handle_rvalue(&ir->offset);
+
+ switch (ir->op) {
+ case ir_tex:
+ case ir_lod:
+ case ir_query_levels:
+ case ir_texture_samples:
+ case ir_samples_identical:
+ break;
+ case ir_txb:
+ handle_rvalue(&ir->lod_info.bias);
+ break;
+ case ir_txf:
+ case ir_txl:
+ case ir_txs:
+ handle_rvalue(&ir->lod_info.lod);
+ break;
+ case ir_txf_ms:
+ handle_rvalue(&ir->lod_info.sample_index);
+ break;
+ case ir_txd:
+ handle_rvalue(&ir->lod_info.grad.dPdx);
+ handle_rvalue(&ir->lod_info.grad.dPdy);
+ break;
+ case ir_tg4:
+ handle_rvalue(&ir->lod_info.component);
+ break;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_swizzle *ir)
+{
+ handle_rvalue(&ir->val);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_dereference_array *ir)
+{
+ /* The array index is not the target of the assignment, so clear the
+ * 'in_assignee' flag. Restore it after returning from the array index.
+ */
+ const bool was_in_assignee = this->in_assignee;
+ this->in_assignee = false;
+ handle_rvalue(&ir->array_index);
+ this->in_assignee = was_in_assignee;
+
+ handle_rvalue(&ir->array);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_dereference_record *ir)
+{
+ handle_rvalue(&ir->record);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_assignment *ir)
+{
+ handle_rvalue(&ir->rhs);
+ handle_rvalue(&ir->condition);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_call *ir)
+{
+ foreach_in_list_safe(ir_rvalue, param, &ir->actual_parameters) {
+ ir_rvalue *new_param = param;
+ handle_rvalue(&new_param);
+
+ if (new_param != param) {
+ param->replace_with(new_param);
+ }
+ }
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_discard *ir)
+{
+ handle_rvalue(&ir->condition);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_return *ir)
+{
+ handle_rvalue(&ir->value);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_if *ir)
+{
+ handle_rvalue(&ir->condition);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_emit_vertex *ir)
+{
+ handle_rvalue(&ir->stream);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_base_visitor::rvalue_visit(ir_end_primitive *ir)
+{
+ handle_rvalue(&ir->stream);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_expression *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_texture *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_swizzle *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_dereference_array *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_dereference_record *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_assignment *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_call *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_discard *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_return *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_if *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_emit_vertex *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_visitor::visit_leave(ir_end_primitive *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_expression *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_texture *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_swizzle *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_dereference_array *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_dereference_record *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_assignment *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_call *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_discard *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_return *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_if *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_emit_vertex *ir)
+{
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+ir_rvalue_enter_visitor::visit_enter(ir_end_primitive *ir)
+{
+ return rvalue_visit(ir);
+}
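
One detail worth noting from the implementation above: rvalue_visit(ir_dereference_array *) clears in_assignee around the array index because even on the left-hand side of an assignment the index is read, not written. Subclasses can rely on that flag; a minimal sketch of a visitor that counts genuine reads (the class itself is hypothetical):

    /* Hypothetical example: count rvalue slots that are actually read. */
    class ir_read_counter : public ir_rvalue_visitor {
    public:
       ir_read_counter() : reads(0) {}

       virtual void handle_rvalue(ir_rvalue **rvalue)
       {
          /* in_assignee is false for array indices even inside an LHS,
           * thanks to rvalue_visit(ir_dereference_array *) above.
           */
          if (*rvalue != NULL && !this->in_assignee)
             reads++;
       }

       unsigned reads;
    };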
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h
new file mode 100644
index 0000000000..73d1885a25
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_rvalue_visitor.h
+ *
+ * Generic class implementing the common pattern of visiting each
+ * ir_rvalue * in the tree and possibly replacing that node with one of
+ * a different class. Just implement handle_rvalue() and it will be
+ * called with a pointer to each rvalue in the tree.
+ */
+
+#ifndef GLSL_IR_RVALUE_VISITOR_H
+#define GLSL_IR_RVALUE_VISITOR_H
+
+class ir_rvalue_base_visitor : public ir_hierarchical_visitor {
+public:
+ ir_visitor_status rvalue_visit(ir_assignment *);
+ ir_visitor_status rvalue_visit(ir_call *);
+ ir_visitor_status rvalue_visit(ir_dereference_array *);
+ ir_visitor_status rvalue_visit(ir_dereference_record *);
+ ir_visitor_status rvalue_visit(ir_discard *);
+ ir_visitor_status rvalue_visit(ir_expression *);
+ ir_visitor_status rvalue_visit(ir_if *);
+ ir_visitor_status rvalue_visit(ir_return *);
+ ir_visitor_status rvalue_visit(ir_swizzle *);
+ ir_visitor_status rvalue_visit(ir_texture *);
+ ir_visitor_status rvalue_visit(ir_emit_vertex *);
+ ir_visitor_status rvalue_visit(ir_end_primitive *);
+
+ virtual void handle_rvalue(ir_rvalue **rvalue) = 0;
+};
+
+class ir_rvalue_visitor : public ir_rvalue_base_visitor {
+public:
+
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ virtual ir_visitor_status visit_leave(ir_call *);
+ virtual ir_visitor_status visit_leave(ir_dereference_array *);
+ virtual ir_visitor_status visit_leave(ir_dereference_record *);
+ virtual ir_visitor_status visit_leave(ir_discard *);
+ virtual ir_visitor_status visit_leave(ir_expression *);
+ virtual ir_visitor_status visit_leave(ir_if *);
+ virtual ir_visitor_status visit_leave(ir_return *);
+ virtual ir_visitor_status visit_leave(ir_swizzle *);
+ virtual ir_visitor_status visit_leave(ir_texture *);
+ virtual ir_visitor_status visit_leave(ir_emit_vertex *);
+ virtual ir_visitor_status visit_leave(ir_end_primitive *);
+};
+
+class ir_rvalue_enter_visitor : public ir_rvalue_base_visitor {
+public:
+
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_call *);
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+ virtual ir_visitor_status visit_enter(ir_dereference_record *);
+ virtual ir_visitor_status visit_enter(ir_discard *);
+ virtual ir_visitor_status visit_enter(ir_expression *);
+ virtual ir_visitor_status visit_enter(ir_if *);
+ virtual ir_visitor_status visit_enter(ir_return *);
+ virtual ir_visitor_status visit_enter(ir_swizzle *);
+ virtual ir_visitor_status visit_enter(ir_texture *);
+ virtual ir_visitor_status visit_enter(ir_emit_vertex *);
+ virtual ir_visitor_status visit_enter(ir_end_primitive *);
+};
+
+#endif /* GLSL_IR_RVALUE_VISITOR_H */
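
As the comment above says, a concrete pass only needs handle_rvalue(). A sketch of the usual shape, where lower_somehow() stands in for real lowering logic:

    /* Stand-in for a real transformation. */
    static ir_rvalue *lower_somehow(ir_rvalue *rv) { return rv; }

    class ir_lower_something_visitor : public ir_rvalue_visitor {
    public:
       virtual void handle_rvalue(ir_rvalue **rvalue)
       {
          if (*rvalue == NULL)
             return;

          /* Writing through the pointer splices the replacement into
           * the IR tree; ir_call parameters get an extra fixup via
           * replace_with() in rvalue_visit(ir_call *).
           */
          *rvalue = lower_somehow(*rvalue);
       }
    };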
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp
new file mode 100644
index 0000000000..a3cb19479b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_set_program_inouts.cpp
+ *
+ * Sets the inputs_read and outputs_written of Mesa programs.
+ *
+ * Mesa programs (gl_program, not gl_shader_program) have a set of
+ * flags indicating which varyings are read and written. Computing
+ * which are actually read from some sort of backend code can be
+ * tricky when variable array indexing is involved. So this pass
+ * provides support for setting inputs_read and outputs_written right
+ * from the GLSL IR.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "compiler/glsl_types.h"
+#include "main/mtypes.h"
+
+namespace {
+
+class ir_set_program_inouts_visitor : public ir_hierarchical_visitor {
+public:
+ ir_set_program_inouts_visitor(struct gl_program *prog,
+ gl_shader_stage shader_stage)
+ {
+ this->prog = prog;
+ this->shader_stage = shader_stage;
+ }
+ ~ir_set_program_inouts_visitor()
+ {
+ }
+
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+ virtual ir_visitor_status visit_enter(ir_function_signature *);
+ virtual ir_visitor_status visit_enter(ir_discard *);
+ virtual ir_visitor_status visit_enter(ir_texture *);
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+private:
+ void mark_whole_variable(ir_variable *var);
+ bool try_mark_partial_variable(ir_variable *var, ir_rvalue *index);
+
+ struct gl_program *prog;
+ gl_shader_stage shader_stage;
+};
+
+} /* anonymous namespace */
+
+static inline bool
+is_shader_inout(ir_variable *var)
+{
+ return var->data.mode == ir_var_shader_in ||
+ var->data.mode == ir_var_shader_out ||
+ var->data.mode == ir_var_system_value;
+}
+
+static void
+mark(struct gl_program *prog, ir_variable *var, int offset, int len,
+ gl_shader_stage stage)
+{
+ /* As of GLSL 1.20, varyings can only be floats, floating-point
+ * vectors or matrices, or arrays of them. For Mesa programs using
+ * inputs_read/outputs_written, everything but matrices uses one
+ * slot, while matrices use a slot per column. Presumably
+ * something doing a more clever packing would use something other
+ * than inputs_read/outputs_written.
+ */
+
+ for (int i = 0; i < len; i++) {
+ assert(var->data.location != -1);
+
+ int idx = var->data.location + offset + i;
+ bool is_patch_generic = var->data.patch &&
+ idx != VARYING_SLOT_TESS_LEVEL_INNER &&
+ idx != VARYING_SLOT_TESS_LEVEL_OUTER &&
+ idx != VARYING_SLOT_BOUNDING_BOX0 &&
+ idx != VARYING_SLOT_BOUNDING_BOX1;
+ GLbitfield64 bitfield;
+
+ if (is_patch_generic) {
+ assert(idx >= VARYING_SLOT_PATCH0 && idx < VARYING_SLOT_TESS_MAX);
+ bitfield = BITFIELD64_BIT(idx - VARYING_SLOT_PATCH0);
+ }
+ else {
+ assert(idx < VARYING_SLOT_MAX);
+ bitfield = BITFIELD64_BIT(idx);
+ }
+
+ if (var->data.mode == ir_var_shader_in) {
+ if (is_patch_generic)
+ prog->info.patch_inputs_read |= bitfield;
+ else
+ prog->info.inputs_read |= bitfield;
+
+ /* Dual-slot (double) inputs are only tracked for vertex inputs. */
+ if (stage == MESA_SHADER_VERTEX &&
+ var->type->without_array()->is_dual_slot())
+ prog->DualSlotInputs |= bitfield;
+
+ if (stage == MESA_SHADER_FRAGMENT) {
+ prog->info.fs.uses_sample_qualifier |= var->data.sample;
+ }
+ } else if (var->data.mode == ir_var_system_value) {
+ prog->info.system_values_read |= bitfield;
+ } else {
+ assert(var->data.mode == ir_var_shader_out);
+ if (is_patch_generic) {
+ prog->info.patch_outputs_written |= bitfield;
+ } else if (!var->data.read_only) {
+ prog->info.outputs_written |= bitfield;
+ if (var->data.index > 0)
+ prog->SecondaryOutputsWritten |= bitfield;
+ }
+
+ if (var->data.fb_fetch_output)
+ prog->info.outputs_read |= bitfield;
+ }
+ }
+}
+
+/**
+ * Mark an entire variable as used. Caller must ensure that the variable
+ * represents a shader input or output.
+ */
+void
+ir_set_program_inouts_visitor::mark_whole_variable(ir_variable *var)
+{
+ const glsl_type *type = var->type;
+ bool is_vertex_input = false;
+ if (this->shader_stage == MESA_SHADER_GEOMETRY &&
+ var->data.mode == ir_var_shader_in && type->is_array()) {
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_TESS_CTRL &&
+ var->data.mode == ir_var_shader_in) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_TESS_CTRL &&
+ var->data.mode == ir_var_shader_out && !var->data.patch) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_TESS_EVAL &&
+ var->data.mode == ir_var_shader_in && !var->data.patch) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_VERTEX &&
+ var->data.mode == ir_var_shader_in)
+ is_vertex_input = true;
+
+ mark(this->prog, var, 0, type->count_attribute_slots(is_vertex_input),
+ this->shader_stage);
+}
+
+/* Default handler: Mark all the locations in the variable as used. */
+ir_visitor_status
+ir_set_program_inouts_visitor::visit(ir_dereference_variable *ir)
+{
+ if (!is_shader_inout(ir->var))
+ return visit_continue;
+
+ mark_whole_variable(ir->var);
+
+ return visit_continue;
+}
+
+/**
+ * Try to mark a portion of the given variable as used. Caller must ensure
+ * that the variable represents a shader input or output which can be indexed
+ * into in array fashion (an array or matrix). For the purpose of geometry
+ * shader inputs (which are always arrays*), this means that the array element
+ * must be something that can be indexed into in array fashion.
+ *
+ * *Except gl_PrimitiveIDIn, as noted below.
+ *
+ * For tessellation control shaders all inputs and non-patch outputs are
+ * arrays. For tessellation evaluation shaders non-patch inputs are arrays.
+ *
+ * If the index can't be interpreted as a constant, or some other problem
+ * occurs, then nothing will be marked and false will be returned.
+ */
+bool
+ir_set_program_inouts_visitor::try_mark_partial_variable(ir_variable *var,
+ ir_rvalue *index)
+{
+ const glsl_type *type = var->type;
+
+ if (this->shader_stage == MESA_SHADER_GEOMETRY &&
+ var->data.mode == ir_var_shader_in) {
+ /* The only geometry shader input that is not an array is
+ * gl_PrimitiveIDIn, and in that case, this code will never be reached,
+ * because gl_PrimitiveIDIn can't be indexed into in array fashion.
+ */
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_TESS_CTRL &&
+ var->data.mode == ir_var_shader_in) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_TESS_CTRL &&
+ var->data.mode == ir_var_shader_out && !var->data.patch) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ if (this->shader_stage == MESA_SHADER_TESS_EVAL &&
+ var->data.mode == ir_var_shader_in && !var->data.patch) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ /* TODO: implement proper arrays-of-arrays support.
+ * For now, let the caller mark the whole variable as used.
+ */
+ if (type->is_array() && type->fields.array->is_array())
+ return false;
+
+ /* The code below only handles:
+ *
+ * - Indexing into matrices
+ * - Indexing into arrays of (matrices, vectors, or scalars)
+ *
+ * All other possibilities are either prohibited by GLSL (vertex inputs and
+ * fragment outputs can't be structs) or should have been eliminated by
+ * lowering passes (do_vec_index_to_swizzle() gets rid of indexing into
+ * vectors, and lower_packed_varyings() gets rid of structs that occur in
+ * varyings).
+ *
+ * However, we don't use varying packing in all cases - tessellation
+ * shaders bypass it. This means we'll see varying structs and arrays
+ * of structs here. For now, we just give up so the caller marks the
+ * entire variable as used.
+ */
+ if (!(type->is_matrix() ||
+ (type->is_array() &&
+ (type->fields.array->is_numeric() ||
+ type->fields.array->is_boolean())))) {
+
+ /* If we don't know how to handle this case, give up and let the
+ * caller mark the whole variable as used.
+ */
+ return false;
+ }
+
+ ir_constant *index_as_constant = index->as_constant();
+ if (!index_as_constant)
+ return false;
+
+ unsigned elem_width;
+ unsigned num_elems;
+ if (type->is_array()) {
+ num_elems = type->length;
+ if (type->fields.array->is_matrix())
+ elem_width = type->fields.array->matrix_columns;
+ else
+ elem_width = 1;
+ } else {
+ num_elems = type->matrix_columns;
+ elem_width = 1;
+ }
+
+ if (index_as_constant->value.u[0] >= num_elems) {
+ /* Constant index outside the bounds of the matrix/array. This could
+ * arise as a result of constant folding of a legal GLSL program.
+ *
+ * Even though the spec says that indexing outside the bounds of a
+ * matrix/array results in undefined behaviour, we don't want to pass
+ * out-of-range values to mark() (since this could result in slots that
+ * don't exist being marked as used), so just let the caller mark the
+ * whole variable as used.
+ */
+ return false;
+ }
+
+ /* Double the element width for double types that take two slots. */
+ if (this->shader_stage != MESA_SHADER_VERTEX ||
+ var->data.mode != ir_var_shader_in) {
+ if (type->without_array()->is_dual_slot())
+ elem_width *= 2;
+ }
+
+ mark(this->prog, var, index_as_constant->value.u[0] * elem_width,
+ elem_width, this->shader_stage);
+ return true;
+}
+
+static bool
+is_multiple_vertices(gl_shader_stage stage, ir_variable *var)
+{
+ if (var->data.patch)
+ return false;
+
+ if (var->data.mode == ir_var_shader_in)
+ return stage == MESA_SHADER_GEOMETRY ||
+ stage == MESA_SHADER_TESS_CTRL ||
+ stage == MESA_SHADER_TESS_EVAL;
+ if (var->data.mode == ir_var_shader_out)
+ return stage == MESA_SHADER_TESS_CTRL;
+
+ return false;
+}
+
+ir_visitor_status
+ir_set_program_inouts_visitor::visit_enter(ir_dereference_array *ir)
+{
+ /* Note: for geometry shader inputs, lower_named_interface_blocks may
+ * create 2D arrays, so we need to be able to handle those. 2D arrays
+ * shouldn't be able to crop up for any other reason.
+ */
+ if (ir_dereference_array * const inner_array =
+ ir->array->as_dereference_array()) {
+ /* ir => foo[i][j]
+ * inner_array => foo[i]
+ */
+ if (ir_dereference_variable * const deref_var =
+ inner_array->array->as_dereference_variable()) {
+ if (is_multiple_vertices(this->shader_stage, deref_var->var)) {
+ /* foo is a geometry or tessellation shader input, so i is
+ * the vertex, and j the part of the input we're accessing.
+ */
+ if (try_mark_partial_variable(deref_var->var, ir->array_index))
+ {
+ /* We've now taken care of foo and j, but i might contain a
+ * subexpression that accesses shader inputs. So manually
+ * visit i and then continue with the parent.
+ */
+ inner_array->array_index->accept(this);
+ return visit_continue_with_parent;
+ }
+ }
+ }
+ } else if (ir_dereference_variable * const deref_var =
+ ir->array->as_dereference_variable()) {
+ /* ir => foo[i], where foo is a variable. */
+ if (is_multiple_vertices(this->shader_stage, deref_var->var)) {
+ /* foo is a geometry or tessellation shader input, so i is
+ * the vertex, and we're accessing the entire input.
+ */
+ mark_whole_variable(deref_var->var);
+ /* We've now taken care of foo, but i might contain a subexpression
+ * that accesses shader inputs. So manually visit i and then
+ * continue with the parent.
+ */
+ ir->array_index->accept(this);
+ return visit_continue_with_parent;
+ } else if (is_shader_inout(deref_var->var)) {
+ /* foo is a shader input/output, but not a geometry shader input,
+ * so i is the part of the input we're accessing.
+ */
+ if (try_mark_partial_variable(deref_var->var, ir->array_index))
+ return visit_continue_with_parent;
+ }
+ }
+
+ /* The expression is something we don't recognize. Just visit its
+ * subexpressions.
+ */
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_set_program_inouts_visitor::visit_enter(ir_function_signature *ir)
+{
+ /* We don't want to descend into the function parameters and
+ * consider them as shader inputs or outputs.
+ */
+ visit_list_elements(this, &ir->body);
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_set_program_inouts_visitor::visit_enter(ir_discard *)
+{
+ /* discards are only allowed in fragment shaders. */
+ assert(this->shader_stage == MESA_SHADER_FRAGMENT);
+
+ prog->info.fs.uses_discard = true;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_set_program_inouts_visitor::visit_enter(ir_texture *ir)
+{
+ if (ir->op == ir_tg4)
+ prog->info.uses_texture_gather = true;
+ return visit_continue;
+}
+
+void
+do_set_program_inouts(exec_list *instructions, struct gl_program *prog,
+ gl_shader_stage shader_stage)
+{
+ ir_set_program_inouts_visitor v(prog, shader_stage);
+
+ prog->info.inputs_read = 0;
+ prog->info.outputs_written = 0;
+ prog->SecondaryOutputsWritten = 0;
+ prog->info.outputs_read = 0;
+ prog->info.patch_inputs_read = 0;
+ prog->info.patch_outputs_written = 0;
+ prog->info.system_values_read = 0;
+ if (shader_stage == MESA_SHADER_FRAGMENT) {
+ prog->info.fs.uses_sample_qualifier = false;
+ prog->info.fs.uses_discard = false;
+ }
+ visit_list_elements(&v, instructions);
+}
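
To make the slot arithmetic in mark() concrete: a matrix varying uses one slot per column, so a mat4 input at location 7 sets bits 7 through 10 of inputs_read. A standalone sketch of the same bit math (the helper name and example values are illustrative):

    #include <stdint.h>

    #define BITFIELD64_BIT(b) (UINT64_C(1) << (b))

    /* Mark `len` consecutive slots starting at location + offset, the
     * same way mark() fills inputs_read/outputs_written above.
     */
    static uint64_t
    mark_slots(uint64_t used, int location, int offset, int len)
    {
       for (int i = 0; i < len; i++)
          used |= BITFIELD64_BIT(location + offset + i);
       return used;
    }

    /* mat4 at location 7: columns land in slots 7..10. */
    /* uint64_t inputs_read = mark_slots(0, 7, 0, 4); */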
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_uniform.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_uniform.h
new file mode 100644
index 0000000000..9770790cb2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_uniform.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_UNIFORM_H
+#define IR_UNIFORM_H
+
+
+/* stdbool.h is necessary because this file is included in both C and C++ code.
+ */
+#include <stdbool.h>
+#include "util/macros.h"
+#include "program/prog_parameter.h" /* For union gl_constant_value. */
+
+/**
+ * Used by GL_ARB_explicit_uniform_location extension code in the linker
+ * and glUniform* functions to identify inactive explicit uniform locations.
+ */
+#define INACTIVE_UNIFORM_EXPLICIT_LOCATION ((gl_uniform_storage *) -1)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum PACKED gl_uniform_driver_format {
+ uniform_native = 0, /**< Store data in the native format. */
+ uniform_int_float, /**< Store integer data as floats. */
+};
+
+struct gl_uniform_driver_storage {
+ /**
+ * Number of bytes from one array element to the next.
+ */
+ uint8_t element_stride;
+
+ /**
+ * Number of bytes from one vector in a matrix to the next.
+ */
+ uint8_t vector_stride;
+
+ /**
+ * Base format of the stored data.
+ */
+ enum gl_uniform_driver_format format;
+
+ /**
+ * Pointer to the base of the data.
+ */
+ void *data;
+};
+
+struct gl_opaque_uniform_index {
+ /**
+ * Base opaque uniform index
+ *
+ * If \c gl_uniform_storage::base_type is an opaque type, this
+ * represents its uniform index. If \c
+ * gl_uniform_storage::array_elements is not zero, the array will
+ * use opaque uniform indices \c index through \c index + \c
+ * gl_uniform_storage::array_elements - 1, inclusive.
+ *
+ * Note that the index may be different in each shader stage.
+ */
+ uint8_t index;
+
+ /**
+ * Whether this opaque uniform is used in this shader stage.
+ */
+ bool active;
+};
+
+struct gl_uniform_storage {
+ char *name;
+ /** Type of this uniform data stored.
+ *
+ * In the case of an array, it's the type of a single array element.
+ */
+ const struct glsl_type *type;
+
+ /**
+ * The number of elements in this uniform.
+ *
+ * For non-arrays, this is always 0. For arrays, the value is the size of
+ * the array.
+ */
+ unsigned array_elements;
+
+ struct gl_opaque_uniform_index opaque[MESA_SHADER_STAGES];
+
+ /**
+ * Mask of shader stages (1 << MESA_SHADER_xxx) where this uniform is used.
+ */
+ unsigned active_shader_mask;
+
+ /**
+ * Storage used by the driver for the uniform
+ */
+ unsigned num_driver_storage;
+ struct gl_uniform_driver_storage *driver_storage;
+
+ /**
+ * Storage used by Mesa for the uniform
+ *
+ * This form of the uniform is used by Mesa's implementation of \c
+ * glGetUniform. It can also be used by drivers to obtain the value of the
+ * uniform if the \c ::driver_storage interface is not used.
+ */
+ union gl_constant_value *storage;
+
+ /** Fields for GL_ARB_uniform_buffer_object
+ * @{
+ */
+
+ /**
+ * GL_UNIFORM_BLOCK_INDEX: index of the uniform block containing
+ * the uniform, or -1 for the default uniform block. Note that the
+ * index is into the linked program's UniformBlocks[] array, not
+ * the linked shader's.
+ */
+ int block_index;
+
+ /** GL_UNIFORM_OFFSET: byte offset within the uniform block, or -1. */
+ int offset;
+
+ /**
+ * GL_UNIFORM_MATRIX_STRIDE: byte stride between columns or rows of
+ * a matrix. Set to 0 for non-matrices in UBOs, or -1 for uniforms
+ * in the default uniform block.
+ */
+ int matrix_stride;
+
+ /**
+ * GL_UNIFORM_ARRAY_STRIDE: byte stride between elements of the
+ * array. Set to zero for non-arrays in UBOs, or -1 for uniforms
+ * in the default uniform block.
+ */
+ int array_stride;
+
+ /** GL_UNIFORM_ROW_MAJOR: true iff it's a row-major matrix in a UBO */
+ bool row_major;
+
+ /** @} */
+
+ /**
+ * This is a compiler-generated uniform that should not be advertised
+ * via the API.
+ */
+ bool hidden;
+
+ /**
+ * This is a built-in uniform that should not be modified through any gl API.
+ */
+ bool builtin;
+
+ /**
+ * This is a shader storage buffer variable, not a uniform.
+ */
+ bool is_shader_storage;
+
+ /**
+ * Index within gl_shader_program::AtomicBuffers[] of the atomic
+ * counter buffer this uniform is stored in, or -1 if this is not
+ * an atomic counter.
+ */
+ int atomic_buffer_index;
+
+ /**
+ * The 'base location' for this uniform in the uniform remap table. For
+ * arrays this is the first element in the array. For subroutines this
+ * is an index into the shader subroutine uniform remap table.
+ */
+ unsigned remap_location;
+
+ /**
+ * The number of compatible subroutines with this subroutine uniform.
+ */
+ unsigned num_compatible_subroutines;
+
+ /**
+ * A single integer identifying the number of active array elements of
+ * the top-level shader storage block member (GL_TOP_LEVEL_ARRAY_SIZE).
+ */
+ unsigned top_level_array_size;
+
+ /**
+ * A single integer identifying the stride between array elements of the
+ * top-level shader storage block member. (GL_TOP_LEVEL_ARRAY_STRIDE).
+ */
+ unsigned top_level_array_stride;
+
+ /**
+ * Whether this uniform variable has the bindless_sampler or bindless_image
+ * layout qualifier as specified by ARB_bindless_texture.
+ */
+ bool is_bindless;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* IR_UNIFORM_H */
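
The INACTIVE_UNIFORM_EXPLICIT_LOCATION sentinel above only matters to code that walks a uniform remap table: an entry can be empty, point at live gl_uniform_storage, or hold the sentinel for an explicit location whose uniform turned out to be inactive. A hedged sketch of the resulting three-way check (the helper is illustrative; the empty-slot convention is assumed to be NULL):

    static bool
    is_live_uniform_entry(const struct gl_uniform_storage *entry)
    {
       /* NULL: unused slot (assumed convention).
        * (gl_uniform_storage *) -1: declared at an explicit location
        * but inactive, so it must not be dereferenced.
        */
       return entry != NULL &&
              entry != INACTIVE_UNIFORM_EXPLICIT_LOCATION;
    }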
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp
new file mode 100644
index 0000000000..a04d77280e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_unused_structs.h"
+#include "glsl_types.h"
+
+
+class ir_struct_usage_visitor : public ir_hierarchical_visitor {
+public:
+ ir_struct_usage_visitor();
+ ~ir_struct_usage_visitor(void);
+
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ bool has_struct_entry(const glsl_type *t) const;
+
+ exec_list struct_list;
+ void *mem_ctx;
+};
+
+class ir_decl_removal_visitor : public ir_hierarchical_visitor {
+public:
+ ir_decl_removal_visitor(ir_struct_usage_visitor* used_structs)
+ : used_structs(used_structs)
+ {
+ }
+
+ virtual ir_visitor_status visit(ir_typedecl_statement* ir)
+ {
+ if (ir->type_decl->is_struct() && !used_structs->has_struct_entry(ir->type_decl))
+ {
+ ir->remove();
+ }
+ return visit_continue;
+ }
+
+ ir_struct_usage_visitor* used_structs;
+};
+
+
+struct struct_entry : public exec_node
+{
+ struct_entry(const glsl_type *type_) : type(type_) { }
+ const glsl_type *type;
+};
+
+
+bool
+ir_struct_usage_visitor::has_struct_entry(const glsl_type *t) const
+{
+ assert(t);
+ foreach_in_list(struct_entry, entry, &this->struct_list) {
+ if (entry->type == t)
+ return true;
+ }
+ return false;
+}
+
+
+ir_visitor_status
+ir_struct_usage_visitor::visit(ir_dereference_variable *ir)
+{
+ const glsl_type* t = ir->type;
+ if (t->base_type == GLSL_TYPE_STRUCT)
+ {
+ if (!has_struct_entry (t))
+ {
+ struct_entry *entry = new(mem_ctx) struct_entry(t);
+ this->struct_list.push_tail (entry);
+ }
+ }
+ return visit_continue;
+}
+
+static void visit_variable (ir_instruction* ir, void* data)
+{
+ ir_variable* var = ir->as_variable();
+ if (!var)
+ return;
+ ir_struct_usage_visitor* self = reinterpret_cast<ir_struct_usage_visitor*>(data);
+ const glsl_type* t = var->type;
+ if (t->base_type == GLSL_TYPE_ARRAY)
+ t = t->fields.array; // handle array of structs case
+ if (t->base_type == GLSL_TYPE_STRUCT)
+ {
+ if (!self->has_struct_entry (t))
+ {
+ struct_entry *entry = new(self->mem_ctx) struct_entry(t);
+ self->struct_list.push_tail (entry);
+ }
+ }
+
+}
+
+ir_struct_usage_visitor::ir_struct_usage_visitor()
+{
+ this->mem_ctx = ralloc_context(NULL);
+ this->struct_list.make_empty();
+ this->callback_enter = visit_variable;
+ this->data_enter = this;
+}
+
+ir_struct_usage_visitor::~ir_struct_usage_visitor(void)
+{
+ ralloc_free(mem_ctx);
+}
+
+
+
+void do_remove_unused_typedecls(exec_list* instructions)
+{
+ ir_struct_usage_visitor v;
+ v.run (instructions);
+
+ ir_decl_removal_visitor v2(&v);
+ v2.run (instructions);
+}
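
do_remove_unused_typedecls() is deliberately two passes: ir_struct_usage_visitor first records every struct type still reachable from a variable declaration or dereference (including the array-of-structs case handled in visit_variable), and only then does ir_decl_removal_visitor drop the ir_typedecl_statement nodes whose type was never recorded. A minimal usage sketch (the wrapper name is illustrative):

    #include "ir_unused_structs.h"

    static void
    strip_dead_struct_decls(exec_list *instructions)
    {
       /* Safe to call after other passes; a typedecl is removed only
        * when no remaining variable or dereference uses its struct.
        */
       do_remove_unused_typedecls(instructions);
    }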
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.h
new file mode 100644
index 0000000000..3c1bb5ec6b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_unused_structs.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+
+void do_remove_unused_typedecls(exec_list* instructions);
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_validate.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_validate.cpp
new file mode 100644
index 0000000000..e370bc21b2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_validate.cpp
@@ -0,0 +1,1129 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_validate.cpp
+ *
+ * Attempts to verify that various invariants of the IR tree are true.
+ *
+ * In particular, at the moment it makes sure that no single
+ * ir_instruction node except for ir_variable appears multiple times
+ * in the ir tree. ir_variable does appear multiple times: Once as a
+ * declaration in an exec_list, and multiple times as the endpoint of
+ * a dereference chain.
+ */
+
+#include "ir.h"
+#include "ir_hierarchical_visitor.h"
+#include "util/hash_table.h"
+#include "util/macros.h"
+#include "util/set.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+class ir_validate : public ir_hierarchical_visitor {
+public:
+ ir_validate()
+ {
+ this->ir_set = _mesa_pointer_set_create(NULL);
+
+ this->current_function = NULL;
+
+ this->callback_enter = ir_validate::validate_ir;
+ this->data_enter = ir_set;
+ }
+
+ ~ir_validate()
+ {
+ _mesa_set_destroy(this->ir_set, NULL);
+ }
+
+ virtual ir_visitor_status visit(ir_variable *v);
+ virtual ir_visitor_status visit(ir_dereference_variable *ir);
+
+ virtual ir_visitor_status visit_enter(ir_discard *ir);
+ virtual ir_visitor_status visit_enter(ir_if *ir);
+
+ virtual ir_visitor_status visit_enter(ir_function *ir);
+ virtual ir_visitor_status visit_leave(ir_function *ir);
+ virtual ir_visitor_status visit_enter(ir_function_signature *ir);
+
+ virtual ir_visitor_status visit_leave(ir_expression *ir);
+ virtual ir_visitor_status visit_leave(ir_swizzle *ir);
+
+ virtual ir_visitor_status visit_enter(class ir_dereference_array *);
+
+ virtual ir_visitor_status visit_enter(ir_assignment *ir);
+ virtual ir_visitor_status visit_enter(ir_call *ir);
+
+ static void validate_ir(ir_instruction *ir, void *data);
+
+ ir_function *current_function;
+
+ struct set *ir_set;
+};
+
+} /* anonymous namespace */
+
+ir_visitor_status
+ir_validate::visit(ir_dereference_variable *ir)
+{
+ if ((ir->var == NULL) || (ir->var->as_variable() == NULL)) {
+ printf("ir_dereference_variable @ %p does not specify a variable %p\n",
+ (void *) ir, (void *) ir->var);
+ abort();
+ }
+
+ if (_mesa_set_search(ir_set, ir->var) == NULL) {
+ printf("ir_dereference_variable @ %p specifies undeclared variable "
+ "`%s' @ %p\n",
+ (void *) ir, ir->var->name, (void *) ir->var);
+ abort();
+ }
+
+ this->validate_ir(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_enter(class ir_dereference_array *ir)
+{
+ if (!ir->array->type->is_array() && !ir->array->type->is_matrix() &&
+ !ir->array->type->is_vector()) {
+ printf("ir_dereference_array @ %p does not specify an array, a vector "
+ "or a matrix\n",
+ (void *) ir);
+ ir->print();
+ printf("\n");
+ abort();
+ }
+
+ if (!ir->array_index->type->is_scalar()) {
+ printf("ir_dereference_array @ %p does not have scalar index: %s\n",
+ (void *) ir, ir->array_index->type->name);
+ abort();
+ }
+
+ if (!ir->array_index->type->is_integer_32()) {
+ printf("ir_dereference_array @ %p does not have integer index: %s\n",
+ (void *) ir, ir->array_index->type->name);
+ abort();
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_enter(ir_discard *ir)
+{
+ if (ir->condition && ir->condition->type != glsl_type::bool_type) {
+ printf("ir_discard condition %s type instead of bool.\n",
+ ir->condition->type->name);
+ ir->print();
+ printf("\n");
+ abort();
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_enter(ir_if *ir)
+{
+ if (ir->condition->type != glsl_type::bool_type) {
+ printf("ir_if condition %s type instead of bool.\n",
+ ir->condition->type->name);
+ ir->print();
+ printf("\n");
+ abort();
+ }
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+ir_validate::visit_enter(ir_function *ir)
+{
+ /* Function definitions cannot be nested.
+ */
+ if (this->current_function != NULL) {
+ printf("Function definition nested inside another function "
+ "definition:\n");
+ printf("%s %p inside %s %p\n",
+ ir->name, (void *) ir,
+ this->current_function->name, (void *) this->current_function);
+ abort();
+ }
+
+ /* Store the current function hierarchy being traversed. This is used
+ * by the function signature visitor to ensure that the signatures are
+ * linked with the correct functions.
+ */
+ this->current_function = ir;
+
+ this->validate_ir(ir, this->data_enter);
+
+ /* Verify that all of the things stored in the list of signatures are,
+ * in fact, function signatures.
+ */
+ foreach_in_list(ir_instruction, sig, &ir->signatures) {
+ if (sig->ir_type != ir_type_function_signature) {
+ printf("Non-signature in signature list of function `%s'\n",
+ ir->name);
+ abort();
+ }
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_leave(ir_function *ir)
+{
+ assert(ralloc_parent(ir->name) == ir);
+
+ this->current_function = NULL;
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_enter(ir_function_signature *ir)
+{
+ if (this->current_function != ir->function()) {
+ printf("Function signature nested inside wrong function "
+ "definition:\n");
+ printf("%p inside %s %p instead of %s %p\n",
+ (void *) ir,
+ this->current_function->name, (void *) this->current_function,
+ ir->function_name(), (void *) ir->function());
+ abort();
+ }
+
+ if (ir->return_type == NULL) {
+ printf("Function signature %p for function %s has NULL return type.\n",
+ (void *) ir, ir->function_name());
+ abort();
+ }
+
+ this->validate_ir(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_leave(ir_expression *ir)
+{
+ for (unsigned i = ir->num_operands; i < 4; i++) {
+ assert(ir->operands[i] == NULL);
+ }
+
+ for (unsigned i = 0; i < ir->num_operands; i++) {
+ assert(ir->operands[i] != NULL);
+ }
+
+ switch (ir->operation) {
+ case ir_unop_bit_not:
+ assert(ir->operands[0]->type == ir->type);
+ break;
+ case ir_unop_logic_not:
+ assert(ir->type->is_boolean());
+ assert(ir->operands[0]->type->is_boolean());
+ break;
+
+ case ir_unop_neg:
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_unop_abs:
+ case ir_unop_sign:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
+ ir->operands[0]->type->is_float_16_32_64() ||
+ ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_unop_rcp:
+ case ir_unop_rsq:
+ case ir_unop_sqrt:
+ assert(ir->type->is_float_16_32_64());
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_unop_exp:
+ case ir_unop_log:
+ case ir_unop_exp2:
+ case ir_unop_log2:
+ case ir_unop_saturate:
+ assert(ir->operands[0]->type->is_float_16_32());
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_unop_f2i:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_f2u:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+ case ir_unop_i2f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_f2b:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->is_boolean());
+ break;
+ case ir_unop_f162b:
+ assert(ir->operands[0]->type->base_type ==
+ GLSL_TYPE_FLOAT16);
+ assert(ir->type->is_boolean());
+ break;
+ case ir_unop_b2f:
+ assert(ir->operands[0]->type->is_boolean());
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_b2f16:
+ assert(ir->operands[0]->type->is_boolean());
+ assert(ir->type->base_type == GLSL_TYPE_FLOAT16);
+ break;
+ case ir_unop_i2b:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->is_boolean());
+ break;
+ case ir_unop_b2i:
+ assert(ir->operands[0]->type->is_boolean());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_u2f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_i2u:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+ case ir_unop_u2i:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_bitcast_i2f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_bitcast_f2i:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_bitcast_u2f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_bitcast_f2u:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+
+ case ir_unop_bitcast_u642d:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT64);
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_bitcast_i642d:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_bitcast_d2u64:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+ case ir_unop_bitcast_d2i64:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_i642i:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_u642i:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT64);
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_i642u:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+ case ir_unop_u642u:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT64);
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+ case ir_unop_i642b:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->is_boolean());
+ break;
+ case ir_unop_i642f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_u642f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT64);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_i642d:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_u642d:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT64);
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_i2i64:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_u2i64:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_b2i64:
+ assert(ir->operands[0]->type->is_boolean());
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_f2i64:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_d2i64:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_i2u64:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+ case ir_unop_u2u64:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+ case ir_unop_f2u64:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+ case ir_unop_d2u64:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+ case ir_unop_u642i64:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT64);
+ assert(ir->type->base_type == GLSL_TYPE_INT64);
+ break;
+ case ir_unop_i642u64:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT64);
+ assert(ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+ case ir_unop_trunc:
+ case ir_unop_round_even:
+ case ir_unop_ceil:
+ case ir_unop_floor:
+ case ir_unop_fract:
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->operands[0]->type == ir->type);
+ break;
+ case ir_unop_sin:
+ case ir_unop_cos:
+ case ir_unop_dFdx:
+ case ir_unop_dFdx_coarse:
+ case ir_unop_dFdx_fine:
+ case ir_unop_dFdy:
+ case ir_unop_dFdy_coarse:
+ case ir_unop_dFdy_fine:
+ assert(ir->operands[0]->type->is_float_16_32());
+ assert(ir->operands[0]->type == ir->type);
+ break;
+
+ case ir_unop_pack_snorm_2x16:
+ case ir_unop_pack_unorm_2x16:
+ case ir_unop_pack_half_2x16:
+ assert(ir->type == glsl_type::uint_type);
+ assert(ir->operands[0]->type == glsl_type::vec2_type);
+ break;
+
+ case ir_unop_pack_snorm_4x8:
+ case ir_unop_pack_unorm_4x8:
+ assert(ir->type == glsl_type::uint_type);
+ assert(ir->operands[0]->type == glsl_type::vec4_type);
+ break;
+
+ case ir_unop_pack_double_2x32:
+ assert(ir->type == glsl_type::double_type);
+ assert(ir->operands[0]->type == glsl_type::uvec2_type);
+ break;
+
+ case ir_unop_pack_int_2x32:
+ assert(ir->type == glsl_type::int64_t_type);
+ assert(ir->operands[0]->type == glsl_type::ivec2_type);
+ break;
+
+ case ir_unop_pack_uint_2x32:
+ assert(ir->type == glsl_type::uint64_t_type);
+ assert(ir->operands[0]->type == glsl_type::uvec2_type);
+ break;
+
+ case ir_unop_pack_sampler_2x32:
+ assert(ir->type->is_sampler());
+ assert(ir->operands[0]->type == glsl_type::uvec2_type);
+ break;
+
+ case ir_unop_pack_image_2x32:
+ assert(ir->type->is_image());
+ assert(ir->operands[0]->type == glsl_type::uvec2_type);
+ break;
+
+ case ir_unop_unpack_snorm_2x16:
+ case ir_unop_unpack_unorm_2x16:
+ case ir_unop_unpack_half_2x16:
+ assert(ir->type == glsl_type::vec2_type);
+ assert(ir->operands[0]->type == glsl_type::uint_type);
+ break;
+
+ case ir_unop_unpack_snorm_4x8:
+ case ir_unop_unpack_unorm_4x8:
+ assert(ir->type == glsl_type::vec4_type);
+ assert(ir->operands[0]->type == glsl_type::uint_type);
+ break;
+
+ case ir_unop_unpack_double_2x32:
+ assert(ir->type == glsl_type::uvec2_type);
+ assert(ir->operands[0]->type == glsl_type::double_type);
+ break;
+
+ case ir_unop_unpack_int_2x32:
+ assert(ir->type == glsl_type::ivec2_type);
+ assert(ir->operands[0]->type == glsl_type::int64_t_type);
+ break;
+
+ case ir_unop_unpack_uint_2x32:
+ assert(ir->type == glsl_type::uvec2_type);
+ assert(ir->operands[0]->type == glsl_type::uint64_t_type);
+ break;
+
+ case ir_unop_unpack_sampler_2x32:
+ assert(ir->type == glsl_type::uvec2_type);
+ assert(ir->operands[0]->type->is_sampler());
+ break;
+
+ case ir_unop_unpack_image_2x32:
+ assert(ir->type == glsl_type::uvec2_type);
+ assert(ir->operands[0]->type->is_image());
+ break;
+
+ case ir_unop_bitfield_reverse:
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->type->is_integer_32());
+ break;
+
+ case ir_unop_bit_count:
+ case ir_unop_find_msb:
+ case ir_unop_find_lsb:
+ assert(ir->operands[0]->type->vector_elements == ir->type->vector_elements);
+ assert(ir->operands[0]->type->is_integer_32());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+
+ case ir_unop_clz:
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+
+ case ir_unop_interpolate_at_centroid:
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->operands[0]->type->is_float_16_32());
+ break;
+
+ case ir_unop_get_buffer_size:
+ assert(ir->type == glsl_type::int_type);
+ assert(ir->operands[0]->type == glsl_type::uint_type);
+ break;
+
+ case ir_unop_ssbo_unsized_array_length:
+ assert(ir->type == glsl_type::int_type);
+ assert(ir->operands[0]->type->is_array());
+ assert(ir->operands[0]->type->is_unsized_array());
+ break;
+
+ case ir_unop_d2f:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_f2d:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_f162f:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_FLOAT16);
+ assert(ir->type->is_float());
+ break;
+ case ir_unop_f2f16:
+ case ir_unop_f2fmp:
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->type->base_type == GLSL_TYPE_FLOAT16);
+ break;
+ case ir_unop_d2i:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_i2d:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_d2u:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+ case ir_unop_u2d:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_d2b:
+ assert(ir->operands[0]->type->is_double());
+ assert(ir->type->is_boolean());
+ break;
+
+ case ir_unop_frexp_sig:
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->type->is_double());
+ break;
+ case ir_unop_frexp_exp:
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+ case ir_unop_subroutine_to_int:
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_SUBROUTINE);
+ assert(ir->type->base_type == GLSL_TYPE_INT);
+ break;
+
+ case ir_unop_atan:
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_binop_add:
+ case ir_binop_sub:
+ case ir_binop_mul:
+ case ir_binop_div:
+ case ir_binop_mod:
+ case ir_binop_min:
+ case ir_binop_max:
+ case ir_binop_pow:
+ assert(ir->operands[0]->type->base_type ==
+ ir->operands[1]->type->base_type);
+
+ if (ir->operation == ir_binop_mul &&
+ (ir->type->base_type == GLSL_TYPE_UINT64 ||
+ ir->type->base_type == GLSL_TYPE_INT64) &&
+ (ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
+ ir->operands[1]->type->base_type == GLSL_TYPE_INT ||
+ ir->operands[0]->type->base_type == GLSL_TYPE_UINT ||
+ ir->operands[1]->type->base_type == GLSL_TYPE_UINT)) {
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ break;
+ }
+
+ if (ir->operands[0]->type->is_scalar())
+ assert(ir->operands[1]->type == ir->type);
+ else if (ir->operands[1]->type->is_scalar())
+ assert(ir->operands[0]->type == ir->type);
+ else if (ir->operands[0]->type->is_vector() &&
+ ir->operands[1]->type->is_vector()) {
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ assert(ir->operands[0]->type == ir->type);
+ }
+ break;
+
+ case ir_binop_abs_sub:
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ assert(ir->operands[0]->type->is_integer_32_64());
+ assert(ir->operands[0]->type->vector_elements ==
+ ir->type->vector_elements);
+ assert(ir->type->base_type == GLSL_TYPE_UINT ||
+ ir->type->base_type == GLSL_TYPE_UINT64);
+ break;
+
+ case ir_binop_add_sat:
+ case ir_binop_sub_sat:
+ case ir_binop_avg:
+ case ir_binop_avg_round:
+ assert(ir->type == ir->operands[0]->type);
+ assert(ir->type == ir->operands[1]->type);
+ assert(ir->type->is_integer_32_64());
+ break;
+
+ case ir_binop_mul_32x16:
+ case ir_binop_imul_high:
+ assert(ir->type == ir->operands[0]->type);
+ assert(ir->type == ir->operands[1]->type);
+ assert(ir->type->is_integer_32());
+ break;
+
+ case ir_binop_carry:
+ case ir_binop_borrow:
+ assert(ir->type == ir->operands[0]->type);
+ assert(ir->type == ir->operands[1]->type);
+ assert(ir->type->base_type == GLSL_TYPE_UINT);
+ break;
+
+ case ir_binop_less:
+ case ir_binop_gequal:
+ case ir_binop_equal:
+ case ir_binop_nequal:
+ /* The semantics of the IR operators differ from the GLSL <, >, <=, >=,
+ * ==, and != operators. The IR operators perform a component-wise
+ * comparison on scalar or vector types and return a boolean scalar or
+ * vector type of the same size.
+ */
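+      /* For example, ir_binop_less on two vec3 operands yields a bvec3,
+       * mirroring GLSL's lessThan() built-in rather than the scalar <
+       * operator.
+       */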
+ assert(ir->type->is_boolean());
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ assert(ir->operands[0]->type->is_vector()
+ || ir->operands[0]->type->is_scalar());
+ assert(ir->operands[0]->type->vector_elements
+ == ir->type->vector_elements);
+ break;
+
+ case ir_binop_all_equal:
+ case ir_binop_any_nequal:
+ /* GLSL == and != operate on scalars, vectors, matrices and arrays, and
+ * return a scalar boolean. The IR matches that.
+ */
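+      /* For example, ir_binop_all_equal on two vec4 operands folds the four
+       * component comparisons into a single bool, as GLSL's == does.
+       */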
+ assert(ir->type == glsl_type::bool_type);
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ break;
+
+ case ir_binop_lshift:
+ case ir_binop_rshift:
+ assert(ir->operands[0]->type->is_integer_32_64() &&
+ ir->operands[1]->type->is_integer_32());
+ if (ir->operands[0]->type->is_scalar()) {
+ assert(ir->operands[1]->type->is_scalar());
+ }
+ if (ir->operands[0]->type->is_vector() &&
+ ir->operands[1]->type->is_vector()) {
+ assert(ir->operands[0]->type->components() ==
+ ir->operands[1]->type->components());
+ }
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_binop_bit_and:
+ case ir_binop_bit_xor:
+ case ir_binop_bit_or:
+ assert(ir->operands[0]->type->base_type ==
+ ir->operands[1]->type->base_type);
+ assert(ir->type->is_integer_32_64());
+ if (ir->operands[0]->type->is_vector() &&
+ ir->operands[1]->type->is_vector()) {
+ assert(ir->operands[0]->type->vector_elements ==
+ ir->operands[1]->type->vector_elements);
+ }
+ break;
+
+ case ir_binop_logic_and:
+ case ir_binop_logic_xor:
+ case ir_binop_logic_or:
+ assert(ir->type->is_boolean());
+ assert(ir->operands[0]->type->is_boolean());
+ assert(ir->operands[1]->type->is_boolean());
+ break;
+
+ case ir_binop_dot:
+ assert(ir->type == glsl_type::float_type ||
+ ir->type == glsl_type::double_type ||
+ ir->type == glsl_type::float16_t_type);
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->operands[0]->type->is_vector());
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ break;
+
+ case ir_binop_ubo_load:
+ assert(ir->operands[0]->type == glsl_type::uint_type);
+
+ assert(ir->operands[1]->type == glsl_type::uint_type);
+ break;
+
+ case ir_binop_ldexp:
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->operands[1]->type->base_type == GLSL_TYPE_INT);
+ assert(ir->operands[0]->type->components() ==
+ ir->operands[1]->type->components());
+ break;
+
+ case ir_binop_vector_extract:
+ assert(ir->operands[0]->type->is_vector());
+ assert(ir->operands[1]->type->is_scalar()
+ && ir->operands[1]->type->is_integer_32());
+ break;
+
+ case ir_binop_interpolate_at_offset:
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->operands[1]->type->components() == 2);
+ assert(ir->operands[1]->type->is_float());
+ break;
+
+ case ir_binop_interpolate_at_sample:
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->operands[0]->type->is_float());
+ assert(ir->operands[1]->type == glsl_type::int_type);
+ break;
+
+ case ir_binop_atan2:
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->operands[1]->type == ir->operands[0]->type);
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_triop_fma:
+ assert(ir->type->is_float_16_32_64());
+ assert(ir->type == ir->operands[0]->type);
+ assert(ir->type == ir->operands[1]->type);
+ assert(ir->type == ir->operands[2]->type);
+ break;
+
+ case ir_triop_lrp:
+ assert(ir->operands[0]->type->is_float_16_32_64());
+ assert(ir->operands[0]->type == ir->operands[1]->type);
+ assert(ir->operands[2]->type == ir->operands[0]->type ||
+ ir->operands[2]->type == glsl_type::float_type ||
+ ir->operands[2]->type == glsl_type::double_type ||
+ ir->operands[2]->type == glsl_type::float16_t_type);
+ break;
+
+ case ir_triop_csel:
+ assert(ir->operands[0]->type->is_boolean());
+ assert(ir->type->vector_elements == ir->operands[0]->type->vector_elements);
+ assert(ir->type == ir->operands[1]->type);
+ assert(ir->type == ir->operands[2]->type);
+ break;
+
+ case ir_triop_bitfield_extract:
+ assert(ir->type->is_integer_32());
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->operands[1]->type == ir->type);
+ assert(ir->operands[2]->type == ir->type);
+ break;
+
+ case ir_triop_vector_insert:
+ assert(ir->operands[0]->type->is_vector());
+ assert(ir->operands[1]->type->is_scalar());
+ assert(ir->operands[0]->type->base_type == ir->operands[1]->type->base_type);
+ assert(ir->operands[2]->type->is_scalar()
+ && ir->operands[2]->type->is_integer_32());
+ assert(ir->type == ir->operands[0]->type);
+ break;
+
+ case ir_quadop_bitfield_insert:
+ assert(ir->type->is_integer_32());
+ assert(ir->operands[0]->type == ir->type);
+ assert(ir->operands[1]->type == ir->type);
+ assert(ir->operands[2]->type == ir->type);
+ assert(ir->operands[3]->type == ir->type);
+ break;
+
+ case ir_quadop_vector:
+ /* The vector operator collects some number of scalars and generates a
+ * vector from them.
+ *
+ * - All of the operands must be scalar.
+    * - The number of operands must match the size of the resulting vector.
+ * - Base type of the operands must match the base type of the result.
+ */
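+      /* For example, a vec3 result must be built from exactly three float
+       * scalar operands, with operands[3] left NULL.
+       */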
+ assert(ir->type->is_vector());
+ switch (ir->type->vector_elements) {
+ case 2:
+ assert(ir->operands[0]->type->is_scalar());
+ assert(ir->operands[0]->type->base_type == ir->type->base_type);
+ assert(ir->operands[1]->type->is_scalar());
+ assert(ir->operands[1]->type->base_type == ir->type->base_type);
+ assert(ir->operands[2] == NULL);
+ assert(ir->operands[3] == NULL);
+ break;
+ case 3:
+ assert(ir->operands[0]->type->is_scalar());
+ assert(ir->operands[0]->type->base_type == ir->type->base_type);
+ assert(ir->operands[1]->type->is_scalar());
+ assert(ir->operands[1]->type->base_type == ir->type->base_type);
+ assert(ir->operands[2]->type->is_scalar());
+ assert(ir->operands[2]->type->base_type == ir->type->base_type);
+ assert(ir->operands[3] == NULL);
+ break;
+ case 4:
+ assert(ir->operands[0]->type->is_scalar());
+ assert(ir->operands[0]->type->base_type == ir->type->base_type);
+ assert(ir->operands[1]->type->is_scalar());
+ assert(ir->operands[1]->type->base_type == ir->type->base_type);
+ assert(ir->operands[2]->type->is_scalar());
+ assert(ir->operands[2]->type->base_type == ir->type->base_type);
+ assert(ir->operands[3]->type->is_scalar());
+ assert(ir->operands[3]->type->base_type == ir->type->base_type);
+ break;
+ default:
+ /* The is_vector assertion above should prevent execution from ever
+ * getting here.
+ */
+ assert(!"Should not get here.");
+ break;
+ }
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_leave(ir_swizzle *ir)
+{
+ unsigned int chans[4] = {ir->mask.x, ir->mask.y, ir->mask.z, ir->mask.w};
+
+ for (unsigned int i = 0; i < ir->type->vector_elements; i++) {
+ if (chans[i] >= ir->val->type->vector_elements) {
+ printf("ir_swizzle @ %p specifies a channel not present "
+ "in the value.\n", (void *) ir);
+ ir->print();
+ abort();
+ }
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit(ir_variable *ir)
+{
+ /* An ir_variable is the one thing that can (and will) appear multiple times
+ * in an IR tree. It is added to the hashtable so that it can be used
+ * in the ir_dereference_variable handler to ensure that a variable is
+ * declared before it is dereferenced.
+ */
+ if (ir->name && ir->is_name_ralloced())
+ assert(ralloc_parent(ir->name) == ir);
+
+ _mesa_set_add(ir_set, ir);
+
+ /* If a variable is an array, verify that the maximum array index is in
+ * bounds. There was once an error in AST-to-HIR conversion that set this
+ * to be out of bounds.
+ */
+ if (ir->type->array_size() > 0) {
+ if (ir->data.max_array_access >= (int)ir->type->length) {
+ printf("ir_variable has maximum access out of bounds (%d vs %d)\n",
+ ir->data.max_array_access, ir->type->length - 1);
+ ir->print();
+ abort();
+ }
+ }
+
+ /* If a variable is an interface block (or an array of interface blocks),
+ * verify that the maximum array index for each interface member is in
+ * bounds.
+ */
+ if (ir->is_interface_instance()) {
+ const glsl_struct_field *fields =
+ ir->get_interface_type()->fields.structure;
+ for (unsigned i = 0; i < ir->get_interface_type()->length; i++) {
+ if (fields[i].type->array_size() > 0 &&
+ !fields[i].implicit_sized_array) {
+ const int *const max_ifc_array_access =
+ ir->get_max_ifc_array_access();
+
+ assert(max_ifc_array_access != NULL);
+
+ if (max_ifc_array_access[i] >= (int)fields[i].type->length) {
+ printf("ir_variable has maximum access out of bounds for "
+ "field %s (%d vs %d)\n", fields[i].name,
+ max_ifc_array_access[i], fields[i].type->length);
+ ir->print();
+ abort();
+ }
+ }
+ }
+ }
+
+ if (ir->constant_initializer != NULL && !ir->data.has_initializer) {
+ printf("ir_variable didn't have an initializer, but has a constant "
+ "initializer value.\n");
+ ir->print();
+ abort();
+ }
+
+ if (ir->data.mode == ir_var_uniform
+ && is_gl_identifier(ir->name)
+ && ir->get_state_slots() == NULL) {
+ printf("built-in uniform has no state\n");
+ ir->print();
+ abort();
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_enter(ir_assignment *ir)
+{
+ const ir_dereference *const lhs = ir->lhs;
+ if (lhs->type->is_scalar() || lhs->type->is_vector()) {
+ if (ir->write_mask == 0) {
+ printf("Assignment LHS is %s, but write mask is 0:\n",
+ lhs->type->is_scalar() ? "scalar" : "vector");
+ ir->print();
+ abort();
+ }
+
+ int lhs_components = 0;
+ for (int i = 0; i < 4; i++) {
+ if (ir->write_mask & (1 << i))
+ lhs_components++;
+ }
+
+ if (lhs_components != ir->rhs->type->vector_elements) {
+ printf("Assignment count of LHS write mask channels enabled not\n"
+ "matching RHS vector size (%d LHS, %d RHS).\n",
+ lhs_components, ir->rhs->type->vector_elements);
+ ir->print();
+ abort();
+ }
+ }
+
+ this->validate_ir(ir, this->data_enter);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_validate::visit_enter(ir_call *ir)
+{
+ ir_function_signature *const callee = ir->callee;
+
+ if (callee->ir_type != ir_type_function_signature) {
+ printf("IR called by ir_call is not ir_function_signature!\n");
+ abort();
+ }
+
+ if (ir->return_deref) {
+ if (ir->return_deref->type != callee->return_type) {
+ printf("callee type %s does not match return storage type %s\n",
+ callee->return_type->name, ir->return_deref->type->name);
+ abort();
+ }
+ } else if (callee->return_type != glsl_type::void_type) {
+ printf("ir_call has non-void callee but no return storage\n");
+ abort();
+ }
+
+ const exec_node *formal_param_node = callee->parameters.get_head_raw();
+ const exec_node *actual_param_node = ir->actual_parameters.get_head_raw();
+ while (true) {
+ if (formal_param_node->is_tail_sentinel()
+ != actual_param_node->is_tail_sentinel()) {
+ printf("ir_call has the wrong number of parameters:\n");
+ goto dump_ir;
+ }
+ if (formal_param_node->is_tail_sentinel()) {
+ break;
+ }
+ const ir_variable *formal_param
+ = (const ir_variable *) formal_param_node;
+ const ir_rvalue *actual_param
+ = (const ir_rvalue *) actual_param_node;
+ if (formal_param->type != actual_param->type) {
+ printf("ir_call parameter type mismatch:\n");
+ goto dump_ir;
+ }
+ if (formal_param->data.mode == ir_var_function_out
+ || formal_param->data.mode == ir_var_function_inout) {
+ if (!actual_param->is_lvalue()) {
+ printf("ir_call out/inout parameters must be lvalues:\n");
+ goto dump_ir;
+ }
+ }
+ formal_param_node = formal_param_node->next;
+ actual_param_node = actual_param_node->next;
+ }
+
+ return visit_continue;
+
+dump_ir:
+ ir->print();
+ printf("callee:\n");
+ callee->print();
+ abort();
+ return visit_stop;
+}
+
+void
+ir_validate::validate_ir(ir_instruction *ir, void *data)
+{
+ struct set *ir_set = (struct set *) data;
+
+ if (_mesa_set_search(ir_set, ir)) {
+ printf("Instruction node present twice in ir tree:\n");
+ ir->print();
+ printf("\n");
+ abort();
+ }
+ _mesa_set_add(ir_set, ir);
+}
+
+#ifdef DEBUG
+static void
+check_node_type(ir_instruction *ir, void *data)
+{
+ (void) data;
+
+ if (ir->ir_type >= ir_type_max) {
+ printf("Instruction node with unset type\n");
+ ir->print(); printf("\n");
+ }
+ ir_rvalue *value = ir->as_rvalue();
+ if (value != NULL)
+ assert(value->type != glsl_type::error_type);
+}
+#endif
+
+void
+validate_ir_tree(exec_list *instructions)
+{
+ /* We shouldn't have any reason to validate IR in a release build,
+ * and it's half composed of assert()s anyway which wouldn't do
+ * anything.
+ */
+#ifdef DEBUG
+ ir_validate v;
+
+ v.run(instructions);
+
+ foreach_in_list(ir_instruction, ir, instructions) {
+ visit_tree(ir, check_node_type, NULL);
+ }
+#endif
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp
new file mode 100644
index 0000000000..47e9d0c897
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_variable_refcount.cpp
+ *
+ * Provides a visitor which produces a list of variables referenced,
+ * how many times they were referenced and assigned, and whether they
+ * were defined in the scope.
+ */
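+
+/* A minimal usage sketch (hypothetical caller, not part of this file):
+ *
+ *    ir_variable_refcount_visitor v;
+ *    v.run(instructions);     // instructions is an exec_list of IR nodes
+ *    ir_variable_refcount_entry *e = v.get_variable_entry(var);
+ *    if (e && e->referenced_count == e->assigned_count)
+ *       ;  // every reference is a write: a dead-code candidate
+ */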
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_variable_refcount.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+ir_variable_refcount_visitor::ir_variable_refcount_visitor()
+{
+ this->mem_ctx = ralloc_context(NULL);
+ this->ht = _mesa_pointer_hash_table_create(NULL);
+}
+
+static void
+free_entry(struct hash_entry *entry)
+{
+ ir_variable_refcount_entry *ivre = (ir_variable_refcount_entry *) entry->data;
+
+ /* Free assignment list */
+ exec_node *n;
+ while ((n = ivre->assign_list.pop_head()) != NULL) {
+ struct assignment_entry *assignment_entry =
+ exec_node_data(struct assignment_entry, n, link);
+ free(assignment_entry);
+ }
+
+ delete ivre;
+}
+
+ir_variable_refcount_visitor::~ir_variable_refcount_visitor()
+{
+ ralloc_free(this->mem_ctx);
+ _mesa_hash_table_destroy(this->ht, free_entry);
+}
+
+// constructor
+ir_variable_refcount_entry::ir_variable_refcount_entry(ir_variable *var)
+{
+ this->var = var;
+ assigned_count = 0;
+ declaration = false;
+ referenced_count = 0;
+}
+
+
+ir_variable_refcount_entry *
+ir_variable_refcount_visitor::get_variable_entry(ir_variable *var)
+{
+ assert(var);
+
+ struct hash_entry *e = _mesa_hash_table_search(this->ht, var);
+ if (e)
+ return (ir_variable_refcount_entry *)e->data;
+
+ ir_variable_refcount_entry *entry = new ir_variable_refcount_entry(var);
+ assert(entry->referenced_count == 0);
+ _mesa_hash_table_insert(this->ht, var, entry);
+
+ return entry;
+}
+
+
+ir_visitor_status
+ir_variable_refcount_visitor::visit(ir_variable *ir)
+{
+ ir_variable_refcount_entry *entry = this->get_variable_entry(ir);
+ if (entry)
+ entry->declaration = true;
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+ir_variable_refcount_visitor::visit(ir_dereference_variable *ir)
+{
+ ir_variable *const var = ir->variable_referenced();
+ ir_variable_refcount_entry *entry = this->get_variable_entry(var);
+
+ if (entry)
+ entry->referenced_count++;
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+ir_variable_refcount_visitor::visit_enter(ir_function_signature *ir)
+{
+ /* We don't want to descend into the function parameters and
+ * dead-code eliminate them, so just accept the body here.
+ */
+ visit_list_elements(this, &ir->body);
+ return visit_continue_with_parent;
+}
+
+
+ir_visitor_status
+ir_variable_refcount_visitor::visit_leave(ir_assignment *ir)
+{
+ ir_variable_refcount_entry *entry;
+ entry = this->get_variable_entry(ir->lhs->variable_referenced());
+ if (entry) {
+ entry->assigned_count++;
+
+      /* Build a list for dead code optimisation. Don't add the assignment if
+       * the variable was declared out of scope (outside the instruction
+       * stream). Also don't bother adding any more to the list once there
+       * are more references than assignments, as that means the variable is
+       * read somewhere and won't be optimised out.
+       */
+ assert(entry->referenced_count >= entry->assigned_count);
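+      /* referenced_count includes LHS dereferences, so equality here means
+       * the variable is written but never read.
+       */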
+ if (entry->referenced_count == entry->assigned_count) {
+ struct assignment_entry *assignment_entry =
+ (struct assignment_entry *)calloc(1, sizeof(*assignment_entry));
+ assignment_entry->assign = ir;
+ entry->assign_list.push_head(&assignment_entry->link);
+ }
+ }
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h
new file mode 100644
index 0000000000..4a90f08c91
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ir_variable_refcount.h
+ *
+ * Provides a visitor which produces a list of variables referenced,
+ * how many times they were referenced and assigned, and whether they
+ * were defined in the scope.
+ */
+
+#ifndef GLSL_IR_VARIABLE_REFCOUNT_H
+#define GLSL_IR_VARIABLE_REFCOUNT_H
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "compiler/glsl_types.h"
+
+struct assignment_entry {
+ exec_node link;
+ ir_assignment *assign;
+};
+
+class ir_variable_refcount_entry
+{
+public:
+ ir_variable_refcount_entry(ir_variable *var);
+
+ ir_variable *var; /* The key: the variable's pointer. */
+
+ /**
+ * List of assignments to the variable, if any.
+ * This is intended to be used for dead code optimisation and may
+ * not be a complete list.
+ */
+ exec_list assign_list;
+
+ /** Number of times the variable is referenced, including assignments. */
+ unsigned referenced_count;
+
+ /** Number of times the variable is assigned. */
+ unsigned assigned_count;
+
+ bool declaration; /* If the variable had a decl in the instruction stream */
+};
+
+class ir_variable_refcount_visitor : public ir_hierarchical_visitor {
+public:
+ ir_variable_refcount_visitor(void);
+ ~ir_variable_refcount_visitor(void);
+
+ virtual ir_visitor_status visit(ir_variable *);
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ virtual ir_visitor_status visit_enter(ir_function_signature *);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+
+ /**
+    * Find the variable in the hash table, inserting it if not present.
+ */
+ ir_variable_refcount_entry *get_variable_entry(ir_variable *var);
+
+ /**
+ * Hash table mapping ir_variable to ir_variable_refcount_entry.
+ */
+ struct hash_table *ht;
+
+ void *mem_ctx;
+};
+
+#endif /* GLSL_IR_VARIABLE_REFCOUNT_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_visitor.h
new file mode 100644
index 0000000000..0b3e548b89
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/ir_visitor.h
@@ -0,0 +1,98 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_VISITOR_H
+#define IR_VISITOR_H
+
+#ifdef __cplusplus
+/**
+ * Abstract base class of visitors of IR instruction trees
+ */
+class ir_visitor {
+public:
+ virtual ~ir_visitor()
+ {
+ /* empty */
+ }
+
+ /**
+ * \name Visit methods
+ *
+ * As typical for the visitor pattern, there must be one \c visit method for
+ * each concrete subclass of \c ir_instruction. Virtual base classes within
+ * the hierarchy should not have \c visit methods.
+ */
+ /*@{*/
+ virtual void visit(class ir_rvalue *) { assert(!"unhandled error_type"); }
+ virtual void visit(class ir_variable *) = 0;
+ virtual void visit(class ir_function_signature *) = 0;
+ virtual void visit(class ir_function *) = 0;
+ virtual void visit(class ir_expression *) = 0;
+ virtual void visit(class ir_texture *) = 0;
+ virtual void visit(class ir_swizzle *) = 0;
+ virtual void visit(class ir_dereference_variable *) = 0;
+ virtual void visit(class ir_dereference_array *) = 0;
+ virtual void visit(class ir_dereference_record *) = 0;
+ virtual void visit(class ir_assignment *) = 0;
+ virtual void visit(class ir_constant *) = 0;
+ virtual void visit(class ir_call *) = 0;
+ virtual void visit(class ir_return *) = 0;
+ virtual void visit(class ir_discard *) = 0;
+ virtual void visit(class ir_demote *) = 0;
+ virtual void visit(class ir_if *) = 0;
+ virtual void visit(class ir_loop *) = 0;
+ virtual void visit(class ir_loop_jump *) = 0;
+ virtual void visit(class ir_precision_statement *) = 0;
+ virtual void visit(class ir_typedecl_statement *) = 0;
+ virtual void visit(class ir_emit_vertex *) = 0;
+ virtual void visit(class ir_end_primitive *) = 0;
+ virtual void visit(class ir_barrier *) = 0;
+ /*@}*/
+};
+
+/* NOTE: function calls may never return, due to discards inside them.
+ * This is usually not an issue, but if it is, keep it in mind.
+ */
+class ir_control_flow_visitor : public ir_visitor {
+public:
+ virtual void visit(class ir_variable *) {}
+ virtual void visit(class ir_expression *) {}
+ virtual void visit(class ir_texture *) {}
+ virtual void visit(class ir_swizzle *) {}
+ virtual void visit(class ir_dereference_variable *) {}
+ virtual void visit(class ir_dereference_array *) {}
+ virtual void visit(class ir_dereference_record *) {}
+ virtual void visit(class ir_assignment *) {}
+ virtual void visit(class ir_constant *) {}
+ virtual void visit(class ir_call *) {}
+ virtual void visit(class ir_demote *) {}
+   virtual void visit(class ir_precision_statement *) {}
+   virtual void visit(class ir_typedecl_statement *) {}
+ virtual void visit(class ir_emit_vertex *) {}
+ virtual void visit(class ir_end_primitive *) {}
+ virtual void visit(class ir_barrier *) {}
+};
+#endif /* __cplusplus */
+
+#endif /* IR_VISITOR_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_atomics.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_atomics.cpp
new file mode 100644
index 0000000000..bc03d64633
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_atomics.cpp
@@ -0,0 +1,353 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "glsl_parser_extras.h"
+#include "ir.h"
+#include "ir_uniform.h"
+#include "linker.h"
+#include "main/errors.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+
+namespace {
+ /*
+ * Atomic counter uniform as seen by the program.
+ */
+ struct active_atomic_counter_uniform {
+ unsigned uniform_loc;
+ ir_variable *var;
+ };
+
+ /*
+    * Atomic counter buffer referenced by the program. There is a
+    * one-to-one correspondence between these and the objects that can be
+ * queried using glGetActiveAtomicCounterBufferiv().
+ */
+ struct active_atomic_buffer {
+ active_atomic_buffer()
+ : uniforms(0), num_uniforms(0), stage_counter_references(), size(0)
+ {}
+
+ ~active_atomic_buffer()
+ {
+ free(uniforms);
+ }
+
+ void push_back(unsigned uniform_loc, ir_variable *var)
+ {
+ active_atomic_counter_uniform *new_uniforms;
+
+ new_uniforms = (active_atomic_counter_uniform *)
+ realloc(uniforms, sizeof(active_atomic_counter_uniform) *
+ (num_uniforms + 1));
+
+ if (new_uniforms == NULL) {
+ _mesa_error_no_memory(__func__);
+ return;
+ }
+
+ uniforms = new_uniforms;
+ uniforms[num_uniforms].uniform_loc = uniform_loc;
+ uniforms[num_uniforms].var = var;
+ num_uniforms++;
+ }
+
+ active_atomic_counter_uniform *uniforms;
+ unsigned num_uniforms;
+ unsigned stage_counter_references[MESA_SHADER_STAGES];
+ unsigned size;
+ };
+
+ int
+ cmp_actives(const void *a, const void *b)
+ {
+ const active_atomic_counter_uniform *const first = (active_atomic_counter_uniform *) a;
+ const active_atomic_counter_uniform *const second = (active_atomic_counter_uniform *) b;
+
+ return int(first->var->data.offset) - int(second->var->data.offset);
+ }
+
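+   /* Two counters overlap when either one's offset falls inside the other's
+    * byte range [offset, offset + atomic_size).  For example, two 4-byte
+    * counters at offsets 0 and 4 are adjacent but do not overlap.
+    */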
+ bool
+ check_atomic_counters_overlap(const ir_variable *x, const ir_variable *y)
+ {
+ return ((x->data.offset >= y->data.offset &&
+ x->data.offset < y->data.offset + y->type->atomic_size()) ||
+ (y->data.offset >= x->data.offset &&
+ y->data.offset < x->data.offset + x->type->atomic_size()));
+ }
+
+ void
+ process_atomic_variable(const glsl_type *t, struct gl_shader_program *prog,
+ unsigned *uniform_loc, ir_variable *var,
+ active_atomic_buffer *const buffers,
+ unsigned *num_buffers, int *offset,
+ const unsigned shader_stage)
+ {
+ /* FIXME: Arrays of arrays get counted separately. For example:
+ * x1[3][3][2] = 9 uniforms, 18 atomic counters
+ * x2[3][2] = 3 uniforms, 6 atomic counters
+ * x3[2] = 1 uniform, 2 atomic counters
+ *
+ * However this code marks all the counters as active even when they
+ * might not be used.
+ */
+ if (t->is_array() && t->fields.array->is_array()) {
+ for (unsigned i = 0; i < t->length; i++) {
+ process_atomic_variable(t->fields.array, prog, uniform_loc,
+ var, buffers, num_buffers, offset,
+ shader_stage);
+ }
+ } else {
+ active_atomic_buffer *buf = &buffers[var->data.binding];
+ gl_uniform_storage *const storage =
+ &prog->data->UniformStorage[*uniform_loc];
+
+ /* If this is the first time the buffer is used, increment
+ * the counter of buffers used.
+ */
+ if (buf->size == 0)
+ (*num_buffers)++;
+
+ buf->push_back(*uniform_loc, var);
+
+ /* When checking for atomic counters we should count every member in
+ * an array as an atomic counter reference.
+ */
+ if (t->is_array())
+ buf->stage_counter_references[shader_stage] += t->length;
+ else
+ buf->stage_counter_references[shader_stage]++;
+ buf->size = MAX2(buf->size, *offset + t->atomic_size());
+
+ storage->offset = *offset;
+ *offset += t->atomic_size();
+
+ (*uniform_loc)++;
+ }
+ }
+
+ active_atomic_buffer *
+ find_active_atomic_counters(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ unsigned *num_buffers)
+ {
+ active_atomic_buffer *const buffers =
+ new active_atomic_buffer[ctx->Const.MaxAtomicBufferBindings];
+
+ *num_buffers = 0;
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; ++i) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (sh == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+
+ if (var && var->type->contains_atomic()) {
+ int offset = var->data.offset;
+ unsigned uniform_loc = var->data.location;
+ process_atomic_variable(var->type, prog, &uniform_loc,
+ var, buffers, num_buffers, &offset, i);
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < ctx->Const.MaxAtomicBufferBindings; i++) {
+ if (buffers[i].size == 0)
+ continue;
+
+ qsort(buffers[i].uniforms, buffers[i].num_uniforms,
+ sizeof(active_atomic_counter_uniform),
+ cmp_actives);
+
+ for (unsigned j = 1; j < buffers[i].num_uniforms; j++) {
+         /* If an overlapping counter is found, it must be a reference to the
+ * same counter from a different shader stage.
+ */
+ if (check_atomic_counters_overlap(buffers[i].uniforms[j-1].var,
+ buffers[i].uniforms[j].var)
+ && strcmp(buffers[i].uniforms[j-1].var->name,
+ buffers[i].uniforms[j].var->name) != 0) {
+ linker_error(prog, "Atomic counter %s declared at offset %d "
+ "which is already in use.",
+ buffers[i].uniforms[j].var->name,
+ buffers[i].uniforms[j].var->data.offset);
+ }
+ }
+ }
+ return buffers;
+ }
+}
+
+void
+link_assign_atomic_counter_resources(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ unsigned num_buffers;
+ unsigned num_atomic_buffers[MESA_SHADER_STAGES] = {};
+ active_atomic_buffer *abs =
+ find_active_atomic_counters(ctx, prog, &num_buffers);
+
+ prog->data->AtomicBuffers = rzalloc_array(prog->data, gl_active_atomic_buffer,
+ num_buffers);
+ prog->data->NumAtomicBuffers = num_buffers;
+
+ unsigned i = 0;
+ for (unsigned binding = 0;
+ binding < ctx->Const.MaxAtomicBufferBindings;
+ binding++) {
+
+ /* If the binding was not used, skip.
+ */
+ if (abs[binding].size == 0)
+ continue;
+
+ active_atomic_buffer &ab = abs[binding];
+ gl_active_atomic_buffer &mab = prog->data->AtomicBuffers[i];
+
+ /* Assign buffer-specific fields. */
+ mab.Binding = binding;
+ mab.MinimumSize = ab.size;
+ mab.Uniforms = rzalloc_array(prog->data->AtomicBuffers, GLuint,
+ ab.num_uniforms);
+ mab.NumUniforms = ab.num_uniforms;
+
+ /* Assign counter-specific fields. */
+ for (unsigned j = 0; j < ab.num_uniforms; j++) {
+ ir_variable *const var = ab.uniforms[j].var;
+ gl_uniform_storage *const storage =
+ &prog->data->UniformStorage[ab.uniforms[j].uniform_loc];
+
+ mab.Uniforms[j] = ab.uniforms[j].uniform_loc;
+ if (!var->data.explicit_binding)
+ var->data.binding = i;
+
+ storage->atomic_buffer_index = i;
+ storage->offset = var->data.offset;
+ storage->array_stride = (var->type->is_array() ?
+ var->type->without_array()->atomic_size() : 0);
+ if (!var->type->is_matrix())
+ storage->matrix_stride = 0;
+ }
+
+ /* Assign stage-specific fields. */
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
+ if (ab.stage_counter_references[j]) {
+ mab.StageReferences[j] = GL_TRUE;
+ num_atomic_buffers[j]++;
+ } else {
+ mab.StageReferences[j] = GL_FALSE;
+ }
+ }
+
+ i++;
+ }
+
+   /* Store a list of pointers to atomic buffers per stage, and store the
+    * index into the intra-stage buffer list in uniform storage.
+ */
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
+ if (prog->_LinkedShaders[j] && num_atomic_buffers[j] > 0) {
+ struct gl_program *gl_prog = prog->_LinkedShaders[j]->Program;
+ gl_prog->info.num_abos = num_atomic_buffers[j];
+ gl_prog->sh.AtomicBuffers =
+ rzalloc_array(gl_prog, gl_active_atomic_buffer *,
+ num_atomic_buffers[j]);
+
+ unsigned intra_stage_idx = 0;
+ for (unsigned i = 0; i < num_buffers; i++) {
+ struct gl_active_atomic_buffer *atomic_buffer =
+ &prog->data->AtomicBuffers[i];
+ if (atomic_buffer->StageReferences[j]) {
+ gl_prog->sh.AtomicBuffers[intra_stage_idx] = atomic_buffer;
+
+ for (unsigned u = 0; u < atomic_buffer->NumUniforms; u++) {
+ prog->data->UniformStorage[atomic_buffer->Uniforms[u]].opaque[j].index =
+ intra_stage_idx;
+ prog->data->UniformStorage[atomic_buffer->Uniforms[u]].opaque[j].active =
+ true;
+ }
+
+ intra_stage_idx++;
+ }
+ }
+ }
+ }
+
+ delete [] abs;
+ assert(i == num_buffers);
+}
+
+void
+link_check_atomic_counter_resources(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ unsigned num_buffers;
+ active_atomic_buffer *const abs =
+ find_active_atomic_counters(ctx, prog, &num_buffers);
+ unsigned atomic_counters[MESA_SHADER_STAGES] = {};
+ unsigned atomic_buffers[MESA_SHADER_STAGES] = {};
+ unsigned total_atomic_counters = 0;
+ unsigned total_atomic_buffers = 0;
+
+ /* Sum the required resources. Note that this counts buffers and
+ * counters referenced by several shader stages multiple times
+    * against the combined limit -- that's the behavior the spec
+ * requires.
+ */
+ for (unsigned i = 0; i < ctx->Const.MaxAtomicBufferBindings; i++) {
+ if (abs[i].size == 0)
+ continue;
+
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; ++j) {
+ const unsigned n = abs[i].stage_counter_references[j];
+
+ if (n) {
+ atomic_counters[j] += n;
+ total_atomic_counters += n;
+ atomic_buffers[j]++;
+ total_atomic_buffers++;
+ }
+ }
+ }
+
+ /* Check that they are within the supported limits. */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (atomic_counters[i] > ctx->Const.Program[i].MaxAtomicCounters)
+ linker_error(prog, "Too many %s shader atomic counters",
+ _mesa_shader_stage_to_string(i));
+
+ if (atomic_buffers[i] > ctx->Const.Program[i].MaxAtomicBuffers)
+ linker_error(prog, "Too many %s shader atomic counter buffers",
+ _mesa_shader_stage_to_string(i));
+ }
+
+ if (total_atomic_counters > ctx->Const.MaxCombinedAtomicCounters)
+ linker_error(prog, "Too many combined atomic counters");
+
+ if (total_atomic_buffers > ctx->Const.MaxCombinedAtomicBuffers)
+ linker_error(prog, "Too many combined atomic buffers");
+
+ delete [] abs;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_functions.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_functions.cpp
new file mode 100644
index 0000000000..4998d39dc5
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_functions.cpp
@@ -0,0 +1,339 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "glsl_symbol_table.h"
+#include "glsl_parser_extras.h"
+#include "ir.h"
+#include "program.h"
+#include "util/set.h"
+#include "util/hash_table.h"
+#include "linker.h"
+#include "main/mtypes.h"
+
+static ir_function_signature *
+find_matching_signature(const char *name, const exec_list *actual_parameters,
+ glsl_symbol_table *symbols);
+
+namespace {
+
+class call_link_visitor : public ir_hierarchical_visitor {
+public:
+ call_link_visitor(gl_shader_program *prog, gl_linked_shader *linked,
+ gl_shader **shader_list, unsigned num_shaders)
+ {
+ this->prog = prog;
+ this->shader_list = shader_list;
+ this->num_shaders = num_shaders;
+ this->success = true;
+ this->linked = linked;
+
+ this->locals = _mesa_pointer_set_create(NULL);
+ }
+
+ ~call_link_visitor()
+ {
+ _mesa_set_destroy(this->locals, NULL);
+ }
+
+ virtual ir_visitor_status visit(ir_variable *ir)
+ {
+ _mesa_set_add(locals, ir);
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_call *ir)
+ {
+ /* If ir is an ir_call from a function that was imported from another
+       * shader, callee will point to an ir_function_signature in the original
+ * shader. In this case the function signature MUST NOT BE MODIFIED.
+ * Doing so will modify the original shader. This may prevent that
+ * shader from being linkable in other programs.
+ */
+ const ir_function_signature *const callee = ir->callee;
+ assert(callee != NULL);
+ const char *const name = callee->function_name();
+
+ /* We don't actually need to find intrinsics; they're not real */
+ if (callee->is_intrinsic())
+ return visit_continue;
+
+ /* Determine if the requested function signature already exists in the
+ * final linked shader. If it does, use it as the target of the call.
+ */
+ ir_function_signature *sig =
+ find_matching_signature(name, &callee->parameters, linked->symbols);
+ if (sig != NULL) {
+ ir->callee = sig;
+ return visit_continue;
+ }
+
+ /* Try to find the signature in one of the other shaders that is being
+ * linked. If it's not found there, return an error.
+ */
+ for (unsigned i = 0; i < num_shaders; i++) {
+ sig = find_matching_signature(name, &ir->actual_parameters,
+ shader_list[i]->symbols);
+ if (sig)
+ break;
+ }
+
+ if (sig == NULL) {
+ /* FINISHME: Log the full signature of unresolved function.
+ */
+ linker_error(this->prog, "unresolved reference to function `%s'\n",
+ name);
+ this->success = false;
+ return visit_stop;
+ }
+
+ /* Find the prototype information in the linked shader. Generate any
+ * details that may be missing.
+ */
+ ir_function *f = linked->symbols->get_function(name);
+ if (f == NULL) {
+ f = new(linked) ir_function(name);
+
+ /* Add the new function to the linked IR. Put it at the end
+ * so that it comes after any global variable declarations
+ * that it refers to.
+ */
+ linked->symbols->add_function(f);
+ linked->ir->push_tail(f);
+ }
+
+ ir_function_signature *linked_sig =
+ f->exact_matching_signature(NULL, &callee->parameters);
+ if (linked_sig == NULL) {
+ linked_sig = new(linked) ir_function_signature(callee->return_type);
+ f->add_signature(linked_sig);
+ }
+
+      /* At this point linked_sig and callee may be the same. If ir is an
+ * ir_call from linked then linked_sig and callee will be
+ * ir_function_signatures that have no definitions (is_defined is false).
+ */
+ assert(!linked_sig->is_defined);
+ assert(linked_sig->body.is_empty());
+
+ /* Create an in-place clone of the function definition. This multistep
+ * process introduces some complexity here, but it has some advantages.
+       * The parameter list and the function body are cloned separately.
+ * The clone of the parameter list is used to prime the hashtable used
+ * to replace variable references in the cloned body.
+ *
+ * The big advantage is that the ir_function_signature does not change.
+ * This means that we don't have to process the rest of the IR tree to
+ * patch ir_call nodes. In addition, there is no way to remove or
+       * replace a signature stored in a function. One could easily be added,
+ * but this avoids the need.
+ */
+ struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
+
+ exec_list formal_parameters;
+ foreach_in_list(const ir_instruction, original, &sig->parameters) {
+ assert(const_cast<ir_instruction *>(original)->as_variable());
+
+ ir_instruction *copy = original->clone(linked, ht);
+ formal_parameters.push_tail(copy);
+ }
+
+ linked_sig->replace_parameters(&formal_parameters);
+
+ linked_sig->intrinsic_id = sig->intrinsic_id;
+
+ if (sig->is_defined) {
+ foreach_in_list(const ir_instruction, original, &sig->body) {
+ ir_instruction *copy = original->clone(linked, ht);
+ linked_sig->body.push_tail(copy);
+ }
+
+ linked_sig->is_defined = true;
+ }
+
+ _mesa_hash_table_destroy(ht, NULL);
+
+ /* Patch references inside the function to things outside the function
+ * (i.e., function calls and global variables).
+ */
+ linked_sig->accept(this);
+
+ ir->callee = linked_sig;
+
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_call *ir)
+ {
+ /* Traverse list of function parameters, and for array parameters
+ * propagate max_array_access. Otherwise arrays that are only referenced
+ * from inside functions via function parameters will be incorrectly
+ * optimized. This will lead to incorrect code being generated (or worse).
+ * Do it when leaving the node so the children would propagate their
+ * array accesses first.
+ */
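+      /* For example, if a function body indexes its array parameter at
+       * element 7, the caller's array argument ends up with a
+       * max_array_access of at least 7.
+       */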
+
+ const exec_node *formal_param_node = ir->callee->parameters.get_head();
+ if (formal_param_node) {
+ const exec_node *actual_param_node = ir->actual_parameters.get_head();
+ while (!actual_param_node->is_tail_sentinel()) {
+ ir_variable *formal_param = (ir_variable *) formal_param_node;
+ ir_rvalue *actual_param = (ir_rvalue *) actual_param_node;
+
+ formal_param_node = formal_param_node->get_next();
+ actual_param_node = actual_param_node->get_next();
+
+ if (formal_param->type->is_array()) {
+ ir_dereference_variable *deref = actual_param->as_dereference_variable();
+ if (deref && deref->var && deref->var->type->is_array()) {
+ deref->var->data.max_array_access =
+ MAX2(formal_param->data.max_array_access,
+ deref->var->data.max_array_access);
+ }
+ }
+ }
+ }
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (_mesa_set_search(locals, ir->var) == NULL) {
+         /* A variable that is not local to the function must be a global,
+          * so try to find it in the shader's symbol table. If it is not
+          * found, then it's a global that *MUST* be defined in the original
+          * shader.
+ */
+ ir_variable *var = linked->symbols->get_variable(ir->var->name);
+ if (var == NULL) {
+ /* Clone the ir_variable that the dereference already has and add
+ * it to the linked shader.
+ */
+ var = ir->var->clone(linked, NULL);
+ linked->symbols->add_variable(var);
+ linked->ir->push_head(var);
+ } else {
+ if (var->type->is_array()) {
+ /* It is possible to have a global array declared in multiple
+ * shaders without a size. The array is implicitly sized by
+ * the maximal access to it in *any* shader. Because of this,
+ * we need to track the maximal access to the array as linking
+ * pulls more functions in that access the array.
+ */
+ var->data.max_array_access =
+ MAX2(var->data.max_array_access,
+ ir->var->data.max_array_access);
+
+ if (var->type->length == 0 && ir->var->type->length != 0)
+ var->type = ir->var->type;
+ }
+ if (var->is_interface_instance()) {
+ /* Similarly, we need implicit sizes of arrays within interface
+ * blocks to be sized by the maximal access in *any* shader.
+ */
+ int *const linked_max_ifc_array_access =
+ var->get_max_ifc_array_access();
+ int *const ir_max_ifc_array_access =
+ ir->var->get_max_ifc_array_access();
+
+ assert(linked_max_ifc_array_access != NULL);
+ assert(ir_max_ifc_array_access != NULL);
+
+ for (unsigned i = 0; i < var->get_interface_type()->length;
+ i++) {
+ linked_max_ifc_array_access[i] =
+ MAX2(linked_max_ifc_array_access[i],
+ ir_max_ifc_array_access[i]);
+ }
+ }
+ }
+
+ ir->var = var;
+ }
+
+ return visit_continue;
+ }
+
+ /** Was function linking successful? */
+ bool success;
+
+private:
+ /**
+ * Shader program being linked
+ *
+ * This is only used for logging error messages.
+ */
+ gl_shader_program *prog;
+
+ /** List of shaders available for linking. */
+ gl_shader **shader_list;
+
+ /** Number of shaders available for linking. */
+ unsigned num_shaders;
+
+ /**
+ * Final linked shader
+ *
+ * This is used two ways. It is used to find global variables in the
+ * linked shader that are accessed by the function. It is also used to add
+ * global variables from the shader where the function originated.
+ */
+ gl_linked_shader *linked;
+
+ /**
+ * Table of variables local to the function.
+ */
+ set *locals;
+};
+
+} /* anonymous namespace */
+
+/**
+ * Searches a list of shaders for a particular function definition
+ */
+ir_function_signature *
+find_matching_signature(const char *name, const exec_list *actual_parameters,
+ glsl_symbol_table *symbols)
+{
+ ir_function *const f = symbols->get_function(name);
+
+ if (f) {
+ ir_function_signature *sig =
+ f->matching_signature(NULL, actual_parameters, false);
+
+ if (sig && (sig->is_defined || sig->is_intrinsic()))
+ return sig;
+ }
+
+ return NULL;
+}
+
+
+bool
+link_function_calls(gl_shader_program *prog, gl_linked_shader *main,
+ gl_shader **shader_list, unsigned num_shaders)
+{
+ call_link_visitor v(prog, main, shader_list, num_shaders);
+
+ v.run(main->ir);
+ return v.success;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp
new file mode 100644
index 0000000000..4471b41cf0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp
@@ -0,0 +1,538 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file link_interface_blocks.cpp
+ * Linker support for GLSL's interface blocks.
+ */
+
+#include "ir.h"
+#include "glsl_symbol_table.h"
+#include "linker.h"
+#include "main/macros.h"
+#include "main/mtypes.h"
+#include "util/hash_table.h"
+#include "util/u_string.h"
+
+
+namespace {
+
+/**
+ * Return true if interface members mismatch and its not allowed by GLSL.
+ */
+static bool
+interstage_member_mismatch(struct gl_shader_program *prog,
+                           const glsl_type *c, const glsl_type *p)
+{
+ if (c->length != p->length)
+ return true;
+
+ for (unsigned i = 0; i < c->length; i++) {
+ if (c->fields.structure[i].type != p->fields.structure[i].type)
+ return true;
+ if (strcmp(c->fields.structure[i].name,
+ p->fields.structure[i].name) != 0)
+ return true;
+ if (c->fields.structure[i].location !=
+ p->fields.structure[i].location)
+ return true;
+ if (c->fields.structure[i].patch !=
+ p->fields.structure[i].patch)
+ return true;
+
+ /* From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.40 spec:
+ *
+ * "It is a link-time error if, within the same stage, the
+ * interpolation qualifiers of variables of the same name do not
+ * match."
+ */
+ if (prog->IsES || prog->data->Version < 440)
+ if (c->fields.structure[i].interpolation !=
+ p->fields.structure[i].interpolation)
+ return true;
+
+ /* From Section 4.3.4 (Input Variables) of the GLSL ES 3.0 spec:
+ *
+ * "The output of the vertex shader and the input of the fragment
+ * shader form an interface. For this interface, vertex shader
+ * output variables and fragment shader input variables of the same
+ * name must match in type and qualification (other than precision
+    * and out matching to in)."
+ *
+ * The table in Section 9.2.1 Linked Shaders of the GLSL ES 3.1 spec
+ * says that centroid no longer needs to match for varyings.
+ *
+ * The table in Section 9.2.1 Linked Shaders of the GLSL ES 3.2 spec
+ * says that sample need not match for varyings.
+ */
+ if (!prog->IsES || prog->data->Version < 310)
+ if (c->fields.structure[i].centroid !=
+ p->fields.structure[i].centroid)
+ return true;
+ if (!prog->IsES)
+ if (c->fields.structure[i].sample !=
+ p->fields.structure[i].sample)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Check if two interfaces match, according to intrastage interface matching
+ * rules. If they do, and the first interface uses an unsized array, it will
+ * be updated to reflect the array size declared in the second interface.
+ */
+bool
+intrastage_match(ir_variable *a,
+ ir_variable *b,
+ struct gl_shader_program *prog,
+ bool match_precision)
+{
+ /* Types must match. */
+ if (a->get_interface_type() != b->get_interface_type()) {
+ /* Exception: if both the interface blocks are implicitly declared,
+ * don't force their types to match. They might mismatch due to the two
+ * shaders using different GLSL versions, and that's ok.
+ */
+ if ((a->data.how_declared != ir_var_declared_implicitly ||
+ b->data.how_declared != ir_var_declared_implicitly) &&
+ (!prog->IsES ||
+ interstage_member_mismatch(prog, a->get_interface_type(),
+ b->get_interface_type())))
+ return false;
+ }
+
+ /* Presence/absence of interface names must match. */
+ if (a->is_interface_instance() != b->is_interface_instance())
+ return false;
+
+ /* For uniforms, instance names need not match. For shader ins/outs,
+ * it's not clear from the spec whether they need to match, but
+ * Mesa's implementation relies on them matching.
+ */
+ if (a->is_interface_instance() && b->data.mode != ir_var_uniform &&
+ b->data.mode != ir_var_shader_storage &&
+ strcmp(a->name, b->name) != 0) {
+ return false;
+ }
+
+ bool type_match = (match_precision ?
+ a->type == b->type :
+ a->type->compare_no_precision(b->type));
+
+   /* If a block is an array then it must match across shaders.
+    * Unsized arrays are also processed and matched against sized arrays.
+ */
+ if (!type_match && (b->type->is_array() || a->type->is_array()) &&
+ (b->is_interface_instance() || a->is_interface_instance()) &&
+ !validate_intrastage_arrays(prog, b, a, match_precision))
+ return false;
+
+ return true;
+}
+
+/**
+ * Check if two interfaces match, according to interstage (in/out) interface
+ * matching rules.
+ *
+ * If \c extra_array_level is true, the consumer interface is required to be
+ * an array and the producer interface is required to be a non-array.
+ * This is used for tessellation control and geometry shader consumers.
+ */
+static bool
+interstage_match(struct gl_shader_program *prog, ir_variable *producer,
+ ir_variable *consumer, bool extra_array_level)
+{
+ /* Types must match. */
+ if (consumer->get_interface_type() != producer->get_interface_type()) {
+ /* Exception: if both the interface blocks are implicitly declared,
+ * don't force their types to match. They might mismatch due to the two
+ * shaders using different GLSL versions, and that's ok.
+ *
+ * Also we store some member information such as interpolation in
+ * glsl_type that doesn't always have to match across shader stages.
+       * Therefore we make a pass over each member's glsl_struct_field to make
+ * sure we don't reject shaders where fields don't need to match.
+ */
+ if ((consumer->data.how_declared != ir_var_declared_implicitly ||
+ producer->data.how_declared != ir_var_declared_implicitly) &&
+ interstage_member_mismatch(prog, consumer->get_interface_type(),
+ producer->get_interface_type()))
+ return false;
+ }
+
+   /* Ignore the outermost array when there is an extra array level, i.e. for
+    * tessellation control and geometry shader consumers. */
+ const glsl_type *consumer_instance_type;
+ if (extra_array_level) {
+ consumer_instance_type = consumer->type->fields.array;
+ } else {
+ consumer_instance_type = consumer->type;
+ }
+
+ /* If a block is an array then it must match across shaders.
+ * Since unsized arrays have been ruled out, we can check this by just
+ * making sure the types are equal.
+ */
+ if ((consumer->is_interface_instance() &&
+ consumer_instance_type->is_array()) ||
+ (producer->is_interface_instance() &&
+ producer->type->is_array())) {
+ if (consumer_instance_type != producer->type)
+ return false;
+ }
+
+ return true;
+}
+
+
+/**
+ * This class keeps track of a mapping from an interface block name to the
+ * necessary information about that interface block to determine whether to
+ * generate a link error.
+ *
+ * Note: this class is expected to be short lived, so it doesn't make copies
+ * of the strings it references; it simply borrows the pointers from the
+ * ir_variable class.
+ */
+class interface_block_definitions
+{
+public:
+ interface_block_definitions()
+ : mem_ctx(ralloc_context(NULL)),
+ ht(_mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal))
+ {
+ }
+
+ ~interface_block_definitions()
+ {
+ ralloc_free(mem_ctx);
+ _mesa_hash_table_destroy(ht, NULL);
+ }
+
+ /**
+ * Lookup the interface definition. Return NULL if none is found.
+ */
+ ir_variable *lookup(ir_variable *var)
+ {
+ if (var->data.explicit_location &&
+ var->data.location >= VARYING_SLOT_VAR0) {
+ char location_str[11];
+ snprintf(location_str, 11, "%d", var->data.location);
+
+ const struct hash_entry *entry =
+ _mesa_hash_table_search(ht, location_str);
+ return entry ? (ir_variable *) entry->data : NULL;
+ } else {
+ const struct hash_entry *entry =
+ _mesa_hash_table_search(ht,
+ var->get_interface_type()->without_array()->name);
+ return entry ? (ir_variable *) entry->data : NULL;
+ }
+ }
+
+ /**
+ * Add a new interface definition.
+ */
+ void store(ir_variable *var)
+ {
+ if (var->data.explicit_location &&
+ var->data.location >= VARYING_SLOT_VAR0) {
+ /* If explicit location is given then lookup the variable by location.
+ * We turn the location into a string and use this as the hash key
+ * rather than the name. Note: We allocate enough space for a 32-bit
+       * unsigned location value, which is overkill but future-proof.
+ */
+ char location_str[11];
+ snprintf(location_str, 11, "%d", var->data.location);
+ _mesa_hash_table_insert(ht, ralloc_strdup(mem_ctx, location_str), var);
+ } else {
+ _mesa_hash_table_insert(ht,
+ var->get_interface_type()->without_array()->name, var);
+ }
+ }
+
+private:
+ /**
+ * Ralloc context for data structures allocated by this class.
+ */
+ void *mem_ctx;
+
+ /**
+ * Hash table mapping interface block name to an \c
+ * ir_variable.
+ */
+ hash_table *ht;
+};
+
+
+} /* anonymous namespace */
+
+
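+/* For example (hypothetical): linking two shaders of the same stage that
+ * both declare "uniform Data { vec4 c; };" but with different member
+ * types triggers the "definitions of interface block ... do not match"
+ * error below.
+ */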
+void
+validate_intrastage_interface_blocks(struct gl_shader_program *prog,
+ const gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ interface_block_definitions in_interfaces;
+ interface_block_definitions out_interfaces;
+ interface_block_definitions uniform_interfaces;
+ interface_block_definitions buffer_interfaces;
+
+ for (unsigned int i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var)
+ continue;
+
+ const glsl_type *iface_type = var->get_interface_type();
+
+ if (iface_type == NULL)
+ continue;
+
+ interface_block_definitions *definitions;
+ switch (var->data.mode) {
+ case ir_var_shader_in:
+ definitions = &in_interfaces;
+ break;
+ case ir_var_shader_out:
+ definitions = &out_interfaces;
+ break;
+ case ir_var_uniform:
+ definitions = &uniform_interfaces;
+ break;
+ case ir_var_shader_storage:
+ definitions = &buffer_interfaces;
+ break;
+ default:
+         /* Only in, out, uniform, and buffer interfaces are legal, so we
+          * should never get here.
+ */
+ assert(!"illegal interface type");
+ continue;
+ }
+
+ ir_variable *prev_def = definitions->lookup(var);
+ if (prev_def == NULL) {
+ /* This is the first time we've seen the interface, so save
+ * it into the appropriate data structure.
+ */
+ definitions->store(var);
+ } else if (!intrastage_match(prev_def, var, prog,
+ true /* match_precision */)) {
+ linker_error(prog, "definitions of interface block `%s' do not"
+ " match\n", iface_type->name);
+ return;
+ }
+ }
+ }
+}
+
+static bool
+is_builtin_gl_in_block(ir_variable *var, int consumer_stage)
+{
+ return !strcmp(var->name, "gl_in") &&
+ (consumer_stage == MESA_SHADER_TESS_CTRL ||
+ consumer_stage == MESA_SHADER_TESS_EVAL ||
+ consumer_stage == MESA_SHADER_GEOMETRY);
+}
+
+void
+validate_interstage_inout_blocks(struct gl_shader_program *prog,
+ const gl_linked_shader *producer,
+ const gl_linked_shader *consumer)
+{
+ interface_block_definitions definitions;
+ /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
+ const bool extra_array_level = (producer->Stage == MESA_SHADER_VERTEX &&
+ consumer->Stage != MESA_SHADER_FRAGMENT) ||
+ consumer->Stage == MESA_SHADER_GEOMETRY;
+
+ /* Check that block re-declarations of gl_PerVertex are compatible
+ * across shaders: From OpenGL Shading Language 4.5, section
+ * "7.1 Built-In Language Variables", page 130 of the PDF:
+ *
+ * "If multiple shaders using members of a built-in block belonging
+ * to the same interface are linked together in the same program,
+ * they must all redeclare the built-in block in the same way, as
+ * described in section 4.3.9 “Interface Blocks” for interface-block
+ * matching, or a link-time error will result."
+ *
+ * This is done explicitly outside of iterating the member variable
+ * declarations because it is possible that the variables are not used and
+ * so they would have been optimised out.
+ */
+ const glsl_type *consumer_iface =
+ consumer->symbols->get_interface("gl_PerVertex",
+ ir_var_shader_in);
+
+ const glsl_type *producer_iface =
+ producer->symbols->get_interface("gl_PerVertex",
+ ir_var_shader_out);
+
+ if (producer_iface && consumer_iface &&
+ interstage_member_mismatch(prog, consumer_iface, producer_iface)) {
+ linker_error(prog, "Incompatible or missing gl_PerVertex re-declaration "
+ "in consecutive shaders");
+ return;
+ }
+
+   /* Desktop OpenGL requires redeclaration of the built-in interfaces for
+    * SSO programs. The checks below implement the following rules:
+ *
+ * From Section 7.4 (Program Pipeline Objects) of the OpenGL 4.6 Core
+ * spec:
+ *
+ * "To use any built-in input or output in the gl_PerVertex and
+ * gl_PerFragment blocks in separable program objects, shader code
+ * must redeclare those blocks prior to use. A separable program
+ * will fail to link if:
+ *
+ * it contains multiple shaders of a single type with different
+ * redeclarations of these built-in input and output blocks; or
+ *
+ * any shader uses a built-in block member not found in the
+ * redeclaration of that block."
+ *
+ * ARB_separate_shader_objects issues section (issue #28) states that
+ * redeclaration is not required for GLSL shaders using #version 140 or
+ * earlier (since interface blocks are not possible with older versions).
+ *
+ * From Section 7.4.1 (Shader Interface Matching) of the OpenGL ES 3.1
+ * spec:
+ *
+ * "Built-in inputs or outputs do not affect interface matching."
+ *
+    * GL_OES_shader_io_blocks adds the following:
+ *
+ * "When using any built-in input or output in the gl_PerVertex block
+ * in separable program objects, shader code may redeclare that block
+ * prior to use. If the shader does not redeclare the block, the
+ * intrinsically declared definition of that block will be used."
+ */
+
+ /* Add output interfaces from the producer to the symbol table. */
+ foreach_in_list(ir_instruction, node, producer->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || !var->get_interface_type() || var->data.mode != ir_var_shader_out)
+ continue;
+
+ /* Built-in interface redeclaration check. */
+ if (prog->SeparateShader && !prog->IsES && prog->data->Version >= 150 &&
+ var->data.how_declared == ir_var_declared_implicitly &&
+ var->data.used && !producer_iface) {
+ linker_error(prog, "missing output builtin block %s redeclaration "
+ "in separable shader program",
+ var->get_interface_type()->name);
+ return;
+ }
+
+ definitions.store(var);
+ }
+
+ /* Verify that the consumer's input interfaces match. */
+ foreach_in_list(ir_instruction, node, consumer->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || !var->get_interface_type() || var->data.mode != ir_var_shader_in)
+ continue;
+
+ ir_variable *producer_def = definitions.lookup(var);
+
+ /* Built-in interface redeclaration check. */
+ if (prog->SeparateShader && !prog->IsES && prog->data->Version >= 150 &&
+ var->data.how_declared == ir_var_declared_implicitly &&
+ var->data.used && !producer_iface) {
+ linker_error(prog, "missing input builtin block %s redeclaration "
+ "in separable shader program",
+ var->get_interface_type()->name);
+ return;
+ }
+
+ /* The producer doesn't generate this input: fail to link. Skip built-in
+ * 'gl_in[]' since that may not be present if the producer does not
+ * write to any of the pre-defined outputs (e.g. if the vertex shader
+ * does not write to gl_Position, etc), which is allowed and results in
+ * undefined behavior.
+ *
+ * From Section 4.3.4 (Inputs) of the GLSL 1.50 spec:
+ *
+ * "Only the input variables that are actually read need to be written
+ * by the previous stage; it is allowed to have superfluous
+ * declarations of input variables."
+ */
+ if (producer_def == NULL &&
+ !is_builtin_gl_in_block(var, consumer->Stage) && var->data.used) {
+ linker_error(prog, "Input block `%s' is not an output of "
+ "the previous stage\n", var->get_interface_type()->name);
+ return;
+ }
+
+ if (producer_def &&
+ !interstage_match(prog, producer_def, var, extra_array_level)) {
+ linker_error(prog, "definitions of interface block `%s' do not "
+ "match\n", var->get_interface_type()->name);
+ return;
+ }
+ }
+}
+
+
+void
+validate_interstage_uniform_blocks(struct gl_shader_program *prog,
+ gl_linked_shader **stages)
+{
+ interface_block_definitions definitions;
+
+ for (int i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (stages[i] == NULL)
+ continue;
+
+ const gl_linked_shader *stage = stages[i];
+ foreach_in_list(ir_instruction, node, stage->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || !var->get_interface_type() ||
+ (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_storage))
+ continue;
+
+ ir_variable *old_def = definitions.lookup(var);
+ if (old_def == NULL) {
+ definitions.store(var);
+ } else {
+ /* Interstage uniform matching rules are the same as intrastage
+             * uniform matching rules (for uniforms, it is as though all
+ * shaders are in the same shader stage).
+ */
+ if (!intrastage_match(old_def, var, prog, false /* precision */)) {
+ linker_error(prog, "definitions of uniform block `%s' do not "
+ "match\n", var->get_interface_type()->name);
+ return;
+ }
+ }
+ }
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp
new file mode 100644
index 0000000000..7f12353bb2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp
@@ -0,0 +1,294 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "link_uniform_block_active_visitor.h"
+#include "program.h"
+#include "linker_util.h"
+
+static link_uniform_block_active *
+process_block(void *mem_ctx, struct hash_table *ht, ir_variable *var)
+{
+ const hash_entry *const existing_block =
+ _mesa_hash_table_search(ht, var->get_interface_type()->name);
+
+ const glsl_type *const block_type = var->is_interface_instance()
+ ? var->type : var->get_interface_type();
+
+
+ /* If a block with this block-name has not previously been seen, add it.
+ * If a block with this block-name has been seen, it must be identical to
+ * the block currently being examined.
+ */
+ if (existing_block == NULL) {
+ link_uniform_block_active *const b =
+ rzalloc(mem_ctx, struct link_uniform_block_active);
+
+ b->type = block_type;
+ b->has_instance_name = var->is_interface_instance();
+ b->is_shader_storage = var->data.mode == ir_var_shader_storage;
+
+ if (var->data.explicit_binding) {
+ b->has_binding = true;
+ b->binding = var->data.binding;
+ } else {
+ b->has_binding = false;
+ b->binding = 0;
+ }
+
+ _mesa_hash_table_insert(ht, var->get_interface_type()->name, (void *) b);
+ return b;
+ } else {
+ link_uniform_block_active *const b =
+ (link_uniform_block_active *) existing_block->data;
+
+ if (b->type != block_type
+ || b->has_instance_name != var->is_interface_instance())
+ return NULL;
+ else
+ return b;
+ }
+
+ assert(!"Should not get here.");
+ return NULL;
+}
+
+/* For arrays of arrays this function will give us a middle ground between
+ * detecting inactive uniform blocks and structuring them in a way that makes
+ * it easy to calculate the offset for indirect indexing.
+ *
+ * For example given the shader:
+ *
+ * uniform ArraysOfArraysBlock
+ * {
+ * vec4 a;
+ * } i[3][4][5];
+ *
+ * void main()
+ * {
+ * vec4 b = i[0][1][1].a;
+ * gl_Position = i[2][2][3].a + b;
+ * }
+ *
+ * There are only 2 active blocks above, but for the sake of indirect
+ * indexing and not overcomplicating the code we will end up with a
+ * count of 8: each dimension has 2 distinct indices counted, so we end
+ * up with 2*2*2.
+ */
+static struct uniform_block_array_elements **
+process_arrays(void *mem_ctx, ir_dereference_array *ir,
+ struct link_uniform_block_active *block)
+{
+ if (ir) {
+ struct uniform_block_array_elements **ub_array_ptr =
+ process_arrays(mem_ctx, ir->array->as_dereference_array(), block);
+ if (*ub_array_ptr == NULL) {
+ *ub_array_ptr = rzalloc(mem_ctx, struct uniform_block_array_elements);
+ (*ub_array_ptr)->ir = ir;
+ (*ub_array_ptr)->aoa_size =
+ ir->array->type->arrays_of_arrays_size();
+ }
+
+ struct uniform_block_array_elements *ub_array = *ub_array_ptr;
+ ir_constant *c = ir->array_index->as_constant();
+ if (c) {
+ /* Index is a constant, so mark just that element used, if not
+ * already.
+ */
+ const unsigned idx = c->get_uint_component(0);
+
+ unsigned i;
+ for (i = 0; i < ub_array->num_array_elements; i++) {
+ if (ub_array->array_elements[i] == idx)
+ break;
+ }
+
+ assert(i <= ub_array->num_array_elements);
+
+ if (i == ub_array->num_array_elements) {
+ ub_array->array_elements = reralloc(mem_ctx,
+ ub_array->array_elements,
+ unsigned,
+ ub_array->num_array_elements + 1);
+
+ ub_array->array_elements[ub_array->num_array_elements] = idx;
+
+ ub_array->num_array_elements++;
+ }
+ } else {
+ /* The array index is not a constant, so mark the entire array used. */
+ assert(ir->array->type->is_array());
+ if (ub_array->num_array_elements < ir->array->type->length) {
+ ub_array->num_array_elements = ir->array->type->length;
+ ub_array->array_elements = reralloc(mem_ctx,
+ ub_array->array_elements,
+ unsigned,
+ ub_array->num_array_elements);
+
+ for (unsigned i = 0; i < ub_array->num_array_elements; i++) {
+ ub_array->array_elements[i] = i;
+ }
+ }
+ }
+
+ return &ub_array->array;
+ } else {
+ return &block->array;
+ }
+}
+
+ir_visitor_status
+link_uniform_block_active_visitor::visit(ir_variable *var)
+{
+ if (!var->is_in_buffer_block())
+ return visit_continue;
+
+ /* Section 2.11.6 (Uniform Variables) of the OpenGL ES 3.0.3 spec says:
+ *
+ * "All members of a named uniform block declared with a shared or
+ * std140 layout qualifier are considered active, even if they are not
+ * referenced in any shader in the program. The uniform block itself is
+ * also considered active, even if no member of the block is
+ * referenced."
+ */
+ if (var->get_interface_type_packing() == GLSL_INTERFACE_PACKING_PACKED)
+ return visit_continue;
+
+ /* Process the block. Bail if there was an error. */
+ link_uniform_block_active *const b =
+ process_block(this->mem_ctx, this->ht, var);
+ if (b == NULL) {
+ linker_error(this->prog,
+ "uniform block `%s' has mismatching definitions",
+ var->get_interface_type()->name);
+ this->success = false;
+ return visit_stop;
+ }
+
+ assert(b->array == NULL);
+ assert(b->type != NULL);
+ assert(!b->type->is_array() || b->has_instance_name);
+
+ /* For uniform block arrays declared with a shared or std140 layout
+ * qualifier, mark all its instances as used.
+ */
+ const glsl_type *type = b->type;
+ struct uniform_block_array_elements **ub_array = &b->array;
+ while (type->is_array()) {
+ assert(b->type->length > 0);
+
+ *ub_array = rzalloc(this->mem_ctx, struct uniform_block_array_elements);
+ (*ub_array)->num_array_elements = type->length;
+ (*ub_array)->array_elements = reralloc(this->mem_ctx,
+ (*ub_array)->array_elements,
+ unsigned,
+ (*ub_array)->num_array_elements);
+ (*ub_array)->aoa_size = type->arrays_of_arrays_size();
+
+ for (unsigned i = 0; i < (*ub_array)->num_array_elements; i++) {
+ (*ub_array)->array_elements[i] = i;
+ }
+ ub_array = &(*ub_array)->array;
+ type = type->fields.array;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+link_uniform_block_active_visitor::visit_enter(ir_dereference_array *ir)
+{
+ /* cycle through arrays of arrays */
+ ir_dereference_array *base_ir = ir;
+ while (base_ir->array->ir_type == ir_type_dereference_array)
+ base_ir = base_ir->array->as_dereference_array();
+
+ ir_dereference_variable *const d =
+ base_ir->array->as_dereference_variable();
+ ir_variable *const var = (d == NULL) ? NULL : d->var;
+
+ /* If the r-value being dereferenced is not a variable (e.g., a field of a
+ * structure) or is not a uniform block instance, continue.
+ *
+    * WARNING: It is not enough for the variable to be part of a uniform
+    * block. It must represent the entire block. Arrays (or matrices)
+    * inside blocks that lack an instance name are handled by the
+    * visit(ir_dereference_variable *) method instead.
+ */
+ if (var == NULL
+ || !var->is_in_buffer_block()
+ || !var->is_interface_instance())
+ return visit_continue;
+
+ /* Process the block. Bail if there was an error. */
+ link_uniform_block_active *const b =
+ process_block(this->mem_ctx, this->ht, var);
+ if (b == NULL) {
+ linker_error(prog,
+ "uniform block `%s' has mismatching definitions",
+ var->get_interface_type()->name);
+ this->success = false;
+ return visit_stop;
+ }
+
+   /* Block arrays must be declared with an instance name. */
+ assert(b->has_instance_name);
+ assert(b->type != NULL);
+
+ /* If the block array was declared with a shared or std140 layout
+ * qualifier, all its instances have been already marked as used in
+ * link_uniform_block_active_visitor::visit(ir_variable *).
+ */
+ if (var->get_interface_type_packing() == GLSL_INTERFACE_PACKING_PACKED) {
+ b->var = var;
+ process_arrays(this->mem_ctx, ir, b);
+ }
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+link_uniform_block_active_visitor::visit(ir_dereference_variable *ir)
+{
+ ir_variable *var = ir->var;
+
+ if (!var->is_in_buffer_block())
+ return visit_continue;
+
+ assert(!var->is_interface_instance() || !var->type->is_array());
+
+ /* Process the block. Bail if there was an error. */
+ link_uniform_block_active *const b =
+ process_block(this->mem_ctx, this->ht, var);
+ if (b == NULL) {
+ linker_error(this->prog,
+ "uniform block `%s' has mismatching definitions",
+ var->get_interface_type()->name);
+ this->success = false;
+ return visit_stop;
+ }
+
+ assert(b->array == NULL);
+ assert(b->type != NULL);
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h
new file mode 100644
index 0000000000..fed8168440
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LINK_UNIFORM_BLOCK_ACTIVE_VISITOR_H
+#define LINK_UNIFORM_BLOCK_ACTIVE_VISITOR_H
+
+#include "ir.h"
+#include "util/hash_table.h"
+
+struct uniform_block_array_elements {
+ unsigned *array_elements;
+ unsigned num_array_elements;
+ /**
+ * Size of the array before array-trimming optimizations.
+ *
+ * Locations are only assigned to active array elements, but the location
+ * values are calculated as if all elements are active. The total number
+    * of elements in an array, including the elements of arrays of arrays,
+    * before inactive elements are removed is needed to perform that
+    * calculation.
+ */
+ unsigned aoa_size;
+
+ ir_dereference_array *ir;
+
+ struct uniform_block_array_elements *array;
+};
+
+struct link_uniform_block_active {
+ const glsl_type *type;
+ ir_variable *var;
+
+ struct uniform_block_array_elements *array;
+
+ unsigned binding;
+
+ bool has_instance_name;
+ bool has_binding;
+ bool is_shader_storage;
+};
+
+class link_uniform_block_active_visitor : public ir_hierarchical_visitor {
+public:
+ link_uniform_block_active_visitor(void *mem_ctx, struct hash_table *ht,
+ struct gl_shader_program *prog)
+ : success(true), prog(prog), ht(ht), mem_ctx(mem_ctx)
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+ virtual ir_visitor_status visit(ir_variable *);
+
+ bool success;
+
+private:
+ struct gl_shader_program *prog;
+ struct hash_table *ht;
+ void *mem_ctx;
+};
+
+#endif /* LINK_UNIFORM_BLOCK_ACTIVE_VISITOR_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp
new file mode 100644
index 0000000000..1603cbd59a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp
@@ -0,0 +1,573 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "linker.h"
+#include "ir_uniform.h"
+#include "link_uniform_block_active_visitor.h"
+#include "util/hash_table.h"
+#include "program.h"
+#include "main/errors.h"
+#include "main/mtypes.h"
+
+namespace {
+
+class ubo_visitor : public program_resource_visitor {
+public:
+ ubo_visitor(void *mem_ctx, gl_uniform_buffer_variable *variables,
+ unsigned num_variables, struct gl_shader_program *prog,
+ bool use_std430_as_default)
+ : index(0), offset(0), buffer_size(0), variables(variables),
+ num_variables(num_variables), mem_ctx(mem_ctx),
+ is_array_instance(false), prog(prog),
+ use_std430_as_default(use_std430_as_default)
+ {
+ /* empty */
+ }
+
+ void process(const glsl_type *type, const char *name)
+ {
+ this->offset = 0;
+ this->buffer_size = 0;
+ this->is_array_instance = strchr(name, ']') != NULL;
+ this->program_resource_visitor::process(type, name,
+ use_std430_as_default);
+ }
+
+ unsigned index;
+ unsigned offset;
+ unsigned buffer_size;
+ gl_uniform_buffer_variable *variables;
+ unsigned num_variables;
+ void *mem_ctx;
+ bool is_array_instance;
+ struct gl_shader_program *prog;
+
+private:
+ virtual void enter_record(const glsl_type *type, const char *,
+ bool row_major,
+ const enum glsl_interface_packing packing)
+ {
+ assert(type->is_struct());
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->offset = glsl_align(
+ this->offset, type->std430_base_alignment(row_major));
+ else
+ this->offset = glsl_align(
+ this->offset, type->std140_base_alignment(row_major));
+ }
+
+ virtual void leave_record(const glsl_type *type, const char *,
+ bool row_major,
+ const enum glsl_interface_packing packing)
+ {
+ assert(type->is_struct());
+
+ /* If this is the last field of a structure, apply rule #9. The
+ * ARB_uniform_buffer_object spec says:
+ *
+ * The structure may have padding at the end; the base offset of the
+ * member following the sub-structure is rounded up to the next
+ * multiple of the base alignment of the structure.
+ */
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->offset = glsl_align(
+ this->offset, type->std430_base_alignment(row_major));
+ else
+ this->offset = glsl_align(
+ this->offset, type->std140_base_alignment(row_major));
+ }
+
+ virtual void set_buffer_offset(unsigned offset)
+ {
+ this->offset = offset;
+ }
+
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool row_major, const glsl_type *,
+ const enum glsl_interface_packing packing,
+ bool last_field)
+ {
+ assert(this->index < this->num_variables);
+
+ gl_uniform_buffer_variable *v = &this->variables[this->index++];
+
+ v->Name = ralloc_strdup(mem_ctx, name);
+ v->Type = type;
+ v->RowMajor = type->without_array()->is_matrix() && row_major;
+
+ if (this->is_array_instance) {
+ v->IndexName = ralloc_strdup(mem_ctx, name);
+
+ char *open_bracket = strchr(v->IndexName, '[');
+ assert(open_bracket != NULL);
+
+ char *close_bracket = strchr(open_bracket, '.') - 1;
+ assert(close_bracket != NULL);
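+
+         /* Strip the array subscript from the instance-name portion; e.g.
+          * a hypothetical "blk[2].color" becomes "blk.color" (example
+          * ours).
+          */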
+
+ /* Length of the tail without the ']' but with the NUL.
+ */
+ unsigned len = strlen(close_bracket + 1) + 1;
+
+ memmove(open_bracket, close_bracket + 1, len);
+ } else {
+ v->IndexName = v->Name;
+ }
+
+ unsigned alignment = 0;
+ unsigned size = 0;
+
+ /* The ARB_program_interface_query spec says:
+ *
+ * If the final member of an active shader storage block is array
+ * with no declared size, the minimum buffer size is computed
+ * assuming the array was declared as an array with one element.
+ *
+ * For that reason, we use the base type of the unsized array to
+ * calculate its size. We don't need to check if the unsized array is
+ * the last member of a shader storage block (that check was already
+ * done by the parser).
+ */
+ const glsl_type *type_for_size = type;
+ if (type->is_unsized_array()) {
+ if (!last_field) {
+ linker_error(prog, "unsized array `%s' definition: "
+ "only last member of a shader storage block "
+ "can be defined as unsized array",
+ name);
+ }
+
+ type_for_size = type->without_array();
+ }
+
+ if (packing == GLSL_INTERFACE_PACKING_STD430) {
+ alignment = type->std430_base_alignment(v->RowMajor);
+ size = type_for_size->std430_size(v->RowMajor);
+ } else {
+ alignment = type->std140_base_alignment(v->RowMajor);
+ size = type_for_size->std140_size(v->RowMajor);
+ }
+
+ this->offset = glsl_align(this->offset, alignment);
+ v->Offset = this->offset;
+
+ this->offset += size;
+
+ /* The ARB_uniform_buffer_object spec says:
+ *
+ * For uniform blocks laid out according to [std140] rules, the
+ * minimum buffer object size returned by the UNIFORM_BLOCK_DATA_SIZE
+ * query is derived by taking the offset of the last basic machine
+ * unit consumed by the last uniform of the uniform block (including
+ * any end-of-array or end-of-structure padding), adding one, and
+ * rounding up to the next multiple of the base alignment required
+ * for a vec4.
+ */
+ this->buffer_size = glsl_align(this->offset, 16);
+ }
+
+ bool use_std430_as_default;
+};
+
+class count_block_size : public program_resource_visitor {
+public:
+ count_block_size() : num_active_uniforms(0)
+ {
+ /* empty */
+ }
+
+ unsigned num_active_uniforms;
+
+private:
+ virtual void visit_field(const glsl_type * /* type */,
+ const char * /* name */,
+ bool /* row_major */,
+ const glsl_type * /* record_type */,
+ const enum glsl_interface_packing,
+ bool /* last_field */)
+ {
+ this->num_active_uniforms++;
+ }
+};
+
+} /* anonymous namespace */
+
+struct block {
+ const glsl_type *type;
+ bool has_instance_name;
+};
+
+static void process_block_array_leaf(const char *name, gl_uniform_block *blocks,
+ ubo_visitor *parcel,
+ gl_uniform_buffer_variable *variables,
+ const struct link_uniform_block_active *const b,
+ unsigned *block_index,
+ unsigned binding_offset,
+ unsigned linearized_index,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+/**
+ *
+ * \param first_index Value of \c block_index for the first element of the
+ * array.
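+ *
+ * For a hypothetical block declared "uniform B { ... } b[2][3];" the
+ * recursion appends one subscript per array level, producing leaf
+ * names such as "B[0][1]" (example ours).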
+ */
+static void
+process_block_array(struct uniform_block_array_elements *ub_array, char **name,
+ size_t name_length, gl_uniform_block *blocks,
+ ubo_visitor *parcel, gl_uniform_buffer_variable *variables,
+ const struct link_uniform_block_active *const b,
+ unsigned *block_index, unsigned binding_offset,
+ struct gl_context *ctx, struct gl_shader_program *prog,
+ unsigned first_index)
+{
+ for (unsigned j = 0; j < ub_array->num_array_elements; j++) {
+ size_t new_length = name_length;
+
+ unsigned int element_idx = ub_array->array_elements[j];
+ /* Append the subscript to the current variable name */
+ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", element_idx);
+
+ if (ub_array->array) {
+ unsigned binding_stride = binding_offset + (element_idx *
+ ub_array->array->aoa_size);
+ process_block_array(ub_array->array, name, new_length, blocks,
+ parcel, variables, b, block_index,
+ binding_stride, ctx, prog, first_index);
+ } else {
+ process_block_array_leaf(*name, blocks,
+ parcel, variables, b, block_index,
+ binding_offset + element_idx,
+ *block_index - first_index, ctx, prog);
+ }
+ }
+}
+
+static void
+process_block_array_leaf(const char *name,
+ gl_uniform_block *blocks,
+ ubo_visitor *parcel, gl_uniform_buffer_variable *variables,
+ const struct link_uniform_block_active *const b,
+ unsigned *block_index, unsigned binding_offset,
+ unsigned linearized_index,
+ struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ unsigned i = *block_index;
+ const glsl_type *type = b->type->without_array();
+
+ blocks[i].Name = ralloc_strdup(blocks, name);
+ blocks[i].Uniforms = &variables[(*parcel).index];
+
+ /* The ARB_shading_language_420pack spec says:
+ *
+ * If the binding identifier is used with a uniform block instanced as
+ * an array then the first element of the array takes the specified
+ * block binding and each subsequent element takes the next consecutive
+ * uniform block binding point.
+ */
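+   /* E.g. (illustrative) "layout(binding = 2) uniform B { ... } b[3];"
+    * assigns bindings 2, 3 and 4 to the three array elements.
+    */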
+ blocks[i].Binding = (b->has_binding) ? b->binding + binding_offset : 0;
+
+ blocks[i].UniformBufferSize = 0;
+ blocks[i]._Packing = glsl_interface_packing(type->interface_packing);
+ blocks[i]._RowMajor = type->get_interface_row_major();
+ blocks[i].linearized_array_index = linearized_index;
+
+ parcel->process(type, b->has_instance_name ? blocks[i].Name : "");
+
+ blocks[i].UniformBufferSize = parcel->buffer_size;
+
+   /* Check that the SSBO size does not exceed the maximum supported SSBO
+    * size.
+    */
+ if (b->is_shader_storage &&
+ parcel->buffer_size > ctx->Const.MaxShaderStorageBlockSize) {
+ linker_error(prog, "shader storage block `%s' has size %d, "
+ "which is larger than the maximum allowed (%d)",
+ b->type->name,
+ parcel->buffer_size,
+ ctx->Const.MaxShaderStorageBlockSize);
+ }
+ blocks[i].NumUniforms =
+ (unsigned)(ptrdiff_t)(&variables[parcel->index] - blocks[i].Uniforms);
+
+ *block_index = *block_index + 1;
+}
+
+/* This function resizes the array types of the block so that later we can
+ * use the new size to correctly calculate the offset for indirect indexing.
+ */
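+/* For instance (assuming packed layout, as in the ArraysOfArraysBlock
+ * example above): i[3][4][5] with two active indices per dimension is
+ * rebuilt as a [2][2][2] array type before offsets are assigned.
+ */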
+static const glsl_type *
+resize_block_array(const glsl_type *type,
+ struct uniform_block_array_elements *ub_array)
+{
+ if (type->is_array()) {
+ struct uniform_block_array_elements *child_array =
+ type->fields.array->is_array() ? ub_array->array : NULL;
+ const glsl_type *new_child_type =
+ resize_block_array(type->fields.array, child_array);
+
+ const glsl_type *new_type =
+ glsl_type::get_array_instance(new_child_type,
+ ub_array->num_array_elements);
+ ub_array->ir->array->type = new_type;
+ return new_type;
+ } else {
+ return type;
+ }
+}
+
+static void
+create_buffer_blocks(void *mem_ctx, struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_uniform_block **out_blks, unsigned num_blocks,
+ struct hash_table *block_hash, unsigned num_variables,
+ bool create_ubo_blocks)
+{
+ if (num_blocks == 0) {
+ assert(num_variables == 0);
+ return;
+ }
+
+ assert(num_variables != 0);
+
+ /* Allocate storage to hold all of the information related to uniform
+ * blocks that can be queried through the API.
+ */
+ struct gl_uniform_block *blocks =
+ rzalloc_array(mem_ctx, gl_uniform_block, num_blocks);
+ gl_uniform_buffer_variable *variables =
+ ralloc_array(blocks, gl_uniform_buffer_variable, num_variables);
+
+ /* Add each variable from each uniform block to the API tracking
+ * structures.
+ */
+ ubo_visitor parcel(blocks, variables, num_variables, prog,
+ ctx->Const.UseSTD430AsDefaultPacking);
+
+ unsigned i = 0;
+ hash_table_foreach (block_hash, entry) {
+ const struct link_uniform_block_active *const b =
+ (const struct link_uniform_block_active *) entry->data;
+ const glsl_type *block_type = b->type;
+
+ if ((create_ubo_blocks && !b->is_shader_storage) ||
+ (!create_ubo_blocks && b->is_shader_storage)) {
+
+ if (b->array != NULL) {
+ char *name = ralloc_strdup(NULL,
+ block_type->without_array()->name);
+ size_t name_length = strlen(name);
+
+ assert(b->has_instance_name);
+ process_block_array(b->array, &name, name_length, blocks, &parcel,
+ variables, b, &i, 0, ctx, prog,
+ i);
+ ralloc_free(name);
+ } else {
+ process_block_array_leaf(block_type->name, blocks, &parcel,
+ variables, b, &i, 0,
+ 0, ctx, prog);
+ }
+ }
+ }
+
+ *out_blks = blocks;
+
+ assert(parcel.index == num_variables);
+}
+
+void
+link_uniform_blocks(void *mem_ctx,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_linked_shader *shader,
+ struct gl_uniform_block **ubo_blocks,
+ unsigned *num_ubo_blocks,
+ struct gl_uniform_block **ssbo_blocks,
+ unsigned *num_ssbo_blocks)
+{
+ /* This hash table will track all of the uniform blocks that have been
+ * encountered. Since blocks with the same block-name must be the same,
+ * the hash is organized by block-name.
+ */
+ struct hash_table *block_hash =
+ _mesa_hash_table_create(mem_ctx, _mesa_hash_string,
+ _mesa_key_string_equal);
+
+ if (block_hash == NULL) {
+ _mesa_error_no_memory(__func__);
+ linker_error(prog, "out of memory\n");
+ return;
+ }
+
+ /* Determine which uniform blocks are active. */
+ link_uniform_block_active_visitor v(mem_ctx, block_hash, prog);
+ visit_list_elements(&v, shader->ir);
+
+ /* Count the number of active uniform blocks. Count the total number of
+ * active slots in those uniform blocks.
+ */
+ unsigned num_ubo_variables = 0;
+ unsigned num_ssbo_variables = 0;
+ count_block_size block_size;
+
+ hash_table_foreach (block_hash, entry) {
+ struct link_uniform_block_active *const b =
+ (struct link_uniform_block_active *) entry->data;
+
+ assert((b->array != NULL) == b->type->is_array());
+
+ if (b->array != NULL &&
+ (b->type->without_array()->interface_packing ==
+ GLSL_INTERFACE_PACKING_PACKED)) {
+ b->type = resize_block_array(b->type, b->array);
+ b->var->type = b->type;
+ b->var->data.max_array_access = b->type->length - 1;
+ }
+
+ block_size.num_active_uniforms = 0;
+ block_size.process(b->type->without_array(), "",
+ ctx->Const.UseSTD430AsDefaultPacking);
+
+ if (b->array != NULL) {
+ unsigned aoa_size = b->type->arrays_of_arrays_size();
+ if (b->is_shader_storage) {
+ *num_ssbo_blocks += aoa_size;
+ num_ssbo_variables += aoa_size * block_size.num_active_uniforms;
+ } else {
+ *num_ubo_blocks += aoa_size;
+ num_ubo_variables += aoa_size * block_size.num_active_uniforms;
+ }
+ } else {
+ if (b->is_shader_storage) {
+ (*num_ssbo_blocks)++;
+ num_ssbo_variables += block_size.num_active_uniforms;
+ } else {
+ (*num_ubo_blocks)++;
+ num_ubo_variables += block_size.num_active_uniforms;
+ }
+ }
+
+ }
+
+ create_buffer_blocks(mem_ctx, ctx, prog, ubo_blocks, *num_ubo_blocks,
+ block_hash, num_ubo_variables, true);
+ create_buffer_blocks(mem_ctx, ctx, prog, ssbo_blocks, *num_ssbo_blocks,
+ block_hash, num_ssbo_variables, false);
+
+ _mesa_hash_table_destroy(block_hash, NULL);
+}
+
+static bool
+link_uniform_blocks_are_compatible(const gl_uniform_block *a,
+ const gl_uniform_block *b)
+{
+ assert(strcmp(a->Name, b->Name) == 0);
+
+ /* Page 35 (page 42 of the PDF) in section 4.3.7 of the GLSL 1.50 spec says:
+ *
+ * Matched block names within an interface (as defined above) must match
+ * in terms of having the same number of declarations with the same
+ * sequence of types and the same sequence of member names, as well as
+ * having the same member-wise layout qualification....if a matching
+ * block is declared as an array, then the array sizes must also
+ * match... Any mismatch will generate a link error.
+ *
+ * Arrays are not yet supported, so there is no check for that.
+ */
+ if (a->NumUniforms != b->NumUniforms)
+ return false;
+
+ if (a->_Packing != b->_Packing)
+ return false;
+
+ if (a->_RowMajor != b->_RowMajor)
+ return false;
+
+ if (a->Binding != b->Binding)
+ return false;
+
+ for (unsigned i = 0; i < a->NumUniforms; i++) {
+ if (strcmp(a->Uniforms[i].Name, b->Uniforms[i].Name) != 0)
+ return false;
+
+ if (a->Uniforms[i].Type != b->Uniforms[i].Type)
+ return false;
+
+ if (a->Uniforms[i].RowMajor != b->Uniforms[i].RowMajor)
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Merges a uniform block into an array of uniform blocks that may or
+ * may not already contain a copy of it.
+ *
+ * Returns the index of the new block in the array.
+ */
+int
+link_cross_validate_uniform_block(void *mem_ctx,
+ struct gl_uniform_block **linked_blocks,
+ unsigned int *num_linked_blocks,
+ struct gl_uniform_block *new_block)
+{
+ for (unsigned int i = 0; i < *num_linked_blocks; i++) {
+ struct gl_uniform_block *old_block = &(*linked_blocks)[i];
+
+ if (strcmp(old_block->Name, new_block->Name) == 0)
+ return link_uniform_blocks_are_compatible(old_block, new_block)
+ ? i : -1;
+ }
+
+ *linked_blocks = reralloc(mem_ctx, *linked_blocks,
+ struct gl_uniform_block,
+ *num_linked_blocks + 1);
+ int linked_block_index = (*num_linked_blocks)++;
+ struct gl_uniform_block *linked_block = &(*linked_blocks)[linked_block_index];
+
+ memcpy(linked_block, new_block, sizeof(*new_block));
+ linked_block->Uniforms = ralloc_array(*linked_blocks,
+ struct gl_uniform_buffer_variable,
+ linked_block->NumUniforms);
+
+ memcpy(linked_block->Uniforms,
+ new_block->Uniforms,
+ sizeof(*linked_block->Uniforms) * linked_block->NumUniforms);
+
+ linked_block->Name = ralloc_strdup(*linked_blocks, linked_block->Name);
+
+ for (unsigned int i = 0; i < linked_block->NumUniforms; i++) {
+ struct gl_uniform_buffer_variable *ubo_var =
+ &linked_block->Uniforms[i];
+
+ if (ubo_var->Name == ubo_var->IndexName) {
+ ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
+ ubo_var->IndexName = ubo_var->Name;
+ } else {
+ ubo_var->Name = ralloc_strdup(*linked_blocks, ubo_var->Name);
+ ubo_var->IndexName = ralloc_strdup(*linked_blocks, ubo_var->IndexName);
+ }
+ }
+
+ return linked_block_index;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp
new file mode 100644
index 0000000000..076ff5cea3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "linker.h"
+#include "ir_uniform.h"
+#include "string_to_uint_map.h"
+#include "main/mtypes.h"
+
+/* These functions are put in a "private" namespace instead of being marked
+ * static so that the unit tests can access them. See
+ * http://code.google.com/p/googletest/wiki/AdvancedGuide#Testing_Private_Code
+ */
+namespace linker {
+
+static gl_uniform_storage *
+get_storage(struct gl_shader_program *prog, const char *name)
+{
+ unsigned id;
+ if (prog->UniformHash->get(id, name))
+ return &prog->data->UniformStorage[id];
+
+ assert(!"No uniform storage found!");
+ return NULL;
+}
+
+void
+copy_constant_to_storage(union gl_constant_value *storage,
+ const ir_constant *val,
+ const enum glsl_base_type base_type,
+ const unsigned int elements,
+ unsigned int boolean_true)
+{
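+   /* boolean_true is the bit pattern the driver uses to represent GL true
+    * for booleans (commonly 1 or ~0u); callers pass it through from
+    * link_set_uniform_initializers().
+    */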
+ for (unsigned int i = 0; i < elements; i++) {
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ storage[i].u = val->value.u[i];
+ break;
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_SAMPLER:
+ storage[i].i = val->value.i[i];
+ break;
+ case GLSL_TYPE_FLOAT:
+ storage[i].f = val->value.f[i];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ /* XXX need to check on big-endian */
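+         /* 64-bit values occupy two consecutive gl_constant_value slots,
+          * hence the i * 2 indexing in the memcpy below.
+          */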
+ memcpy(&storage[i * 2].u, &val->value.d[i], sizeof(double));
+ break;
+ case GLSL_TYPE_BOOL:
+ storage[i].b = val->value.b[i] ? boolean_true : 0;
+ break;
+ case GLSL_TYPE_ARRAY:
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_SUBROUTINE:
+ case GLSL_TYPE_FUNCTION:
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_FLOAT16:
+ /* All other types should have already been filtered by other
+ * paths in the caller.
+ */
+ assert(!"Should not get here.");
+ break;
+ }
+ }
+}
+
+/**
+ * Initialize an opaque uniform from the value of an explicit binding
+ * qualifier specified in the shader. Atomic counters are different because
+ * they have no storage and should be handled elsewhere.
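+ *
+ * For example (illustrative): given "layout(binding = 3) uniform
+ * sampler2D texs[4];" the elements take units 3, 4, 5 and 6, per the
+ * GLSL 4.50 rule quoted below.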
+ */
+static void
+set_opaque_binding(void *mem_ctx, gl_shader_program *prog,
+ const ir_variable *var, const glsl_type *type,
+ const char *name, int *binding)
+{
+
+ if (type->is_array() && type->fields.array->is_array()) {
+ const glsl_type *const element_type = type->fields.array;
+
+ for (unsigned int i = 0; i < type->length; i++) {
+ const char *element_name = ralloc_asprintf(mem_ctx, "%s[%d]", name, i);
+
+ set_opaque_binding(mem_ctx, prog, var, element_type,
+ element_name, binding);
+ }
+ } else {
+ struct gl_uniform_storage *const storage = get_storage(prog, name);
+
+ if (!storage)
+ return;
+
+ const unsigned elements = MAX2(storage->array_elements, 1);
+
+ /* Section 4.4.6 (Opaque-Uniform Layout Qualifiers) of the GLSL 4.50 spec
+ * says:
+ *
+ * "If the binding identifier is used with an array, the first element
+ * of the array takes the specified unit and each subsequent element
+ * takes the next consecutive unit."
+ */
+ for (unsigned int i = 0; i < elements; i++) {
+ storage->storage[i].i = (*binding)++;
+ }
+
+ for (int sh = 0; sh < MESA_SHADER_STAGES; sh++) {
+ gl_linked_shader *shader = prog->_LinkedShaders[sh];
+
+ if (!shader)
+ continue;
+ if (!storage->opaque[sh].active)
+ continue;
+
+ if (storage->type->is_sampler()) {
+ for (unsigned i = 0; i < elements; i++) {
+ const unsigned index = storage->opaque[sh].index + i;
+
+ if (var->data.bindless) {
+ if (index >= shader->Program->sh.NumBindlessSamplers)
+ break;
+ shader->Program->sh.BindlessSamplers[index].unit =
+ storage->storage[i].i;
+ shader->Program->sh.BindlessSamplers[index].bound = true;
+ shader->Program->sh.HasBoundBindlessSampler = true;
+ } else {
+ if (index >= ARRAY_SIZE(shader->Program->SamplerUnits))
+ break;
+ shader->Program->SamplerUnits[index] =
+ storage->storage[i].i;
+ }
+ }
+ } else if (storage->type->is_image()) {
+ for (unsigned i = 0; i < elements; i++) {
+ const unsigned index = storage->opaque[sh].index + i;
+
+
+ if (var->data.bindless) {
+ if (index >= shader->Program->sh.NumBindlessImages)
+ break;
+ shader->Program->sh.BindlessImages[index].unit =
+ storage->storage[i].i;
+ shader->Program->sh.BindlessImages[index].bound = true;
+ shader->Program->sh.HasBoundBindlessImage = true;
+ } else {
+ if (index >= ARRAY_SIZE(shader->Program->sh.ImageUnits))
+ break;
+ shader->Program->sh.ImageUnits[index] =
+ storage->storage[i].i;
+ }
+ }
+ }
+ }
+ }
+}
+
+void
+set_uniform_initializer(void *mem_ctx, gl_shader_program *prog,
+ const char *name, const glsl_type *type,
+ ir_constant *val, unsigned int boolean_true)
+{
+ const glsl_type *t_without_array = type->without_array();
+ if (type->is_struct()) {
+ for (unsigned int i = 0; i < type->length; i++) {
+ const glsl_type *field_type = type->fields.structure[i].type;
+ const char *field_name = ralloc_asprintf(mem_ctx, "%s.%s", name,
+ type->fields.structure[i].name);
+ set_uniform_initializer(mem_ctx, prog, field_name,
+ field_type, val->get_record_field(i),
+ boolean_true);
+ }
+ return;
+ } else if (t_without_array->is_struct() ||
+ (type->is_array() && type->fields.array->is_array())) {
+ const glsl_type *const element_type = type->fields.array;
+
+ for (unsigned int i = 0; i < type->length; i++) {
+ const char *element_name = ralloc_asprintf(mem_ctx, "%s[%d]", name, i);
+
+ set_uniform_initializer(mem_ctx, prog, element_name,
+ element_type, val->const_elements[i],
+ boolean_true);
+ }
+ return;
+ }
+
+ struct gl_uniform_storage *const storage = get_storage(prog, name);
+
+ if (!storage)
+ return;
+
+ if (val->type->is_array()) {
+ const enum glsl_base_type base_type =
+ val->const_elements[0]->type->base_type;
+ const unsigned int elements = val->const_elements[0]->type->components();
+ unsigned int idx = 0;
+ unsigned dmul = glsl_base_type_is_64bit(base_type) ? 2 : 1;
+
+ assert(val->type->length >= storage->array_elements);
+ for (unsigned int i = 0; i < storage->array_elements; i++) {
+ copy_constant_to_storage(& storage->storage[idx],
+ val->const_elements[i],
+ base_type,
+ elements,
+ boolean_true);
+
+ idx += elements * dmul;
+ }
+ } else {
+ copy_constant_to_storage(storage->storage,
+ val,
+ val->type->base_type,
+ val->type->components(),
+ boolean_true);
+
+ if (storage->type->is_sampler()) {
+ for (int sh = 0; sh < MESA_SHADER_STAGES; sh++) {
+ gl_linked_shader *shader = prog->_LinkedShaders[sh];
+
+ if (shader && storage->opaque[sh].active) {
+ unsigned index = storage->opaque[sh].index;
+
+ shader->Program->SamplerUnits[index] = storage->storage[0].i;
+ }
+ }
+ }
+ }
+}
+} /* namespace linker */
+
+void
+link_set_uniform_initializers(struct gl_shader_program *prog,
+ unsigned int boolean_true)
+{
+ void *mem_ctx = NULL;
+
+ for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *shader = prog->_LinkedShaders[i];
+
+ if (shader == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node, shader->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (!var || (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_storage))
+ continue;
+
+ if (!mem_ctx)
+ mem_ctx = ralloc_context(NULL);
+
+ if (var->data.explicit_binding) {
+ const glsl_type *const type = var->type;
+
+ if (var->is_in_buffer_block()) {
+ /* This case is handled by link_uniform_blocks (at
+ * process_block_array_leaf)
+ */
+ } else if (type->without_array()->is_sampler() ||
+ type->without_array()->is_image()) {
+ int binding = var->data.binding;
+ linker::set_opaque_binding(mem_ctx, prog, var, var->type,
+ var->name, &binding);
+ } else if (type->contains_atomic()) {
+ /* we don't actually need to do anything. */
+ } else {
+ assert(!"Explicit binding not on a sampler, UBO or atomic.");
+ }
+ } else if (var->constant_initializer) {
+ linker::set_uniform_initializer(mem_ctx, prog, var->name,
+ var->type, var->constant_initializer,
+ boolean_true);
+ }
+ }
+ }
+
+ memcpy(prog->data->UniformDataDefaults, prog->data->UniformDataSlots,
+ sizeof(union gl_constant_value) * prog->data->NumUniformDataSlots);
+ ralloc_free(mem_ctx);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniforms.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniforms.cpp
new file mode 100644
index 0000000000..6518ec10f3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_uniforms.cpp
@@ -0,0 +1,1767 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "linker.h"
+#include "ir_uniform.h"
+#include "glsl_symbol_table.h"
+#include "program.h"
+#include "string_to_uint_map.h"
+#include "ir_array_refcount.h"
+
+#include "main/mtypes.h"
+#include "util/strndup.h"
+
+/**
+ * \file link_uniforms.cpp
+ * Assign locations for GLSL uniforms.
+ *
+ * \author Ian Romanick <ian.d.romanick@intel.com>
+ */
+
+/**
+ * Used by linker to indicate uniforms that have no location set.
+ */
+#define UNMAPPED_UNIFORM_LOC ~0u
+
+static char*
+get_top_level_name(const char *name)
+{
+ const char *first_dot = strchr(name, '.');
+ const char *first_square_bracket = strchr(name, '[');
+ int name_size = 0;
+
+ /* The ARB_program_interface_query spec says:
+ *
+ * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying
+ * the number of active array elements of the top-level shader storage
+    *     block member containing the active variable is written to
+ * <params>. If the top-level block member is not declared as an
+ * array, the value one is written to <params>. If the top-level block
+ * member is an array with no declared size, the value zero is written
+ * to <params>."
+ */
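+
+   /* E.g. (examples ours) "buf.arr[2]" yields "buf", while "arr[2].x"
+    * yields "arr".
+    */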
+
+   /* The buffer variable is at top level. */
+ if (!first_square_bracket && !first_dot)
+ name_size = strlen(name);
+ else if ((!first_square_bracket ||
+ (first_dot && first_dot < first_square_bracket)))
+ name_size = first_dot - name;
+ else
+ name_size = first_square_bracket - name;
+
+ return strndup(name, name_size);
+}
+
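+/* Strip the interface-name prefix; e.g. (example ours) "buf.member[1]"
+ * becomes "member[1]".
+ */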
+static char*
+get_var_name(const char *name)
+{
+ const char *first_dot = strchr(name, '.');
+
+ if (!first_dot)
+ return strdup(name);
+
+ return strndup(first_dot+1, strlen(first_dot) - 1);
+}
+
+static bool
+is_top_level_shader_storage_block_member(const char* name,
+ const char* interface_name,
+ const char* field_name)
+{
+ bool result = false;
+
+ /* If the given variable is already a top-level shader storage
+ * block member, then return array_size = 1.
+    * There are two possibilities: the shader storage block is either
+    * instanced or not.
+    *
+    * For the instanced case, we build the name the member would have at
+    * top level and compare it with the real name. If they are the same,
+    * then the variable is already at top level.
+ *
+ * Full instanced name is: interface name + '.' + var name +
+ * NULL character
+ */
+ int name_length = strlen(interface_name) + 1 + strlen(field_name) + 1;
+ char *full_instanced_name = (char *) calloc(name_length, sizeof(char));
+ if (!full_instanced_name) {
+ fprintf(stderr, "%s: Cannot allocate space for name\n", __func__);
+ return false;
+ }
+
+ snprintf(full_instanced_name, name_length, "%s.%s",
+ interface_name, field_name);
+
+   /* Check if it is a top-level shader storage block member of an
+    * instanced interface block, or of an unnamed interface block.
+ */
+ if (strcmp(name, full_instanced_name) == 0 ||
+ strcmp(name, field_name) == 0)
+ result = true;
+
+ free(full_instanced_name);
+ return result;
+}
+
+static int
+get_array_size(struct gl_uniform_storage *uni, const glsl_struct_field *field,
+ char *interface_name, char *var_name)
+{
+ /* The ARB_program_interface_query spec says:
+ *
+ * "For the property TOP_LEVEL_ARRAY_SIZE, a single integer identifying
+ * the number of active array elements of the top-level shader storage
+    *     block member containing the active variable is written to
+ * <params>. If the top-level block member is not declared as an
+ * array, the value one is written to <params>. If the top-level block
+ * member is an array with no declared size, the value zero is written
+ * to <params>."
+ */
+ if (is_top_level_shader_storage_block_member(uni->name,
+ interface_name,
+ var_name))
+ return 1;
+ else if (field->type->is_array())
+ return field->type->length;
+
+ return 1;
+}
+
+static int
+get_array_stride(struct gl_uniform_storage *uni, const glsl_type *iface,
+ const glsl_struct_field *field, char *interface_name,
+ char *var_name, bool use_std430_as_default)
+{
+ /* The ARB_program_interface_query spec says:
+ *
+ * "For the property TOP_LEVEL_ARRAY_STRIDE, a single integer
+ * identifying the stride between array elements of the top-level
+ * shader storage block member containing the active variable is
+ * written to <params>. For top-level block members declared as
+ * arrays, the value written is the difference, in basic machine units,
+ * between the offsets of the active variable for consecutive elements
+ * in the top-level array. For top-level block members not declared as
+ * an array, zero is written to <params>."
+ */
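+   /* For example (illustrative): under std140 a top-level "vec4 a[4];"
+    * member has an array stride of 16 bytes, while under std430 a
+    * "float b[4];" member has a stride of 4.
+    */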
+ if (field->type->is_array()) {
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(field->matrix_layout);
+ bool row_major = matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
+ const glsl_type *array_type = field->type->fields.array;
+
+ if (is_top_level_shader_storage_block_member(uni->name,
+ interface_name,
+ var_name))
+ return 0;
+
+ if (GLSL_INTERFACE_PACKING_STD140 ==
+ iface->get_internal_ifc_packing(use_std430_as_default)) {
+ if (array_type->is_struct() || array_type->is_array())
+ return glsl_align(array_type->std140_size(row_major), 16);
+ else
+ return MAX2(array_type->std140_base_alignment(row_major), 16);
+ } else {
+ return array_type->std430_array_stride(row_major);
+ }
+ }
+ return 0;
+}
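+
+ /* Worked example (hypothetical member): for a top-level member
+ * declared as "float f[8]", the active variable "f[0]" gets a std140
+ * TOP_LEVEL_ARRAY_STRIDE of MAX2(4, 16) = 16 bytes, while std430
+ * yields the tight stride of 4. Top-level members that are not
+ * arrays always yield 0.
+ */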
+
+static void
+calculate_array_size_and_stride(struct gl_shader_program *shProg,
+ struct gl_uniform_storage *uni,
+ bool use_std430_as_default)
+{
+ if (!uni->is_shader_storage)
+ return;
+
+ int block_index = uni->block_index;
+ int array_size = -1;
+ int array_stride = -1;
+ char *var_name = get_top_level_name(uni->name);
+ char *interface_name =
+ get_top_level_name(uni->is_shader_storage ?
+ shProg->data->ShaderStorageBlocks[block_index].Name :
+ shProg->data->UniformBlocks[block_index].Name);
+
+ if (strcmp(var_name, interface_name) == 0) {
+ /* Deal with instanced array of SSBOs */
+ char *temp_name = get_var_name(uni->name);
+ if (!temp_name) {
+ linker_error(shProg, "Out of memory during linking.\n");
+ goto write_top_level_array_size_and_stride;
+ }
+ free(var_name);
+ var_name = get_top_level_name(temp_name);
+ free(temp_name);
+ if (!var_name) {
+ linker_error(shProg, "Out of memory during linking.\n");
+ goto write_top_level_array_size_and_stride;
+ }
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ const gl_linked_shader *sh = shProg->_LinkedShaders[i];
+ if (sh == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || !var->get_interface_type() ||
+ var->data.mode != ir_var_shader_storage)
+ continue;
+
+ const glsl_type *iface = var->get_interface_type();
+
+ if (strcmp(interface_name, iface->name) != 0)
+ continue;
+
+ for (unsigned i = 0; i < iface->length; i++) {
+ const glsl_struct_field *field = &iface->fields.structure[i];
+ if (strcmp(field->name, var_name) != 0)
+ continue;
+
+ array_stride = get_array_stride(uni, iface, field, interface_name,
+ var_name, use_std430_as_default);
+ array_size = get_array_size(uni, field, interface_name, var_name);
+ goto write_top_level_array_size_and_stride;
+ }
+ }
+ }
+write_top_level_array_size_and_stride:
+ free(interface_name);
+ free(var_name);
+ uni->top_level_array_stride = array_stride;
+ uni->top_level_array_size = array_size;
+}
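+
+ /* Illustration (hypothetical instanced SSBO array): with a block named
+ * "Block[0]" and uni->name "Block.arr[0]", both top-level names above
+ * are "Block", so var_name is re-derived as "arr" before searching the
+ * interface fields.
+ */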
+
+void
+program_resource_visitor::process(const glsl_type *type, const char *name,
+ bool use_std430_as_default)
+{
+ assert(type->without_array()->is_struct()
+ || type->without_array()->is_interface());
+
+ unsigned record_array_count = 1;
+ char *name_copy = ralloc_strdup(NULL, name);
+
+ enum glsl_interface_packing packing =
+ type->get_internal_ifc_packing(use_std430_as_default);
+
+ recursion(type, &name_copy, strlen(name), false, NULL, packing, false,
+ record_array_count, NULL);
+ ralloc_free(name_copy);
+}
+
+void
+program_resource_visitor::process(ir_variable *var, bool use_std430_as_default)
+{
+ const glsl_type *t =
+ var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
+ process(var, t, use_std430_as_default);
+}
+
+void
+program_resource_visitor::process(ir_variable *var, const glsl_type *var_type,
+ bool use_std430_as_default)
+{
+ unsigned record_array_count = 1;
+ const bool row_major =
+ var->data.matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR;
+
+ enum glsl_interface_packing packing = var->get_interface_type() ?
+ var->get_interface_type()->
+ get_internal_ifc_packing(use_std430_as_default) :
+ var->type->get_internal_ifc_packing(use_std430_as_default);
+
+ const glsl_type *t = var_type;
+ const glsl_type *t_without_array = t->without_array();
+
+ /* false is always passed for the row_major parameter to the other
+ * processing functions because no information is available to do
+ * otherwise. See the warning in linker.h.
+ */
+ if (t_without_array->is_struct() ||
+ (t->is_array() && t->fields.array->is_array())) {
+ char *name = ralloc_strdup(NULL, var->name);
+ recursion(var->type, &name, strlen(name), row_major, NULL, packing,
+ false, record_array_count, NULL);
+ ralloc_free(name);
+ } else if (t_without_array->is_interface()) {
+ char *name = ralloc_strdup(NULL, t_without_array->name);
+ const glsl_struct_field *ifc_member = var->data.from_named_ifc_block ?
+ &t_without_array->
+ fields.structure[t_without_array->field_index(var->name)] : NULL;
+
+ recursion(t, &name, strlen(name), row_major, NULL, packing,
+ false, record_array_count, ifc_member);
+ ralloc_free(name);
+ } else {
+ this->set_record_array_count(record_array_count);
+ this->visit_field(t, var->name, row_major, NULL, packing, false);
+ }
+}
+
+void
+program_resource_visitor::recursion(const glsl_type *t, char **name,
+ size_t name_length, bool row_major,
+ const glsl_type *record_type,
+ const enum glsl_interface_packing packing,
+ bool last_field,
+ unsigned record_array_count,
+ const glsl_struct_field *named_ifc_member)
+{
+ /* Records need to have each field processed individually.
+ *
+ * Arrays of records need to have each array element processed
+ * individually, then each field of the resulting array elements processed
+ * individually.
+ */
+ if (t->is_interface() && named_ifc_member) {
+ ralloc_asprintf_rewrite_tail(name, &name_length, ".%s",
+ named_ifc_member->name);
+ recursion(named_ifc_member->type, name, name_length, row_major, NULL,
+ packing, false, record_array_count, NULL);
+ } else if (t->is_struct() || t->is_interface()) {
+ if (record_type == NULL && t->is_struct())
+ record_type = t;
+
+ if (t->is_struct())
+ this->enter_record(t, *name, row_major, packing);
+
+ for (unsigned i = 0; i < t->length; i++) {
+ const char *field = t->fields.structure[i].name;
+ size_t new_length = name_length;
+
+ if (t->is_interface() && t->fields.structure[i].offset != -1)
+ this->set_buffer_offset(t->fields.structure[i].offset);
+
+ /* Append '.field' to the current variable name. */
+ if (name_length == 0) {
+ ralloc_asprintf_rewrite_tail(name, &new_length, "%s", field);
+ } else {
+ ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
+ }
+
+ /* The layout of structures at the top level of the block is set
+ * during parsing. For matrices contained in multiple levels of
+ * structures in the block, the inner structures have no layout.
+ * These cases must potentially inherit the layout from the outer
+ * levels.
+ */
+ bool field_row_major = row_major;
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(t->fields.structure[i].matrix_layout);
+ if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ }
+
+ recursion(t->fields.structure[i].type, name, new_length,
+ field_row_major,
+ record_type,
+ packing,
+ (i + 1) == t->length, record_array_count, NULL);
+
+ /* Only the first leaf-field of the record gets called with the
+ * record type pointer.
+ */
+ record_type = NULL;
+ }
+
+ if (t->is_struct()) {
+ (*name)[name_length] = '\0';
+ this->leave_record(t, *name, row_major, packing);
+ }
+ } else if (t->without_array()->is_struct() ||
+ t->without_array()->is_interface() ||
+ (t->is_array() && t->fields.array->is_array())) {
+ if (record_type == NULL && t->fields.array->is_struct())
+ record_type = t->fields.array;
+
+ unsigned length = t->length;
+
+ /* Shader storage block unsized arrays: add subscript [0] to variable
+ * names.
+ */
+ if (t->is_unsized_array())
+ length = 1;
+
+ record_array_count *= length;
+
+ for (unsigned i = 0; i < length; i++) {
+ size_t new_length = name_length;
+
+ /* Append the subscript to the current variable name */
+ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
+
+ recursion(t->fields.array, name, new_length, row_major,
+ record_type,
+ packing,
+ (i + 1) == t->length, record_array_count,
+ named_ifc_member);
+
+ /* Only the first leaf-field of the record gets called with the
+ * record type pointer.
+ */
+ record_type = NULL;
+ }
+ } else {
+ this->set_record_array_count(record_array_count);
+ this->visit_field(t, *name, row_major, record_type, packing, last_field);
+ }
+}
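+
+/* Illustration of the name enumeration performed by recursion() above,
+ * for a hypothetical uniform:
+ *
+ * uniform struct { vec4 v; float f[2]; } s[2];
+ *
+ * visit_field() is invoked for "s[0].v", "s[0].f", "s[1].v" and
+ * "s[1].f", each with record_array_count = 2.
+ */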
+
+void
+program_resource_visitor::enter_record(const glsl_type *, const char *, bool,
+ const enum glsl_interface_packing)
+{
+}
+
+void
+program_resource_visitor::leave_record(const glsl_type *, const char *, bool,
+ const enum glsl_interface_packing)
+{
+}
+
+void
+program_resource_visitor::set_buffer_offset(unsigned)
+{
+}
+
+void
+program_resource_visitor::set_record_array_count(unsigned)
+{
+}
+
+namespace {
+
+/**
+ * Class to help calculate the storage requirements for a set of uniforms
+ *
+ * As uniforms are added to the active set the number of active uniforms and
+ * the storage requirements for those uniforms are accumulated. The active
+ * uniforms are added to the hash table supplied to the constructor.
+ *
+ * If the same uniform is added multiple times (i.e., once for each shader
+ * target), it will only be counted once.
+ */
+class count_uniform_size : public program_resource_visitor {
+public:
+ count_uniform_size(struct string_to_uint_map *map,
+ struct string_to_uint_map *hidden_map,
+ bool use_std430_as_default)
+ : num_active_uniforms(0), num_hidden_uniforms(0), num_values(0),
+ num_shader_samplers(0), num_shader_images(0),
+ num_shader_uniform_components(0), num_shader_subroutines(0),
+ is_buffer_block(false), is_shader_storage(false), map(map),
+ hidden_map(hidden_map), current_var(NULL),
+ use_std430_as_default(use_std430_as_default)
+ {
+ /* empty */
+ }
+
+ void start_shader()
+ {
+ this->num_shader_samplers = 0;
+ this->num_shader_images = 0;
+ this->num_shader_uniform_components = 0;
+ this->num_shader_subroutines = 0;
+ }
+
+ void process(ir_variable *var)
+ {
+ this->current_var = var;
+ this->is_buffer_block = var->is_in_buffer_block();
+ this->is_shader_storage = var->is_in_shader_storage_block();
+ if (var->is_interface_instance())
+ program_resource_visitor::process(var->get_interface_type(),
+ var->get_interface_type()->name,
+ use_std430_as_default);
+ else
+ program_resource_visitor::process(var, use_std430_as_default);
+ }
+
+ /**
+ * Total number of active uniforms counted
+ */
+ unsigned num_active_uniforms;
+
+ unsigned num_hidden_uniforms;
+
+ /**
+ * Number of data values required to back the storage for the active uniforms
+ */
+ unsigned num_values;
+
+ /**
+ * Number of samplers used
+ */
+ unsigned num_shader_samplers;
+
+ /**
+ * Number of images used
+ */
+ unsigned num_shader_images;
+
+ /**
+ * Number of uniforms used in the current shader
+ */
+ unsigned num_shader_uniform_components;
+
+ /**
+ * Number of subroutine uniforms used
+ */
+ unsigned num_shader_subroutines;
+
+ bool is_buffer_block;
+ bool is_shader_storage;
+
+ struct string_to_uint_map *map;
+
+private:
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool /* row_major */,
+ const glsl_type * /* record_type */,
+ const enum glsl_interface_packing,
+ bool /* last_field */)
+ {
+ assert(!type->without_array()->is_struct());
+ assert(!type->without_array()->is_interface());
+ assert(!(type->is_array() && type->fields.array->is_array()));
+
+ /* Count the number of samplers regardless of whether the uniform is
+ * already in the hash table. The hash table prevents adding the same
+ * uniform for multiple shader targets, but in this case we want to
+ * count it for each shader target.
+ */
+ const unsigned values = type->component_slots();
+ if (type->contains_subroutine()) {
+ this->num_shader_subroutines += values;
+ } else if (type->contains_sampler() && !current_var->data.bindless) {
+ /* Samplers (bound or bindless) are counted as two components as
+ * specified by ARB_bindless_texture. */
+ this->num_shader_samplers += values / 2;
+ } else if (type->contains_image() && !current_var->data.bindless) {
+ /* Images (bound or bindless) are counted as two components as
+ * specified by ARB_bindless_texture. */
+ this->num_shader_images += values / 2;
+
+ /* As drivers are likely to represent image uniforms as
+ * scalar indices, count them against the limit of uniform
+ * components in the default block. The spec allows image
+ * uniforms to use up no more than one scalar slot.
+ */
+ if (!is_shader_storage)
+ this->num_shader_uniform_components += values;
+ } else {
+ /* Accumulate the total number of uniform slots used by this shader.
+ * Note that samplers do not count against this limit because they
+ * don't use any storage on current hardware.
+ */
+ if (!is_buffer_block)
+ this->num_shader_uniform_components += values;
+ }
+
+ /* If the uniform is already in the map, there's nothing more to do.
+ */
+ unsigned id;
+ if (this->map->get(id, name))
+ return;
+
+ if (this->current_var->data.how_declared == ir_var_hidden) {
+ this->hidden_map->put(this->num_hidden_uniforms, name);
+ this->num_hidden_uniforms++;
+ } else {
+ this->map->put(this->num_active_uniforms-this->num_hidden_uniforms,
+ name);
+ }
+
+ /* Each leaf uniform occupies one entry in the list of active
+ * uniforms.
+ */
+ this->num_active_uniforms++;
+
+ if (!is_gl_identifier(name) && !is_shader_storage && !is_buffer_block)
+ this->num_values += values;
+ }
+
+ struct string_to_uint_map *hidden_map;
+
+ /**
+ * Current variable being processed.
+ */
+ ir_variable *current_var;
+
+ bool use_std430_as_default;
+};
+
+} /* anonymous namespace */
+
+unsigned
+link_calculate_matrix_stride(const glsl_type *matrix, bool row_major,
+ enum glsl_interface_packing packing)
+{
+ const unsigned N = matrix->is_double() ? 8 : 4;
+ const unsigned items =
+ row_major ? matrix->matrix_columns : matrix->vector_elements;
+
+ assert(items <= 4);
+
+ /* The matrix stride for std430 mat2xY matrices is not rounded up to
+ * vec4 size.
+ *
+ * Section 7.6.2.2 "Standard Uniform Block Layout" of the OpenGL 4.3 spec
+ * says:
+ *
+ * 2. If the member is a two- or four-component vector with components
+ * consuming N basic machine units, the base alignment is 2N or 4N,
+ * respectively.
+ * ...
+ * 4. If the member is an array of scalars or vectors, the base
+ * alignment and array stride are set to match the base alignment of
+ * a single array element, according to rules (1), (2), and (3), and
+ * rounded up to the base alignment of a vec4.
+ * ...
+ * 7. If the member is a row-major matrix with C columns and R rows, the
+ * matrix is stored identically to an array of R row vectors with C
+ * components each, according to rule (4).
+ * ...
+ *
+ * When using the std430 storage layout, shader storage blocks will be
+ * laid out in buffer storage identically to uniform and shader storage
+ * blocks using the std140 layout, except that the base alignment and
+ * stride of arrays of scalars and vectors in rule 4 and of structures
+ * in rule 9 are not rounded up a multiple of the base alignment of a
+ * vec4.
+ */
+ return packing == GLSL_INTERFACE_PACKING_STD430
+ ? (items < 3 ? items * N : glsl_align(items * N, 16))
+ : glsl_align(items * N, 16);
+}
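+
+/* Worked example for the rules above (illustrative): a std430 mat2 has
+ * items = 2 and N = 4, so its stride is the unrounded 2 * 4 = 8 bytes,
+ * where std140 would round up to 16. A row-major dmat3x2 has
+ * items = matrix_columns = 3 and N = 8, so both layouts give
+ * glsl_align(24, 16) = 32 bytes.
+ */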
+
+/**
+ * Class to help parcel out pieces of backing storage to uniforms
+ *
+ * Each uniform processed has some range of the \c gl_constant_value
+ * structures associated with it. The association is done by finding
+ * the uniform in the \c string_to_uint_map and using the value from
+ * the map to connect that slot in the \c gl_uniform_storage table
+ * with the next available slot in the \c gl_constant_value array.
+ *
+ * \warning
+ * This class assumes that every uniform that will be processed is
+ * already in the \c string_to_uint_map. In addition, it assumes that
+ * the \c gl_uniform_storage and \c gl_constant_value arrays are "big
+ * enough."
+ */
+class parcel_out_uniform_storage : public program_resource_visitor {
+public:
+ parcel_out_uniform_storage(struct gl_shader_program *prog,
+ struct string_to_uint_map *map,
+ struct gl_uniform_storage *uniforms,
+ union gl_constant_value *values,
+ bool use_std430_as_default)
+ : prog(prog), map(map), uniforms(uniforms),
+ use_std430_as_default(use_std430_as_default), values(values),
+ bindless_targets(NULL), bindless_access(NULL),
+ shader_storage_blocks_write_access(0)
+ {
+ }
+
+ virtual ~parcel_out_uniform_storage()
+ {
+ free(this->bindless_targets);
+ free(this->bindless_access);
+ }
+
+ void start_shader(gl_shader_stage shader_type)
+ {
+ assert(shader_type < MESA_SHADER_STAGES);
+ this->shader_type = shader_type;
+
+ this->shader_samplers_used = 0;
+ this->shader_shadow_samplers = 0;
+ this->next_sampler = 0;
+ this->next_image = 0;
+ this->next_subroutine = 0;
+ this->record_array_count = 1;
+ memset(this->targets, 0, sizeof(this->targets));
+
+ this->num_bindless_samplers = 0;
+ this->next_bindless_sampler = 0;
+ free(this->bindless_targets);
+ this->bindless_targets = NULL;
+
+ this->num_bindless_images = 0;
+ this->next_bindless_image = 0;
+ free(this->bindless_access);
+ this->bindless_access = NULL;
+ this->shader_storage_blocks_write_access = 0;
+ }
+
+ void set_and_process(ir_variable *var)
+ {
+ current_var = var;
+ field_counter = 0;
+ this->record_next_sampler = new string_to_uint_map;
+ this->record_next_bindless_sampler = new string_to_uint_map;
+ this->record_next_image = new string_to_uint_map;
+ this->record_next_bindless_image = new string_to_uint_map;
+
+ buffer_block_index = -1;
+ if (var->is_in_buffer_block()) {
+ struct gl_uniform_block *blks = var->is_in_shader_storage_block() ?
+ prog->data->ShaderStorageBlocks : prog->data->UniformBlocks;
+ unsigned num_blks = var->is_in_shader_storage_block() ?
+ prog->data->NumShaderStorageBlocks : prog->data->NumUniformBlocks;
+ bool is_interface_array =
+ var->is_interface_instance() && var->type->is_array();
+
+ if (is_interface_array) {
+ unsigned l = strlen(var->get_interface_type()->name);
+
+ for (unsigned i = 0; i < num_blks; i++) {
+ if (strncmp(var->get_interface_type()->name, blks[i].Name, l)
+ == 0 && blks[i].Name[l] == '[') {
+ buffer_block_index = i;
+ break;
+ }
+ }
+ } else {
+ for (unsigned i = 0; i < num_blks; i++) {
+ if (strcmp(var->get_interface_type()->name, blks[i].Name) == 0) {
+ buffer_block_index = i;
+ break;
+ }
+ }
+ }
+ assert(buffer_block_index != -1);
+
+ if (var->is_in_shader_storage_block() &&
+ !var->data.memory_read_only) {
+ unsigned array_size = is_interface_array ?
+ var->type->array_size() : 1;
+
+ STATIC_ASSERT(MAX_SHADER_STORAGE_BUFFERS <= 32);
+
+ /* Shaders that use too many SSBOs will fail to compile, so we
+ * don't need to worry about them here.
+ *
+ * For shaders that stay within the limit, record which blocks
+ * are writable:
+ */
+ if (buffer_block_index + array_size <= 32) {
+ shader_storage_blocks_write_access |=
+ u_bit_consecutive(buffer_block_index, array_size);
+ }
+ }
+
+ /* Uniform blocks that were specified with an instance name must be
+ * handled a little bit differently. The name of the variable is the
+ * name used to reference the uniform block instead of being the name
+ * of a variable within the block. Therefore, searching for the name
+ * within the block will fail.
+ */
+ if (var->is_interface_instance()) {
+ ubo_byte_offset = 0;
+ process(var->get_interface_type(),
+ var->get_interface_type()->name,
+ use_std430_as_default);
+ } else {
+ const struct gl_uniform_block *const block =
+ &blks[buffer_block_index];
+
+ assert(var->data.location != -1);
+
+ const struct gl_uniform_buffer_variable *const ubo_var =
+ &block->Uniforms[var->data.location];
+
+ ubo_byte_offset = ubo_var->Offset;
+ process(var, use_std430_as_default);
+ }
+ } else {
+ /* Store any explicit location and reset data location so we can
+ * reuse this variable for storing the uniform slot number.
+ */
+ this->explicit_location = current_var->data.location;
+ current_var->data.location = -1;
+
+ process(var, use_std430_as_default);
+ }
+ delete this->record_next_sampler;
+ delete this->record_next_bindless_sampler;
+ delete this->record_next_image;
+ delete this->record_next_bindless_image;
+ }
+
+ int buffer_block_index;
+ int ubo_byte_offset;
+ gl_shader_stage shader_type;
+
+private:
+ bool set_opaque_indices(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform,
+ const char *name, unsigned &next_index,
+ struct string_to_uint_map *record_next_index)
+ {
+ assert(base_type->is_sampler() || base_type->is_image());
+
+ if (this->record_array_count > 1) {
+ unsigned inner_array_size = MAX2(1, uniform->array_elements);
+ char *name_copy = ralloc_strdup(NULL, name);
+
+ /* Remove all array subscripts from the sampler/image name */
+ char *str_start;
+ const char *str_end;
+ while((str_start = strchr(name_copy, '[')) &&
+ (str_end = strchr(name_copy, ']'))) {
+ memmove(str_start, str_end + 1, 1 + strlen(str_end + 1));
+ }
+
+ unsigned index = 0;
+ if (record_next_index->get(index, name_copy)) {
+ /* In this case, we've already seen this uniform so we just use the
+ * next sampler/image index recorded the last time we visited.
+ */
+ uniform->opaque[shader_type].index = index;
+ index = inner_array_size + uniform->opaque[shader_type].index;
+ record_next_index->put(index, name_copy);
+
+ ralloc_free(name_copy);
+ /* Return as everything else has already been initialised in a
+ * previous pass.
+ */
+ return false;
+ } else {
+ /* We've never seen this uniform before so we need to allocate
+ * enough indices to store it.
+ *
+ * Nested struct arrays behave like arrays of arrays so we need to
+ * increase the index by the total number of elements of the
+ * sampler/image in case there is more than one sampler/image
+ * inside the structs. This allows the offset to be easily
+ * calculated for indirect indexing.
+ */
+ uniform->opaque[shader_type].index = next_index;
+ next_index += inner_array_size * this->record_array_count;
+
+ /* Store the next index for future passes over the struct array
+ */
+ index = uniform->opaque[shader_type].index + inner_array_size;
+ record_next_index->put(index, name_copy);
+ ralloc_free(name_copy);
+ }
+ } else {
+ /* Increment the sampler/image by 1 for non-arrays and by the number
+ * of array elements for arrays.
+ */
+ uniform->opaque[shader_type].index = next_index;
+ next_index += MAX2(1, uniform->array_elements);
+ }
+ return true;
+ }
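+
+ /* Illustration (hypothetical shader): for
+ *
+ * uniform struct { sampler2D tex; } s[3];
+ *
+ * record_array_count is 3 and inner_array_size is 1, so the first
+ * visit of "s[0].tex" reserves indices 0..2 at once; later visits of
+ * "s[1].tex" and "s[2].tex" read 1 and 2 back from record_next_index,
+ * which is keyed on the subscript-free name "s.tex".
+ */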
+
+ void handle_samplers(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform, const char *name)
+ {
+ if (base_type->is_sampler()) {
+ uniform->opaque[shader_type].active = true;
+
+ const gl_texture_index target = base_type->sampler_index();
+ const unsigned shadow = base_type->sampler_shadow;
+
+ if (current_var->data.bindless) {
+ if (!set_opaque_indices(base_type, uniform, name,
+ this->next_bindless_sampler,
+ this->record_next_bindless_sampler))
+ return;
+
+ this->num_bindless_samplers = this->next_bindless_sampler;
+
+ this->bindless_targets = (gl_texture_index *)
+ realloc(this->bindless_targets,
+ this->num_bindless_samplers * sizeof(gl_texture_index));
+
+ for (unsigned i = uniform->opaque[shader_type].index;
+ i < this->num_bindless_samplers;
+ i++) {
+ this->bindless_targets[i] = target;
+ }
+ } else {
+ if (!set_opaque_indices(base_type, uniform, name,
+ this->next_sampler,
+ this->record_next_sampler))
+ return;
+
+ for (unsigned i = uniform->opaque[shader_type].index;
+ i < MIN2(this->next_sampler, MAX_SAMPLERS);
+ i++) {
+ this->targets[i] = target;
+ this->shader_samplers_used |= 1U << i;
+ this->shader_shadow_samplers |= shadow << i;
+ }
+ }
+ }
+ }
+
+ void handle_images(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform, const char *name)
+ {
+ if (base_type->is_image()) {
+ uniform->opaque[shader_type].active = true;
+
+ /* Set image access qualifiers */
+ const GLenum access =
+ current_var->data.memory_read_only ?
+ (current_var->data.memory_write_only ? GL_NONE :
+ GL_READ_ONLY) :
+ (current_var->data.memory_write_only ? GL_WRITE_ONLY :
+ GL_READ_WRITE);
+
+ if (current_var->data.bindless) {
+ if (!set_opaque_indices(base_type, uniform, name,
+ this->next_bindless_image,
+ this->record_next_bindless_image))
+ return;
+
+ this->num_bindless_images = this->next_bindless_image;
+
+ this->bindless_access = (GLenum *)
+ realloc(this->bindless_access,
+ this->num_bindless_images * sizeof(GLenum));
+
+ for (unsigned i = uniform->opaque[shader_type].index;
+ i < this->num_bindless_images;
+ i++) {
+ this->bindless_access[i] = access;
+ }
+ } else {
+ if (!set_opaque_indices(base_type, uniform, name,
+ this->next_image,
+ this->record_next_image))
+ return;
+
+ for (unsigned i = uniform->opaque[shader_type].index;
+ i < MIN2(this->next_image, MAX_IMAGE_UNIFORMS);
+ i++) {
+ prog->_LinkedShaders[shader_type]->Program->sh.ImageAccess[i] = access;
+ }
+ }
+ }
+ }
+
+ void handle_subroutines(const glsl_type *base_type,
+ struct gl_uniform_storage *uniform)
+ {
+ if (base_type->is_subroutine()) {
+ uniform->opaque[shader_type].index = this->next_subroutine;
+ uniform->opaque[shader_type].active = true;
+
+ prog->_LinkedShaders[shader_type]->Program->sh.NumSubroutineUniforms++;
+
+ /* Increment the subroutine index by 1 for non-arrays and by the
+ * number of array elements for arrays.
+ */
+ this->next_subroutine += MAX2(1, uniform->array_elements);
+ }
+ }
+
+ virtual void set_buffer_offset(unsigned offset)
+ {
+ this->ubo_byte_offset = offset;
+ }
+
+ virtual void set_record_array_count(unsigned record_array_count)
+ {
+ this->record_array_count = record_array_count;
+ }
+
+ virtual void enter_record(const glsl_type *type, const char *,
+ bool row_major,
+ const enum glsl_interface_packing packing)
+ {
+ assert(type->is_struct());
+ if (this->buffer_block_index == -1)
+ return;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std430_base_alignment(row_major));
+ else
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std140_base_alignment(row_major));
+ }
+
+ virtual void leave_record(const glsl_type *type, const char *,
+ bool row_major,
+ const enum glsl_interface_packing packing)
+ {
+ assert(type->is_struct());
+ if (this->buffer_block_index == -1)
+ return;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std430_base_alignment(row_major));
+ else
+ this->ubo_byte_offset = glsl_align(
+ this->ubo_byte_offset, type->std140_base_alignment(row_major));
+ }
+
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool row_major, const glsl_type * /* record_type */,
+ const enum glsl_interface_packing packing,
+ bool /* last_field */)
+ {
+ assert(!type->without_array()->is_struct());
+ assert(!type->without_array()->is_interface());
+ assert(!(type->is_array() && type->fields.array->is_array()));
+
+ unsigned id;
+ bool found = this->map->get(id, name);
+ assert(found);
+
+ if (!found)
+ return;
+
+ const glsl_type *base_type;
+ if (type->is_array()) {
+ this->uniforms[id].array_elements = type->length;
+ base_type = type->fields.array;
+ } else {
+ this->uniforms[id].array_elements = 0;
+ base_type = type;
+ }
+
+ /* Initialise opaque data */
+ this->uniforms[id].opaque[shader_type].index = ~0;
+ this->uniforms[id].opaque[shader_type].active = false;
+
+ if (current_var->data.used || base_type->is_subroutine())
+ this->uniforms[id].active_shader_mask |= 1 << shader_type;
+
+ /* This assigns uniform indices to sampler and image uniforms. */
+ handle_samplers(base_type, &this->uniforms[id], name);
+ handle_images(base_type, &this->uniforms[id], name);
+ handle_subroutines(base_type, &this->uniforms[id]);
+
+ /* For array of arrays or struct arrays the base location may have
+ * already been set so don't set it again.
+ */
+ if (buffer_block_index == -1 && current_var->data.location == -1) {
+ current_var->data.location = id;
+ }
+
+ /* If there is already storage associated with this uniform or if the
+ * uniform is set as builtin, it means that it was set while processing
+ * an earlier shader stage. For example, we may be processing the
+ * uniform in the fragment shader, but the uniform was already processed
+ * in the vertex shader.
+ */
+ if (this->uniforms[id].storage != NULL || this->uniforms[id].builtin) {
+ return;
+ }
+
+ /* Assign explicit locations. */
+ if (current_var->data.explicit_location) {
+ /* Set sequential locations for struct fields. */
+ if (current_var->type->without_array()->is_struct() ||
+ current_var->type->is_array_of_arrays()) {
+ const unsigned entries = MAX2(1, this->uniforms[id].array_elements);
+ this->uniforms[id].remap_location =
+ this->explicit_location + field_counter;
+ field_counter += entries;
+ } else {
+ this->uniforms[id].remap_location = this->explicit_location;
+ }
+ } else {
+ /* Initialize to indicate that no location is set */
+ this->uniforms[id].remap_location = UNMAPPED_UNIFORM_LOC;
+ }
+
+ this->uniforms[id].name = ralloc_strdup(this->uniforms, name);
+ this->uniforms[id].type = base_type;
+ this->uniforms[id].num_driver_storage = 0;
+ this->uniforms[id].driver_storage = NULL;
+ this->uniforms[id].atomic_buffer_index = -1;
+ this->uniforms[id].hidden =
+ current_var->data.how_declared == ir_var_hidden;
+ this->uniforms[id].builtin = is_gl_identifier(name);
+
+ this->uniforms[id].is_shader_storage =
+ current_var->is_in_shader_storage_block();
+ this->uniforms[id].is_bindless = current_var->data.bindless;
+
+ /* Do not assign storage if the uniform is a builtin or buffer object */
+ if (!this->uniforms[id].builtin &&
+ !this->uniforms[id].is_shader_storage &&
+ this->buffer_block_index == -1)
+ this->uniforms[id].storage = this->values;
+
+ if (this->buffer_block_index != -1) {
+ this->uniforms[id].block_index = this->buffer_block_index;
+
+ unsigned alignment = type->std140_base_alignment(row_major);
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ alignment = type->std430_base_alignment(row_major);
+ this->ubo_byte_offset = glsl_align(this->ubo_byte_offset, alignment);
+ this->uniforms[id].offset = this->ubo_byte_offset;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->ubo_byte_offset += type->std430_size(row_major);
+ else
+ this->ubo_byte_offset += type->std140_size(row_major);
+
+ if (type->is_array()) {
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ this->uniforms[id].array_stride =
+ type->without_array()->std430_array_stride(row_major);
+ else
+ this->uniforms[id].array_stride =
+ glsl_align(type->without_array()->std140_size(row_major),
+ 16);
+ } else {
+ this->uniforms[id].array_stride = 0;
+ }
+
+ if (type->without_array()->is_matrix()) {
+ this->uniforms[id].matrix_stride =
+ link_calculate_matrix_stride(type->without_array(),
+ row_major,
+ packing);
+ this->uniforms[id].row_major = row_major;
+ } else {
+ this->uniforms[id].matrix_stride = 0;
+ this->uniforms[id].row_major = false;
+ }
+ } else {
+ this->uniforms[id].block_index = -1;
+ this->uniforms[id].offset = -1;
+ this->uniforms[id].array_stride = -1;
+ this->uniforms[id].matrix_stride = -1;
+ this->uniforms[id].row_major = false;
+ }
+
+ if (!this->uniforms[id].builtin &&
+ !this->uniforms[id].is_shader_storage &&
+ this->buffer_block_index == -1)
+ this->values += type->component_slots();
+
+ calculate_array_size_and_stride(prog, &this->uniforms[id],
+ use_std430_as_default);
+ }
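+
+ /* Illustration (std140, hypothetical instanced block):
+ *
+ * uniform UBO { float a; vec3 b; } u;
+ *
+ * visiting "UBO.a" leaves ubo_byte_offset at 4; the visit of "UBO.b"
+ * then aligns it to vec3's std140 base alignment of 16, so "b" gets
+ * offset 16 and the running offset advances to 28.
+ */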
+
+ /**
+ * Current program being processed.
+ */
+ struct gl_shader_program *prog;
+
+ struct string_to_uint_map *map;
+
+ struct gl_uniform_storage *uniforms;
+ unsigned next_sampler;
+ unsigned next_bindless_sampler;
+ unsigned next_image;
+ unsigned next_bindless_image;
+ unsigned next_subroutine;
+
+ bool use_std430_as_default;
+
+ /**
+ * Field counter is used to ensure that uniform structures with
+ * explicit locations get sequential locations.
+ */
+ unsigned field_counter;
+
+ /**
+ * Current variable being processed.
+ */
+ ir_variable *current_var;
+
+ /* Used to store the explicit location from current_var so that we can
+ * reuse the location field for storing the uniform slot id.
+ */
+ int explicit_location;
+
+ /* Stores total struct array elements including nested structs */
+ unsigned record_array_count;
+
+ /* Map for temporarily storing next sampler index when handling samplers in
+ * struct arrays.
+ */
+ struct string_to_uint_map *record_next_sampler;
+
+ /* Map for temporarily storing next image index when handling images in
+ * struct arrays.
+ */
+ struct string_to_uint_map *record_next_image;
+
+ /* Map for temporarily storing next bindless sampler index when handling
+ * bindless samplers in struct arrays.
+ */
+ struct string_to_uint_map *record_next_bindless_sampler;
+
+ /* Map for temporarily storing next bindless image index when handling
+ * bindless images in struct arrays.
+ */
+ struct string_to_uint_map *record_next_bindless_image;
+
+public:
+ union gl_constant_value *values;
+
+ gl_texture_index targets[MAX_SAMPLERS];
+
+ /**
+ * Mask of samplers used by the current shader stage.
+ */
+ unsigned shader_samplers_used;
+
+ /**
+ * Mask of samplers used by the current shader stage for shadows.
+ */
+ unsigned shader_shadow_samplers;
+
+ /**
+ * Number of bindless samplers used by the current shader stage.
+ */
+ unsigned num_bindless_samplers;
+
+ /**
+ * Texture targets for bindless samplers used by the current stage.
+ */
+ gl_texture_index *bindless_targets;
+
+ /**
+ * Number of bindless images used by the current shader stage.
+ */
+ unsigned num_bindless_images;
+
+ /**
+ * Access types for bindless images used by the current stage.
+ */
+ GLenum *bindless_access;
+
+ /**
+ * Bitmask of shader storage blocks not declared as read-only.
+ */
+ unsigned shader_storage_blocks_write_access;
+};
+
+static bool
+variable_is_referenced(ir_array_refcount_visitor &v, ir_variable *var)
+{
+ ir_array_refcount_entry *const entry = v.get_variable_entry(var);
+
+ return entry->is_referenced;
+}
+
+/**
+ * Walks the IR and updates the references to uniform blocks in the
+ * ir_variables to point at the linked shader's list (previously, they
+ * would point at the uniform block list in one of the pre-linked
+ * shaders).
+ */
+static void
+link_update_uniform_buffer_variables(struct gl_linked_shader *shader,
+ unsigned stage)
+{
+ ir_array_refcount_visitor v;
+
+ v.run(shader->ir);
+
+ foreach_in_list(ir_instruction, node, shader->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || !var->is_in_buffer_block())
+ continue;
+
+ assert(var->data.mode == ir_var_uniform ||
+ var->data.mode == ir_var_shader_storage);
+
+ unsigned num_blocks = var->data.mode == ir_var_uniform ?
+ shader->Program->info.num_ubos : shader->Program->info.num_ssbos;
+ struct gl_uniform_block **blks = var->data.mode == ir_var_uniform ?
+ shader->Program->sh.UniformBlocks :
+ shader->Program->sh.ShaderStorageBlocks;
+
+ if (var->is_interface_instance()) {
+ const ir_array_refcount_entry *const entry = v.get_variable_entry(var);
+
+ if (entry->is_referenced) {
+ /* Since this is an interface instance, the instance type will be
+ * same as the array-stripped variable type. If the variable type
+ * is an array, then the block names will be suffixed with [0]
+ * through [n-1]. Unlike for non-interface instances, there will
+ * not be structure types here, so the only name sentinel that we
+ * have to worry about is [.
+ */
+ assert(var->type->without_array() == var->get_interface_type());
+ const char sentinel = var->type->is_array() ? '[' : '\0';
+
+ const ptrdiff_t len = strlen(var->get_interface_type()->name);
+ for (unsigned i = 0; i < num_blocks; i++) {
+ const char *const begin = blks[i]->Name;
+ const char *const end = strchr(begin, sentinel);
+
+ if (end == NULL)
+ continue;
+
+ if (len != (end - begin))
+ continue;
+
+ /* Even when a match is found, do not "break" here. This could
+ * be an array of instances, and all elements of the array need
+ * to be marked as referenced.
+ */
+ if (strncmp(begin, var->get_interface_type()->name, len) == 0 &&
+ (!var->type->is_array() ||
+ entry->is_linearized_index_referenced(blks[i]->linearized_array_index))) {
+ blks[i]->stageref |= 1U << stage;
+ }
+ }
+ }
+
+ var->data.location = 0;
+ continue;
+ }
+
+ bool found = false;
+ char sentinel = '\0';
+
+ if (var->type->is_struct()) {
+ sentinel = '.';
+ } else if (var->type->is_array() && (var->type->fields.array->is_array()
+ || var->type->without_array()->is_struct())) {
+ sentinel = '[';
+ }
+
+ const unsigned l = strlen(var->name);
+ for (unsigned i = 0; i < num_blocks; i++) {
+ for (unsigned j = 0; j < blks[i]->NumUniforms; j++) {
+ if (sentinel) {
+ const char *begin = blks[i]->Uniforms[j].Name;
+ const char *end = strchr(begin, sentinel);
+
+ if (end == NULL)
+ continue;
+
+ if ((ptrdiff_t) l != (end - begin))
+ continue;
+
+ found = strncmp(var->name, begin, l) == 0;
+ } else {
+ found = strcmp(var->name, blks[i]->Uniforms[j].Name) == 0;
+ }
+
+ if (found) {
+ var->data.location = j;
+
+ if (variable_is_referenced(v, var))
+ blks[i]->stageref |= 1U << stage;
+
+ break;
+ }
+ }
+
+ if (found)
+ break;
+ }
+ assert(found);
+ }
+}
+
+/**
+ * Combine the hidden uniform hash map with the uniform hash map so that the
+ * hidden uniforms will be given indices at the end of the uniform storage
+ * array.
+ */
+static void
+assign_hidden_uniform_slot_id(const char *name, unsigned hidden_id,
+ void *closure)
+{
+ count_uniform_size *uniform_size = (count_uniform_size *) closure;
+ unsigned hidden_uniform_start = uniform_size->num_active_uniforms -
+ uniform_size->num_hidden_uniforms;
+
+ uniform_size->map->put(hidden_uniform_start + hidden_id, name);
+}
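+
+/* Illustration: with num_active_uniforms = 6, of which
+ * num_hidden_uniforms = 2, the visible uniforms keep slots 0..3 and the
+ * hidden ones are remapped to slots 4 and 5.
+ */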
+
+static void
+link_setup_uniform_remap_tables(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ unsigned total_entries = prog->NumExplicitUniformLocations;
+ unsigned empty_locs = prog->NumUniformRemapTable - total_entries;
+
+ /* Reserve all the explicit locations of the active uniforms. */
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ if (prog->data->UniformStorage[i].type->is_subroutine() ||
+ prog->data->UniformStorage[i].is_shader_storage)
+ continue;
+
+ if (prog->data->UniformStorage[i].remap_location !=
+ UNMAPPED_UNIFORM_LOC) {
+ /* How many new entries for this uniform? */
+ const unsigned entries =
+ MAX2(1, prog->data->UniformStorage[i].array_elements);
+
+ /* Set remap table entries point to correct gl_uniform_storage. */
+ for (unsigned j = 0; j < entries; j++) {
+ unsigned element_loc =
+ prog->data->UniformStorage[i].remap_location + j;
+ assert(prog->UniformRemapTable[element_loc] ==
+ INACTIVE_UNIFORM_EXPLICIT_LOCATION);
+ prog->UniformRemapTable[element_loc] =
+ &prog->data->UniformStorage[i];
+ }
+ }
+ }
+
+ /* Reserve locations for rest of the uniforms. */
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+
+ if (prog->data->UniformStorage[i].type->is_subroutine() ||
+ prog->data->UniformStorage[i].is_shader_storage)
+ continue;
+
+ /* Built-in uniforms should not get any location. */
+ if (prog->data->UniformStorage[i].builtin)
+ continue;
+
+ /* Explicit ones have been set already. */
+ if (prog->data->UniformStorage[i].remap_location != UNMAPPED_UNIFORM_LOC)
+ continue;
+
+ /* how many new entries for this uniform? */
+ const unsigned entries =
+ MAX2(1, prog->data->UniformStorage[i].array_elements);
+
+ /* Find UniformRemapTable for empty blocks where we can fit this uniform. */
+ int chosen_location = -1;
+
+ if (empty_locs)
+ chosen_location = link_util_find_empty_block(prog, &prog->data->UniformStorage[i]);
+
+ /* Add new entries to the total amount for checking against
+ * MAX_UNIFORM_LOCATIONS. This only applies to the default uniform block (-1),
+ * because locations of uniform block entries are not assignable.
+ */
+ if (prog->data->UniformStorage[i].block_index == -1)
+ total_entries += entries;
+
+ if (chosen_location != -1) {
+ empty_locs -= entries;
+ } else {
+ chosen_location = prog->NumUniformRemapTable;
+
+ /* resize remap table to fit new entries */
+ prog->UniformRemapTable =
+ reralloc(prog,
+ prog->UniformRemapTable,
+ gl_uniform_storage *,
+ prog->NumUniformRemapTable + entries);
+ prog->NumUniformRemapTable += entries;
+ }
+
+ /* set pointers for this uniform */
+ for (unsigned j = 0; j < entries; j++)
+ prog->UniformRemapTable[chosen_location + j] =
+ &prog->data->UniformStorage[i];
+
+ /* set the base location in remap table for the uniform */
+ prog->data->UniformStorage[i].remap_location = chosen_location;
+ }
+
+ /* Verify that total amount of entries for explicit and implicit locations
+ * is less than MAX_UNIFORM_LOCATIONS.
+ */
+
+ if (total_entries > ctx->Const.MaxUserAssignableUniformLocations) {
+ linker_error(prog, "count of uniform locations > MAX_UNIFORM_LOCATIONS"
+ "(%u > %u)", total_entries,
+ ctx->Const.MaxUserAssignableUniformLocations);
+ }
+
+ /* Reserve all the explicit locations of the active subroutine uniforms. */
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ if (!prog->data->UniformStorage[i].type->is_subroutine())
+ continue;
+
+ if (prog->data->UniformStorage[i].remap_location == UNMAPPED_UNIFORM_LOC)
+ continue;
+
+ /* How many new entries for this uniform? */
+ const unsigned entries =
+ MAX2(1, prog->data->UniformStorage[i].array_elements);
+
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int j = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[j]->Program;
+
+ if (!prog->data->UniformStorage[i].opaque[j].active)
+ continue;
+
+ /* Set remap table entries point to correct gl_uniform_storage. */
+ for (unsigned k = 0; k < entries; k++) {
+ unsigned element_loc =
+ prog->data->UniformStorage[i].remap_location + k;
+ assert(p->sh.SubroutineUniformRemapTable[element_loc] ==
+ INACTIVE_UNIFORM_EXPLICIT_LOCATION);
+ p->sh.SubroutineUniformRemapTable[element_loc] =
+ &prog->data->UniformStorage[i];
+ }
+ }
+ }
+
+ /* reserve subroutine locations */
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ if (!prog->data->UniformStorage[i].type->is_subroutine())
+ continue;
+
+ if (prog->data->UniformStorage[i].remap_location !=
+ UNMAPPED_UNIFORM_LOC)
+ continue;
+
+ const unsigned entries =
+ MAX2(1, prog->data->UniformStorage[i].array_elements);
+
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int j = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[j]->Program;
+
+ if (!prog->data->UniformStorage[i].opaque[j].active)
+ continue;
+
+ p->sh.SubroutineUniformRemapTable =
+ reralloc(p,
+ p->sh.SubroutineUniformRemapTable,
+ gl_uniform_storage *,
+ p->sh.NumSubroutineUniformRemapTable + entries);
+
+ for (unsigned k = 0; k < entries; k++) {
+ p->sh.SubroutineUniformRemapTable[p->sh.NumSubroutineUniformRemapTable + k] =
+ &prog->data->UniformStorage[i];
+ }
+ prog->data->UniformStorage[i].remap_location =
+ p->sh.NumSubroutineUniformRemapTable;
+ p->sh.NumSubroutineUniformRemapTable += entries;
+ }
+ }
+}
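+
+/* Illustration (hypothetical program): given
+ *
+ * layout(location = 2) uniform float u[3];
+ * uniform vec4 v;
+ *
+ * the first pass reserves remap slots 2..4 for "u"; the second pass then
+ * places "v" either in an unused earlier slot, if any, or at the end of
+ * the table, and records the result in remap_location.
+ */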
+
+static void
+link_assign_uniform_storage(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ const unsigned num_data_slots)
+{
+ /* On the outside chance that there were no uniforms, bail out.
+ */
+ if (prog->data->NumUniformStorage == 0)
+ return;
+
+ unsigned int boolean_true = ctx->Const.UniformBooleanTrue;
+
+ union gl_constant_value *data;
+ if (prog->data->UniformStorage == NULL) {
+ prog->data->UniformStorage = rzalloc_array(prog->data,
+ struct gl_uniform_storage,
+ prog->data->NumUniformStorage);
+ data = rzalloc_array(prog->data->UniformStorage,
+ union gl_constant_value, num_data_slots);
+ prog->data->UniformDataDefaults =
+ rzalloc_array(prog->data->UniformStorage,
+ union gl_constant_value, num_data_slots);
+ } else {
+ data = prog->data->UniformDataSlots;
+ }
+
+#ifndef NDEBUG
+ union gl_constant_value *data_end = &data[num_data_slots];
+#endif
+
+ parcel_out_uniform_storage parcel(prog, prog->UniformHash,
+ prog->data->UniformStorage, data,
+ ctx->Const.UseSTD430AsDefaultPacking);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *shader = prog->_LinkedShaders[i];
+
+ if (!shader)
+ continue;
+
+ parcel.start_shader((gl_shader_stage)i);
+
+ foreach_in_list(ir_instruction, node, shader->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if ((var == NULL) || (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_storage))
+ continue;
+
+ parcel.set_and_process(var);
+ }
+
+ shader->Program->SamplersUsed = parcel.shader_samplers_used;
+ shader->shadow_samplers = parcel.shader_shadow_samplers;
+ shader->Program->sh.ShaderStorageBlocksWriteAccess =
+ parcel.shader_storage_blocks_write_access;
+
+ if (parcel.num_bindless_samplers > 0) {
+ shader->Program->sh.NumBindlessSamplers = parcel.num_bindless_samplers;
+ shader->Program->sh.BindlessSamplers =
+ rzalloc_array(shader->Program, gl_bindless_sampler,
+ parcel.num_bindless_samplers);
+ for (unsigned j = 0; j < parcel.num_bindless_samplers; j++) {
+ shader->Program->sh.BindlessSamplers[j].target =
+ parcel.bindless_targets[j];
+ }
+ }
+
+ if (parcel.num_bindless_images > 0) {
+ shader->Program->sh.NumBindlessImages = parcel.num_bindless_images;
+ shader->Program->sh.BindlessImages =
+ rzalloc_array(shader->Program, gl_bindless_image,
+ parcel.num_bindless_images);
+ for (unsigned j = 0; j < parcel.num_bindless_images; j++) {
+ shader->Program->sh.BindlessImages[j].access =
+ parcel.bindless_access[j];
+ }
+ }
+
+ STATIC_ASSERT(ARRAY_SIZE(shader->Program->sh.SamplerTargets) ==
+ ARRAY_SIZE(parcel.targets));
+ for (unsigned j = 0; j < ARRAY_SIZE(parcel.targets); j++)
+ shader->Program->sh.SamplerTargets[j] = parcel.targets[j];
+ }
+
+#ifndef NDEBUG
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ assert(prog->data->UniformStorage[i].storage != NULL ||
+ prog->data->UniformStorage[i].builtin ||
+ prog->data->UniformStorage[i].is_shader_storage ||
+ prog->data->UniformStorage[i].block_index != -1);
+ }
+
+ assert(parcel.values == data_end);
+#endif
+
+ link_setup_uniform_remap_tables(ctx, prog);
+
+ /* Set shader cache fields */
+ prog->data->NumUniformDataSlots = num_data_slots;
+ prog->data->UniformDataSlots = data;
+
+ link_set_uniform_initializers(prog, boolean_true);
+}
+
+void
+link_assign_uniform_locations(struct gl_shader_program *prog,
+ struct gl_context *ctx)
+{
+ ralloc_free(prog->data->UniformStorage);
+ prog->data->UniformStorage = NULL;
+ prog->data->NumUniformStorage = 0;
+
+ if (prog->UniformHash != NULL) {
+ prog->UniformHash->clear();
+ } else {
+ prog->UniformHash = new string_to_uint_map;
+ }
+
+ /* First pass: Count the uniform resources used by the user-defined
+ * uniforms. While this happens, each active uniform will have an index
+ * assigned to it.
+ *
+ * Note: this is *NOT* the index that is returned to the application by
+ * glGetUniformLocation.
+ */
+ struct string_to_uint_map *hiddenUniforms = new string_to_uint_map;
+ count_uniform_size uniform_size(prog->UniformHash, hiddenUniforms,
+ ctx->Const.UseSTD430AsDefaultPacking);
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh == NULL)
+ continue;
+
+ link_update_uniform_buffer_variables(sh, i);
+
+ /* Reset various per-shader target counts.
+ */
+ uniform_size.start_shader();
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if ((var == NULL) || (var->data.mode != ir_var_uniform &&
+ var->data.mode != ir_var_shader_storage))
+ continue;
+
+ uniform_size.process(var);
+ }
+
+ if (uniform_size.num_shader_samplers >
+ ctx->Const.Program[i].MaxTextureImageUnits) {
+ linker_error(prog, "Too many %s shader texture samplers\n",
+ _mesa_shader_stage_to_string(i));
+ continue;
+ }
+
+ if (uniform_size.num_shader_images >
+ ctx->Const.Program[i].MaxImageUniforms) {
+ linker_error(prog, "Too many %s shader image uniforms (%u > %u)\n",
+ _mesa_shader_stage_to_string(i),
+ uniform_size.num_shader_images,
+ ctx->Const.Program[i].MaxImageUniforms);
+ continue;
+ }
+
+ sh->Program->info.num_textures = uniform_size.num_shader_samplers;
+ sh->Program->info.num_images = uniform_size.num_shader_images;
+ sh->num_uniform_components = uniform_size.num_shader_uniform_components;
+ sh->num_combined_uniform_components = sh->num_uniform_components;
+
+ for (unsigned i = 0; i < sh->Program->info.num_ubos; i++) {
+ sh->num_combined_uniform_components +=
+ sh->Program->sh.UniformBlocks[i]->UniformBufferSize / 4;
+ }
+ }
+
+ if (prog->data->LinkStatus == LINKING_FAILURE) {
+ delete hiddenUniforms;
+ return;
+ }
+
+ prog->data->NumUniformStorage = uniform_size.num_active_uniforms;
+ prog->data->NumHiddenUniforms = uniform_size.num_hidden_uniforms;
+
+ /* assign hidden uniforms a slot id */
+ hiddenUniforms->iterate(assign_hidden_uniform_slot_id, &uniform_size);
+ delete hiddenUniforms;
+
+ link_assign_uniform_storage(ctx, prog, uniform_size.num_values);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.cpp
new file mode 100644
index 0000000000..7af97cddc0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.cpp
@@ -0,0 +1,3188 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file link_varyings.cpp
+ *
+ * Linker functions related specifically to linking varyings between shader
+ * stages.
+ */
+
+
+#include "main/errors.h"
+#include "main/mtypes.h"
+#include "glsl_symbol_table.h"
+#include "glsl_parser_extras.h"
+#include "ir_optimization.h"
+#include "linker.h"
+#include "link_varyings.h"
+#include "main/macros.h"
+#include "util/hash_table.h"
+#include "util/u_math.h"
+#include "program.h"
+
+
+/**
+ * Get the varying type stripped of the outermost array if we're processing
+ * a stage whose varyings are arrays indexed by a vertex number (such as
+ * geometry shader inputs).
+ */
+static const glsl_type *
+get_varying_type(const ir_variable *var, gl_shader_stage stage)
+{
+ const glsl_type *type = var->type;
+
+ if (!var->data.patch &&
+ ((var->data.mode == ir_var_shader_out &&
+ stage == MESA_SHADER_TESS_CTRL) ||
+ (var->data.mode == ir_var_shader_in &&
+ (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
+ stage == MESA_SHADER_GEOMETRY)))) {
+ assert(type->is_array());
+ type = type->fields.array;
+ }
+
+ return type;
+}
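+
+/* Illustration: a geometry shader input declared as "in vec4 pos[3]"
+ * (one array element per vertex) is treated here as plain vec4, while a
+ * tessellation "patch" varying keeps its declared type.
+ */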
+
+static void
+create_xfb_varying_names(void *mem_ctx, const glsl_type *t, char **name,
+ size_t name_length, unsigned *count,
+ const char *ifc_member_name,
+ const glsl_type *ifc_member_t, char ***varying_names)
+{
+ if (t->is_interface()) {
+ size_t new_length = name_length;
+
+ assert(ifc_member_name && ifc_member_t);
+ ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", ifc_member_name);
+
+ create_xfb_varying_names(mem_ctx, ifc_member_t, name, new_length, count,
+ NULL, NULL, varying_names);
+ } else if (t->is_struct()) {
+ for (unsigned i = 0; i < t->length; i++) {
+ const char *field = t->fields.structure[i].name;
+ size_t new_length = name_length;
+
+ ralloc_asprintf_rewrite_tail(name, &new_length, ".%s", field);
+
+ create_xfb_varying_names(mem_ctx, t->fields.structure[i].type, name,
+ new_length, count, NULL, NULL,
+ varying_names);
+ }
+ } else if (t->without_array()->is_struct() ||
+ t->without_array()->is_interface() ||
+ (t->is_array() && t->fields.array->is_array())) {
+ for (unsigned i = 0; i < t->length; i++) {
+ size_t new_length = name_length;
+
+ /* Append the subscript to the current variable name */
+ ralloc_asprintf_rewrite_tail(name, &new_length, "[%u]", i);
+
+ create_xfb_varying_names(mem_ctx, t->fields.array, name, new_length,
+ count, ifc_member_name, ifc_member_t,
+ varying_names);
+ }
+ } else {
+ (*varying_names)[(*count)++] = ralloc_strdup(mem_ctx, *name);
+ }
+}
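+
+/* Illustration (hypothetical varying): capturing
+ *
+ * out struct { vec2 uv; } s[2];
+ *
+ * with xfb_offset generates the names "s[0].uv" and "s[1].uv".
+ */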
+
+static bool
+process_xfb_layout_qualifiers(void *mem_ctx, const gl_linked_shader *sh,
+ struct gl_shader_program *prog,
+ unsigned *num_tfeedback_decls,
+ char ***varying_names)
+{
+ bool has_xfb_qualifiers = false;
+
+ /* We still need to enable transform feedback mode even if xfb_stride is
+ * only applied to a global out. We also don't bother to propagate
+ * xfb_stride to interface block members, so this catches that case as well.
+ */
+ for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
+ if (prog->TransformFeedback.BufferStride[j]) {
+ has_xfb_qualifiers = true;
+ break;
+ }
+ }
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || var->data.mode != ir_var_shader_out)
+ continue;
+
+ /* From the ARB_enhanced_layouts spec:
+ *
+ * "Any shader making any static use (after preprocessing) of any of
+ * these *xfb_* qualifiers will cause the shader to be in a
+ * transform feedback capturing mode and hence responsible for
+ * describing the transform feedback setup. This mode will capture
+ * any output selected by *xfb_offset*, directly or indirectly, to
+ * a transform feedback buffer."
+ */
+ if (var->data.explicit_xfb_buffer || var->data.explicit_xfb_stride) {
+ has_xfb_qualifiers = true;
+ }
+
+ if (var->data.explicit_xfb_offset) {
+ *num_tfeedback_decls += var->type->varying_count();
+ has_xfb_qualifiers = true;
+ }
+ }
+
+ if (*num_tfeedback_decls == 0)
+ return has_xfb_qualifiers;
+
+ unsigned i = 0;
+ *varying_names = ralloc_array(mem_ctx, char *, *num_tfeedback_decls);
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || var->data.mode != ir_var_shader_out)
+ continue;
+
+ if (var->data.explicit_xfb_offset) {
+ char *name;
+ const glsl_type *type, *member_type;
+
+ if (var->data.from_named_ifc_block) {
+ type = var->get_interface_type();
+
+ /* Find the member type before it was altered by lowering */
+ const glsl_type *type_wa = type->without_array();
+ member_type =
+ type_wa->fields.structure[type_wa->field_index(var->name)].type;
+ name = ralloc_strdup(NULL, type_wa->name);
+ } else {
+ type = var->type;
+ member_type = NULL;
+ name = ralloc_strdup(NULL, var->name);
+ }
+ create_xfb_varying_names(mem_ctx, type, &name, strlen(name), &i,
+ var->name, member_type, varying_names);
+ ralloc_free(name);
+ }
+ }
+
+ assert(i == *num_tfeedback_decls);
+ return has_xfb_qualifiers;
+}
+
+/**
+ * Validate the types and qualifiers of an output from one stage against the
+ * matching input to another stage.
+ */
+static void
+cross_validate_types_and_qualifiers(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ const ir_variable *input,
+ const ir_variable *output,
+ gl_shader_stage consumer_stage,
+ gl_shader_stage producer_stage)
+{
+ /* Check that the types match between stages.
+ */
+ const glsl_type *type_to_match = input->type;
+
+ /* VS -> GS, VS -> TCS, VS -> TES, TES -> GS */
+ const bool extra_array_level = (producer_stage == MESA_SHADER_VERTEX &&
+ consumer_stage != MESA_SHADER_FRAGMENT) ||
+ consumer_stage == MESA_SHADER_GEOMETRY;
+ if (extra_array_level) {
+ assert(type_to_match->is_array());
+ type_to_match = type_to_match->fields.array;
+ }
+
+ if (type_to_match != output->type) {
+ if (output->type->is_struct()) {
+ /* Structures across shader stages can have different names
+ * but are considered to match in type if and only if their
+ * members match in name, type, qualification, and declaration
+ * order. The precision doesn't need to match.
+ */
+ if (!output->type->record_compare(type_to_match,
+ false, /* match_name */
+ true, /* match_locations */
+ false /* match_precision */)) {
+ linker_error(prog,
+ "%s shader output `%s' declared as struct `%s', "
+ "doesn't match in type with %s shader input "
+ "declared as struct `%s'\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ output->type->name,
+ _mesa_shader_stage_to_string(consumer_stage),
+ input->type->name);
+ }
+ } else if (!output->type->is_array() || !is_gl_identifier(output->name)) {
+ /* There is a bit of a special case for gl_TexCoord. This
+          * built-in is unsized by default.  Applications that access it
+          * with a variable index must redeclare it with a size.  There is
+          * some language in the GLSL spec that implies the fragment shader
+          * and vertex shader do not have to agree on this size.  Other
+          * drivers behave this way, and one or two applications seem to
+          * rely on it.
+ *
+ * Neither declaration needs to be modified here because the array
+ * sizes are fixed later when update_array_sizes is called.
+ *
+ * From page 48 (page 54 of the PDF) of the GLSL 1.10 spec:
+ *
+ * "Unlike user-defined varying variables, the built-in
+ * varying variables don't have a strict one-to-one
+ * correspondence between the vertex language and the
+ * fragment language."
+ */
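+         /* E.g. (illustrative): a vertex shader redeclaring
+          * "varying vec4 gl_TexCoord[4];" against a fragment shader
+          * redeclaring "varying vec4 gl_TexCoord[2];" does not take this
+          * error path, since gl_TexCoord is an array with a gl_ name; the
+          * sizes are reconciled later by update_array_sizes().
+          */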
+ linker_error(prog,
+ "%s shader output `%s' declared as type `%s', "
+ "but %s shader input declared as type `%s'\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ output->type->name,
+ _mesa_shader_stage_to_string(consumer_stage),
+ input->type->name);
+ return;
+ }
+ }
+
+ /* Check that all of the qualifiers match between stages.
+ */
+
+ /* According to the OpenGL and OpenGLES GLSL specs, the centroid qualifier
+ * should match until OpenGL 4.3 and OpenGLES 3.1. The OpenGLES 3.0
+ * conformance test suite does not verify that the qualifiers must match.
+ * The deqp test suite expects the opposite (OpenGLES 3.1) behavior for
+ * OpenGLES 3.0 drivers, so we relax the checking in all cases.
+ */
+ if (false /* always skip the centroid check */ &&
+ prog->data->Version < (prog->IsES ? 310 : 430) &&
+ input->data.centroid != output->data.centroid) {
+ linker_error(prog,
+ "%s shader output `%s' %s centroid qualifier, "
+ "but %s shader input %s centroid qualifier\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ (output->data.centroid) ? "has" : "lacks",
+ _mesa_shader_stage_to_string(consumer_stage),
+ (input->data.centroid) ? "has" : "lacks");
+ return;
+ }
+
+ if (input->data.sample != output->data.sample) {
+ linker_error(prog,
+ "%s shader output `%s' %s sample qualifier, "
+ "but %s shader input %s sample qualifier\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ (output->data.sample) ? "has" : "lacks",
+ _mesa_shader_stage_to_string(consumer_stage),
+ (input->data.sample) ? "has" : "lacks");
+ return;
+ }
+
+ if (input->data.patch != output->data.patch) {
+ linker_error(prog,
+ "%s shader output `%s' %s patch qualifier, "
+ "but %s shader input %s patch qualifier\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ (output->data.patch) ? "has" : "lacks",
+ _mesa_shader_stage_to_string(consumer_stage),
+ (input->data.patch) ? "has" : "lacks");
+ return;
+ }
+
+ /* The GLSL 4.30 and GLSL ES 3.00 specifications say:
+ *
+ * "As only outputs need be declared with invariant, an output from
+ * one shader stage will still match an input of a subsequent stage
+ * without the input being declared as invariant."
+ *
+ * while GLSL 4.20 says:
+ *
+ * "For variables leaving one shader and coming into another shader,
+ * the invariant keyword has to be used in both shaders, or a link
+ * error will result."
+ *
+ * and GLSL ES 1.00 section 4.6.4 "Invariance and Linking" says:
+ *
+ * "The invariance of varyings that are declared in both the vertex
+ * and fragment shaders must match."
+ */
+ if (input->data.explicit_invariant != output->data.explicit_invariant &&
+ prog->data->Version < (prog->IsES ? 300 : 430)) {
+ linker_error(prog,
+ "%s shader output `%s' %s invariant qualifier, "
+ "but %s shader input %s invariant qualifier\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ (output->data.explicit_invariant) ? "has" : "lacks",
+ _mesa_shader_stage_to_string(consumer_stage),
+ (input->data.explicit_invariant) ? "has" : "lacks");
+ return;
+ }
+
+   /* GLSL >= 4.40 removes the text requiring interpolation qualifiers
+    * to match across stages; they must only match within the same stage.
+    *
+    * From page 84 (page 90 of the PDF) of the GLSL 4.40 spec:
+    *
+    *    "It is a link-time error if, within the same stage, the interpolation
+    *    qualifiers of variables of the same name do not match."
+ *
+ * Section 4.3.9 (Interpolation) of the GLSL ES 3.00 spec says:
+ *
+ * "When no interpolation qualifier is present, smooth interpolation
+ * is used."
+ *
+ * So we match variables where one is smooth and the other has no explicit
+ * qualifier.
+ */
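+   /* Illustrative ES example: a producer "out mediump float f;" (no
+    * qualifier, hence implicitly smooth) matches a consumer declaring
+    * "smooth in mediump float f;" after the normalization below.
+    */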
+ unsigned input_interpolation = input->data.interpolation;
+ unsigned output_interpolation = output->data.interpolation;
+ if (prog->IsES) {
+ if (input_interpolation == INTERP_MODE_NONE)
+ input_interpolation = INTERP_MODE_SMOOTH;
+ if (output_interpolation == INTERP_MODE_NONE)
+ output_interpolation = INTERP_MODE_SMOOTH;
+ }
+ if (input_interpolation != output_interpolation &&
+ prog->data->Version < 440) {
+ if (!ctx->Const.AllowGLSLCrossStageInterpolationMismatch) {
+ linker_error(prog,
+ "%s shader output `%s' specifies %s "
+ "interpolation qualifier, "
+ "but %s shader input specifies %s "
+ "interpolation qualifier\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ interpolation_string(output->data.interpolation),
+ _mesa_shader_stage_to_string(consumer_stage),
+ interpolation_string(input->data.interpolation));
+ return;
+ } else {
+ linker_warning(prog,
+ "%s shader output `%s' specifies %s "
+ "interpolation qualifier, "
+ "but %s shader input specifies %s "
+ "interpolation qualifier\n",
+ _mesa_shader_stage_to_string(producer_stage),
+ output->name,
+ interpolation_string(output->data.interpolation),
+ _mesa_shader_stage_to_string(consumer_stage),
+ interpolation_string(input->data.interpolation));
+ }
+ }
+}
+
+/**
+ * Validate front and back color outputs against single color input
+ */
+static void
+cross_validate_front_and_back_color(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ const ir_variable *input,
+ const ir_variable *front_color,
+ const ir_variable *back_color,
+ gl_shader_stage consumer_stage,
+ gl_shader_stage producer_stage)
+{
+ if (front_color != NULL && front_color->data.assigned)
+ cross_validate_types_and_qualifiers(ctx, prog, input, front_color,
+ consumer_stage, producer_stage);
+
+ if (back_color != NULL && back_color->data.assigned)
+ cross_validate_types_and_qualifiers(ctx, prog, input, back_color,
+ consumer_stage, producer_stage);
+}
+
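+/* Example (illustrative): a fragment shader output declared as
+ * "layout(location = 2) out vec4 color;" has var->data.location ==
+ * FRAG_RESULT_DATA0 + 2, so compute_variable_location_slot() returns 2.
+ */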
+static unsigned
+compute_variable_location_slot(ir_variable *var, gl_shader_stage stage)
+{
+ unsigned location_start = VARYING_SLOT_VAR0;
+
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ if (var->data.mode == ir_var_shader_in)
+ location_start = VERT_ATTRIB_GENERIC0;
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ if (var->data.patch)
+ location_start = VARYING_SLOT_PATCH0;
+ break;
+ case MESA_SHADER_FRAGMENT:
+ if (var->data.mode == ir_var_shader_out)
+ location_start = FRAG_RESULT_DATA0;
+ break;
+ default:
+ break;
+ }
+
+ return var->data.location - location_start;
+}
+
+struct explicit_location_info {
+ ir_variable *var;
+ bool base_type_is_integer;
+ unsigned base_type_bit_size;
+ unsigned interpolation;
+ bool centroid;
+ bool sample;
+ bool patch;
+};
+
+static bool
+check_location_aliasing(struct explicit_location_info explicit_locations[][4],
+ ir_variable *var,
+ unsigned location,
+ unsigned component,
+ unsigned location_limit,
+ const glsl_type *type,
+ unsigned interpolation,
+ bool centroid,
+ bool sample,
+ bool patch,
+ gl_shader_program *prog,
+ gl_shader_stage stage)
+{
+ unsigned last_comp;
+ unsigned base_type_bit_size;
+ const glsl_type *type_without_array = type->without_array();
+ const bool base_type_is_integer =
+ glsl_base_type_is_integer(type_without_array->base_type);
+ const bool is_struct = type_without_array->is_struct();
+ if (is_struct) {
+ /* structs don't have a defined underlying base type so just treat all
+ * component slots as used and set the bit size to 0. If there is
+ * location aliasing, we'll fail anyway later.
+ */
+ last_comp = 4;
+ base_type_bit_size = 0;
+ } else {
+ unsigned dmul = type_without_array->is_64bit() ? 2 : 1;
+ last_comp = component + type_without_array->vector_elements * dmul;
+ base_type_bit_size =
+ glsl_base_type_get_bit_size(type_without_array->base_type);
+ }
+
+ while (location < location_limit) {
+ unsigned comp = 0;
+ while (comp < 4) {
+ struct explicit_location_info *info =
+ &explicit_locations[location][comp];
+
+ if (info->var) {
+ if (info->var->type->without_array()->is_struct() || is_struct) {
+ /* Structs cannot share location since they are incompatible
+ * with any other underlying numerical type.
+ */
+ linker_error(prog,
+ "%s shader has multiple %sputs sharing the "
+ "same location that don't have the same "
+ "underlying numerical type. Struct variable '%s', "
+ "location %u\n",
+ _mesa_shader_stage_to_string(stage),
+ var->data.mode == ir_var_shader_in ? "in" : "out",
+ is_struct ? var->name : info->var->name,
+ location);
+ return false;
+ } else if (comp >= component && comp < last_comp) {
+ /* Component aliasing is not allowed */
+ linker_error(prog,
+ "%s shader has multiple %sputs explicitly "
+ "assigned to location %d and component %d\n",
+ _mesa_shader_stage_to_string(stage),
+ var->data.mode == ir_var_shader_in ? "in" : "out",
+ location, comp);
+ return false;
+ } else {
+ /* From the OpenGL 4.60.5 spec, section 4.4.1 Input Layout
+ * Qualifiers, Page 67, (Location aliasing):
+ *
+ * " Further, when location aliasing, the aliases sharing the
+ * location must have the same underlying numerical type
+ * and bit width (floating-point or integer, 32-bit versus
+ * 64-bit, etc.) and the same auxiliary storage and
+ * interpolation qualification."
+ */
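+               /* Illustration (hypothetical shader): these outputs may
+                * legally alias location 0 because they share base type,
+                * bit width and qualifiers:
+                *
+                *    layout(location = 0, component = 0) out vec2 a;
+                *    layout(location = 0, component = 2) out vec2 b;
+                *
+                * Pairing, say, a float with an int at one location would
+                * instead hit the base-type error just below.
+                */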
+
+ /* If the underlying numerical type isn't integer, implicitly
+ * it will be float or else we would have failed by now.
+ */
+ if (info->base_type_is_integer != base_type_is_integer) {
+ linker_error(prog,
+ "%s shader has multiple %sputs sharing the "
+ "same location that don't have the same "
+ "underlying numerical type. Location %u "
+ "component %u.\n",
+ _mesa_shader_stage_to_string(stage),
+ var->data.mode == ir_var_shader_in ?
+ "in" : "out", location, comp);
+ return false;
+ }
+
+ if (info->base_type_bit_size != base_type_bit_size) {
+ linker_error(prog,
+ "%s shader has multiple %sputs sharing the "
+ "same location that don't have the same "
+ "underlying numerical bit size. Location %u "
+ "component %u.\n",
+ _mesa_shader_stage_to_string(stage),
+ var->data.mode == ir_var_shader_in ?
+ "in" : "out", location, comp);
+ return false;
+ }
+
+ if (info->interpolation != interpolation) {
+ linker_error(prog,
+ "%s shader has multiple %sputs sharing the "
+ "same location that don't have the same "
+ "interpolation qualification. Location %u "
+ "component %u.\n",
+ _mesa_shader_stage_to_string(stage),
+ var->data.mode == ir_var_shader_in ?
+ "in" : "out", location, comp);
+ return false;
+ }
+
+ if (info->centroid != centroid ||
+ info->sample != sample ||
+ info->patch != patch) {
+ linker_error(prog,
+ "%s shader has multiple %sputs sharing the "
+ "same location that don't have the same "
+ "auxiliary storage qualification. Location %u "
+ "component %u.\n",
+ _mesa_shader_stage_to_string(stage),
+ var->data.mode == ir_var_shader_in ?
+ "in" : "out", location, comp);
+ return false;
+ }
+ }
+ } else if (comp >= component && comp < last_comp) {
+ info->var = var;
+ info->base_type_is_integer = base_type_is_integer;
+ info->base_type_bit_size = base_type_bit_size;
+ info->interpolation = interpolation;
+ info->centroid = centroid;
+ info->sample = sample;
+ info->patch = patch;
+ }
+
+ comp++;
+
+ /* We need to do some special handling for doubles as dvec3 and
+ * dvec4 consume two consecutive locations. We don't need to
+ * worry about components beginning at anything other than 0 as
+ * the spec does not allow this for dvec3 and dvec4.
+ */
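+         /* E.g. (illustrative): a dvec4 at location N occupies all four
+          * components of locations N and N+1, so once the first four
+          * components are consumed we wrap to the next location here.
+          */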
+ if (comp == 4 && last_comp > 4) {
+ last_comp = last_comp - 4;
+ /* Bump location index and reset the component index */
+ location++;
+ comp = 0;
+ component = 0;
+ }
+ }
+
+ location++;
+ }
+
+ return true;
+}
+
+static bool
+validate_explicit_variable_location(struct gl_context *ctx,
+ struct explicit_location_info explicit_locations[][4],
+ ir_variable *var,
+ gl_shader_program *prog,
+ gl_linked_shader *sh)
+{
+ const glsl_type *type = get_varying_type(var, sh->Stage);
+ unsigned num_elements = type->count_attribute_slots(false);
+ unsigned idx = compute_variable_location_slot(var, sh->Stage);
+ unsigned slot_limit = idx + num_elements;
+
+ /* Vertex shader inputs and fragment shader outputs are validated in
+ * assign_attribute_or_color_locations() so we should not attempt to
+ * validate them again here.
+ */
+ unsigned slot_max;
+ if (var->data.mode == ir_var_shader_out) {
+ assert(sh->Stage != MESA_SHADER_FRAGMENT);
+ slot_max =
+ ctx->Const.Program[sh->Stage].MaxOutputComponents / 4;
+ } else {
+ assert(var->data.mode == ir_var_shader_in);
+ assert(sh->Stage != MESA_SHADER_VERTEX);
+ slot_max =
+ ctx->Const.Program[sh->Stage].MaxInputComponents / 4;
+ }
+
+ if (slot_limit > slot_max) {
+ linker_error(prog,
+ "Invalid location %u in %s shader\n",
+ idx, _mesa_shader_stage_to_string(sh->Stage));
+ return false;
+ }
+
+ const glsl_type *type_without_array = type->without_array();
+ if (type_without_array->is_interface()) {
+ for (unsigned i = 0; i < type_without_array->length; i++) {
+ glsl_struct_field *field = &type_without_array->fields.structure[i];
+ unsigned field_location = field->location -
+ (field->patch ? VARYING_SLOT_PATCH0 : VARYING_SLOT_VAR0);
+ if (!check_location_aliasing(explicit_locations, var,
+ field_location,
+ 0, field_location + 1,
+ field->type,
+ field->interpolation,
+ field->centroid,
+ field->sample,
+ field->patch,
+ prog, sh->Stage)) {
+ return false;
+ }
+ }
+ } else if (!check_location_aliasing(explicit_locations, var,
+ idx, var->data.location_frac,
+ slot_limit, type,
+ var->data.interpolation,
+ var->data.centroid,
+ var->data.sample,
+ var->data.patch,
+ prog, sh->Stage)) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Validate explicit locations for the inputs to the first stage and the
+ * outputs of the last stage in a program, if those are not the VS and FS
+ * shaders.
+ */
+void
+validate_first_and_last_interface_explicit_locations(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ gl_shader_stage first_stage,
+ gl_shader_stage last_stage)
+{
+ /* VS inputs and FS outputs are validated in
+ * assign_attribute_or_color_locations()
+ */
+ bool validate_first_stage = first_stage != MESA_SHADER_VERTEX;
+ bool validate_last_stage = last_stage != MESA_SHADER_FRAGMENT;
+ if (!validate_first_stage && !validate_last_stage)
+ return;
+
+ struct explicit_location_info explicit_locations[MAX_VARYING][4];
+
+ gl_shader_stage stages[2] = { first_stage, last_stage };
+ bool validate_stage[2] = { validate_first_stage, validate_last_stage };
+ ir_variable_mode var_direction[2] = { ir_var_shader_in, ir_var_shader_out };
+
+ for (unsigned i = 0; i < 2; i++) {
+ if (!validate_stage[i])
+ continue;
+
+ gl_shader_stage stage = stages[i];
+
+ gl_linked_shader *sh = prog->_LinkedShaders[stage];
+ assert(sh);
+
+ memset(explicit_locations, 0, sizeof(explicit_locations));
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL ||
+ !var->data.explicit_location ||
+ var->data.location < VARYING_SLOT_VAR0 ||
+ var->data.mode != var_direction[i])
+ continue;
+
+ if (!validate_explicit_variable_location(
+ ctx, explicit_locations, var, prog, sh)) {
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * Validate that outputs from one stage match inputs of another
+ */
+void
+cross_validate_outputs_to_inputs(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ gl_linked_shader *producer,
+ gl_linked_shader *consumer)
+{
+ glsl_symbol_table parameters;
+ struct explicit_location_info output_explicit_locations[MAX_VARYING][4] = {};
+ struct explicit_location_info input_explicit_locations[MAX_VARYING][4] = {};
+
+ /* Find all shader outputs in the "producer" stage.
+ */
+ foreach_in_list(ir_instruction, node, producer->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || var->data.mode != ir_var_shader_out)
+ continue;
+
+ if (!var->data.explicit_location
+ || var->data.location < VARYING_SLOT_VAR0)
+ parameters.add_variable(var);
+ else {
+ /* User-defined varyings with explicit locations are handled
+ * differently because they do not need to have matching names.
+ */
+ if (!validate_explicit_variable_location(ctx,
+ output_explicit_locations,
+ var, prog, producer)) {
+ return;
+ }
+ }
+ }
+
+
+ /* Find all shader inputs in the "consumer" stage. Any variables that have
+ * matching outputs already in the symbol table must have the same type and
+ * qualifiers.
+ *
+ * Exception: if the consumer is the geometry shader, then the inputs
+ * should be arrays and the type of the array element should match the type
+ * of the corresponding producer output.
+ */
+ foreach_in_list(ir_instruction, node, consumer->ir) {
+ ir_variable *const input = node->as_variable();
+
+ if (input == NULL || input->data.mode != ir_var_shader_in)
+ continue;
+
+ if (strcmp(input->name, "gl_Color") == 0 && input->data.used) {
+ const ir_variable *const front_color =
+ parameters.get_variable("gl_FrontColor");
+
+ const ir_variable *const back_color =
+ parameters.get_variable("gl_BackColor");
+
+ cross_validate_front_and_back_color(ctx, prog, input,
+ front_color, back_color,
+ consumer->Stage, producer->Stage);
+ } else if (strcmp(input->name, "gl_SecondaryColor") == 0 && input->data.used) {
+ const ir_variable *const front_color =
+ parameters.get_variable("gl_FrontSecondaryColor");
+
+ const ir_variable *const back_color =
+ parameters.get_variable("gl_BackSecondaryColor");
+
+ cross_validate_front_and_back_color(ctx, prog, input,
+ front_color, back_color,
+ consumer->Stage, producer->Stage);
+ } else {
+ /* The rules for connecting inputs and outputs change in the presence
+ * of explicit locations. In this case, we no longer care about the
+ * names of the variables. Instead, we care only about the
+ * explicitly assigned location.
+ */
+ ir_variable *output = NULL;
+ if (input->data.explicit_location
+ && input->data.location >= VARYING_SLOT_VAR0) {
+
+ const glsl_type *type = get_varying_type(input, consumer->Stage);
+ unsigned num_elements = type->count_attribute_slots(false);
+ unsigned idx =
+ compute_variable_location_slot(input, consumer->Stage);
+ unsigned slot_limit = idx + num_elements;
+
+ if (!validate_explicit_variable_location(ctx,
+ input_explicit_locations,
+ input, prog, consumer)) {
+ return;
+ }
+
+ while (idx < slot_limit) {
+ if (idx >= MAX_VARYING) {
+ linker_error(prog,
+ "Invalid location %u in %s shader\n", idx,
+ _mesa_shader_stage_to_string(consumer->Stage));
+ return;
+ }
+
+ output = output_explicit_locations[idx][input->data.location_frac].var;
+
+ if (output == NULL) {
+ /* A linker failure should only happen when there is no
+ * output declaration and there is Static Use of the
+ * declared input.
+ */
+ if (input->data.used) {
+ linker_error(prog,
+ "%s shader input `%s' with explicit location "
+ "has no matching output\n",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ input->name);
+ break;
+ }
+ } else if (input->data.location != output->data.location) {
+ linker_error(prog,
+ "%s shader input `%s' with explicit location "
+ "has no matching output\n",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ input->name);
+ break;
+ }
+ idx++;
+ }
+ } else {
+ output = parameters.get_variable(input->name);
+ }
+
+ if (output != NULL) {
+ /* Interface blocks have their own validation elsewhere so don't
+ * try validating them here.
+ */
+ if (!(input->get_interface_type() &&
+ output->get_interface_type()))
+ cross_validate_types_and_qualifiers(ctx, prog, input, output,
+ consumer->Stage,
+ producer->Stage);
+ } else {
+            /* Check for input vars with unmatched output vars in the
+             * previous stage, taking into account that interface blocks
+             * could have a matching output with a different name, in which
+             * case we ignore them.
+             */
+ assert(!input->data.assigned);
+ if (input->data.used && !input->get_interface_type() &&
+ !input->data.explicit_location)
+ linker_error(prog,
+ "%s shader input `%s' "
+ "has no matching output in the previous stage\n",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ input->name);
+ }
+ }
+ }
+}
+
+/**
+ * Demote shader inputs and outputs that are not used in other stages, and
+ * remove them via dead code elimination.
+ */
+static void
+remove_unused_shader_inputs_and_outputs(bool is_separate_shader_object,
+ gl_linked_shader *sh,
+ enum ir_variable_mode mode)
+{
+ if (is_separate_shader_object)
+ return;
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || var->data.mode != int(mode))
+ continue;
+
+ /* A shader 'in' or 'out' variable is only really an input or output if
+ * its value is used by other shader stages. This will cause the
+ * variable to have a location assigned.
+ */
+ if (var->data.is_unmatched_generic_inout && !var->data.is_xfb_only) {
+ assert(var->data.mode != ir_var_temporary);
+
+ /* Assign zeros to demoted inputs to allow more optimizations. */
+ if (var->data.mode == ir_var_shader_in && !var->constant_value)
+ var->constant_value = ir_constant::zero(var, var->type);
+
+ var->data.mode = ir_var_auto;
+ }
+ }
+
+ /* Eliminate code that is now dead due to unused inputs/outputs being
+ * demoted.
+ */
+   while (do_dead_code(sh->ir, false))
+      ;
+}
+
+/**
+ * Initialize this object based on a string that was passed to
+ * glTransformFeedbackVaryings.
+ *
+ * If the input is malformed, this call still succeeds, but it sets
+ * this->var_name to the malformed input, so tfeedback_decl::find_output_var()
+ * will fail to find any matching variable.
+ */
+void
+tfeedback_decl::init(struct gl_context *ctx, const void *mem_ctx,
+ const char *input)
+{
+ /* We don't have to be pedantic about what is a valid GLSL variable name,
+ * because any variable with an invalid name can't exist in the IR anyway.
+ */
+
+ this->location = -1;
+ this->orig_name = input;
+ this->lowered_builtin_array_variable = none;
+ this->skip_components = 0;
+ this->next_buffer_separator = false;
+ this->matched_candidate = NULL;
+ this->stream_id = 0;
+ this->buffer = 0;
+ this->offset = 0;
+
+ if (ctx->Extensions.ARB_transform_feedback3) {
+ /* Parse gl_NextBuffer. */
+ if (strcmp(input, "gl_NextBuffer") == 0) {
+ this->next_buffer_separator = true;
+ return;
+ }
+
+ /* Parse gl_SkipComponents. */
+ if (strcmp(input, "gl_SkipComponents1") == 0)
+ this->skip_components = 1;
+ else if (strcmp(input, "gl_SkipComponents2") == 0)
+ this->skip_components = 2;
+ else if (strcmp(input, "gl_SkipComponents3") == 0)
+ this->skip_components = 3;
+ else if (strcmp(input, "gl_SkipComponents4") == 0)
+ this->skip_components = 4;
+
+ if (this->skip_components)
+ return;
+ }
+
+ /* Parse a declaration. */
+ const char *base_name_end;
+ long subscript = parse_program_resource_name(input, &base_name_end);
+ this->var_name = ralloc_strndup(mem_ctx, input, base_name_end - input);
+ if (this->var_name == NULL) {
+ _mesa_error_no_memory(__func__);
+ return;
+ }
+
+ if (subscript >= 0) {
+ this->array_subscript = subscript;
+ this->is_subscripted = true;
+ } else {
+ this->is_subscripted = false;
+ }
+
+ /* For drivers that lower gl_ClipDistance to gl_ClipDistanceMESA, this
+ * class must behave specially to account for the fact that gl_ClipDistance
+ * is converted from a float[8] to a vec4[2].
+ */
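+   /* Illustration: with this lowering, capturing "gl_ClipDistance[5]"
+    * actually captures component 1 of the second vec4 of gl_ClipDistanceMESA,
+    * which is why assign_location() treats the element size as 1 for
+    * lowered built-in arrays.
+    */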
+ if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
+ strcmp(this->var_name, "gl_ClipDistance") == 0) {
+ this->lowered_builtin_array_variable = clip_distance;
+ }
+ if (ctx->Const.ShaderCompilerOptions[MESA_SHADER_VERTEX].LowerCombinedClipCullDistance &&
+ strcmp(this->var_name, "gl_CullDistance") == 0) {
+ this->lowered_builtin_array_variable = cull_distance;
+ }
+
+ if (ctx->Const.LowerTessLevel &&
+ (strcmp(this->var_name, "gl_TessLevelOuter") == 0))
+ this->lowered_builtin_array_variable = tess_level_outer;
+ if (ctx->Const.LowerTessLevel &&
+ (strcmp(this->var_name, "gl_TessLevelInner") == 0))
+ this->lowered_builtin_array_variable = tess_level_inner;
+}
+
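+/* Examples of inputs accepted by init() (illustrative):
+ *
+ *    "foo"                -> var_name "foo", not subscripted
+ *    "foo[2]"             -> var_name "foo", array_subscript 2
+ *    "gl_NextBuffer"      -> next_buffer_separator (ARB_transform_feedback3)
+ *    "gl_SkipComponents3" -> skip_components 3 (ARB_transform_feedback3)
+ */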
+
+/**
+ * Determine whether two tfeedback_decl objects refer to the same variable and
+ * array index (if applicable).
+ */
+bool
+tfeedback_decl::is_same(const tfeedback_decl &x, const tfeedback_decl &y)
+{
+ assert(x.is_varying() && y.is_varying());
+
+ if (strcmp(x.var_name, y.var_name) != 0)
+ return false;
+ if (x.is_subscripted != y.is_subscripted)
+ return false;
+ if (x.is_subscripted && x.array_subscript != y.array_subscript)
+ return false;
+ return true;
+}
+
+
+/**
+ * Assign a location and stream ID for this tfeedback_decl object based on the
+ * transform feedback candidate found by find_candidate.
+ *
+ * If an error occurs, the error is reported through linker_error() and false
+ * is returned.
+ */
+bool
+tfeedback_decl::assign_location(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ assert(this->is_varying());
+
+ unsigned fine_location
+ = this->matched_candidate->toplevel_var->data.location * 4
+ + this->matched_candidate->toplevel_var->data.location_frac
+ + this->matched_candidate->offset;
+ const unsigned dmul =
+ this->matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
+
+ if (this->matched_candidate->type->is_array()) {
+ /* Array variable */
+ const unsigned matrix_cols =
+ this->matched_candidate->type->fields.array->matrix_columns;
+ const unsigned vector_elements =
+ this->matched_candidate->type->fields.array->vector_elements;
+ unsigned actual_array_size;
+ switch (this->lowered_builtin_array_variable) {
+ case clip_distance:
+ actual_array_size = prog->last_vert_prog ?
+ prog->last_vert_prog->info.clip_distance_array_size : 0;
+ break;
+ case cull_distance:
+ actual_array_size = prog->last_vert_prog ?
+ prog->last_vert_prog->info.cull_distance_array_size : 0;
+ break;
+ case tess_level_outer:
+ actual_array_size = 4;
+ break;
+ case tess_level_inner:
+ actual_array_size = 2;
+ break;
+ case none:
+ default:
+ actual_array_size = this->matched_candidate->type->array_size();
+ break;
+ }
+
+ if (this->is_subscripted) {
+ /* Check array bounds. */
+ if (this->array_subscript >= actual_array_size) {
+ linker_error(prog, "Transform feedback varying %s has index "
+ "%i, but the array size is %u.",
+ this->orig_name, this->array_subscript,
+ actual_array_size);
+ return false;
+ }
+ unsigned array_elem_size = this->lowered_builtin_array_variable ?
+ 1 : vector_elements * matrix_cols * dmul;
+ fine_location += array_elem_size * this->array_subscript;
+ this->size = 1;
+ } else {
+ this->size = actual_array_size;
+ }
+ this->vector_elements = vector_elements;
+ this->matrix_columns = matrix_cols;
+ if (this->lowered_builtin_array_variable)
+ this->type = GL_FLOAT;
+ else
+ this->type = this->matched_candidate->type->fields.array->gl_type;
+ } else {
+ /* Regular variable (scalar, vector, or matrix) */
+ if (this->is_subscripted) {
+ linker_error(prog, "Transform feedback varying %s requested, "
+ "but %s is not an array.",
+ this->orig_name, this->var_name);
+ return false;
+ }
+ this->size = 1;
+ this->vector_elements = this->matched_candidate->type->vector_elements;
+ this->matrix_columns = this->matched_candidate->type->matrix_columns;
+ this->type = this->matched_candidate->type->gl_type;
+ }
+ this->location = fine_location / 4;
+ this->location_frac = fine_location % 4;
+
+ /* From GL_EXT_transform_feedback:
+ * A program will fail to link if:
+ *
+ * * the total number of components to capture in any varying
+ * variable in <varyings> is greater than the constant
+ * MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS_EXT and the
+ * buffer mode is SEPARATE_ATTRIBS_EXT;
+ */
+ if (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
+ this->num_components() >
+ ctx->Const.MaxTransformFeedbackSeparateComponents) {
+ linker_error(prog, "Transform feedback varying %s exceeds "
+ "MAX_TRANSFORM_FEEDBACK_SEPARATE_COMPONENTS.",
+ this->orig_name);
+ return false;
+ }
+
+ /* Only transform feedback varyings can be assigned to non-zero streams,
+ * so assign the stream id here.
+ */
+ this->stream_id = this->matched_candidate->toplevel_var->data.stream;
+
+ unsigned array_offset = this->array_subscript * 4 * dmul;
+ unsigned struct_offset = this->matched_candidate->offset * 4 * dmul;
+ this->buffer = this->matched_candidate->toplevel_var->data.xfb_buffer;
+ this->offset = this->matched_candidate->toplevel_var->data.offset +
+ array_offset + struct_offset;
+
+ return true;
+}
+
+
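+/* The slot count computed below is ceil((num_components + location_frac)/4).
+ * For example (illustrative), a vec3 starting at location_frac 3 straddles
+ * two 4-component slots: (3 + 3 + 3) / 4 == 2.
+ */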
+unsigned
+tfeedback_decl::get_num_outputs() const
+{
+ if (!this->is_varying()) {
+ return 0;
+ }
+ return (this->num_components() + this->location_frac + 3)/4;
+}
+
+
+/**
+ * Update gl_transform_feedback_info to reflect this tfeedback_decl.
+ *
+ * If an error occurs, the error is reported through linker_error() and false
+ * is returned.
+ */
+bool
+tfeedback_decl::store(struct gl_context *ctx, struct gl_shader_program *prog,
+ struct gl_transform_feedback_info *info,
+ unsigned buffer, unsigned buffer_index,
+ const unsigned max_outputs,
+ BITSET_WORD *used_components[MAX_FEEDBACK_BUFFERS],
+ bool *explicit_stride, bool has_xfb_qualifiers,
+ const void* mem_ctx) const
+{
+ unsigned xfb_offset = 0;
+ unsigned size = this->size;
+ /* Handle gl_SkipComponents. */
+ if (this->skip_components) {
+ info->Buffers[buffer].Stride += this->skip_components;
+ size = this->skip_components;
+ goto store_varying;
+ }
+
+ if (this->next_buffer_separator) {
+ size = 0;
+ goto store_varying;
+ }
+
+ if (has_xfb_qualifiers) {
+ xfb_offset = this->offset / 4;
+ } else {
+ xfb_offset = info->Buffers[buffer].Stride;
+ }
+ info->Varyings[info->NumVarying].Offset = xfb_offset * 4;
+
+ {
+ unsigned location = this->location;
+ unsigned location_frac = this->location_frac;
+ unsigned num_components = this->num_components();
+
+ /* From GL_EXT_transform_feedback:
+ *
+ * " A program will fail to link if:
+ *
+ * * the total number of components to capture is greater than the
+ * constant MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS_EXT
+ * and the buffer mode is INTERLEAVED_ATTRIBS_EXT."
+ *
+ * From GL_ARB_enhanced_layouts:
+ *
+ * " The resulting stride (implicit or explicit) must be less than or
+ * equal to the implementation-dependent constant
+ * gl_MaxTransformFeedbackInterleavedComponents."
+ */
+ if ((prog->TransformFeedback.BufferMode == GL_INTERLEAVED_ATTRIBS ||
+ has_xfb_qualifiers) &&
+ xfb_offset + num_components >
+ ctx->Const.MaxTransformFeedbackInterleavedComponents) {
+ linker_error(prog,
+ "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
+ "limit has been exceeded.");
+ return false;
+ }
+
+ /* From the OpenGL 4.60.5 spec, section 4.4.2. Output Layout Qualifiers,
+ * Page 76, (Transform Feedback Layout Qualifiers):
+ *
+ * " No aliasing in output buffers is allowed: It is a compile-time or
+ * link-time error to specify variables with overlapping transform
+ * feedback offsets."
+ */
+ const unsigned max_components =
+ ctx->Const.MaxTransformFeedbackInterleavedComponents;
+ const unsigned first_component = xfb_offset;
+ const unsigned last_component = xfb_offset + num_components - 1;
+ const unsigned start_word = BITSET_BITWORD(first_component);
+ const unsigned end_word = BITSET_BITWORD(last_component);
+ BITSET_WORD *used;
+ assert(last_component < max_components);
+
+ if (!used_components[buffer]) {
+ used_components[buffer] =
+ rzalloc_array(mem_ctx, BITSET_WORD, BITSET_WORDS(max_components));
+ }
+ used = used_components[buffer];
+
+ for (unsigned word = start_word; word <= end_word; word++) {
+ unsigned start_range = 0;
+ unsigned end_range = BITSET_WORDBITS - 1;
+
+ if (word == start_word)
+ start_range = first_component % BITSET_WORDBITS;
+
+ if (word == end_word)
+ end_range = last_component % BITSET_WORDBITS;
+
+ if (used[word] & BITSET_RANGE(start_range, end_range)) {
+ linker_error(prog,
+ "variable '%s', xfb_offset (%d) is causing aliasing.",
+ this->orig_name, xfb_offset * 4);
+ return false;
+ }
+ used[word] |= BITSET_RANGE(start_range, end_range);
+ }
+
+ while (num_components > 0) {
+ unsigned output_size = MIN2(num_components, 4 - location_frac);
+ assert((info->NumOutputs == 0 && max_outputs == 0) ||
+ info->NumOutputs < max_outputs);
+
+ /* From the ARB_enhanced_layouts spec:
+ *
+ * "If such a block member or variable is not written during a shader
+ * invocation, the buffer contents at the assigned offset will be
+ * undefined. Even if there are no static writes to a variable or
+ * member that is assigned a transform feedback offset, the space is
+ * still allocated in the buffer and still affects the stride."
+ */
+ if (this->is_varying_written()) {
+ info->Outputs[info->NumOutputs].ComponentOffset = location_frac;
+ info->Outputs[info->NumOutputs].OutputRegister = location;
+ info->Outputs[info->NumOutputs].NumComponents = output_size;
+ info->Outputs[info->NumOutputs].StreamId = stream_id;
+ info->Outputs[info->NumOutputs].OutputBuffer = buffer;
+ info->Outputs[info->NumOutputs].DstOffset = xfb_offset;
+ ++info->NumOutputs;
+ }
+ info->Buffers[buffer].Stream = this->stream_id;
+ xfb_offset += output_size;
+
+ num_components -= output_size;
+ location++;
+ location_frac = 0;
+ }
+ }
+
+ if (explicit_stride && explicit_stride[buffer]) {
+ if (this->is_64bit() && info->Buffers[buffer].Stride % 2) {
+         linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
+                      "multiple of 8 as it's applied to a type that is or "
+                      "contains a double.",
+ info->Buffers[buffer].Stride * 4);
+ return false;
+ }
+
+ if (xfb_offset > info->Buffers[buffer].Stride) {
+ linker_error(prog, "xfb_offset (%d) overflows xfb_stride (%d) for "
+ "buffer (%d)", xfb_offset * 4,
+ info->Buffers[buffer].Stride * 4, buffer);
+ return false;
+ }
+ } else {
+ info->Buffers[buffer].Stride = xfb_offset;
+ }
+
+ store_varying:
+ info->Varyings[info->NumVarying].Name = ralloc_strdup(prog,
+ this->orig_name);
+ info->Varyings[info->NumVarying].Type = this->type;
+ info->Varyings[info->NumVarying].Size = size;
+ info->Varyings[info->NumVarying].BufferIndex = buffer_index;
+ info->NumVarying++;
+ info->Buffers[buffer].NumVaryings++;
+
+ return true;
+}
+
+
+const tfeedback_candidate *
+tfeedback_decl::find_candidate(gl_shader_program *prog,
+ hash_table *tfeedback_candidates)
+{
+ const char *name = this->var_name;
+ switch (this->lowered_builtin_array_variable) {
+ case none:
+ name = this->var_name;
+ break;
+ case clip_distance:
+ name = "gl_ClipDistanceMESA";
+ break;
+ case cull_distance:
+ name = "gl_CullDistanceMESA";
+ break;
+ case tess_level_outer:
+ name = "gl_TessLevelOuterMESA";
+ break;
+ case tess_level_inner:
+ name = "gl_TessLevelInnerMESA";
+ break;
+ }
+ hash_entry *entry = _mesa_hash_table_search(tfeedback_candidates, name);
+
+ this->matched_candidate = entry ?
+ (const tfeedback_candidate *) entry->data : NULL;
+
+ if (!this->matched_candidate) {
+ /* From GL_EXT_transform_feedback:
+ * A program will fail to link if:
+ *
+ * * any variable name specified in the <varyings> array is not
+ * declared as an output in the geometry shader (if present) or
+ * the vertex shader (if no geometry shader is present);
+ */
+ linker_error(prog, "Transform feedback varying %s undeclared.",
+ this->orig_name);
+ }
+
+ return this->matched_candidate;
+}
+
+/**
+ * Force a candidate over the previously matched one. It happens when a new
+ * varying needs to be created to match the xfb declaration, for example,
+ * to fullfil an alignment criteria.
+ */
+void
+tfeedback_decl::set_lowered_candidate(const tfeedback_candidate *candidate)
+{
+ this->matched_candidate = candidate;
+
+ /* The subscript part is no longer relevant */
+ this->is_subscripted = false;
+ this->array_subscript = 0;
+}
+
+
+/**
+ * Parse all the transform feedback declarations that were passed to
+ * glTransformFeedbackVaryings() and store them in tfeedback_decl objects.
+ *
+ * If an error occurs, the error is reported through linker_error() and false
+ * is returned.
+ */
+static bool
+parse_tfeedback_decls(struct gl_context *ctx, struct gl_shader_program *prog,
+ const void *mem_ctx, unsigned num_names,
+ char **varying_names, tfeedback_decl *decls)
+{
+ for (unsigned i = 0; i < num_names; ++i) {
+ decls[i].init(ctx, mem_ctx, varying_names[i]);
+
+ if (!decls[i].is_varying())
+ continue;
+
+ /* From GL_EXT_transform_feedback:
+ * A program will fail to link if:
+ *
+ * * any two entries in the <varyings> array specify the same varying
+ * variable;
+ *
+ * We interpret this to mean "any two entries in the <varyings> array
+ * specify the same varying variable and array index", since transform
+ * feedback of arrays would be useless otherwise.
+ */
+ for (unsigned j = 0; j < i; ++j) {
+ if (decls[j].is_varying()) {
+ if (tfeedback_decl::is_same(decls[i], decls[j])) {
+ linker_error(prog, "Transform feedback varying %s specified "
+ "more than once.", varying_names[i]);
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+
+static int
+cmp_xfb_offset(const void * x_generic, const void * y_generic)
+{
+ tfeedback_decl *x = (tfeedback_decl *) x_generic;
+ tfeedback_decl *y = (tfeedback_decl *) y_generic;
+
+ if (x->get_buffer() != y->get_buffer())
+ return x->get_buffer() - y->get_buffer();
+ return x->get_offset() - y->get_offset();
+}
+
+/**
+ * Store transform feedback location assignments into
+ * prog->sh.LinkedTransformFeedback based on the data stored in
+ * tfeedback_decls.
+ *
+ * If an error occurs, the error is reported through linker_error() and false
+ * is returned.
+ */
+static bool
+store_tfeedback_info(struct gl_context *ctx, struct gl_shader_program *prog,
+ unsigned num_tfeedback_decls,
+ tfeedback_decl *tfeedback_decls, bool has_xfb_qualifiers,
+ const void *mem_ctx)
+{
+ if (!prog->last_vert_prog)
+ return true;
+
+   /* Make sure MaxTransformFeedbackBuffers is less than 32 so the bitmask
+    * used to track active buffers doesn't overflow.
+ */
+ assert(ctx->Const.MaxTransformFeedbackBuffers < 32);
+
+ bool separate_attribs_mode =
+ prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS;
+
+ struct gl_program *xfb_prog = prog->last_vert_prog;
+ xfb_prog->sh.LinkedTransformFeedback =
+ rzalloc(xfb_prog, struct gl_transform_feedback_info);
+
+   /* The xfb_offset qualifier does not have to be used in increasing order;
+    * however, some drivers expect to receive the list of transform feedback
+    * declarations in order, so sort it now for convenience.
+ */
+ if (has_xfb_qualifiers) {
+ qsort(tfeedback_decls, num_tfeedback_decls, sizeof(*tfeedback_decls),
+ cmp_xfb_offset);
+ }
+
+ xfb_prog->sh.LinkedTransformFeedback->Varyings =
+ rzalloc_array(xfb_prog, struct gl_transform_feedback_varying_info,
+ num_tfeedback_decls);
+
+ unsigned num_outputs = 0;
+ for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
+ if (tfeedback_decls[i].is_varying_written())
+ num_outputs += tfeedback_decls[i].get_num_outputs();
+ }
+
+ xfb_prog->sh.LinkedTransformFeedback->Outputs =
+ rzalloc_array(xfb_prog, struct gl_transform_feedback_output,
+ num_outputs);
+
+ unsigned num_buffers = 0;
+ unsigned buffers = 0;
+ BITSET_WORD *used_components[MAX_FEEDBACK_BUFFERS] = {};
+
+ if (!has_xfb_qualifiers && separate_attribs_mode) {
+ /* GL_SEPARATE_ATTRIBS */
+ for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
+ if (!tfeedback_decls[i].store(ctx, prog,
+ xfb_prog->sh.LinkedTransformFeedback,
+ num_buffers, num_buffers, num_outputs,
+ used_components, NULL,
+ has_xfb_qualifiers, mem_ctx))
+ return false;
+
+ buffers |= 1 << num_buffers;
+ num_buffers++;
+ }
+ }
+ else {
+      /* GL_INTERLEAVED_ATTRIBS */
+ int buffer_stream_id = -1;
+ unsigned buffer =
+ num_tfeedback_decls ? tfeedback_decls[0].get_buffer() : 0;
+ bool explicit_stride[MAX_FEEDBACK_BUFFERS] = { false };
+
+ /* Apply any xfb_stride global qualifiers */
+ if (has_xfb_qualifiers) {
+ for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
+ if (prog->TransformFeedback.BufferStride[j]) {
+ explicit_stride[j] = true;
+ xfb_prog->sh.LinkedTransformFeedback->Buffers[j].Stride =
+ prog->TransformFeedback.BufferStride[j] / 4;
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
+ if (has_xfb_qualifiers &&
+ buffer != tfeedback_decls[i].get_buffer()) {
+ /* we have moved to the next buffer so reset stream id */
+ buffer_stream_id = -1;
+ num_buffers++;
+ }
+
+ if (tfeedback_decls[i].is_next_buffer_separator()) {
+ if (!tfeedback_decls[i].store(ctx, prog,
+ xfb_prog->sh.LinkedTransformFeedback,
+ buffer, num_buffers, num_outputs,
+ used_components, explicit_stride,
+ has_xfb_qualifiers, mem_ctx))
+ return false;
+ num_buffers++;
+ buffer_stream_id = -1;
+ continue;
+ }
+
+ if (has_xfb_qualifiers) {
+ buffer = tfeedback_decls[i].get_buffer();
+ } else {
+ buffer = num_buffers;
+ }
+
+ if (tfeedback_decls[i].is_varying()) {
+ if (buffer_stream_id == -1) {
+ /* First varying writing to this buffer: remember its stream */
+ buffer_stream_id = (int) tfeedback_decls[i].get_stream_id();
+
+ /* Only mark a buffer as active when there is a varying
+ * attached to it. This behaviour is based on a revised version
+ * of section 13.2.2 of the GL 4.6 spec.
+ */
+ buffers |= 1 << buffer;
+ } else if (buffer_stream_id !=
+ (int) tfeedback_decls[i].get_stream_id()) {
+ /* Varying writes to the same buffer from a different stream */
+ linker_error(prog,
+ "Transform feedback can't capture varyings belonging "
+ "to different vertex streams in a single buffer. "
+ "Varying %s writes to buffer from stream %u, other "
+ "varyings in the same buffer write from stream %u.",
+ tfeedback_decls[i].name(),
+ tfeedback_decls[i].get_stream_id(),
+ buffer_stream_id);
+ return false;
+ }
+ }
+
+ if (!tfeedback_decls[i].store(ctx, prog,
+ xfb_prog->sh.LinkedTransformFeedback,
+ buffer, num_buffers, num_outputs,
+ used_components, explicit_stride,
+ has_xfb_qualifiers, mem_ctx))
+ return false;
+ }
+ }
+
+ assert(xfb_prog->sh.LinkedTransformFeedback->NumOutputs == num_outputs);
+
+ xfb_prog->sh.LinkedTransformFeedback->ActiveBuffers = buffers;
+ return true;
+}
+
+namespace {
+
+/**
+ * Data structure recording the relationship between outputs of one shader
+ * stage (the "producer") and inputs of another (the "consumer").
+ */
+class varying_matches
+{
+public:
+ varying_matches(bool disable_varying_packing,
+ bool disable_xfb_packing,
+ bool xfb_enabled,
+ bool enhanced_layouts_enabled,
+ gl_shader_stage producer_stage,
+ gl_shader_stage consumer_stage);
+ ~varying_matches();
+ void record(ir_variable *producer_var, ir_variable *consumer_var);
+ unsigned assign_locations(struct gl_shader_program *prog,
+ uint8_t components[],
+ uint64_t reserved_slots);
+ void store_locations() const;
+
+private:
+ bool is_varying_packing_safe(const glsl_type *type,
+ const ir_variable *var) const;
+
+ /**
+ * If true, this driver disables varying packing, so all varyings need to
+ * be aligned on slot boundaries, and take up a number of slots equal to
+ * their number of matrix columns times their array size.
+ *
+ * Packing may also be disabled because our current packing method is not
+ * safe in SSO or versions of OpenGL where interpolation qualifiers are not
+ * guaranteed to match across stages.
+ */
+ const bool disable_varying_packing;
+
+ /**
+ * If true, this driver disables packing for varyings used by transform
+ * feedback.
+ */
+ const bool disable_xfb_packing;
+
+ /**
+ * If true, this driver has transform feedback enabled. The transform
+ * feedback code usually requires at least some packing be done even
+ * when varying packing is disabled, fortunately where transform feedback
+ * requires packing it's safe to override the disabled setting. See
+ * is_varying_packing_safe().
+ */
+ const bool xfb_enabled;
+
+ const bool enhanced_layouts_enabled;
+
+ /**
+ * Enum representing the order in which varyings are packed within a
+ * packing class.
+ *
+ * Currently we pack vec4's first, then vec2's, then scalar values, then
+ * vec3's. This order ensures that the only vectors that are at risk of
+ * having to be "double parked" (split between two adjacent varying slots)
+ * are the vec3's.
+ */
+ enum packing_order_enum {
+ PACKING_ORDER_VEC4,
+ PACKING_ORDER_VEC2,
+ PACKING_ORDER_SCALAR,
+ PACKING_ORDER_VEC3,
+ };
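+   /* Worked example (illustrative, assuming all varyings share a packing
+    * class): packing a vec4, two vec2s, a float and a vec3 in this order
+    * fills slots with no splitting:
+    *
+    *    slot 0: vec4
+    *    slot 1: vec2 | vec2
+    *    slot 2: float | vec3
+    */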
+
+ static unsigned compute_packing_class(const ir_variable *var);
+ static packing_order_enum compute_packing_order(const ir_variable *var);
+ static int match_comparator(const void *x_generic, const void *y_generic);
+ static int xfb_comparator(const void *x_generic, const void *y_generic);
+ static int not_xfb_comparator(const void *x_generic, const void *y_generic);
+
+ /**
+ * Structure recording the relationship between a single producer output
+ * and a single consumer input.
+ */
+ struct match {
+ /**
+ * Packing class for this varying, computed by compute_packing_class().
+ */
+ unsigned packing_class;
+
+ /**
+ * Packing order for this varying, computed by compute_packing_order().
+ */
+ packing_order_enum packing_order;
+ unsigned num_components;
+
+ /**
+ * The output variable in the producer stage.
+ */
+ ir_variable *producer_var;
+
+ /**
+ * The input variable in the consumer stage.
+ */
+ ir_variable *consumer_var;
+
+ /**
+ * The location which has been assigned for this varying. This is
+ * expressed in multiples of a float, with the first generic varying
+ * (i.e. the one referred to by VARYING_SLOT_VAR0) represented by the
+ * value 0.
+ */
+ unsigned generic_location;
+ } *matches;
+
+ /**
+ * The number of elements in the \c matches array that are currently in
+ * use.
+ */
+ unsigned num_matches;
+
+ /**
+ * The number of elements that were set aside for the \c matches array when
+ * it was allocated.
+ */
+ unsigned matches_capacity;
+
+ gl_shader_stage producer_stage;
+ gl_shader_stage consumer_stage;
+};
+
+} /* anonymous namespace */
+
+varying_matches::varying_matches(bool disable_varying_packing,
+ bool disable_xfb_packing,
+ bool xfb_enabled,
+ bool enhanced_layouts_enabled,
+ gl_shader_stage producer_stage,
+ gl_shader_stage consumer_stage)
+ : disable_varying_packing(disable_varying_packing),
+ disable_xfb_packing(disable_xfb_packing),
+ xfb_enabled(xfb_enabled),
+ enhanced_layouts_enabled(enhanced_layouts_enabled),
+ producer_stage(producer_stage),
+ consumer_stage(consumer_stage)
+{
+ /* Note: this initial capacity is rather arbitrarily chosen to be large
+ * enough for many cases without wasting an unreasonable amount of space.
+ * varying_matches::record() will resize the array if there are more than
+ * this number of varyings.
+ */
+ this->matches_capacity = 8;
+ this->matches = (match *)
+ malloc(sizeof(*this->matches) * this->matches_capacity);
+ this->num_matches = 0;
+}
+
+
+varying_matches::~varying_matches()
+{
+ free(this->matches);
+}
+
+
+/**
+ * Packing is always safe on individual arrays, structures, and matrices. It
+ * is also safe if the varying is only used for transform feedback.
+ */
+bool
+varying_matches::is_varying_packing_safe(const glsl_type *type,
+ const ir_variable *var) const
+{
+ if (consumer_stage == MESA_SHADER_TESS_EVAL ||
+ consumer_stage == MESA_SHADER_TESS_CTRL ||
+ producer_stage == MESA_SHADER_TESS_CTRL)
+ return false;
+
+ return xfb_enabled && (type->is_array() || type->is_struct() ||
+ type->is_matrix() || var->data.is_xfb_only);
+}
+
+
+/**
+ * Record the given producer/consumer variable pair in the list of variables
+ * that should later be assigned locations.
+ *
+ * It is permissible for \c consumer_var to be NULL (this happens if a
+ * variable is output by the producer and consumed by transform feedback, but
+ * not consumed by the consumer).
+ *
+ * If \c producer_var has already been paired up with a consumer_var, or
+ * producer_var is part of fixed pipeline functionality (and hence already has
+ * a location assigned), this function has no effect.
+ *
+ * Note: as a side effect this function may change the interpolation type of
+ * \c producer_var, but only when the change couldn't possibly affect
+ * rendering.
+ */
+void
+varying_matches::record(ir_variable *producer_var, ir_variable *consumer_var)
+{
+ assert(producer_var != NULL || consumer_var != NULL);
+
+ if ((producer_var && (!producer_var->data.is_unmatched_generic_inout ||
+ producer_var->data.explicit_location)) ||
+ (consumer_var && (!consumer_var->data.is_unmatched_generic_inout ||
+ consumer_var->data.explicit_location))) {
+ /* Either a location already exists for this variable (since it is part
+ * of fixed functionality), or it has already been recorded as part of a
+ * previous match.
+ */
+ return;
+ }
+
+ bool needs_flat_qualifier = consumer_var == NULL &&
+ (producer_var->type->contains_integer() ||
+ producer_var->type->contains_double());
+
+ if (!disable_varying_packing &&
+ (!disable_xfb_packing || producer_var == NULL || !producer_var->data.is_xfb) &&
+ (needs_flat_qualifier ||
+ (consumer_stage != MESA_SHADER_NONE && consumer_stage != MESA_SHADER_FRAGMENT))) {
+      /* Since this varying is not being consumed by the fragment shader, its
+       * interpolation type cannot possibly affect rendering.
+       * Also, this variable is non-flat and is (or contains) an integer
+       * or a double.
+       * If the consumer stage is unknown, don't modify the interpolation
+       * type as it could affect rendering later with separate shaders.
+       *
+       * lower_packed_varyings requires all integer varyings to be flat,
+       * regardless of where they appear.  We can trivially satisfy that
+       * requirement by changing the interpolation type to flat here.
+ */
+ if (producer_var) {
+ producer_var->data.centroid = false;
+ producer_var->data.sample = false;
+ producer_var->data.interpolation = INTERP_MODE_FLAT;
+ }
+
+ if (consumer_var) {
+ consumer_var->data.centroid = false;
+ consumer_var->data.sample = false;
+ consumer_var->data.interpolation = INTERP_MODE_FLAT;
+ }
+ }
+
+ if (this->num_matches == this->matches_capacity) {
+ this->matches_capacity *= 2;
+ this->matches = (match *)
+ realloc(this->matches,
+ sizeof(*this->matches) * this->matches_capacity);
+ }
+
+ /* We must use the consumer to compute the packing class because in GL4.4+
+ * there is no guarantee interpolation qualifiers will match across stages.
+ *
+ * From Section 4.5 (Interpolation Qualifiers) of the GLSL 4.30 spec:
+ *
+ * "The type and presence of interpolation qualifiers of variables with
+ * the same name declared in all linked shaders for the same cross-stage
+ * interface must match, otherwise the link command will fail.
+ *
+ * When comparing an output from one stage to an input of a subsequent
+ * stage, the input and output don't match if their interpolation
+ * qualifiers (or lack thereof) are not the same."
+ *
+    * This text was also in at least revision 7 of the 4.40 spec but is no
+ * longer in revision 9 and not in the 4.50 spec.
+ */
+ const ir_variable *const var = (consumer_var != NULL)
+ ? consumer_var : producer_var;
+ const gl_shader_stage stage = (consumer_var != NULL)
+ ? consumer_stage : producer_stage;
+ const glsl_type *type = get_varying_type(var, stage);
+
+ if (producer_var && consumer_var &&
+ consumer_var->data.must_be_shader_input) {
+ producer_var->data.must_be_shader_input = 1;
+ }
+
+ this->matches[this->num_matches].packing_class
+ = this->compute_packing_class(var);
+ this->matches[this->num_matches].packing_order
+ = this->compute_packing_order(var);
+ if ((this->disable_varying_packing && !is_varying_packing_safe(type, var)) ||
+ (this->disable_xfb_packing && var->data.is_xfb) ||
+ var->data.must_be_shader_input) {
+ unsigned slots = type->count_attribute_slots(false);
+ this->matches[this->num_matches].num_components = slots * 4;
+ } else {
+ this->matches[this->num_matches].num_components
+ = type->component_slots();
+ }
+
+ this->matches[this->num_matches].producer_var = producer_var;
+ this->matches[this->num_matches].consumer_var = consumer_var;
+ this->num_matches++;
+ if (producer_var)
+ producer_var->data.is_unmatched_generic_inout = 0;
+ if (consumer_var)
+ consumer_var->data.is_unmatched_generic_inout = 0;
+}
+
+
+/**
+ * Choose locations for all of the variable matches that were previously
+ * passed to varying_matches::record().
+ * \param components returns array[slot] of number of components used
+ * per slot (1, 2, 3 or 4)
+ * \param reserved_slots bitmask indicating which varying slots are already
+ * allocated
+ * \return number of slots (4-element vectors) allocated
+ */
+unsigned
+varying_matches::assign_locations(struct gl_shader_program *prog,
+ uint8_t components[],
+ uint64_t reserved_slots)
+{
+   /* If packing has been disabled then we cannot safely sort the varyings by
+    * class, as it may mean we are using a version of OpenGL where
+    * interpolation qualifiers are not guaranteed to match across
+    * shaders; sorting in this case could result in mismatching shader
+    * interfaces.
+ * When packing is disabled the sort orders varyings used by transform
+ * feedback first, but also depends on *undefined behaviour* of qsort to
+ * reverse the order of the varyings. See: xfb_comparator().
+ *
+ * If packing is only disabled for xfb varyings (mutually exclusive with
+ * disable_varying_packing), we then group varyings depending on if they
+ * are captured for transform feedback. The same *undefined behaviour* is
+ * taken advantage of.
+ */
+ if (this->disable_varying_packing) {
+ /* Only sort varyings that are only used by transform feedback. */
+ qsort(this->matches, this->num_matches, sizeof(*this->matches),
+ &varying_matches::xfb_comparator);
+ } else if (this->disable_xfb_packing) {
+ /* Only sort varyings that are NOT used by transform feedback. */
+ qsort(this->matches, this->num_matches, sizeof(*this->matches),
+ &varying_matches::not_xfb_comparator);
+ } else {
+ /* Sort varying matches into an order that makes them easy to pack. */
+ qsort(this->matches, this->num_matches, sizeof(*this->matches),
+ &varying_matches::match_comparator);
+ }
+
+ unsigned generic_location = 0;
+ unsigned generic_patch_location = MAX_VARYING*4;
+ bool previous_var_xfb = false;
+ bool previous_var_xfb_only = false;
+ unsigned previous_packing_class = ~0u;
+
+   /* For transform feedback separate mode, we know the number of attributes
+ * is <= the number of buffers. So packing isn't critical. In fact,
+ * packing vec3 attributes can cause trouble because splitting a vec3
+ * effectively creates an additional transform feedback output. The
+ * extra TFB output may exceed device driver limits.
+ */
+ const bool dont_pack_vec3 =
+ (prog->TransformFeedback.BufferMode == GL_SEPARATE_ATTRIBS &&
+ prog->TransformFeedback.NumVarying > 0);
+
+ for (unsigned i = 0; i < this->num_matches; i++) {
+ unsigned *location = &generic_location;
+ const ir_variable *var;
+ const glsl_type *type;
+ bool is_vertex_input = false;
+
+ if (matches[i].consumer_var) {
+ var = matches[i].consumer_var;
+ type = get_varying_type(var, consumer_stage);
+ if (consumer_stage == MESA_SHADER_VERTEX)
+ is_vertex_input = true;
+ } else {
+ var = matches[i].producer_var;
+ type = get_varying_type(var, producer_stage);
+ }
+
+ if (var->data.patch)
+ location = &generic_patch_location;
+
+ /* Advance to the next slot if this varying has a different packing
+ * class than the previous one, and we're not already on a slot
+ * boundary.
+ *
+ * Also advance if varying packing is disabled for transform feedback,
+ * and previous or current varying is used for transform feedback.
+ *
+       * Also advance to the next slot if packing is disabled. This makes sure
+       * we don't assign varyings the same location, which is possible
+       * because we still pack individual arrays, records and matrices even
+ * when packing is disabled. Note we don't advance to the next slot if
+ * we can pack varyings together that are only used for transform
+ * feedback.
+ */
+ if (var->data.must_be_shader_input ||
+ (this->disable_xfb_packing &&
+ (previous_var_xfb || var->data.is_xfb)) ||
+ (this->disable_varying_packing &&
+ !(previous_var_xfb_only && var->data.is_xfb_only)) ||
+ (previous_packing_class != this->matches[i].packing_class) ||
+ (this->matches[i].packing_order == PACKING_ORDER_VEC3 &&
+ dont_pack_vec3)) {
+ *location = ALIGN(*location, 4);
+ }
+
+ previous_var_xfb = var->data.is_xfb;
+ previous_var_xfb_only = var->data.is_xfb_only;
+ previous_packing_class = this->matches[i].packing_class;
+
+ /* The number of components taken up by this variable. For vertex shader
+ * inputs, we use the number of slots * 4, as they have different
+ * counting rules.
+ */
+ unsigned num_components = is_vertex_input ?
+ type->count_attribute_slots(is_vertex_input) * 4 :
+ this->matches[i].num_components;
+
+ /* The last slot for this variable, inclusive. */
+ unsigned slot_end = *location + num_components - 1;
+
+      /* FIXME: We could be smarter in the code below and loop back over,
+       * trying to fill any locations that we skipped because we couldn't pack
+       * the varying between explicit locations. For now just let the user
+ * hit the linking error if we run out of room and suggest they use
+ * explicit locations.
+ */
+ while (slot_end < MAX_VARYING * 4u) {
+ const unsigned slots = (slot_end / 4u) - (*location / 4u) + 1;
+ const uint64_t slot_mask = ((1ull << slots) - 1) << (*location / 4u);
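+
+         /* Worked example: with *location == 4 and num_components == 6,
+          * slot_end == 9, so slots == 2 and slot_mask has bits 1 and 2 set,
+          * covering the two vec4 slots this varying would occupy.
+          */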
+
+ assert(slots > 0);
+
+ if ((reserved_slots & slot_mask) == 0) {
+ break;
+ }
+
+ *location = ALIGN(*location + 1, 4);
+ slot_end = *location + num_components - 1;
+ }
+
+ if (!var->data.patch && slot_end >= MAX_VARYING * 4u) {
+ linker_error(prog, "insufficient contiguous locations available for "
+ "%s it is possible an array or struct could not be "
+ "packed between varyings with explicit locations. Try "
+ "using an explicit location for arrays and structs.",
+ var->name);
+ }
+
+ if (slot_end < MAX_VARYINGS_INCL_PATCH * 4u) {
+ for (unsigned j = *location / 4u; j < slot_end / 4u; j++)
+ components[j] = 4;
+ components[slot_end / 4u] = (slot_end & 3) + 1;
+ }
+
+ this->matches[i].generic_location = *location;
+
+ *location = slot_end + 1;
+ }
+
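+   /* Round the next free component index up to whole vec4 slots, e.g. a
+    * generic_location of 9 becomes 3 slots.
+    */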
+ return (generic_location + 3) / 4;
+}
+
+
+/**
+ * Update the producer and consumer shaders to reflect the location
+ * assignments that were made by varying_matches::assign_locations().
+ */
+void
+varying_matches::store_locations() const
+{
+   /* Check if the location needs to be packed with lower_packed_varyings() or if
+ * we can just use ARB_enhanced_layouts packing.
+ */
+ bool pack_loc[MAX_VARYINGS_INCL_PATCH] = { 0 };
+ const glsl_type *loc_type[MAX_VARYINGS_INCL_PATCH][4] = { {NULL, NULL} };
+
+ for (unsigned i = 0; i < this->num_matches; i++) {
+ ir_variable *producer_var = this->matches[i].producer_var;
+ ir_variable *consumer_var = this->matches[i].consumer_var;
+ unsigned generic_location = this->matches[i].generic_location;
+ unsigned slot = generic_location / 4;
+ unsigned offset = generic_location % 4;
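+
+      /* e.g. generic_location == 9 gives slot 2 and offset 1, i.e.
+       * component y of VARYING_SLOT_VAR2.
+       */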
+
+ if (producer_var) {
+ producer_var->data.location = VARYING_SLOT_VAR0 + slot;
+ producer_var->data.location_frac = offset;
+ }
+
+ if (consumer_var) {
+ assert(consumer_var->data.location == -1);
+ consumer_var->data.location = VARYING_SLOT_VAR0 + slot;
+ consumer_var->data.location_frac = offset;
+ }
+
+ /* Find locations suitable for native packing via
+ * ARB_enhanced_layouts.
+ */
+ if (producer_var && consumer_var) {
+ if (enhanced_layouts_enabled) {
+ const glsl_type *type =
+ get_varying_type(producer_var, producer_stage);
+ if (type->is_array() || type->is_matrix() || type->is_struct() ||
+ type->is_64bit()) {
+ unsigned comp_slots = type->component_slots() + offset;
+ unsigned slots = comp_slots / 4;
+ if (comp_slots % 4)
+ slots += 1;
+
+ for (unsigned j = 0; j < slots; j++) {
+ pack_loc[slot + j] = true;
+ }
+ } else if (offset + type->vector_elements > 4) {
+ pack_loc[slot] = true;
+ pack_loc[slot + 1] = true;
+ } else {
+ loc_type[slot][offset] = type;
+ }
+ }
+ }
+ }
+
+ /* Attempt to use ARB_enhanced_layouts for more efficient packing if
+ * suitable.
+ */
+ if (enhanced_layouts_enabled) {
+ for (unsigned i = 0; i < this->num_matches; i++) {
+ ir_variable *producer_var = this->matches[i].producer_var;
+ ir_variable *consumer_var = this->matches[i].consumer_var;
+ unsigned generic_location = this->matches[i].generic_location;
+ unsigned slot = generic_location / 4;
+
+ if (pack_loc[slot] || !producer_var || !consumer_var)
+ continue;
+
+ const glsl_type *type =
+ get_varying_type(producer_var, producer_stage);
+ bool type_match = true;
+ for (unsigned j = 0; j < 4; j++) {
+ if (loc_type[slot][j]) {
+ if (type->base_type != loc_type[slot][j]->base_type)
+ type_match = false;
+ }
+ }
+
+ if (type_match) {
+ producer_var->data.explicit_location = 1;
+ consumer_var->data.explicit_location = 1;
+ producer_var->data.explicit_component = 1;
+ consumer_var->data.explicit_component = 1;
+ }
+ }
+ }
+}
+
+
+/**
+ * Compute the "packing class" of the given varying. This is an unsigned
+ * integer with the property that two variables in the same packing class can
+ * be safely packed into the same vec4.
+ */
+unsigned
+varying_matches::compute_packing_class(const ir_variable *var)
+{
+ /* Without help from the back-end, there is no way to pack together
+ * variables with different interpolation types, because
+ * lower_packed_varyings must choose exactly one interpolation type for
+ * each packed varying it creates.
+ *
+ * However, we can safely pack together floats, ints, and uints, because:
+ *
+ * - varyings of base type "int" and "uint" must use the "flat"
+ * interpolation type, which can only occur in GLSL 1.30 and above.
+ *
+ * - On platforms that support GLSL 1.30 and above, lower_packed_varyings
+ * can store flat floats as ints without losing any information (using
+ * the ir_unop_bitcast_* opcodes).
+ *
+ * Therefore, the packing class depends only on the interpolation type.
+ */
+ const unsigned interp = var->is_interpolation_flat()
+ ? unsigned(INTERP_MODE_FLAT) : var->data.interpolation;
+
+ assert(interp < (1 << 3));
+
+ const unsigned packing_class = (interp << 0) |
+ (var->data.centroid << 3) |
+ (var->data.sample << 4) |
+ (var->data.patch << 5) |
+ (var->data.must_be_shader_input << 6);
+
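+   /* e.g. two smooth, non-centroid, non-sample varyings produce identical
+    * bit patterns and may share a vec4, while setting centroid on one of
+    * them flips bit 3 and forces a separate packing class.
+    */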
+ return packing_class;
+}
+
+
+/**
+ * Compute the "packing order" of the given varying. This is a sort key we
+ * use to determine when to attempt to pack the given varying relative to
+ * other varyings in the same packing class.
+ */
+varying_matches::packing_order_enum
+varying_matches::compute_packing_order(const ir_variable *var)
+{
+ const glsl_type *element_type = var->type;
+
+ while (element_type->is_array()) {
+ element_type = element_type->fields.array;
+ }
+
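+   /* e.g. for a vec2[3] varying, element_type ends up as vec2, so
+    * component_slots() % 4 == 2 and the result is PACKING_ORDER_VEC2.
+    */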
+ switch (element_type->component_slots() % 4) {
+ case 1: return PACKING_ORDER_SCALAR;
+ case 2: return PACKING_ORDER_VEC2;
+ case 3: return PACKING_ORDER_VEC3;
+ case 0: return PACKING_ORDER_VEC4;
+ default:
+ assert(!"Unexpected value of vector_elements");
+ return PACKING_ORDER_VEC4;
+ }
+}
+
+
+/**
+ * Comparison function passed to qsort() to sort varyings by packing_class and
+ * then by packing_order.
+ */
+int
+varying_matches::match_comparator(const void *x_generic, const void *y_generic)
+{
+ const match *x = (const match *) x_generic;
+ const match *y = (const match *) y_generic;
+
+ if (x->packing_class != y->packing_class)
+ return x->packing_class - y->packing_class;
+ return x->packing_order - y->packing_order;
+}
+
+
+/**
+ * Comparison function passed to qsort() to sort varyings used only by
+ * transform feedback when packing of other varyings is disabled.
+ */
+int
+varying_matches::xfb_comparator(const void *x_generic, const void *y_generic)
+{
+ const match *x = (const match *) x_generic;
+
+ if (x->producer_var != NULL && x->producer_var->data.is_xfb_only)
+ return match_comparator(x_generic, y_generic);
+
+ /* FIXME: When the comparator returns 0 it means the elements being
+ * compared are equivalent. However the qsort documentation says:
+ *
+ * "The order of equivalent elements is undefined."
+ *
+    * In practice the sort ends up reversing the order of the varyings, which
+    * means locations are also assigned in this reversed order, and that
+    * happens to be what we want. This is also what's happening in
+ * varying_matches::match_comparator().
+ */
+ return 0;
+}
+
+
+/**
+ * Comparison function passed to qsort() to sort varyings NOT used by
+ * transform feedback when packing of xfb varyings is disabled.
+ */
+int
+varying_matches::not_xfb_comparator(const void *x_generic, const void *y_generic)
+{
+ const match *x = (const match *) x_generic;
+
+ if (x->producer_var != NULL && !x->producer_var->data.is_xfb)
+ return match_comparator(x_generic, y_generic);
+
+ /* FIXME: When the comparator returns 0 it means the elements being
+ * compared are equivalent. However the qsort documentation says:
+ *
+ * "The order of equivalent elements is undefined."
+ *
+    * In practice the sort ends up reversing the order of the varyings, which
+    * means locations are also assigned in this reversed order, and that
+    * happens to be what we want. This is also what's happening in
+ * varying_matches::match_comparator().
+ */
+ return 0;
+}
+
+
+/**
+ * Is the given variable a varying variable to be counted against the
+ * limit in ctx->Const.MaxVarying?
+ * This includes variables such as texcoords, colors and generic
+ * varyings, but excludes variables such as gl_FrontFacing and gl_FragCoord.
+ */
+static bool
+var_counts_against_varying_limit(gl_shader_stage stage, const ir_variable *var)
+{
+ /* Only fragment shaders will take a varying variable as an input */
+ if (stage == MESA_SHADER_FRAGMENT &&
+ var->data.mode == ir_var_shader_in) {
+ switch (var->data.location) {
+ case VARYING_SLOT_POS:
+ case VARYING_SLOT_FACE:
+ case VARYING_SLOT_PNTC:
+ return false;
+ default:
+ return true;
+ }
+ }
+ return false;
+}
+
+
+/**
+ * Visitor class that generates tfeedback_candidate structs describing all
+ * possible targets of transform feedback.
+ *
+ * tfeedback_candidate structs are stored in the hash table
+ * tfeedback_candidates, which is passed to the constructor. This hash table
+ * maps varying names to instances of the tfeedback_candidate struct.
+ */
+class tfeedback_candidate_generator : public program_resource_visitor
+{
+public:
+ tfeedback_candidate_generator(void *mem_ctx,
+ hash_table *tfeedback_candidates,
+ gl_shader_stage stage)
+ : mem_ctx(mem_ctx),
+ tfeedback_candidates(tfeedback_candidates),
+ stage(stage),
+ toplevel_var(NULL),
+ varying_floats(0)
+ {
+ }
+
+ void process(ir_variable *var)
+ {
+ /* All named varying interface blocks should be flattened by now */
+ assert(!var->is_interface_instance());
+ assert(var->data.mode == ir_var_shader_out);
+
+ this->toplevel_var = var;
+ this->varying_floats = 0;
+ const glsl_type *t =
+ var->data.from_named_ifc_block ? var->get_interface_type() : var->type;
+ if (!var->data.patch && stage == MESA_SHADER_TESS_CTRL) {
+ assert(t->is_array());
+ t = t->fields.array;
+ }
+ program_resource_visitor::process(var, t, false);
+ }
+
+private:
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool /* row_major */,
+ const glsl_type * /* record_type */,
+ const enum glsl_interface_packing,
+ bool /* last_field */)
+ {
+ assert(!type->without_array()->is_struct());
+ assert(!type->without_array()->is_interface());
+
+ tfeedback_candidate *candidate
+ = rzalloc(this->mem_ctx, tfeedback_candidate);
+ candidate->toplevel_var = this->toplevel_var;
+ candidate->type = type;
+ candidate->offset = this->varying_floats;
+ _mesa_hash_table_insert(this->tfeedback_candidates,
+ ralloc_strdup(this->mem_ctx, name),
+ candidate);
+ this->varying_floats += type->component_slots();
+ }
+
+ /**
+ * Memory context used to allocate hash table keys and values.
+ */
+ void * const mem_ctx;
+
+ /**
+ * Hash table in which tfeedback_candidate objects should be stored.
+ */
+ hash_table * const tfeedback_candidates;
+
+ gl_shader_stage stage;
+
+ /**
+ * Pointer to the toplevel variable that is being traversed.
+ */
+ ir_variable *toplevel_var;
+
+ /**
+ * Total number of varying floats that have been visited so far. This is
+ * used to determine the offset to each varying within the toplevel
+ * variable.
+ */
+ unsigned varying_floats;
+};
+
+
+namespace linker {
+
+void
+populate_consumer_input_sets(void *mem_ctx, exec_list *ir,
+ hash_table *consumer_inputs,
+ hash_table *consumer_interface_inputs,
+ ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
+{
+ memset(consumer_inputs_with_locations,
+ 0,
+ sizeof(consumer_inputs_with_locations[0]) * VARYING_SLOT_TESS_MAX);
+
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *const input_var = node->as_variable();
+
+ if (input_var != NULL && input_var->data.mode == ir_var_shader_in) {
+ /* All interface blocks should have been lowered by this point */
+ assert(!input_var->type->is_interface());
+
+ if (input_var->data.explicit_location) {
+ /* assign_varying_locations only cares about finding the
+ * ir_variable at the start of a contiguous location block.
+ *
+ * - For !producer, consumer_inputs_with_locations isn't used.
+ *
+ * - For !consumer, consumer_inputs_with_locations is empty.
+ *
+ * For consumer && producer, if you were trying to set some
+ * ir_variable to the middle of a location block on the other side
+ * of producer/consumer, cross_validate_outputs_to_inputs() should
+ * be link-erroring due to either type mismatch or location
+ * overlaps. If the variables do match up, then they've got a
+ * matching data.location and you only looked at
+ * consumer_inputs_with_locations[var->data.location], not any
+ * following entries for the array/structure.
+ */
+ consumer_inputs_with_locations[input_var->data.location] =
+ input_var;
+ } else if (input_var->get_interface_type() != NULL) {
+ char *const iface_field_name =
+ ralloc_asprintf(mem_ctx, "%s.%s",
+ input_var->get_interface_type()->without_array()->name,
+ input_var->name);
+ _mesa_hash_table_insert(consumer_interface_inputs,
+ iface_field_name, input_var);
+ } else {
+ _mesa_hash_table_insert(consumer_inputs,
+ ralloc_strdup(mem_ctx, input_var->name),
+ input_var);
+ }
+ }
+ }
+}
+
+/**
+ * Find a variable from the consumer that "matches" the specified variable
+ *
+ * This function only finds inputs with names that match. There is no
+ * validation (here) that the types, etc. are compatible.
+ */
+ir_variable *
+get_matching_input(void *mem_ctx,
+ const ir_variable *output_var,
+ hash_table *consumer_inputs,
+ hash_table *consumer_interface_inputs,
+ ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX])
+{
+ ir_variable *input_var;
+
+ if (output_var->data.explicit_location) {
+ input_var = consumer_inputs_with_locations[output_var->data.location];
+ } else if (output_var->get_interface_type() != NULL) {
+ char *const iface_field_name =
+ ralloc_asprintf(mem_ctx, "%s.%s",
+ output_var->get_interface_type()->without_array()->name,
+ output_var->name);
+ hash_entry *entry = _mesa_hash_table_search(consumer_interface_inputs, iface_field_name);
+ input_var = entry ? (ir_variable *) entry->data : NULL;
+ } else {
+ hash_entry *entry = _mesa_hash_table_search(consumer_inputs, output_var->name);
+ input_var = entry ? (ir_variable *) entry->data : NULL;
+ }
+
+ return (input_var == NULL || input_var->data.mode != ir_var_shader_in)
+ ? NULL : input_var;
+}
+
+}
+
+static int
+io_variable_cmp(const void *_a, const void *_b)
+{
+ const ir_variable *const a = *(const ir_variable **) _a;
+ const ir_variable *const b = *(const ir_variable **) _b;
+
+ if (a->data.explicit_location && b->data.explicit_location)
+ return b->data.location - a->data.location;
+
+ if (a->data.explicit_location && !b->data.explicit_location)
+ return 1;
+
+ if (!a->data.explicit_location && b->data.explicit_location)
+ return -1;
+
+ return -strcmp(a->name, b->name);
+}
+
+/**
+ * Sort the shader IO variables into canonical order
+ */
+static void
+canonicalize_shader_io(exec_list *ir, enum ir_variable_mode io_mode)
+{
+ ir_variable *var_table[MAX_PROGRAM_OUTPUTS * 4];
+ unsigned num_variables = 0;
+
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || var->data.mode != io_mode)
+ continue;
+
+      /* If we have already encountered more I/O variables than could
+ * successfully link, bail.
+ */
+ if (num_variables == ARRAY_SIZE(var_table))
+ return;
+
+ var_table[num_variables++] = var;
+ }
+
+ if (num_variables == 0)
+ return;
+
+ /* Sort the list in reverse order (io_variable_cmp handles this). Later
+ * we're going to push the variables on to the IR list as a stack, so we
+ * want the last variable (in canonical order) to be first in the list.
+ */
+ qsort(var_table, num_variables, sizeof(var_table[0]), io_variable_cmp);
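+
+   /* e.g. if the canonical order is {a, b, c}, the sort yields {c, b, a};
+    * pushing each element onto the head of the list below then leaves
+    * a, b, c at the front in canonical order.
+    */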
+
+   /* Remove the variable from its current location in the IR, and put it at
+ * the front.
+ */
+ for (unsigned i = 0; i < num_variables; i++) {
+ var_table[i]->remove();
+ ir->push_head(var_table[i]);
+ }
+}
+
+/**
+ * Generate a bitfield map of the explicit locations for shader varyings.
+ *
+ * Note: For Tessellation shaders we are sitting right on the limits of the
+ * 64 bit map. Per-vertex and per-patch both have separate location domains
+ * with a max of MAX_VARYING.
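+ *
+ * For example, an output with an explicit location of VARYING_SLOT_VAR0 + 2
+ * that occupies two slots sets bits 2 and 3 of the returned mask.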
+ */
+static uint64_t
+reserved_varying_slot(struct gl_linked_shader *stage,
+ ir_variable_mode io_mode)
+{
+ assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
+ /* Avoid an overflow of the returned value */
+ assert(MAX_VARYINGS_INCL_PATCH <= 64);
+
+ uint64_t slots = 0;
+ int var_slot;
+
+ if (!stage)
+ return slots;
+
+ foreach_in_list(ir_instruction, node, stage->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || var->data.mode != io_mode ||
+ !var->data.explicit_location ||
+ var->data.location < VARYING_SLOT_VAR0)
+ continue;
+
+ var_slot = var->data.location - VARYING_SLOT_VAR0;
+
+ unsigned num_elements = get_varying_type(var, stage->Stage)
+ ->count_attribute_slots(io_mode == ir_var_shader_in &&
+ stage->Stage == MESA_SHADER_VERTEX);
+ for (unsigned i = 0; i < num_elements; i++) {
+ if (var_slot >= 0 && var_slot < MAX_VARYINGS_INCL_PATCH)
+ slots |= UINT64_C(1) << var_slot;
+ var_slot += 1;
+ }
+ }
+
+ return slots;
+}
+
+
+/**
+ * Assign locations for all variables that are produced in one pipeline stage
+ * (the "producer") and consumed in the next stage (the "consumer").
+ *
+ * Variables produced by the producer may also be consumed by transform
+ * feedback.
+ *
+ * \param num_tfeedback_decls is the number of declarations indicating
+ * variables that may be consumed by transform feedback.
+ *
+ * \param tfeedback_decls is a pointer to an array of tfeedback_decl objects
+ * representing the result of parsing the strings passed to
+ * glTransformFeedbackVaryings(). assign_location() will be called for
+ * each of these objects that matches one of the outputs of the
+ * producer.
+ *
+ * When num_tfeedback_decls is nonzero, it is permissible for the consumer to
+ * be NULL. In this case, varying locations are assigned solely based on the
+ * requirements of transform feedback.
+ */
+static bool
+assign_varying_locations(struct gl_context *ctx,
+ void *mem_ctx,
+ struct gl_shader_program *prog,
+ gl_linked_shader *producer,
+ gl_linked_shader *consumer,
+ unsigned num_tfeedback_decls,
+ tfeedback_decl *tfeedback_decls,
+ const uint64_t reserved_slots)
+{
+ /* Tessellation shaders treat inputs and outputs as shared memory and can
+ * access inputs and outputs of other invocations.
+ * Therefore, they can't be lowered to temps easily (and definitely not
+ * efficiently).
+ */
+ bool unpackable_tess =
+ (consumer && consumer->Stage == MESA_SHADER_TESS_EVAL) ||
+ (consumer && consumer->Stage == MESA_SHADER_TESS_CTRL) ||
+ (producer && producer->Stage == MESA_SHADER_TESS_CTRL);
+
+ /* Transform feedback code assumes varying arrays are packed, so if the
+ * driver has disabled varying packing, make sure to at least enable
+ * packing required by transform feedback. See below for exception.
+ */
+ bool xfb_enabled =
+ ctx->Extensions.EXT_transform_feedback && !unpackable_tess;
+
+   /* Some drivers actually require packing to be explicitly disabled
+ * for varyings used by transform feedback.
+ */
+ bool disable_xfb_packing =
+ ctx->Const.DisableTransformFeedbackPacking;
+
+ /* Disable packing on outward facing interfaces for SSO because in ES we
+ * need to retain the unpacked varying information for draw time
+ * validation.
+ *
+ * Packing is still enabled on individual arrays, structs, and matrices as
+ * these are required by the transform feedback code and it is still safe
+ * to do so. We also enable packing when a varying is only used for
+    * transform feedback and it's not an SSO.
+ */
+ bool disable_varying_packing =
+ ctx->Const.DisableVaryingPacking || unpackable_tess;
+ if (prog->SeparateShader && (producer == NULL || consumer == NULL))
+ disable_varying_packing = true;
+
+ varying_matches matches(disable_varying_packing,
+ disable_xfb_packing,
+ xfb_enabled,
+ ctx->Extensions.ARB_enhanced_layouts,
+ producer ? producer->Stage : MESA_SHADER_NONE,
+ consumer ? consumer->Stage : MESA_SHADER_NONE);
+ void *hash_table_ctx = ralloc_context(NULL);
+ hash_table *tfeedback_candidates =
+ _mesa_hash_table_create(hash_table_ctx, _mesa_hash_string,
+ _mesa_key_string_equal);
+ hash_table *consumer_inputs =
+ _mesa_hash_table_create(hash_table_ctx, _mesa_hash_string,
+ _mesa_key_string_equal);
+ hash_table *consumer_interface_inputs =
+ _mesa_hash_table_create(hash_table_ctx, _mesa_hash_string,
+ _mesa_key_string_equal);
+ ir_variable *consumer_inputs_with_locations[VARYING_SLOT_TESS_MAX] = {
+ NULL,
+ };
+
+ unsigned consumer_vertices = 0;
+ if (consumer && consumer->Stage == MESA_SHADER_GEOMETRY)
+ consumer_vertices = prog->Geom.VerticesIn;
+
+ /* Operate in a total of four passes.
+ *
+ * 1. Sort inputs / outputs into a canonical order. This is necessary so
+ * that inputs / outputs of separable shaders will be assigned
+ * predictable locations regardless of the order in which declarations
+ * appeared in the shader source.
+ *
+ * 2. Assign locations for any matching inputs and outputs.
+ *
+ * 3. Mark output variables in the producer that do not have locations as
+ * not being outputs. This lets the optimizer eliminate them.
+ *
+ * 4. Mark input variables in the consumer that do not have locations as
+ * not being inputs. This lets the optimizer eliminate them.
+ */
+ if (consumer)
+ canonicalize_shader_io(consumer->ir, ir_var_shader_in);
+
+ if (producer)
+ canonicalize_shader_io(producer->ir, ir_var_shader_out);
+
+ if (consumer)
+ linker::populate_consumer_input_sets(mem_ctx, consumer->ir,
+ consumer_inputs,
+ consumer_interface_inputs,
+ consumer_inputs_with_locations);
+
+ if (producer) {
+ foreach_in_list(ir_instruction, node, producer->ir) {
+ ir_variable *const output_var = node->as_variable();
+
+ if (output_var == NULL || output_var->data.mode != ir_var_shader_out)
+ continue;
+
+ /* Only geometry shaders can use non-zero streams */
+ assert(output_var->data.stream == 0 ||
+ (output_var->data.stream < MAX_VERTEX_STREAMS &&
+ producer->Stage == MESA_SHADER_GEOMETRY));
+
+ if (num_tfeedback_decls > 0) {
+ tfeedback_candidate_generator g(mem_ctx, tfeedback_candidates, producer->Stage);
+ /* From OpenGL 4.6 (Core Profile) spec, section 11.1.2.1
+ * ("Vertex Shader Variables / Output Variables")
+ *
+ * "Each program object can specify a set of output variables from
+ * one shader to be recorded in transform feedback mode (see
+ * section 13.3). The variables that can be recorded are those
+ * emitted by the first active shader, in order, from the
+ * following list:
+ *
+ * * geometry shader
+ * * tessellation evaluation shader
+ * * tessellation control shader
+ * * vertex shader"
+ *
+ * But on OpenGL ES 3.2, section 11.1.2.1 ("Vertex Shader
+ * Variables / Output Variables") tessellation control shader is
+ * not included in the stages list.
+ */
+ if (!prog->IsES || producer->Stage != MESA_SHADER_TESS_CTRL) {
+ g.process(output_var);
+ }
+ }
+
+ ir_variable *const input_var =
+ linker::get_matching_input(mem_ctx, output_var, consumer_inputs,
+ consumer_interface_inputs,
+ consumer_inputs_with_locations);
+
+ /* If a matching input variable was found, add this output (and the
+ * input) to the set. If this is a separable program and there is no
+ * consumer stage, add the output.
+ *
+ * Always add TCS outputs. They are shared by all invocations
+ * within a patch and can be used as shared memory.
+ */
+ if (input_var || (prog->SeparateShader && consumer == NULL) ||
+ producer->Stage == MESA_SHADER_TESS_CTRL) {
+ matches.record(output_var, input_var);
+ }
+
+ /* Only stream 0 outputs can be consumed in the next stage */
+ if (input_var && output_var->data.stream != 0) {
+ linker_error(prog, "output %s is assigned to stream=%d but "
+ "is linked to an input, which requires stream=0",
+ output_var->name, output_var->data.stream);
+ ralloc_free(hash_table_ctx);
+ return false;
+ }
+ }
+ } else {
+ /* If there's no producer stage, then this must be a separable program.
+ * For example, we may have a program that has just a fragment shader.
+ * Later this program will be used with some arbitrary vertex (or
+ * geometry) shader program. This means that locations must be assigned
+ * for all the inputs.
+ */
+ foreach_in_list(ir_instruction, node, consumer->ir) {
+ ir_variable *const input_var = node->as_variable();
+ if (input_var && input_var->data.mode == ir_var_shader_in) {
+ matches.record(NULL, input_var);
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
+ if (!tfeedback_decls[i].is_varying())
+ continue;
+
+ const tfeedback_candidate *matched_candidate
+ = tfeedback_decls[i].find_candidate(prog, tfeedback_candidates);
+
+ if (matched_candidate == NULL) {
+ ralloc_free(hash_table_ctx);
+ return false;
+ }
+
+ /* There are two situations where a new output varying is needed:
+ *
+ * - If varying packing is disabled for xfb and the current declaration
+ * is not aligned within the top level varying (e.g. vec3_arr[1]).
+ *
+ * - If a builtin variable needs to be copied to a new variable
+ * before its content is modified by another lowering pass (e.g.
+ * \c gl_Position is transformed by \c nir_lower_viewport_transform).
+ */
+ const unsigned dmul =
+ matched_candidate->type->without_array()->is_64bit() ? 2 : 1;
+ const bool lowered =
+ (disable_xfb_packing &&
+ !tfeedback_decls[i].is_aligned(dmul, matched_candidate->offset)) ||
+ (matched_candidate->toplevel_var->data.explicit_location &&
+ matched_candidate->toplevel_var->data.location < VARYING_SLOT_VAR0 &&
+ (ctx->Const.ShaderCompilerOptions[producer->Stage].LowerBuiltinVariablesXfb &
+ BITFIELD_BIT(matched_candidate->toplevel_var->data.location)));
+
+ if (lowered) {
+ ir_variable *new_var;
+ tfeedback_candidate *new_candidate = NULL;
+
+ new_var = lower_xfb_varying(mem_ctx, producer, tfeedback_decls[i].name());
+ if (new_var == NULL) {
+ ralloc_free(hash_table_ctx);
+ return false;
+ }
+
+ /* Create new candidate and replace matched_candidate */
+ new_candidate = rzalloc(mem_ctx, tfeedback_candidate);
+ new_candidate->toplevel_var = new_var;
+ new_candidate->toplevel_var->data.is_unmatched_generic_inout = 1;
+ new_candidate->type = new_var->type;
+ new_candidate->offset = 0;
+ _mesa_hash_table_insert(tfeedback_candidates,
+ ralloc_strdup(mem_ctx, new_var->name),
+ new_candidate);
+
+ tfeedback_decls[i].set_lowered_candidate(new_candidate);
+ matched_candidate = new_candidate;
+ }
+
+ /* Mark as xfb varying */
+ matched_candidate->toplevel_var->data.is_xfb = 1;
+
+ /* Mark xfb varyings as always active */
+ matched_candidate->toplevel_var->data.always_active_io = 1;
+
+ /* Mark any corresponding inputs as always active also. We must do this
+ * because we have a NIR pass that lowers vectors to scalars and another
+ * that removes unused varyings.
+ * We don't split varyings marked as always active because there is no
+ * point in doing so. This means we need to mark both sides of the
+    * interface as always active, otherwise we will have a mismatch and
+ * start removing things we shouldn't.
+ */
+ ir_variable *const input_var =
+ linker::get_matching_input(mem_ctx, matched_candidate->toplevel_var,
+ consumer_inputs,
+ consumer_interface_inputs,
+ consumer_inputs_with_locations);
+ if (input_var) {
+ input_var->data.is_xfb = 1;
+ input_var->data.always_active_io = 1;
+ }
+
+ if (matched_candidate->toplevel_var->data.is_unmatched_generic_inout) {
+ matched_candidate->toplevel_var->data.is_xfb_only = 1;
+ matches.record(matched_candidate->toplevel_var, NULL);
+ }
+ }
+
+ uint8_t components[MAX_VARYINGS_INCL_PATCH] = {0};
+ const unsigned slots_used = matches.assign_locations(
+ prog, components, reserved_slots);
+ matches.store_locations();
+
+ for (unsigned i = 0; i < num_tfeedback_decls; ++i) {
+ if (tfeedback_decls[i].is_varying()) {
+ if (!tfeedback_decls[i].assign_location(ctx, prog)) {
+ ralloc_free(hash_table_ctx);
+ return false;
+ }
+ }
+ }
+ ralloc_free(hash_table_ctx);
+
+ if (consumer && producer) {
+ foreach_in_list(ir_instruction, node, consumer->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var && var->data.mode == ir_var_shader_in &&
+ var->data.is_unmatched_generic_inout) {
+ if (!prog->IsES && prog->data->Version <= 120) {
+ /* On page 25 (page 31 of the PDF) of the GLSL 1.20 spec:
+ *
+ * Only those varying variables used (i.e. read) in
+ * the fragment shader executable must be written to
+ * by the vertex shader executable; declaring
+ * superfluous varying variables in a vertex shader is
+ * permissible.
+ *
+ * We interpret this text as meaning that the VS must
+ * write the variable for the FS to read it. See
+ * "glsl1-varying read but not written" in piglit.
+ */
+ linker_error(prog, "%s shader varying %s not written "
+ "by %s shader\n.",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ var->name,
+ _mesa_shader_stage_to_string(producer->Stage));
+ } else {
+ linker_warning(prog, "%s shader varying %s not written "
+ "by %s shader\n.",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ var->name,
+ _mesa_shader_stage_to_string(producer->Stage));
+ }
+ }
+ }
+
+      /* Now that validation is done it's safe to remove unused varyings. As
+       * we have both a producer and consumer, it's safe to remove unused
+       * varyings even if the program is an SSO, because the stages are being
+       * linked together, i.e. we have a multi-stage SSO.
+ */
+ remove_unused_shader_inputs_and_outputs(false, producer,
+ ir_var_shader_out);
+ remove_unused_shader_inputs_and_outputs(false, consumer,
+ ir_var_shader_in);
+ }
+
+ if (producer) {
+ lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_out,
+ 0, producer, disable_varying_packing,
+ disable_xfb_packing, xfb_enabled);
+ }
+
+ if (consumer) {
+ lower_packed_varyings(mem_ctx, slots_used, components, ir_var_shader_in,
+ consumer_vertices, consumer, disable_varying_packing,
+ disable_xfb_packing, xfb_enabled);
+ }
+
+ return true;
+}
+
+static bool
+check_against_output_limit(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ gl_linked_shader *producer,
+ unsigned num_explicit_locations)
+{
+ unsigned output_vectors = num_explicit_locations;
+
+ foreach_in_list(ir_instruction, node, producer->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var && !var->data.explicit_location &&
+ var->data.mode == ir_var_shader_out &&
+ var_counts_against_varying_limit(producer->Stage, var)) {
+         /* Outputs for the fragment shader can't be doubles. */
+ output_vectors += var->type->count_attribute_slots(false);
+ }
+ }
+
+ assert(producer->Stage != MESA_SHADER_FRAGMENT);
+ unsigned max_output_components =
+ ctx->Const.Program[producer->Stage].MaxOutputComponents;
+
+ const unsigned output_components = output_vectors * 4;
+ if (output_components > max_output_components) {
+ if (ctx->API == API_OPENGLES2 || prog->IsES)
+ linker_error(prog, "%s shader uses too many output vectors "
+ "(%u > %u)\n",
+ _mesa_shader_stage_to_string(producer->Stage),
+ output_vectors,
+ max_output_components / 4);
+ else
+ linker_error(prog, "%s shader uses too many output components "
+ "(%u > %u)\n",
+ _mesa_shader_stage_to_string(producer->Stage),
+ output_components,
+ max_output_components);
+
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+check_against_input_limit(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ gl_linked_shader *consumer,
+ unsigned num_explicit_locations)
+{
+ unsigned input_vectors = num_explicit_locations;
+
+ foreach_in_list(ir_instruction, node, consumer->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var && !var->data.explicit_location &&
+ var->data.mode == ir_var_shader_in &&
+ var_counts_against_varying_limit(consumer->Stage, var)) {
+         /* Not vertex inputs, so vertex attribute counting rules don't apply. */
+ input_vectors += var->type->count_attribute_slots(false);
+ }
+ }
+
+ assert(consumer->Stage != MESA_SHADER_VERTEX);
+ unsigned max_input_components =
+ ctx->Const.Program[consumer->Stage].MaxInputComponents;
+
+ const unsigned input_components = input_vectors * 4;
+ if (input_components > max_input_components) {
+ if (ctx->API == API_OPENGLES2 || prog->IsES)
+ linker_error(prog, "%s shader uses too many input vectors "
+ "(%u > %u)\n",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ input_vectors,
+ max_input_components / 4);
+ else
+ linker_error(prog, "%s shader uses too many input components "
+ "(%u > %u)\n",
+ _mesa_shader_stage_to_string(consumer->Stage),
+ input_components,
+ max_input_components);
+
+ return false;
+ }
+
+ return true;
+}
+
+bool
+link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
+ struct gl_context *ctx, void *mem_ctx)
+{
+ bool has_xfb_qualifiers = false;
+ unsigned num_tfeedback_decls = 0;
+ char **varying_names = NULL;
+ tfeedback_decl *tfeedback_decls = NULL;
+
+ /* From the ARB_enhanced_layouts spec:
+ *
+ * "If the shader used to record output variables for transform feedback
+ * varyings uses the "xfb_buffer", "xfb_offset", or "xfb_stride" layout
+ * qualifiers, the values specified by TransformFeedbackVaryings are
+ * ignored, and the set of variables captured for transform feedback is
+ * instead derived from the specified layout qualifiers."
+ */
+ for (int i = MESA_SHADER_FRAGMENT - 1; i >= 0; i--) {
+ /* Find last stage before fragment shader */
+ if (prog->_LinkedShaders[i]) {
+ has_xfb_qualifiers =
+ process_xfb_layout_qualifiers(mem_ctx, prog->_LinkedShaders[i],
+ prog, &num_tfeedback_decls,
+ &varying_names);
+ break;
+ }
+ }
+
+ if (!has_xfb_qualifiers) {
+ num_tfeedback_decls = prog->TransformFeedback.NumVarying;
+ varying_names = prog->TransformFeedback.VaryingNames;
+ }
+
+ if (num_tfeedback_decls != 0) {
+ /* From GL_EXT_transform_feedback:
+ * A program will fail to link if:
+ *
+ * * the <count> specified by TransformFeedbackVaryingsEXT is
+ * non-zero, but the program object has no vertex or geometry
+ * shader;
+ */
+ if (first >= MESA_SHADER_FRAGMENT) {
+ linker_error(prog, "Transform feedback varyings specified, but "
+ "no vertex, tessellation, or geometry shader is "
+ "present.\n");
+ return false;
+ }
+
+ tfeedback_decls = rzalloc_array(mem_ctx, tfeedback_decl,
+ num_tfeedback_decls);
+ if (!parse_tfeedback_decls(ctx, prog, mem_ctx, num_tfeedback_decls,
+ varying_names, tfeedback_decls))
+ return false;
+ }
+
+   /* If there is no fragment shader we still need to set up transform
+    * feedback.
+    *
+ * For SSO we also need to assign output locations. We assign them here
+ * because we need to do it for both single stage programs and multi stage
+ * programs.
+ */
+ if (last < MESA_SHADER_FRAGMENT &&
+ (num_tfeedback_decls != 0 || prog->SeparateShader)) {
+ const uint64_t reserved_out_slots =
+ reserved_varying_slot(prog->_LinkedShaders[last], ir_var_shader_out);
+ if (!assign_varying_locations(ctx, mem_ctx, prog,
+ prog->_LinkedShaders[last], NULL,
+ num_tfeedback_decls, tfeedback_decls,
+ reserved_out_slots))
+ return false;
+ }
+
+ if (last <= MESA_SHADER_FRAGMENT) {
+ /* Remove unused varyings from the first/last stage unless SSO */
+ remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
+ prog->_LinkedShaders[first],
+ ir_var_shader_in);
+ remove_unused_shader_inputs_and_outputs(prog->SeparateShader,
+ prog->_LinkedShaders[last],
+ ir_var_shader_out);
+
+ /* If the program is made up of only a single stage */
+ if (first == last) {
+ gl_linked_shader *const sh = prog->_LinkedShaders[last];
+
+ do_dead_builtin_varyings(ctx, NULL, sh, 0, NULL);
+ do_dead_builtin_varyings(ctx, sh, NULL, num_tfeedback_decls,
+ tfeedback_decls);
+
+ if (prog->SeparateShader) {
+ const uint64_t reserved_slots =
+ reserved_varying_slot(sh, ir_var_shader_in);
+
+ /* Assign input locations for SSO, output locations are already
+ * assigned.
+ */
+ if (!assign_varying_locations(ctx, mem_ctx, prog,
+ NULL /* producer */,
+ sh /* consumer */,
+ 0 /* num_tfeedback_decls */,
+ NULL /* tfeedback_decls */,
+ reserved_slots))
+ return false;
+ }
+ } else {
+ /* Linking the stages in the opposite order (from fragment to vertex)
+ * ensures that inter-shader outputs written to in an earlier stage
+ * are eliminated if they are (transitively) not used in a later
+ * stage.
+ */
+ int next = last;
+ for (int i = next - 1; i >= 0; i--) {
+ if (prog->_LinkedShaders[i] == NULL && i != 0)
+ continue;
+
+ gl_linked_shader *const sh_i = prog->_LinkedShaders[i];
+ gl_linked_shader *const sh_next = prog->_LinkedShaders[next];
+
+ const uint64_t reserved_out_slots =
+ reserved_varying_slot(sh_i, ir_var_shader_out);
+ const uint64_t reserved_in_slots =
+ reserved_varying_slot(sh_next, ir_var_shader_in);
+
+ do_dead_builtin_varyings(ctx, sh_i, sh_next,
+ next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
+ tfeedback_decls);
+
+ if (!assign_varying_locations(ctx, mem_ctx, prog, sh_i, sh_next,
+ next == MESA_SHADER_FRAGMENT ? num_tfeedback_decls : 0,
+ tfeedback_decls,
+ reserved_out_slots | reserved_in_slots))
+ return false;
+
+ /* This must be done after all dead varyings are eliminated. */
+ if (sh_i != NULL) {
+ unsigned slots_used = util_bitcount64(reserved_out_slots);
+ if (!check_against_output_limit(ctx, prog, sh_i, slots_used)) {
+ return false;
+ }
+ }
+
+ unsigned slots_used = util_bitcount64(reserved_in_slots);
+ if (!check_against_input_limit(ctx, prog, sh_next, slots_used))
+ return false;
+
+ next = i;
+ }
+ }
+ }
+
+ if (!store_tfeedback_info(ctx, prog, num_tfeedback_decls, tfeedback_decls,
+ has_xfb_qualifiers, mem_ctx))
+ return false;
+
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.h
new file mode 100644
index 0000000000..6f4bcdc79c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/link_varyings.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_LINK_VARYINGS_H
+#define GLSL_LINK_VARYINGS_H
+
+/**
+ * \file link_varyings.h
+ *
+ * Linker functions related specifically to linking varyings between shader
+ * stages.
+ */
+
+
+#include "main/glheader.h"
+#include "program/prog_parameter.h"
+#include "util/bitset.h"
+
+struct gl_shader_program;
+struct gl_shader;
+class ir_variable;
+
+
+/**
+ * Data structure describing a varying which is available for use in transform
+ * feedback.
+ *
+ * For example, if the vertex shader contains:
+ *
+ * struct S {
+ * vec4 foo;
+ * float[3] bar;
+ * };
+ *
+ * varying S[2] v;
+ *
+ * Then there would be tfeedback_candidate objects corresponding to the
+ * following varyings:
+ *
+ * v[0].foo
+ * v[0].bar
+ * v[1].foo
+ * v[1].bar
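+ *
+ * at offsets (counted in floats) of 0, 4, 7 and 11 respectively: foo
+ * occupies four floats and bar three, so each element of S spans seven.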
+ */
+struct tfeedback_candidate
+{
+ /**
+ * Toplevel variable containing this varying. In the above example, this
+ * would point to the declaration of the varying v.
+ */
+ ir_variable *toplevel_var;
+
+ /**
+ * Type of this varying. In the above example, this would point to the
+ * glsl_type for "vec4" or "float[3]".
+ */
+ const glsl_type *type;
+
+ /**
+ * Offset within the toplevel variable where this varying occurs (counted
+ * in multiples of the size of a float).
+ */
+ unsigned offset;
+};
+
+
+/**
+ * Data structure tracking information about a transform feedback declaration
+ * during linking.
+ */
+class tfeedback_decl
+{
+public:
+ void init(struct gl_context *ctx, const void *mem_ctx, const char *input);
+ static bool is_same(const tfeedback_decl &x, const tfeedback_decl &y);
+ bool assign_location(struct gl_context *ctx,
+ struct gl_shader_program *prog);
+ unsigned get_num_outputs() const;
+ bool store(struct gl_context *ctx, struct gl_shader_program *prog,
+ struct gl_transform_feedback_info *info, unsigned buffer,
+ unsigned buffer_index, const unsigned max_outputs,
+ BITSET_WORD *used_components[MAX_FEEDBACK_BUFFERS],
+ bool *explicit_stride, bool has_xfb_qualifiers,
+ const void *mem_ctx) const;
+ const tfeedback_candidate *find_candidate(gl_shader_program *prog,
+ hash_table *tfeedback_candidates);
+ void set_lowered_candidate(const tfeedback_candidate *candidate);
+
+ bool is_next_buffer_separator() const
+ {
+ return this->next_buffer_separator;
+ }
+
+ bool is_varying_written() const
+ {
+ if (this->next_buffer_separator || this->skip_components)
+ return false;
+
+ return this->matched_candidate->toplevel_var->data.assigned;
+ }
+
+ bool is_varying() const
+ {
+ return !this->next_buffer_separator && !this->skip_components;
+ }
+
+ bool is_aligned(unsigned dmul, unsigned offset) const
+ {
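+      /* e.g. for a double (dmul == 2) at array_subscript + offset == 1,
+       * 2 % 4 != 0, so the declaration is not vec4-aligned.
+       */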
+ return (dmul * (this->array_subscript + offset)) % 4 == 0;
+ }
+
+ const char *name() const
+ {
+ return this->orig_name;
+ }
+
+ unsigned get_stream_id() const
+ {
+ return this->stream_id;
+ }
+
+ unsigned get_buffer() const
+ {
+ return this->buffer;
+ }
+
+ unsigned get_offset() const
+ {
+ return this->offset;
+ }
+
+ /**
+ * The total number of varying components taken up by this variable. Only
+ * valid if assign_location() has been called.
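+    *
+    * For example, a vec3[2] varying yields 3 * 1 * 2 * 1 == 6 components
+    * (vector_elements * matrix_columns * size), while a dvec2 yields
+    * 2 * 1 * 1 * 2 == 4 because 64-bit types double the count.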
+ */
+ unsigned num_components() const
+ {
+ if (this->lowered_builtin_array_variable)
+ return this->size;
+ else
+ return this->vector_elements * this->matrix_columns * this->size *
+ (this->is_64bit() ? 2 : 1);
+ }
+
+ unsigned get_location() const {
+ return this->location;
+ }
+
+private:
+
+ bool is_64bit() const
+ {
+ return _mesa_gl_datatype_is_64bit(this->type);
+ }
+
+ /**
+ * The name that was supplied to glTransformFeedbackVaryings. Used for
+ * error reporting and glGetTransformFeedbackVarying().
+ */
+ const char *orig_name;
+
+ /**
+ * The name of the variable, parsed from orig_name.
+ */
+ const char *var_name;
+
+ /**
+ * True if the declaration in orig_name represents an array.
+ */
+ bool is_subscripted;
+
+ /**
+ * If is_subscripted is true, the subscript that was specified in orig_name.
+ */
+ unsigned array_subscript;
+
+ /**
+    * Non-zero if the variable is gl_ClipDistance, gl_CullDistance,
+    * gl_TessLevelOuter or gl_TessLevelInner and the driver lowers it to
+    * gl_*MESA.
+ */
+ enum {
+ none,
+ clip_distance,
+ cull_distance,
+ tess_level_outer,
+ tess_level_inner,
+ } lowered_builtin_array_variable;
+
+ /**
+ * The vertex shader output location that the linker assigned for this
+ * variable. -1 if a location hasn't been assigned yet.
+ */
+ int location;
+
+ /**
+ * Used to store the buffer assigned by xfb_buffer.
+ */
+ unsigned buffer;
+
+ /**
+ * Used to store the offset assigned by xfb_offset.
+ */
+ unsigned offset;
+
+ /**
+ * If non-zero, then this variable may be packed along with other variables
+ * into a single varying slot, so this offset should be applied when
+ * accessing components. For example, an offset of 1 means that the x
+ * component of this variable is actually stored in component y of the
+ * location specified by \c location.
+ *
+ * Only valid if location != -1.
+ */
+ unsigned location_frac;
+
+ /**
+ * If location != -1, the number of vector elements in this variable, or 1
+ * if this variable is a scalar.
+ */
+ unsigned vector_elements;
+
+ /**
+ * If location != -1, the number of matrix columns in this variable, or 1
+ * if this variable is not a matrix.
+ */
+ unsigned matrix_columns;
+
+ /** Type of the varying returned by glGetTransformFeedbackVarying() */
+ GLenum type;
+
+ /**
+ * If location != -1, the size that should be returned by
+ * glGetTransformFeedbackVarying().
+ */
+ unsigned size;
+
+ /**
+ * How many components to skip. If non-zero, this is
+ * gl_SkipComponents{1,2,3,4} from ARB_transform_feedback3.
+ */
+ unsigned skip_components;
+
+ /**
+ * Whether this is gl_NextBuffer from ARB_transform_feedback3.
+ */
+ bool next_buffer_separator;
+
+ /**
+ * If find_candidate() has been called, pointer to the tfeedback_candidate
+ * data structure that was found. Otherwise NULL.
+ */
+ const tfeedback_candidate *matched_candidate;
+
+ /**
+ * StreamId assigned to this varying (defaults to 0). Can only be set to
+ * values other than 0 in geometry shaders that use the stream layout
+ * modifier. Accepted values must be in the range [0, MAX_VERTEX_STREAMS-1].
+ */
+ unsigned stream_id;
+};
+
+bool
+link_varyings(struct gl_shader_program *prog, unsigned first, unsigned last,
+ struct gl_context *ctx, void *mem_ctx);
+
+void
+validate_first_and_last_interface_explicit_locations(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ gl_shader_stage first,
+ gl_shader_stage last);
+
+void
+cross_validate_outputs_to_inputs(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ gl_linked_shader *producer,
+ gl_linked_shader *consumer);
+
+#endif /* GLSL_LINK_VARYINGS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.cpp
new file mode 100644
index 0000000000..212abf084b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.cpp
@@ -0,0 +1,4988 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file linker.cpp
+ * GLSL linker implementation
+ *
+ * Given a set of shaders that are to be linked to generate a final program,
+ * there are three distinct stages.
+ *
+ * In the first stage shaders are partitioned into groups based on the shader
+ * type. All shaders of a particular type (e.g., vertex shaders) are linked
+ * together.
+ *
+ * - Undefined references in each shader are resolved to definitions in
+ * another shader.
+ * - Types and qualifiers of uniforms, outputs, and global variables defined
+ * in multiple shaders with the same name are verified to be the same.
+ * - Initializers for uniforms and global variables defined
+ * in multiple shaders with the same name are verified to be the same.
+ *
+ * The result, in the terminology of the GLSL spec, is a set of shader
+ * executables for each processing unit.
+ *
+ * After the first stage is complete, a series of semantic checks are performed
+ * on each of the shader executables.
+ *
+ * - Each shader executable must define a \c main function.
+ * - Each vertex shader executable must write to \c gl_Position.
+ * - Each fragment shader executable must write to either \c gl_FragData or
+ * \c gl_FragColor.
+ *
+ * In the final stage individual shader executables are linked to create a
+ * complete executable.
+ *
+ * - Types of uniforms defined in multiple shader stages with the same name
+ * are verified to be the same.
+ * - Initializers for uniforms defined in multiple shader stages with the
+ * same name are verified to be the same.
+ * - Types and qualifiers of outputs defined in one stage are verified to
+ * be the same as the types and qualifiers of inputs defined with the same
+ * name in a later stage.
+ *
+ * \author Ian Romanick <ian.d.romanick@intel.com>
+ */
+
+#include <ctype.h>
+#include "util/strndup.h"
+#include "glsl_symbol_table.h"
+#include "glsl_parser_extras.h"
+#include "ir.h"
+#include "program.h"
+#include "program/prog_instruction.h"
+#include "program/program.h"
+#include "util/mesa-sha1.h"
+#include "util/set.h"
+#include "string_to_uint_map.h"
+#include "linker.h"
+#include "linker_util.h"
+#include "link_varyings.h"
+#include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_uniform.h"
+#include "builtin_functions.h"
+#include "shader_cache.h"
+#include "util/u_string.h"
+#include "util/u_math.h"
+
+
+#include "main/shaderobj.h"
+#include "main/enums.h"
+#include "main/mtypes.h"
+
+
+namespace {
+
+struct find_variable {
+ const char *name;
+ bool found;
+
+ find_variable(const char *name) : name(name), found(false) {}
+};
+
+/**
+ * Visitor that determines whether or not a variable is ever written.
+ *
+ * Use \ref find_assignments for convenience.
+ */
+class find_assignment_visitor : public ir_hierarchical_visitor {
+public:
+ find_assignment_visitor(unsigned num_vars,
+ find_variable * const *vars)
+ : num_variables(num_vars), num_found(0), variables(vars)
+ {
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *ir)
+ {
+ ir_variable *const var = ir->lhs->variable_referenced();
+
+ return check_variable_name(var->name);
+ }
+
+ virtual ir_visitor_status visit_enter(ir_call *ir)
+ {
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_rvalue *param_rval = (ir_rvalue *) actual_node;
+ ir_variable *sig_param = (ir_variable *) formal_node;
+
+ if (sig_param->data.mode == ir_var_function_out ||
+ sig_param->data.mode == ir_var_function_inout) {
+ ir_variable *var = param_rval->variable_referenced();
+ if (var && check_variable_name(var->name) == visit_stop)
+ return visit_stop;
+ }
+ }
+
+ if (ir->return_deref != NULL) {
+ ir_variable *const var = ir->return_deref->variable_referenced();
+
+ if (check_variable_name(var->name) == visit_stop)
+ return visit_stop;
+ }
+
+ return visit_continue_with_parent;
+ }
+
+private:
+ ir_visitor_status check_variable_name(const char *name)
+ {
+ for (unsigned i = 0; i < num_variables; ++i) {
+ if (strcmp(variables[i]->name, name) == 0) {
+ if (!variables[i]->found) {
+ variables[i]->found = true;
+
+ assert(num_found < num_variables);
+ if (++num_found == num_variables)
+ return visit_stop;
+ }
+ break;
+ }
+ }
+
+ return visit_continue_with_parent;
+ }
+
+private:
+ unsigned num_variables; /**< Number of variables to find */
+ unsigned num_found; /**< Number of variables already found */
+ find_variable * const *variables; /**< Variables to find */
+};
+
+/**
+ * Determine whether or not any of a NULL-terminated list of variables is ever
+ * written to.
+ */
+static void
+find_assignments(exec_list *ir, find_variable * const *vars)
+{
+ unsigned num_variables = 0;
+
+ for (find_variable * const *v = vars; *v; ++v)
+ num_variables++;
+
+ find_assignment_visitor visitor(num_variables, vars);
+ visitor.run(ir);
+}
+
+/**
+ * Determine whether or not the given variable is ever written to.
+ */
+static void
+find_assignments(exec_list *ir, find_variable *var)
+{
+ find_assignment_visitor visitor(1, &var);
+ visitor.run(ir);
+}
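+
+/* Typical usage (a sketch; "sh" stands for any gl_linked_shader):
+ *
+ *    find_variable pos("gl_Position");
+ *    find_assignments(sh->ir, &pos);
+ *    if (!pos.found)
+ *       linker_error(prog, "gl_Position is never written\n");
+ */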
+
+/**
+ * Visitor that determines whether or not a variable is ever read.
+ */
+class find_deref_visitor : public ir_hierarchical_visitor {
+public:
+ find_deref_visitor(const char *name)
+ : name(name), found(false)
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (strcmp(this->name, ir->var->name) == 0) {
+ this->found = true;
+ return visit_stop;
+ }
+
+ return visit_continue;
+ }
+
+ bool variable_found() const
+ {
+ return this->found;
+ }
+
+private:
+ const char *name; /**< Find writes to a variable with this name. */
+ bool found; /**< Was a write to the variable found? */
+};
+
+
+/**
+ * A visitor helper that provides methods for updating the types of
+ * ir_dereferences. Classes that update variable types (say, updating
+ * array sizes) will want to use this so that dereference types stay in sync.
+ */
+class deref_type_updater : public ir_hierarchical_visitor {
+public:
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ ir->type = ir->var->type;
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_dereference_array *ir)
+ {
+ const glsl_type *const vt = ir->array->type;
+ if (vt->is_array())
+ ir->type = vt->fields.array;
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_dereference_record *ir)
+ {
+ ir->type = ir->record->type->fields.structure[ir->field_idx].type;
+ return visit_continue;
+ }
+};
+
+
+class array_resize_visitor : public deref_type_updater {
+public:
+ using deref_type_updater::visit;
+
+ unsigned num_vertices;
+ gl_shader_program *prog;
+ gl_shader_stage stage;
+
+ array_resize_visitor(unsigned num_vertices,
+ gl_shader_program *prog,
+ gl_shader_stage stage)
+ {
+ this->num_vertices = num_vertices;
+ this->prog = prog;
+ this->stage = stage;
+ }
+
+ virtual ~array_resize_visitor()
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit(ir_variable *var)
+ {
+ if (!var->type->is_array() || var->data.mode != ir_var_shader_in ||
+ var->data.patch)
+ return visit_continue;
+
+ unsigned size = var->type->length;
+
+ if (stage == MESA_SHADER_GEOMETRY) {
+ /* Generate a link error if the shader has declared this array with
+ * an incorrect size.
+ */
+ if (!var->data.implicit_sized_array &&
+ size && size != this->num_vertices) {
+ linker_error(this->prog, "size of array %s declared as %u, "
+ "but number of input vertices is %u\n",
+ var->name, size, this->num_vertices);
+ return visit_continue;
+ }
+
+ /* Generate a link error if the shader attempts to access an input
+ * array using an index too large for its actual size assigned at
+ * link time.
+ */
+ if (var->data.max_array_access >= (int)this->num_vertices) {
+ linker_error(this->prog, "%s shader accesses element %i of "
+ "%s, but only %i input vertices\n",
+ _mesa_shader_stage_to_string(this->stage),
+ var->data.max_array_access, var->name, this->num_vertices);
+ return visit_continue;
+ }
+ }
+
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ this->num_vertices);
+ var->data.max_array_access = this->num_vertices - 1;
+
+ return visit_continue;
+ }
+};
+
+/**
+ * Visitor that determines the highest stream id to which a (geometry) shader
+ * emits vertices. It also checks whether End{Stream}Primitive is ever called.
+ */
+class find_emit_vertex_visitor : public ir_hierarchical_visitor {
+public:
+ find_emit_vertex_visitor(int max_allowed)
+ : max_stream_allowed(max_allowed),
+ invalid_stream_id(0),
+ invalid_stream_id_from_emit_vertex(false),
+ end_primitive_found(false),
+ uses_non_zero_stream(false)
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_leave(ir_emit_vertex *ir)
+ {
+ int stream_id = ir->stream_id();
+
+ if (stream_id < 0) {
+ invalid_stream_id = stream_id;
+ invalid_stream_id_from_emit_vertex = true;
+ return visit_stop;
+ }
+
+ if (stream_id > max_stream_allowed) {
+ invalid_stream_id = stream_id;
+ invalid_stream_id_from_emit_vertex = true;
+ return visit_stop;
+ }
+
+ if (stream_id != 0)
+ uses_non_zero_stream = true;
+
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_end_primitive *ir)
+ {
+ end_primitive_found = true;
+
+ int stream_id = ir->stream_id();
+
+ if (stream_id < 0) {
+ invalid_stream_id = stream_id;
+ invalid_stream_id_from_emit_vertex = false;
+ return visit_stop;
+ }
+
+ if (stream_id > max_stream_allowed) {
+ invalid_stream_id = stream_id;
+ invalid_stream_id_from_emit_vertex = false;
+ return visit_stop;
+ }
+
+ if (stream_id != 0)
+ uses_non_zero_stream = true;
+
+ return visit_continue;
+ }
+
+ bool error()
+ {
+ return invalid_stream_id != 0;
+ }
+
+ const char *error_func()
+ {
+ return invalid_stream_id_from_emit_vertex ?
+ "EmitStreamVertex" : "EndStreamPrimitive";
+ }
+
+ int error_stream()
+ {
+ return invalid_stream_id;
+ }
+
+ bool uses_streams()
+ {
+ return uses_non_zero_stream;
+ }
+
+ bool uses_end_primitive()
+ {
+ return end_primitive_found;
+ }
+
+private:
+ int max_stream_allowed;
+ int invalid_stream_id;
+ bool invalid_stream_id_from_emit_vertex;
+ bool end_primitive_found;
+ bool uses_non_zero_stream;
+};
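+
+/* A hypothetical illustration: with ctx->Const.MaxVertexStreams == 4 this
+ * visitor would be constructed with max_allowed == 3, so a shader that
+ * calls
+ *
+ *    EmitStreamVertex(5);
+ *
+ * stops the walk with invalid_stream_id == 5, error() returns true, and
+ * error_func() yields "EmitStreamVertex" for the caller's link error.
+ */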
+
+/* Class that finds array derefs and checks whether their indices are dynamic. */
+class dynamic_sampler_array_indexing_visitor : public ir_hierarchical_visitor
+{
+public:
+ dynamic_sampler_array_indexing_visitor() :
+ dynamic_sampler_array_indexing(false)
+ {
+ }
+
+ ir_visitor_status visit_enter(ir_dereference_array *ir)
+ {
+ if (!ir->variable_referenced())
+ return visit_continue;
+
+ if (!ir->variable_referenced()->type->contains_sampler())
+ return visit_continue;
+
+ if (!ir->array_index->constant_expression_value(ralloc_parent(ir))) {
+ dynamic_sampler_array_indexing = true;
+ return visit_stop;
+ }
+ return visit_continue;
+ }
+
+ bool uses_dynamic_sampler_array_indexing()
+ {
+ return dynamic_sampler_array_indexing;
+ }
+
+private:
+ bool dynamic_sampler_array_indexing;
+};
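+
+/* For instance (hypothetical GLSL, for illustration only):
+ *
+ *    uniform sampler2D tex[4];
+ *    uniform int i;
+ *    ... texture(tex[i], uv) ...   // dynamic: `i` is not a constant
+ *    ... texture(tex[2], uv) ...   // static: index folds to a constant
+ *
+ * Only the first access makes uses_dynamic_sampler_array_indexing() return
+ * true, because the index of tex[i] has no constant_expression_value().
+ */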
+
+} /* anonymous namespace */
+
+void
+linker_error(gl_shader_program *prog, const char *fmt, ...)
+{
+ va_list ap;
+
+ ralloc_strcat(&prog->data->InfoLog, "error: ");
+ va_start(ap, fmt);
+ ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
+ va_end(ap);
+
+ prog->data->LinkStatus = LINKING_FAILURE;
+}
+
+
+void
+linker_warning(gl_shader_program *prog, const char *fmt, ...)
+{
+ va_list ap;
+
+ ralloc_strcat(&prog->data->InfoLog, "warning: ");
+ va_start(ap, fmt);
+ ralloc_vasprintf_append(&prog->data->InfoLog, fmt, ap);
+ va_end(ap);
+}
+
+
+/**
+ * Given a string identifying a program resource, break it into a base name
+ * and an optional array index in square brackets.
+ *
+ * If an array index is present, \c out_base_name_end is set to point to the
+ * "[" that precedes the array index, and the array index itself is returned
+ * as a long.
+ *
+ * If no array index is present (or if the array index is negative or
+ * malformed), \c out_base_name_end is set to point to the null terminator
+ * at the end of the input string, and -1 is returned.
+ *
+ * Only the final array index is parsed; if the string contains other array
+ * indices (or structure field accesses), they are left in the base name.
+ *
+ * No attempt is made to check that the base name is properly formed;
+ * typically the caller will look up the base name in a hash table, so
+ * ill-formed base names simply turn into hash table lookup failures.
+ */
+long
+parse_program_resource_name(const GLchar *name,
+ const GLchar **out_base_name_end)
+{
+ /* Section 7.3.1 ("Program Interfaces") of the OpenGL 4.3 spec says:
+ *
+ * "When an integer array element or block instance number is part of
+ * the name string, it will be specified in decimal form without a "+"
+ * or "-" sign or any extra leading zeroes. Additionally, the name
+ * string will not include white space anywhere in the string."
+ */
+
+ const size_t len = strlen(name);
+ *out_base_name_end = name + len;
+
+ if (len == 0 || name[len-1] != ']')
+ return -1;
+
+ /* Walk backwards over the string looking for a non-digit character. This
+ * had better be the opening bracket for an array index.
+ *
+ * Initially, i specifies the location of the ']'. Since the string may
+ * contain only the ']' character, walk backwards very carefully.
+ */
+ unsigned i;
+ for (i = len - 1; (i > 0) && isdigit(name[i-1]); --i)
+ /* empty */ ;
+
+ if ((i == 0) || name[i-1] != '[')
+ return -1;
+
+ long array_index = strtol(&name[i], NULL, 10);
+ if (array_index < 0)
+ return -1;
+
+ /* Check for leading zero */
+ if (name[i] == '0' && name[i+1] != ']')
+ return -1;
+
+ *out_base_name_end = name + (i - 1);
+ return array_index;
+}
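+
+/* A few illustrative calls (hypothetical resource names):
+ *
+ *    parse_program_resource_name("colors[2]", &end)   -> 2, end at the '['
+ *    parse_program_resource_name("colors", &end)      -> -1, end at '\0'
+ *    parse_program_resource_name("colors[05]", &end)  -> -1 (leading zero)
+ *    parse_program_resource_name("a[1].b[3]", &end)   -> 3, base "a[1].b"
+ */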
+
+
+void
+link_invalidate_variable_locations(exec_list *ir)
+{
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL)
+ continue;
+
+ /* Only assign locations for variables that lack an explicit location.
+ * Explicit locations are set for all built-in variables, generic vertex
+ * shader inputs (via layout(location=...)), and generic fragment shader
+ * outputs (also via layout(location=...)).
+ */
+ if (!var->data.explicit_location) {
+ var->data.location = -1;
+ var->data.location_frac = 0;
+ }
+
+ /* ir_variable::is_unmatched_generic_inout is used by the linker while
+ * connecting outputs from one stage to inputs of the next stage.
+ */
+ if (var->data.explicit_location &&
+ var->data.location < VARYING_SLOT_VAR0) {
+ var->data.is_unmatched_generic_inout = 0;
+ } else {
+ var->data.is_unmatched_generic_inout = 1;
+ }
+ }
+}
+
+
+/**
+ * Set clip_distance_array_size and cull_distance_array_size based on the
+ * given shader.
+ *
+ * Also check for errors from incorrect usage of gl_ClipVertex together with
+ * gl_ClipDistance or gl_CullDistance.
+ * Additionally test whether the combined sizes of the gl_ClipDistance and
+ * gl_CullDistance arrays exceed the limit set by
+ * gl_MaxCombinedClipAndCullDistances.
+ *
+ * Failures are reported through linker_error(); no value is returned.
+static void
+analyze_clip_cull_usage(struct gl_shader_program *prog,
+ struct gl_linked_shader *shader,
+ struct gl_context *ctx,
+ struct shader_info *info)
+{
+ info->clip_distance_array_size = 0;
+ info->cull_distance_array_size = 0;
+
+ if (prog->data->Version >= (prog->IsES ? 300 : 130)) {
+ /* From section 7.1 (Vertex Shader Special Variables) of the
+ * GLSL 1.30 spec:
+ *
+ * "It is an error for a shader to statically write both
+ * gl_ClipVertex and gl_ClipDistance."
+ *
+ * This does not apply to GLSL ES shaders, since GLSL ES defines neither
+ * gl_ClipVertex nor gl_ClipDistance. However with
+ * GL_EXT_clip_cull_distance, this functionality is exposed in ES 3.0.
+ */
+ find_variable gl_ClipDistance("gl_ClipDistance");
+ find_variable gl_CullDistance("gl_CullDistance");
+ find_variable gl_ClipVertex("gl_ClipVertex");
+ find_variable * const variables[] = {
+ &gl_ClipDistance,
+ &gl_CullDistance,
+ !prog->IsES ? &gl_ClipVertex : NULL,
+ NULL
+ };
+ find_assignments(shader->ir, variables);
+
+ /* From the ARB_cull_distance spec:
+ *
+ * It is a compile-time or link-time error for the set of shaders forming
+ * a program to statically read or write both gl_ClipVertex and either
+ * gl_ClipDistance or gl_CullDistance.
+ *
+ * This does not apply to GLSL ES shaders, since GLSL ES doesn't define
+ * gl_ClipVertex.
+ */
+ if (!prog->IsES) {
+ if (gl_ClipVertex.found && gl_ClipDistance.found) {
+ linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
+ "and `gl_ClipDistance'\n",
+ _mesa_shader_stage_to_string(shader->Stage));
+ return;
+ }
+ if (gl_ClipVertex.found && gl_CullDistance.found) {
+ linker_error(prog, "%s shader writes to both `gl_ClipVertex' "
+ "and `gl_CullDistance'\n",
+ _mesa_shader_stage_to_string(shader->Stage));
+ return;
+ }
+ }
+
+ if (gl_ClipDistance.found) {
+ ir_variable *clip_distance_var =
+ shader->symbols->get_variable("gl_ClipDistance");
+ assert(clip_distance_var);
+ info->clip_distance_array_size = clip_distance_var->type->length;
+ }
+ if (gl_CullDistance.found) {
+ ir_variable *cull_distance_var =
+ shader->symbols->get_variable("gl_CullDistance");
+ assert(cull_distance_var);
+ info->cull_distance_array_size = cull_distance_var->type->length;
+ }
+ /* From the ARB_cull_distance spec:
+ *
+ * It is a compile-time or link-time error for the set of shaders forming
+ * a program to have the sum of the sizes of the gl_ClipDistance and
+ * gl_CullDistance arrays to be larger than
+ * gl_MaxCombinedClipAndCullDistances.
+ */
+ if ((uint32_t)(info->clip_distance_array_size + info->cull_distance_array_size) >
+ ctx->Const.MaxClipPlanes) {
+ linker_error(prog, "%s shader: the combined size of "
+ "'gl_ClipDistance' and 'gl_CullDistance' size cannot "
+ "be larger than "
+ "gl_MaxCombinedClipAndCullDistances (%u)",
+ _mesa_shader_stage_to_string(shader->Stage),
+ ctx->Const.MaxClipPlanes);
+ }
+ }
+}
+
+
+/**
+ * Verify that a vertex shader executable meets all semantic requirements.
+ *
+ * Also sets info.clip_distance_array_size and
+ * info.cull_distance_array_size as a side effect.
+ *
+ * \param shader Vertex shader executable to be verified
+ */
+static void
+validate_vertex_shader_executable(struct gl_shader_program *prog,
+ struct gl_linked_shader *shader,
+ struct gl_context *ctx)
+{
+ if (shader == NULL)
+ return;
+
+ /* From the GLSL 1.10 spec, page 48:
+ *
+ * "The variable gl_Position is available only in the vertex
+ * language and is intended for writing the homogeneous vertex
+ * position. All executions of a well-formed vertex shader
+ * executable must write a value into this variable."
+ *
+ * while in GLSL 1.40 this text is changed to:
+ *
+ * "The variable gl_Position is available only in the vertex
+ * language and is intended for writing the homogeneous vertex
+ * position. It can be written at any time during shader
+ * execution. It may also be read back by a vertex shader
+ * after being written. This value will be used by primitive
+ * assembly, clipping, culling, and other fixed functionality
+ * operations, if present, that operate on primitives after
+ * vertex processing has occurred. Its value is undefined if
+ * the vertex shader executable does not write gl_Position."
+ *
+ * All GLSL ES Versions are similar to GLSL 1.40--failing to write to
+ * gl_Position is not an error.
+ */
+ if (prog->data->Version < (prog->IsES ? 300 : 140)) {
+ find_variable gl_Position("gl_Position");
+ find_assignments(shader->ir, &gl_Position);
+ if (!gl_Position.found) {
+ if (prog->IsES) {
+ linker_warning(prog,
+ "vertex shader does not write to `gl_Position'. "
+ "Its value is undefined. \n");
+ } else {
+ linker_error(prog,
+ "vertex shader does not write to `gl_Position'. \n");
+ }
+ return;
+ }
+ }
+
+ analyze_clip_cull_usage(prog, shader, ctx, &shader->Program->info);
+}
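+
+/* As an illustration (hypothetical shader): a "#version 130" vertex shader
+ * whose main() never writes gl_Position fails to link on desktop GL with
+ * "vertex shader does not write to `gl_Position'", while the same shader
+ * built as "#version 140" (or as GLSL ES) links, at most with a warning,
+ * because there the value of gl_Position is merely undefined.
+ */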
+
+static void
+validate_tess_eval_shader_executable(struct gl_shader_program *prog,
+ struct gl_linked_shader *shader,
+ struct gl_context *ctx)
+{
+ if (shader == NULL)
+ return;
+
+ analyze_clip_cull_usage(prog, shader, ctx, &shader->Program->info);
+}
+
+
+/**
+ * Verify that a fragment shader executable meets all semantic requirements
+ *
+ * \param shader Fragment shader executable to be verified
+ */
+static void
+validate_fragment_shader_executable(struct gl_shader_program *prog,
+ struct gl_linked_shader *shader)
+{
+ if (shader == NULL)
+ return;
+
+ find_variable gl_FragColor("gl_FragColor");
+ find_variable gl_FragData("gl_FragData");
+ find_variable * const variables[] = { &gl_FragColor, &gl_FragData, NULL };
+ find_assignments(shader->ir, variables);
+
+ if (gl_FragColor.found && gl_FragData.found) {
+ linker_error(prog, "fragment shader writes to both "
+ "`gl_FragColor' and `gl_FragData'\n");
+ }
+}
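+
+/* E.g. (illustrative fragment shader): a main() containing both
+ *
+ *    gl_FragColor = vec4(1.0);
+ *    gl_FragData[0] = vec4(1.0);
+ *
+ * trips the check above, since static assignments to both variables are
+ * found in the same program.
+ */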
+
+/**
+ * Verify that a geometry shader executable meets all semantic requirements
+ *
+ * Also sets prog->Geom.VerticesIn, info.clip_distance_array_size and
+ * info.cull_distance_array_size as a side effect.
+ *
+ * \param shader Geometry shader executable to be verified
+ */
+static void
+validate_geometry_shader_executable(struct gl_shader_program *prog,
+ struct gl_linked_shader *shader,
+ struct gl_context *ctx)
+{
+ if (shader == NULL)
+ return;
+
+ unsigned num_vertices =
+ vertices_per_prim(shader->Program->info.gs.input_primitive);
+ prog->Geom.VerticesIn = num_vertices;
+
+ analyze_clip_cull_usage(prog, shader, ctx, &shader->Program->info);
+}
+
+/**
+ * Check whether the geometry shader emits to non-zero streams and perform
+ * the corresponding validations.
+ */
+static void
+validate_geometry_shader_emissions(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_GEOMETRY];
+
+ if (sh != NULL) {
+ find_emit_vertex_visitor emit_vertex(ctx->Const.MaxVertexStreams - 1);
+ emit_vertex.run(sh->ir);
+ if (emit_vertex.error()) {
+ linker_error(prog, "Invalid call %s(%d). Accepted values for the "
+ "stream parameter are in the range [0, %d].\n",
+ emit_vertex.error_func(),
+ emit_vertex.error_stream(),
+ ctx->Const.MaxVertexStreams - 1);
+ }
+ prog->Geom.UsesStreams = emit_vertex.uses_streams();
+ prog->Geom.UsesEndPrimitive = emit_vertex.uses_end_primitive();
+
+ /* From the ARB_gpu_shader5 spec:
+ *
+ * "Multiple vertex streams are supported only if the output primitive
+ * type is declared to be "points". A program will fail to link if it
+ * contains a geometry shader calling EmitStreamVertex() or
+ * EndStreamPrimitive() if its output primitive type is not "points".
+ *
+ * However, in the same spec:
+ *
+ * "The function EmitVertex() is equivalent to calling EmitStreamVertex()
+ * with <stream> set to zero."
+ *
+ * And:
+ *
+ * "The function EndPrimitive() is equivalent to calling
+ * EndStreamPrimitive() with <stream> set to zero."
+ *
+ * Since we can call EmitVertex() and EndPrimitive() when we output
+ * primitives other than points, calling EmitStreamVertex(0) or
+ * EndStreamPrimitive(0) should not produce errors. This is also what
+ * NVIDIA does. Currently we only set prog->Geom.UsesStreams to TRUE when
+ * EmitStreamVertex() or EndStreamPrimitive() are called with a non-zero
+ * stream.
+ */
+ if (prog->Geom.UsesStreams &&
+ sh->Program->info.gs.output_primitive != GL_POINTS) {
+ linker_error(prog, "EmitStreamVertex(n) and EndStreamPrimitive(n) "
+ "with n>0 requires point output\n");
+ }
+ }
+}
+
+bool
+validate_intrastage_arrays(struct gl_shader_program *prog,
+ ir_variable *const var,
+ ir_variable *const existing,
+ bool match_precision)
+{
+ /* Consider the types to be "the same" if both types are arrays
+ * of the same type and one of the arrays is implicitly sized.
+ * In addition, set the type of the linked variable to the
+ * explicitly sized array.
+ */
+ if (var->type->is_array() && existing->type->is_array()) {
+ const glsl_type *no_array_var = var->type->fields.array;
+ const glsl_type *no_array_existing = existing->type->fields.array;
+ bool type_matches;
+
+ type_matches = (match_precision ?
+ no_array_var == no_array_existing :
+ no_array_var->compare_no_precision(no_array_existing));
+
+ if (type_matches &&
+ ((var->type->length == 0) || (existing->type->length == 0))) {
+ if (var->type->length != 0) {
+ if ((int)var->type->length <= existing->data.max_array_access) {
+ linker_error(prog, "%s `%s' declared as type "
+ "`%s' but outermost dimension has an index"
+ " of `%i'\n",
+ mode_string(var),
+ var->name, var->type->name,
+ existing->data.max_array_access);
+ }
+ existing->type = var->type;
+ return true;
+ } else if (existing->type->length != 0) {
+ if ((int)existing->type->length <= var->data.max_array_access &&
+ !existing->data.from_ssbo_unsized_array) {
+ linker_error(prog, "%s `%s' declared as type "
+ "`%s' but outermost dimension has an index"
+ " of `%i'\n",
+ mode_string(var),
+ var->name, existing->type->name,
+ var->data.max_array_access);
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
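+
+/* Sketch of the intended case (hypothetical declarations): one compilation
+ * unit declares `uniform float weights[];` and accesses weights[7], while
+ * another declares `uniform float weights[8];`. The element types match and
+ * one array is implicitly sized, so this returns true and the linked
+ * variable keeps the explicit float[8] type. Had the sized declaration been
+ * float[4], the recorded max_array_access of 7 would trigger a link error.
+ */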
+
+
+/**
+ * Perform validation of global variables used across multiple shaders
+ */
+static void
+cross_validate_globals(struct gl_context *ctx, struct gl_shader_program *prog,
+ struct exec_list *ir, glsl_symbol_table *variables,
+ bool uniforms_only)
+{
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL)
+ continue;
+
+ if (uniforms_only && (var->data.mode != ir_var_uniform && var->data.mode != ir_var_shader_storage))
+ continue;
+
+ /* don't cross validate subroutine uniforms */
+ if (var->type->contains_subroutine())
+ continue;
+
+ /* Don't cross validate interface instances. These are only relevant
+ * inside a shader. The cross validation is done at the Interface Block
+ * name level.
+ */
+ if (var->is_interface_instance())
+ continue;
+
+ /* Don't cross validate temporaries that are at global scope. These
+ * will eventually get pulled into the shaders 'main'.
+ */
+ if (var->data.mode == ir_var_temporary)
+ continue;
+
+ /* If a global with this name has already been seen, verify that the
+ * new instance has the same type. In addition, if the globals have
+ * initializers, the values of the initializers must be the same.
+ */
+ ir_variable *const existing = variables->get_variable(var->name);
+ if (existing != NULL) {
+ /* Check if types match. */
+ if (var->type != existing->type) {
+ if (!validate_intrastage_arrays(prog, var, existing)) {
+ /* If it is an unsized array in a Shader Storage Block,
+ * two different shaders can access different elements.
+ * Because of that, they might have been converted to
+ * differently sized arrays; in that case check that they
+ * are compatible but ignore the array size.
+ */
+ if (!(var->data.mode == ir_var_shader_storage &&
+ var->data.from_ssbo_unsized_array &&
+ existing->data.mode == ir_var_shader_storage &&
+ existing->data.from_ssbo_unsized_array &&
+ var->type->gl_type == existing->type->gl_type)) {
+ linker_error(prog, "%s `%s' declared as type "
+ "`%s' and type `%s'\n",
+ mode_string(var),
+ var->name, var->type->name,
+ existing->type->name);
+ return;
+ }
+ }
+ }
+
+ if (var->data.explicit_location) {
+ if (existing->data.explicit_location
+ && (var->data.location != existing->data.location)) {
+ linker_error(prog, "explicit locations for %s "
+ "`%s' have differing values\n",
+ mode_string(var), var->name);
+ return;
+ }
+
+ if (var->data.location_frac != existing->data.location_frac) {
+ linker_error(prog, "explicit components for %s `%s' have "
+ "differing values\n", mode_string(var), var->name);
+ return;
+ }
+
+ existing->data.location = var->data.location;
+ existing->data.explicit_location = true;
+ } else {
+ /* Check if a uniform with an implicit location was marked explicit
+ * by an earlier shader stage. If so, mark it explicit in this stage
+ * too, to make sure later processing does not treat it as an
+ * implicit one.
+ */
+ if (existing->data.explicit_location) {
+ var->data.location = existing->data.location;
+ var->data.explicit_location = true;
+ }
+ }
+
+ /* From the GLSL 4.20 specification:
+ * "A link error will result if two compilation units in a program
+ * specify different integer-constant bindings for the same
+ * opaque-uniform name. However, it is not an error to specify a
+ * binding on some but not all declarations for the same name"
+ */
+ if (var->data.explicit_binding) {
+ if (existing->data.explicit_binding &&
+ var->data.binding != existing->data.binding) {
+ linker_error(prog, "explicit bindings for %s "
+ "`%s' have differing values\n",
+ mode_string(var), var->name);
+ return;
+ }
+
+ existing->data.binding = var->data.binding;
+ existing->data.explicit_binding = true;
+ }
+
+ if (var->type->contains_atomic() &&
+ var->data.offset != existing->data.offset) {
+ linker_error(prog, "offset specifications for %s "
+ "`%s' have differing values\n",
+ mode_string(var), var->name);
+ return;
+ }
+
+ /* Validate layout qualifiers for gl_FragDepth.
+ *
+ * From the AMD/ARB_conservative_depth specs:
+ *
+ * "If gl_FragDepth is redeclared in any fragment shader in a
+ * program, it must be redeclared in all fragment shaders in
+ * that program that have static assignments to
+ * gl_FragDepth. All redeclarations of gl_FragDepth in all
+ * fragment shaders in a single program must have the same set
+ * of qualifiers."
+ */
+ if (strcmp(var->name, "gl_FragDepth") == 0) {
+ bool layout_declared = var->data.depth_layout != ir_depth_layout_none;
+ bool layout_differs =
+ var->data.depth_layout != existing->data.depth_layout;
+
+ if (layout_declared && layout_differs) {
+ linker_error(prog,
+ "All redeclarations of gl_FragDepth in all "
+ "fragment shaders in a single program must have "
+ "the same set of qualifiers.\n");
+ }
+
+ if (var->data.used && layout_differs) {
+ linker_error(prog,
+ "If gl_FragDepth is redeclared with a layout "
+ "qualifier in any fragment shader, it must be "
+ "redeclared with the same layout qualifier in "
+ "all fragment shaders that have assignments to "
+ "gl_FragDepth\n");
+ }
+ }
+
+ /* Page 35 (page 41 of the PDF) of the GLSL 4.20 spec says:
+ *
+ * "If a shared global has multiple initializers, the
+ * initializers must all be constant expressions, and they
+ * must all have the same value. Otherwise, a link error will
+ * result. (A shared global having only one initializer does
+ * not require that initializer to be a constant expression.)"
+ *
+ * Prior to 4.20, the GLSL spec simply said that initializers
+ * must have the same value. In the case of non-constant
+ * initializers, this was impossible to determine. As a result,
+ * no vendor actually implemented that behavior. The 4.20
+ * behavior matches the implemented behavior of at least one other
+ * vendor, so we'll implement that for all GLSL versions.
+ */
+ if (var->constant_initializer != NULL) {
+ if (existing->constant_initializer != NULL) {
+ if (!var->constant_initializer->has_value(existing->constant_initializer)) {
+ linker_error(prog, "initializers for %s "
+ "`%s' have differing values\n",
+ mode_string(var), var->name);
+ return;
+ }
+ } else {
+ /* If the first-seen instance of a particular uniform did
+ * not have an initializer but a later instance does,
+ * replace the former with the later.
+ */
+ variables->replace_variable(existing->name, var);
+ }
+ }
+
+ if (var->data.has_initializer) {
+ if (existing->data.has_initializer
+ && (var->constant_initializer == NULL
+ || existing->constant_initializer == NULL)) {
+ linker_error(prog,
+ "shared global variable `%s' has multiple "
+ "non-constant initializers.\n",
+ var->name);
+ return;
+ }
+ }
+
+ if (existing->data.explicit_invariant != var->data.explicit_invariant) {
+ linker_error(prog, "declarations for %s `%s' have "
+ "mismatching invariant qualifiers\n",
+ mode_string(var), var->name);
+ return;
+ }
+ if (existing->data.centroid != var->data.centroid) {
+ linker_error(prog, "declarations for %s `%s' have "
+ "mismatching centroid qualifiers\n",
+ mode_string(var), var->name);
+ return;
+ }
+ if (existing->data.sample != var->data.sample) {
+ linker_error(prog, "declarations for %s `%s` have "
+ "mismatching sample qualifiers\n",
+ mode_string(var), var->name);
+ return;
+ }
+ if (existing->data.image_format != var->data.image_format) {
+ linker_error(prog, "declarations for %s `%s` have "
+ "mismatching image format qualifiers\n",
+ mode_string(var), var->name);
+ return;
+ }
+
+ /* Check the precision qualifier matches for uniform variables on
+ * GLSL ES.
+ */
+ if (!ctx->Const.AllowGLSLRelaxedES &&
+ prog->IsES && !var->get_interface_type() &&
+ existing->data.precision != var->data.precision) {
+ if ((existing->data.used && var->data.used) || prog->data->Version >= 300) {
+ linker_error(prog, "declarations for %s `%s` have "
+ "mismatching precision qualifiers\n",
+ mode_string(var), var->name);
+ return;
+ } else {
+ linker_warning(prog, "declarations for %s `%s` have "
+ "mismatching precision qualifiers\n",
+ mode_string(var), var->name);
+ }
+ }
+
+ /* In the OpenGL GLSL 3.20 spec, section 4.3.9:
+ *
+ * "It is a link-time error if any particular shader interface
+ * contains:
+ *
+ * - two different blocks, each having no instance name, and each
+ * having a member of the same name, or
+ *
+ * - a variable outside a block, and a block with no instance name,
+ * where the variable has the same name as a member in the block."
+ */
+ const glsl_type *var_itype = var->get_interface_type();
+ const glsl_type *existing_itype = existing->get_interface_type();
+ if (var_itype != existing_itype) {
+ if (!var_itype || !existing_itype) {
+ linker_error(prog, "declarations for %s `%s` are inside block "
+ "`%s` and outside a block",
+ mode_string(var), var->name,
+ var_itype ? var_itype->name : existing_itype->name);
+ return;
+ } else if (strcmp(var_itype->name, existing_itype->name) != 0) {
+ linker_error(prog, "declarations for %s `%s` are inside blocks "
+ "`%s` and `%s`",
+ mode_string(var), var->name,
+ existing_itype->name,
+ var_itype->name);
+ return;
+ }
+ }
+ } else
+ variables->add_variable(var);
+ }
+}
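+
+/* For example (hypothetical shaders): if one compilation unit declares
+ * `uniform vec4 tint;` and another in the same program declares
+ * `uniform vec3 tint;`, the type comparison above fails and the linker
+ * reports that uniform `tint' was declared as two different types.
+ */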
+
+
+/**
+ * Perform validation of uniforms used across multiple shader stages
+ */
+static void
+cross_validate_uniforms(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ glsl_symbol_table variables;
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ cross_validate_globals(ctx, prog, prog->_LinkedShaders[i]->ir,
+ &variables, true);
+ }
+}
+
+/**
+ * Accumulates the array of buffer blocks and checks that all definitions of
+ * blocks agree on their contents.
+ */
+static bool
+interstage_cross_validate_uniform_blocks(struct gl_shader_program *prog,
+ bool validate_ssbo)
+{
+ int *InterfaceBlockStageIndex[MESA_SHADER_STAGES];
+ struct gl_uniform_block *blks = NULL;
+ unsigned *num_blks = validate_ssbo ? &prog->data->NumShaderStorageBlocks :
+ &prog->data->NumUniformBlocks;
+
+ unsigned max_num_buffer_blocks = 0;
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i]) {
+ if (validate_ssbo) {
+ max_num_buffer_blocks +=
+ prog->_LinkedShaders[i]->Program->info.num_ssbos;
+ } else {
+ max_num_buffer_blocks +=
+ prog->_LinkedShaders[i]->Program->info.num_ubos;
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+
+ InterfaceBlockStageIndex[i] = new int[max_num_buffer_blocks];
+ for (unsigned int j = 0; j < max_num_buffer_blocks; j++)
+ InterfaceBlockStageIndex[i][j] = -1;
+
+ if (sh == NULL)
+ continue;
+
+ unsigned sh_num_blocks;
+ struct gl_uniform_block **sh_blks;
+ if (validate_ssbo) {
+ sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ssbos;
+ sh_blks = sh->Program->sh.ShaderStorageBlocks;
+ } else {
+ sh_num_blocks = prog->_LinkedShaders[i]->Program->info.num_ubos;
+ sh_blks = sh->Program->sh.UniformBlocks;
+ }
+
+ for (unsigned int j = 0; j < sh_num_blocks; j++) {
+ int index = link_cross_validate_uniform_block(prog->data, &blks,
+ num_blks, sh_blks[j]);
+
+ if (index == -1) {
+ linker_error(prog, "buffer block `%s' has mismatching "
+ "definitions\n", sh_blks[j]->Name);
+
+ for (unsigned k = 0; k <= i; k++) {
+ delete[] InterfaceBlockStageIndex[k];
+ }
+
+ /* Reset the block count. This helps avoid various segfaults
+ * from API calls that assume the array exists because the count
+ * is non-zero.
+ */
+ *num_blks = 0;
+ return false;
+ }
+
+ InterfaceBlockStageIndex[i][index] = j;
+ }
+ }
+
+ /* Update per stage block pointers to point to the program list.
+ * FIXME: We should be able to free the per stage blocks here.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ for (unsigned j = 0; j < *num_blks; j++) {
+ int stage_index = InterfaceBlockStageIndex[i][j];
+
+ if (stage_index != -1) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+
+ struct gl_uniform_block **sh_blks = validate_ssbo ?
+ sh->Program->sh.ShaderStorageBlocks :
+ sh->Program->sh.UniformBlocks;
+
+ blks[j].stageref |= sh_blks[stage_index]->stageref;
+ sh_blks[stage_index] = &blks[j];
+ }
+ }
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ delete[] InterfaceBlockStageIndex[i];
+ }
+
+ if (validate_ssbo)
+ prog->data->ShaderStorageBlocks = blks;
+ else
+ prog->data->UniformBlocks = blks;
+
+ return true;
+}
+
+/**
+ * Verifies the invariance of built-in special variables.
+ */
+static bool
+validate_invariant_builtins(struct gl_shader_program *prog,
+ const gl_linked_shader *vert,
+ const gl_linked_shader *frag)
+{
+ const ir_variable *var_vert;
+ const ir_variable *var_frag;
+
+ if (!vert || !frag)
+ return true;
+
+ /*
+ * From the OpenGL ES Shading Language 1.0 specification
+ * (4.6.4 Invariance and Linkage):
+ * "The invariance of varyings that are declared in both the vertex and
+ * fragment shaders must match. For the built-in special variables,
+ * gl_FragCoord can only be declared invariant if and only if
+ * gl_Position is declared invariant. Similarly gl_PointCoord can only
+ * be declared invariant if and only if gl_PointSize is declared
+ * invariant. It is an error to declare gl_FrontFacing as invariant.
+ * The invariance of gl_FrontFacing is the same as the invariance of
+ * gl_Position."
+ */
+ var_frag = frag->symbols->get_variable("gl_FragCoord");
+ if (var_frag && var_frag->data.invariant) {
+ var_vert = vert->symbols->get_variable("gl_Position");
+ if (var_vert && !var_vert->data.invariant) {
+ linker_error(prog,
+ "fragment shader built-in `%s' has invariant qualifier, "
+ "but vertex shader built-in `%s' lacks invariant qualifier\n",
+ var_frag->name, var_vert->name);
+ return false;
+ }
+ }
+
+ var_frag = frag->symbols->get_variable("gl_PointCoord");
+ if (var_frag && var_frag->data.invariant) {
+ var_vert = vert->symbols->get_variable("gl_PointSize");
+ if (var_vert && !var_vert->data.invariant) {
+ linker_error(prog,
+ "fragment shader built-in `%s' has invariant qualifier, "
+ "but vertex shader built-in `%s' lacks invariant qualifier\n",
+ var_frag->name, var_vert->name);
+ return false;
+ }
+ }
+
+ var_frag = frag->symbols->get_variable("gl_FrontFacing");
+ if (var_frag && var_frag->data.invariant) {
+ linker_error(prog,
+ "fragment shader built-in `%s' can not be declared as invariant\n",
+ var_frag->name);
+ return false;
+ }
+
+ return true;
+}
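+
+/* Illustrative failing case (hypothetical ES 1.00 shaders): the fragment
+ * shader declares `invariant gl_FragCoord;` while the vertex shader never
+ * redeclares gl_Position as invariant. The first check above then reports
+ * the mismatched invariant qualifiers and linking fails.
+ */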
+
+/**
+ * Populates a shader's symbol table with all global declarations
+ */
+static void
+populate_symbol_table(gl_linked_shader *sh, glsl_symbol_table *symbols)
+{
+ sh->symbols = new(sh) glsl_symbol_table;
+
+ _mesa_glsl_copy_symbols_from_table(sh->ir, symbols, sh->symbols);
+}
+
+
+/**
+ * Remap variables referenced in an instruction tree
+ *
+ * This is used when instruction trees are cloned from one shader and placed in
+ * another. These trees will contain references to \c ir_variable nodes that
+ * do not exist in the target shader. This function finds these \c ir_variable
+ * references and replaces the references with matching variables in the target
+ * shader.
+ *
+ * If there is no matching variable in the target shader, a clone of the
+ * \c ir_variable is made and added to the target shader. The new variable is
+ * added to \b both the instruction stream and the symbol table.
+ *
+ * \param inst IR tree that is to be processed.
+ * \param target Linked shader whose symbol table supplies the matching
+ * variables and whose instruction stream receives any new
+ * variable declarations.
+ * \param temps Map from temporary variables in the source shader to
+ * their clones in the target shader.
+ */
+static void
+remap_variables(ir_instruction *inst, struct gl_linked_shader *target,
+ hash_table *temps)
+{
+ class remap_visitor : public ir_hierarchical_visitor {
+ public:
+ remap_visitor(struct gl_linked_shader *target, hash_table *temps)
+ {
+ this->target = target;
+ this->symbols = target->symbols;
+ this->instructions = target->ir;
+ this->temps = temps;
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ if (ir->var->data.mode == ir_var_temporary) {
+ hash_entry *entry = _mesa_hash_table_search(temps, ir->var);
+ ir_variable *var = entry ? (ir_variable *) entry->data : NULL;
+
+ assert(var != NULL);
+ ir->var = var;
+ return visit_continue;
+ }
+
+ ir_variable *const existing =
+ this->symbols->get_variable(ir->var->name);
+ if (existing != NULL)
+ ir->var = existing;
+ else {
+ ir_variable *copy = ir->var->clone(this->target, NULL);
+
+ this->symbols->add_variable(copy);
+ this->instructions->push_head(copy);
+ ir->var = copy;
+ }
+
+ return visit_continue;
+ }
+
+ private:
+ struct gl_linked_shader *target;
+ glsl_symbol_table *symbols;
+ exec_list *instructions;
+ hash_table *temps;
+ };
+
+ remap_visitor v(target, temps);
+
+ inst->accept(&v);
+}
+
+
+/**
+ * Move non-declarations from one instruction stream to another
+ *
+ * The intended usage pattern of this function is to pass the pointer to the
+ * head sentinel of a list (i.e., a pointer to the list cast to an \c exec_node
+ * pointer) for \c last and \c false for \c make_copies on the first
+ * call. Successive calls pass the return value of the previous call for
+ * \c last and \c true for \c make_copies.
+ *
+ * \param instructions Source instruction stream
+ * \param last Instruction after which new instructions should be
+ * inserted in the target instruction stream
+ * \param make_copies Flag selecting whether instructions in \c instructions
+ * should be copied (via \c ir_instruction::clone) into the
+ * target list or moved.
+ *
+ * \return
+ * The new "last" instruction in the target instruction stream. This pointer
+ * is suitable for use as the \c last parameter of a later call to this
+ * function.
+ */
+static exec_node *
+move_non_declarations(exec_list *instructions, exec_node *last,
+ bool make_copies, gl_linked_shader *target)
+{
+ hash_table *temps = NULL;
+
+ if (make_copies)
+ temps = _mesa_pointer_hash_table_create(NULL);
+
+ foreach_in_list_safe(ir_instruction, inst, instructions) {
+ if (inst->as_function())
+ continue;
+
+ if (inst->ir_type == ir_type_precision)
+ continue;
+ if (inst->ir_type == ir_type_typedecl)
+ continue;
+
+ ir_variable *var = inst->as_variable();
+ if ((var != NULL) && (var->data.mode != ir_var_temporary))
+ continue;
+
+ assert(inst->as_assignment()
+ || inst->as_call()
+ || inst->as_if() /* for initializers with the ?: operator */
+ || ((var != NULL) && (var->data.mode == ir_var_temporary)));
+
+ if (make_copies) {
+ inst = inst->clone(target, NULL);
+
+ if (var != NULL)
+ _mesa_hash_table_insert(temps, var, inst);
+ else
+ remap_variables(inst, target, temps);
+ } else {
+ inst->remove();
+ }
+
+ last->insert_after(inst);
+ last = inst;
+ }
+
+ if (make_copies)
+ _mesa_hash_table_destroy(temps, NULL);
+
+ return last;
+}
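+
+/* A sketch of the calling pattern described in the comment above (the
+ * surrounding names are hypothetical):
+ *
+ *    exec_node *last = move_non_declarations(first_shader->ir,
+ *                                            (exec_node *) &main_body,
+ *                                            false, linked);
+ *    // for each subsequent shader sh:
+ *    last = move_non_declarations(sh->ir, last, true, linked);
+ */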
+
+
+/**
+ * This class is only used in link_intrastage_shaders() below but declaring
+ * it inside that function leads to compiler warnings with some versions of
+ * gcc.
+ */
+class array_sizing_visitor : public deref_type_updater {
+public:
+ using deref_type_updater::visit;
+
+ array_sizing_visitor()
+ : mem_ctx(ralloc_context(NULL)),
+ unnamed_interfaces(_mesa_pointer_hash_table_create(NULL))
+ {
+ }
+
+ ~array_sizing_visitor()
+ {
+ _mesa_hash_table_destroy(this->unnamed_interfaces, NULL);
+ ralloc_free(this->mem_ctx);
+ }
+
+ virtual ir_visitor_status visit(ir_variable *var)
+ {
+ const glsl_type *type_without_array;
+ bool implicit_sized_array = var->data.implicit_sized_array;
+ fixup_type(&var->type, var->data.max_array_access,
+ var->data.from_ssbo_unsized_array,
+ &implicit_sized_array);
+ var->data.implicit_sized_array = implicit_sized_array;
+ type_without_array = var->type->without_array();
+ if (var->type->is_interface()) {
+ if (interface_contains_unsized_arrays(var->type)) {
+ const glsl_type *new_type =
+ resize_interface_members(var->type,
+ var->get_max_ifc_array_access(),
+ var->is_in_shader_storage_block());
+ var->type = new_type;
+ var->change_interface_type(new_type);
+ }
+ } else if (type_without_array->is_interface()) {
+ if (interface_contains_unsized_arrays(type_without_array)) {
+ const glsl_type *new_type =
+ resize_interface_members(type_without_array,
+ var->get_max_ifc_array_access(),
+ var->is_in_shader_storage_block());
+ var->change_interface_type(new_type);
+ var->type = update_interface_members_array(var->type, new_type);
+ }
+ } else if (const glsl_type *ifc_type = var->get_interface_type()) {
+ /* Store a pointer to the variable in the unnamed_interfaces
+ * hashtable.
+ */
+ hash_entry *entry =
+ _mesa_hash_table_search(this->unnamed_interfaces,
+ ifc_type);
+
+ ir_variable **interface_vars = entry ? (ir_variable **) entry->data : NULL;
+
+ if (interface_vars == NULL) {
+ interface_vars = rzalloc_array(mem_ctx, ir_variable *,
+ ifc_type->length);
+ _mesa_hash_table_insert(this->unnamed_interfaces, ifc_type,
+ interface_vars);
+ }
+ unsigned index = ifc_type->field_index(var->name);
+ assert(index < ifc_type->length);
+ assert(interface_vars[index] == NULL);
+ interface_vars[index] = var;
+ }
+ return visit_continue;
+ }
+
+ /**
+ * For each unnamed interface block that was discovered while running the
+ * visitor, adjust the interface type to reflect the newly assigned array
+ * sizes, and fix up the ir_variable nodes to point to the new interface
+ * type.
+ */
+ void fixup_unnamed_interface_types()
+ {
+ hash_table_call_foreach(this->unnamed_interfaces,
+ fixup_unnamed_interface_type, NULL);
+ }
+
+private:
+ /**
+ * If the type pointed to by \c type represents an unsized array, replace
+ * it with a sized array whose size is determined by max_array_access.
+ */
+ static void fixup_type(const glsl_type **type, unsigned max_array_access,
+ bool from_ssbo_unsized_array, bool *implicit_sized)
+ {
+ if (!from_ssbo_unsized_array && (*type)->is_unsized_array()) {
+ *type = glsl_type::get_array_instance((*type)->fields.array,
+ max_array_access + 1);
+ *implicit_sized = true;
+ assert(*type != NULL);
+ }
+ }
+
+ static const glsl_type *
+ update_interface_members_array(const glsl_type *type,
+ const glsl_type *new_interface_type)
+ {
+ const glsl_type *element_type = type->fields.array;
+ if (element_type->is_array()) {
+ const glsl_type *new_array_type =
+ update_interface_members_array(element_type, new_interface_type);
+ return glsl_type::get_array_instance(new_array_type, type->length);
+ } else {
+ return glsl_type::get_array_instance(new_interface_type,
+ type->length);
+ }
+ }
+
+ /**
+ * Determine whether the given interface type contains unsized arrays (if
+ * it doesn't, array_sizing_visitor doesn't need to process it).
+ */
+ static bool interface_contains_unsized_arrays(const glsl_type *type)
+ {
+ for (unsigned i = 0; i < type->length; i++) {
+ const glsl_type *elem_type = type->fields.structure[i].type;
+ if (elem_type->is_unsized_array())
+ return true;
+ }
+ return false;
+ }
+
+ /**
+ * Create a new interface type based on the given type, with unsized arrays
+ * replaced by sized arrays whose size is determined by
+ * max_ifc_array_access.
+ */
+ static const glsl_type *
+ resize_interface_members(const glsl_type *type,
+ const int *max_ifc_array_access,
+ bool is_ssbo)
+ {
+ unsigned num_fields = type->length;
+ glsl_struct_field *fields = new glsl_struct_field[num_fields];
+ memcpy(fields, type->fields.structure,
+ num_fields * sizeof(*fields));
+ for (unsigned i = 0; i < num_fields; i++) {
+ bool implicit_sized_array = fields[i].implicit_sized_array;
+ /* If the last member of an SSBO is an unsized array, don't replace it
+ * with a sized array.
+ */
+ if (is_ssbo && i == (num_fields - 1))
+ fixup_type(&fields[i].type, max_ifc_array_access[i],
+ true, &implicit_sized_array);
+ else
+ fixup_type(&fields[i].type, max_ifc_array_access[i],
+ false, &implicit_sized_array);
+ fields[i].implicit_sized_array = implicit_sized_array;
+ }
+ glsl_interface_packing packing =
+ (glsl_interface_packing) type->interface_packing;
+ bool row_major = (bool) type->interface_row_major;
+ const glsl_type *new_ifc_type =
+ glsl_type::get_interface_instance(fields, num_fields,
+ packing, row_major, type->name);
+ delete [] fields;
+ return new_ifc_type;
+ }
+
+ static void fixup_unnamed_interface_type(const void *key, void *data,
+ void *)
+ {
+ const glsl_type *ifc_type = (const glsl_type *) key;
+ ir_variable **interface_vars = (ir_variable **) data;
+ unsigned num_fields = ifc_type->length;
+ glsl_struct_field *fields = new glsl_struct_field[num_fields];
+ memcpy(fields, ifc_type->fields.structure,
+ num_fields * sizeof(*fields));
+ bool interface_type_changed = false;
+ for (unsigned i = 0; i < num_fields; i++) {
+ if (interface_vars[i] != NULL &&
+ fields[i].type != interface_vars[i]->type) {
+ fields[i].type = interface_vars[i]->type;
+ interface_type_changed = true;
+ }
+ }
+ if (!interface_type_changed) {
+ delete [] fields;
+ return;
+ }
+ glsl_interface_packing packing =
+ (glsl_interface_packing) ifc_type->interface_packing;
+ bool row_major = (bool) ifc_type->interface_row_major;
+ const glsl_type *new_ifc_type =
+ glsl_type::get_interface_instance(fields, num_fields, packing,
+ row_major, ifc_type->name);
+ delete [] fields;
+ for (unsigned i = 0; i < num_fields; i++) {
+ if (interface_vars[i] != NULL)
+ interface_vars[i]->change_interface_type(new_ifc_type);
+ }
+ }
+
+ /**
+ * Memory context used to allocate the data in \c unnamed_interfaces.
+ */
+ void *mem_ctx;
+
+ /**
+ * Hash table from const glsl_type * to an array of ir_variable *'s
+ * pointing to the ir_variables constituting each unnamed interface block.
+ */
+ hash_table *unnamed_interfaces;
+};
+
+static bool
+validate_xfb_buffer_stride(struct gl_context *ctx, unsigned idx,
+ struct gl_shader_program *prog)
+{
+ /* We will validate doubles at a later stage */
+ if (prog->TransformFeedback.BufferStride[idx] % 4) {
+ linker_error(prog, "invalid qualifier xfb_stride=%d must be a "
+ "multiple of 4 or if its applied to a type that is "
+ "or contains a double a multiple of 8.",
+ prog->TransformFeedback.BufferStride[idx]);
+ return false;
+ }
+
+ if (prog->TransformFeedback.BufferStride[idx] / 4 >
+ ctx->Const.MaxTransformFeedbackInterleavedComponents) {
+ linker_error(prog, "The MAX_TRANSFORM_FEEDBACK_INTERLEAVED_COMPONENTS "
+ "limit has been exceeded.");
+ return false;
+ }
+
+ return true;
+}
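+
+/* A quick numeric illustration (values hypothetical): xfb_stride=16 passes
+ * both checks (16 is a multiple of 4 and occupies 4 components), while
+ * xfb_stride=10 fails the "% 4" test; with a limit of 64 interleaved
+ * components, any stride above 256 bytes (64 * 4) fails the second check.
+ */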
+
+/**
+ * Check for conflicting xfb_stride default qualifiers and store buffer stride
+ * for later use.
+ */
+static void
+link_xfb_stride_layout_qualifiers(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
+ prog->TransformFeedback.BufferStride[i] = 0;
+ }
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+
+ for (unsigned j = 0; j < MAX_FEEDBACK_BUFFERS; j++) {
+ if (shader->TransformFeedbackBufferStride[j]) {
+ if (prog->TransformFeedback.BufferStride[j] == 0) {
+ prog->TransformFeedback.BufferStride[j] =
+ shader->TransformFeedbackBufferStride[j];
+ if (!validate_xfb_buffer_stride(ctx, j, prog))
+ return;
+ } else if (prog->TransformFeedback.BufferStride[j] !=
+ shader->TransformFeedbackBufferStride[j]) {
+ linker_error(prog,
+ "intrastage shaders defined with conflicting "
+ "xfb_stride for buffer %d (%d and %d)\n", j,
+ prog->TransformFeedback.BufferStride[j],
+ shader->TransformFeedbackBufferStride[j]);
+ return;
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Check for conflicting bindless/bound sampler/image layout qualifiers at
+ * global scope.
+ */
+static void
+link_bindless_layout_qualifiers(struct gl_shader_program *prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ bool bindless_sampler, bindless_image;
+ bool bound_sampler, bound_image;
+
+ bindless_sampler = bindless_image = false;
+ bound_sampler = bound_image = false;
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+
+ if (shader->bindless_sampler)
+ bindless_sampler = true;
+ if (shader->bindless_image)
+ bindless_image = true;
+ if (shader->bound_sampler)
+ bound_sampler = true;
+ if (shader->bound_image)
+ bound_image = true;
+
+ if ((bindless_sampler && bound_sampler) ||
+ (bindless_image && bound_image)) {
+ /* From section 4.4.6 of the ARB_bindless_texture spec:
+ *
+ * "If both bindless_sampler and bound_sampler, or bindless_image
+ * and bound_image, are declared at global scope in any
+ * compilation unit, a link- time error will be generated."
+ */
+ linker_error(prog, "both bindless_sampler and bound_sampler, or "
+ "bindless_image and bound_image, can't be declared at "
+ "global scope");
+ }
+ }
+}
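+
+/* E.g. (hypothetical compilation units): one shader declaring
+ * `layout(bindless_sampler) uniform;` at global scope linked with another
+ * shader declaring `layout(bound_sampler) uniform;` leaves both flags set
+ * in the loop above, producing the link error quoted from the spec.
+ */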
+
+/**
+ * Check for conflicting viewport_relative settings across shaders, and set
+ * the value for the linked shader.
+ */
+static void
+link_layer_viewport_relative_qualifier(struct gl_shader_program *prog,
+ struct gl_program *gl_prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ unsigned i;
+
+ /* Find first shader with explicit layer declaration */
+ for (i = 0; i < num_shaders; i++) {
+ if (shader_list[i]->redeclares_gl_layer) {
+ gl_prog->info.layer_viewport_relative =
+ shader_list[i]->layer_viewport_relative;
+ break;
+ }
+ }
+
+ /* Now make sure that each subsequent shader's explicit layer declaration
+ * matches the first one's.
+ */
+ for (; i < num_shaders; i++) {
+ if (shader_list[i]->redeclares_gl_layer &&
+ shader_list[i]->layer_viewport_relative !=
+ gl_prog->info.layer_viewport_relative) {
+ linker_error(prog, "all gl_Layer redeclarations must have identical "
+ "viewport_relative settings");
+ }
+ }
+}
+
+/**
+ * Performs the cross-validation of the tessellation control shader
+ * vertices-out layout qualifier for the attached tessellation control
+ * shaders, and propagates it to the linked TCS and linked shader program.
+ */
+static void
+link_tcs_out_layout_qualifiers(struct gl_shader_program *prog,
+ struct gl_program *gl_prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ if (gl_prog->info.stage != MESA_SHADER_TESS_CTRL)
+ return;
+
+ gl_prog->info.tess.tcs_vertices_out = 0;
+
+ /* From the GLSL 4.0 spec (chapter 4.3.8.2):
+ *
+ * "All tessellation control shader layout declarations in a program
+ * must specify the same output patch vertex count. There must be at
+ * least one layout qualifier specifying an output patch vertex count
+ * in any program containing tessellation control shaders; however,
+ * such a declaration is not required in all tessellation control
+ * shaders."
+ */
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+
+ if (shader->info.TessCtrl.VerticesOut != 0) {
+ if (gl_prog->info.tess.tcs_vertices_out != 0 &&
+ gl_prog->info.tess.tcs_vertices_out !=
+ (unsigned) shader->info.TessCtrl.VerticesOut) {
+ linker_error(prog, "tessellation control shader defined with "
+ "conflicting output vertex count (%d and %d)\n",
+ gl_prog->info.tess.tcs_vertices_out,
+ shader->info.TessCtrl.VerticesOut);
+ return;
+ }
+ gl_prog->info.tess.tcs_vertices_out =
+ shader->info.TessCtrl.VerticesOut;
+ }
+ }
+
+ /* Just do the intrastage -> interstage propagation right now,
+ * since we already know we're in the right type of shader program
+ * for doing it.
+ */
+ if (gl_prog->info.tess.tcs_vertices_out == 0) {
+ linker_error(prog, "tessellation control shader didn't declare "
+ "vertices out layout qualifier\n");
+ return;
+ }
+}
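+
+/* For instance (hypothetical TCS pair): linking one control shader that
+ * declares `layout(vertices = 3) out;` with another that declares
+ * `layout(vertices = 4) out;` fails with "conflicting output vertex count
+ * (3 and 4)", while a pair in which only one shader declares the count
+ * links and propagates tcs_vertices_out = 3.
+ */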
+
+
+/**
+ * Performs the cross-validation of tessellation evaluation shader
+ * primitive type, vertex spacing, ordering and point_mode layout qualifiers
+ * for the attached tessellation evaluation shaders, and propagates them
+ * to the linked TES and linked shader program.
+ */
+static void
+link_tes_in_layout_qualifiers(struct gl_shader_program *prog,
+ struct gl_program *gl_prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ if (gl_prog->info.stage != MESA_SHADER_TESS_EVAL)
+ return;
+
+ int point_mode = -1;
+ unsigned vertex_order = 0;
+
+ gl_prog->info.tess.primitive_mode = PRIM_UNKNOWN;
+ gl_prog->info.tess.spacing = TESS_SPACING_UNSPECIFIED;
+
+ /* From the GLSL 4.0 spec (chapter 4.3.8.1):
+ *
+ * "At least one tessellation evaluation shader (compilation unit) in
+ * a program must declare a primitive mode in its input layout.
+ * Declaring vertex spacing, ordering, and point mode identifiers is
+ * optional. It is not required that all tessellation evaluation
+ * shaders in a program declare a primitive mode. If spacing or
+ * vertex ordering declarations are omitted, the tessellation
+ * primitive generator will use equal spacing or counter-clockwise
+ * vertex ordering, respectively. If a point mode declaration is
+ * omitted, the tessellation primitive generator will produce lines or
+ * triangles according to the primitive mode."
+ */
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+
+ if (shader->info.TessEval.PrimitiveMode != PRIM_UNKNOWN) {
+ if (gl_prog->info.tess.primitive_mode != PRIM_UNKNOWN &&
+ gl_prog->info.tess.primitive_mode !=
+ shader->info.TessEval.PrimitiveMode) {
+ linker_error(prog, "tessellation evaluation shader defined with "
+ "conflicting input primitive modes.\n");
+ return;
+ }
+ gl_prog->info.tess.primitive_mode =
+ shader->info.TessEval.PrimitiveMode;
+ }
+
+ if (shader->info.TessEval.Spacing != 0) {
+ if (gl_prog->info.tess.spacing != 0 && gl_prog->info.tess.spacing !=
+ shader->info.TessEval.Spacing) {
+ linker_error(prog, "tessellation evaluation shader defined with "
+ "conflicting vertex spacing.\n");
+ return;
+ }
+ gl_prog->info.tess.spacing = shader->info.TessEval.Spacing;
+ }
+
+ if (shader->info.TessEval.VertexOrder != 0) {
+ if (vertex_order != 0 &&
+ vertex_order != shader->info.TessEval.VertexOrder) {
+ linker_error(prog, "tessellation evaluation shader defined with "
+ "conflicting ordering.\n");
+ return;
+ }
+ vertex_order = shader->info.TessEval.VertexOrder;
+ }
+
+ if (shader->info.TessEval.PointMode != -1) {
+ if (point_mode != -1 &&
+ point_mode != shader->info.TessEval.PointMode) {
+ linker_error(prog, "tessellation evaluation shader defined with "
+ "conflicting point modes.\n");
+ return;
+ }
+ point_mode = shader->info.TessEval.PointMode;
+ }
+
+ }
+
+ /* Just do the intrastage -> interstage propagation right now,
+ * since we already know we're in the right type of shader program
+ * for doing it.
+ */
+ if (gl_prog->info.tess.primitive_mode == PRIM_UNKNOWN) {
+ linker_error(prog,
+ "tessellation evaluation shader didn't declare input "
+ "primitive modes.\n");
+ return;
+ }
+
+ if (gl_prog->info.tess.spacing == TESS_SPACING_UNSPECIFIED)
+ gl_prog->info.tess.spacing = TESS_SPACING_EQUAL;
+
+ if (vertex_order == 0 || vertex_order == GL_CCW)
+ gl_prog->info.tess.ccw = true;
+ else
+ gl_prog->info.tess.ccw = false;
+
+ if (point_mode == -1 || point_mode == GL_FALSE)
+ gl_prog->info.tess.point_mode = false;
+ else
+ gl_prog->info.tess.point_mode = true;
+}
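+
+/* Illustration (hypothetical TES): a single evaluation shader declaring
+ *
+ *    layout(triangles, equal_spacing, ccw) in;
+ *
+ * yields primitive_mode = triangles, equal spacing, ccw = true and
+ * point_mode = false; declaring only `layout(triangles) in;` produces the
+ * same result, since equal spacing and counter-clockwise ordering are the
+ * defaults applied at the end of this function.
+ */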
+
+
+/**
+ * Performs the cross-validation of layout qualifiers specified in
+ * redeclaration of gl_FragCoord for the attached fragment shaders,
+ * and propagates them to the linked FS and linked shader program.
+ */
+static void
+link_fs_inout_layout_qualifiers(struct gl_shader_program *prog,
+ struct gl_linked_shader *linked_shader,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ bool redeclares_gl_fragcoord = false;
+ bool uses_gl_fragcoord = false;
+ bool origin_upper_left = false;
+ bool pixel_center_integer = false;
+
+ if (linked_shader->Stage != MESA_SHADER_FRAGMENT ||
+ (prog->data->Version < 150 &&
+ !prog->ARB_fragment_coord_conventions_enable))
+ return;
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+ /* From the GLSL 1.50 spec, page 39:
+ *
+ * "If gl_FragCoord is redeclared in any fragment shader in a program,
+ * it must be redeclared in all the fragment shaders in that program
+ * that have a static use of gl_FragCoord."
+ */
+ if ((redeclares_gl_fragcoord && !shader->redeclares_gl_fragcoord &&
+ shader->uses_gl_fragcoord)
+ || (shader->redeclares_gl_fragcoord && !redeclares_gl_fragcoord &&
+ uses_gl_fragcoord)) {
+ linker_error(prog, "fragment shader defined with conflicting "
+ "layout qualifiers for gl_FragCoord\n");
+ }
+
+ /* From the GLSL 1.50 spec, page 39:
+ *
+ * "All redeclarations of gl_FragCoord in all fragment shaders in a
+ * single program must have the same set of qualifiers."
+ */
+ if (redeclares_gl_fragcoord && shader->redeclares_gl_fragcoord &&
+ (shader->origin_upper_left != origin_upper_left ||
+ shader->pixel_center_integer != pixel_center_integer)) {
+ linker_error(prog, "fragment shader defined with conflicting "
+ "layout qualifiers for gl_FragCoord\n");
+ }
+
+ /* Update the linked shader state. Note that uses_gl_fragcoord should
+ * accumulate the results. The other values should replace. If there
+ * are multiple redeclarations, all the fields except uses_gl_fragcoord
+ * are already known to be the same.
+ */
+ if (shader->redeclares_gl_fragcoord || shader->uses_gl_fragcoord) {
+ redeclares_gl_fragcoord = shader->redeclares_gl_fragcoord;
+ uses_gl_fragcoord |= shader->uses_gl_fragcoord;
+ origin_upper_left = shader->origin_upper_left;
+ pixel_center_integer = shader->pixel_center_integer;
+ }
+
+ linked_shader->Program->info.fs.early_fragment_tests |=
+ shader->EarlyFragmentTests || shader->PostDepthCoverage;
+ linked_shader->Program->info.fs.inner_coverage |= shader->InnerCoverage;
+ linked_shader->Program->info.fs.post_depth_coverage |=
+ shader->PostDepthCoverage;
+ linked_shader->Program->info.fs.pixel_interlock_ordered |=
+ shader->PixelInterlockOrdered;
+ linked_shader->Program->info.fs.pixel_interlock_unordered |=
+ shader->PixelInterlockUnordered;
+ linked_shader->Program->info.fs.sample_interlock_ordered |=
+ shader->SampleInterlockOrdered;
+ linked_shader->Program->info.fs.sample_interlock_unordered |=
+ shader->SampleInterlockUnordered;
+ linked_shader->Program->sh.fs.BlendSupport |= shader->BlendSupport;
+ }
+
+ linked_shader->Program->info.fs.pixel_center_integer = pixel_center_integer;
+ linked_shader->Program->info.fs.origin_upper_left = origin_upper_left;
+}
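+
+/* Example of the rule being enforced (hypothetical fragment shaders): if
+ * one shader redeclares
+ *
+ *    layout(origin_upper_left) in vec4 gl_FragCoord;
+ *
+ * then a second shader in the same program that statically uses
+ * gl_FragCoord without redeclaring it, or that redeclares it with
+ * different qualifiers, hits the "conflicting layout qualifiers for
+ * gl_FragCoord" error above.
+ */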
+
+/**
+ * Performs the cross-validation of geometry shader max_vertices and
+ * primitive type layout qualifiers for the attached geometry shaders,
+ * and propagates them to the linked GS and linked shader program.
+ */
+static void
+link_gs_inout_layout_qualifiers(struct gl_shader_program *prog,
+ struct gl_program *gl_prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ /* No in/out qualifiers defined for anything but GLSL 1.50+
+ * geometry shaders so far.
+ */
+ if (gl_prog->info.stage != MESA_SHADER_GEOMETRY ||
+ prog->data->Version < 150)
+ return;
+
+ int vertices_out = -1;
+
+ gl_prog->info.gs.invocations = 0;
+ gl_prog->info.gs.input_primitive = PRIM_UNKNOWN;
+ gl_prog->info.gs.output_primitive = PRIM_UNKNOWN;
+
+ /* From the GLSL 1.50 spec, page 46:
+ *
+ * "All geometry shader output layout declarations in a program
+ * must declare the same layout and same value for
+ * max_vertices. There must be at least one geometry output
+ * layout declaration somewhere in a program, but not all
+ * geometry shaders (compilation units) are required to
+ * declare it."
+ */
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ struct gl_shader *shader = shader_list[i];
+
+ if (shader->info.Geom.InputType != PRIM_UNKNOWN) {
+ if (gl_prog->info.gs.input_primitive != PRIM_UNKNOWN &&
+ gl_prog->info.gs.input_primitive !=
+ shader->info.Geom.InputType) {
+ linker_error(prog, "geometry shader defined with conflicting "
+ "input types\n");
+ return;
+ }
+ gl_prog->info.gs.input_primitive = shader->info.Geom.InputType;
+ }
+
+ if (shader->info.Geom.OutputType != PRIM_UNKNOWN) {
+ if (gl_prog->info.gs.output_primitive != PRIM_UNKNOWN &&
+ gl_prog->info.gs.output_primitive !=
+ shader->info.Geom.OutputType) {
+ linker_error(prog, "geometry shader defined with conflicting "
+ "output types\n");
+ return;
+ }
+ gl_prog->info.gs.output_primitive = shader->info.Geom.OutputType;
+ }
+
+ if (shader->info.Geom.VerticesOut != -1) {
+ if (vertices_out != -1 &&
+ vertices_out != shader->info.Geom.VerticesOut) {
+ linker_error(prog, "geometry shader defined with conflicting "
+ "output vertex count (%d and %d)\n",
+ vertices_out, shader->info.Geom.VerticesOut);
+ return;
+ }
+ vertices_out = shader->info.Geom.VerticesOut;
+ }
+
+ if (shader->info.Geom.Invocations != 0) {
+ if (gl_prog->info.gs.invocations != 0 &&
+ gl_prog->info.gs.invocations !=
+ (unsigned) shader->info.Geom.Invocations) {
+ linker_error(prog, "geometry shader defined with conflicting "
+ "invocation count (%d and %d)\n",
+ gl_prog->info.gs.invocations,
+ shader->info.Geom.Invocations);
+ return;
+ }
+ gl_prog->info.gs.invocations = shader->info.Geom.Invocations;
+ }
+ }
+
+ /* Just do the intrastage -> interstage propagation right now,
+ * since we already know we're in the right type of shader program
+ * for doing it.
+ */
+ if (gl_prog->info.gs.input_primitive == PRIM_UNKNOWN) {
+ linker_error(prog,
+ "geometry shader didn't declare primitive input type\n");
+ return;
+ }
+
+ if (gl_prog->info.gs.output_primitive == PRIM_UNKNOWN) {
+ linker_error(prog,
+ "geometry shader didn't declare primitive output type\n");
+ return;
+ }
+
+ if (vertices_out == -1) {
+ linker_error(prog,
+ "geometry shader didn't declare max_vertices\n");
+ return;
+ } else {
+ gl_prog->info.gs.vertices_out = vertices_out;
+ }
+
+ if (gl_prog->info.gs.invocations == 0)
+ gl_prog->info.gs.invocations = 1;
+}
+
+
+/**
+ * Perform cross-validation of compute shader local_size_{x,y,z} layout and
+ * derivative arrangement qualifiers for the attached compute shaders, and
+ * propagate them to the linked CS and linked shader program.
+ */
+static void
+link_cs_input_layout_qualifiers(struct gl_shader_program *prog,
+ struct gl_program *gl_prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ /* This function is called for all shader stages, but it only has an effect
+ * for compute shaders.
+ */
+ if (gl_prog->info.stage != MESA_SHADER_COMPUTE)
+ return;
+
+ for (int i = 0; i < 3; i++)
+ gl_prog->info.cs.local_size[i] = 0;
+
+ gl_prog->info.cs.local_size_variable = false;
+
+ gl_prog->info.cs.derivative_group = DERIVATIVE_GROUP_NONE;
+
+ /* From the ARB_compute_shader spec, in the section describing local size
+ * declarations:
+ *
+ * If multiple compute shaders attached to a single program object
+ * declare local work-group size, the declarations must be identical;
+ * otherwise a link-time error results. Furthermore, if a program
+ * object contains any compute shaders, at least one must contain an
+ * input layout qualifier specifying the local work sizes of the
+ * program, or a link-time error will occur.
+ */
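+   /* Illustrative example (not from the sources): if one unit declares
+    * "layout(local_size_x = 8, local_size_y = 8) in;", every other unit
+    * that declares a local size must declare exactly 8x8x1, and at least
+    * one attached shader must carry the declaration.
+    */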
+ for (unsigned sh = 0; sh < num_shaders; sh++) {
+ struct gl_shader *shader = shader_list[sh];
+
+ if (shader->info.Comp.LocalSize[0] != 0) {
+ if (gl_prog->info.cs.local_size[0] != 0) {
+ for (int i = 0; i < 3; i++) {
+ if (gl_prog->info.cs.local_size[i] !=
+ shader->info.Comp.LocalSize[i]) {
+ linker_error(prog, "compute shader defined with conflicting "
+ "local sizes\n");
+ return;
+ }
+ }
+ }
+ for (int i = 0; i < 3; i++) {
+ gl_prog->info.cs.local_size[i] =
+ shader->info.Comp.LocalSize[i];
+ }
+ } else if (shader->info.Comp.LocalSizeVariable) {
+ if (gl_prog->info.cs.local_size[0] != 0) {
+ /* The ARB_compute_variable_group_size spec says:
+ *
+ * If one compute shader attached to a program declares a
+ * variable local group size and a second compute shader
+ * attached to the same program declares a fixed local group
+ * size, a link-time error results.
+ */
+ linker_error(prog, "compute shader defined with both fixed and "
+ "variable local group size\n");
+ return;
+ }
+ gl_prog->info.cs.local_size_variable = true;
+ }
+
+ enum gl_derivative_group group = shader->info.Comp.DerivativeGroup;
+ if (group != DERIVATIVE_GROUP_NONE) {
+ if (gl_prog->info.cs.derivative_group != DERIVATIVE_GROUP_NONE &&
+ gl_prog->info.cs.derivative_group != group) {
+ linker_error(prog, "compute shader defined with conflicting "
+ "derivative groups\n");
+ return;
+ }
+ gl_prog->info.cs.derivative_group = group;
+ }
+ }
+
+ /* Just do the intrastage -> interstage propagation right now,
+ * since we already know we're in the right type of shader program
+ * for doing it.
+ */
+ if (gl_prog->info.cs.local_size[0] == 0 &&
+ !gl_prog->info.cs.local_size_variable) {
+ linker_error(prog, "compute shader must contain a fixed or a variable "
+ "local group size\n");
+ return;
+ }
+
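+   /* Worked example (illustrative): a local size of 6x2x1 satisfies
+    * derivative_group_quadsNV (both leading dimensions are even) and also
+    * derivative_group_linearNV (6*2*1 = 12 is a multiple of 4), whereas
+    * 5x4x1 passes the linear check but fails the quads check on its first
+    * dimension.
+    */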
+ if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_QUADS) {
+ if (gl_prog->info.cs.local_size[0] % 2 != 0) {
+ linker_error(prog, "derivative_group_quadsNV must be used with a "
+ "local group size whose first dimension "
+ "is a multiple of 2\n");
+ return;
+ }
+ if (gl_prog->info.cs.local_size[1] % 2 != 0) {
+         linker_error(prog, "derivative_group_quadsNV must be used with a "
+                      "local group size whose second dimension "
+ "is a multiple of 2\n");
+ return;
+ }
+ } else if (gl_prog->info.cs.derivative_group == DERIVATIVE_GROUP_LINEAR) {
+ if ((gl_prog->info.cs.local_size[0] *
+ gl_prog->info.cs.local_size[1] *
+ gl_prog->info.cs.local_size[2]) % 4 != 0) {
+ linker_error(prog, "derivative_group_linearNV must be used with a "
+ "local group size whose total number of invocations "
+ "is a multiple of 4\n");
+ return;
+ }
+ }
+}
+
+/**
+ * Link all out variables of a single stage that are not directly used
+ * in the shader that contains the main function.
+ */
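+/* For example (illustrative): a vertex shader object without main() that
+ * declares "out vec4 v_color;" has the variable cloned into the linked
+ * shader below when the shader object containing main() does not declare
+ * it, so the output declaration is not lost at link time.
+ */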
+static void
+link_output_variables(struct gl_linked_shader *linked_shader,
+ struct gl_shader **shader_list,
+ unsigned num_shaders)
+{
+ struct glsl_symbol_table *symbols = linked_shader->symbols;
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+
+ /* Skip shader object with main function */
+ if (shader_list[i]->symbols->get_function("main"))
+ continue;
+
+ foreach_in_list(ir_instruction, ir, shader_list[i]->ir) {
+ if (ir->ir_type != ir_type_variable)
+ continue;
+
+ ir_variable *var = (ir_variable *) ir;
+
+ if (var->data.mode == ir_var_shader_out &&
+ !symbols->get_variable(var->name)) {
+ var = var->clone(linked_shader, NULL);
+ symbols->add_variable(var);
+ linked_shader->ir->push_head(var);
+ }
+ }
+ }
+}
+
+
+/**
+ * Combine a group of shaders for a single stage to generate a linked shader
+ *
+ * \note
+ * If this function is supplied a single shader, it is cloned, and the new
+ * shader is returned.
+ */
+struct gl_linked_shader *
+link_intrastage_shaders(void *mem_ctx,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders,
+ bool allow_missing_main)
+{
+ struct gl_uniform_block *ubo_blocks = NULL;
+ struct gl_uniform_block *ssbo_blocks = NULL;
+ unsigned num_ubo_blocks = 0;
+ unsigned num_ssbo_blocks = 0;
+
+ /* Check that global variables defined in multiple shaders are consistent.
+ */
+ glsl_symbol_table variables;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == NULL)
+ continue;
+ cross_validate_globals(ctx, prog, shader_list[i]->ir, &variables,
+ false);
+ }
+
+ if (!prog->data->LinkStatus)
+ return NULL;
+
+ /* Check that interface blocks defined in multiple shaders are consistent.
+ */
+ validate_intrastage_interface_blocks(prog, (const gl_shader **)shader_list,
+ num_shaders);
+ if (!prog->data->LinkStatus)
+ return NULL;
+
+ /* Check that there is only a single definition of each function signature
+ * across all shaders.
+ */
+ for (unsigned i = 0; i < (num_shaders - 1); i++) {
+ foreach_in_list(ir_instruction, node, shader_list[i]->ir) {
+ ir_function *const f = node->as_function();
+
+ if (f == NULL)
+ continue;
+
+ for (unsigned j = i + 1; j < num_shaders; j++) {
+ ir_function *const other =
+ shader_list[j]->symbols->get_function(f->name);
+
+ /* If the other shader has no function (and therefore no function
+ * signatures) with the same name, skip to the next shader.
+ */
+ if (other == NULL)
+ continue;
+
+ foreach_in_list(ir_function_signature, sig, &f->signatures) {
+ if (!sig->is_defined)
+ continue;
+
+ ir_function_signature *other_sig =
+ other->exact_matching_signature(NULL, &sig->parameters);
+
+ if (other_sig != NULL && other_sig->is_defined) {
+ linker_error(prog, "function `%s' is multiply defined\n",
+ f->name);
+ return NULL;
+ }
+ }
+ }
+ }
+ }
+
+ /* Find the shader that defines main, and make a clone of it.
+ *
+ * Starting with the clone, search for undefined references. If one is
+ * found, find the shader that defines it. Clone the reference and add
+ * it to the shader. Repeat until there are no undefined references or
+ * until a reference cannot be resolved.
+ */
+ gl_shader *main = NULL;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (_mesa_get_main_function_signature(shader_list[i]->symbols)) {
+ main = shader_list[i];
+ break;
+ }
+ }
+
+ if (main == NULL && allow_missing_main)
+ main = shader_list[0];
+
+ if (main == NULL) {
+ linker_error(prog, "%s shader lacks `main'\n",
+ _mesa_shader_stage_to_string(shader_list[0]->Stage));
+ return NULL;
+ }
+
+ gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
+ linked->Stage = shader_list[0]->Stage;
+
+ /* Create program and attach it to the linked shader */
+ struct gl_program *gl_prog =
+ ctx->Driver.NewProgram(ctx, shader_list[0]->Stage, prog->Name, false);
+ if (!gl_prog) {
+ prog->data->LinkStatus = LINKING_FAILURE;
+ _mesa_delete_linked_shader(ctx, linked);
+ return NULL;
+ }
+
+ _mesa_reference_shader_program_data(ctx, &gl_prog->sh.data, prog->data);
+
+   /* Don't use _mesa_reference_program(); just take ownership. */
+ linked->Program = gl_prog;
+
+ linked->ir = new(linked) exec_list;
+ clone_ir_list(mem_ctx, linked->ir, main->ir);
+
+ link_fs_inout_layout_qualifiers(prog, linked, shader_list, num_shaders);
+ link_tcs_out_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
+ link_tes_in_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
+ link_gs_inout_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
+ link_cs_input_layout_qualifiers(prog, gl_prog, shader_list, num_shaders);
+
+ if (linked->Stage != MESA_SHADER_FRAGMENT)
+ link_xfb_stride_layout_qualifiers(ctx, prog, shader_list, num_shaders);
+
+ link_bindless_layout_qualifiers(prog, shader_list, num_shaders);
+
+ link_layer_viewport_relative_qualifier(prog, gl_prog, shader_list, num_shaders);
+
+ populate_symbol_table(linked, shader_list[0]->symbols);
+
+   /* The pointer to the signature of main in the final linked shader (i.e.,
+    * in the copy of the original shader that contained the main function).
+    */
+ ir_function_signature *const main_sig =
+ _mesa_get_main_function_signature(linked->symbols);
+
+ /* Move any instructions other than variable declarations or function
+ * declarations into main.
+ */
+ if (main_sig != NULL) {
+ exec_node *insertion_point =
+ move_non_declarations(linked->ir, (exec_node *) &main_sig->body, false,
+ linked);
+
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == main)
+ continue;
+
+ insertion_point = move_non_declarations(shader_list[i]->ir,
+ insertion_point, true, linked);
+ }
+ }
+
+ if (!link_function_calls(prog, linked, shader_list, num_shaders)) {
+ _mesa_delete_linked_shader(ctx, linked);
+ return NULL;
+ }
+
+ if (linked->Stage != MESA_SHADER_FRAGMENT)
+ link_output_variables(linked, shader_list, num_shaders);
+
+ /* Make a pass over all variable declarations to ensure that arrays with
+ * unspecified sizes have a size specified. The size is inferred from the
+ * max_array_access field.
+ */
+ array_sizing_visitor v;
+ v.run(linked->ir);
+ v.fixup_unnamed_interface_types();
+
+ /* Link up uniform blocks defined within this stage. */
+ link_uniform_blocks(mem_ctx, ctx, prog, linked, &ubo_blocks,
+ &num_ubo_blocks, &ssbo_blocks, &num_ssbo_blocks);
+
+ const unsigned max_uniform_blocks =
+ ctx->Const.Program[linked->Stage].MaxUniformBlocks;
+ if (num_ubo_blocks > max_uniform_blocks) {
+ linker_error(prog, "Too many %s uniform blocks (%d/%d)\n",
+ _mesa_shader_stage_to_string(linked->Stage),
+ num_ubo_blocks, max_uniform_blocks);
+ }
+
+ const unsigned max_shader_storage_blocks =
+ ctx->Const.Program[linked->Stage].MaxShaderStorageBlocks;
+ if (num_ssbo_blocks > max_shader_storage_blocks) {
+ linker_error(prog, "Too many %s shader storage blocks (%d/%d)\n",
+ _mesa_shader_stage_to_string(linked->Stage),
+ num_ssbo_blocks, max_shader_storage_blocks);
+ }
+
+ if (!prog->data->LinkStatus) {
+ _mesa_delete_linked_shader(ctx, linked);
+ return NULL;
+ }
+
+ /* Copy ubo blocks to linked shader list */
+ linked->Program->sh.UniformBlocks =
+ ralloc_array(linked, gl_uniform_block *, num_ubo_blocks);
+ ralloc_steal(linked, ubo_blocks);
+ for (unsigned i = 0; i < num_ubo_blocks; i++) {
+ linked->Program->sh.UniformBlocks[i] = &ubo_blocks[i];
+ }
+ linked->Program->info.num_ubos = num_ubo_blocks;
+
+ /* Copy ssbo blocks to linked shader list */
+ linked->Program->sh.ShaderStorageBlocks =
+ ralloc_array(linked, gl_uniform_block *, num_ssbo_blocks);
+ ralloc_steal(linked, ssbo_blocks);
+ for (unsigned i = 0; i < num_ssbo_blocks; i++) {
+ linked->Program->sh.ShaderStorageBlocks[i] = &ssbo_blocks[i];
+ }
+ linked->Program->info.num_ssbos = num_ssbo_blocks;
+
+ /* At this point linked should contain all of the linked IR, so
+ * validate it to make sure nothing went wrong.
+ */
+ validate_ir_tree(linked->ir);
+
+ /* Set the size of geometry shader input arrays */
+ if (linked->Stage == MESA_SHADER_GEOMETRY) {
+ unsigned num_vertices =
+ vertices_per_prim(gl_prog->info.gs.input_primitive);
+ array_resize_visitor input_resize_visitor(num_vertices, prog,
+ MESA_SHADER_GEOMETRY);
+ foreach_in_list(ir_instruction, ir, linked->ir) {
+ ir->accept(&input_resize_visitor);
+ }
+ }
+
+ if (ctx->Const.VertexID_is_zero_based)
+ lower_vertex_id(linked);
+
+ if (ctx->Const.LowerCsDerivedVariables)
+ lower_cs_derived(linked);
+
+#ifdef DEBUG
+ /* Compute the source checksum. */
+ linked->SourceChecksum = 0;
+ for (unsigned i = 0; i < num_shaders; i++) {
+ if (shader_list[i] == NULL)
+ continue;
+ linked->SourceChecksum ^= shader_list[i]->SourceChecksum;
+ }
+#endif
+
+ return linked;
+}
+
+/**
+ * Update the sizes of linked shader uniform arrays to the maximum
+ * array index used.
+ *
+ * From page 81 (page 95 of the PDF) of the OpenGL 2.1 spec:
+ *
+ * If one or more elements of an array are active,
+ * GetActiveUniform will return the name of the array in name,
+ * subject to the restrictions listed above. The type of the array
+ * is returned in type. The size parameter contains the highest
+ * array element index used, plus one. The compiler or linker
+ * determines the highest index used. There will be only one
+ * active uniform reported by the GL per uniform array.
+ *
+ */
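+/* Worked example (illustrative): a uniform declared "uniform float u[8]"
+ * whose highest statically-accessed index across all linked stages is 2 is
+ * resized below to a three-element array, and any fixed-function state
+ * slots backing it are scaled to match.
+ */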
+static void
+update_array_sizes(struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ bool types_were_updated = false;
+
+ foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if ((var == NULL) || (var->data.mode != ir_var_uniform) ||
+ !var->type->is_array())
+ continue;
+
+ /* GL_ARB_uniform_buffer_object says that std140 uniforms
+ * will not be eliminated. Since we always do std140, just
+ * don't resize arrays in UBOs.
+ *
+ * Atomic counters are supposed to get deterministic
+ * locations assigned based on the declaration ordering and
+ * sizes, array compaction would mess that up.
+ *
+ * Subroutine uniforms are not removed.
+ */
+ if (var->is_in_buffer_block() || var->type->contains_atomic() ||
+ var->type->contains_subroutine() || var->constant_initializer)
+ continue;
+
+ int size = var->data.max_array_access;
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+ if (prog->_LinkedShaders[j] == NULL)
+ continue;
+
+ foreach_in_list(ir_instruction, node2, prog->_LinkedShaders[j]->ir) {
+ ir_variable *other_var = node2->as_variable();
+ if (!other_var)
+ continue;
+
+ if (strcmp(var->name, other_var->name) == 0 &&
+ other_var->data.max_array_access > size) {
+ size = other_var->data.max_array_access;
+ }
+ }
+ }
+
+ if (size + 1 != (int)var->type->length) {
+ /* If this is a built-in uniform (i.e., it's backed by some
+ * fixed-function state), adjust the number of state slots to
+ * match the new array size. The number of slots per array entry
+ * is not known. It seems safe to assume that the total number of
+ * slots is an integer multiple of the number of array elements.
+ * Determine the number of slots per array element by dividing by
+ * the old (total) size.
+ */
+ const unsigned num_slots = var->get_num_state_slots();
+ if (num_slots > 0) {
+ var->set_num_state_slots((size + 1)
+ * (num_slots / var->type->length));
+ }
+
+ var->type = glsl_type::get_array_instance(var->type->fields.array,
+ size + 1);
+ types_were_updated = true;
+ }
+ }
+
+ /* Update the types of dereferences in case we changed any. */
+ if (types_were_updated) {
+ deref_type_updater v;
+ v.run(prog->_LinkedShaders[i]->ir);
+ }
+ }
+}
+
+/**
+ * Resize tessellation evaluation per-vertex inputs to the size of
+ * tessellation control per-vertex outputs.
+ */
+static void
+resize_tes_inputs(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ if (prog->_LinkedShaders[MESA_SHADER_TESS_EVAL] == NULL)
+ return;
+
+ gl_linked_shader *const tcs = prog->_LinkedShaders[MESA_SHADER_TESS_CTRL];
+ gl_linked_shader *const tes = prog->_LinkedShaders[MESA_SHADER_TESS_EVAL];
+
+ /* If no control shader is present, then the TES inputs are statically
+ * sized to MaxPatchVertices; the actual size of the arrays won't be
+ * known until draw time.
+ */
+ const int num_vertices = tcs
+ ? tcs->Program->info.tess.tcs_vertices_out
+ : ctx->Const.MaxPatchVertices;
+
+ array_resize_visitor input_resize_visitor(num_vertices, prog,
+ MESA_SHADER_TESS_EVAL);
+ foreach_in_list(ir_instruction, ir, tes->ir) {
+ ir->accept(&input_resize_visitor);
+ }
+
+ if (tcs) {
+ /* Convert the gl_PatchVerticesIn system value into a constant, since
+ * the value is known at this point.
+ */
+ foreach_in_list(ir_instruction, ir, tes->ir) {
+ ir_variable *var = ir->as_variable();
+ if (var && var->data.mode == ir_var_system_value &&
+ var->data.location == SYSTEM_VALUE_VERTICES_IN) {
+ void *mem_ctx = ralloc_parent(var);
+ var->data.location = 0;
+ var->data.explicit_location = false;
+ var->data.mode = ir_var_auto;
+ var->constant_value = new(mem_ctx) ir_constant(num_vertices);
+ }
+ }
+ }
+}
+
+/**
+ * Find a contiguous set of available bits in a bitmask.
+ *
+ * \param used_mask Bits representing used (1) and unused (0) locations
+ * \param needed_count Number of contiguous bits needed.
+ *
+ * \return
+ * Base location of the available bits on success or -1 on failure.
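+ *
+ * For example (an illustrative call): find_available_slots(0xb, 2) skips
+ * the used bits 0, 1 and 3 of the mask 0b1011 and returns 4, the lowest
+ * base offering two contiguous unused bits.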
+ */
+static int
+find_available_slots(unsigned used_mask, unsigned needed_count)
+{
+ unsigned needed_mask = (1 << needed_count) - 1;
+ const int max_bit_to_test = (8 * sizeof(used_mask)) - needed_count;
+
+ /* The comparison to 32 is redundant, but without it GCC emits "warning:
+ * cannot optimize possibly infinite loops" for the loop below.
+ */
+ if ((needed_count == 0) || (max_bit_to_test < 0) || (max_bit_to_test > 32))
+ return -1;
+
+ for (int i = 0; i <= max_bit_to_test; i++) {
+ if ((needed_mask & ~used_mask) == needed_mask)
+ return i;
+
+ needed_mask <<= 1;
+ }
+
+ return -1;
+}
+
+
+#define SAFE_MASK_FROM_INDEX(i) (((i) >= 32) ? ~0 : ((1 << (i)) - 1))
+
+/**
+ * Assign locations for either VS inputs or FS outputs.
+ *
+ * \param mem_ctx Temporary ralloc context used for linking.
+ * \param prog Shader program whose variables need locations
+ * assigned.
+ * \param constants Driver specific constant values for the program.
+ * \param target_index Selector for the program target to receive location
+ *                     assignments. Must be either \c MESA_SHADER_VERTEX or
+ * \c MESA_SHADER_FRAGMENT.
+ * \param do_assignment Whether to actually mark the assignments or only
+ *                      perform a dry-run check.
+ *
+ * \return
+ * If locations are (or can be, in case of dry-running) successfully assigned,
+ * true is returned. Otherwise an error is emitted to the shader link log and
+ * false is returned.
+ */
+static bool
+assign_attribute_or_color_locations(void *mem_ctx,
+ gl_shader_program *prog,
+ struct gl_constants *constants,
+ unsigned target_index,
+ bool do_assignment)
+{
+ /* Maximum number of generic locations. This corresponds to either the
+ * maximum number of draw buffers or the maximum number of generic
+ * attributes.
+ */
+ unsigned max_index = (target_index == MESA_SHADER_VERTEX) ?
+ constants->Program[target_index].MaxAttribs :
+ MAX2(constants->MaxDrawBuffers, constants->MaxDualSourceDrawBuffers);
+
+ /* Mark invalid locations as being used.
+ */
+ unsigned used_locations = ~SAFE_MASK_FROM_INDEX(max_index);
+ unsigned double_storage_locations = 0;
+
+ assert((target_index == MESA_SHADER_VERTEX)
+ || (target_index == MESA_SHADER_FRAGMENT));
+
+ gl_linked_shader *const sh = prog->_LinkedShaders[target_index];
+ if (sh == NULL)
+ return true;
+
+ /* Operate in a total of four passes.
+ *
+ * 1. Invalidate the location assignments for all vertex shader inputs.
+ *
+ * 2. Assign locations for inputs that have user-defined (via
+ * glBindVertexAttribLocation) locations and outputs that have
+ * user-defined locations (via glBindFragDataLocation).
+ *
+ * 3. Sort the attributes without assigned locations by number of slots
+ * required in decreasing order. Fragmentation caused by attribute
+ * locations assigned by the application may prevent large attributes
+ * from having enough contiguous space.
+ *
+ * 4. Assign locations to any inputs without assigned locations.
+ */
+
+ const int generic_base = (target_index == MESA_SHADER_VERTEX)
+ ? (int) VERT_ATTRIB_GENERIC0 : (int) FRAG_RESULT_DATA0;
+
+ const enum ir_variable_mode direction =
+ (target_index == MESA_SHADER_VERTEX)
+ ? ir_var_shader_in : ir_var_shader_out;
+
+
+ /* Temporary storage for the set of attributes that need locations assigned.
+ */
+ struct temp_attr {
+ unsigned slots;
+ ir_variable *var;
+
+ /* Used below in the call to qsort. */
+ static int compare(const void *a, const void *b)
+ {
+ const temp_attr *const l = (const temp_attr *) a;
+ const temp_attr *const r = (const temp_attr *) b;
+
+ /* Reversed because we want a descending order sort below. */
+ return r->slots - l->slots;
+ }
+ } to_assign[32];
+ assert(max_index <= 32);
+
+ /* Temporary array for the set of attributes that have locations assigned,
+ * for the purpose of checking overlapping slots/components of (non-ES)
+ * fragment shader outputs.
+ */
+ ir_variable *assigned[12 * 4]; /* (max # of FS outputs) * # components */
+ unsigned assigned_attr = 0;
+
+ unsigned num_attr = 0;
+
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if ((var == NULL) || (var->data.mode != (unsigned) direction))
+ continue;
+
+ if (var->data.explicit_location) {
+ var->data.is_unmatched_generic_inout = 0;
+ if ((var->data.location >= (int)(max_index + generic_base))
+ || (var->data.location < 0)) {
+ linker_error(prog,
+ "invalid explicit location %d specified for `%s'\n",
+ (var->data.location < 0)
+ ? var->data.location
+ : var->data.location - generic_base,
+ var->name);
+ return false;
+ }
+ } else if (target_index == MESA_SHADER_VERTEX) {
+ unsigned binding;
+
+ if (prog->AttributeBindings->get(binding, var->name)) {
+ assert(binding >= VERT_ATTRIB_GENERIC0);
+ var->data.location = binding;
+ var->data.is_unmatched_generic_inout = 0;
+ }
+ } else if (target_index == MESA_SHADER_FRAGMENT) {
+ unsigned binding;
+ unsigned index;
+ const char *name = var->name;
+ const glsl_type *type = var->type;
+
+ while (type) {
+ /* Check if there's a binding for the variable name */
+ if (prog->FragDataBindings->get(binding, name)) {
+ assert(binding >= FRAG_RESULT_DATA0);
+ var->data.location = binding;
+ var->data.is_unmatched_generic_inout = 0;
+
+ if (prog->FragDataIndexBindings->get(index, name)) {
+ var->data.index = index;
+ }
+ break;
+ }
+
+ /* If not, but it's an array type, look for name[0] */
+ if (type->is_array()) {
+ name = ralloc_asprintf(mem_ctx, "%s[0]", name);
+ type = type->fields.array;
+ continue;
+ }
+
+ break;
+ }
+ }
+
+ if (strcmp(var->name, "gl_LastFragData") == 0)
+ continue;
+
+ /* From GL4.5 core spec, section 15.2 (Shader Execution):
+ *
+ * "Output binding assignments will cause LinkProgram to fail:
+ * ...
+ * If the program has an active output assigned to a location greater
+ * than or equal to the value of MAX_DUAL_SOURCE_DRAW_BUFFERS and has
+ * an active output assigned an index greater than or equal to one;"
+ */
+ if (target_index == MESA_SHADER_FRAGMENT && var->data.index >= 1 &&
+ var->data.location - generic_base >=
+ (int) constants->MaxDualSourceDrawBuffers) {
+ linker_error(prog,
+ "output location %d >= GL_MAX_DUAL_SOURCE_DRAW_BUFFERS "
+ "with index %u for %s\n",
+ var->data.location - generic_base, var->data.index,
+ var->name);
+ return false;
+ }
+
+ const unsigned slots = var->type->count_attribute_slots(target_index == MESA_SHADER_VERTEX);
+
+ /* If the variable is not a built-in and has a location statically
+ * assigned in the shader (presumably via a layout qualifier), make sure
+ * that it doesn't collide with other assigned locations. Otherwise,
+ * add it to the list of variables that need linker-assigned locations.
+ */
+ if (var->data.location != -1) {
+ if (var->data.location >= generic_base && var->data.index < 1) {
+ /* From page 61 of the OpenGL 4.0 spec:
+ *
+ * "LinkProgram will fail if the attribute bindings assigned
+          *     by BindAttribLocation do not leave enough space to
+ * assign a location for an active matrix attribute or an
+ * active attribute array, both of which require multiple
+ * contiguous generic attributes."
+ *
+          * The text above prohibits aliasing between explicit and automatic
+          * assignments, but aliasing is allowed in manual assignments of
+          * attribute locations. See the comments below for details.
+ *
+ * From OpenGL 4.0 spec, page 61:
+ *
+ * "It is possible for an application to bind more than one
+ * attribute name to the same location. This is referred to as
+ * aliasing. This will only work if only one of the aliased
+ * attributes is active in the executable program, or if no
+ * path through the shader consumes more than one attribute of
+ * a set of attributes aliased to the same location. A link
+ * error can occur if the linker determines that every path
+ * through the shader consumes multiple aliased attributes,
+ * but implementations are not required to generate an error
+ * in this case."
+ *
+ * From GLSL 4.30 spec, page 54:
+ *
+ * "A program will fail to link if any two non-vertex shader
+ * input variables are assigned to the same location. For
+ * vertex shaders, multiple input variables may be assigned
+ * to the same location using either layout qualifiers or via
+ * the OpenGL API. However, such aliasing is intended only to
+ * support vertex shaders where each execution path accesses
+ * at most one input per each location. Implementations are
+ * permitted, but not required, to generate link-time errors
+ * if they detect that every path through the vertex shader
+ * executable accesses multiple inputs assigned to any single
+ * location. For all shader types, a program will fail to link
+ * if explicit location assignments leave the linker unable
+ * to find space for other variables without explicit
+ * assignments."
+ *
+ * From OpenGL ES 3.0 spec, page 56:
+ *
+ * "Binding more than one attribute name to the same location
+ * is referred to as aliasing, and is not permitted in OpenGL
+ * ES Shading Language 3.00 vertex shaders. LinkProgram will
+ * fail when this condition exists. However, aliasing is
+ * possible in OpenGL ES Shading Language 1.00 vertex shaders.
+ * This will only work if only one of the aliased attributes
+ * is active in the executable program, or if no path through
+ * the shader consumes more than one attribute of a set of
+ * attributes aliased to the same location. A link error can
+ * occur if the linker determines that every path through the
+ * shader consumes multiple aliased attributes, but implemen-
+ * tations are not required to generate an error in this case."
+ *
+ * After looking at above references from OpenGL, OpenGL ES and
+ * GLSL specifications, we allow aliasing of vertex input variables
+ * in: OpenGL 2.0 (and above) and OpenGL ES 2.0.
+ *
+          * NOTE: This is not required by the spec, but it's worth mentioning
+ * here that we're not doing anything to make sure that no path
+ * through the vertex shader executable accesses multiple inputs
+ * assigned to any single location.
+ */
+
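+            /* Illustrative GLSL consequence of the rules above: two vertex
+             * inputs bound to the same location, e.g.
+             *
+             *    layout(location = 1) in vec4 a;
+             *    layout(location = 1) in vec4 b;
+             *
+             * are accepted below with only a warning for desktop GL and for
+             * GLSL ES 1.00 vertex shaders, but produce a link error for
+             * ES 3.00 shaders and for fragment shader outputs.
+             */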
+ /* Mask representing the contiguous slots that will be used by
+ * this attribute.
+ */
+ const unsigned attr = var->data.location - generic_base;
+ const unsigned use_mask = (1 << slots) - 1;
+ const char *const string = (target_index == MESA_SHADER_VERTEX)
+ ? "vertex shader input" : "fragment shader output";
+
+ /* Generate a link error if the requested locations for this
+ * attribute exceed the maximum allowed attribute location.
+ */
+ if (attr + slots > max_index) {
+               linker_error(prog,
+                            "insufficient contiguous locations available for "
+                            "%s `%s' (used locations 0x%x, needed mask 0x%x, "
+                            "base %u)\n", string,
+                            var->name, used_locations, use_mask, attr);
+ return false;
+ }
+
+ /* Generate a link error if the set of bits requested for this
+ * attribute overlaps any previously allocated bits.
+ */
+ if ((~(use_mask << attr) & used_locations) != used_locations) {
+ if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
+ /* From section 4.4.2 (Output Layout Qualifiers) of the GLSL
+ * 4.40 spec:
+ *
+ * "Additionally, for fragment shader outputs, if two
+ * variables are placed within the same location, they
+ * must have the same underlying type (floating-point or
+ * integer). No component aliasing of output variables or
+                *    members is allowed."
+ */
+ for (unsigned i = 0; i < assigned_attr; i++) {
+ unsigned assigned_slots =
+ assigned[i]->type->count_attribute_slots(false);
+ unsigned assig_attr =
+ assigned[i]->data.location - generic_base;
+ unsigned assigned_use_mask = (1 << assigned_slots) - 1;
+
+ if ((assigned_use_mask << assig_attr) &
+ (use_mask << attr)) {
+
+ const glsl_type *assigned_type =
+ assigned[i]->type->without_array();
+ const glsl_type *type = var->type->without_array();
+ if (assigned_type->base_type != type->base_type) {
+ linker_error(prog, "types do not match for aliased"
+ " %ss %s and %s\n", string,
+ assigned[i]->name, var->name);
+ return false;
+ }
+
+ unsigned assigned_component_mask =
+ ((1 << assigned_type->vector_elements) - 1) <<
+ assigned[i]->data.location_frac;
+ unsigned component_mask =
+ ((1 << type->vector_elements) - 1) <<
+ var->data.location_frac;
+ if (assigned_component_mask & component_mask) {
+ linker_error(prog, "overlapping component is "
+ "assigned to %ss %s and %s "
+ "(component=%d)\n",
+ string, assigned[i]->name, var->name,
+ var->data.location_frac);
+ return false;
+ }
+ }
+ }
+ } else if (target_index == MESA_SHADER_FRAGMENT ||
+ (prog->IsES && prog->data->Version >= 300)) {
+               linker_error(prog, "overlapping location is assigned "
+                            "to %s `%s' (used locations 0x%x, needed mask "
+                            "0x%x, base %u)\n", string, var->name,
+                            used_locations, use_mask, attr);
+ return false;
+ } else {
+               linker_warning(prog, "overlapping location is assigned "
+                              "to %s `%s' (used locations 0x%x, needed mask "
+                              "0x%x, base %u)\n", string, var->name,
+                              used_locations, use_mask, attr);
+ }
+ }
+
+ if (target_index == MESA_SHADER_FRAGMENT && !prog->IsES) {
+ /* Only track assigned variables for non-ES fragment shaders
+ * to avoid overflowing the array.
+ *
+ * At most one variable per fragment output component should
+ * reach this.
+ */
+ assert(assigned_attr < ARRAY_SIZE(assigned));
+ assigned[assigned_attr] = var;
+ assigned_attr++;
+ }
+
+ used_locations |= (use_mask << attr);
+
+ /* From the GL 4.5 core spec, section 11.1.1 (Vertex Attributes):
+ *
+ * "A program with more than the value of MAX_VERTEX_ATTRIBS
+ * active attribute variables may fail to link, unless
+ * device-dependent optimizations are able to make the program
+ * fit within available hardware resources. For the purposes
+ * of this test, attribute variables of the type dvec3, dvec4,
+ * dmat2x3, dmat2x4, dmat3, dmat3x4, dmat4x3, and dmat4 may
+ * count as consuming twice as many attributes as equivalent
+ * single-precision types. While these types use the same number
+ * of generic attributes as their single-precision equivalents,
+ * implementations are permitted to consume two single-precision
+ * vectors of internal storage for each three- or four-component
+ * double-precision vector."
+ *
+ * Mark this attribute slot as taking up twice as much space
+ * so we can count it properly against limits. According to
+ * issue (3) of the GL_ARB_vertex_attrib_64bit behavior, this
+ * is optional behavior, but it seems preferable.
+ */
+ if (var->type->without_array()->is_dual_slot())
+ double_storage_locations |= (use_mask << attr);
+ }
+
+ continue;
+ }
+
+ if (num_attr >= max_index) {
+ linker_error(prog, "too many %s (max %u)",
+ target_index == MESA_SHADER_VERTEX ?
+ "vertex shader inputs" : "fragment shader outputs",
+ max_index);
+ return false;
+ }
+ to_assign[num_attr].slots = slots;
+ to_assign[num_attr].var = var;
+ num_attr++;
+ }
+
+ if (!do_assignment)
+ return true;
+
+ if (target_index == MESA_SHADER_VERTEX) {
+ unsigned total_attribs_size =
+ util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
+ util_bitcount(double_storage_locations);
+ if (total_attribs_size > max_index) {
+ linker_error(prog,
+                   "attempt to use %u vertex attribute slots, but only %u are available\n",
+ total_attribs_size, max_index);
+ return false;
+ }
+ }
+
+ /* If all of the attributes were assigned locations by the application (or
+ * are built-in attributes with fixed locations), return early. This should
+ * be the common case.
+ */
+ if (num_attr == 0)
+ return true;
+
+ qsort(to_assign, num_attr, sizeof(to_assign[0]), temp_attr::compare);
+
+ if (target_index == MESA_SHADER_VERTEX) {
+ /* VERT_ATTRIB_GENERIC0 is a pseudo-alias for VERT_ATTRIB_POS. It can
+       * only be explicitly assigned via glBindAttribLocation. Mark it as
+ * reserved to prevent it from being automatically allocated below.
+ */
+ find_deref_visitor find("gl_Vertex");
+ find.run(sh->ir);
+ if (find.variable_found())
+ used_locations |= (1 << 0);
+ }
+
+ for (unsigned i = 0; i < num_attr; i++) {
+ /* Mask representing the contiguous slots that will be used by this
+ * attribute.
+ */
+ const unsigned use_mask = (1 << to_assign[i].slots) - 1;
+
+ int location = find_available_slots(used_locations, to_assign[i].slots);
+
+ if (location < 0) {
+ const char *const string = (target_index == MESA_SHADER_VERTEX)
+ ? "vertex shader input" : "fragment shader output";
+
+ linker_error(prog,
+ "insufficient contiguous locations "
+ "available for %s `%s'\n",
+ string, to_assign[i].var->name);
+ return false;
+ }
+
+ to_assign[i].var->data.location = generic_base + location;
+ to_assign[i].var->data.is_unmatched_generic_inout = 0;
+ used_locations |= (use_mask << location);
+
+ if (to_assign[i].var->type->without_array()->is_dual_slot())
+ double_storage_locations |= (use_mask << location);
+ }
+
+ /* Now that we have all the locations, from the GL 4.5 core spec, section
+ * 11.1.1 (Vertex Attributes), dvec3, dvec4, dmat2x3, dmat2x4, dmat3,
+ * dmat3x4, dmat4x3, and dmat4 count as consuming twice as many attributes
+ * as equivalent single-precision types.
+ */
+ if (target_index == MESA_SHADER_VERTEX) {
+ unsigned total_attribs_size =
+ util_bitcount(used_locations & SAFE_MASK_FROM_INDEX(max_index)) +
+ util_bitcount(double_storage_locations);
+ if (total_attribs_size > max_index) {
+ linker_error(prog,
+                   "attempt to use %u vertex attribute slots, but only %u are available\n",
+ total_attribs_size, max_index);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Match explicit locations of outputs to inputs and clear the unmatched
+ * flag for each pair found so that the variables are not optimized away.
+ */
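+/* For example (illustrative GLSL): a producer declaring
+ * "layout(location = 5) out vec4 a;" and a consumer declaring
+ * "layout(location = 5) in vec4 b;" are matched here purely by location
+ * and component, so the differing names do not leave the pair flagged as
+ * unmatched.
+ */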
+static void
+match_explicit_outputs_to_inputs(gl_linked_shader *producer,
+ gl_linked_shader *consumer)
+{
+ glsl_symbol_table parameters;
+ ir_variable *explicit_locations[MAX_VARYINGS_INCL_PATCH][4] =
+ { {NULL, NULL} };
+
+ /* Find all shader outputs in the "producer" stage.
+ */
+ foreach_in_list(ir_instruction, node, producer->ir) {
+ ir_variable *const var = node->as_variable();
+
+ if ((var == NULL) || (var->data.mode != ir_var_shader_out))
+ continue;
+
+ if (var->data.explicit_location &&
+ var->data.location >= VARYING_SLOT_VAR0) {
+ const unsigned idx = var->data.location - VARYING_SLOT_VAR0;
+ if (explicit_locations[idx][var->data.location_frac] == NULL)
+ explicit_locations[idx][var->data.location_frac] = var;
+
+ /* Always match TCS outputs. They are shared by all invocations
+ * within a patch and can be used as shared memory.
+ */
+ if (producer->Stage == MESA_SHADER_TESS_CTRL)
+ var->data.is_unmatched_generic_inout = 0;
+ }
+ }
+
+ /* Match inputs to outputs */
+ foreach_in_list(ir_instruction, node, consumer->ir) {
+ ir_variable *const input = node->as_variable();
+
+ if ((input == NULL) || (input->data.mode != ir_var_shader_in))
+ continue;
+
+ ir_variable *output = NULL;
+ if (input->data.explicit_location
+ && input->data.location >= VARYING_SLOT_VAR0) {
+ output = explicit_locations[input->data.location - VARYING_SLOT_VAR0]
+ [input->data.location_frac];
+
+         if (output != NULL) {
+ input->data.is_unmatched_generic_inout = 0;
+ output->data.is_unmatched_generic_inout = 0;
+ }
+ }
+ }
+}
+
+/**
+ * Store the gl_FragDepth layout in the gl_shader_program struct.
+ */
+static void
+store_fragdepth_layout(struct gl_shader_program *prog)
+{
+ if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
+ return;
+ }
+
+ struct exec_list *ir = prog->_LinkedShaders[MESA_SHADER_FRAGMENT]->ir;
+
+ /* We don't look up the gl_FragDepth symbol directly because if
+ * gl_FragDepth is not used in the shader, it's removed from the IR.
+ * However, the symbol won't be removed from the symbol table.
+ *
+ * We're only interested in the cases where the variable is NOT removed
+ * from the IR.
+ */
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || var->data.mode != ir_var_shader_out) {
+ continue;
+ }
+
+ if (strcmp(var->name, "gl_FragDepth") == 0) {
+ switch (var->data.depth_layout) {
+ case ir_depth_layout_none:
+ prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_NONE;
+ return;
+ case ir_depth_layout_any:
+ prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_ANY;
+ return;
+ case ir_depth_layout_greater:
+ prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_GREATER;
+ return;
+ case ir_depth_layout_less:
+ prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_LESS;
+ return;
+ case ir_depth_layout_unchanged:
+ prog->FragDepthLayout = FRAG_DEPTH_LAYOUT_UNCHANGED;
+ return;
+ default:
+ assert(0);
+ return;
+ }
+ }
+ }
+}
+
+/**
+ * Validate shader image resources.
+ */
+static void
+check_image_resources(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ unsigned total_image_units = 0;
+ unsigned fragment_outputs = 0;
+ unsigned total_shader_storage_blocks = 0;
+
+ if (!ctx->Extensions.ARB_shader_image_load_store)
+ return;
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh) {
+ total_image_units += sh->Program->info.num_images;
+ total_shader_storage_blocks += sh->Program->info.num_ssbos;
+
+ if (i == MESA_SHADER_FRAGMENT) {
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+ if (var && var->data.mode == ir_var_shader_out)
+ /* since there are no double fs outputs - pass false */
+ fragment_outputs += var->type->count_attribute_slots(false);
+ }
+ }
+ }
+ }
+
+ if (total_image_units > ctx->Const.MaxCombinedImageUniforms)
+ linker_error(prog, "Too many combined image uniforms\n");
+
+ if (total_image_units + fragment_outputs + total_shader_storage_blocks >
+ ctx->Const.MaxCombinedShaderOutputResources)
+ linker_error(prog, "Too many combined image uniforms, shader storage "
+                   "buffers and fragment outputs\n");
+}
+
+
+/**
+ * Initializes explicit location slots to INACTIVE_UNIFORM_EXPLICIT_LOCATION
+ * for a variable and checks for overlaps with other uniforms that use
+ * explicit locations.
+ */
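+/* For example (illustrative): "layout(location = 3) uniform float u[4];"
+ * reserves locations 3 through 6 in the remap table below; any other
+ * uniform explicitly placed in that range triggers the overlap error.
+ */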
+static int
+reserve_explicit_locations(struct gl_shader_program *prog,
+ string_to_uint_map *map, ir_variable *var)
+{
+ unsigned slots = var->type->uniform_locations();
+ unsigned max_loc = var->data.location + slots - 1;
+ unsigned return_value = slots;
+
+ /* Resize remap table if locations do not fit in the current one. */
+ if (max_loc + 1 > prog->NumUniformRemapTable) {
+ prog->UniformRemapTable =
+ reralloc(prog, prog->UniformRemapTable,
+ gl_uniform_storage *,
+ max_loc + 1);
+
+ if (!prog->UniformRemapTable) {
+ linker_error(prog, "Out of memory during linking.\n");
+ return -1;
+ }
+
+ /* Initialize allocated space. */
+ for (unsigned i = prog->NumUniformRemapTable; i < max_loc + 1; i++)
+ prog->UniformRemapTable[i] = NULL;
+
+ prog->NumUniformRemapTable = max_loc + 1;
+ }
+
+ for (unsigned i = 0; i < slots; i++) {
+ unsigned loc = var->data.location + i;
+
+ /* Check if location is already used. */
+ if (prog->UniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+
+         /* Possibly the same uniform from a different stage; this is ok. */
+ unsigned hash_loc;
+ if (map->get(hash_loc, var->name) && hash_loc == loc - i) {
+ return_value = 0;
+ continue;
+ }
+
+ /* ARB_explicit_uniform_location specification states:
+ *
+ * "No two default-block uniform variables in the program can have
+ * the same location, even if they are unused, otherwise a compiler
+ * or linker error will be generated."
+ */
+ linker_error(prog,
+ "location qualifier for uniform %s overlaps "
+ "previously used location\n",
+ var->name);
+ return -1;
+ }
+
+ /* Initialize location as inactive before optimization
+ * rounds and location assignment.
+ */
+ prog->UniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
+ }
+
+   /* Note: the base location is used for arrays. */
+ map->put(var->data.location, var->name);
+
+ return return_value;
+}
+
+static bool
+reserve_subroutine_explicit_locations(struct gl_shader_program *prog,
+ struct gl_program *p,
+ ir_variable *var)
+{
+ unsigned slots = var->type->uniform_locations();
+ unsigned max_loc = var->data.location + slots - 1;
+
+ /* Resize remap table if locations do not fit in the current one. */
+ if (max_loc + 1 > p->sh.NumSubroutineUniformRemapTable) {
+ p->sh.SubroutineUniformRemapTable =
+ reralloc(p, p->sh.SubroutineUniformRemapTable,
+ gl_uniform_storage *,
+ max_loc + 1);
+
+ if (!p->sh.SubroutineUniformRemapTable) {
+ linker_error(prog, "Out of memory during linking.\n");
+ return false;
+ }
+
+ /* Initialize allocated space. */
+ for (unsigned i = p->sh.NumSubroutineUniformRemapTable; i < max_loc + 1; i++)
+ p->sh.SubroutineUniformRemapTable[i] = NULL;
+
+ p->sh.NumSubroutineUniformRemapTable = max_loc + 1;
+ }
+
+ for (unsigned i = 0; i < slots; i++) {
+ unsigned loc = var->data.location + i;
+
+ /* Check if location is already used. */
+ if (p->sh.SubroutineUniformRemapTable[loc] == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+
+ /* ARB_explicit_uniform_location specification states:
+ * "No two subroutine uniform variables can have the same location
+ * in the same shader stage, otherwise a compiler or linker error
+ * will be generated."
+ */
+ linker_error(prog,
+ "location qualifier for uniform %s overlaps "
+ "previously used location\n",
+ var->name);
+ return false;
+ }
+
+ /* Initialize location as inactive before optimization
+ * rounds and location assignment.
+ */
+ p->sh.SubroutineUniformRemapTable[loc] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
+ }
+
+ return true;
+}
+
+/**
+ * Check and reserve all explicit uniform locations, called before
+ * any optimizations happen to handle also inactive uniforms and
+ * inactive array elements that may get trimmed away.
+ */
+static void
+check_explicit_uniform_locations(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ prog->NumExplicitUniformLocations = 0;
+
+ if (!ctx->Extensions.ARB_explicit_uniform_location)
+ return;
+
+   /* This map is used to detect whether overlapping explicit locations
+    * involve the same uniform (from a different stage) or a different one.
+    */
+ string_to_uint_map *uniform_map = new string_to_uint_map;
+
+ if (!uniform_map) {
+ linker_error(prog, "Out of memory during linking.\n");
+ return;
+ }
+
+ unsigned entries_total = 0;
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[i]->Program;
+
+ foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
+ ir_variable *var = node->as_variable();
+ if (!var || var->data.mode != ir_var_uniform)
+ continue;
+
+ if (var->data.explicit_location) {
+ bool ret = false;
+ if (var->type->without_array()->is_subroutine())
+ ret = reserve_subroutine_explicit_locations(prog, p, var);
+ else {
+ int slots = reserve_explicit_locations(prog, uniform_map,
+ var);
+ if (slots != -1) {
+ ret = true;
+ entries_total += slots;
+ }
+ }
+ if (!ret) {
+ delete uniform_map;
+ return;
+ }
+ }
+ }
+ }
+
+ link_util_update_empty_uniform_locations(prog);
+
+ delete uniform_map;
+ prog->NumExplicitUniformLocations = entries_total;
+}
+
+/* Checks whether the variable 'var' is a packed varying and whether the
+ * given name is part of the packed varying's member list.
+ *
+ * If a variable is a packed varying, it has a name like
+ * 'packed:a,b,c' where a, b and c are separate variables.
+ */
+static bool
+included_in_packed_varying(ir_variable *var, const char *name)
+{
+ if (strncmp(var->name, "packed:", 7) != 0)
+ return false;
+
+ char *list = strdup(var->name + 7);
+ assert(list);
+
+ bool found = false;
+ char *saveptr;
+ char *token = strtok_r(list, ",", &saveptr);
+ while (token) {
+ if (strcmp(token, name) == 0) {
+ found = true;
+ break;
+ }
+ token = strtok_r(NULL, ",", &saveptr);
+ }
+ free(list);
+ return found;
+}
+
+/**
+ * Builds a stage reference bitmask from a variable name.
+ */
+static uint8_t
+build_stageref(struct gl_shader_program *shProg, const char *name,
+ unsigned mode)
+{
+ uint8_t stages = 0;
+
+   /* Note that we assume at most 8 stages; if more stages are added, the
+    * type used for the reference mask in gl_program_resource will need to
+    * be changed.
+    */
+ assert(MESA_SHADER_STAGES < 8);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = shProg->_LinkedShaders[i];
+ if (!sh)
+ continue;
+
+ /* Shader symbol table may contain variables that have
+ * been optimized away. Search IR for the variable instead.
+ */
+ foreach_in_list(ir_instruction, node, sh->ir) {
+ ir_variable *var = node->as_variable();
+ if (var) {
+ unsigned baselen = strlen(var->name);
+
+ if (included_in_packed_varying(var, name)) {
+ stages |= (1 << i);
+ break;
+ }
+
+         /* The variable mode needs to match, otherwise we might pick a
+          * variable with the same name but a different interface.
+ */
+ if (var->data.mode != mode)
+ continue;
+
+ if (strncmp(var->name, name, baselen) == 0) {
+ /* Check for exact name matches but also check for arrays and
+ * structs.
+ */
+ if (name[baselen] == '\0' ||
+ name[baselen] == '[' ||
+ name[baselen] == '.') {
+ stages |= (1 << i);
+ break;
+ }
+ }
+ }
+ }
+ }
+ return stages;
+}
+
+/**
+ * Create gl_shader_variable from ir_variable class.
+ */
+static gl_shader_variable *
+create_shader_variable(struct gl_shader_program *shProg,
+ const ir_variable *in,
+ const char *name, const glsl_type *type,
+ const glsl_type *interface_type,
+ bool use_implicit_location, int location,
+ const glsl_type *outermost_struct_type)
+{
+ /* Allocate zero-initialized memory to ensure that bitfield padding
+ * is zero.
+ */
+ gl_shader_variable *out = rzalloc(shProg, struct gl_shader_variable);
+ if (!out)
+ return NULL;
+
+   /* gl_VertexID may be lowered to gl_VertexIDMESA, but applications expect
+    * to see gl_VertexID in the program resource list, so pretend it is
+    * still there.
+    */
+ if (in->data.mode == ir_var_system_value &&
+ in->data.location == SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) {
+ out->name = ralloc_strdup(shProg, "gl_VertexID");
+ } else if ((in->data.mode == ir_var_shader_out &&
+ in->data.location == VARYING_SLOT_TESS_LEVEL_OUTER) ||
+ (in->data.mode == ir_var_system_value &&
+ in->data.location == SYSTEM_VALUE_TESS_LEVEL_OUTER)) {
+ out->name = ralloc_strdup(shProg, "gl_TessLevelOuter");
+ type = glsl_type::get_array_instance(glsl_type::float_type, 4);
+ } else if ((in->data.mode == ir_var_shader_out &&
+ in->data.location == VARYING_SLOT_TESS_LEVEL_INNER) ||
+ (in->data.mode == ir_var_system_value &&
+ in->data.location == SYSTEM_VALUE_TESS_LEVEL_INNER)) {
+ out->name = ralloc_strdup(shProg, "gl_TessLevelInner");
+ type = glsl_type::get_array_instance(glsl_type::float_type, 2);
+ } else {
+ out->name = ralloc_strdup(shProg, name);
+ }
+
+ if (!out->name)
+ return NULL;
+
+ /* The ARB_program_interface_query spec says:
+ *
+ * "Not all active variables are assigned valid locations; the
+ * following variables will have an effective location of -1:
+ *
+ * * uniforms declared as atomic counters;
+ *
+ * * members of a uniform block;
+ *
+ * * built-in inputs, outputs, and uniforms (starting with "gl_"); and
+ *
+ * * inputs or outputs not declared with a "location" layout
+ * qualifier, except for vertex shader inputs and fragment shader
+ * outputs."
+ */
+ if (in->type->is_atomic_uint() || is_gl_identifier(in->name) ||
+ !(in->data.explicit_location || use_implicit_location)) {
+ out->location = -1;
+ } else {
+ out->location = location;
+ }
+
+ out->type = type;
+ out->outermost_struct_type = outermost_struct_type;
+ out->interface_type = interface_type;
+ out->component = in->data.location_frac;
+ out->index = in->data.index;
+ out->patch = in->data.patch;
+ out->mode = in->data.mode;
+ out->interpolation = in->data.interpolation;
+ out->explicit_location = in->data.explicit_location;
+ out->precision = in->data.precision;
+
+ return out;
+}
+
+static bool
+add_shader_variable(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set,
+ unsigned stage_mask,
+ GLenum programInterface, ir_variable *var,
+ const char *name, const glsl_type *type,
+ bool use_implicit_location, int location,
+ bool inouts_share_location,
+ const glsl_type *outermost_struct_type = NULL)
+{
+ const glsl_type *interface_type = var->get_interface_type();
+
+ if (outermost_struct_type == NULL) {
+ if (var->data.from_named_ifc_block) {
+ const char *interface_name = interface_type->name;
+
+ if (interface_type->is_array()) {
+ /* Issue #16 of the ARB_program_interface_query spec says:
+ *
+ * "* If a variable is a member of an interface block without an
+ * instance name, it is enumerated using just the variable name.
+ *
+ * * If a variable is a member of an interface block with an
+ * instance name, it is enumerated as "BlockName.Member", where
+ * "BlockName" is the name of the interface block (not the
+ * instance name) and "Member" is the name of the variable."
+ *
+ * In particular, it indicates that it should be "BlockName",
+ * not "BlockName[array length]". The conformance suite and
+ * dEQP both require this behavior.
+ *
+ * Here, we unwrap the extra array level added by named interface
+ * block array lowering so we have the correct variable type. We
+ * also unwrap the interface type when constructing the name.
+ *
+ * We leave interface_type the same so that ES 3.x SSO pipeline
+ * validation can enforce the rules requiring array length to
+ * match on interface blocks.
+ */
+ type = type->fields.array;
+
+ interface_name = interface_type->fields.array->name;
+ }
+
+ name = ralloc_asprintf(shProg, "%s.%s", interface_name, name);
+ }
+ }
+
+ switch (type->base_type) {
+ case GLSL_TYPE_STRUCT: {
+ /* The ARB_program_interface_query spec says:
+ *
+ * "For an active variable declared as a structure, a separate entry
+ * will be generated for each active structure member. The name of
+ * each entry is formed by concatenating the name of the structure,
+ * the "." character, and the name of the structure member. If a
+ * structure member to enumerate is itself a structure or array,
+ * these enumeration rules are applied recursively."
+ */
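+      /* Illustrative example: a variable declared
+       * "struct S { vec4 a; mat2 m; } s;" is enumerated by the loop below
+       * as the two entries "s.a" and "s.m", recursing further if a member
+       * is itself a structure or array.
+       */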
+ if (outermost_struct_type == NULL)
+ outermost_struct_type = type;
+
+ unsigned field_location = location;
+ for (unsigned i = 0; i < type->length; i++) {
+ const struct glsl_struct_field *field = &type->fields.structure[i];
+ char *field_name = ralloc_asprintf(shProg, "%s.%s", name, field->name);
+ if (!add_shader_variable(ctx, shProg, resource_set,
+ stage_mask, programInterface,
+ var, field_name, field->type,
+ use_implicit_location, field_location,
+ false, outermost_struct_type))
+ return false;
+
+ field_location += field->type->count_attribute_slots(false);
+ }
+ return true;
+ }
+
+ case GLSL_TYPE_ARRAY: {
+ /* The ARB_program_interface_query spec says:
+ *
+ * "For an active variable declared as an array of basic types, a
+ * single entry will be generated, with its name string formed by
+ * concatenating the name of the array and the string "[0]"."
+ *
+ * "For an active variable declared as an array of an aggregate data
+ * type (structures or arrays), a separate entry will be generated
+ * for each active array element, unless noted immediately below.
+ * The name of each entry is formed by concatenating the name of
+ * the array, the "[" character, an integer identifying the element
+ * number, and the "]" character. These enumeration rules are
+ * applied recursively, treating each enumerated array element as a
+ * separate active variable."
+ */
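+      /* Illustrative example: per the rules above, "vec4 v[3]" produces
+       * the single entry "v[0]", while an array of structures "S s[2]"
+       * produces separate entries "s[0]" and "s[1]", each enumerated
+       * recursively.
+       */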
+ const struct glsl_type *array_type = type->fields.array;
+ if (array_type->base_type == GLSL_TYPE_STRUCT ||
+ array_type->base_type == GLSL_TYPE_ARRAY) {
+ unsigned elem_location = location;
+ unsigned stride = inouts_share_location ? 0 :
+ array_type->count_attribute_slots(false);
+ for (unsigned i = 0; i < type->length; i++) {
+ char *elem = ralloc_asprintf(shProg, "%s[%d]", name, i);
+ if (!add_shader_variable(ctx, shProg, resource_set,
+ stage_mask, programInterface,
+ var, elem, array_type,
+ use_implicit_location, elem_location,
+ false, outermost_struct_type))
+ return false;
+ elem_location += stride;
+ }
+ return true;
+ }
+ /* fallthrough */
+ }
+
+ default: {
+ /* The ARB_program_interface_query spec says:
+ *
+ * "For an active variable declared as a single instance of a basic
+ * type, a single entry will be generated, using the variable name
+ * from the shader source."
+ */
+ gl_shader_variable *sha_v =
+ create_shader_variable(shProg, var, name, type, interface_type,
+ use_implicit_location, location,
+ outermost_struct_type);
+ if (!sha_v)
+ return false;
+
+ return link_util_add_program_resource(shProg, resource_set,
+ programInterface, sha_v, stage_mask);
+ }
+ }
+}
+
+static bool
+inout_has_same_location(const ir_variable *var, unsigned stage)
+{
+ if (!var->data.patch &&
+ ((var->data.mode == ir_var_shader_out &&
+ stage == MESA_SHADER_TESS_CTRL) ||
+ (var->data.mode == ir_var_shader_in &&
+ (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_TESS_EVAL ||
+ stage == MESA_SHADER_GEOMETRY))))
+ return true;
+ else
+ return false;
+}
+
+static bool
+add_interface_variables(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set,
+ unsigned stage, GLenum programInterface)
+{
+ exec_list *ir = shProg->_LinkedShaders[stage]->ir;
+
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *var = node->as_variable();
+
+ if (!var || var->data.how_declared == ir_var_hidden)
+ continue;
+
+ int loc_bias;
+
+ switch (var->data.mode) {
+ case ir_var_system_value:
+ case ir_var_shader_in:
+ if (programInterface != GL_PROGRAM_INPUT)
+ continue;
+ loc_bias = (stage == MESA_SHADER_VERTEX) ? int(VERT_ATTRIB_GENERIC0)
+ : int(VARYING_SLOT_VAR0);
+ break;
+ case ir_var_shader_out:
+ if (programInterface != GL_PROGRAM_OUTPUT)
+ continue;
+ loc_bias = (stage == MESA_SHADER_FRAGMENT) ? int(FRAG_RESULT_DATA0)
+ : int(VARYING_SLOT_VAR0);
+ break;
+ default:
+ continue;
+      }
+
+ if (var->data.patch)
+ loc_bias = int(VARYING_SLOT_PATCH0);
+
+      /* Skip packed varyings; these are handled separately by
+       * add_packed_varyings.
+       */
+ if (strncmp(var->name, "packed:", 7) == 0)
+ continue;
+
+      /* Skip fragdata arrays; these are handled separately by
+       * add_fragdata_arrays.
+       */
+ if (strncmp(var->name, "gl_out_FragData", 15) == 0)
+ continue;
+
+ const bool vs_input_or_fs_output =
+ (stage == MESA_SHADER_VERTEX && var->data.mode == ir_var_shader_in) ||
+ (stage == MESA_SHADER_FRAGMENT && var->data.mode == ir_var_shader_out);
+
+ if (!add_shader_variable(ctx, shProg, resource_set,
+ 1 << stage, programInterface,
+ var, var->name, var->type, vs_input_or_fs_output,
+ var->data.location - loc_bias,
+ inout_has_same_location(var, stage)))
+ return false;
+ }
+ return true;
+}
+
+static bool
+add_packed_varyings(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set,
+ int stage, GLenum type)
+{
+ struct gl_linked_shader *sh = shProg->_LinkedShaders[stage];
+ GLenum iface;
+
+ if (!sh || !sh->packed_varyings)
+ return true;
+
+ foreach_in_list(ir_instruction, node, sh->packed_varyings) {
+ ir_variable *var = node->as_variable();
+ if (var) {
+ switch (var->data.mode) {
+ case ir_var_shader_in:
+ iface = GL_PROGRAM_INPUT;
+ break;
+ case ir_var_shader_out:
+ iface = GL_PROGRAM_OUTPUT;
+ break;
+ default:
+ unreachable("unexpected type");
+ }
+
+ if (type == iface) {
+ const int stage_mask =
+ build_stageref(shProg, var->name, var->data.mode);
+ if (!add_shader_variable(ctx, shProg, resource_set,
+ stage_mask,
+ iface, var, var->name, var->type, false,
+ var->data.location - VARYING_SLOT_VAR0,
+ inout_has_same_location(var, stage)))
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+static bool
+add_fragdata_arrays(const struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct set *resource_set)
+{
+ struct gl_linked_shader *sh = shProg->_LinkedShaders[MESA_SHADER_FRAGMENT];
+
+ if (!sh || !sh->fragdata_arrays)
+ return true;
+
+ foreach_in_list(ir_instruction, node, sh->fragdata_arrays) {
+ ir_variable *var = node->as_variable();
+ if (var) {
+ assert(var->data.mode == ir_var_shader_out);
+
+ if (!add_shader_variable(ctx, shProg, resource_set,
+ 1 << MESA_SHADER_FRAGMENT,
+ GL_PROGRAM_OUTPUT, var, var->name, var->type,
+ true, var->data.location - FRAG_RESULT_DATA0,
+ false))
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Builds up a list of program resources that point to existing
+ * resource data.
+ */
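+/* The list is assembled in a fixed order: packed varyings (SSO only),
+ * fragdata arrays, per-stage interface inputs and outputs, transform
+ * feedback varyings and buffers, uniforms and buffer variables, uniform
+ * and shader storage blocks, atomic counter buffers, and subroutines.
+ */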
+void
+build_program_resource_list(struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ bool add_packed_varyings_only)
+{
+ /* Rebuild resource list. */
+ if (shProg->data->ProgramResourceList) {
+ ralloc_free(shProg->data->ProgramResourceList);
+ shProg->data->ProgramResourceList = NULL;
+ shProg->data->NumProgramResourceList = 0;
+ }
+
+ int input_stage = MESA_SHADER_STAGES, output_stage = 0;
+
+ /* Determine first input and final output stage. These are used to
+ * detect which variables should be enumerated in the resource list
+ * for GL_PROGRAM_INPUT and GL_PROGRAM_OUTPUT.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (!shProg->_LinkedShaders[i])
+ continue;
+ if (input_stage == MESA_SHADER_STAGES)
+ input_stage = i;
+ output_stage = i;
+ }
+
+ /* Empty shader, no resources. */
+ if (input_stage == MESA_SHADER_STAGES && output_stage == 0)
+ return;
+
+ struct set *resource_set = _mesa_pointer_set_create(NULL);
+
+ /* Program interface needs to expose varyings in case of SSO. */
+ if (shProg->SeparateShader) {
+ if (!add_packed_varyings(ctx, shProg, resource_set,
+ input_stage, GL_PROGRAM_INPUT))
+ return;
+
+ if (!add_packed_varyings(ctx, shProg, resource_set,
+ output_stage, GL_PROGRAM_OUTPUT))
+ return;
+ }
+
+ if (add_packed_varyings_only) {
+ _mesa_set_destroy(resource_set, NULL);
+ return;
+ }
+
+ if (!add_fragdata_arrays(ctx, shProg, resource_set))
+ return;
+
+ /* Add inputs and outputs to the resource list. */
+ if (!add_interface_variables(ctx, shProg, resource_set,
+ input_stage, GL_PROGRAM_INPUT))
+ return;
+
+ if (!add_interface_variables(ctx, shProg, resource_set,
+ output_stage, GL_PROGRAM_OUTPUT))
+ return;
+
+ if (shProg->last_vert_prog) {
+ struct gl_transform_feedback_info *linked_xfb =
+ shProg->last_vert_prog->sh.LinkedTransformFeedback;
+
+ /* Add transform feedback varyings. */
+ if (linked_xfb->NumVarying > 0) {
+ for (int i = 0; i < linked_xfb->NumVarying; i++) {
+ if (!link_util_add_program_resource(shProg, resource_set,
+ GL_TRANSFORM_FEEDBACK_VARYING,
+ &linked_xfb->Varyings[i], 0))
+ return;
+ }
+ }
+
+ /* Add transform feedback buffers. */
+ for (unsigned i = 0; i < ctx->Const.MaxTransformFeedbackBuffers; i++) {
+ if ((linked_xfb->ActiveBuffers >> i) & 1) {
+ linked_xfb->Buffers[i].Binding = i;
+ if (!link_util_add_program_resource(shProg, resource_set,
+ GL_TRANSFORM_FEEDBACK_BUFFER,
+ &linked_xfb->Buffers[i], 0))
+ return;
+ }
+ }
+ }
+
+ int top_level_array_base_offset = -1;
+ int top_level_array_size_in_bytes = -1;
+ int second_element_offset = -1;
+ int buffer_block_index = -1;
+
+ /* Add uniforms from uniform storage. */
+ for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
+ /* Do not add uniforms internally used by Mesa. */
+ if (shProg->data->UniformStorage[i].hidden)
+ continue;
+
+ bool is_shader_storage =
+ shProg->data->UniformStorage[i].is_shader_storage;
+ GLenum type = is_shader_storage ? GL_BUFFER_VARIABLE : GL_UNIFORM;
+ if (!link_util_should_add_buffer_variable(shProg,
+ &shProg->data->UniformStorage[i],
+ top_level_array_base_offset,
+ top_level_array_size_in_bytes,
+ second_element_offset,
+ buffer_block_index))
+ continue;
+
+ if (is_shader_storage) {
+ /* From the OpenGL 4.6 specification, 7.3.1.1 Naming Active Resources:
+ *
+ * "For an active shader storage block member declared as an array
+ * of an aggregate type, an entry will be generated only for the
+ * first array element, regardless of its type. Such block members
+ * are referred to as top-level arrays. If the block member is an
+ * aggregate type, the enumeration rules are then applied
+ * recursively."
+ *
+ * Below we update our tracking values used by
+ * link_util_should_add_buffer_variable(). We only want to reset the
+ * offsets once we have moved past the first element.
+ */
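+ /* Illustrative sketch (not from the spec): given a block such as
+ *
+ * struct S { float a; float b[2]; };
+ * buffer Block { S top[4]; };
+ *
+ * "top" is a top-level array of an aggregate, so only the members of
+ * top[0] are enumerated; the offset tracking below is what lets
+ * link_util_should_add_buffer_variable() skip members that land
+ * within top[1..3].
+ */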
+ if (shProg->data->UniformStorage[i].offset >= second_element_offset) {
+ top_level_array_base_offset =
+ shProg->data->UniformStorage[i].offset;
+
+ top_level_array_size_in_bytes =
+ shProg->data->UniformStorage[i].top_level_array_size *
+ shProg->data->UniformStorage[i].top_level_array_stride;
+
+ /* Set or reset the second element offset. For non-arrays this
+ * will be set to -1.
+ */
+ second_element_offset = top_level_array_size_in_bytes ?
+ top_level_array_base_offset +
+ shProg->data->UniformStorage[i].top_level_array_stride : -1;
+ }
+
+ buffer_block_index = shProg->data->UniformStorage[i].block_index;
+ }
+
+ uint8_t stageref = shProg->data->UniformStorage[i].active_shader_mask;
+ if (!link_util_add_program_resource(shProg, resource_set, type,
+ &shProg->data->UniformStorage[i], stageref))
+ return;
+ }
+
+ /* Add program uniform blocks. */
+ for (unsigned i = 0; i < shProg->data->NumUniformBlocks; i++) {
+ if (!link_util_add_program_resource(shProg, resource_set, GL_UNIFORM_BLOCK,
+ &shProg->data->UniformBlocks[i], 0))
+ return;
+ }
+
+ /* Add program shader storage blocks. */
+ for (unsigned i = 0; i < shProg->data->NumShaderStorageBlocks; i++) {
+ if (!link_util_add_program_resource(shProg, resource_set, GL_SHADER_STORAGE_BLOCK,
+ &shProg->data->ShaderStorageBlocks[i], 0))
+ return;
+ }
+
+ /* Add atomic counter buffers. */
+ for (unsigned i = 0; i < shProg->data->NumAtomicBuffers; i++) {
+ if (!link_util_add_program_resource(shProg, resource_set, GL_ATOMIC_COUNTER_BUFFER,
+ &shProg->data->AtomicBuffers[i], 0))
+ return;
+ }
+
+ for (unsigned i = 0; i < shProg->data->NumUniformStorage; i++) {
+ GLenum type;
+ if (!shProg->data->UniformStorage[i].hidden)
+ continue;
+
+ for (int j = MESA_SHADER_VERTEX; j < MESA_SHADER_STAGES; j++) {
+ if (!shProg->data->UniformStorage[i].opaque[j].active ||
+ !shProg->data->UniformStorage[i].type->is_subroutine())
+ continue;
+
+ type = _mesa_shader_stage_to_subroutine_uniform((gl_shader_stage)j);
+ /* add shader subroutines */
+ if (!link_util_add_program_resource(shProg, resource_set,
+ type, &shProg->data->UniformStorage[i], 0))
+ return;
+ }
+ }
+
+ unsigned mask = shProg->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = shProg->_LinkedShaders[i]->Program;
+
+ GLuint type = _mesa_shader_stage_to_subroutine((gl_shader_stage)i);
+ for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
+ if (!link_util_add_program_resource(shProg, resource_set,
+ type, &p->sh.SubroutineFunctions[j], 0))
+ return;
+ }
+ }
+
+ _mesa_set_destroy(resource_set, NULL);
+}
+
+/**
+ * This check is done to make sure we allow only constant expression
+ * indexing and "constant-index-expression" (indexing with an expression
+ * that includes loop induction variable).
+ */
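+/* For example (illustrative GLSL, not taken from this file):
+ *
+ * uniform sampler2D tex[4];
+ * for (int i = 0; i < 4; ++i)
+ * color += texture2D(tex[i], uv);
+ *
+ * "i" is a loop induction variable, so tex[i] is a
+ * constant-index-expression; indexing with an arbitrary uniform or
+ * varying value is the dynamic case the visitor below detects.
+ */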
+static bool
+validate_sampler_array_indexing(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ dynamic_sampler_array_indexing_visitor v;
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ bool no_dynamic_indexing =
+ ctx->Const.ShaderCompilerOptions[i].EmitNoIndirectSampler;
+
+ /* Search for array derefs in shader. */
+ v.run(prog->_LinkedShaders[i]->ir);
+ if (v.uses_dynamic_sampler_array_indexing()) {
+ const char *msg = "sampler arrays indexed with non-constant "
+ "expressions are forbidden in GLSL %s %u";
+ /* Backend has indicated that it has no dynamic indexing support. */
+ if (no_dynamic_indexing) {
+ linker_error(prog, msg, prog->IsES ? "ES" : "",
+ prog->data->Version);
+ return false;
+ } else {
+ linker_warning(prog, msg, prog->IsES ? "ES" : "",
+ prog->data->Version);
+ }
+ }
+ }
+ return true;
+}
+
+static void
+link_assign_subroutine_types(struct gl_shader_program *prog)
+{
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ gl_program *p = prog->_LinkedShaders[i]->Program;
+
+ p->sh.MaxSubroutineFunctionIndex = 0;
+ foreach_in_list(ir_instruction, node, prog->_LinkedShaders[i]->ir) {
+ ir_function *fn = node->as_function();
+ if (!fn)
+ continue;
+
+ if (fn->is_subroutine)
+ p->sh.NumSubroutineUniformTypes++;
+
+ if (!fn->num_subroutine_types)
+ continue;
+
+ /* these should have been calculated earlier. */
+ assert(fn->subroutine_index != -1);
+ if (p->sh.NumSubroutineFunctions + 1 > MAX_SUBROUTINES) {
+ linker_error(prog, "Too many subroutine functions declared.\n");
+ return;
+ }
+ p->sh.SubroutineFunctions = reralloc(p, p->sh.SubroutineFunctions,
+ struct gl_subroutine_function,
+ p->sh.NumSubroutineFunctions + 1);
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].name = ralloc_strdup(p, fn->name);
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].num_compat_types = fn->num_subroutine_types;
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types =
+ ralloc_array(p, const struct glsl_type *,
+ fn->num_subroutine_types);
+
+ /* From Section 4.4.4(Subroutine Function Layout Qualifiers) of the
+ * GLSL 4.5 spec:
+ *
+ * "Each subroutine with an index qualifier in the shader must be
+ * given a unique index, otherwise a compile or link error will be
+ * generated."
+ */
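+ /* E.g. (illustrative): two subroutines in the same stage declared
+ * with layout(index = 2) must fail to link; the scan below detects
+ * exactly that collision.
+ */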
+ for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
+ if (p->sh.SubroutineFunctions[j].index != -1 &&
+ p->sh.SubroutineFunctions[j].index == fn->subroutine_index) {
+ linker_error(prog, "each subroutine index qualifier in the "
+ "shader must be unique\n");
+ return;
+ }
+ }
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].index =
+ fn->subroutine_index;
+
+ if (fn->subroutine_index > (int)p->sh.MaxSubroutineFunctionIndex)
+ p->sh.MaxSubroutineFunctionIndex = fn->subroutine_index;
+
+ for (int j = 0; j < fn->num_subroutine_types; j++)
+ p->sh.SubroutineFunctions[p->sh.NumSubroutineFunctions].types[j] = fn->subroutine_types[j];
+ p->sh.NumSubroutineFunctions++;
+ }
+ }
+}
+
+static void
+verify_subroutine_associated_funcs(struct gl_shader_program *prog)
+{
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ gl_program *p = prog->_LinkedShaders[i]->Program;
+ glsl_symbol_table *symbols = prog->_LinkedShaders[i]->symbols;
+
+ /* Section 6.1.2 (Subroutines) of the GLSL 4.00 spec says:
+ *
+ * "A program will fail to compile or link if any shader
+ * or stage contains two or more functions with the same
+ * name if the name is associated with a subroutine type."
+ */
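+ /* Illustrative GLSL (hedged): the rule makes the following a link
+ * error, since the name "f" is associated with a subroutine type and
+ * has two function definitions:
+ *
+ * subroutine void func_t(void);
+ * subroutine(func_t) void f(void) { }
+ * void f(float x) { }
+ */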
+ for (unsigned j = 0; j < p->sh.NumSubroutineFunctions; j++) {
+ unsigned definitions = 0;
+ char *name = p->sh.SubroutineFunctions[j].name;
+ ir_function *fn = symbols->get_function(name);
+
+ /* Calculate number of function definitions with the same name */
+ foreach_in_list(ir_function_signature, sig, &fn->signatures) {
+ if (sig->is_defined) {
+ if (++definitions > 1) {
+ linker_error(prog, "%s shader contains two or more function "
+ "definitions with name `%s', which is "
+ "associated with a subroutine type.\n",
+ _mesa_shader_stage_to_string(i),
+ fn->name);
+ return;
+ }
+ }
+ }
+ }
+ }
+}
+
+
+static void
+set_always_active_io(exec_list *ir, ir_variable_mode io_mode)
+{
+ assert(io_mode == ir_var_shader_in || io_mode == ir_var_shader_out);
+
+ foreach_in_list(ir_instruction, node, ir) {
+ ir_variable *const var = node->as_variable();
+
+ if (var == NULL || var->data.mode != io_mode)
+ continue;
+
+ /* Don't set always active on builtins that haven't been redeclared */
+ if (var->data.how_declared == ir_var_declared_implicitly)
+ continue;
+
+ var->data.always_active_io = true;
+ }
+}
+
+/**
+ * When separate shader programs are enabled, only input/outputs between
+ * the stages of a multi-stage separate program can be safely removed
+ * from the shader interface. Other inputs/outputs must remain active.
+ */
+static void
+disable_varying_optimizations_for_sso(struct gl_shader_program *prog)
+{
+ unsigned first, last;
+ assert(prog->SeparateShader);
+
+ first = MESA_SHADER_STAGES;
+ last = 0;
+
+ /* Determine the first and last stage, excluding the compute stage. */
+ for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
+ if (!prog->_LinkedShaders[i])
+ continue;
+ if (first == MESA_SHADER_STAGES)
+ first = i;
+ last = i;
+ }
+
+ if (first == MESA_SHADER_STAGES)
+ return;
+
+ for (unsigned stage = 0; stage < MESA_SHADER_STAGES; stage++) {
+ gl_linked_shader *sh = prog->_LinkedShaders[stage];
+ if (!sh)
+ continue;
+
+ /* Prevent the removal of inputs to the first and outputs from the last
+ * stage, unless they are the initial pipeline inputs or final pipeline
+ * outputs, respectively.
+ *
+ * The removal of IO between shaders in the same program is always
+ * allowed.
+ */
+ if (stage == first && stage != MESA_SHADER_VERTEX)
+ set_always_active_io(sh->ir, ir_var_shader_in);
+ if (stage == last && stage != MESA_SHADER_FRAGMENT)
+ set_always_active_io(sh->ir, ir_var_shader_out);
+ }
+}
+
+static void
+link_and_validate_uniforms(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ update_array_sizes(prog);
+
+ if (!ctx->Const.UseNIRGLSLLinker) {
+ link_assign_uniform_locations(prog, ctx);
+
+ if (prog->data->LinkStatus == LINKING_FAILURE)
+ return;
+
+ link_util_calculate_subroutine_compat(prog);
+ link_util_check_uniform_resources(ctx, prog);
+ link_util_check_subroutine_resources(prog);
+ check_image_resources(ctx, prog);
+ link_assign_atomic_counter_resources(ctx, prog);
+ link_check_atomic_counter_resources(ctx, prog);
+ }
+}
+
+static bool
+link_varyings_and_uniforms(unsigned first, unsigned last,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog, void *mem_ctx)
+{
+ /* Mark all generic shader inputs and outputs as unpaired. */
+ for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
+ if (prog->_LinkedShaders[i] != NULL) {
+ link_invalidate_variable_locations(prog->_LinkedShaders[i]->ir);
+ }
+ }
+
+ unsigned prev = first;
+ for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ match_explicit_outputs_to_inputs(prog->_LinkedShaders[prev],
+ prog->_LinkedShaders[i]);
+ prev = i;
+ }
+
+ if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
+ MESA_SHADER_VERTEX, true)) {
+ return false;
+ }
+
+ if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
+ MESA_SHADER_FRAGMENT, true)) {
+ return false;
+ }
+
+ prog->last_vert_prog = NULL;
+ for (int i = MESA_SHADER_GEOMETRY; i >= MESA_SHADER_VERTEX; i--) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ prog->last_vert_prog = prog->_LinkedShaders[i]->Program;
+ break;
+ }
+
+ if (!link_varyings(prog, first, last, ctx, mem_ctx))
+ return false;
+
+ link_and_validate_uniforms(ctx, prog);
+
+ if (!prog->data->LinkStatus)
+ return false;
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ const struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[i];
+
+ if (options->LowerBufferInterfaceBlocks)
+ lower_ubo_reference(prog->_LinkedShaders[i],
+ options->ClampBlockIndicesToArrayBounds,
+ ctx->Const.UseSTD430AsDefaultPacking);
+
+ if (i == MESA_SHADER_COMPUTE)
+ lower_shared_reference(ctx, prog, prog->_LinkedShaders[i]);
+
+ lower_vector_derefs(prog->_LinkedShaders[i]);
+ do_vec_index_to_swizzle(prog->_LinkedShaders[i]->ir);
+ }
+
+ return true;
+}
+
+static void
+linker_optimisation_loop(struct gl_context *ctx, exec_list *ir,
+ unsigned stage)
+{
+ if (ctx->Const.GLSLOptimizeConservatively) {
+ /* Run it just once. */
+ do_common_optimization(ir, true, false,
+ &ctx->Const.ShaderCompilerOptions[stage],
+ ctx->Const.NativeIntegers);
+ } else {
+ /* Repeat it until it stops making changes. */
+ while (do_common_optimization(ir, true, false,
+ &ctx->Const.ShaderCompilerOptions[stage],
+ ctx->Const.NativeIntegers))
+ ;
+ }
+}
+
+void
+link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ prog->data->LinkStatus = LINKING_SUCCESS; /* All error paths will set this to false */
+ prog->data->Validated = false;
+
+ /* Section 7.3 (Program Objects) of the OpenGL 4.5 Core Profile spec says:
+ *
+ * "Linking can fail for a variety of reasons as specified in the
+ * OpenGL Shading Language Specification, as well as any of the
+ * following reasons:
+ *
+ * - No shader objects are attached to program."
+ *
+ * The Compatibility Profile specification does not list the error. In
+ * Compatibility Profile missing shader stages are replaced by
+ * fixed-function. This applies to the case where all stages are
+ * missing.
+ */
+ if (prog->NumShaders == 0) {
+ if (ctx->API != API_OPENGL_COMPAT)
+ linker_error(prog, "no shaders attached to the program\n");
+ return;
+ }
+
+#ifdef ENABLE_SHADER_CACHE
+ if (shader_cache_read_program_metadata(ctx, prog))
+ return;
+#endif
+
+ void *mem_ctx = ralloc_context(NULL); // temporary linker context
+
+ prog->ARB_fragment_coord_conventions_enable = false;
+
+ /* Separate the shaders into groups based on their type.
+ */
+ struct gl_shader **shader_list[MESA_SHADER_STAGES];
+ unsigned num_shaders[MESA_SHADER_STAGES];
+
+ for (int i = 0; i < MESA_SHADER_STAGES; i++) {
+ shader_list[i] = (struct gl_shader **)
+ calloc(prog->NumShaders, sizeof(struct gl_shader *));
+ num_shaders[i] = 0;
+ }
+
+ unsigned min_version = UINT_MAX;
+ unsigned max_version = 0;
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ min_version = MIN2(min_version, prog->Shaders[i]->Version);
+ max_version = MAX2(max_version, prog->Shaders[i]->Version);
+
+ if (!ctx->Const.AllowGLSLRelaxedES &&
+ prog->Shaders[i]->IsES != prog->Shaders[0]->IsES) {
+ linker_error(prog, "all shaders must use same shading "
+ "language version\n");
+ goto done;
+ }
+
+ if (prog->Shaders[i]->ARB_fragment_coord_conventions_enable) {
+ prog->ARB_fragment_coord_conventions_enable = true;
+ }
+
+ gl_shader_stage shader_type = prog->Shaders[i]->Stage;
+ shader_list[shader_type][num_shaders[shader_type]] = prog->Shaders[i];
+ num_shaders[shader_type]++;
+ }
+
+ /* In desktop GLSL, different shader versions may be linked together. In
+ * GLSL ES, all shader versions must be the same.
+ */
+ if (!ctx->Const.AllowGLSLRelaxedES && prog->Shaders[0]->IsES &&
+ min_version != max_version) {
+ linker_error(prog, "all shaders must use same shading "
+ "language version\n");
+ goto done;
+ }
+
+ prog->data->Version = max_version;
+ prog->IsES = prog->Shaders[0]->IsES;
+
+ /* Some shaders have to be linked with some other shaders present.
+ */
+ if (!prog->SeparateShader) {
+ if (num_shaders[MESA_SHADER_GEOMETRY] > 0 &&
+ num_shaders[MESA_SHADER_VERTEX] == 0) {
+ linker_error(prog, "Geometry shader must be linked with "
+ "vertex shader\n");
+ goto done;
+ }
+ if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
+ num_shaders[MESA_SHADER_VERTEX] == 0) {
+ linker_error(prog, "Tessellation evaluation shader must be linked "
+ "with vertex shader\n");
+ goto done;
+ }
+ if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
+ num_shaders[MESA_SHADER_VERTEX] == 0) {
+ linker_error(prog, "Tessellation control shader must be linked with "
+ "vertex shader\n");
+ goto done;
+ }
+
+ /* Section 7.3 of the OpenGL ES 3.2 specification says:
+ *
+ * "Linking can fail for [...] any of the following reasons:
+ *
+ * * program contains an object to form a tessellation control
+ * shader [...] and [...] the program is not separable and
+ * contains no object to form a tessellation evaluation shader"
+ *
+ * The OpenGL spec is contradictory. It allows linking without a tess
+ * eval shader, but that can only be used with transform feedback and
+ * rasterization disabled. However, transform feedback isn't allowed
+ * with GL_PATCHES, so it can't be used.
+ *
+ * More investigation showed that the idea of transform feedback after
+ * a tess control shader was dropped, because some hw vendors couldn't
+ * support tessellation without a tess eval shader, but the linker
+ * section wasn't updated to reflect that.
+ *
+ * All specifications (ARB_tessellation_shader, GL 4.0-4.5) have this
+ * spec bug.
+ *
+ * Do what's reasonable and always require a tess eval shader if a tess
+ * control shader is present.
+ */
+ if (num_shaders[MESA_SHADER_TESS_CTRL] > 0 &&
+ num_shaders[MESA_SHADER_TESS_EVAL] == 0) {
+ linker_error(prog, "Tessellation control shader must be linked with "
+ "tessellation evaluation shader\n");
+ goto done;
+ }
+
+ if (prog->IsES) {
+ if (num_shaders[MESA_SHADER_TESS_EVAL] > 0 &&
+ num_shaders[MESA_SHADER_TESS_CTRL] == 0) {
+ linker_error(prog, "GLSL ES requires non-separable programs "
+ "containing a tessellation evaluation shader to also "
+ "be linked with a tessellation control shader\n");
+ goto done;
+ }
+ }
+ }
+
+ /* Compute shaders have additional restrictions. */
+ if (num_shaders[MESA_SHADER_COMPUTE] > 0 &&
+ num_shaders[MESA_SHADER_COMPUTE] != prog->NumShaders) {
+ linker_error(prog, "Compute shaders may not be linked with any other "
+ "type of shader\n");
+ }
+
+ /* Link all shaders for a particular stage and validate the result.
+ */
+ for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
+ if (num_shaders[stage] > 0) {
+ gl_linked_shader *const sh =
+ link_intrastage_shaders(mem_ctx, ctx, prog, shader_list[stage],
+ num_shaders[stage], false);
+
+ if (!prog->data->LinkStatus) {
+ if (sh)
+ _mesa_delete_linked_shader(ctx, sh);
+ goto done;
+ }
+
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ validate_vertex_shader_executable(prog, sh, ctx);
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ /* nothing to be done */
+ break;
+ case MESA_SHADER_TESS_EVAL:
+ validate_tess_eval_shader_executable(prog, sh, ctx);
+ break;
+ case MESA_SHADER_GEOMETRY:
+ validate_geometry_shader_executable(prog, sh, ctx);
+ break;
+ case MESA_SHADER_FRAGMENT:
+ validate_fragment_shader_executable(prog, sh);
+ break;
+ }
+ if (!prog->data->LinkStatus) {
+ if (sh)
+ _mesa_delete_linked_shader(ctx, sh);
+ goto done;
+ }
+
+ prog->_LinkedShaders[stage] = sh;
+ prog->data->linked_stages |= 1 << stage;
+ }
+ }
+
+ /* Here begins the inter-stage linking phase. Some initial validation is
+ * performed, then locations are assigned for uniforms, attributes, and
+ * varyings.
+ */
+ cross_validate_uniforms(ctx, prog);
+ if (!prog->data->LinkStatus)
+ goto done;
+
+ unsigned first, last, prev;
+
+ first = MESA_SHADER_STAGES;
+ last = 0;
+
+ /* Determine first and last stage. */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (!prog->_LinkedShaders[i])
+ continue;
+ if (first == MESA_SHADER_STAGES)
+ first = i;
+ last = i;
+ }
+
+ check_explicit_uniform_locations(ctx, prog);
+ link_assign_subroutine_types(prog);
+ verify_subroutine_associated_funcs(prog);
+
+ if (!prog->data->LinkStatus)
+ goto done;
+
+ resize_tes_inputs(ctx, prog);
+
+ /* Validate the inputs of each stage with the output of the preceding
+ * stage.
+ */
+ prev = first;
+ for (unsigned i = prev + 1; i <= MESA_SHADER_FRAGMENT; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ validate_interstage_inout_blocks(prog, prog->_LinkedShaders[prev],
+ prog->_LinkedShaders[i]);
+ if (!prog->data->LinkStatus)
+ goto done;
+
+ cross_validate_outputs_to_inputs(ctx, prog,
+ prog->_LinkedShaders[prev],
+ prog->_LinkedShaders[i]);
+ if (!prog->data->LinkStatus)
+ goto done;
+
+ prev = i;
+ }
+
+ /* The cross validation of outputs/inputs above validates interstage
+ * explicit locations. We need to do this also for the inputs in the first
+ * stage and outputs of the last stage included in the program, since there
+ * is no cross validation for these.
+ */
+ validate_first_and_last_interface_explicit_locations(ctx, prog,
+ (gl_shader_stage) first,
+ (gl_shader_stage) last);
+
+ /* Cross-validate uniform blocks between shader stages */
+ validate_interstage_uniform_blocks(prog, prog->_LinkedShaders);
+ if (!prog->data->LinkStatus)
+ goto done;
+
+ for (unsigned int i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] != NULL)
+ lower_named_interface_blocks(mem_ctx, prog->_LinkedShaders[i]);
+ }
+
+ if (prog->IsES && prog->data->Version == 100)
+ if (!validate_invariant_builtins(prog,
+ prog->_LinkedShaders[MESA_SHADER_VERTEX],
+ prog->_LinkedShaders[MESA_SHADER_FRAGMENT]))
+ goto done;
+
+ /* Implement the GLSL 1.30+ rule for discard vs infinite loops. Do
+ * it before optimization because we want most of the checks to get
+ * dropped thanks to constant propagation.
+ *
+ * This rule also applies to GLSL ES 3.00.
+ */
+ if (max_version >= (prog->IsES ? 300 : 130)) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
+ if (sh) {
+ lower_discard_flow(sh->ir);
+ }
+ }
+
+ if (prog->SeparateShader)
+ disable_varying_optimizations_for_sso(prog);
+
+ /* Process UBOs */
+ if (!interstage_cross_validate_uniform_blocks(prog, false))
+ goto done;
+
+ /* Process SSBOs */
+ if (!interstage_cross_validate_uniform_blocks(prog, true))
+ goto done;
+
+ /* Do common optimization before assigning storage for attributes,
+ * uniforms, and varyings. Later optimization could possibly make
+ * some of that unused.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ detect_recursion_linked(prog, prog->_LinkedShaders[i]->ir);
+ if (!prog->data->LinkStatus)
+ goto done;
+
+ if (ctx->Const.ShaderCompilerOptions[i].LowerCombinedClipCullDistance) {
+ lower_clip_cull_distance(prog, prog->_LinkedShaders[i]);
+ }
+
+ if (ctx->Const.LowerTessLevel) {
+ lower_tess_level(prog->_LinkedShaders[i]);
+ }
+
+ /* Section 13.46 (Vertex Attribute Aliasing) of the OpenGL ES 3.2
+ * specification says:
+ *
+ * "In general, the behavior of GLSL ES should not depend on compiler
+ * optimizations which might be implementation-dependent. Name matching
+ * rules in most languages, including C++ from which GLSL ES is derived,
+ * are based on declarations rather than use.
+ *
+ * RESOLUTION: The existence of aliasing is determined by declarations
+ * present after preprocessing."
+ *
+ * Because of this rule, we do a 'dry-run' of attribute assignment for
+ * vertex shader inputs here.
+ */
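+ /* Illustrative GLSL (hedged):
+ *
+ * layout(location = 0) in vec4 a;
+ * layout(location = 0) in vec4 b;
+ *
+ * Here "a" and "b" alias by declaration alone, even if one of them
+ * is never used, hence the dry run over all declared inputs below.
+ */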
+ if (prog->IsES && i == MESA_SHADER_VERTEX) {
+ if (!assign_attribute_or_color_locations(mem_ctx, prog, &ctx->Const,
+ MESA_SHADER_VERTEX, false)) {
+ goto done;
+ }
+ }
+
+ /* Call opts before lowering const arrays to uniforms so we can const
+ * propagate any elements accessed directly.
+ */
+ linker_optimisation_loop(ctx, prog->_LinkedShaders[i]->ir, i);
+
+ /* Call opts after lowering const arrays to copy propagate things. */
+ if (ctx->Const.GLSLLowerConstArrays &&
+ lower_const_arrays_to_uniforms(prog->_LinkedShaders[i]->ir, i,
+ ctx->Const.Program[i].MaxUniformComponents))
+ linker_optimisation_loop(ctx, prog->_LinkedShaders[i]->ir, i);
+
+ }
+
+ /* Validation for special cases where we allow sampler array indexing
+ * with loop induction variable. This check emits a warning or error
+ * depending if backend can handle dynamic indexing.
+ */
+ if ((!prog->IsES && prog->data->Version < 130) ||
+ (prog->IsES && prog->data->Version < 300)) {
+ if (!validate_sampler_array_indexing(ctx, prog))
+ goto done;
+ }
+
+ /* Check and validate stream emissions in geometry shaders */
+ validate_geometry_shader_emissions(ctx, prog);
+
+ store_fragdepth_layout(prog);
+
+ if (!link_varyings_and_uniforms(first, last, ctx, prog, mem_ctx))
+ goto done;
+
+ /* Linking varyings can cause some extra, useless swizzles to be generated
+ * due to packing and unpacking.
+ */
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ optimize_swizzles(prog->_LinkedShaders[i]->ir);
+ }
+
+ /* OpenGL ES < 3.1 requires that a vertex shader and a fragment shader both
+ * be present in a linked program. GL_ARB_ES2_compatibility doesn't say
+ * anything about shader linking when one of the shaders (vertex or
+ * fragment shader) is absent. So, the extension shouldn't change the
+ * behavior specified in the GLSL specification.
+ *
+ * From OpenGL ES 3.1 specification (7.3 Program Objects):
+ * "Linking can fail for a variety of reasons as specified in the
+ * OpenGL ES Shading Language Specification, as well as any of the
+ * following reasons:
+ *
+ * ...
+ *
+ * * program contains objects to form either a vertex shader or
+ * fragment shader, and program is not separable, and does not
+ * contain objects to form both a vertex shader and fragment
+ * shader."
+ *
+ * However, the only scenario in 3.1+ where we don't require them both is
+ * when we have a compute shader. For example:
+ *
+ * - No shaders is a link error.
+ * - Geom or Tess without a Vertex shader is a link error which means we
+ * always require a Vertex shader and hence a Fragment shader.
+ * - Finally a Compute shader linked with any other stage is a link error.
+ */
+ if (!prog->SeparateShader && ctx->API == API_OPENGLES2 &&
+ num_shaders[MESA_SHADER_COMPUTE] == 0) {
+ if (prog->_LinkedShaders[MESA_SHADER_VERTEX] == NULL) {
+ linker_error(prog, "program lacks a vertex shader\n");
+ } else if (prog->_LinkedShaders[MESA_SHADER_FRAGMENT] == NULL) {
+ linker_error(prog, "program lacks a fragment shader\n");
+ }
+ }
+
+done:
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ free(shader_list[i]);
+ if (prog->_LinkedShaders[i] == NULL)
+ continue;
+
+ /* Do a final validation step to make sure that the IR wasn't
+ * invalidated by any modifications performed after intrastage linking.
+ */
+ validate_ir_tree(prog->_LinkedShaders[i]->ir);
+
+ /* Retain any live IR, but trash the rest. */
+ reparent_ir(prog->_LinkedShaders[i]->ir, prog->_LinkedShaders[i]->ir);
+
+ /* The symbol table in the linked shaders may contain references to
+ * variables that were removed (e.g., unused uniforms). Since it may
+ * contain junk, there is no possible valid use. Delete it and set the
+ * pointer to NULL.
+ */
+ delete prog->_LinkedShaders[i]->symbols;
+ prog->_LinkedShaders[i]->symbols = NULL;
+ }
+
+ ralloc_free(mem_ctx);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.h
new file mode 100644
index 0000000000..037b0ef472
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker.h
@@ -0,0 +1,218 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_LINKER_H
+#define GLSL_LINKER_H
+
+#include "linker_util.h"
+
+struct gl_shader_program;
+struct gl_shader;
+struct gl_linked_shader;
+
+extern bool
+link_function_calls(gl_shader_program *prog, gl_linked_shader *main,
+ gl_shader **shader_list, unsigned num_shaders);
+
+extern void
+link_invalidate_variable_locations(exec_list *ir);
+
+extern void
+link_assign_uniform_locations(struct gl_shader_program *prog,
+ struct gl_context *ctx);
+
+extern void
+link_set_uniform_initializers(struct gl_shader_program *prog,
+ unsigned int boolean_true);
+
+extern int
+link_cross_validate_uniform_block(void *mem_ctx,
+ struct gl_uniform_block **linked_blocks,
+ unsigned int *num_linked_blocks,
+ struct gl_uniform_block *new_block);
+
+extern void
+link_uniform_blocks(void *mem_ctx,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_linked_shader *shader,
+ struct gl_uniform_block **ubo_blocks,
+ unsigned *num_ubo_blocks,
+ struct gl_uniform_block **ssbo_blocks,
+ unsigned *num_ssbo_blocks);
+
+bool
+validate_intrastage_arrays(struct gl_shader_program *prog,
+ ir_variable *const var,
+ ir_variable *const existing,
+ bool match_precision = true);
+
+void
+validate_intrastage_interface_blocks(struct gl_shader_program *prog,
+ const gl_shader **shader_list,
+ unsigned num_shaders);
+
+void
+validate_interstage_inout_blocks(struct gl_shader_program *prog,
+ const gl_linked_shader *producer,
+ const gl_linked_shader *consumer);
+
+void
+validate_interstage_uniform_blocks(struct gl_shader_program *prog,
+ gl_linked_shader **stages);
+
+extern void
+link_assign_atomic_counter_resources(struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+extern void
+link_check_atomic_counter_resources(struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+
+extern struct gl_linked_shader *
+link_intrastage_shaders(void *mem_ctx,
+ struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_shader **shader_list,
+ unsigned num_shaders,
+ bool allow_missing_main);
+
+extern unsigned
+link_calculate_matrix_stride(const glsl_type *matrix, bool row_major,
+ enum glsl_interface_packing packing);
+
+/**
+ * Class for processing all of the leaf fields of a variable that corresponds
+ * to a program resource.
+ *
+ * The leaf fields are all the parts of the variable that the application
+ * could query using \c glGetProgramResourceIndex (or that could be returned
+ * by \c glGetProgramResourceName).
+ *
+ * Classes may derive from this class to implement specific functionality.
+ * This class only provides the mechanism to iterate over the leaves. Derived
+ * classes must implement \c ::visit_field and may override \c ::process.
+ */
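+/* A minimal sketch of a derived class (the "leaf_counter" name is
+ * hypothetical, not part of this header):
+ *
+ * class leaf_counter : public program_resource_visitor {
+ * public:
+ * leaf_counter() : count(0) {}
+ * unsigned count;
+ * private:
+ * virtual void visit_field(const glsl_type *, const char *, bool,
+ * const glsl_type *,
+ * const enum glsl_interface_packing, bool)
+ * {
+ * count++;
+ * }
+ * };
+ */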
+class program_resource_visitor {
+public:
+ /**
+ * Begin processing a variable
+ *
+ * Classes that overload this function should call \c ::process from the
+ * base class to start the recursive processing of the variable.
+ *
+ * \param var The variable that is to be processed
+ *
+ * Calls \c ::visit_field for each leaf of the variable.
+ *
+ * \warning
+ * When processing a uniform block, this entry should only be used in cases
+ * where the row / column ordering of matrices in the block does not
+ * matter. For example, enumerating the names of members of the block, but
+ * not for determining the offsets of members.
+ */
+ void process(ir_variable *var, bool use_std430_as_default);
+
+ /**
+ * Begin processing a variable
+ *
+ * Classes that overload this function should call \c ::process from the
+ * base class to start the recursive processing of the variable.
+ *
+ * \param var The variable that is to be processed
+ * \param var_type The glsl_type reference of the variable
+ *
+ * Calls \c ::visit_field for each leaf of the variable.
+ *
+ * \warning
+ * When processing a uniform block, this entry should only be used in cases
+ * where the row / column ordering of matrices in the block does not
+ * matter. For example, enumerating the names of members of the block, but
+ * not for determining the offsets of members.
+ */
+ void process(ir_variable *var, const glsl_type *var_type,
+ bool use_std430_as_default);
+
+ /**
+ * Begin processing a variable of a structured type.
+ *
+ * This flavor of \c process should be used to handle structured types
+ * (i.e., structures, interfaces, or arrays thereof) that need special
+ * name handling. A common usage is to handle cases where the block name
+ * (instead of the instance name) is used for an interface block.
+ *
+ * \param type Type that is to be processed, associated with \c name
+ * \param name Base name of the structured variable being processed
+ *
+ * \note
+ * \c type must be \c GLSL_TYPE_RECORD, \c GLSL_TYPE_INTERFACE, or an array
+ * thereof.
+ */
+ void process(const glsl_type *type, const char *name,
+ bool use_std430_as_default);
+
+protected:
+ /**
+ * Method invoked for each leaf of the variable
+ *
+ * \param type Type of the field.
+ * \param name Fully qualified name of the field.
+ * \param row_major For a matrix type, is it stored row-major.
+ * \param record_type Type of the record containing the field.
+ * \param last_field Set if \c name is the last field of the structure
+ * containing it. This will always be false for items
+ * not contained in a structure or interface block.
+ */
+ virtual void visit_field(const glsl_type *type, const char *name,
+ bool row_major, const glsl_type *record_type,
+ const enum glsl_interface_packing packing,
+ bool last_field) = 0;
+
+ virtual void enter_record(const glsl_type *type, const char *name,
+ bool row_major, const enum glsl_interface_packing packing);
+
+ virtual void leave_record(const glsl_type *type, const char *name,
+ bool row_major, const enum glsl_interface_packing packing);
+
+ virtual void set_buffer_offset(unsigned offset);
+
+ virtual void set_record_array_count(unsigned record_array_count);
+
+private:
+ /**
+ * \param name_length Length of the current name \b not including the
+ * terminating \c NUL character.
+ * \param last_field Set if \c name is the last field of the structure
+ * containing it. This will always be false for items
+ * not contained in a structure or interface block.
+ */
+ void recursion(const glsl_type *t, char **name, size_t name_length,
+ bool row_major, const glsl_type *record_type,
+ const enum glsl_interface_packing packing,
+ bool last_field, unsigned record_array_count,
+ const glsl_struct_field *named_ifc_member);
+};
+
+#endif /* GLSL_LINKER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.cpp
new file mode 100644
index 0000000000..a790de3ca3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.cpp
@@ -0,0 +1,376 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include "main/mtypes.h"
+#include "glsl_types.h"
+#include "linker_util.h"
+#include "util/bitscan.h"
+#include "util/set.h"
+#include "ir_uniform.h" /* for gl_uniform_storage */
+
+/* Utility methods shared between the GLSL IR and the NIR */
+
+/* From the OpenGL 4.6 specification, 7.3.1.1 Naming Active Resources:
+ *
+ * "For an active shader storage block member declared as an array of an
+ * aggregate type, an entry will be generated only for the first array
+ * element, regardless of its type. Such block members are referred to as
+ * top-level arrays. If the block member is an aggregate type, the
+ * enumeration rules are then applied recursively."
+ */
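+/* Worked sketch with illustrative numbers (not from the spec): with
+ * top_level_array_base_offset = 16, top_level_array_stride = 32 and
+ * top_level_array_size = 4, second_element_offset is 48 and
+ * top_level_array_size_in_bytes is 128, so members of the same block at
+ * offsets in [48, 144) belong to trailing elements of the top-level
+ * array and are not added as buffer variables.
+ */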
+bool
+link_util_should_add_buffer_variable(struct gl_shader_program *prog,
+ struct gl_uniform_storage *uniform,
+ int top_level_array_base_offset,
+ int top_level_array_size_in_bytes,
+ int second_element_offset,
+ int block_index)
+{
+ /* If the uniform is not a shader storage buffer or is not an array,
+ * return true.
+ */
+ if (!uniform->is_shader_storage || top_level_array_size_in_bytes == 0)
+ return true;
+
+ int after_top_level_array = top_level_array_base_offset +
+ top_level_array_size_in_bytes;
+
+ /* Check for a new block, or that we are not dealing with array elements of
+ * a top member array other than the first element.
+ */
+ if (block_index != uniform->block_index ||
+ uniform->offset >= after_top_level_array ||
+ uniform->offset < second_element_offset) {
+ return true;
+ }
+
+ return false;
+}
+
+bool
+link_util_add_program_resource(struct gl_shader_program *prog,
+ struct set *resource_set,
+ GLenum type, const void *data, uint8_t stages)
+{
+ assert(data);
+
+ /* If resource already exists, do not add it again. */
+ if (_mesa_set_search(resource_set, data))
+ return true;
+
+ prog->data->ProgramResourceList =
+ reralloc(prog->data,
+ prog->data->ProgramResourceList,
+ gl_program_resource,
+ prog->data->NumProgramResourceList + 1);
+
+ if (!prog->data->ProgramResourceList) {
+ linker_error(prog, "Out of memory during linking.\n");
+ return false;
+ }
+
+ struct gl_program_resource *res =
+ &prog->data->ProgramResourceList[prog->data->NumProgramResourceList];
+
+ res->Type = type;
+ res->Data = data;
+ res->StageReferences = stages;
+
+ prog->data->NumProgramResourceList++;
+
+ _mesa_set_add(resource_set, data);
+
+ return true;
+}
+
+/**
+ * Search through the list of empty blocks to find one that fits the current
+ * uniform.
+ */
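+/* A uniform needing N slots either consumes an empty block of exactly N
+ * slots, is carved out of the front of a larger block, or, if no block
+ * fits, -1 is returned.
+ */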
+int
+link_util_find_empty_block(struct gl_shader_program *prog,
+ struct gl_uniform_storage *uniform)
+{
+ const unsigned entries = MAX2(1, uniform->array_elements);
+
+ foreach_list_typed(struct empty_uniform_block, block, link,
+ &prog->EmptyUniformLocations) {
+ /* Found a block with enough slots to fit the uniform */
+ if (block->slots == entries) {
+ unsigned start = block->start;
+ exec_node_remove(&block->link);
+ ralloc_free(block);
+
+ return start;
+ } else if (block->slots > entries) {
+ /* Found a block with more slots than needed. It can still be used. */
+ unsigned start = block->start;
+ block->start += entries;
+ block->slots -= entries;
+
+ return start;
+ }
+ }
+
+ return -1;
+}
+
+void
+link_util_update_empty_uniform_locations(struct gl_shader_program *prog)
+{
+ struct empty_uniform_block *current_block = NULL;
+
+ for (unsigned i = 0; i < prog->NumUniformRemapTable; i++) {
+ /* We found empty space in UniformRemapTable. */
+ if (prog->UniformRemapTable[i] == NULL) {
+ /* We've found the beginning of a new continuous block of empty slots */
+ if (!current_block || current_block->start + current_block->slots != i) {
+ current_block = rzalloc(prog, struct empty_uniform_block);
+ current_block->start = i;
+ exec_list_push_tail(&prog->EmptyUniformLocations,
+ &current_block->link);
+ }
+
+ /* The current block continues, so we simply increment its slots */
+ current_block->slots++;
+ }
+ }
+}
+
+void
+link_util_check_subroutine_resources(struct gl_shader_program *prog)
+{
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[i]->Program;
+
+ if (p->sh.NumSubroutineUniformRemapTable > MAX_SUBROUTINE_UNIFORM_LOCATIONS) {
+ linker_error(prog, "Too many %s shader subroutine uniforms\n",
+ _mesa_shader_stage_to_string(i));
+ }
+ }
+}
+
+/**
+ * Validate uniform resources used by a program versus the implementation limits
+ */
+void
+link_util_check_uniform_resources(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ unsigned total_uniform_blocks = 0;
+ unsigned total_shader_storage_blocks = 0;
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+
+ if (sh == NULL)
+ continue;
+
+ if (sh->num_uniform_components >
+ ctx->Const.Program[i].MaxUniformComponents) {
+ if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
+ linker_warning(prog, "Too many %s shader default uniform block "
+ "components, but the driver will try to optimize "
+ "them out; this is non-portable out-of-spec "
+ "behavior\n",
+ _mesa_shader_stage_to_string(i));
+ } else {
+ linker_error(prog, "Too many %s shader default uniform block "
+ "components\n",
+ _mesa_shader_stage_to_string(i));
+ }
+ }
+
+ if (sh->num_combined_uniform_components >
+ ctx->Const.Program[i].MaxCombinedUniformComponents) {
+ if (ctx->Const.GLSLSkipStrictMaxUniformLimitCheck) {
+ linker_warning(prog, "Too many %s shader uniform components, "
+ "but the driver will try to optimize them out; "
+ "this is non-portable out-of-spec behavior\n",
+ _mesa_shader_stage_to_string(i));
+ } else {
+ linker_error(prog, "Too many %s shader uniform components\n",
+ _mesa_shader_stage_to_string(i));
+ }
+ }
+
+ total_shader_storage_blocks += sh->Program->info.num_ssbos;
+ total_uniform_blocks += sh->Program->info.num_ubos;
+ }
+
+ if (total_uniform_blocks > ctx->Const.MaxCombinedUniformBlocks) {
+ linker_error(prog, "Too many combined uniform blocks (%d/%d)\n",
+ total_uniform_blocks, ctx->Const.MaxCombinedUniformBlocks);
+ }
+
+ if (total_shader_storage_blocks > ctx->Const.MaxCombinedShaderStorageBlocks) {
+ linker_error(prog, "Too many combined shader storage blocks (%d/%d)\n",
+ total_shader_storage_blocks,
+ ctx->Const.MaxCombinedShaderStorageBlocks);
+ }
+
+ for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
+ if (prog->data->UniformBlocks[i].UniformBufferSize >
+ ctx->Const.MaxUniformBlockSize) {
+ linker_error(prog, "Uniform block %s too big (%d/%d)\n",
+ prog->data->UniformBlocks[i].Name,
+ prog->data->UniformBlocks[i].UniformBufferSize,
+ ctx->Const.MaxUniformBlockSize);
+ }
+ }
+
+ for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
+ if (prog->data->ShaderStorageBlocks[i].UniformBufferSize >
+ ctx->Const.MaxShaderStorageBlockSize) {
+ linker_error(prog, "Shader storage block %s too big (%d/%d)\n",
+ prog->data->ShaderStorageBlocks[i].Name,
+ prog->data->ShaderStorageBlocks[i].UniformBufferSize,
+ ctx->Const.MaxShaderStorageBlockSize);
+ }
+ }
+}
+
+void
+link_util_calculate_subroutine_compat(struct gl_shader_program *prog)
+{
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int i = u_bit_scan(&mask);
+ struct gl_program *p = prog->_LinkedShaders[i]->Program;
+
+ for (unsigned j = 0; j < p->sh.NumSubroutineUniformRemapTable; j++) {
+ if (p->sh.SubroutineUniformRemapTable[j] == INACTIVE_UNIFORM_EXPLICIT_LOCATION)
+ continue;
+
+ struct gl_uniform_storage *uni = p->sh.SubroutineUniformRemapTable[j];
+
+ if (!uni)
+ continue;
+
+ int count = 0;
+ if (p->sh.NumSubroutineFunctions == 0) {
+ linker_error(prog, "subroutine uniform %s defined but no valid functions found\n", uni->type->name);
+ continue;
+ }
+ for (unsigned f = 0; f < p->sh.NumSubroutineFunctions; f++) {
+ struct gl_subroutine_function *fn = &p->sh.SubroutineFunctions[f];
+ for (int k = 0; k < fn->num_compat_types; k++) {
+ if (fn->types[k] == uni->type) {
+ count++;
+ break;
+ }
+ }
+ }
+ uni->num_compatible_subroutines = count;
+ }
+ }
+}
+
+/**
+ * Recursive part of the public link_util_mark_array_elements_referenced
+ * function.
+ *
+ * The recursion occurs when an entire array-of- is accessed. See the
+ * implementation for more details.
+ *
+ * \param dr List of array_deref_range elements to be
+ * processed.
+ * \param count Number of array_deref_range elements to be
+ * processed.
+ * \param scale Current offset scale.
+ * \param linearized_index Current accumulated linearized array index.
+ * \param bits Bitset in which referenced array elements are marked.
+ */
+void
+_mark_array_elements_referenced(const struct array_deref_range *dr,
+ unsigned count, unsigned scale,
+ unsigned linearized_index,
+ BITSET_WORD *bits)
+{
+ /* Walk through the list of array dereferences in least- to
+ * most-significant order. Along the way, accumulate the current
+ * linearized offset and the scale factor for each array-of-.
+ */
+ for (unsigned i = 0; i < count; i++) {
+ if (dr[i].index < dr[i].size) {
+ linearized_index += dr[i].index * scale;
+ scale *= dr[i].size;
+ } else {
+ /* For each element in the current array, update the count and
+ * offset, then recurse to process the remaining arrays.
+ *
+ * There is some inefficiency here if the last element in the
+ * array_deref_range list specifies the entire array. In that case,
+ * the loop will make recursive calls with count == 0. In the call,
+ * all that will happen is the bit will be set.
+ */
+ for (unsigned j = 0; j < dr[i].size; j++) {
+ _mark_array_elements_referenced(&dr[i + 1],
+ count - (i + 1),
+ scale * dr[i].size,
+ linearized_index + (j * scale),
+ bits);
+ }
+
+ return;
+ }
+ }
+
+ BITSET_SET(bits, linearized_index);
+}
+
+/**
+ * Mark a set of array elements as accessed.
+ *
+ * If every \c array_deref_range is for a single index, only a single
+ * element will be marked. If any \c array_deref_range is for an entire
+ * array-of-, then multiple elements will be marked.
+ *
+ * Items in the \c array_deref_range list appear in least- to
+ * most-significant order. This is the \b opposite order the indices
+ * appear in the GLSL shader text. An array access like
+ *
+ * x = y[1][i][3];
+ *
+ * would appear as
+ *
+ * { { 3, n }, { m, m }, { 1, p } }
+ *
+ * where n, m, and p are the sizes of the arrays-of-arrays.
+ *
+ * The set of marked array elements can later be queried by
+ * \c ::is_linearized_index_referenced.
+ *
+ * \param dr List of array_deref_range elements to be processed.
+ * \param count Number of array_deref_range elements to be processed.
+ */
+void
+link_util_mark_array_elements_referenced(const struct array_deref_range *dr,
+ unsigned count, unsigned array_depth,
+ BITSET_WORD *bits)
+{
+ if (count != array_depth)
+ return;
+
+ _mark_array_elements_referenced(dr, count, 1, 0, bits);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.h
new file mode 100644
index 0000000000..16f5ca9e40
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/linker_util.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright © 2018 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_LINKER_UTIL_H
+#define GLSL_LINKER_UTIL_H
+
+#include "util/bitset.h"
+
+struct gl_context;
+struct gl_shader_program;
+struct gl_uniform_storage;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Sometimes there are empty slots left over in UniformRemapTable after we
+ * allocate slots to explicit locations. This struct represents a single
+ * continuous block of empty slots in UniformRemapTable.
+ */
+struct empty_uniform_block {
+ struct exec_node link;
+ /* The start location of the block */
+ unsigned start;
+ /* The number of slots in the block */
+ unsigned slots;
+};
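+/* E.g. (illustrative): if explicit locations leave UniformRemapTable as
+ * [u0, NULL, NULL, u3], the gap is described by one empty_uniform_block
+ * with start = 1 and slots = 2.
+ */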
+
+/**
+ * Describes an access of an array element or an access of the whole array
+ */
+struct array_deref_range {
+ /**
+ * Index that was accessed.
+ *
+ * All valid array indices are less than the size of the array. If index
+ * is equal to the size of the array, this means the entire array has been
+ * accessed (e.g., due to use of a non-constant index).
+ */
+ unsigned index;
+
+ /** Size of the array. Used for offset calculations. */
+ unsigned size;
+};
+
+void
+linker_error(struct gl_shader_program *prog, const char *fmt, ...);
+
+void
+linker_warning(struct gl_shader_program *prog, const char *fmt, ...);
+
+bool
+link_util_should_add_buffer_variable(struct gl_shader_program *prog,
+ struct gl_uniform_storage *uniform,
+ int top_level_array_base_offset,
+ int top_level_array_size_in_bytes,
+ int second_element_offset,
+ int block_index);
+
+bool
+link_util_add_program_resource(struct gl_shader_program *prog,
+ struct set *resource_set,
+ GLenum type, const void *data, uint8_t stages);
+
+int
+link_util_find_empty_block(struct gl_shader_program *prog,
+ struct gl_uniform_storage *uniform);
+
+void
+link_util_update_empty_uniform_locations(struct gl_shader_program *prog);
+
+void
+link_util_check_subroutine_resources(struct gl_shader_program *prog);
+
+void
+link_util_check_uniform_resources(struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+void
+link_util_calculate_subroutine_compat(struct gl_shader_program *prog);
+
+void
+link_util_mark_array_elements_referenced(const struct array_deref_range *dr,
+ unsigned count, unsigned array_depth,
+ BITSET_WORD *bits);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLSL_LINKER_UTIL_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/list.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/list.h
new file mode 100644
index 0000000000..9153d07cb9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/list.h
@@ -0,0 +1,777 @@
+/*
+ * Copyright © 2008, 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file list.h
+ * \brief Doubly-linked list abstract container type.
+ *
+ * Each doubly-linked list has a sentinel head and tail node. These nodes
+ * contain no data. The head sentinel can be identified by its \c prev
+ * pointer being \c NULL. The tail sentinel can be identified by its
+ * \c next pointer being \c NULL.
+ *
+ * A list is empty if either the head sentinel's \c next pointer points to the
+ * tail sentinel or the tail sentinel's \c prev pointer points to the head
+ * sentinel. The head sentinel and tail sentinel nodes are allocated within the
+ * list structure.
+ *
+ * Do note that this means the list nodes will contain pointers into the list
+ * structure itself; as a result, you may not \c realloc() an \c exec_list or
+ * any structure in which an \c exec_list is embedded.
+ */
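+
+/* A minimal usage sketch (the `instruction` type and `mem_ctx` below are
+ * illustrative assumptions, not part of this header): client structures embed
+ * an exec_node, and the list itself owns only the two sentinels.
+ *
+ *    struct instruction { struct exec_node link; int opcode; };
+ *
+ *    struct exec_list list;
+ *    exec_list_make_empty(&list);
+ *
+ *    struct instruction *inst = rzalloc(mem_ctx, struct instruction);
+ *    exec_list_push_tail(&list, &inst->link);
+ */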
+
+#ifndef LIST_CONTAINER_H
+#define LIST_CONTAINER_H
+
+#ifndef __cplusplus
+#include <stddef.h>
+#endif
+#include <assert.h>
+
+#include "util/ralloc.h"
+
+struct exec_node {
+ struct exec_node *next;
+ struct exec_node *prev;
+
+#ifdef __cplusplus
+ DECLARE_RZALLOC_CXX_OPERATORS(exec_node)
+
+ exec_node() : next(NULL), prev(NULL)
+ {
+ /* empty */
+ }
+
+ const exec_node *get_next() const;
+ exec_node *get_next();
+
+ const exec_node *get_prev() const;
+ exec_node *get_prev();
+
+ void remove();
+
+ /**
+ * Link a node with itself
+ *
+ * This creates a sort of degenerate list that is occasionally useful.
+ */
+ void self_link();
+
+ /**
+ * Insert a node in the list after the current node
+ */
+ void insert_after(exec_node *after);
+
+ /**
+ * Insert another list in the list after the current node
+ */
+ void insert_after(struct exec_list *after);
+
+ /**
+ * Insert a node in the list before the current node
+ */
+ void insert_before(exec_node *before);
+
+ /**
+ * Insert another list in the list before the current node
+ */
+ void insert_before(struct exec_list *before);
+
+ /**
+ * Replace the current node with the given node.
+ */
+ void replace_with(exec_node *replacement);
+
+ /**
+ * Is this the sentinel at the tail of the list?
+ */
+ bool is_tail_sentinel() const;
+
+ /**
+ * Is this the sentinel at the head of the list?
+ */
+ bool is_head_sentinel() const;
+#endif
+};
+
+static inline void
+exec_node_init(struct exec_node *n)
+{
+ n->next = NULL;
+ n->prev = NULL;
+}
+
+static inline const struct exec_node *
+exec_node_get_next_const(const struct exec_node *n)
+{
+ return n->next;
+}
+
+static inline struct exec_node *
+exec_node_get_next(struct exec_node *n)
+{
+ return n->next;
+}
+
+static inline const struct exec_node *
+exec_node_get_prev_const(const struct exec_node *n)
+{
+ return n->prev;
+}
+
+static inline struct exec_node *
+exec_node_get_prev(struct exec_node *n)
+{
+ return n->prev;
+}
+
+static inline void
+exec_node_remove(struct exec_node *n)
+{
+ n->next->prev = n->prev;
+ n->prev->next = n->next;
+ n->next = NULL;
+ n->prev = NULL;
+}
+
+static inline void
+exec_node_self_link(struct exec_node *n)
+{
+ n->next = n;
+ n->prev = n;
+}
+
+static inline void
+exec_node_insert_after(struct exec_node *n, struct exec_node *after)
+{
+ after->next = n->next;
+ after->prev = n;
+
+ n->next->prev = after;
+ n->next = after;
+}
+
+static inline void
+exec_node_insert_node_before(struct exec_node *n, struct exec_node *before)
+{
+ before->next = n;
+ before->prev = n->prev;
+
+ n->prev->next = before;
+ n->prev = before;
+}
+
+static inline void
+exec_node_replace_with(struct exec_node *n, struct exec_node *replacement)
+{
+ replacement->prev = n->prev;
+ replacement->next = n->next;
+
+ n->prev->next = replacement;
+ n->next->prev = replacement;
+}
+
+static inline bool
+exec_node_is_tail_sentinel(const struct exec_node *n)
+{
+ return n->next == NULL;
+}
+
+static inline bool
+exec_node_is_head_sentinel(const struct exec_node *n)
+{
+ return n->prev == NULL;
+}
+
+#ifdef __cplusplus
+inline const exec_node *exec_node::get_next() const
+{
+ return exec_node_get_next_const(this);
+}
+
+inline exec_node *exec_node::get_next()
+{
+ return exec_node_get_next(this);
+}
+
+inline const exec_node *exec_node::get_prev() const
+{
+ return exec_node_get_prev_const(this);
+}
+
+inline exec_node *exec_node::get_prev()
+{
+ return exec_node_get_prev(this);
+}
+
+inline void exec_node::remove()
+{
+ exec_node_remove(this);
+}
+
+inline void exec_node::self_link()
+{
+ exec_node_self_link(this);
+}
+
+inline void exec_node::insert_after(exec_node *after)
+{
+ exec_node_insert_after(this, after);
+}
+
+inline void exec_node::insert_before(exec_node *before)
+{
+ exec_node_insert_node_before(this, before);
+}
+
+inline void exec_node::replace_with(exec_node *replacement)
+{
+ exec_node_replace_with(this, replacement);
+}
+
+inline bool exec_node::is_tail_sentinel() const
+{
+ return exec_node_is_tail_sentinel(this);
+}
+
+inline bool exec_node::is_head_sentinel() const
+{
+ return exec_node_is_head_sentinel(this);
+}
+#endif
+
+#ifdef __cplusplus
+/* This macro will not work correctly if `t' uses virtual inheritance. If you
+ * are using virtual inheritance, you deserve a slow and painful death. Enjoy!
+ */
+#define exec_list_offsetof(t, f, p) \
+ (((char *) &((t *) p)->f) - ((char *) p))
+#else
+#define exec_list_offsetof(t, f, p) offsetof(t, f)
+#endif
+
+/**
+ * Get a pointer to the structure containing an exec_node
+ *
+ * Given a pointer to an \c exec_node embedded in a structure, get a pointer to
+ * the containing structure.
+ *
+ * \param type Base type of the structure containing the node
+ * \param node Pointer to the \c exec_node
+ * \param field Name of the field in \c type that is the embedded \c exec_node
+ */
+#define exec_node_data(type, node, field) \
+ ((type *) (((uintptr_t) node) - exec_list_offsetof(type, field, node)))
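+
+/* Illustrative sketch (reusing the hypothetical `instruction` type from the
+ * file comment above): recover the containing structure from an embedded
+ * node.
+ *
+ *    struct exec_node *n = exec_list_get_head(&list);
+ *    struct instruction *inst = exec_node_data(struct instruction, n, link);
+ */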
+
+#ifdef __cplusplus
+struct exec_node;
+#endif
+
+struct exec_list {
+ struct exec_node head_sentinel;
+ struct exec_node tail_sentinel;
+
+#ifdef __cplusplus
+ DECLARE_RALLOC_CXX_OPERATORS(exec_list)
+
+ exec_list()
+ {
+ make_empty();
+ }
+
+ void make_empty();
+
+ bool is_empty() const;
+
+ const exec_node *get_head() const;
+ exec_node *get_head();
+ const exec_node *get_head_raw() const;
+ exec_node *get_head_raw();
+
+ const exec_node *get_tail() const;
+ exec_node *get_tail();
+ const exec_node *get_tail_raw() const;
+ exec_node *get_tail_raw();
+
+ unsigned length() const;
+
+ void push_head(exec_node *n);
+ void push_tail(exec_node *n);
+ void push_degenerate_list_at_head(exec_node *n);
+
+ /**
+ * Remove the first node from a list and return it
+ *
+ * \return
+ * The first node in the list or \c NULL if the list is empty.
+ *
+ * \sa exec_list::get_head
+ */
+ exec_node *pop_head();
+
+ /**
+ * Move all of the nodes from this list to the target list
+ */
+ void move_nodes_to(exec_list *target);
+
+ /**
+ * Append all nodes from the source list to the end of the target list
+ */
+ void append_list(exec_list *source);
+
+ /**
+ * Prepend all nodes from the source list to the beginning of the target
+ * list
+ */
+ void prepend_list(exec_list *source);
+#endif
+};
+
+static inline void
+exec_list_make_empty(struct exec_list *list)
+{
+ list->head_sentinel.next = &list->tail_sentinel;
+ list->head_sentinel.prev = NULL;
+ list->tail_sentinel.next = NULL;
+ list->tail_sentinel.prev = &list->head_sentinel;
+}
+
+static inline bool
+exec_list_is_empty(const struct exec_list *list)
+{
+ /* There are three ways to test whether a list is empty or not.
+ *
+ * - Check to see if the head sentinel's \c next is the tail sentinel.
+ * - Check to see if the tail sentinel's \c prev is the head sentinel.
+ * - Check to see if the head is the sentinel node by testing whether its
+ * \c next pointer is \c NULL.
+ *
+ * The first two methods tend to generate better code on modern systems
+ * because they save a pointer dereference.
+ */
+ return list->head_sentinel.next == &list->tail_sentinel;
+}
+
+static inline bool
+exec_list_is_singular(const struct exec_list *list)
+{
+ return !exec_list_is_empty(list) &&
+ list->head_sentinel.next->next == &list->tail_sentinel;
+}
+
+static inline const struct exec_node *
+exec_list_get_head_const(const struct exec_list *list)
+{
+ return !exec_list_is_empty(list) ? list->head_sentinel.next : NULL;
+}
+
+static inline struct exec_node *
+exec_list_get_head(struct exec_list *list)
+{
+ return !exec_list_is_empty(list) ? list->head_sentinel.next : NULL;
+}
+
+static inline const struct exec_node *
+exec_list_get_head_raw_const(const struct exec_list *list)
+{
+ return list->head_sentinel.next;
+}
+
+static inline struct exec_node *
+exec_list_get_head_raw(struct exec_list *list)
+{
+ return list->head_sentinel.next;
+}
+
+static inline const struct exec_node *
+exec_list_get_tail_const(const struct exec_list *list)
+{
+ return !exec_list_is_empty(list) ? list->tail_sentinel.prev : NULL;
+}
+
+static inline struct exec_node *
+exec_list_get_tail(struct exec_list *list)
+{
+ return !exec_list_is_empty(list) ? list->tail_sentinel.prev : NULL;
+}
+
+static inline const struct exec_node *
+exec_list_get_tail_raw_const(const struct exec_list *list)
+{
+ return list->tail_sentinel.prev;
+}
+
+static inline struct exec_node *
+exec_list_get_tail_raw(struct exec_list *list)
+{
+ return list->tail_sentinel.prev;
+}
+
+static inline unsigned
+exec_list_length(const struct exec_list *list)
+{
+ unsigned size = 0;
+ struct exec_node *node;
+
+ for (node = list->head_sentinel.next; node->next != NULL; node = node->next) {
+ size++;
+ }
+
+ return size;
+}
+
+static inline void
+exec_list_push_head(struct exec_list *list, struct exec_node *n)
+{
+ n->next = list->head_sentinel.next;
+ n->prev = &list->head_sentinel;
+
+ n->next->prev = n;
+ list->head_sentinel.next = n;
+}
+
+static inline void
+exec_list_push_tail(struct exec_list *list, struct exec_node *n)
+{
+ n->next = &list->tail_sentinel;
+ n->prev = list->tail_sentinel.prev;
+
+ n->prev->next = n;
+ list->tail_sentinel.prev = n;
+}
+
+static inline void
+exec_list_push_degenerate_list_at_head(struct exec_list *list, struct exec_node *n)
+{
+ assert(n->prev->next == n);
+
+ n->prev->next = list->head_sentinel.next;
+ list->head_sentinel.next->prev = n->prev;
+ n->prev = &list->head_sentinel;
+ list->head_sentinel.next = n;
+}
+
+static inline struct exec_node *
+exec_list_pop_head(struct exec_list *list)
+{
+ struct exec_node *const n = exec_list_get_head(list);
+ if (n != NULL)
+ exec_node_remove(n);
+
+ return n;
+}
+
+static inline void
+exec_list_move_nodes_to(struct exec_list *list, struct exec_list *target)
+{
+ if (exec_list_is_empty(list)) {
+ exec_list_make_empty(target);
+ } else {
+ target->head_sentinel.next = list->head_sentinel.next;
+ target->head_sentinel.prev = NULL;
+ target->tail_sentinel.next = NULL;
+ target->tail_sentinel.prev = list->tail_sentinel.prev;
+
+ target->head_sentinel.next->prev = &target->head_sentinel;
+ target->tail_sentinel.prev->next = &target->tail_sentinel;
+
+ exec_list_make_empty(list);
+ }
+}
+
+static inline void
+exec_list_append(struct exec_list *list, struct exec_list *source)
+{
+ if (exec_list_is_empty(source))
+ return;
+
+ /* Link the first node of the source with the last node of the target list.
+ */
+ list->tail_sentinel.prev->next = source->head_sentinel.next;
+ source->head_sentinel.next->prev = list->tail_sentinel.prev;
+
+ /* Make the tail of the source list be the tail of the target list.
+ */
+ list->tail_sentinel.prev = source->tail_sentinel.prev;
+ list->tail_sentinel.prev->next = &list->tail_sentinel;
+
+ /* Make the source list empty for good measure.
+ */
+ exec_list_make_empty(source);
+}
+
+static inline void
+exec_node_insert_list_after(struct exec_node *n, struct exec_list *after)
+{
+ if (exec_list_is_empty(after))
+ return;
+
+ after->tail_sentinel.prev->next = n->next;
+ after->head_sentinel.next->prev = n;
+
+ n->next->prev = after->tail_sentinel.prev;
+ n->next = after->head_sentinel.next;
+
+ exec_list_make_empty(after);
+}
+
+static inline void
+exec_list_prepend(struct exec_list *list, struct exec_list *source)
+{
+ exec_list_append(source, list);
+ exec_list_move_nodes_to(source, list);
+}
+
+static inline void
+exec_node_insert_list_before(struct exec_node *n, struct exec_list *before)
+{
+ if (exec_list_is_empty(before))
+ return;
+
+ before->tail_sentinel.prev->next = n;
+ before->head_sentinel.next->prev = n->prev;
+
+ n->prev->next = before->head_sentinel.next;
+ n->prev = before->tail_sentinel.prev;
+
+ exec_list_make_empty(before);
+}
+
+static inline void
+exec_list_validate(const struct exec_list *list)
+{
+ const struct exec_node *node;
+
+ assert(list->head_sentinel.next->prev == &list->head_sentinel);
+ assert(list->head_sentinel.prev == NULL);
+ assert(list->tail_sentinel.next == NULL);
+ assert(list->tail_sentinel.prev->next == &list->tail_sentinel);
+
+ /* We could try to use one of the iterators below for this, but they all
+ * either require C++ or assume the exec_node is embedded in a structure,
+ * which is not the case for this function.
+ */
+ for (node = list->head_sentinel.next; node->next != NULL; node = node->next) {
+ assert(node->next->prev == node);
+ assert(node->prev->next == node);
+ }
+}
+
+#ifdef __cplusplus
+inline void exec_list::make_empty()
+{
+ exec_list_make_empty(this);
+}
+
+inline bool exec_list::is_empty() const
+{
+ return exec_list_is_empty(this);
+}
+
+inline const exec_node *exec_list::get_head() const
+{
+ return exec_list_get_head_const(this);
+}
+
+inline exec_node *exec_list::get_head()
+{
+ return exec_list_get_head(this);
+}
+
+inline const exec_node *exec_list::get_head_raw() const
+{
+ return exec_list_get_head_raw_const(this);
+}
+
+inline exec_node *exec_list::get_head_raw()
+{
+ return exec_list_get_head_raw(this);
+}
+
+inline const exec_node *exec_list::get_tail() const
+{
+ return exec_list_get_tail_const(this);
+}
+
+inline exec_node *exec_list::get_tail()
+{
+ return exec_list_get_tail(this);
+}
+
+inline const exec_node *exec_list::get_tail_raw() const
+{
+ return exec_list_get_tail_raw_const(this);
+}
+
+inline exec_node *exec_list::get_tail_raw()
+{
+ return exec_list_get_tail_raw(this);
+}
+
+inline unsigned exec_list::length() const
+{
+ return exec_list_length(this);
+}
+
+inline void exec_list::push_head(exec_node *n)
+{
+ exec_list_push_head(this, n);
+}
+
+inline void exec_list::push_tail(exec_node *n)
+{
+ exec_list_push_tail(this, n);
+}
+
+inline void exec_list::push_degenerate_list_at_head(exec_node *n)
+{
+ exec_list_push_degenerate_list_at_head(this, n);
+}
+
+inline exec_node *exec_list::pop_head()
+{
+ return exec_list_pop_head(this);
+}
+
+inline void exec_list::move_nodes_to(exec_list *target)
+{
+ exec_list_move_nodes_to(this, target);
+}
+
+inline void exec_list::append_list(exec_list *source)
+{
+ exec_list_append(this, source);
+}
+
+inline void exec_node::insert_after(exec_list *after)
+{
+ exec_node_insert_list_after(this, after);
+}
+
+inline void exec_list::prepend_list(exec_list *source)
+{
+ exec_list_prepend(this, source);
+}
+
+inline void exec_node::insert_before(exec_list *before)
+{
+ exec_node_insert_list_before(this, before);
+}
+#endif
+
+#define exec_node_typed_forward(__node, __type) \
+ (!exec_node_is_tail_sentinel(__node) ? (__type) (__node) : NULL)
+
+#define exec_node_typed_backward(__node, __type) \
+ (!exec_node_is_head_sentinel(__node) ? (__type) (__node) : NULL)
+
+#define foreach_in_list(__type, __inst, __list) \
+ for (__type *__inst = exec_node_typed_forward((__list)->head_sentinel.next, __type *); \
+ (__inst) != NULL; \
+ (__inst) = exec_node_typed_forward((__inst)->next, __type *))
+
+#define foreach_in_list_reverse(__type, __inst, __list) \
+ for (__type *__inst = exec_node_typed_backward((__list)->tail_sentinel.prev, __type *); \
+ (__inst) != NULL; \
+ (__inst) = exec_node_typed_backward((__inst)->prev, __type *))
+
+/**
+ * This version is safe even if the current node is removed.
+ */
+
+#define foreach_in_list_safe(__type, __node, __list) \
+ for (__type *__node = exec_node_typed_forward((__list)->head_sentinel.next, __type *), \
+ *__next = (__node) ? exec_node_typed_forward((__list)->head_sentinel.next->next, __type *) : NULL; \
+ (__node) != NULL; \
+ (__node) = __next, __next = __next ? exec_node_typed_forward(__next->next, __type *) : NULL)
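+
+/* Illustrative sketch (`should_delete()` is a hypothetical predicate; the
+ * element type must derive from or begin with an exec_node, as the GLSL IR
+ * classes in this tree do): the _safe variant caches the next pointer before
+ * the body runs, so the current node may be unlinked during iteration.
+ *
+ *    foreach_in_list_safe(ir_instruction, inst, instructions) {
+ *       if (should_delete(inst))
+ *          inst->remove();
+ *    }
+ */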
+
+#define foreach_in_list_reverse_safe(__type, __node, __list) \
+ for (__type *__node = exec_node_typed_backward((__list)->tail_sentinel.prev, __type *), \
+ *__prev = (__node) ? exec_node_typed_backward((__list)->tail_sentinel.prev->prev, __type *) : NULL; \
+ (__node) != NULL; \
+ (__node) = __prev, __prev = __prev ? exec_node_typed_backward(__prev->prev, __type *) : NULL)
+
+#define foreach_in_list_use_after(__type, __inst, __list) \
+ __type *__inst; \
+ for ((__inst) = exec_node_typed_forward((__list)->head_sentinel.next, __type *); \
+ (__inst) != NULL; \
+ (__inst) = exec_node_typed_forward((__inst)->next, __type *))
+
+/**
+ * Iterate through two lists at once. Stops at the end of the shorter list.
+ *
+ * This is safe against either current node being removed or replaced.
+ */
+#define foreach_two_lists(__node1, __list1, __node2, __list2) \
+ for (struct exec_node * __node1 = (__list1)->head_sentinel.next, \
+ * __node2 = (__list2)->head_sentinel.next, \
+ * __next1 = __node1->next, \
+ * __next2 = __node2->next \
+ ; __next1 != NULL && __next2 != NULL \
+ ; __node1 = __next1, \
+ __node2 = __next2, \
+ __next1 = __next1->next, \
+ __next2 = __next2->next)
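+
+/* Illustrative sketch (the signature/call field names are assumptions based
+ * on how this macro is typically used in this tree): walk two parameter
+ * lists in lock-step, stopping at the end of the shorter one.
+ *
+ *    foreach_two_lists(formal_node, &sig->parameters,
+ *                      actual_node, &call->actual_parameters) {
+ *       ...
+ *    }
+ */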
+
+#define exec_node_data_forward(type, node, field) \
+ (!exec_node_is_tail_sentinel(node) ? exec_node_data(type, node, field) : NULL)
+
+#define exec_node_data_backward(type, node, field) \
+ (!exec_node_is_head_sentinel(node) ? exec_node_data(type, node, field) : NULL)
+
+#define foreach_list_typed(__type, __node, __field, __list) \
+ for (__type * __node = \
+ exec_node_data_forward(__type, (__list)->head_sentinel.next, __field); \
+ (__node) != NULL; \
+ (__node) = exec_node_data_forward(__type, (__node)->__field.next, __field))
+
+#define foreach_list_typed_from(__type, __node, __field, __list, __start) \
+ for (__type * __node = exec_node_data_forward(__type, (__start), __field); \
+ (__node) != NULL; \
+ (__node) = exec_node_data_forward(__type, (__node)->__field.next, __field))
+
+#define foreach_list_typed_reverse(__type, __node, __field, __list) \
+ for (__type * __node = \
+ exec_node_data_backward(__type, (__list)->tail_sentinel.prev, __field); \
+ (__node) != NULL; \
+ (__node) = exec_node_data_backward(__type, (__node)->__field.prev, __field))
+
+#define foreach_list_typed_safe(__type, __node, __field, __list) \
+ for (__type * __node = \
+ exec_node_data_forward(__type, (__list)->head_sentinel.next, __field), \
+ * __next = (__node) ? \
+ exec_node_data_forward(__type, (__node)->__field.next, __field) : NULL; \
+ (__node) != NULL; \
+ (__node) = __next, __next = (__next && (__next)->__field.next) ? \
+ exec_node_data_forward(__type, (__next)->__field.next, __field) : NULL)
+
+#define foreach_list_typed_reverse_safe(__type, __node, __field, __list) \
+ for (__type * __node = \
+ exec_node_data_backward(__type, (__list)->tail_sentinel.prev, __field), \
+ * __prev = (__node) ? \
+ exec_node_data_backward(__type, (__node)->__field.prev, __field) : NULL; \
+ (__node) != NULL; \
+ (__node) = __prev, __prev = (__prev && (__prev)->__field.prev) ? \
+ exec_node_data_backward(__type, (__prev)->__field.prev, __field) : NULL)
+
+#endif /* LIST_CONTAINER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.cpp
new file mode 100644
index 0000000000..9429e69c2a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.cpp
@@ -0,0 +1,845 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "compiler/glsl_types.h"
+#include "loop_analysis.h"
+#include "ir_hierarchical_visitor.h"
+
+static void try_add_loop_terminator(loop_variable_state *ls, ir_if *ir);
+
+static bool all_expression_operands_are_loop_constant(ir_rvalue *,
+ hash_table *);
+
+static ir_rvalue *get_basic_induction_increment(ir_assignment *, hash_table *);
+
+/**
+ * Find an initializer of a variable outside a loop
+ *
+ * Works backwards from the loop to find the pre-loop value of the variable.
+ * This is used, for example, to find the initial value of loop induction
+ * variables.
+ *
+ * \param loop Loop where \c var is an induction variable
+ * \param var Variable whose initializer is to be found
+ *
+ * \return
+ * The \c ir_rvalue assigned to the variable outside the loop. May return
+ * \c NULL if no initializer can be found.
+ */
+static ir_rvalue *
+find_initial_value(ir_loop *loop, ir_variable *var)
+{
+ for (exec_node *node = loop->prev; !node->is_head_sentinel();
+ node = node->prev) {
+ ir_instruction *ir = (ir_instruction *) node;
+
+ switch (ir->ir_type) {
+ case ir_type_call:
+ case ir_type_loop:
+ case ir_type_loop_jump:
+ case ir_type_return:
+ case ir_type_if:
+ return NULL;
+
+ case ir_type_function:
+ case ir_type_function_signature:
+ assert(!"Should not get here.");
+ return NULL;
+
+ case ir_type_assignment: {
+ ir_assignment *assign = ir->as_assignment();
+ ir_variable *assignee = assign->lhs->whole_variable_referenced();
+
+ if (assignee == var)
+ return (assign->condition != NULL) ? NULL : assign->rhs;
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return NULL;
+}
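+
+/* As an illustration (hypothetical GLSL, not taken from this file), for the
+ * lowered loop
+ *
+ *    int i = 0;
+ *    while (true) { if (i >= 4) break; ... i++; }
+ *
+ * scanning backwards from the ir_loop finds the unconditional assignment
+ * i = 0, so find_initial_value() returns the constant-0 rvalue.
+ */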
+
+
+static int
+calculate_iterations(ir_rvalue *from, ir_rvalue *to, ir_rvalue *increment,
+ enum ir_expression_operation op, bool continue_from_then,
+ bool swap_compare_operands, bool inc_before_terminator)
+{
+ if (from == NULL || to == NULL || increment == NULL)
+ return -1;
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ ir_expression *const sub =
+ new(mem_ctx) ir_expression(ir_binop_sub, from->type, to, from);
+
+ ir_expression *const div =
+ new(mem_ctx) ir_expression(ir_binop_div, sub->type, sub, increment);
+
+ ir_constant *iter = div->constant_expression_value(mem_ctx);
+ if (iter == NULL) {
+ ralloc_free(mem_ctx);
+ return -1;
+ }
+
+ if (!iter->type->is_integer_32()) {
+ const ir_expression_operation op = iter->type->is_double()
+ ? ir_unop_d2i : ir_unop_f2i;
+ ir_rvalue *cast =
+ new(mem_ctx) ir_expression(op, glsl_type::int_type, iter, NULL);
+
+ iter = cast->constant_expression_value(mem_ctx);
+ }
+
+ int iter_value = iter->get_int_component(0);
+
+ /* The code after this block works under the assumption that the iterator
+ * will be incremented or decremented until it hits the limit; however, the
+ * loop condition can be false on the first iteration. Handle such loops
+ * first.
+ */
+ {
+ ir_rvalue *first_value = from;
+ if (inc_before_terminator) {
+ first_value =
+ new(mem_ctx) ir_expression(ir_binop_add, from->type, from, increment);
+ }
+
+ ir_expression *cmp = swap_compare_operands
+ ? new(mem_ctx) ir_expression(op, glsl_type::bool_type, to, first_value)
+ : new(mem_ctx) ir_expression(op, glsl_type::bool_type, first_value, to);
+ if (continue_from_then)
+ cmp = new(mem_ctx) ir_expression(ir_unop_logic_not, cmp);
+
+ ir_constant *const cmp_result = cmp->constant_expression_value(mem_ctx);
+ assert(cmp_result != NULL);
+ if (cmp_result->get_bool_component(0)) {
+ ralloc_free(mem_ctx);
+ return 0;
+ }
+ }
+
+ /* Make sure that the calculated number of iterations satisfies the exit
+ * condition. This is needed to catch off-by-one errors and some types of
+ * ill-formed loops. For example, we need to detect that the following
+ * loop does not have a maximum iteration count.
+ *
+ * for (float x = 0.0; x != 0.9; x += 0.2)
+ * ;
+ */
+ const int bias[] = { -1, 0, 1 };
+ bool valid_loop = false;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(bias); i++) {
+ /* Increment may be of type int, uint or float. */
+ switch (increment->type->base_type) {
+ case GLSL_TYPE_INT:
+ iter = new(mem_ctx) ir_constant(iter_value + bias[i]);
+ break;
+ case GLSL_TYPE_UINT:
+ iter = new(mem_ctx) ir_constant(unsigned(iter_value + bias[i]));
+ break;
+ case GLSL_TYPE_FLOAT:
+ iter = new(mem_ctx) ir_constant(float(iter_value + bias[i]));
+ break;
+ case GLSL_TYPE_DOUBLE:
+ iter = new(mem_ctx) ir_constant(double(iter_value + bias[i]));
+ break;
+ default:
+ unreachable("Unsupported type for loop iterator.");
+ }
+
+ ir_expression *const mul =
+ new(mem_ctx) ir_expression(ir_binop_mul, increment->type, iter,
+ increment);
+
+ ir_expression *const add =
+ new(mem_ctx) ir_expression(ir_binop_add, mul->type, mul, from);
+
+ ir_expression *cmp = swap_compare_operands
+ ? new(mem_ctx) ir_expression(op, glsl_type::bool_type, to, add)
+ : new(mem_ctx) ir_expression(op, glsl_type::bool_type, add, to);
+ if (continue_from_then)
+ cmp = new(mem_ctx) ir_expression(ir_unop_logic_not, cmp);
+
+ ir_constant *const cmp_result = cmp->constant_expression_value(mem_ctx);
+
+ assert(cmp_result != NULL);
+ if (cmp_result->get_bool_component(0)) {
+ iter_value += bias[i];
+ valid_loop = true;
+ break;
+ }
+ }
+
+ ralloc_free(mem_ctx);
+
+ if (inc_before_terminator) {
+ iter_value--;
+ }
+
+ return (valid_loop) ? iter_value : -1;
+}
+
+static bool
+incremented_before_terminator(ir_loop *loop, ir_variable *var,
+ ir_if *terminator)
+{
+ for (exec_node *node = loop->body_instructions.get_head();
+ !node->is_tail_sentinel();
+ node = node->get_next()) {
+ ir_instruction *ir = (ir_instruction *) node;
+
+ switch (ir->ir_type) {
+ case ir_type_if:
+ if (ir->as_if() == terminator)
+ return false;
+ break;
+
+ case ir_type_assignment: {
+ ir_assignment *assign = ir->as_assignment();
+ ir_variable *assignee = assign->lhs->whole_variable_referenced();
+
+ if (assignee == var) {
+ assert(assign->condition == NULL);
+ return true;
+ }
+
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ unreachable("Unable to find induction variable");
+}
+
+/**
+ * Record the fact that the given loop variable was referenced inside the loop.
+ *
+ * \arg in_assignee is true if the reference was on the LHS of an assignment.
+ *
+ * \arg in_conditional_code_or_nested_loop is true if the reference occurred
+ * inside an if statement or a nested loop.
+ *
+ * \arg current_assignment is the ir_assignment node that the loop variable is
+ * on the LHS of, if any (ignored if \c in_assignee is false).
+ */
+void
+loop_variable::record_reference(bool in_assignee,
+ bool in_conditional_code_or_nested_loop,
+ ir_assignment *current_assignment)
+{
+ if (in_assignee) {
+ assert(current_assignment != NULL);
+
+ if (in_conditional_code_or_nested_loop ||
+ current_assignment->condition != NULL) {
+ this->conditional_or_nested_assignment = true;
+ }
+
+ if (this->first_assignment == NULL) {
+ assert(this->num_assignments == 0);
+
+ this->first_assignment = current_assignment;
+ }
+
+ this->num_assignments++;
+ } else if (this->first_assignment == current_assignment) {
+ /* This catches the case where the variable is used in the RHS of an
+ * assignment where it is also in the LHS.
+ */
+ this->read_before_write = true;
+ }
+}
+
+
+loop_state::loop_state()
+{
+ this->ht = _mesa_pointer_hash_table_create(NULL);
+ this->mem_ctx = ralloc_context(NULL);
+ this->loop_found = false;
+}
+
+
+loop_state::~loop_state()
+{
+ _mesa_hash_table_destroy(this->ht, NULL);
+ ralloc_free(this->mem_ctx);
+}
+
+
+loop_variable_state *
+loop_state::insert(ir_loop *ir)
+{
+ loop_variable_state *ls = new(this->mem_ctx) loop_variable_state;
+
+ _mesa_hash_table_insert(this->ht, ir, ls);
+ this->loop_found = true;
+
+ return ls;
+}
+
+
+loop_variable_state *
+loop_state::get(const ir_loop *ir)
+{
+ hash_entry *entry = _mesa_hash_table_search(this->ht, ir);
+ return entry ? (loop_variable_state *) entry->data : NULL;
+}
+
+
+loop_variable *
+loop_variable_state::get(const ir_variable *ir)
+{
+ if (ir == NULL)
+ return NULL;
+
+ hash_entry *entry = _mesa_hash_table_search(this->var_hash, ir);
+ return entry ? (loop_variable *) entry->data : NULL;
+}
+
+
+loop_variable *
+loop_variable_state::insert(ir_variable *var)
+{
+ void *mem_ctx = ralloc_parent(this);
+ loop_variable *lv = rzalloc(mem_ctx, loop_variable);
+
+ lv->var = var;
+
+ _mesa_hash_table_insert(this->var_hash, lv->var, lv);
+ this->variables.push_tail(lv);
+
+ return lv;
+}
+
+
+loop_terminator *
+loop_variable_state::insert(ir_if *if_stmt, bool continue_from_then)
+{
+ void *mem_ctx = ralloc_parent(this);
+ loop_terminator *t = new(mem_ctx) loop_terminator();
+
+ t->ir = if_stmt;
+ t->continue_from_then = continue_from_then;
+
+ this->terminators.push_tail(t);
+
+ return t;
+}
+
+
+/**
+ * If the given variable is already recorded in the state for this loop,
+ * return the corresponding loop_variable object that records information
+ * about it.
+ *
+ * Otherwise, create a new loop_variable object to record information about
+ * the variable, and set its \c read_before_write field appropriately based on
+ * \c in_assignee.
+ *
+ * \arg in_assignee is true if this variable was encountered on the LHS of an
+ * assignment.
+ */
+loop_variable *
+loop_variable_state::get_or_insert(ir_variable *var, bool in_assignee)
+{
+ loop_variable *lv = this->get(var);
+
+ if (lv == NULL) {
+ lv = this->insert(var);
+ lv->read_before_write = !in_assignee;
+ }
+
+ return lv;
+}
+
+
+namespace {
+
+class loop_analysis : public ir_hierarchical_visitor {
+public:
+ loop_analysis(loop_state *loops);
+
+ virtual ir_visitor_status visit(ir_loop_jump *);
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ virtual ir_visitor_status visit_enter(ir_call *);
+
+ virtual ir_visitor_status visit_enter(ir_loop *);
+ virtual ir_visitor_status visit_leave(ir_loop *);
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_if *);
+ virtual ir_visitor_status visit_leave(ir_if *);
+
+ loop_state *loops;
+
+ int if_statement_depth;
+
+ ir_assignment *current_assignment;
+
+ exec_list state;
+};
+
+} /* anonymous namespace */
+
+loop_analysis::loop_analysis(loop_state *loops)
+ : loops(loops), if_statement_depth(0), current_assignment(NULL)
+{
+ /* empty */
+}
+
+
+ir_visitor_status
+loop_analysis::visit(ir_loop_jump *ir)
+{
+ (void) ir;
+
+ assert(!this->state.is_empty());
+
+ loop_variable_state *const ls =
+ (loop_variable_state *) this->state.get_head();
+
+ ls->num_loop_jumps++;
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+loop_analysis::visit_enter(ir_call *)
+{
+ /* Mark every loop that we're currently analyzing as containing an ir_call
+ * (even those at outer nesting levels).
+ */
+ foreach_in_list(loop_variable_state, ls, &this->state) {
+ ls->contains_calls = true;
+ }
+
+ return visit_continue_with_parent;
+}
+
+
+ir_visitor_status
+loop_analysis::visit(ir_dereference_variable *ir)
+{
+ /* If we're not somewhere inside a loop, there's nothing to do.
+ */
+ if (this->state.is_empty())
+ return visit_continue;
+
+ bool nested = false;
+
+ foreach_in_list(loop_variable_state, ls, &this->state) {
+ ir_variable *var = ir->variable_referenced();
+ loop_variable *lv = ls->get_or_insert(var, this->in_assignee);
+
+ lv->record_reference(this->in_assignee,
+ nested || this->if_statement_depth > 0,
+ this->current_assignment);
+ nested = true;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+loop_analysis::visit_enter(ir_loop *ir)
+{
+ loop_variable_state *ls = this->loops->insert(ir);
+ this->state.push_head(ls);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+loop_analysis::visit_leave(ir_loop *ir)
+{
+ loop_variable_state *const ls =
+ (loop_variable_state *) this->state.pop_head();
+
+ /* Function calls may contain side effects. These could alter any of our
+ * variables in ways that cannot be known, and may even terminate shader
+ * execution (say, calling discard in the fragment shader). So we can't
+ * rely on any of our analysis about assignments to variables.
+ *
+ * We could perform some conservative analysis (prove there's no statically
+ * possible assignment, etc.) but it isn't worth it for now; function
+ * inlining will allow us to unroll loops anyway.
+ */
+ if (ls->contains_calls)
+ return visit_continue;
+
+ foreach_in_list(ir_instruction, node, &ir->body_instructions) {
+ /* Skip over declarations at the start of a loop.
+ */
+ if (node->as_variable())
+ continue;
+
+ ir_if *if_stmt = ((ir_instruction *) node)->as_if();
+
+ if (if_stmt != NULL)
+ try_add_loop_terminator(ls, if_stmt);
+ }
+
+
+ foreach_in_list_safe(loop_variable, lv, &ls->variables) {
+ /* Move variables that are already marked as being loop constant to
+ * a separate list. These trivially don't need to be tested.
+ */
+ if (lv->is_loop_constant()) {
+ lv->remove();
+ ls->constants.push_tail(lv);
+ }
+ }
+
+ /* Each variable assigned in the loop that isn't already marked as being loop
+ * constant might still be loop constant. The requirements at this point
+ * are:
+ *
+ * - Variable is written before it is read.
+ *
+ * - Only one assignment to the variable.
+ *
+ * - All operands on the RHS of the assignment are also loop constants.
+ *
+ * The last requirement is the reason for the progress loop. A variable
+ * marked as a loop constant on one pass may allow other variables to be
+ * marked as loop constant on following passes.
+ */
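+ /* For example (illustrative GLSL, not from a real shader): given
+ *
+ *    b = a * 2.0;
+ *    c = b + 1.0;
+ *
+ * inside the loop body, `c` can only be classified as loop constant once an
+ * earlier pass has already marked `b`, which is why we loop until no further
+ * progress is made.
+ */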
+ bool progress;
+ do {
+ progress = false;
+
+ foreach_in_list_safe(loop_variable, lv, &ls->variables) {
+ if (lv->conditional_or_nested_assignment || (lv->num_assignments > 1))
+ continue;
+
+ /* Process the RHS of the assignment. If all of the variables
+ * accessed there are loop constants, then the RHS is clean, and the
+ * variable may now qualify as a loop constant itself.
+ */
+ ir_rvalue *const rhs = lv->first_assignment->rhs;
+ if (all_expression_operands_are_loop_constant(rhs, ls->var_hash)) {
+ lv->rhs_clean = true;
+
+ if (lv->is_loop_constant()) {
+ progress = true;
+
+ lv->remove();
+ ls->constants.push_tail(lv);
+ }
+ }
+ }
+ } while (progress);
+
+ /* The remaining variables that are not loop invariant might be loop
+ * induction variables.
+ */
+ foreach_in_list_safe(loop_variable, lv, &ls->variables) {
+ /* If there is more than one assignment to a variable, it cannot be a
+ * loop induction variable. This isn't strictly true, but this is a
+ * very simple induction variable detector, and it can't handle more
+ * complex cases.
+ */
+ if (lv->num_assignments > 1)
+ continue;
+
+ /* All of the variables with zero assignments in the loop are loop
+ * invariant, and they should have already been filtered out.
+ */
+ assert(lv->num_assignments == 1);
+ assert(lv->first_assignment != NULL);
+
+ /* The assignment to the variable in the loop must be unconditional and
+ * not inside a nested loop.
+ */
+ if (lv->conditional_or_nested_assignment)
+ continue;
+
+ /* Basic loop induction variables have a single assignment in the loop
+ * that has the form 'VAR = VAR + i' or 'VAR = VAR - i' where i is a
+ * loop invariant.
+ */
+ ir_rvalue *const inc =
+ get_basic_induction_increment(lv->first_assignment, ls->var_hash);
+ if (inc != NULL) {
+ lv->increment = inc;
+
+ lv->remove();
+ ls->induction_variables.push_tail(lv);
+ }
+ }
+
+ /* Search the loop terminating conditions for those of the form 'i < c'
+ * where i is a loop induction variable, c is a constant, and < is any
+ * relational operator. From each of these we can infer an iteration count.
+ * Also figure out which terminator (if any) produces the smallest
+ * iteration count--this is the limiting terminator.
+ */
+ foreach_in_list(loop_terminator, t, &ls->terminators) {
+ ir_if *if_stmt = t->ir;
+
+ /* If-statements can be either 'if (expr)' or 'if (deref)'. We only care
+ * about the former here.
+ */
+ ir_expression *cond = if_stmt->condition->as_expression();
+ if (cond == NULL)
+ continue;
+
+ switch (cond->operation) {
+ case ir_binop_less:
+ case ir_binop_gequal: {
+ /* The expressions that we care about will either be of the form
+ * 'counter < limit' or 'limit < counter'. Figure out which is
+ * which.
+ */
+ ir_rvalue *counter = cond->operands[0]->as_dereference_variable();
+ ir_constant *limit = cond->operands[1]->as_constant();
+ enum ir_expression_operation cmp = cond->operation;
+ bool swap_compare_operands = false;
+
+ if (limit == NULL) {
+ counter = cond->operands[1]->as_dereference_variable();
+ limit = cond->operands[0]->as_constant();
+ swap_compare_operands = true;
+ }
+
+ if ((counter == NULL) || (limit == NULL))
+ break;
+
+ ir_variable *var = counter->variable_referenced();
+
+ ir_rvalue *init = find_initial_value(ir, var);
+
+ loop_variable *lv = ls->get(var);
+ if (lv != NULL && lv->is_induction_var()) {
+ bool inc_before_terminator =
+ incremented_before_terminator(ir, var, t->ir);
+
+ t->iterations = calculate_iterations(init, limit, lv->increment,
+ cmp, t->continue_from_then,
+ swap_compare_operands,
+ inc_before_terminator);
+
+ if (t->iterations >= 0 &&
+ (ls->limiting_terminator == NULL ||
+ t->iterations < ls->limiting_terminator->iterations)) {
+ ls->limiting_terminator = t;
+ }
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+loop_analysis::visit_enter(ir_if *ir)
+{
+ (void) ir;
+
+ if (!this->state.is_empty())
+ this->if_statement_depth++;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+loop_analysis::visit_leave(ir_if *ir)
+{
+ (void) ir;
+
+ if (!this->state.is_empty())
+ this->if_statement_depth--;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+loop_analysis::visit_enter(ir_assignment *ir)
+{
+ /* If we're not somewhere inside a loop, there's nothing to do.
+ */
+ if (this->state.is_empty())
+ return visit_continue_with_parent;
+
+ this->current_assignment = ir;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+loop_analysis::visit_leave(ir_assignment *ir)
+{
+ /* Since the visit_enter exits with visit_continue_with_parent for this
+ * case, the loop state stack should never be empty here.
+ */
+ assert(!this->state.is_empty());
+
+ assert(this->current_assignment == ir);
+ this->current_assignment = NULL;
+
+ return visit_continue;
+}
+
+
+class examine_rhs : public ir_hierarchical_visitor {
+public:
+ examine_rhs(hash_table *loop_variables)
+ {
+ this->only_uses_loop_constants = true;
+ this->loop_variables = loop_variables;
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ hash_entry *entry = _mesa_hash_table_search(this->loop_variables,
+ ir->var);
+ loop_variable *lv = entry ? (loop_variable *) entry->data : NULL;
+
+ assert(lv != NULL);
+
+ if (lv->is_loop_constant()) {
+ return visit_continue;
+ } else {
+ this->only_uses_loop_constants = false;
+ return visit_stop;
+ }
+ }
+
+ hash_table *loop_variables;
+ bool only_uses_loop_constants;
+};
+
+
+bool
+all_expression_operands_are_loop_constant(ir_rvalue *ir, hash_table *variables)
+{
+ examine_rhs v(variables);
+
+ ir->accept(&v);
+
+ return v.only_uses_loop_constants;
+}
+
+
+ir_rvalue *
+get_basic_induction_increment(ir_assignment *ir, hash_table *var_hash)
+{
+ /* The RHS must be a binary expression.
+ */
+ ir_expression *const rhs = ir->rhs->as_expression();
+ if ((rhs == NULL)
+ || ((rhs->operation != ir_binop_add)
+ && (rhs->operation != ir_binop_sub)))
+ return NULL;
+
+ /* One of the operands of the expression must be the variable assigned.
+ * If the operation is subtraction, the variable in question must be the
+ * "left" operand.
+ */
+ ir_variable *const var = ir->lhs->variable_referenced();
+
+ ir_variable *const op0 = rhs->operands[0]->variable_referenced();
+ ir_variable *const op1 = rhs->operands[1]->variable_referenced();
+
+ if (((op0 != var) && (op1 != var))
+ || ((op1 == var) && (rhs->operation == ir_binop_sub)))
+ return NULL;
+
+ ir_rvalue *inc = (op0 == var) ? rhs->operands[1] : rhs->operands[0];
+
+ if (inc->as_constant() == NULL) {
+ ir_variable *const inc_var = inc->variable_referenced();
+ if (inc_var != NULL) {
+ hash_entry *entry = _mesa_hash_table_search(var_hash, inc_var);
+ loop_variable *lv = entry ? (loop_variable *) entry->data : NULL;
+
+ if (lv == NULL || !lv->is_loop_constant()) {
+ assert(lv != NULL);
+ inc = NULL;
+ }
+ } else
+ inc = NULL;
+ }
+
+ if ((inc != NULL) && (rhs->operation == ir_binop_sub)) {
+ void *mem_ctx = ralloc_parent(ir);
+
+ inc = new(mem_ctx) ir_expression(ir_unop_neg,
+ inc->type,
+ inc->clone(mem_ctx, NULL),
+ NULL);
+ }
+
+ return inc;
+}
+
+
+/**
+ * Detect whether an if-statement is a loop terminating condition and, if so,
+ * add it to the list of loop terminators.
+ *
+ * Detects if-statements of the form
+ *
+ * (if (expression bool ...) (...then_instrs...break))
+ *
+ * or
+ *
+ * (if (expression bool ...) ... (...else_instrs...break))
+ */
+void
+try_add_loop_terminator(loop_variable_state *ls, ir_if *ir)
+{
+ ir_instruction *inst = (ir_instruction *) ir->then_instructions.get_tail();
+ ir_instruction *else_inst =
+ (ir_instruction *) ir->else_instructions.get_tail();
+
+ if (is_break(inst) || is_break(else_inst))
+ ls->insert(ir, is_break(else_inst));
+}
+
+
+loop_state *
+analyze_loop_variables(exec_list *instructions)
+{
+ loop_state *loops = new loop_state;
+ loop_analysis v(loops);
+
+ v.run(instructions);
+ return v.loops;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.h
new file mode 100644
index 0000000000..d6fdb908e0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_analysis.h
@@ -0,0 +1,244 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef LOOP_ANALYSIS_H
+#define LOOP_ANALYSIS_H
+
+#include "ir.h"
+#include "util/hash_table.h"
+
+/**
+ * Analyze and classify all variables used in all loops in the instruction list
+ */
+extern class loop_state *
+analyze_loop_variables(exec_list *instructions);
+
+static inline bool
+is_break(ir_instruction *ir)
+{
+ return ir != NULL && ir->ir_type == ir_type_loop_jump &&
+ ((ir_loop_jump *) ir)->is_break();
+}
+
+
+extern bool
+unroll_loops(exec_list *instructions, loop_state *ls,
+ const struct gl_shader_compiler_options *options);
+
+
+/**
+ * Tracking for all variables used in a loop
+ */
+class loop_variable_state : public exec_node {
+public:
+ class loop_variable *get(const ir_variable *);
+ class loop_variable *insert(ir_variable *);
+ class loop_variable *get_or_insert(ir_variable *, bool in_assignee);
+ class loop_terminator *insert(ir_if *, bool continue_from_then);
+
+
+ /**
+ * Variables that have not yet been classified
+ */
+ exec_list variables;
+
+ /**
+ * Variables whose values are constant within the body of the loop
+ *
+ * This list contains \c loop_variable objects.
+ */
+ exec_list constants;
+
+ /**
+ * Induction variables for this loop
+ *
+ * This list contains \c loop_variable objects.
+ */
+ exec_list induction_variables;
+
+ /**
+ * Simple if-statements that lead to the termination of the loop
+ *
+ * This list contains \c loop_terminator objects.
+ *
+ * \sa is_loop_terminator
+ */
+ exec_list terminators;
+
+ /**
+ * If any of the terminators in \c terminators leads to termination of the
+ * loop after a constant number of iterations, this is the terminator that
+ * leads to termination after the smallest number of iterations. Otherwise
+ * NULL.
+ */
+ loop_terminator *limiting_terminator;
+
+ /**
+ * Hash table containing all variables accessed in this loop
+ */
+ hash_table *var_hash;
+
+ /**
+ * Number of ir_loop_jump instructions that operate on this loop
+ */
+ unsigned num_loop_jumps;
+
+ /**
+ * Whether this loop contains any function calls.
+ */
+ bool contains_calls;
+
+ loop_variable_state()
+ {
+ this->num_loop_jumps = 0;
+ this->contains_calls = false;
+ this->var_hash = _mesa_pointer_hash_table_create(NULL);
+ this->limiting_terminator = NULL;
+ }
+
+ ~loop_variable_state()
+ {
+ _mesa_hash_table_destroy(this->var_hash, NULL);
+ }
+
+ DECLARE_RALLOC_CXX_OPERATORS(loop_variable_state)
+};
+
+
+class loop_variable : public exec_node {
+public:
+ /** The variable in question. */
+ ir_variable *var;
+
+ /** Is the variable read in the loop before it is written? */
+ bool read_before_write;
+
+ /** Are all variables in the RHS of the assignment loop constants? */
+ bool rhs_clean;
+
+ /**
+ * Is there an assignment to the variable that is conditional, or inside a
+ * nested loop?
+ */
+ bool conditional_or_nested_assignment;
+
+ /** Reference to the first assignment to the variable in the loop body. */
+ ir_assignment *first_assignment;
+
+ /** Number of assignments to the variable in the loop body. */
+ unsigned num_assignments;
+
+ /**
+ * Increment value for a loop induction variable
+ *
+ * If this is a loop induction variable, the amount by which the variable
+ * is incremented on each iteration through the loop.
+ *
+ * If this is not a loop induction variable, NULL.
+ */
+ ir_rvalue *increment;
+
+
+ inline bool is_induction_var() const
+ {
+ /* Induction variables always have a non-null increment, and vice
+ * versa.
+ */
+ return this->increment != NULL;
+ }
+
+
+ inline bool is_loop_constant() const
+ {
+ const bool is_const = (this->num_assignments == 0)
+ || (((this->num_assignments == 1)
+ && !this->conditional_or_nested_assignment
+ && !this->read_before_write
+ && this->rhs_clean) || this->var->data.read_only);
+
+ /* If the RHS of *the* assignment is clean, then there must be exactly
+ * one assignment of the variable.
+ */
+ assert((this->rhs_clean && (this->num_assignments == 1))
+ || !this->rhs_clean);
+
+ return is_const;
+ }
+
+ void record_reference(bool in_assignee,
+ bool in_conditional_code_or_nested_loop,
+ ir_assignment *current_assignment);
+};
+
+
+class loop_terminator : public exec_node {
+public:
+ loop_terminator()
+ : ir(NULL), iterations(-1)
+ {
+ }
+
+ /**
+ * Statement which terminates the loop.
+ */
+ ir_if *ir;
+
+ /**
+ * The number of iterations after which the terminator is known to
+ * terminate the loop (if that is a fixed value). Otherwise -1.
+ */
+ int iterations;
+
+ /* Does the loop continue from the then branch or the else branch of the if? */
+ bool continue_from_then;
+};
+
+
+class loop_state {
+public:
+ ~loop_state();
+
+ /**
+ * Get the loop variable state data for a particular loop
+ */
+ loop_variable_state *get(const ir_loop *);
+
+ loop_variable_state *insert(ir_loop *ir);
+
+ bool loop_found;
+
+private:
+ loop_state();
+
+ /**
+ * Hash table containing all loops that have been analyzed.
+ */
+ hash_table *ht;
+
+ void *mem_ctx;
+
+ friend loop_state *analyze_loop_variables(exec_list *instructions);
+};
+
+#endif /* LOOP_ANALYSIS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_unroll.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_unroll.cpp
new file mode 100644
index 0000000000..1a9229acc3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/loop_unroll.cpp
@@ -0,0 +1,591 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "compiler/glsl_types.h"
+#include "loop_analysis.h"
+#include "ir_hierarchical_visitor.h"
+
+#include "main/mtypes.h"
+
+namespace {
+
+class loop_unroll_visitor : public ir_hierarchical_visitor {
+public:
+ loop_unroll_visitor(loop_state *state,
+ const struct gl_shader_compiler_options *options)
+ {
+ this->state = state;
+ this->progress = false;
+ this->options = options;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_loop *ir);
+ void simple_unroll(ir_loop *ir, int iterations);
+ void complex_unroll(ir_loop *ir, int iterations,
+ bool continue_from_then_branch,
+ bool limiting_term_first,
+ bool lt_continue_from_then_branch);
+ void splice_post_if_instructions(ir_if *ir_if, exec_list *splice_dest);
+
+ loop_state *state;
+
+ bool progress;
+ const struct gl_shader_compiler_options *options;
+};
+
+} /* anonymous namespace */
+
+class loop_unroll_count : public ir_hierarchical_visitor {
+public:
+ int nodes;
+ bool unsupported_variable_indexing;
+ bool array_indexed_by_induction_var_with_exact_iterations;
+ /* If there are nested loops, the node count will be inaccurate. */
+ bool nested_loop;
+
+ loop_unroll_count(exec_list *list, loop_variable_state *ls,
+ const struct gl_shader_compiler_options *options)
+ : ls(ls), options(options)
+ {
+ nodes = 0;
+ nested_loop = false;
+ unsupported_variable_indexing = false;
+ array_indexed_by_induction_var_with_exact_iterations = false;
+
+ run(list);
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *)
+ {
+ nodes++;
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_expression *)
+ {
+ nodes++;
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_loop *)
+ {
+ nested_loop = true;
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_dereference_array *ir)
+ {
+ /* Force unroll in case of dynamic indexing with sampler arrays
+ * when EmitNoIndirectSampler is set.
+ */
+ if (options->EmitNoIndirectSampler) {
+ if ((ir->array->type->is_array() &&
+ ir->array->type->contains_sampler()) &&
+ !ir->array_index->constant_expression_value(ralloc_parent(ir))) {
+ unsupported_variable_indexing = true;
+ return visit_continue;
+ }
+ }
+
+ /* Check for arrays variably-indexed by a loop induction variable.
+ * Unrolling the loop may convert that access into constant-indexing.
+ *
+ * Many drivers don't support particular kinds of variable indexing,
+ * and have to resort to using lower_variable_index_to_cond_assign to
+ * handle it. This results in huge amounts of horrible code, so we'd
+ * like to avoid that if possible. Here, we just note that it will
+ * happen.
+ */
+ if ((ir->array->type->is_array() || ir->array->type->is_matrix()) &&
+ !ir->array_index->as_constant()) {
+ ir_variable *array = ir->array->variable_referenced();
+ loop_variable *lv = ls->get(ir->array_index->variable_referenced());
+ if (array && lv && lv->is_induction_var()) {
+ /* If an array is indexed by a loop induction variable, and the
+ * array size is exactly the number of loop iterations, this is
+ * probably a simple for-loop trying to access each element in
+ * turn; the application may expect it to be unrolled.
+ */
+ if (int(array->type->length) == ls->limiting_terminator->iterations)
+ array_indexed_by_induction_var_with_exact_iterations = true;
+
+ switch (array->data.mode) {
+ case ir_var_auto:
+ case ir_var_temporary:
+ case ir_var_const_in:
+ case ir_var_function_in:
+ case ir_var_function_out:
+ case ir_var_function_inout:
+ if (options->EmitNoIndirectTemp)
+ unsupported_variable_indexing = true;
+ break;
+ case ir_var_uniform:
+ case ir_var_shader_storage:
+ if (options->EmitNoIndirectUniform)
+ unsupported_variable_indexing = true;
+ break;
+ case ir_var_shader_in:
+ if (options->EmitNoIndirectInput)
+ unsupported_variable_indexing = true;
+ break;
+ case ir_var_shader_out:
+ if (options->EmitNoIndirectOutput)
+ unsupported_variable_indexing = true;
+ break;
+ }
+ }
+ }
+ return visit_continue;
+ }
+
+private:
+ loop_variable_state *ls;
+ const struct gl_shader_compiler_options *options;
+};
+
+
+/**
+ * Unroll a loop which does not contain any jumps. For example, if the input
+ * is:
+ *
+ * (loop (...) ...instrs...)
+ *
+ * And the iteration count is 3, the output will be:
+ *
+ * ...instrs... ...instrs... ...instrs...
+ */
+void
+loop_unroll_visitor::simple_unroll(ir_loop *ir, int iterations)
+{
+ void *const mem_ctx = ralloc_parent(ir);
+ loop_variable_state *const ls = this->state->get(ir);
+
+ /* If there are no terminators, then the loop iteration count must be 1.
+ * This is the 'do { } while (false);' case.
+ */
+ assert(!ls->terminators.is_empty() || iterations == 1);
+
+ ir_instruction *first_ir =
+ (ir_instruction *) ir->body_instructions.get_head();
+
+ if (!first_ir) {
+ /* The loop is empty; remove it and return. */
+ ir->remove();
+ return;
+ }
+
+ ir_if *limit_if = NULL;
+ bool exit_branch_has_instructions = false;
+ if (ls->limiting_terminator) {
+ limit_if = ls->limiting_terminator->ir;
+ ir_instruction *ir_if_last = (ir_instruction *)
+ limit_if->then_instructions.get_tail();
+
+ if (is_break(ir_if_last)) {
+ if (ir_if_last != limit_if->then_instructions.get_head())
+ exit_branch_has_instructions = true;
+
+ splice_post_if_instructions(limit_if, &limit_if->else_instructions);
+ ir_if_last->remove();
+ } else {
+ ir_if_last = (ir_instruction *)
+ limit_if->else_instructions.get_tail();
+ assert(is_break(ir_if_last));
+
+ if (ir_if_last != limit_if->else_instructions.get_head())
+ exit_branch_has_instructions = true;
+
+ splice_post_if_instructions(limit_if, &limit_if->then_instructions);
+ ir_if_last->remove();
+ }
+ }
+
+ /* Because 'iterations' is the number of times we pass over the *entire*
+ * loop body before hitting the first break, we need to bump the number of
+ * iterations if the limiting terminator is not the first instruction in
+ * the loop, or if the exit branch contains instructions. This ensures we
+ * execute any instructions before the terminator or in its exit branch.
+ */
+ if (!ls->terminators.is_empty() &&
+ (limit_if != first_ir->as_if() || exit_branch_has_instructions))
+ iterations++;
+
+ for (int i = 0; i < iterations; i++) {
+ exec_list copy_list;
+
+ copy_list.make_empty();
+ clone_ir_list(mem_ctx, &copy_list, &ir->body_instructions);
+
+ ir->insert_before(&copy_list);
+ }
+
+ /* The loop has been replaced by the unrolled copies. Remove the original
+ * loop from the IR sequence.
+ */
+ ir->remove();
+
+ this->progress = true;
+}
+
+
+/**
+ * Unroll a loop whose last statement is an ir_if. If \c
+ * continue_from_then_branch is true, the loop is repeated only when the
+ * "then" branch of the if is taken; otherwise it is repeated only when the
+ * "else" branch of the if is taken.
+ *
+ * For example, if the input is:
+ *
+ * (loop (...)
+ * ...body...
+ * (if (cond)
+ * (...then_instrs...)
+ * (...else_instrs...)))
+ *
+ * And the iteration count is 3, and \c continue_from_then_branch is true,
+ * then the output will be:
+ *
+ * ...body...
+ * (if (cond)
+ * (...then_instrs...
+ * ...body...
+ * (if (cond)
+ * (...then_instrs...
+ * ...body...
+ * (if (cond)
+ * (...then_instrs...)
+ * (...else_instrs...)))
+ * (...else_instrs...)))
+ * (...else_instrs))
+ */
+void
+loop_unroll_visitor::complex_unroll(ir_loop *ir, int iterations,
+ bool second_term_then_continue,
+ bool extra_iteration_required,
+ bool first_term_then_continue)
+{
+ void *const mem_ctx = ralloc_parent(ir);
+ ir_instruction *ir_to_replace = ir;
+
+ /* Because 'iterations' is the number of times we pass over the *entire*
+ * loop body before hitting the first break, we need to bump the number of
+ * iterations if the limiting terminator is not the first instruction in
+ * the loop, or if the exit branch contains instructions. This ensures we
+ * execute any instructions before the terminator or in its exit branch.
+ */
+ if (extra_iteration_required)
+ iterations++;
+
+ for (int i = 0; i < iterations; i++) {
+ exec_list copy_list;
+
+ copy_list.make_empty();
+ clone_ir_list(mem_ctx, &copy_list, &ir->body_instructions);
+
+ ir_if *ir_if = ((ir_instruction *) copy_list.get_tail())->as_if();
+ assert(ir_if != NULL);
+
+ exec_list *const first_list = first_term_then_continue
+ ? &ir_if->then_instructions : &ir_if->else_instructions;
+ ir_if = ((ir_instruction *) first_list->get_tail())->as_if();
+
+ ir_to_replace->insert_before(&copy_list);
+ ir_to_replace->remove();
+
+ /* placeholder that will be removed in the next iteration */
+ ir_to_replace =
+ new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_continue);
+
+ exec_list *const second_term_continue_list = second_term_then_continue
+ ? &ir_if->then_instructions : &ir_if->else_instructions;
+
+ second_term_continue_list->push_tail(ir_to_replace);
+ }
+
+ ir_to_replace->remove();
+
+ this->progress = true;
+}
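+
+/* Each unrolled copy above ends by pushing a placeholder continue onto the
+ * branch that re-enters the loop; the next copy is inserted before that
+ * placeholder and removes it, and the final placeholder is removed once the
+ * last iteration has been emitted.
+ */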
+
+
+/**
+ * Move all of the instructions which follow \c ir_if to the end of
+ * \c splice_dest.
+ *
+ * For example, in the code snippet:
+ *
+ * (if (cond)
+ * (...then_instructions...
+ * break)
+ * (...else_instructions...))
+ * ...post_if_instructions...
+ *
+ * If \c ir_if points to the "if" instruction, and \c splice_dest points to
+ * (...else_instructions...), the code snippet is transformed into:
+ *
+ * (if (cond)
+ * (...then_instructions...
+ * break)
+ * (...else_instructions...
+ * ...post_if_instructions...))
+ */
+void
+loop_unroll_visitor::splice_post_if_instructions(ir_if *ir_if,
+ exec_list *splice_dest)
+{
+ while (!ir_if->get_next()->is_tail_sentinel()) {
+ ir_instruction *move_ir = (ir_instruction *) ir_if->get_next();
+
+ move_ir->remove();
+ splice_dest->push_tail(move_ir);
+ }
+}
+
+static bool
+exit_branch_has_instructions(ir_if *term_if, bool lt_then_continue)
+{
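+ /* The exit branch always holds at least the break itself (until it is
+ * spliced out), so head == tail means there is at most one instruction --
+ * i.e. no extra work hiding in the branch.
+ */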
+ if (lt_then_continue) {
+ if (term_if->else_instructions.get_head() ==
+ term_if->else_instructions.get_tail())
+ return false;
+ } else {
+ if (term_if->then_instructions.get_head() ==
+ term_if->then_instructions.get_tail())
+ return false;
+ }
+
+ return true;
+}
+
+ir_visitor_status
+loop_unroll_visitor::visit_leave(ir_loop *ir)
+{
+ loop_variable_state *const ls = this->state->get(ir);
+
+ /* If we've entered a loop that hasn't been analyzed, something really,
+ * really bad has happened.
+ */
+ if (ls == NULL) {
+ assert(ls != NULL);
+ return visit_continue;
+ }
+
+ /* Limiting terminator may have an iteration count of zero;
+ * this is a valid case because the loop may break during
+ * the first iteration.
+ */
+
+ /* Remove the conditional break statements for every terminator that has a
+ * fixed iteration count, except for the one associated with the limiting
+ * terminator--that one needs to stay, since it terminates the loop.
+ * Exception: if the loop still has a normative bound, then that terminates
+ * the loop, so we don't even need the limiting terminator.
+ */
+ foreach_in_list_safe(loop_terminator, t, &ls->terminators) {
+ if (t->iterations < 0)
+ continue;
+
+ exec_list *branch_instructions;
+ if (t != ls->limiting_terminator) {
+ ir_instruction *ir_if_last = (ir_instruction *)
+ t->ir->then_instructions.get_tail();
+ if (is_break(ir_if_last)) {
+ branch_instructions = &t->ir->else_instructions;
+ } else {
+ branch_instructions = &t->ir->then_instructions;
+ assert(is_break((ir_instruction *)
+ t->ir->else_instructions.get_tail()));
+ }
+
+ exec_list copy_list;
+ copy_list.make_empty();
+ clone_ir_list(ir, &copy_list, branch_instructions);
+
+ t->ir->insert_before(&copy_list);
+ t->ir->remove();
+
+ assert(ls->num_loop_jumps > 0);
+ ls->num_loop_jumps--;
+
+ /* Also remove it from the terminator list */
+ t->remove();
+
+ this->progress = true;
+ }
+ }
+
+ if (ls->limiting_terminator == NULL) {
+ ir_instruction *last_ir =
+ (ir_instruction *) ir->body_instructions.get_tail();
+
+ /* If a loop has no induction variable and the last instruction is
+ * a break, unroll the loop with a count of 1. This is the classic
+ *
+ * do {
+ * // ...
+ * } while (false)
+ *
+ * that is used to wrap multi-line macros.
+ *
+ * If num_loop_jumps is not zero, last_ir cannot be NULL... there have to
+ * be at least num_loop_jumps instructions in the loop.
+ */
+ if (ls->num_loop_jumps == 1 && is_break(last_ir)) {
+ last_ir->remove();
+
+ simple_unroll(ir, 1);
+ }
+
+ /* Don't try to unroll loops where the number of iterations is not known
+ * at compile-time.
+ */
+ return visit_continue;
+ }
+
+ int iterations = ls->limiting_terminator->iterations;
+
+ const int max_iterations = options->MaxUnrollIterations;
+
+ /* Don't try to unroll loops that have zillions of iterations either.
+ */
+ if (iterations > max_iterations)
+ return visit_continue;
+
+ /* Don't try to unroll nested loops or loops with a huge body.
+ */
+ loop_unroll_count count(&ir->body_instructions, ls, options);
+
+ bool loop_too_large =
+ count.nested_loop || count.nodes * iterations > max_iterations * 5;
+
+ if (loop_too_large && !count.unsupported_variable_indexing &&
+ !count.array_indexed_by_induction_var_with_exact_iterations)
+ return visit_continue;
+
+ /* Note: the limiting terminator contributes 1 to ls->num_loop_jumps.
+ * We'll be removing the limiting terminator before we unroll.
+ */
+ assert(ls->num_loop_jumps > 0);
+ unsigned predicted_num_loop_jumps = ls->num_loop_jumps - 1;
+
+ if (predicted_num_loop_jumps > 1)
+ return visit_continue;
+
+ if (predicted_num_loop_jumps == 0) {
+ simple_unroll(ir, iterations);
+ return visit_continue;
+ }
+
+ ir_instruction *last_ir = (ir_instruction *) ir->body_instructions.get_tail();
+ assert(last_ir != NULL);
+
+ if (is_break(last_ir)) {
+ /* If the only loop-jump is a break at the end of the loop, the loop
+ * will execute exactly once. Remove the break and use the simple
+ * unroller with an iteration count of 1.
+ */
+ last_ir->remove();
+
+ simple_unroll(ir, 1);
+ return visit_continue;
+ }
+
+ /* Complex unrolling can only handle two terminators. One with an unknown
+ * iteration count and one with a known iteration count. We have already
+ * made sure we have a known iteration count above and removed any
+ * unreachable terminators with a known count. Here we make sure there
+ * aren't any additional unknown terminators, or any other jumps nested
+ * inside further ifs.
+ */
+ if (ls->num_loop_jumps != 2 || ls->terminators.length() != 2)
+ return visit_continue;
+
+ ir_instruction *first_ir =
+ (ir_instruction *) ir->body_instructions.get_head();
+
+ unsigned term_count = 0;
+ bool first_term_then_continue = false;
+ foreach_in_list(loop_terminator, t, &ls->terminators) {
+ ir_if *ir_if = t->ir->as_if();
+ assert(ir_if != NULL);
+
+ ir_instruction *ir_if_last =
+ (ir_instruction *) ir_if->then_instructions.get_tail();
+
+ if (is_break(ir_if_last)) {
+ splice_post_if_instructions(ir_if, &ir_if->else_instructions);
+ ir_if_last->remove();
+ if (term_count == 1) {
+ bool ebi =
+ exit_branch_has_instructions(ls->limiting_terminator->ir,
+ first_term_then_continue);
+ complex_unroll(ir, iterations, false,
+ first_ir->as_if() != ls->limiting_terminator->ir ||
+ ebi,
+ first_term_then_continue);
+ return visit_continue;
+ }
+ } else {
+ ir_if_last =
+ (ir_instruction *) ir_if->else_instructions.get_tail();
+
+ assert(is_break(ir_if_last));
+ if (is_break(ir_if_last)) {
+ splice_post_if_instructions(ir_if, &ir_if->then_instructions);
+ ir_if_last->remove();
+ if (term_count == 1) {
+ bool ebi =
+ exit_branch_has_instructions(ls->limiting_terminator->ir,
+ first_term_then_continue);
+ complex_unroll(ir, iterations, true,
+ first_ir->as_if() != ls->limiting_terminator->ir ||
+ ebi,
+ first_term_then_continue);
+ return visit_continue;
+ } else {
+ first_term_then_continue = true;
+ }
+ }
+ }
+
+ term_count++;
+ }
+
+ /* Did not find the break statement. It must be inside complex if-nesting,
+ * so don't try to unroll.
+ */
+ return visit_continue;
+}
+
+
+bool
+unroll_loops(exec_list *instructions, loop_state *ls,
+ const struct gl_shader_compiler_options *options)
+{
+ loop_unroll_visitor v(ls, options);
+
+ v.run(instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp
new file mode 100644
index 0000000000..c85b39bcaa
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp
@@ -0,0 +1,572 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "ir_hierarchical_visitor.h"
+#include "program/prog_instruction.h"
+#include "program/prog_statevars.h"
+#include "util/bitscan.h"
+#include "builtin_functions.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+#define imm1(x) new(mem_ctx) ir_constant((float) (x), 1)
+#define imm3(x) new(mem_ctx) ir_constant((float) (x), 3)
+
+static ir_rvalue *
+blend_multiply(ir_variable *src, ir_variable *dst)
+{
+ /* f(Cs,Cd) = Cs*Cd */
+ return mul(src, dst);
+}
+
+static ir_rvalue *
+blend_screen(ir_variable *src, ir_variable *dst)
+{
+ /* f(Cs,Cd) = Cs+Cd-Cs*Cd */
+ return sub(add(src, dst), mul(src, dst));
+}
+
+static ir_rvalue *
+blend_overlay(ir_variable *src, ir_variable *dst)
+{
+ void *mem_ctx = ralloc_parent(src);
+
+ /* f(Cs,Cd) = 2*Cs*Cd, if Cd <= 0.5
+ * 1-2*(1-Cs)*(1-Cd), otherwise
+ */
+ ir_rvalue *rule_1 = mul(imm3(2), mul(src, dst));
+ ir_rvalue *rule_2 =
+ sub(imm3(1), mul(imm3(2), mul(sub(imm3(1), src), sub(imm3(1), dst))));
+ return csel(lequal(dst, imm3(0.5f)), rule_1, rule_2);
+}
+
+static ir_rvalue *
+blend_darken(ir_variable *src, ir_variable *dst)
+{
+ /* f(Cs,Cd) = min(Cs,Cd) */
+ return min2(src, dst);
+}
+
+static ir_rvalue *
+blend_lighten(ir_variable *src, ir_variable *dst)
+{
+ /* f(Cs,Cd) = max(Cs,Cd) */
+ return max2(src, dst);
+}
+
+static ir_rvalue *
+blend_colordodge(ir_variable *src, ir_variable *dst)
+{
+ void *mem_ctx = ralloc_parent(src);
+
+ /* f(Cs,Cd) =
+ * 0, if Cd <= 0
+ * min(1,Cd/(1-Cs)), if Cd > 0 and Cs < 1
+ * 1, if Cd > 0 and Cs >= 1
+ */
+ return csel(lequal(dst, imm3(0)), imm3(0),
+ csel(gequal(src, imm3(1)), imm3(1),
+ min2(imm3(1), div(dst, sub(imm3(1), src)))));
+}
+
+static ir_rvalue *
+blend_colorburn(ir_variable *src, ir_variable *dst)
+{
+ void *mem_ctx = ralloc_parent(src);
+
+ /* f(Cs,Cd) =
+ * 1, if Cd >= 1
+ * 1 - min(1,(1-Cd)/Cs), if Cd < 1 and Cs > 0
+ * 0, if Cd < 1 and Cs <= 0
+ */
+ return csel(gequal(dst, imm3(1)), imm3(1),
+ csel(lequal(src, imm3(0)), imm3(0),
+ sub(imm3(1), min2(imm3(1), div(sub(imm3(1), dst), src)))));
+}
+
+static ir_rvalue *
+blend_hardlight(ir_variable *src, ir_variable *dst)
+{
+ void *mem_ctx = ralloc_parent(src);
+
+ /* f(Cs,Cd) = 2*Cs*Cd, if Cs <= 0.5
+ * 1-2*(1-Cs)*(1-Cd), otherwise
+ */
+ ir_rvalue *rule_1 = mul(imm3(2), mul(src, dst));
+ ir_rvalue *rule_2 =
+ sub(imm3(1), mul(imm3(2), mul(sub(imm3(1), src), sub(imm3(1), dst))));
+ return csel(lequal(src, imm3(0.5f)), rule_1, rule_2);
+}
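+
+/* Note that hardlight is overlay with the roles of source and destination
+ * swapped: blend_hardlight(src, dst) computes the same value as
+ * blend_overlay(dst, src).
+ */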
+
+static ir_rvalue *
+blend_softlight(ir_variable *src, ir_variable *dst)
+{
+ void *mem_ctx = ralloc_parent(src);
+
+ /* f(Cs,Cd) =
+ * Cd-(1-2*Cs)*Cd*(1-Cd),
+ * if Cs <= 0.5
+ * Cd+(2*Cs-1)*Cd*((16*Cd-12)*Cd+3),
+ * if Cs > 0.5 and Cd <= 0.25
+ * Cd+(2*Cs-1)*(sqrt(Cd)-Cd),
+ * if Cs > 0.5 and Cd > 0.25
+ *
+ * We can simplify this to
+ *
+ * f(Cs,Cd) = Cd+(2*Cs-1)*g(Cs,Cd) where
+ * g(Cs,Cd) = Cd*(1-Cd) if Cs <= 0.5
+ * Cd*((16*Cd-12)*Cd+3) if Cs > 0.5 and Cd <= 0.25
+ * sqrt(Cd)-Cd otherwise
+ */
+ ir_rvalue *factor_1 = mul(dst, sub(imm3(1), dst));
+ ir_rvalue *factor_2 =
+ mul(dst, add(mul(sub(mul(imm3(16), dst), imm3(12)), dst), imm3(3)));
+ ir_rvalue *factor_3 = sub(sqrt(dst), dst);
+ ir_rvalue *factor = csel(lequal(src, imm3(0.5f)), factor_1,
+ csel(lequal(dst, imm3(0.25f)),
+ factor_2, factor_3));
+ return add(dst, mul(sub(mul(imm3(2), src), imm3(1)), factor));
+}
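+
+/* Sanity check of the simplification above: for Cs = 0.25, Cd = 0.5, the
+ * spec formula gives Cd-(1-2*Cs)*Cd*(1-Cd) = 0.5 - 0.5*0.25 = 0.375, and the
+ * factored form gives Cd+(2*Cs-1)*Cd*(1-Cd) = 0.5 + (-0.5)*0.25 = 0.375.
+ */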
+
+static ir_rvalue *
+blend_difference(ir_variable *src, ir_variable *dst)
+{
+ return abs(sub(dst, src));
+}
+
+static ir_rvalue *
+blend_exclusion(ir_variable *src, ir_variable *dst)
+{
+ void *mem_ctx = ralloc_parent(src);
+
+ return add(src, sub(dst, mul(imm3(2), mul(src, dst))));
+}
+
+/* Return the minimum of a vec3's components */
+static ir_rvalue *
+minv3(ir_variable *v)
+{
+ return min2(min2(swizzle_x(v), swizzle_y(v)), swizzle_z(v));
+}
+
+/* Return the maximum of a vec3's components */
+static ir_rvalue *
+maxv3(ir_variable *v)
+{
+ return max2(max2(swizzle_x(v), swizzle_y(v)), swizzle_z(v));
+}
+
+static ir_rvalue *
+lumv3(ir_variable *c)
+{
+ ir_constant_data data;
+ data.f[0] = 0.30;
+ data.f[1] = 0.59;
+ data.f[2] = 0.11;
+
+ void *mem_ctx = ralloc_parent(c);
+
+ /* dot(c, vec3(0.30, 0.59, 0.11)) */
+ return dot(c, new(mem_ctx) ir_constant(glsl_type::vec3_type, &data));
+}
+
+static ir_rvalue *
+satv3(ir_variable *c)
+{
+ return sub(maxv3(c), minv3(c));
+}
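+
+/* Saturation is computed as max(c) - min(c), matching the sat() helper in
+ * the HSL blend equations of KHR_blend_equation_advanced.
+ */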
+
+/* Take the base RGB color <cbase> and override its luminosity with that
+ * of the RGB color <clum>.
+ *
+ * This follows the equations given in the ES 3.2 (June 15th, 2016)
+ * specification. Revision 16 of GL_KHR_blend_equation_advanced and
+ * revision 9 of GL_NV_blend_equation_advanced specify a different set
+ * of equations. Older revisions match ES 3.2's text, and dEQP expects
+ * the ES 3.2 rules implemented here.
+ */
+static void
+set_lum(ir_factory *f,
+ ir_variable *color,
+ ir_variable *cbase,
+ ir_variable *clum)
+{
+ void *mem_ctx = f->mem_ctx;
+ f->emit(assign(color, add(cbase, sub(lumv3(clum), lumv3(cbase)))));
+
+ ir_variable *llum = f->make_temp(glsl_type::float_type, "__blend_lum");
+ ir_variable *mincol = f->make_temp(glsl_type::float_type, "__blend_mincol");
+ ir_variable *maxcol = f->make_temp(glsl_type::float_type, "__blend_maxcol");
+
+ f->emit(assign(llum, lumv3(color)));
+ f->emit(assign(mincol, minv3(color)));
+ f->emit(assign(maxcol, maxv3(color)));
+
+ f->emit(if_tree(less(mincol, imm1(0)),
+ assign(color, add(llum, div(mul(sub(color, llum), llum),
+ sub(llum, mincol)))),
+ if_tree(greater(maxcol, imm1(1)),
+ assign(color, add(llum, div(mul(sub(color, llum),
+ sub(imm3(1), llum)),
+ sub(maxcol, llum)))))));
+}
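+
+/* The if_tree above implements the spec's ClipColor() step: after forcing
+ * the luminosity, components may fall outside [0,1], so the color is scaled
+ * back toward its luminosity until it fits in range.
+ */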
+
+/* Take the base RGB color <cbase> and override its saturation with
+ * that of the RGB color <csat>. Then override the luminosity of the
+ * result with that of the RGB color <clum>.
+ */
+static void
+set_lum_sat(ir_factory *f,
+ ir_variable *color,
+ ir_variable *cbase,
+ ir_variable *csat,
+ ir_variable *clum)
+{
+ void *mem_ctx = f->mem_ctx;
+
+ ir_rvalue *minbase = minv3(cbase);
+ ir_rvalue *ssat = satv3(csat);
+
+ ir_variable *sbase = f->make_temp(glsl_type::float_type, "__blend_sbase");
+ f->emit(assign(sbase, satv3(cbase)));
+
+ /* Equivalent (modulo rounding errors) to setting the
+ * smallest (R,G,B) component to 0, the largest to <ssat>,
+ * and interpolating the "middle" component based on its
+ * original value relative to the smallest/largest.
+ */
+ f->emit(if_tree(greater(sbase, imm1(0)),
+ assign(color, div(mul(sub(cbase, minbase), ssat), sbase)),
+ assign(color, imm3(0))));
+ set_lum(f, color, color, clum);
+}
+
+static ir_rvalue *
+is_mode(ir_variable *mode, enum gl_advanced_blend_mode q)
+{
+ return equal(mode, new(ralloc_parent(mode)) ir_constant(unsigned(q)));
+}
+
+static ir_variable *
+calc_blend_result(ir_factory f,
+ ir_variable *mode,
+ ir_variable *fb,
+ ir_rvalue *blend_src,
+ GLbitfield blend_qualifiers)
+{
+ void *mem_ctx = f.mem_ctx;
+ ir_variable *result = f.make_temp(glsl_type::vec4_type, "__blend_result");
+
+ /* Save blend_src to a temporary so we can reference it multiple times. */
+ ir_variable *src = f.make_temp(glsl_type::vec4_type, "__blend_src");
+ f.emit(assign(src, blend_src));
+
+ /* If we're not doing advanced blending, just write the original value. */
+ ir_if *if_blending = new(mem_ctx) ir_if(is_mode(mode, BLEND_NONE));
+ f.emit(if_blending);
+ if_blending->then_instructions.push_tail(assign(result, src));
+
+ f.instructions = &if_blending->else_instructions;
+
+ /* (Rs', Gs', Bs') =
+ * (0, 0, 0), if As == 0
+ * (Rs/As, Gs/As, Bs/As), otherwise
+ */
+ ir_variable *src_rgb = f.make_temp(glsl_type::vec3_type, "__blend_src_rgb");
+ ir_variable *src_alpha = f.make_temp(glsl_type::float_type, "__blend_src_a");
+
+ /* (Rd', Gd', Bd') =
+ * (0, 0, 0), if Ad == 0
+ * (Rd/Ad, Gd/Ad, Bd/Ad), otherwise
+ */
+ ir_variable *dst_rgb = f.make_temp(glsl_type::vec3_type, "__blend_dst_rgb");
+ ir_variable *dst_alpha = f.make_temp(glsl_type::float_type, "__blend_dst_a");
+
+ f.emit(assign(dst_alpha, swizzle_w(fb)));
+ f.emit(if_tree(equal(dst_alpha, imm1(0)),
+ assign(dst_rgb, imm3(0)),
+ assign(dst_rgb, csel(equal(swizzle_xyz(fb),
+ swizzle(fb, SWIZZLE_WWWW, 3)),
+ imm3(1),
+ div(swizzle_xyz(fb), dst_alpha)))));
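+ /* When fb.rgb exactly equals fb.aaa, the csel above forces the quotient to
+ * exactly 1.0 instead of dividing, presumably to avoid rounding drift for a
+ * fully-white unpremultiplied color.
+ */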
+
+ f.emit(assign(src_alpha, swizzle_w(src)));
+ f.emit(if_tree(equal(src_alpha, imm1(0)),
+ assign(src_rgb, imm3(0)),
+ assign(src_rgb, csel(equal(swizzle_xyz(src),
+ swizzle(src, SWIZZLE_WWWW, 3)),
+ imm3(1),
+ div(swizzle_xyz(src), src_alpha)))));
+
+ ir_variable *factor = f.make_temp(glsl_type::vec3_type, "__blend_factor");
+
+ ir_factory casefactory = f;
+
+ unsigned choices = blend_qualifiers;
+ while (choices) {
+ enum gl_advanced_blend_mode choice = (enum gl_advanced_blend_mode)
+ (1u << u_bit_scan(&choices));
+
+ ir_if *iff = new(mem_ctx) ir_if(is_mode(mode, choice));
+ casefactory.emit(iff);
+ casefactory.instructions = &iff->then_instructions;
+
+ ir_rvalue *val = NULL;
+
+ switch (choice) {
+ case BLEND_MULTIPLY:
+ val = blend_multiply(src_rgb, dst_rgb);
+ break;
+ case BLEND_SCREEN:
+ val = blend_screen(src_rgb, dst_rgb);
+ break;
+ case BLEND_OVERLAY:
+ val = blend_overlay(src_rgb, dst_rgb);
+ break;
+ case BLEND_DARKEN:
+ val = blend_darken(src_rgb, dst_rgb);
+ break;
+ case BLEND_LIGHTEN:
+ val = blend_lighten(src_rgb, dst_rgb);
+ break;
+ case BLEND_COLORDODGE:
+ val = blend_colordodge(src_rgb, dst_rgb);
+ break;
+ case BLEND_COLORBURN:
+ val = blend_colorburn(src_rgb, dst_rgb);
+ break;
+ case BLEND_HARDLIGHT:
+ val = blend_hardlight(src_rgb, dst_rgb);
+ break;
+ case BLEND_SOFTLIGHT:
+ val = blend_softlight(src_rgb, dst_rgb);
+ break;
+ case BLEND_DIFFERENCE:
+ val = blend_difference(src_rgb, dst_rgb);
+ break;
+ case BLEND_EXCLUSION:
+ val = blend_exclusion(src_rgb, dst_rgb);
+ break;
+ case BLEND_HSL_HUE:
+ set_lum_sat(&casefactory, factor, src_rgb, dst_rgb, dst_rgb);
+ break;
+ case BLEND_HSL_SATURATION:
+ set_lum_sat(&casefactory, factor, dst_rgb, src_rgb, dst_rgb);
+ break;
+ case BLEND_HSL_COLOR:
+ set_lum(&casefactory, factor, src_rgb, dst_rgb);
+ break;
+ case BLEND_HSL_LUMINOSITY:
+ set_lum(&casefactory, factor, dst_rgb, src_rgb);
+ break;
+ case BLEND_NONE:
+ case BLEND_ALL:
+ unreachable("not real cases");
+ }
+
+ if (val)
+ casefactory.emit(assign(factor, val));
+
+ casefactory.instructions = &iff->else_instructions;
+ }
+
+ /* p0(As,Ad) = As*Ad
+ * p1(As,Ad) = As*(1-Ad)
+ * p2(As,Ad) = Ad*(1-As)
+ */
+ ir_variable *p0 = f.make_temp(glsl_type::float_type, "__blend_p0");
+ ir_variable *p1 = f.make_temp(glsl_type::float_type, "__blend_p1");
+ ir_variable *p2 = f.make_temp(glsl_type::float_type, "__blend_p2");
+
+ f.emit(assign(p0, mul(src_alpha, dst_alpha)));
+ f.emit(assign(p1, mul(src_alpha, sub(imm1(1), dst_alpha))));
+ f.emit(assign(p2, mul(dst_alpha, sub(imm1(1), src_alpha))));
+
+ /* R = f(Rs',Rd')*p0(As,Ad) + Y*Rs'*p1(As,Ad) + Z*Rd'*p2(As,Ad)
+ * G = f(Gs',Gd')*p0(As,Ad) + Y*Gs'*p1(As,Ad) + Z*Gd'*p2(As,Ad)
+ * B = f(Bs',Bd')*p0(As,Ad) + Y*Bs'*p1(As,Ad) + Z*Bd'*p2(As,Ad)
+ * A = X*p0(As,Ad) + Y*p1(As,Ad) + Z*p2(As,Ad)
+ *
+ * <X, Y, Z> is always <1, 1, 1>, so we can ignore it.
+ *
+ * In vector form, this is:
+ * RGB = factor * p0 + Cs * p1 + Cd * p2
+ * A = p0 + p1 + p2
+ */
+ f.emit(assign(result,
+ add(add(mul(factor, p0), mul(src_rgb, p1)), mul(dst_rgb, p2)),
+ WRITEMASK_XYZ));
+ f.emit(assign(result, add(add(p0, p1), p2), WRITEMASK_W));
+
+ return result;
+}
+
+/**
+ * Dereference var, or var[0] if it's an array.
+ */
+static ir_dereference *
+deref_output(ir_variable *var)
+{
+ void *mem_ctx = ralloc_parent(var);
+
+ ir_dereference *val = new(mem_ctx) ir_dereference_variable(var);
+ if (val->type->is_array()) {
+ ir_constant *index = new(mem_ctx) ir_constant(0);
+ val = new(mem_ctx) ir_dereference_array(val, index);
+ }
+
+ return val;
+}
+
+static ir_function_signature *
+get_main(gl_linked_shader *sh)
+{
+ ir_function_signature *sig = NULL;
+ /* We can't use _mesa_get_main_function_signature() because we don't
+ * have a symbol table at this point. Just go find main() by hand.
+ */
+ foreach_in_list(ir_instruction, ir, sh->ir) {
+ ir_function *f = ir->as_function();
+ if (f && strcmp(f->name, "main") == 0) {
+ exec_list void_parameters;
+ sig = f->matching_signature(NULL, &void_parameters, false);
+ break;
+ }
+ }
+ assert(sig != NULL); /* main() must exist */
+ return sig;
+}
+
+bool
+lower_blend_equation_advanced(struct gl_linked_shader *sh, bool coherent)
+{
+ if (sh->Program->sh.fs.BlendSupport == 0)
+ return false;
+
+ /* Lower early returns in main() so there's a single exit point
+ * where we can insert our lowering code.
+ */
+ do_lower_jumps(sh->ir, false, false, true, false, false);
+
+ void *mem_ctx = ralloc_parent(sh->ir);
+
+ ir_variable *fb = new(mem_ctx) ir_variable(glsl_type::vec4_type,
+ "__blend_fb_fetch",
+ ir_var_shader_out);
+ fb->data.location = FRAG_RESULT_DATA0;
+ fb->data.read_only = 1;
+ fb->data.fb_fetch_output = 1;
+ fb->data.memory_coherent = coherent;
+ fb->data.how_declared = ir_var_hidden;
+
+ ir_variable *mode = new(mem_ctx) ir_variable(glsl_type::uint_type,
+ "gl_AdvancedBlendModeMESA",
+ ir_var_uniform);
+ mode->data.how_declared = ir_var_hidden;
+ mode->allocate_state_slots(1);
+ ir_state_slot *slot0 = &mode->get_state_slots()[0];
+ slot0->swizzle = SWIZZLE_XXXX;
+ slot0->tokens[0] = STATE_INTERNAL;
+ slot0->tokens[1] = STATE_ADVANCED_BLENDING_MODE;
+ for (int i = 2; i < STATE_LENGTH; i++)
+ slot0->tokens[i] = 0;
+
+ sh->ir->push_head(fb);
+ sh->ir->push_head(mode);
+
+ /* Gather any output variables referring to render target 0.
+ *
+ * ARB_enhanced_layouts irritatingly allows the shader to specify
+ * multiple output variables for the same render target, each of
+ * which writes a subset of the components, starting at location_frac.
+ * The variables can't overlap, thankfully.
+ */
+ ir_variable *outputs[4] = { NULL, NULL, NULL, NULL };
+ foreach_in_list(ir_instruction, ir, sh->ir) {
+ ir_variable *var = ir->as_variable();
+ if (!var || var->data.mode != ir_var_shader_out)
+ continue;
+
+ if (var->data.location == FRAG_RESULT_DATA0 ||
+ var->data.location == FRAG_RESULT_COLOR) {
+ const int components = var->type->without_array()->vector_elements;
+
+ for (int i = 0; i < components; i++) {
+ outputs[var->data.location_frac + i] = var;
+ }
+ }
+ }
+
+ /* Combine values written to outputs into a single RGBA blend source.
+ * We assign <0, 0, 0, 1> to any components with no corresponding output.
+ */
+ ir_rvalue *blend_source;
+ if (outputs[0] && outputs[0]->type->without_array()->vector_elements == 4) {
+ blend_source = deref_output(outputs[0]);
+ } else {
+ ir_rvalue *blend_comps[4];
+ for (int i = 0; i < 4; i++) {
+ ir_variable *var = outputs[i];
+ if (var) {
+ blend_comps[i] = swizzle(deref_output(outputs[i]),
+ i - outputs[i]->data.location_frac, 1);
+ } else {
+ blend_comps[i] = new(mem_ctx) ir_constant(i < 3 ? 0.0f : 1.0f);
+ }
+ }
+
+ blend_source =
+ new(mem_ctx) ir_expression(ir_quadop_vector, glsl_type::vec4_type,
+ blend_comps[0], blend_comps[1],
+ blend_comps[2], blend_comps[3]);
+ }
+
+ ir_function_signature *main = get_main(sh);
+ ir_factory f(&main->body, mem_ctx);
+
+ ir_variable *result_dest =
+ calc_blend_result(f, mode, fb, blend_source,
+ sh->Program->sh.fs.BlendSupport);
+
+ /* Copy the result back to the original values. It would be simpler
+ * to demote the program's output variables, and create a new vec4
+ * output for our result, but this pass runs before we create the
+ * ARB_program_interface_query resource list. So we have to leave
+ * the original outputs in place and use them.
+ */
+ for (int i = 0; i < 4; i++) {
+ if (!outputs[i])
+ continue;
+
+ f.emit(assign(deref_output(outputs[i]), swizzle(result_dest, i, 1),
+ 1 << i));
+ }
+
+ validate_ir_tree(sh->ir);
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp
new file mode 100644
index 0000000000..a6e2f741eb
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp
@@ -0,0 +1,447 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_buffer_access.cpp
+ *
+ * Helper for IR lowering pass to replace dereferences of buffer object based
+ * shader variables with intrinsic function calls.
+ *
+ * This helper is used by lowering passes for UBOs, SSBOs and compute shader
+ * shared variables.
+ */
+
+#include "lower_buffer_access.h"
+#include "ir_builder.h"
+#include "main/macros.h"
+#include "util/list.h"
+#include "glsl_parser_extras.h"
+#include "linker.h"
+
+using namespace ir_builder;
+
+namespace lower_buffer_access {
+
+static inline int
+writemask_for_size(unsigned n)
+{
+ return ((1 << n) - 1);
+}
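+
+/* e.g. writemask_for_size(3) == 0x7, selecting the .xyz channels. */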
+
+/**
+ * Takes a deref and recursively calls itself to break the deref down to the
+ * point that the reads or writes generated are contiguous scalars or vectors.
+ */
+void
+lower_buffer_access::emit_access(void *mem_ctx,
+ bool is_write,
+ ir_dereference *deref,
+ ir_variable *base_offset,
+ unsigned int deref_offset,
+ bool row_major,
+ const glsl_type *matrix_type,
+ enum glsl_interface_packing packing,
+ unsigned int write_mask)
+{
+ if (deref->type->is_struct()) {
+ unsigned int field_offset = 0;
+
+ for (unsigned i = 0; i < deref->type->length; i++) {
+ const struct glsl_struct_field *field =
+ &deref->type->fields.structure[i];
+ ir_dereference *field_deref =
+ new(mem_ctx) ir_dereference_record(deref->clone(mem_ctx, NULL),
+ field->name);
+
+ unsigned field_align;
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ field_align = field->type->std430_base_alignment(row_major);
+ else
+ field_align = field->type->std140_base_alignment(row_major);
+ field_offset = glsl_align(field_offset, field_align);
+
+ emit_access(mem_ctx, is_write, field_deref, base_offset,
+ deref_offset + field_offset,
+ row_major, NULL, packing,
+ writemask_for_size(field_deref->type->vector_elements));
+
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ field_offset += field->type->std430_size(row_major);
+ else
+ field_offset += field->type->std140_size(row_major);
+ }
+ return;
+ }
+
+ if (deref->type->is_array()) {
+ unsigned array_stride = packing == GLSL_INTERFACE_PACKING_STD430 ?
+ deref->type->fields.array->std430_array_stride(row_major) :
+ glsl_align(deref->type->fields.array->std140_size(row_major), 16);
+
+ for (unsigned i = 0; i < deref->type->length; i++) {
+ ir_constant *element = new(mem_ctx) ir_constant(i);
+ ir_dereference *element_deref =
+ new(mem_ctx) ir_dereference_array(deref->clone(mem_ctx, NULL),
+ element);
+ emit_access(mem_ctx, is_write, element_deref, base_offset,
+ deref_offset + i * array_stride,
+ row_major, NULL, packing,
+ writemask_for_size(element_deref->type->vector_elements));
+ }
+ return;
+ }
+
+ if (deref->type->is_matrix()) {
+ for (unsigned i = 0; i < deref->type->matrix_columns; i++) {
+ ir_constant *col = new(mem_ctx) ir_constant(i);
+ ir_dereference *col_deref =
+ new(mem_ctx) ir_dereference_array(deref->clone(mem_ctx, NULL), col);
+
+ /* For a row-major matrix, the next column starts at the next
+ * element. Otherwise it is offset by the matrix stride.
+ */
+ const unsigned size_mul = row_major
+ ? (deref->type->is_double() ? 8 : 4)
+ : link_calculate_matrix_stride(deref->type, row_major, packing);
+
+ emit_access(mem_ctx, is_write, col_deref, base_offset,
+ deref_offset + i * size_mul,
+ row_major, deref->type, packing,
+ writemask_for_size(col_deref->type->vector_elements));
+ }
+ return;
+ }
+
+ assert(deref->type->is_scalar() || deref->type->is_vector());
+
+ if (!row_major) {
+ ir_rvalue *offset =
+ add(base_offset, new(mem_ctx) ir_constant(deref_offset));
+ unsigned mask =
+ is_write ? write_mask : (1 << deref->type->vector_elements) - 1;
+ insert_buffer_access(mem_ctx, deref, deref->type, offset, mask, -1);
+ } else {
+ /* We're dereffing a column out of a row-major matrix, so we
+ * gather the vector from each stored row.
+ */
+ assert(deref->type->is_float() || deref->type->is_double());
+ assert(matrix_type != NULL);
+
+ const unsigned matrix_stride =
+ link_calculate_matrix_stride(matrix_type, row_major, packing);
+
+ const glsl_type *deref_type = deref->type->get_scalar_type();
+
+ for (unsigned i = 0; i < deref->type->vector_elements; i++) {
+ ir_rvalue *chan_offset =
+ add(base_offset,
+ new(mem_ctx) ir_constant(deref_offset + i * matrix_stride));
+ if (!is_write || ((1U << i) & write_mask))
+ insert_buffer_access(mem_ctx, deref, deref_type, chan_offset,
+ (1U << i), i);
+ }
+ }
+}
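+
+/* As a worked example of the struct path above: under std140, a
+ * struct { float f; vec3 v; } places f at offset 0 and v at offset 16,
+ * because vec3 has a base alignment of 16 bytes.
+ */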
+
+/**
+ * Determine if a thing being dereferenced is row-major
+ *
+ * There is some trickery here.
+ *
+ * If the thing being dereferenced is a member of a uniform block \b without an
+ * instance name, then the name of the \c ir_variable is the field name of an
+ * interface type. If this field is row-major, then the thing referenced is
+ * row-major.
+ *
+ * If the thing being dereferenced is a member of a uniform block \b with an
+ * instance name, then the last dereference in the tree will be an
+ * \c ir_dereference_record. If that record field is row-major, then the
+ * thing referenced is row-major.
+ */
+bool
+lower_buffer_access::is_dereferenced_thing_row_major(const ir_rvalue *deref)
+{
+ bool matrix = false;
+ const ir_rvalue *ir = deref;
+
+ while (true) {
+ matrix = matrix || ir->type->without_array()->is_matrix();
+
+ switch (ir->ir_type) {
+ case ir_type_dereference_array: {
+ const ir_dereference_array *const array_deref =
+ (const ir_dereference_array *) ir;
+
+ ir = array_deref->array;
+ break;
+ }
+
+ case ir_type_dereference_record: {
+ const ir_dereference_record *const record_deref =
+ (const ir_dereference_record *) ir;
+
+ ir = record_deref->record;
+
+ const int idx = record_deref->field_idx;
+ assert(idx >= 0);
+
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(ir->type->fields.structure[idx].matrix_layout);
+
+ switch (matrix_layout) {
+ case GLSL_MATRIX_LAYOUT_INHERITED:
+ break;
+ case GLSL_MATRIX_LAYOUT_COLUMN_MAJOR:
+ return false;
+ case GLSL_MATRIX_LAYOUT_ROW_MAJOR:
+ return matrix || deref->type->without_array()->is_struct();
+ }
+
+ break;
+ }
+
+ case ir_type_dereference_variable: {
+ const ir_dereference_variable *const var_deref =
+ (const ir_dereference_variable *) ir;
+
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(var_deref->var->data.matrix_layout);
+
+ switch (matrix_layout) {
+ case GLSL_MATRIX_LAYOUT_INHERITED: {
+ /* For interface block matrix variables we handle inherited
+ * layouts at HIR generation time, but we don't do that for shared
+ * variables, which are always column-major
+ */
+ ASSERTED ir_variable *var = deref->variable_referenced();
+ assert((var->is_in_buffer_block() && !matrix) ||
+ var->data.mode == ir_var_shader_shared);
+ return false;
+ }
+ case GLSL_MATRIX_LAYOUT_COLUMN_MAJOR:
+ return false;
+ case GLSL_MATRIX_LAYOUT_ROW_MAJOR:
+ return matrix || deref->type->without_array()->is_struct();
+ }
+
+ unreachable("invalid matrix layout");
+ break;
+ }
+
+ default:
+ return false;
+ }
+ }
+
+ /* The tree must have ended with a dereference that wasn't an
+ * ir_dereference_variable. That is invalid, and it should be impossible.
+ */
+ unreachable("invalid dereference tree");
+ return false;
+}
+
+/**
+ * This function initializes various values that will be used later by
+ * emit_access when actually emitting loads or stores.
+ *
+ * Note: const_offset is an input as well as an output, clients must
+ * initialize it to the offset of the variable in the underlying block, and
+ * this function will adjust it by adding the constant offset of the member
+ * being accessed into that variable.
+ */
+void
+lower_buffer_access::setup_buffer_access(void *mem_ctx,
+ ir_rvalue *deref,
+ ir_rvalue **offset,
+ unsigned *const_offset,
+ bool *row_major,
+ const glsl_type **matrix_type,
+ const glsl_struct_field **struct_field,
+ enum glsl_interface_packing packing)
+{
+ *offset = new(mem_ctx) ir_constant(0u);
+ *row_major = is_dereferenced_thing_row_major(deref);
+ *matrix_type = NULL;
+
+ /* Calculate the offset to the start of the region of the UBO
+ * dereferenced by *rvalue. This may be a variable offset if an
+ * array dereference has a variable index.
+ */
+ while (deref) {
+ switch (deref->ir_type) {
+ case ir_type_dereference_variable: {
+ deref = NULL;
+ break;
+ }
+
+ case ir_type_dereference_array: {
+ ir_dereference_array *deref_array = (ir_dereference_array *) deref;
+ unsigned array_stride;
+ if (deref_array->array->type->is_vector()) {
+ /* We get this when storing or loading a component out of a vector
+ * with a non-constant index. This happens for v[i] = f where v is
+ * a vector (or m[i][j] = f where m is a matrix). If we don't
+ * lower that here, it gets turned into v = vector_insert(v, i,
+ * f), which loads the entire vector, modifies one component and
+ * then writes the entire thing back. That breaks if another
+ * thread or SIMD channel is modifying the same vector.
+ */
+ array_stride = 4;
+ if (deref_array->array->type->is_64bit())
+ array_stride *= 2;
+ } else if (deref_array->array->type->is_matrix() && *row_major) {
+ /* When loading a vector out of a row major matrix, the
+ * step between the columns (vectors) is the size of a
+ * float, while the step between the rows (elements of a
+ * vector) is handled below in emit_access().
+ */
+ array_stride = 4;
+ if (deref_array->array->type->is_64bit())
+ array_stride *= 2;
+ *matrix_type = deref_array->array->type;
+ } else if (deref_array->type->without_array()->is_interface()) {
+ /* We're processing an array dereference of an interface instance
+ * array. The thing being dereferenced *must* be a variable
+ * dereference because interfaces cannot be embedded in other
+ * types. In terms of calculating the offsets for the lowering
+ * pass, we don't care about the array index. All elements of an
+ * interface instance array will have the same offsets relative to
+ * the base of the block that backs them.
+ */
+ deref = deref_array->array->as_dereference();
+ break;
+ } else {
+ /* Whether or not the field is row-major (because it might be a
+ * bvec2 or something) does not affect the array itself. We need
+ * to know whether an array element in its entirety is row-major.
+ */
+ const bool array_row_major =
+ is_dereferenced_thing_row_major(deref_array);
+
+ /* The array type will give the correct interface packing
+ * information
+ */
+ if (packing == GLSL_INTERFACE_PACKING_STD430) {
+ array_stride = deref_array->type->std430_array_stride(array_row_major);
+ } else {
+ array_stride = deref_array->type->std140_size(array_row_major);
+ array_stride = glsl_align(array_stride, 16);
+ }
+ }
+
+ ir_rvalue *array_index = deref_array->array_index;
+ if (array_index->type->base_type == GLSL_TYPE_INT)
+ array_index = i2u(array_index);
+
+ ir_constant *const_index =
+ array_index->constant_expression_value(mem_ctx, NULL);
+ if (const_index) {
+ *const_offset += array_stride * const_index->value.u[0];
+ } else {
+ *offset = add(*offset,
+ mul(array_index,
+ new(mem_ctx) ir_constant(array_stride)));
+ }
+ deref = deref_array->array->as_dereference();
+ break;
+ }
+
+ case ir_type_dereference_record: {
+ ir_dereference_record *deref_record = (ir_dereference_record *) deref;
+ const glsl_type *struct_type = deref_record->record->type;
+ unsigned intra_struct_offset = 0;
+
+ for (unsigned int i = 0; i < struct_type->length; i++) {
+ const glsl_type *type = struct_type->fields.structure[i].type;
+
+ ir_dereference_record *field_deref = new(mem_ctx)
+ ir_dereference_record(deref_record->record,
+ struct_type->fields.structure[i].name);
+ const bool field_row_major =
+ is_dereferenced_thing_row_major(field_deref);
+
+ ralloc_free(field_deref);
+
+ unsigned field_align = 0;
+
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ field_align = type->std430_base_alignment(field_row_major);
+ else
+ field_align = type->std140_base_alignment(field_row_major);
+
+ if (struct_type->fields.structure[i].offset != -1) {
+ intra_struct_offset = struct_type->fields.structure[i].offset;
+ }
+
+ intra_struct_offset = glsl_align(intra_struct_offset, field_align);
+
+ assert(deref_record->field_idx >= 0);
+ if (i == (unsigned) deref_record->field_idx) {
+ if (struct_field)
+ *struct_field = &struct_type->fields.structure[i];
+ break;
+ }
+
+ if (packing == GLSL_INTERFACE_PACKING_STD430)
+ intra_struct_offset += type->std430_size(field_row_major);
+ else
+ intra_struct_offset += type->std140_size(field_row_major);
+
+ /* If the field just examined was itself a structure, apply rule
+ * #9:
+ *
+ * "The structure may have padding at the end; the base offset
+ * of the member following the sub-structure is rounded up to
+ * the next multiple of the base alignment of the structure."
+ */
+ if (type->without_array()->is_struct()) {
+ intra_struct_offset = glsl_align(intra_struct_offset,
+ field_align);
+ }
+ }
+
+ *const_offset += intra_struct_offset;
+ deref = deref_record->record->as_dereference();
+ break;
+ }
+
+ case ir_type_swizzle: {
+ ir_swizzle *deref_swizzle = (ir_swizzle *) deref;
+
+ assert(deref_swizzle->mask.num_components == 1);
+
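+ /* mask.x is the index of the selected component; scaling by sizeof(int)
+ * assumes 32-bit components.
+ */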
+ *const_offset += deref_swizzle->mask.x * sizeof(int);
+ deref = deref_swizzle->val->as_dereference();
+ break;
+ }
+
+ default:
+ assert(!"not reached");
+ deref = NULL;
+ break;
+ }
+ }
+}
+
+} /* namespace lower_buffer_access */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.h
new file mode 100644
index 0000000000..bd177ca142
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_buffer_access.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_buffer_access.h
+ *
+ * Helper for IR lowering pass to replace dereferences of buffer object based
+ * shader variables with intrinsic function calls.
+ *
+ * This helper is used by lowering passes for UBOs, SSBOs and compute shader
+ * shared variables.
+ */
+
+#ifndef LOWER_BUFFER_ACCESS_H
+#define LOWER_BUFFER_ACCESS_H
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+
+namespace lower_buffer_access {
+
+class lower_buffer_access : public ir_rvalue_enter_visitor {
+public:
+ virtual void
+ insert_buffer_access(void *mem_ctx, ir_dereference *deref,
+ const glsl_type *type, ir_rvalue *offset,
+ unsigned mask, int channel) = 0;
+
+ void emit_access(void *mem_ctx, bool is_write, ir_dereference *deref,
+ ir_variable *base_offset, unsigned int deref_offset,
+ bool row_major, const glsl_type *matrix_type,
+ enum glsl_interface_packing packing,
+ unsigned int write_mask);
+
+ bool is_dereferenced_thing_row_major(const ir_rvalue *deref);
+
+ void setup_buffer_access(void *mem_ctx, ir_rvalue *deref,
+ ir_rvalue **offset, unsigned *const_offset,
+ bool *row_major,
+ const glsl_type **matrix_type,
+ const glsl_struct_field **struct_field,
+ enum glsl_interface_packing packing);
+
+protected:
+ bool use_std430_as_default;
+};
+
+} /* namespace lower_buffer_access */
+
+#endif /* LOWER_BUFFER_ACCESS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_builtins.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_builtins.cpp
new file mode 100644
index 0000000000..e7130df7ac
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_builtins.cpp
@@ -0,0 +1,64 @@
+/*
+ * Copyright © 2019 Google, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_builtins.cpp
+ *
+ * Inline calls to builtin functions.
+ */
+
+#include "ir.h"
+#include "ir_optimization.h"
+
+namespace {
+
+class lower_builtins_visitor : public ir_hierarchical_visitor {
+public:
+ lower_builtins_visitor() : progress(false) { }
+ ir_visitor_status visit_leave(ir_call *);
+ bool progress;
+};
+
+}
+
+bool
+lower_builtins(exec_list *instructions)
+{
+ lower_builtins_visitor v;
+ visit_list_elements(&v, instructions);
+ return v.progress;
+}
+
+ir_visitor_status
+lower_builtins_visitor::visit_leave(ir_call *ir)
+{
+ if (!ir->callee->is_builtin())
+ return visit_continue;
+
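+ /* generate_inline() emits the inlined body immediately before the call and
+ * copies the result into the call's return deref, so the call itself can
+ * simply be removed.
+ */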
+ ir->generate_inline(ir);
+ ir->remove();
+
+ this->progress = true;
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp
new file mode 100644
index 0000000000..dbca6321be
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp
@@ -0,0 +1,157 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_const_arrays_to_uniforms.cpp
+ *
+ * Lower constant arrays to uniform arrays.
+ *
+ * Some driver backends (such as i965 and nouveau) don't handle constant arrays
+ * gracefully, instead treating them as ordinary writable temporary arrays.
+ * Since arrays can be large, this often means spilling them to scratch memory,
+ * which usually involves a large number of instructions.
+ *
+ * This must be called prior to link_set_uniform_initializers(); we need the
+ * linker to process our new uniform's constant initializer.
+ *
+ * This should be called after optimizations, since those can result in
+ * splitting and removing arrays that are indexed by constant expressions.
+ */
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+class lower_const_array_visitor : public ir_rvalue_visitor {
+public:
+ lower_const_array_visitor(exec_list *insts, unsigned s,
+ unsigned available_uni_components)
+ {
+ instructions = insts;
+ stage = s;
+ const_count = 0;
+ free_uni_components = available_uni_components;
+ progress = false;
+ }
+
+ bool run()
+ {
+ visit_list_elements(this, instructions);
+ return progress;
+ }
+
+ ir_visitor_status visit_enter(ir_texture *);
+ void handle_rvalue(ir_rvalue **rvalue);
+
+private:
+ exec_list *instructions;
+ unsigned stage;
+ unsigned const_count;
+ unsigned free_uni_components;
+ bool progress;
+};
+
+ir_visitor_status
+lower_const_array_visitor::visit_enter(ir_texture *)
+{
+ return visit_continue_with_parent;
+}
+
+void
+lower_const_array_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_constant *con = (*rvalue)->as_constant();
+ if (!con || !con->type->is_array())
+ return;
+
+ /* How many uniform component slots are required? */
+ unsigned component_slots = con->type->component_slots();
+
+ /* If we would use more components than are available, bail out. */
+ if (component_slots > free_uni_components)
+ return;
+
+ free_uni_components -= component_slots;
+
+ void *mem_ctx = ralloc_parent(con);
+
+ /* In the very unlikely event of 4294967295 constant arrays in a single
+ * shader, don't promote this to a uniform.
+ */
+ unsigned limit = ~0;
+ if (const_count == limit)
+ return;
+
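+ /* e.g. the third constant array lowered in a fragment shader (stage index
+ * 4) would be named "constarray_2_4"; const_count is formatted in hex.
+ */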
+ char *uniform_name = ralloc_asprintf(mem_ctx, "constarray_%x_%u",
+ const_count, stage);
+ const_count++;
+
+ ir_variable *uni =
+ new(mem_ctx) ir_variable(con->type, uniform_name, ir_var_uniform);
+ uni->constant_initializer = con;
+ uni->constant_value = con;
+ uni->data.has_initializer = true;
+ uni->data.how_declared = ir_var_hidden;
+ uni->data.read_only = true;
+ /* Assume the whole thing is accessed. */
+ uni->data.max_array_access = uni->type->length - 1;
+ instructions->push_head(uni);
+
+ *rvalue = new(mem_ctx) ir_dereference_variable(uni);
+
+ progress = true;
+}
+
+} /* anonymous namespace */
+
+
+static unsigned
+count_uniforms(exec_list *instructions)
+{
+ unsigned total = 0;
+
+ foreach_in_list(ir_instruction, node, instructions) {
+ ir_variable *const var = node->as_variable();
+
+ if (!var || var->data.mode != ir_var_uniform)
+ continue;
+
+ total += var->type->component_slots();
+ }
+ return total;
+}
+
+bool
+lower_const_arrays_to_uniforms(exec_list *instructions, unsigned stage,
+ unsigned max_uniform_components)
+{
+ unsigned uniform_components = count_uniforms(instructions);
+ unsigned free_uniform_slots = max_uniform_components - uniform_components;
+
+ lower_const_array_visitor v(instructions, stage, free_uniform_slots);
+ return v.run();
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp
new file mode 100644
index 0000000000..15534b0ac6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright © 2017 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_cs_derived.cpp
+ *
+ * For hardware that does not support the gl_GlobalInvocationID and
+ * gl_LocalInvocationIndex system values, replace them with fresh
+ * globals. Note that we can't rely on gl_WorkGroupSize or
+ * gl_LocalGroupSizeARB being available, since they may only have been defined
+ * in a non-main shader.
+ *
+ * [ This can happen if only a secondary shader has the layout(local_size_*)
+ * declaration. ]
+ *
+ * This is meant to be run post-linking.
+ */
+
+#include "glsl_symbol_table.h"
+#include "ir_hierarchical_visitor.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "linker.h"
+#include "program/prog_statevars.h"
+#include "builtin_functions.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+namespace {
+
+class lower_cs_derived_visitor : public ir_hierarchical_visitor {
+public:
+ explicit lower_cs_derived_visitor(gl_linked_shader *shader)
+ : progress(false),
+ shader(shader),
+ local_size_variable(shader->Program->info.cs.local_size_variable),
+ gl_WorkGroupSize(NULL),
+ gl_WorkGroupID(NULL),
+ gl_LocalInvocationID(NULL),
+ gl_GlobalInvocationID(NULL),
+ gl_LocalInvocationIndex(NULL)
+ {
+ main_sig = _mesa_get_main_function_signature(shader->symbols);
+ assert(main_sig);
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ ir_variable *add_system_value(
+ int slot, const glsl_type *type, const char *name);
+ void find_sysvals();
+ void make_gl_GlobalInvocationID();
+ void make_gl_LocalInvocationIndex();
+
+ bool progress;
+
+private:
+ gl_linked_shader *shader;
+ bool local_size_variable;
+ ir_function_signature *main_sig;
+
+ ir_rvalue *gl_WorkGroupSize;
+ ir_variable *gl_WorkGroupID;
+ ir_variable *gl_LocalInvocationID;
+
+ ir_variable *gl_GlobalInvocationID;
+ ir_variable *gl_LocalInvocationIndex;
+};
+
+} /* anonymous namespace */
+
+ir_variable *
+lower_cs_derived_visitor::add_system_value(
+ int slot, const glsl_type *type, const char *name)
+{
+ ir_variable *var = new(shader) ir_variable(type, name, ir_var_system_value);
+ var->data.how_declared = ir_var_declared_implicitly;
+ var->data.read_only = true;
+ var->data.location = slot;
+ var->data.explicit_location = true;
+ var->data.explicit_index = 0;
+ shader->ir->push_head(var);
+
+ return var;
+}
+
+void
+lower_cs_derived_visitor::find_sysvals()
+{
+ if (gl_WorkGroupSize != NULL)
+ return;
+
+ ir_variable *WorkGroupSize;
+ if (local_size_variable)
+ WorkGroupSize = shader->symbols->get_variable("gl_LocalGroupSizeARB");
+ else
+ WorkGroupSize = shader->symbols->get_variable("gl_WorkGroupSize");
+ if (WorkGroupSize)
+ gl_WorkGroupSize = new(shader) ir_dereference_variable(WorkGroupSize);
+ gl_WorkGroupID = shader->symbols->get_variable("gl_WorkGroupID");
+ gl_LocalInvocationID = shader->symbols->get_variable("gl_LocalInvocationID");
+
+ /*
+ * These may be missing due to either dead code elimination, or, in the
+ * case of the group size, due to the layout being declared in a non-main
+ * shader. Re-create them.
+ */
+
+ if (!gl_WorkGroupID)
+ gl_WorkGroupID = add_system_value(
+ SYSTEM_VALUE_WORK_GROUP_ID, glsl_type::uvec3_type, "gl_WorkGroupID");
+ if (!gl_LocalInvocationID)
+ gl_LocalInvocationID = add_system_value(
+ SYSTEM_VALUE_LOCAL_INVOCATION_ID, glsl_type::uvec3_type,
+ "gl_LocalInvocationID");
+ if (!WorkGroupSize) {
+ if (local_size_variable) {
+ gl_WorkGroupSize = new(shader) ir_dereference_variable(
+ add_system_value(
+ SYSTEM_VALUE_LOCAL_GROUP_SIZE, glsl_type::uvec3_type,
+ "gl_LocalGroupSizeARB"));
+ } else {
+ ir_constant_data data;
+ memset(&data, 0, sizeof(data));
+ for (int i = 0; i < 3; i++)
+ data.u[i] = shader->Program->info.cs.local_size[i];
+ gl_WorkGroupSize = new(shader) ir_constant(glsl_type::uvec3_type, &data);
+ }
+ }
+}
+
+void
+lower_cs_derived_visitor::make_gl_GlobalInvocationID()
+{
+ if (gl_GlobalInvocationID != NULL)
+ return;
+
+ find_sysvals();
+
+ /* gl_GlobalInvocationID =
+ * gl_WorkGroupID * gl_WorkGroupSize + gl_LocalInvocationID
+ */
+ gl_GlobalInvocationID = new(shader) ir_variable(
+ glsl_type::uvec3_type, "__GlobalInvocationID", ir_var_temporary);
+ shader->ir->push_head(gl_GlobalInvocationID);
+
+ ir_instruction *inst =
+ assign(gl_GlobalInvocationID,
+ add(mul(gl_WorkGroupID, gl_WorkGroupSize->clone(shader, NULL)),
+ gl_LocalInvocationID));
+ main_sig->body.push_head(inst);
+}
+
+void
+lower_cs_derived_visitor::make_gl_LocalInvocationIndex()
+{
+ if (gl_LocalInvocationIndex != NULL)
+ return;
+
+ find_sysvals();
+
+ /* gl_LocalInvocationIndex =
+ * gl_LocalInvocationID.z * gl_WorkGroupSize.x * gl_WorkGroupSize.y +
+ * gl_LocalInvocationID.y * gl_WorkGroupSize.x +
+ * gl_LocalInvocationID.x;
+ */
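+   /* For example, with gl_WorkGroupSize = (8,8,1), the invocation at
+    * gl_LocalInvocationID = (3,2,0) gets index 0*8*8 + 2*8 + 3 = 19.
+    */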
+ gl_LocalInvocationIndex = new(shader)
+ ir_variable(glsl_type::uint_type, "__LocalInvocationIndex", ir_var_temporary);
+ shader->ir->push_head(gl_LocalInvocationIndex);
+
+ ir_expression *index_z =
+ mul(mul(swizzle_z(gl_LocalInvocationID), swizzle_x(gl_WorkGroupSize->clone(shader, NULL))),
+ swizzle_y(gl_WorkGroupSize->clone(shader, NULL)));
+ ir_expression *index_y =
+ mul(swizzle_y(gl_LocalInvocationID), swizzle_x(gl_WorkGroupSize->clone(shader, NULL)));
+ ir_expression *index_y_plus_z = add(index_y, index_z);
+ operand index_x(swizzle_x(gl_LocalInvocationID));
+ ir_expression *index_x_plus_y_plus_z = add(index_y_plus_z, index_x);
+ ir_instruction *inst =
+ assign(gl_LocalInvocationIndex, index_x_plus_y_plus_z);
+ main_sig->body.push_head(inst);
+}
+
+ir_visitor_status
+lower_cs_derived_visitor::visit(ir_dereference_variable *ir)
+{
+ if (ir->var->data.mode == ir_var_system_value &&
+ ir->var->data.location == SYSTEM_VALUE_GLOBAL_INVOCATION_ID) {
+ make_gl_GlobalInvocationID();
+ ir->var = gl_GlobalInvocationID;
+ progress = true;
+ }
+
+ if (ir->var->data.mode == ir_var_system_value &&
+ ir->var->data.location == SYSTEM_VALUE_LOCAL_INVOCATION_INDEX) {
+ make_gl_LocalInvocationIndex();
+ ir->var = gl_LocalInvocationIndex;
+ progress = true;
+ }
+
+ return visit_continue;
+}
+
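+/**
+ * Entry point for this pass: rewrites references to the derived system
+ * values gl_GlobalInvocationID and gl_LocalInvocationIndex in a compute
+ * shader so they are computed from gl_WorkGroupID, gl_WorkGroupSize and
+ * gl_LocalInvocationID instead.
+ */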
+bool
+lower_cs_derived(gl_linked_shader *shader)
+{
+ if (shader->Stage != MESA_SHADER_COMPUTE)
+ return false;
+
+ lower_cs_derived_visitor v(shader);
+ v.run(shader->ir);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard.cpp
new file mode 100644
index 0000000000..203d9e3b96
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard.cpp
@@ -0,0 +1,201 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_discard.cpp
+ *
+ * This pass moves discards out of if-statements.
+ *
+ * Case 1: The "then" branch contains a conditional discard:
+ * ---------------------------------------------------------
+ *
+ * if (cond1) {
+ * s1;
+ * discard cond2;
+ * s2;
+ * } else {
+ * s3;
+ * }
+ *
+ * becomes:
+ *
+ * temp = false;
+ * if (cond1) {
+ * s1;
+ * temp = cond2;
+ * s2;
+ * } else {
+ * s3;
+ * }
+ * discard temp;
+ *
+ * Case 2: The "else" branch contains a conditional discard:
+ * ---------------------------------------------------------
+ *
+ * if (cond1) {
+ * s1;
+ * } else {
+ * s2;
+ * discard cond2;
+ * s3;
+ * }
+ *
+ * becomes:
+ *
+ * temp = false;
+ * if (cond1) {
+ * s1;
+ * } else {
+ * s2;
+ * temp = cond2;
+ * s3;
+ * }
+ * discard temp;
+ *
+ * Case 3: Both branches contain a conditional discard:
+ * ----------------------------------------------------
+ *
+ * if (cond1) {
+ * s1;
+ * discard cond2;
+ * s2;
+ * } else {
+ * s3;
+ * discard cond3;
+ * s4;
+ * }
+ *
+ * becomes:
+ *
+ * temp = false;
+ * if (cond1) {
+ * s1;
+ * temp = cond2;
+ * s2;
+ * } else {
+ * s3;
+ * temp = cond3;
+ * s4;
+ * }
+ * discard temp;
+ *
+ * If there are multiple conditional discards, we need only deal with one of
+ * them. Repeatedly applying this pass will take care of the others.
+ *
+ * Unconditional discards are treated as having a condition of "true".
+ */
+
+#include "compiler/glsl_types.h"
+#include "ir.h"
+
+namespace {
+
+class lower_discard_visitor : public ir_hierarchical_visitor {
+public:
+ lower_discard_visitor()
+ {
+ this->progress = false;
+ }
+
+ ir_visitor_status visit_leave(ir_if *);
+
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+bool
+lower_discard(exec_list *instructions)
+{
+ lower_discard_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
+
+
+static ir_discard *
+find_discard(exec_list &instructions)
+{
+ foreach_in_list(ir_instruction, node, &instructions) {
+ ir_discard *ir = node->as_discard();
+ if (ir != NULL)
+ return ir;
+ }
+ return NULL;
+}
+
+
+static void
+replace_discard(void *mem_ctx, ir_variable *var, ir_discard *ir)
+{
+ ir_rvalue *condition = ir->condition;
+
+ /* For unconditional discards, use "true" as the condition. */
+ if (condition == NULL)
+ condition = new(mem_ctx) ir_constant(true);
+
+ ir_assignment *assignment =
+ new(mem_ctx) ir_assignment(new(mem_ctx) ir_dereference_variable(var),
+ condition);
+
+ ir->replace_with(assignment);
+}
+
+
+ir_visitor_status
+lower_discard_visitor::visit_leave(ir_if *ir)
+{
+ ir_discard *then_discard = find_discard(ir->then_instructions);
+ ir_discard *else_discard = find_discard(ir->else_instructions);
+
+ if (then_discard == NULL && else_discard == NULL)
+ return visit_continue;
+
+ void *mem_ctx = ralloc_parent(ir);
+
+ ir_variable *temp = new(mem_ctx) ir_variable(glsl_type::bool_type,
+ "discard_cond_temp",
+ ir_var_temporary);
+ ir_assignment *temp_initializer =
+ new(mem_ctx) ir_assignment(new(mem_ctx) ir_dereference_variable(temp),
+ new(mem_ctx) ir_constant(false));
+
+ ir->insert_before(temp);
+ ir->insert_before(temp_initializer);
+
+ if (then_discard != NULL)
+ replace_discard(mem_ctx, temp, then_discard);
+
+ if (else_discard != NULL)
+ replace_discard(mem_ctx, temp, else_discard);
+
+ ir_discard *discard = then_discard != NULL ? then_discard : else_discard;
+ discard->condition = new(mem_ctx) ir_dereference_variable(temp);
+ ir->insert_after(discard);
+
+ this->progress = true;
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp
new file mode 100644
index 0000000000..1a30afe7c1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/** @file lower_discard_flow.cpp
+ *
+ * Implements the GLSL 1.30 revision 9 rule for fragment shader
+ * discard handling:
+ *
+ * "Control flow exits the shader, and subsequent implicit or
+ * explicit derivatives are undefined when this control flow is
+ * non-uniform (meaning different fragments within the primitive
+ * take different control paths)."
+ *
+ * There seem to be two conflicting things here. "Control flow exits
+ * the shader" sounds like the discarded fragments should effectively
+ * jump to the end of the shader, but that breaks derivatives in the
+ * case of uniform control flow and causes rendering failure in the
+ * bushes in Unigine Tropics.
+ *
+ * The question, then, is whether the intent was "loops stop at the
+ * point that the only active channels left are discarded pixels" or
+ * "discarded pixels become inactive at the point that control flow
+ * returns to the top of a loop". This implements the second
+ * interpretation.
+ */
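+
+/*
+ * As a rough sketch of the effect, a fragment shader loop such as:
+ *
+ *    for (...) {
+ *       if (c)
+ *          discard;
+ *       ...
+ *    }
+ *
+ * is rewritten (in IR terms) along the lines of:
+ *
+ *    bool discarded = false;    // initialized at the top of main()
+ *    for (...) {
+ *       discarded = c;
+ *       discard (if discarded);
+ *       ...
+ *       if (discarded)          // also inserted before any `continue`
+ *          break;
+ *    }
+ */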
+
+#include "compiler/glsl_types.h"
+#include "ir.h"
+
+namespace {
+
+class lower_discard_flow_visitor : public ir_hierarchical_visitor {
+public:
+ lower_discard_flow_visitor(ir_variable *discarded)
+ : discarded(discarded)
+ {
+ mem_ctx = ralloc_parent(discarded);
+ }
+
+ ~lower_discard_flow_visitor()
+ {
+ }
+
+ ir_visitor_status visit(ir_loop_jump *ir);
+ ir_visitor_status visit_enter(ir_discard *ir);
+ ir_visitor_status visit_enter(ir_loop *ir);
+ ir_visitor_status visit_enter(ir_function_signature *ir);
+
+ ir_if *generate_discard_break();
+
+ ir_variable *discarded;
+ void *mem_ctx;
+};
+
+} /* anonymous namespace */
+
+ir_visitor_status
+lower_discard_flow_visitor::visit(ir_loop_jump *ir)
+{
+ if (ir->mode != ir_loop_jump::jump_continue)
+ return visit_continue;
+
+ ir->insert_before(generate_discard_break());
+
+ return visit_continue;
+}
+
+ir_visitor_status
+lower_discard_flow_visitor::visit_enter(ir_discard *ir)
+{
+ ir_dereference *lhs = new(mem_ctx) ir_dereference_variable(discarded);
+ ir_rvalue *rhs;
+ if (ir->condition) {
+ /* discarded <- condition, use (var_ref discarded) as the condition */
+ rhs = ir->condition;
+ ir->condition = new(mem_ctx) ir_dereference_variable(discarded);
+ } else {
+ rhs = new(mem_ctx) ir_constant(true);
+ }
+ ir_assignment *assign = new(mem_ctx) ir_assignment(lhs, rhs);
+ ir->insert_before(assign);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+lower_discard_flow_visitor::visit_enter(ir_loop *ir)
+{
+ ir->body_instructions.push_tail(generate_discard_break());
+
+ return visit_continue;
+}
+
+ir_visitor_status
+lower_discard_flow_visitor::visit_enter(ir_function_signature *ir)
+{
+ if (strcmp(ir->function_name(), "main") != 0)
+ return visit_continue;
+
+ ir_dereference *lhs = new(mem_ctx) ir_dereference_variable(discarded);
+ ir_rvalue *rhs = new(mem_ctx) ir_constant(false);
+ ir_assignment *assign = new(mem_ctx) ir_assignment(lhs, rhs);
+ ir->body.push_head(assign);
+
+ return visit_continue;
+}
+
+ir_if *
+lower_discard_flow_visitor::generate_discard_break()
+{
+ ir_rvalue *if_condition = new(mem_ctx) ir_dereference_variable(discarded);
+ ir_if *if_inst = new(mem_ctx) ir_if(if_condition);
+
+ ir_instruction *br = new(mem_ctx) ir_loop_jump(ir_loop_jump::jump_break);
+ if_inst->then_instructions.push_tail(br);
+
+ return if_inst;
+}
+
+void
+lower_discard_flow(exec_list *ir)
+{
+ void *mem_ctx = ir;
+
+ ir_variable *var = new(mem_ctx) ir_variable(glsl_type::bool_type,
+ "discarded",
+ ir_var_temporary);
+
+ ir->push_head(var);
+
+ lower_discard_flow_visitor v(var);
+
+ visit_list_elements(&v, ir);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_distance.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_distance.cpp
new file mode 100644
index 0000000000..b4e730c64f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_distance.cpp
@@ -0,0 +1,685 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_distance.cpp
+ *
+ * This pass accounts for the difference between the way
+ * gl_ClipDistance is declared in standard GLSL (as an array of
+ * floats), and the way it is frequently implemented in hardware (as
+ * a pair of vec4s, with four clip distances packed into each).
+ *
+ * The declaration of gl_ClipDistance is replaced with a declaration
+ * of gl_ClipDistanceMESA, and any references to gl_ClipDistance are
+ * translated to refer to gl_ClipDistanceMESA with the appropriate
+ * swizzling of array indices. For instance:
+ *
+ * gl_ClipDistance[i]
+ *
+ * is translated into:
+ *
+ * gl_ClipDistanceMESA[i>>2][i&3]
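+ *
+ * (so, for example, gl_ClipDistance[6] becomes gl_ClipDistanceMESA[1].z)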
+ *
+ * Since some hardware may not internally represent gl_ClipDistance as a pair
+ * of vec4's, this lowering pass is optional. To enable it, set the
+ * LowerCombinedClipCullDistance flag in gl_shader_compiler_options to true.
+ */
+
+#include "main/macros.h"
+#include "glsl_symbol_table.h"
+#include "ir_rvalue_visitor.h"
+#include "ir.h"
+#include "program/prog_instruction.h" /* For WRITEMASK_* */
+#include "main/mtypes.h"
+
+#define GLSL_CLIP_VAR_NAME "gl_ClipDistanceMESA"
+
+namespace {
+
+class lower_distance_visitor : public ir_rvalue_visitor {
+public:
+ explicit lower_distance_visitor(gl_shader_stage shader_stage,
+ const char *in_name, int total_size,
+ int offset)
+ : progress(false), old_distance_out_var(NULL),
+ old_distance_in_var(NULL), new_distance_out_var(NULL),
+ new_distance_in_var(NULL), shader_stage(shader_stage),
+ in_name(in_name), total_size(total_size), offset(offset)
+ {
+ }
+
+ explicit lower_distance_visitor(gl_shader_stage shader_stage,
+ const char *in_name,
+ const lower_distance_visitor *orig,
+ int offset)
+ : progress(false),
+ old_distance_out_var(NULL),
+ old_distance_in_var(NULL),
+ new_distance_out_var(orig->new_distance_out_var),
+ new_distance_in_var(orig->new_distance_in_var),
+ shader_stage(shader_stage),
+ in_name(in_name),
+ total_size(orig->total_size),
+ offset(offset)
+ {
+ }
+
+ virtual ir_visitor_status visit(ir_variable *);
+ void create_indices(ir_rvalue*, ir_rvalue *&, ir_rvalue *&);
+ bool is_distance_vec8(ir_rvalue *ir);
+ ir_rvalue *lower_distance_vec8(ir_rvalue *ir);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ void visit_new_assignment(ir_assignment *ir);
+ virtual ir_visitor_status visit_leave(ir_call *);
+
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+
+ void fix_lhs(ir_assignment *);
+
+ bool progress;
+
+ /**
+ * Pointer to the declaration of gl_ClipDistance, if found.
+ *
+ * Note:
+ *
+    * - the in_var is used only for geometry and tessellation (control and
+    *   evaluation) shader inputs.
+ *
+ * - since gl_ClipDistance is available in tessellation control,
+ * tessellation evaluation and geometry shaders as both an input
+ * and an output, it's possible for both old_distance_out_var
+ * and old_distance_in_var to be non-null.
+ */
+ ir_variable *old_distance_out_var;
+ ir_variable *old_distance_in_var;
+
+ /**
+ * Pointer to the newly-created gl_ClipDistanceMESA variable.
+ */
+ ir_variable *new_distance_out_var;
+ ir_variable *new_distance_in_var;
+
+ /**
+ * Type of shader we are compiling (e.g. MESA_SHADER_VERTEX)
+ */
+ const gl_shader_stage shader_stage;
+ const char *in_name;
+ int total_size;
+ int offset;
+};
+
+} /* anonymous namespace */
+
+/**
+ * Replace any declaration of 'in_name' as an array of floats with a
+ * declaration of gl_ClipDistanceMESA as an array of vec4's.
+ */
+ir_visitor_status
+lower_distance_visitor::visit(ir_variable *ir)
+{
+ ir_variable **old_var;
+ ir_variable **new_var;
+
+ if (!ir->name || strcmp(ir->name, in_name) != 0)
+ return visit_continue;
+ assert (ir->type->is_array());
+
+ if (ir->data.mode == ir_var_shader_out) {
+ if (this->old_distance_out_var)
+ return visit_continue;
+ old_var = &old_distance_out_var;
+ new_var = &new_distance_out_var;
+ } else if (ir->data.mode == ir_var_shader_in) {
+ if (this->old_distance_in_var)
+ return visit_continue;
+ old_var = &old_distance_in_var;
+ new_var = &new_distance_in_var;
+ } else {
+ unreachable("not reached");
+ }
+
+ this->progress = true;
+
+ *old_var = ir;
+
+ if (!(*new_var)) {
+ unsigned new_size = (total_size + 3) / 4;
+
+ /* Clone the old var so that we inherit all of its properties */
+ *new_var = ir->clone(ralloc_parent(ir), NULL);
+ (*new_var)->name = ralloc_strdup(*new_var, GLSL_CLIP_VAR_NAME);
+ (*new_var)->data.location = VARYING_SLOT_CLIP_DIST0;
+
+ if (!ir->type->fields.array->is_array()) {
+ /* gl_ClipDistance (used for vertex, tessellation evaluation and
+ * geometry output, and fragment input).
+ */
+ assert((ir->data.mode == ir_var_shader_in &&
+ this->shader_stage == MESA_SHADER_FRAGMENT) ||
+ (ir->data.mode == ir_var_shader_out &&
+ (this->shader_stage == MESA_SHADER_VERTEX ||
+ this->shader_stage == MESA_SHADER_TESS_EVAL ||
+ this->shader_stage == MESA_SHADER_GEOMETRY)));
+
+ assert (ir->type->fields.array == glsl_type::float_type);
+ (*new_var)->data.max_array_access = new_size - 1;
+
+ /* And change the properties that we need to change */
+ (*new_var)->type = glsl_type::get_array_instance(glsl_type::vec4_type,
+ new_size);
+ } else {
+ /* 2D gl_ClipDistance (used for tessellation control, tessellation
+ * evaluation and geometry input, and tessellation control output).
+ */
+ assert((ir->data.mode == ir_var_shader_in &&
+ (this->shader_stage == MESA_SHADER_GEOMETRY ||
+ this->shader_stage == MESA_SHADER_TESS_EVAL)) ||
+ this->shader_stage == MESA_SHADER_TESS_CTRL);
+
+ assert (ir->type->fields.array->fields.array == glsl_type::float_type);
+
+ /* And change the properties that we need to change */
+ (*new_var)->type = glsl_type::get_array_instance(
+ glsl_type::get_array_instance(glsl_type::vec4_type,
+ new_size),
+ ir->type->array_size());
+ }
+ ir->replace_with(*new_var);
+ } else {
+ ir->remove();
+ }
+
+ return visit_continue;
+}
+
+
+/**
+ * Create the necessary GLSL rvalues to index into gl_ClipDistanceMESA based
+ * on the rvalue previously used to index into gl_ClipDistance.
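+ *
+ * For example, an old index of 5 (with offset 0) yields an array_index of 1
+ * and a swizzle_index of 1, i.e. gl_ClipDistanceMESA[1].y.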
+ *
+ * \param array_index Selects one of the vec4's in gl_ClipDistanceMESA
+ * \param swizzle_index Selects a component within the vec4 selected by
+ * array_index.
+ */
+void
+lower_distance_visitor::create_indices(ir_rvalue *old_index,
+ ir_rvalue *&array_index,
+ ir_rvalue *&swizzle_index)
+{
+ void *ctx = ralloc_parent(old_index);
+
+ /* Make sure old_index is a signed int so that the bitwise "shift" and
+ * "and" operations below type check properly.
+ */
+ if (old_index->type != glsl_type::int_type) {
+ assert (old_index->type == glsl_type::uint_type);
+ old_index = new(ctx) ir_expression(ir_unop_u2i, old_index);
+ }
+
+ ir_constant *old_index_constant =
+ old_index->constant_expression_value(ctx);
+ if (old_index_constant) {
+ /* gl_ClipDistance is being accessed via a constant index. Don't bother
+ * creating expressions to calculate the lowered indices. Just create
+ * constants.
+ */
+ int const_val = old_index_constant->get_int_component(0) + offset;
+ array_index = new(ctx) ir_constant(const_val / 4);
+ swizzle_index = new(ctx) ir_constant(const_val % 4);
+ } else {
+ /* Create a variable to hold the value of old_index (so that we
+ * don't compute it twice).
+ */
+ ir_variable *old_index_var = new(ctx) ir_variable(
+ glsl_type::int_type, "distance_index", ir_var_temporary);
+ this->base_ir->insert_before(old_index_var);
+ this->base_ir->insert_before(new(ctx) ir_assignment(
+ new(ctx) ir_dereference_variable(old_index_var), old_index));
+
+      /* Create the expression (distance_index + offset) / 4. Do this as a
+       * bit shift because that's likely to be more efficient.
+ */
+ array_index = new(ctx) ir_expression(
+ ir_binop_rshift,
+ new(ctx) ir_expression(ir_binop_add,
+ new(ctx) ir_dereference_variable(old_index_var),
+ new(ctx) ir_constant(offset)),
+ new(ctx) ir_constant(2));
+
+      /* Create the expression (distance_index + offset) % 4. Do this as a
+       * bitwise AND because that's likely to be more efficient.
+ */
+ swizzle_index = new(ctx) ir_expression(
+ ir_binop_bit_and,
+ new(ctx) ir_expression(ir_binop_add,
+ new(ctx) ir_dereference_variable(old_index_var),
+ new(ctx) ir_constant(offset)),
+ new(ctx) ir_constant(3));
+ }
+}
+
+
+/**
+ * Determine whether the given rvalue describes an array of 8 floats that
+ * needs to be lowered to an array of 2 vec4's; that is, determine whether it
+ * matches one of the following patterns:
+ *
+ * - gl_ClipDistance (if gl_ClipDistance is 1D)
+ * - gl_ClipDistance[i] (if gl_ClipDistance is 2D)
+ */
+bool
+lower_distance_visitor::is_distance_vec8(ir_rvalue *ir)
+{
+ /* Note that geometry shaders contain gl_ClipDistance both as an input
+ * (which is a 2D array) and an output (which is a 1D array), so it's
+ * possible for both this->old_distance_out_var and
+ * this->old_distance_in_var to be non-NULL in the same shader.
+ */
+
+ if (!ir->type->is_array())
+ return false;
+ if (ir->type->fields.array != glsl_type::float_type)
+ return false;
+
+ if (this->old_distance_out_var) {
+ if (ir->variable_referenced() == this->old_distance_out_var)
+ return true;
+ }
+ if (this->old_distance_in_var) {
+ assert(this->shader_stage == MESA_SHADER_TESS_CTRL ||
+ this->shader_stage == MESA_SHADER_TESS_EVAL ||
+ this->shader_stage == MESA_SHADER_GEOMETRY ||
+ this->shader_stage == MESA_SHADER_FRAGMENT);
+
+ if (ir->variable_referenced() == this->old_distance_in_var)
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ * If the given ir satisfies is_distance_vec8(), return new ir
+ * representing its lowered equivalent. That is, map:
+ *
+ * - gl_ClipDistance => gl_ClipDistanceMESA (if gl_ClipDistance is 1D)
+ * - gl_ClipDistance[i] => gl_ClipDistanceMESA[i] (if gl_ClipDistance is 2D)
+ *
+ * Otherwise return NULL.
+ */
+ir_rvalue *
+lower_distance_visitor::lower_distance_vec8(ir_rvalue *ir)
+{
+ if (!ir->type->is_array())
+ return NULL;
+ if (ir->type->fields.array != glsl_type::float_type)
+ return NULL;
+
+ ir_variable **new_var = NULL;
+ if (this->old_distance_out_var) {
+ if (ir->variable_referenced() == this->old_distance_out_var)
+ new_var = &this->new_distance_out_var;
+ }
+ if (this->old_distance_in_var) {
+ if (ir->variable_referenced() == this->old_distance_in_var)
+ new_var = &this->new_distance_in_var;
+ }
+ if (new_var == NULL)
+ return NULL;
+
+ if (ir->as_dereference_variable()) {
+ return new(ralloc_parent(ir)) ir_dereference_variable(*new_var);
+ } else {
+ ir_dereference_array *array_ref = ir->as_dereference_array();
+ assert(array_ref);
+ assert(array_ref->array->as_dereference_variable());
+
+ return new(ralloc_parent(ir))
+ ir_dereference_array(*new_var, array_ref->array_index);
+ }
+}
+
+
+void
+lower_distance_visitor::handle_rvalue(ir_rvalue **rv)
+{
+ if (*rv == NULL)
+ return;
+
+ ir_dereference_array *const array_deref = (*rv)->as_dereference_array();
+ if (array_deref == NULL)
+ return;
+
+ /* Replace any expression that indexes one of the floats in gl_ClipDistance
+ * with an expression that indexes into one of the vec4's in
+ * gl_ClipDistanceMESA and accesses the appropriate component.
+ */
+ ir_rvalue *lowered_vec8 =
+ this->lower_distance_vec8(array_deref->array);
+ if (lowered_vec8 != NULL) {
+ this->progress = true;
+ ir_rvalue *array_index;
+ ir_rvalue *swizzle_index;
+ this->create_indices(array_deref->array_index, array_index, swizzle_index);
+ void *mem_ctx = ralloc_parent(array_deref);
+
+ ir_dereference_array *const new_array_deref =
+ new(mem_ctx) ir_dereference_array(lowered_vec8, array_index);
+
+ ir_expression *const expr =
+ new(mem_ctx) ir_expression(ir_binop_vector_extract,
+ new_array_deref,
+ swizzle_index);
+
+ *rv = expr;
+ }
+}
+
+void
+lower_distance_visitor::fix_lhs(ir_assignment *ir)
+{
+ if (ir->lhs->ir_type == ir_type_expression) {
+ void *mem_ctx = ralloc_parent(ir);
+ ir_expression *const expr = (ir_expression *) ir->lhs;
+
+ /* The expression must be of the form:
+ *
+ * (vector_extract gl_ClipDistanceMESA[i], j).
+ */
+ assert(expr->operation == ir_binop_vector_extract);
+ assert(expr->operands[0]->ir_type == ir_type_dereference_array);
+ assert(expr->operands[0]->type == glsl_type::vec4_type);
+
+ ir_dereference *const new_lhs = (ir_dereference *) expr->operands[0];
+ ir->rhs = new(mem_ctx) ir_expression(ir_triop_vector_insert,
+ glsl_type::vec4_type,
+ new_lhs->clone(mem_ctx, NULL),
+ ir->rhs,
+ expr->operands[1]);
+ ir->set_lhs(new_lhs);
+ ir->write_mask = WRITEMASK_XYZW;
+ }
+}
+
+/**
+ * Replace any assignment having the 1D gl_ClipDistance (undereferenced) as
+ * its LHS or RHS with a sequence of assignments, one for each component of
+ * the array. Each of these assignments is lowered to refer to
+ * gl_ClipDistanceMESA as appropriate.
+ *
+ * We need to do a similar replacement for 2D gl_ClipDistance; however, since
+ * it's an input, the only case we need to address is where a 1D slice of it
+ * is the entire RHS of an assignment, e.g.:
+ *
+ * foo = gl_in[i].gl_ClipDistance
+ */
+ir_visitor_status
+lower_distance_visitor::visit_leave(ir_assignment *ir)
+{
+ /* First invoke the base class visitor. This causes handle_rvalue() to be
+ * called on ir->rhs and ir->condition.
+ */
+ ir_rvalue_visitor::visit_leave(ir);
+
+ if (this->is_distance_vec8(ir->lhs) ||
+ this->is_distance_vec8(ir->rhs)) {
+ /* LHS or RHS of the assignment is the entire 1D gl_ClipDistance array
+ * (or a 1D slice of a 2D gl_ClipDistance input array). Since we are
+ * reshaping gl_ClipDistance from an array of floats to an array of
+ * vec4's, this isn't going to work as a bulk assignment anymore, so
+ * unroll it to element-by-element assignments and lower each of them.
+ *
+ * Note: to unroll into element-by-element assignments, we need to make
+ * clones of the LHS and RHS. This is safe because expressions and
+ * l-values are side-effect free.
+ */
+ void *ctx = ralloc_parent(ir);
+ int array_size = ir->lhs->type->array_size();
+ for (int i = 0; i < array_size; ++i) {
+ ir_dereference_array *new_lhs = new(ctx) ir_dereference_array(
+ ir->lhs->clone(ctx, NULL), new(ctx) ir_constant(i));
+ ir_dereference_array *new_rhs = new(ctx) ir_dereference_array(
+ ir->rhs->clone(ctx, NULL), new(ctx) ir_constant(i));
+ this->handle_rvalue((ir_rvalue **) &new_rhs);
+
+ /* Handle the LHS after creating the new assignment. This must
+ * happen in this order because handle_rvalue may replace the old LHS
+ * with an ir_expression of ir_binop_vector_extract. Since this is
+          * not a valid l-value, this will cause an assertion in the
+ * ir_assignment constructor to fail.
+ *
+ * If this occurs, replace the mangled LHS with a dereference of the
+ * vector, and replace the RHS with an ir_triop_vector_insert.
+ */
+ ir_assignment *const assign = new(ctx) ir_assignment(new_lhs, new_rhs);
+ this->handle_rvalue((ir_rvalue **) &assign->lhs);
+ this->fix_lhs(assign);
+
+ this->base_ir->insert_before(assign);
+ }
+ ir->remove();
+
+ return visit_continue;
+ }
+
+ /* Handle the LHS as if it were an r-value. Normally
+ * rvalue_visit(ir_assignment *) only visits the RHS, but we need to lower
+ * expressions in the LHS as well.
+ *
+ * This may cause the LHS to get replaced with an ir_expression of
+ * ir_binop_vector_extract. If this occurs, replace it with a dereference
+ * of the vector, and replace the RHS with an ir_triop_vector_insert.
+ */
+ handle_rvalue((ir_rvalue **)&ir->lhs);
+ this->fix_lhs(ir);
+
+ return rvalue_visit(ir);
+}
+
+
+/**
+ * Set up base_ir properly and call visit_leave() on a newly created
+ * ir_assignment node. This is used in cases where we have to insert an
+ * ir_assignment in a place where we know the hierarchical visitor won't see
+ * it.
+ */
+void
+lower_distance_visitor::visit_new_assignment(ir_assignment *ir)
+{
+ ir_instruction *old_base_ir = this->base_ir;
+ this->base_ir = ir;
+ ir->accept(this);
+ this->base_ir = old_base_ir;
+}
+
+
+/**
+ * If a 1D gl_ClipDistance variable appears as an argument in an ir_call
+ * expression, replace it with a temporary variable, and make sure the ir_call
+ * is preceded and/or followed by assignments that copy the contents of the
+ * temporary variable to and/or from gl_ClipDistance. Each of these
+ * assignments is then lowered to refer to gl_ClipDistanceMESA.
+ *
+ * We need to do a similar replacement for 2D gl_ClipDistance; however, since
+ * it's an input, the only case we need to address is where a 1D slice of it
+ * is passed as an "in" parameter to an ir_call, e.g.:
+ *
+ * foo(gl_in[i].gl_ClipDistance)
+ */
+ir_visitor_status
+lower_distance_visitor::visit_leave(ir_call *ir)
+{
+ void *ctx = ralloc_parent(ir);
+
+ const exec_node *formal_param_node = ir->callee->parameters.get_head_raw();
+ const exec_node *actual_param_node = ir->actual_parameters.get_head_raw();
+ while (!actual_param_node->is_tail_sentinel()) {
+ ir_variable *formal_param = (ir_variable *) formal_param_node;
+ ir_rvalue *actual_param = (ir_rvalue *) actual_param_node;
+
+ /* Advance formal_param_node and actual_param_node now so that we can
+ * safely replace actual_param with another node, if necessary, below.
+ */
+ formal_param_node = formal_param_node->next;
+ actual_param_node = actual_param_node->next;
+
+ if (this->is_distance_vec8(actual_param)) {
+ /* User is trying to pass the whole 1D gl_ClipDistance array (or a 1D
+ * slice of a 2D gl_ClipDistance array) to a function call. Since we
+ * are reshaping gl_ClipDistance from an array of floats to an array
+ * of vec4's, this isn't going to work anymore, so use a temporary
+ * array instead.
+ */
+ ir_variable *temp_clip_distance = new(ctx) ir_variable(
+ actual_param->type, "temp_clip_distance", ir_var_temporary);
+ this->base_ir->insert_before(temp_clip_distance);
+ actual_param->replace_with(
+ new(ctx) ir_dereference_variable(temp_clip_distance));
+ if (formal_param->data.mode == ir_var_function_in
+ || formal_param->data.mode == ir_var_function_inout) {
+ /* Copy from gl_ClipDistance to the temporary before the call.
+ * Since we are going to insert this copy before the current
+ * instruction, we need to visit it afterwards to make sure it
+ * gets lowered.
+ */
+ ir_assignment *new_assignment = new(ctx) ir_assignment(
+ new(ctx) ir_dereference_variable(temp_clip_distance),
+ actual_param->clone(ctx, NULL));
+ this->base_ir->insert_before(new_assignment);
+ this->visit_new_assignment(new_assignment);
+ }
+ if (formal_param->data.mode == ir_var_function_out
+ || formal_param->data.mode == ir_var_function_inout) {
+ /* Copy from the temporary to gl_ClipDistance after the call.
+ * Since visit_list_elements() has already decided which
+             * instruction it's going to visit next, we need to visit it
+             * afterwards to make sure it gets lowered.
+ */
+ ir_assignment *new_assignment = new(ctx) ir_assignment(
+ actual_param->clone(ctx, NULL),
+ new(ctx) ir_dereference_variable(temp_clip_distance));
+ this->base_ir->insert_after(new_assignment);
+ this->visit_new_assignment(new_assignment);
+ }
+ }
+ }
+
+ return rvalue_visit(ir);
+}
+
+namespace {
+class lower_distance_visitor_counter : public ir_rvalue_visitor {
+public:
+ explicit lower_distance_visitor_counter(void)
+ : in_clip_size(0), in_cull_size(0),
+ out_clip_size(0), out_cull_size(0)
+ {
+ }
+
+ virtual ir_visitor_status visit(ir_variable *);
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+
+ int in_clip_size;
+ int in_cull_size;
+ int out_clip_size;
+ int out_cull_size;
+};
+
+} /* anonymous namespace */
+
+/**
+ * Count gl_ClipDistance and gl_CullDistance sizes.
+ */
+ir_visitor_status
+lower_distance_visitor_counter::visit(ir_variable *ir)
+{
+ int *clip_size, *cull_size;
+
+ if (!ir->name)
+ return visit_continue;
+
+ if (ir->data.mode == ir_var_shader_out) {
+ clip_size = &out_clip_size;
+ cull_size = &out_cull_size;
+ } else if (ir->data.mode == ir_var_shader_in) {
+ clip_size = &in_clip_size;
+ cull_size = &in_cull_size;
+ } else
+ return visit_continue;
+
+ if (ir->type->is_unsized_array())
+ return visit_continue;
+
+ if (*clip_size == 0) {
+ if (!strcmp(ir->name, "gl_ClipDistance")) {
+ if (!ir->type->fields.array->is_array())
+ *clip_size = ir->type->array_size();
+ else
+ *clip_size = ir->type->fields.array->array_size();
+ }
+ }
+
+ if (*cull_size == 0) {
+ if (!strcmp(ir->name, "gl_CullDistance")) {
+ if (!ir->type->fields.array->is_array())
+ *cull_size = ir->type->array_size();
+ else
+ *cull_size = ir->type->fields.array->array_size();
+ }
+ }
+ return visit_continue;
+}
+
+void
+lower_distance_visitor_counter::handle_rvalue(ir_rvalue **)
+{
+ return;
+}
+
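+/**
+ * Lower gl_ClipDistance and gl_CullDistance into a single combined
+ * gl_ClipDistanceMESA array of vec4's: the clip distances are packed
+ * starting at offset 0 and the cull distances follow at offset clip_size.
+ * For example, with 4 clip and 2 cull distances, the combined 6 floats
+ * occupy two vec4's and gl_CullDistance[0] lands in gl_ClipDistanceMESA[1].x.
+ */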
+bool
+lower_clip_cull_distance(struct gl_shader_program *prog,
+ struct gl_linked_shader *shader)
+{
+ int clip_size, cull_size;
+
+ lower_distance_visitor_counter count;
+ visit_list_elements(&count, shader->ir);
+
+ clip_size = MAX2(count.in_clip_size, count.out_clip_size);
+ cull_size = MAX2(count.in_cull_size, count.out_cull_size);
+
+ if (clip_size == 0 && cull_size == 0)
+ return false;
+
+ lower_distance_visitor v(shader->Stage, "gl_ClipDistance", clip_size + cull_size, 0);
+ visit_list_elements(&v, shader->ir);
+
+ lower_distance_visitor v2(shader->Stage, "gl_CullDistance", &v, clip_size);
+ visit_list_elements(&v2, shader->ir);
+
+ if (v2.new_distance_out_var)
+ shader->symbols->add_variable(v2.new_distance_out_var);
+ if (v2.new_distance_in_var)
+ shader->symbols->add_variable(v2.new_distance_in_var);
+
+ return v2.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp
new file mode 100644
index 0000000000..ca61f1d527
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp
@@ -0,0 +1,333 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_if_to_cond_assign.cpp
+ *
+ * This flattens if-statements to conditional assignments if:
+ *
+ * - the GPU has limited or no flow control support
+ * (controlled by max_depth)
+ *
+ * - small conditional branches are more expensive than conditional assignments
+ *   (controlled by min_branch_cost, the minimum cost a branch must have
+ *   in order to be preserved)
+ *
+ * It can't handle other control flow being inside of its block, such
+ * as calls or loops. Hopefully loop unrolling and inlining will take
+ * care of those.
+ *
+ * Drivers for GPUs with no control flow support should simply call
+ *
+ * lower_if_to_cond_assign(instructions)
+ *
+ * to attempt to flatten all if-statements.
+ *
+ * Some GPUs (such as i965 prior to gen6) do support control flow, but have a
+ * maximum nesting depth N. Drivers for such hardware can call
+ *
+ * lower_if_to_cond_assign(instructions, N)
+ *
+ * to attempt to flatten any if-statements appearing at depth > N.
+ */
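+
+/*
+ * As a rough sketch of the transformation, an if-statement such as:
+ *
+ *    if (cond) { x = a; } else { x = b; }
+ *
+ * is flattened into (roughly):
+ *
+ *    bool if_to_cond_assign_then = cond;
+ *    x = a;   (assignment predicated on if_to_cond_assign_then)
+ *    bool if_to_cond_assign_else = !if_to_cond_assign_then;
+ *    x = b;   (assignment predicated on if_to_cond_assign_else)
+ */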
+
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "util/set.h"
+#include "util/hash_table.h" /* Needed for the hashing functions */
+#include "main/macros.h" /* for MAX2 */
+
+namespace {
+
+class ir_if_to_cond_assign_visitor : public ir_hierarchical_visitor {
+public:
+ ir_if_to_cond_assign_visitor(gl_shader_stage stage,
+ unsigned max_depth,
+ unsigned min_branch_cost)
+ {
+ this->progress = false;
+ this->stage = stage;
+ this->max_depth = max_depth;
+ this->min_branch_cost = min_branch_cost;
+ this->depth = 0;
+
+ this->condition_variables = _mesa_pointer_set_create(NULL);
+ }
+
+ ~ir_if_to_cond_assign_visitor()
+ {
+ _mesa_set_destroy(this->condition_variables, NULL);
+ }
+
+ ir_visitor_status visit_enter(ir_if *);
+ ir_visitor_status visit_leave(ir_if *);
+
+ bool found_unsupported_op;
+ bool found_expensive_op;
+ bool found_dynamic_arrayref;
+ bool is_then;
+ bool progress;
+ gl_shader_stage stage;
+ unsigned then_cost;
+ unsigned else_cost;
+ unsigned min_branch_cost;
+ unsigned max_depth;
+ unsigned depth;
+
+ struct set *condition_variables;
+};
+
+} /* anonymous namespace */
+
+bool
+lower_if_to_cond_assign(gl_shader_stage stage, exec_list *instructions,
+ unsigned max_depth, unsigned min_branch_cost)
+{
+ if (max_depth == UINT_MAX)
+ return false;
+
+ ir_if_to_cond_assign_visitor v(stage, max_depth, min_branch_cost);
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
+
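+/**
+ * visit_tree() callback: flags instructions that make a branch impossible
+ * to flatten (calls, loops, discards, ...), notes expensive ops and dynamic
+ * array indexing, and accumulates a rough cost for the then/else blocks.
+ */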
+static void
+check_ir_node(ir_instruction *ir, void *data)
+{
+ ir_if_to_cond_assign_visitor *v = (ir_if_to_cond_assign_visitor *)data;
+
+ switch (ir->ir_type) {
+ case ir_type_call:
+ case ir_type_discard:
+ case ir_type_loop:
+ case ir_type_loop_jump:
+ case ir_type_return:
+ case ir_type_emit_vertex:
+ case ir_type_end_primitive:
+ case ir_type_barrier:
+ v->found_unsupported_op = true;
+ break;
+
+ case ir_type_dereference_variable: {
+ ir_variable *var = ir->as_dereference_variable()->variable_referenced();
+
+ /* Lowering branches with TCS output accesses breaks many piglit tests,
+ * so don't touch them for now.
+ */
+ if (v->stage == MESA_SHADER_TESS_CTRL &&
+ var->data.mode == ir_var_shader_out)
+ v->found_unsupported_op = true;
+ break;
+ }
+
+ /* SSBO, images, atomic counters are handled by ir_type_call */
+ case ir_type_texture:
+ v->found_expensive_op = true;
+ break;
+
+ case ir_type_dereference_array: {
+ ir_dereference_array *deref = ir->as_dereference_array();
+
+ if (deref->array_index->ir_type != ir_type_constant)
+ v->found_dynamic_arrayref = true;
+ } /* fall-through */
+ case ir_type_expression:
+ case ir_type_dereference_record:
+ if (v->is_then)
+ v->then_cost++;
+ else
+ v->else_cost++;
+ break;
+
+ default:
+ break;
+ }
+}
+
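+/**
+ * Move every instruction in \c instructions out in front of \c if_ir,
+ * predicating each assignment on \c cond_expr (ANDed with any existing
+ * condition). Unconditional assignments to previously-created condition
+ * variables are instead rewritten as "cv = cond && rhs" so that they stay
+ * false on the untaken path.
+ */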
+static void
+move_block_to_cond_assign(void *mem_ctx,
+ ir_if *if_ir, ir_rvalue *cond_expr,
+ exec_list *instructions,
+ struct set *set)
+{
+ foreach_in_list_safe(ir_instruction, ir, instructions) {
+ if (ir->ir_type == ir_type_assignment) {
+ ir_assignment *assign = (ir_assignment *)ir;
+
+ if (_mesa_set_search(set, assign) == NULL) {
+ _mesa_set_add(set, assign);
+
+ /* If the LHS of the assignment is a condition variable that was
+ * previously added, insert an additional assignment of false to
+ * the variable.
+ */
+ const bool assign_to_cv =
+ _mesa_set_search(
+ set, assign->lhs->variable_referenced()) != NULL;
+
+ if (!assign->condition) {
+ if (assign_to_cv) {
+ assign->rhs =
+ new(mem_ctx) ir_expression(ir_binop_logic_and,
+ glsl_type::bool_type,
+ cond_expr->clone(mem_ctx, NULL),
+ assign->rhs);
+ } else {
+ assign->condition = cond_expr->clone(mem_ctx, NULL);
+ }
+ } else {
+ assign->condition =
+ new(mem_ctx) ir_expression(ir_binop_logic_and,
+ glsl_type::bool_type,
+ cond_expr->clone(mem_ctx, NULL),
+ assign->condition);
+ }
+ }
+ }
+
+ /* Now, move from the if block to the block surrounding it. */
+ ir->remove();
+ if_ir->insert_before(ir);
+ }
+}
+
+ir_visitor_status
+ir_if_to_cond_assign_visitor::visit_enter(ir_if *)
+{
+ this->depth++;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_if_to_cond_assign_visitor::visit_leave(ir_if *ir)
+{
+ bool must_lower = this->depth-- > this->max_depth;
+
+   /* If lowering isn't forced by exceeding the maximum nesting depth and
+    * cost-based flattening is disabled, there is nothing to do.
+    */
+ if (!must_lower && this->min_branch_cost == 0)
+ return visit_continue;
+
+ this->found_unsupported_op = false;
+ this->found_expensive_op = false;
+ this->found_dynamic_arrayref = false;
+ this->then_cost = 0;
+ this->else_cost = 0;
+
+ ir_assignment *assign;
+
+ /* Check that both blocks don't contain anything we can't support. */
+ this->is_then = true;
+ foreach_in_list(ir_instruction, then_ir, &ir->then_instructions) {
+ visit_tree(then_ir, check_ir_node, this);
+ }
+
+ this->is_then = false;
+ foreach_in_list(ir_instruction, else_ir, &ir->else_instructions) {
+ visit_tree(else_ir, check_ir_node, this);
+ }
+
+ if (this->found_unsupported_op)
+ return visit_continue; /* can't handle inner unsupported opcodes */
+
+ /* Skip if the branch cost is high enough or if there's an expensive op.
+ *
+ * Also skip if non-constant array indices were encountered, since those
+ * can be out-of-bounds for a not-taken branch, and so generating an
+ * assignment would be incorrect. In the case of must_lower, it's up to the
+ * backend to deal with any potential fall-out (perhaps by translating the
+ * assignments to hardware-predicated moves).
+ */
+ if (!must_lower &&
+ (this->found_expensive_op ||
+ this->found_dynamic_arrayref ||
+ MAX2(this->then_cost, this->else_cost) >= this->min_branch_cost))
+ return visit_continue;
+
+ void *mem_ctx = ralloc_parent(ir);
+
+ /* Store the condition to a variable. Move all of the instructions from
+ * the then-clause of the if-statement. Use the condition variable as a
+ * condition for all assignments.
+ */
+ ir_variable *const then_var =
+ new(mem_ctx) ir_variable(glsl_type::bool_type,
+ "if_to_cond_assign_then",
+ ir_var_temporary);
+ ir->insert_before(then_var);
+
+ ir_dereference_variable *then_cond =
+ new(mem_ctx) ir_dereference_variable(then_var);
+
+ assign = new(mem_ctx) ir_assignment(then_cond, ir->condition);
+ ir->insert_before(assign);
+
+ move_block_to_cond_assign(mem_ctx, ir, then_cond,
+ &ir->then_instructions,
+ this->condition_variables);
+
+ /* Add the new condition variable to the hash table. This allows us to
+ * find this variable when lowering other (enclosing) if-statements.
+ */
+ _mesa_set_add(this->condition_variables, then_var);
+
+ /* If there are instructions in the else-clause, store the inverse of the
+ * condition to a variable. Move all of the instructions from the
+    * else-clause of the if-statement. Use the (inverse) condition variable
+ * as a condition for all assignments.
+ */
+ if (!ir->else_instructions.is_empty()) {
+ ir_variable *const else_var =
+ new(mem_ctx) ir_variable(glsl_type::bool_type,
+ "if_to_cond_assign_else",
+ ir_var_temporary);
+ ir->insert_before(else_var);
+
+ ir_dereference_variable *else_cond =
+ new(mem_ctx) ir_dereference_variable(else_var);
+
+ ir_rvalue *inverse =
+ new(mem_ctx) ir_expression(ir_unop_logic_not,
+ then_cond->clone(mem_ctx, NULL));
+
+ assign = new(mem_ctx) ir_assignment(else_cond, inverse);
+ ir->insert_before(assign);
+
+ move_block_to_cond_assign(mem_ctx, ir, else_cond,
+ &ir->else_instructions,
+ this->condition_variables);
+
+ /* Add the new condition variable to the hash table. This allows us to
+ * find this variable when lowering other (enclosing) if-statements.
+ */
+ _mesa_set_add(this->condition_variables, else_var);
+ }
+
+ ir->remove();
+
+ this->progress = true;
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp
new file mode 100644
index 0000000000..c549d16d2a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp
@@ -0,0 +1,1914 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_instructions.cpp
+ *
+ * Many GPUs lack native instructions for certain expression operations, and
+ * must replace them with some other expression tree. This pass lowers some
+ * of the most common cases, allowing the lowering code to be implemented once
+ * rather than in each driver backend.
+ *
+ * Currently supported transformations:
+ * - SUB_TO_ADD_NEG
+ * - DIV_TO_MUL_RCP
+ * - INT_DIV_TO_MUL_RCP
+ * - EXP_TO_EXP2
+ * - POW_TO_EXP2
+ * - LOG_TO_LOG2
+ * - MOD_TO_FLOOR
+ * - LDEXP_TO_ARITH
+ * - DFREXP_DLDEXP_TO_ARITH
+ * - CARRY_TO_ARITH
+ * - BORROW_TO_ARITH
+ * - SAT_TO_CLAMP
+ * - DOPS_TO_DFRAC
+ *
+ * SUB_TO_ADD_NEG:
+ * ---------------
+ * Breaks an ir_binop_sub expression down to add(op0, neg(op1))
+ *
+ * This simplifies expression reassociation, and for many backends
+ * there is no subtract operation separate from adding the negation.
+ * For backends with native subtract operations, they will probably
+ * want to recognize add(op0, neg(op1)) or the other way around to
+ * produce a subtract anyway.
+ *
+ * FDIV_TO_MUL_RCP, DDIV_TO_MUL_RCP, and INT_DIV_TO_MUL_RCP:
+ * ---------------------------------------------------------
+ * Breaks an ir_binop_div expression down to op0 * (rcp(op1)).
+ *
+ * Many GPUs don't have a divide instruction (945 and 965 included),
+ * but they do have an RCP instruction to compute an approximate
+ * reciprocal. By breaking the operation down, constant reciprocals
+ * can get constant folded.
+ *
+ * FDIV_TO_MUL_RCP lowers single-precision and half-precision
+ * floating point division;
+ * DDIV_TO_MUL_RCP only lowers double-precision floating point division.
+ * DIV_TO_MUL_RCP is a convenience macro that sets both flags.
+ * INT_DIV_TO_MUL_RCP handles the integer case, converting to and from floating
+ * point so that RCP is possible.
+ *
+ * EXP_TO_EXP2 and LOG_TO_LOG2:
+ * ----------------------------
+ * Many GPUs don't have a base e log or exponent instruction, but they
+ * do have base 2 versions, so this pass converts exp and log to exp2
+ * and log2 operations.
+ *
+ * POW_TO_EXP2:
+ * ------------
+ * Many older GPUs don't have an x**y instruction. For these GPUs, convert
+ * x**y to 2**(y * log2(x)).
+ *
+ * MOD_TO_FLOOR:
+ * -------------
+ * Breaks an ir_binop_mod expression down to (op0 - op1 * floor(op0 / op1))
+ *
+ * Many GPUs don't have a MOD instruction (945 and 965 included), and
+ * if we have to break it down like this anyway, it gives an
+ * opportunity to do things like constant fold the (1.0 / op1) easily.
+ *
+ * Note: we previously implemented this as op1 * fract(op0 / op1), but that
+ * implementation had significant precision errors.
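+ *
+ * For example, mod(7.5, 2.0) lowers to 7.5 - 2.0 * floor(7.5 / 2.0) =
+ * 7.5 - 2.0 * 3.0 = 1.5.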
+ *
+ * LDEXP_TO_ARITH:
+ * ---------------
+ * Converts ir_binop_ldexp to arithmetic and bit operations for float sources.
+ *
+ * DFREXP_DLDEXP_TO_ARITH:
+ * -----------------------
+ * Converts ir_binop_ldexp, ir_unop_frexp_sig, and ir_unop_frexp_exp to
+ * arithmetic and bit ops for double arguments.
+ *
+ * CARRY_TO_ARITH:
+ * ---------------
+ * Converts ir_carry into (x + y) < x.
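+ * (With wrapping unsigned arithmetic, the sum is smaller than x exactly
+ * when the addition carried.)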
+ *
+ * BORROW_TO_ARITH:
+ * ----------------
+ * Converts ir_borrow into (x < y).
+ *
+ * SAT_TO_CLAMP:
+ * -------------
+ * Converts ir_unop_saturate into min(max(x, 0.0), 1.0)
+ *
+ * DOPS_TO_DFRAC:
+ * --------------
+ * Converts double trunc, ceil, floor, round to fract
+ */
+
+#include "c99_math.h"
+#include "program/prog_instruction.h" /* for swizzle */
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "util/half_float.h"
+
+using namespace ir_builder;
+
+namespace {
+
+class lower_instructions_visitor : public ir_hierarchical_visitor {
+public:
+ lower_instructions_visitor(unsigned lower)
+ : progress(false), lower(lower) { }
+
+ ir_visitor_status visit_leave(ir_expression *);
+
+ bool progress;
+
+private:
+   unsigned lower; /**< Bitfield of which operations to lower */
+
+ void sub_to_add_neg(ir_expression *);
+ void div_to_mul_rcp(ir_expression *);
+ void int_div_to_mul_rcp(ir_expression *);
+ void mod_to_floor(ir_expression *);
+ void exp_to_exp2(ir_expression *);
+ void pow_to_exp2(ir_expression *);
+ void log_to_log2(ir_expression *);
+ void ldexp_to_arith(ir_expression *);
+ void dldexp_to_arith(ir_expression *);
+ void dfrexp_sig_to_arith(ir_expression *);
+ void dfrexp_exp_to_arith(ir_expression *);
+ void carry_to_arith(ir_expression *);
+ void borrow_to_arith(ir_expression *);
+ void sat_to_clamp(ir_expression *);
+ void double_dot_to_fma(ir_expression *);
+ void double_lrp(ir_expression *);
+ void dceil_to_dfrac(ir_expression *);
+ void dfloor_to_dfrac(ir_expression *);
+ void dround_even_to_dfrac(ir_expression *);
+ void dtrunc_to_dfrac(ir_expression *);
+ void dsign_to_csel(ir_expression *);
+ void bit_count_to_math(ir_expression *);
+ void extract_to_shifts(ir_expression *);
+ void insert_to_shifts(ir_expression *);
+ void reverse_to_shifts(ir_expression *ir);
+ void find_lsb_to_float_cast(ir_expression *ir);
+ void find_msb_to_float_cast(ir_expression *ir);
+ void imul_high_to_mul(ir_expression *ir);
+ void sqrt_to_abs_sqrt(ir_expression *ir);
+ void mul64_to_mul_and_mul_high(ir_expression *ir);
+
+ ir_expression *_carry(operand a, operand b);
+
+ static ir_constant *_imm_fp(void *mem_ctx,
+ const glsl_type *type,
+ double f,
+ unsigned vector_elements=1);
+};
+
+} /* anonymous namespace */
+
+/**
+ * Determine if a particular type of lowering should occur
+ */
+#define lowering(x) (this->lower & x)
+
+bool
+lower_instructions(exec_list *instructions, unsigned what_to_lower)
+{
+ lower_instructions_visitor v(what_to_lower);
+
+ visit_list_elements(&v, instructions);
+ return v.progress;
+}
+
+void
+lower_instructions_visitor::sub_to_add_neg(ir_expression *ir)
+{
+ ir->operation = ir_binop_add;
+ ir->init_num_operands();
+ ir->operands[1] = new(ir) ir_expression(ir_unop_neg, ir->operands[1]->type,
+ ir->operands[1], NULL);
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::div_to_mul_rcp(ir_expression *ir)
+{
+ assert(ir->operands[1]->type->is_float_16_32_64());
+
+ /* New expression for the 1.0 / op1 */
+ ir_rvalue *expr;
+ expr = new(ir) ir_expression(ir_unop_rcp,
+ ir->operands[1]->type,
+ ir->operands[1]);
+
+ /* op0 / op1 -> op0 * (1.0 / op1) */
+ ir->operation = ir_binop_mul;
+ ir->init_num_operands();
+ ir->operands[1] = expr;
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::int_div_to_mul_rcp(ir_expression *ir)
+{
+ assert(ir->operands[1]->type->is_integer_32());
+
+ /* Be careful with integer division -- we need to do it as a
+ * float and re-truncate, since rcp(n > 1) of an integer would
+ * just be 0.
+ */
+ ir_rvalue *op0, *op1;
+ const struct glsl_type *vec_type;
+
+ vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ ir->operands[1]->type->vector_elements,
+ ir->operands[1]->type->matrix_columns);
+
+ if (ir->operands[1]->type->base_type == GLSL_TYPE_INT)
+ op1 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[1], NULL);
+ else
+ op1 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[1], NULL);
+
+ op1 = new(ir) ir_expression(ir_unop_rcp, op1->type, op1, NULL);
+
+ vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ ir->operands[0]->type->vector_elements,
+ ir->operands[0]->type->matrix_columns);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_INT)
+ op0 = new(ir) ir_expression(ir_unop_i2f, vec_type, ir->operands[0], NULL);
+ else
+ op0 = new(ir) ir_expression(ir_unop_u2f, vec_type, ir->operands[0], NULL);
+
+ vec_type = glsl_type::get_instance(GLSL_TYPE_FLOAT,
+ ir->type->vector_elements,
+ ir->type->matrix_columns);
+
+ op0 = new(ir) ir_expression(ir_binop_mul, vec_type, op0, op1);
+
+ if (ir->operands[1]->type->base_type == GLSL_TYPE_INT) {
+ ir->operation = ir_unop_f2i;
+ ir->operands[0] = op0;
+ } else {
+ ir->operation = ir_unop_i2u;
+ ir->operands[0] = new(ir) ir_expression(ir_unop_f2i, op0);
+ }
+ ir->init_num_operands();
+ ir->operands[1] = NULL;
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::exp_to_exp2(ir_expression *ir)
+{
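+   /* exp(x) == exp2(x * log2(e)), so scale the operand by M_LOG2E and
+    * switch the operation to exp2.
+    */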
+ ir_constant *log2_e = _imm_fp(ir, ir->type, M_LOG2E);
+
+ ir->operation = ir_unop_exp2;
+ ir->init_num_operands();
+ ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[0]->type,
+ ir->operands[0], log2_e);
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::pow_to_exp2(ir_expression *ir)
+{
+ ir_expression *const log2_x =
+ new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
+ ir->operands[0]);
+
+ ir->operation = ir_unop_exp2;
+ ir->init_num_operands();
+ ir->operands[0] = new(ir) ir_expression(ir_binop_mul, ir->operands[1]->type,
+ ir->operands[1], log2_x);
+ ir->operands[1] = NULL;
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::log_to_log2(ir_expression *ir)
+{
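+   /* log(x) == log2(x) * ln(2), and ln(2) == 1.0 / M_LOG2E. */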
+ ir->operation = ir_binop_mul;
+ ir->init_num_operands();
+ ir->operands[0] = new(ir) ir_expression(ir_unop_log2, ir->operands[0]->type,
+ ir->operands[0], NULL);
+ ir->operands[1] = _imm_fp(ir, ir->operands[0]->type, 1.0 / M_LOG2E);
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::mod_to_floor(ir_expression *ir)
+{
+ ir_variable *x = new(ir) ir_variable(ir->operands[0]->type, "mod_x",
+ ir_var_temporary);
+ ir_variable *y = new(ir) ir_variable(ir->operands[1]->type, "mod_y",
+ ir_var_temporary);
+ this->base_ir->insert_before(x);
+ this->base_ir->insert_before(y);
+
+ ir_assignment *const assign_x =
+ new(ir) ir_assignment(new(ir) ir_dereference_variable(x),
+ ir->operands[0]);
+ ir_assignment *const assign_y =
+ new(ir) ir_assignment(new(ir) ir_dereference_variable(y),
+ ir->operands[1]);
+
+ this->base_ir->insert_before(assign_x);
+ this->base_ir->insert_before(assign_y);
+
+ ir_expression *const div_expr =
+ new(ir) ir_expression(ir_binop_div, x->type,
+ new(ir) ir_dereference_variable(x),
+ new(ir) ir_dereference_variable(y));
+
+ /* Don't generate new IR that would need to be lowered in an additional
+ * pass.
+ */
+ if ((lowering(FDIV_TO_MUL_RCP) && ir->type->is_float_16_32()) ||
+ (lowering(DDIV_TO_MUL_RCP) && ir->type->is_double()))
+ div_to_mul_rcp(div_expr);
+
+ ir_expression *const floor_expr =
+ new(ir) ir_expression(ir_unop_floor, x->type, div_expr);
+
+ if (lowering(DOPS_TO_DFRAC) && ir->type->is_double())
+ dfloor_to_dfrac(floor_expr);
+
+ ir_expression *const mul_expr =
+ new(ir) ir_expression(ir_binop_mul,
+ new(ir) ir_dereference_variable(y),
+ floor_expr);
+
+ ir->operation = ir_binop_sub;
+ ir->init_num_operands();
+ ir->operands[0] = new(ir) ir_dereference_variable(x);
+ ir->operands[1] = mul_expr;
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::ldexp_to_arith(ir_expression *ir)
+{
+ /* Translates
+ * ir_binop_ldexp x exp
+ * into
+ *
+ * extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
+ * resulting_biased_exp = min(extracted_biased_exp + exp, 255);
+ *
+ * if (extracted_biased_exp >= 255)
+ * return x; // +/-inf, NaN
+ *
+ * sign_mantissa = bitcast_f2u(x) & sign_mantissa_mask;
+ *
+ * if (min(resulting_biased_exp, extracted_biased_exp) < 1)
+ * resulting_biased_exp = 0;
+ * if (resulting_biased_exp >= 255 ||
+ * min(resulting_biased_exp, extracted_biased_exp) < 1) {
+ * sign_mantissa &= sign_mask;
+ * }
+ *
+ * return bitcast_u2f(sign_mantissa |
+ * lshift(i2u(resulting_biased_exp), exp_shift));
+ *
+ * which we can't actually implement as such, since the GLSL IR doesn't
+ * have vectorized if-statements. We actually implement it without branches
+ * using conditional-select:
+ *
+ * extracted_biased_exp = rshift(bitcast_f2i(abs(x)), exp_shift);
+ * resulting_biased_exp = min(extracted_biased_exp + exp, 255);
+ *
+ * sign_mantissa = bitcast_f2u(x) & sign_mantissa_mask;
+ *
+ * flush_to_zero = lequal(min(resulting_biased_exp, extracted_biased_exp), 0);
+ * resulting_biased_exp = csel(flush_to_zero, 0, resulting_biased_exp)
+ * zero_mantissa = logic_or(flush_to_zero,
+ * gequal(resulting_biased_exp, 255));
+ * sign_mantissa = csel(zero_mantissa, sign_mantissa & sign_mask, sign_mantissa);
+ *
+ * result = sign_mantissa |
+    *          lshift(i2u(resulting_biased_exp), exp_shift);
+ *
+ * return csel(extracted_biased_exp >= 255, x, bitcast_u2f(result));
+ *
+ * The definition of ldexp in the GLSL spec says:
+ *
+ * "If this product is too large to be represented in the
+ * floating-point type, the result is undefined."
+ *
+ * However, the definition of ldexp in the GLSL ES spec does not contain
+ * this sentence, so we do need to handle overflow correctly.
+ *
+ * There is additional language limiting the defined range of exp, but this
+ * is merely to allow implementations that store 2^exp in a temporary
+ * variable.
+ */
+
+ const unsigned vec_elem = ir->type->vector_elements;
+
+ /* Types */
+ const glsl_type *ivec = glsl_type::get_instance(GLSL_TYPE_INT, vec_elem, 1);
+ const glsl_type *uvec = glsl_type::get_instance(GLSL_TYPE_UINT, vec_elem, 1);
+ const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
+
+ /* Temporary variables */
+ ir_variable *x = new(ir) ir_variable(ir->type, "x", ir_var_temporary);
+ ir_variable *exp = new(ir) ir_variable(ivec, "exp", ir_var_temporary);
+ ir_variable *result = new(ir) ir_variable(uvec, "result", ir_var_temporary);
+
+ ir_variable *extracted_biased_exp =
+ new(ir) ir_variable(ivec, "extracted_biased_exp", ir_var_temporary);
+ ir_variable *resulting_biased_exp =
+ new(ir) ir_variable(ivec, "resulting_biased_exp", ir_var_temporary);
+
+ ir_variable *sign_mantissa =
+ new(ir) ir_variable(uvec, "sign_mantissa", ir_var_temporary);
+
+ ir_variable *flush_to_zero =
+ new(ir) ir_variable(bvec, "flush_to_zero", ir_var_temporary);
+ ir_variable *zero_mantissa =
+ new(ir) ir_variable(bvec, "zero_mantissa", ir_var_temporary);
+
+ ir_instruction &i = *base_ir;
+
+ /* Copy <x> and <exp> arguments. */
+ i.insert_before(x);
+ i.insert_before(assign(x, ir->operands[0]));
+ i.insert_before(exp);
+ i.insert_before(assign(exp, ir->operands[1]));
+
+ /* Extract the biased exponent from <x>. */
+ i.insert_before(extracted_biased_exp);
+ i.insert_before(assign(extracted_biased_exp,
+ rshift(bitcast_f2i(abs(x)),
+ new(ir) ir_constant(23, vec_elem))));
+
+ /* The definition of ldexp in the GLSL 4.60 spec says:
+ *
+ * "If exp is greater than +128 (single-precision) or +1024
+ * (double-precision), the value returned is undefined. If exp is less
+ * than -126 (single-precision) or -1022 (double-precision), the value
+ * returned may be flushed to zero."
+ *
+ * So we do not have to guard against the possibility of addition overflow,
+ * which could happen when exp is close to INT_MAX. Addition underflow
+ * cannot happen (the worst case is 0 + (-INT_MAX)).
+ */
+ i.insert_before(resulting_biased_exp);
+ i.insert_before(assign(resulting_biased_exp,
+ min2(add(extracted_biased_exp, exp),
+ new(ir) ir_constant(255, vec_elem))));
+
+ i.insert_before(sign_mantissa);
+ i.insert_before(assign(sign_mantissa,
+ bit_and(bitcast_f2u(x),
+ new(ir) ir_constant(0x807fffffu, vec_elem))));
+
+ /* We flush to zero if the original or resulting biased exponent is 0,
+ * indicating a +/-0.0 or subnormal input or output.
+ *
+ * The mantissa is set to 0 if the resulting biased exponent is 255, since
+ * an overflow should produce a +/-inf result.
+ *
+ * Note that NaN inputs are handled separately.
+ */
+ i.insert_before(flush_to_zero);
+ i.insert_before(assign(flush_to_zero,
+ lequal(min2(resulting_biased_exp,
+ extracted_biased_exp),
+ ir_constant::zero(ir, ivec))));
+ i.insert_before(assign(resulting_biased_exp,
+ csel(flush_to_zero,
+ ir_constant::zero(ir, ivec),
+ resulting_biased_exp)));
+
+ i.insert_before(zero_mantissa);
+ i.insert_before(assign(zero_mantissa,
+ logic_or(flush_to_zero,
+ equal(resulting_biased_exp,
+ new(ir) ir_constant(255, vec_elem)))));
+ i.insert_before(assign(sign_mantissa,
+ csel(zero_mantissa,
+ bit_and(sign_mantissa,
+ new(ir) ir_constant(0x80000000u, vec_elem)),
+ sign_mantissa)));
+
+ /* Don't generate new IR that would need to be lowered in an additional
+ * pass.
+ */
+ i.insert_before(result);
+ if (!lowering(INSERT_TO_SHIFTS)) {
+ i.insert_before(assign(result,
+ bitfield_insert(sign_mantissa,
+ i2u(resulting_biased_exp),
+ new(ir) ir_constant(23u, vec_elem),
+ new(ir) ir_constant(8u, vec_elem))));
+ } else {
+ i.insert_before(assign(result,
+ bit_or(sign_mantissa,
+ lshift(i2u(resulting_biased_exp),
+ new(ir) ir_constant(23, vec_elem)))));
+ }
+
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = gequal(extracted_biased_exp,
+ new(ir) ir_constant(255, vec_elem));
+ ir->operands[1] = new(ir) ir_dereference_variable(x);
+ ir->operands[2] = bitcast_u2f(result);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dldexp_to_arith(ir_expression *ir)
+{
+ /* See ldexp_to_arith for structure. Uses frexp_exp to extract the exponent
+ * from the significand.
+ */
+
+ const unsigned vec_elem = ir->type->vector_elements;
+
+ /* Types */
+ const glsl_type *ivec = glsl_type::get_instance(GLSL_TYPE_INT, vec_elem, 1);
+ const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
+
+ /* Constants */
+ ir_constant *zeroi = ir_constant::zero(ir, ivec);
+
+ ir_constant *sign_mask = new(ir) ir_constant(0x80000000u);
+
+ ir_constant *exp_shift = new(ir) ir_constant(20u);
+ ir_constant *exp_width = new(ir) ir_constant(11u);
+ ir_constant *exp_bias = new(ir) ir_constant(1022, vec_elem);
+
+ /* Temporary variables */
+ ir_variable *x = new(ir) ir_variable(ir->type, "x", ir_var_temporary);
+ ir_variable *exp = new(ir) ir_variable(ivec, "exp", ir_var_temporary);
+
+ ir_variable *zero_sign_x = new(ir) ir_variable(ir->type, "zero_sign_x",
+ ir_var_temporary);
+
+ ir_variable *extracted_biased_exp =
+ new(ir) ir_variable(ivec, "extracted_biased_exp", ir_var_temporary);
+ ir_variable *resulting_biased_exp =
+ new(ir) ir_variable(ivec, "resulting_biased_exp", ir_var_temporary);
+
+ ir_variable *is_not_zero_or_underflow =
+ new(ir) ir_variable(bvec, "is_not_zero_or_underflow", ir_var_temporary);
+
+ ir_instruction &i = *base_ir;
+
+ /* Copy <x> and <exp> arguments. */
+ i.insert_before(x);
+ i.insert_before(assign(x, ir->operands[0]));
+ i.insert_before(exp);
+ i.insert_before(assign(exp, ir->operands[1]));
+
+ ir_expression *frexp_exp = expr(ir_unop_frexp_exp, x);
+ if (lowering(DFREXP_DLDEXP_TO_ARITH))
+ dfrexp_exp_to_arith(frexp_exp);
+
+ /* Extract the biased exponent from <x>. */
+ i.insert_before(extracted_biased_exp);
+ i.insert_before(assign(extracted_biased_exp, add(frexp_exp, exp_bias)));
+
+ i.insert_before(resulting_biased_exp);
+ i.insert_before(assign(resulting_biased_exp,
+ add(extracted_biased_exp, exp)));
+
+ /* Test if result is ±0.0, subnormal, or underflow by checking if the
+ * resulting biased exponent would be less than 0x1. If so, the result is
+ * 0.0 with the sign of x. (Actually, invert the conditions so that
+ * immediate values are the second arguments, which is better for i965)
+ * TODO: Implement in a vector fashion.
+ */
+ i.insert_before(zero_sign_x);
+ for (unsigned elem = 0; elem < vec_elem; elem++) {
+ ir_variable *unpacked =
+ new(ir) ir_variable(glsl_type::uvec2_type, "unpacked", ir_var_temporary);
+ i.insert_before(unpacked);
+ i.insert_before(
+ assign(unpacked,
+ expr(ir_unop_unpack_double_2x32, swizzle(x, elem, 1))));
+ i.insert_before(assign(unpacked, bit_and(swizzle_y(unpacked), sign_mask->clone(ir, NULL)),
+ WRITEMASK_Y));
+ i.insert_before(assign(unpacked, ir_constant::zero(ir, glsl_type::uint_type), WRITEMASK_X));
+ i.insert_before(assign(zero_sign_x,
+ expr(ir_unop_pack_double_2x32, unpacked),
+ 1 << elem));
+ }
+ i.insert_before(is_not_zero_or_underflow);
+ i.insert_before(assign(is_not_zero_or_underflow,
+ gequal(resulting_biased_exp,
+ new(ir) ir_constant(0x1, vec_elem))));
+ i.insert_before(assign(x, csel(is_not_zero_or_underflow,
+ x, zero_sign_x)));
+ i.insert_before(assign(resulting_biased_exp,
+ csel(is_not_zero_or_underflow,
+ resulting_biased_exp, zeroi)));
+
+ /* We could test for overflows by checking if the resulting biased exponent
+ * would be greater than 0xFE. Turns out we don't need to because the GLSL
+ * spec says:
+ *
+ * "If this product is too large to be represented in the
+ * floating-point type, the result is undefined."
+ */
+
+ ir_rvalue *results[4] = {NULL};
+ for (unsigned elem = 0; elem < vec_elem; elem++) {
+ ir_variable *unpacked =
+ new(ir) ir_variable(glsl_type::uvec2_type, "unpacked", ir_var_temporary);
+ i.insert_before(unpacked);
+ i.insert_before(
+ assign(unpacked,
+ expr(ir_unop_unpack_double_2x32, swizzle(x, elem, 1))));
+
+ ir_expression *bfi = bitfield_insert(
+ swizzle_y(unpacked),
+ i2u(swizzle(resulting_biased_exp, elem, 1)),
+ exp_shift->clone(ir, NULL),
+ exp_width->clone(ir, NULL));
+
+ i.insert_before(assign(unpacked, bfi, WRITEMASK_Y));
+
+ results[elem] = expr(ir_unop_pack_double_2x32, unpacked);
+ }
+
+ ir->operation = ir_quadop_vector;
+ ir->init_num_operands();
+ ir->operands[0] = results[0];
+ ir->operands[1] = results[1];
+ ir->operands[2] = results[2];
+ ir->operands[3] = results[3];
+
+ /* Don't generate new IR that would need to be lowered in an additional
+ * pass.
+ */
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dfrexp_sig_to_arith(ir_expression *ir)
+{
+ const unsigned vec_elem = ir->type->vector_elements;
+ const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
+
+ /* Double-precision floating-point values are stored as
+ * 1 sign bit;
+ * 11 exponent bits;
+ * 52 mantissa bits.
+ *
+ * We're just extracting the significand here, so we only need to modify
+ * the upper 32-bit uint. Unfortunately we must extract each double
+ * independently as there is no vector version of unpackDouble.
+ */
+
+ ir_instruction &i = *base_ir;
+
+ ir_variable *is_not_zero =
+ new(ir) ir_variable(bvec, "is_not_zero", ir_var_temporary);
+ ir_rvalue *results[4] = {NULL};
+
+ ir_constant *dzero = new(ir) ir_constant(0.0, vec_elem);
+ i.insert_before(is_not_zero);
+ i.insert_before(
+ assign(is_not_zero,
+ nequal(abs(ir->operands[0]->clone(ir, NULL)), dzero)));
+
+ /* TODO: Remake this as more vector-friendly when int64 support is
+ * available.
+ */
+ for (unsigned elem = 0; elem < vec_elem; elem++) {
+ ir_constant *zero = new(ir) ir_constant(0u, 1);
+ ir_constant *sign_mantissa_mask = new(ir) ir_constant(0x800fffffu, 1);
+
+ /* Exponent of double floating-point values in the range [0.5, 1.0). */
+ ir_constant *exponent_value = new(ir) ir_constant(0x3fe00000u, 1);
+
+ ir_variable *bits =
+ new(ir) ir_variable(glsl_type::uint_type, "bits", ir_var_temporary);
+ ir_variable *unpacked =
+ new(ir) ir_variable(glsl_type::uvec2_type, "unpacked", ir_var_temporary);
+
+ ir_rvalue *x = swizzle(ir->operands[0]->clone(ir, NULL), elem, 1);
+
+ i.insert_before(bits);
+ i.insert_before(unpacked);
+ i.insert_before(assign(unpacked, expr(ir_unop_unpack_double_2x32, x)));
+
+ /* Manipulate the high uint to remove the exponent and replace it with
+ * either the default exponent or zero.
+ */
+ i.insert_before(assign(bits, swizzle_y(unpacked)));
+ i.insert_before(assign(bits, bit_and(bits, sign_mantissa_mask)));
+ i.insert_before(assign(bits, bit_or(bits,
+ csel(swizzle(is_not_zero, elem, 1),
+ exponent_value,
+ zero))));
+ i.insert_before(assign(unpacked, bits, WRITEMASK_Y));
+ results[elem] = expr(ir_unop_pack_double_2x32, unpacked);
+ }
+
+ /* Put the dvec back together */
+ ir->operation = ir_quadop_vector;
+ ir->init_num_operands();
+ ir->operands[0] = results[0];
+ ir->operands[1] = results[1];
+ ir->operands[2] = results[2];
+ ir->operands[3] = results[3];
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dfrexp_exp_to_arith(ir_expression *ir)
+{
+ const unsigned vec_elem = ir->type->vector_elements;
+ const glsl_type *bvec = glsl_type::get_instance(GLSL_TYPE_BOOL, vec_elem, 1);
+ const glsl_type *uvec = glsl_type::get_instance(GLSL_TYPE_UINT, vec_elem, 1);
+
+ /* Double-precision floating-point values are stored as
+ * 1 sign bit;
+ * 11 exponent bits;
+ * 52 mantissa bits.
+ *
+ * We're just extracting the exponent here, so we only care about the upper
+ * 32-bit uint.
+ */
+
+ ir_instruction &i = *base_ir;
+
+ ir_variable *is_not_zero =
+ new(ir) ir_variable(bvec, "is_not_zero", ir_var_temporary);
+ ir_variable *high_words =
+ new(ir) ir_variable(uvec, "high_words", ir_var_temporary);
+ ir_constant *dzero = new(ir) ir_constant(0.0, vec_elem);
+ ir_constant *izero = new(ir) ir_constant(0, vec_elem);
+
+ ir_rvalue *absval = abs(ir->operands[0]);
+
+ i.insert_before(is_not_zero);
+ i.insert_before(high_words);
+ i.insert_before(assign(is_not_zero, nequal(absval->clone(ir, NULL), dzero)));
+
+ /* Extract all of the upper uints. */
+ for (unsigned elem = 0; elem < vec_elem; elem++) {
+ ir_rvalue *x = swizzle(absval->clone(ir, NULL), elem, 1);
+
+ i.insert_before(assign(high_words,
+ swizzle_y(expr(ir_unop_unpack_double_2x32, x)),
+ 1 << elem));
+
+ }
+ ir_constant *exponent_shift = new(ir) ir_constant(20, vec_elem);
+ ir_constant *exponent_bias = new(ir) ir_constant(-1022, vec_elem);
+
+ /* For non-zero inputs, shift the exponent down and apply bias. */
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = new(ir) ir_dereference_variable(is_not_zero);
+ ir->operands[1] = add(exponent_bias, u2i(rshift(high_words, exponent_shift)));
+ ir->operands[2] = izero;
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::carry_to_arith(ir_expression *ir)
+{
+ /* Translates
+ * ir_binop_carry x y
+ * into
+ * sum = ir_binop_add x y
+ * bcarry = ir_binop_less sum x
+ * carry = ir_unop_b2i bcarry
+ */
+
+ ir_rvalue *x_clone = ir->operands[0]->clone(ir, NULL);
+ ir->operation = ir_unop_i2u;
+ ir->init_num_operands();
+ ir->operands[0] = b2i(less(add(ir->operands[0], ir->operands[1]), x_clone));
+ ir->operands[1] = NULL;
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::borrow_to_arith(ir_expression *ir)
+{
+ /* Translates
+ * ir_binop_borrow x y
+ * into
+ * bcarry = ir_binop_less x y
+ * carry = ir_unop_b2i bcarry
+ */
+
+ ir->operation = ir_unop_i2u;
+ ir->init_num_operands();
+ ir->operands[0] = b2i(less(ir->operands[0], ir->operands[1]));
+ ir->operands[1] = NULL;
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::sat_to_clamp(ir_expression *ir)
+{
+ /* Translates
+ * ir_unop_saturate x
+ * into
+ * ir_binop_min (ir_binop_max(x, 0.0), 1.0)
+ */
+
+ ir->operation = ir_binop_min;
+ ir->init_num_operands();
+
+ ir_constant *zero = _imm_fp(ir, ir->operands[0]->type, 0.0);
+ ir->operands[0] = new(ir) ir_expression(ir_binop_max, ir->operands[0]->type,
+ ir->operands[0], zero);
+ ir->operands[1] = _imm_fp(ir, ir->operands[0]->type, 1.0);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::double_dot_to_fma(ir_expression *ir)
+{
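+   /* Accumulate the dot product one component at a time with fma, e.g.
+    * for dvec3 operands:
+    *
+    *    dot_res = x.z * y.z;
+    *    dot_res = fma(x.y, y.y, dot_res);
+    *    result  = fma(x.x, y.x, dot_res);
+    */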
+ ir_variable *temp = new(ir) ir_variable(ir->operands[0]->type->get_base_type(), "dot_res",
+ ir_var_temporary);
+ this->base_ir->insert_before(temp);
+
+ int nc = ir->operands[0]->type->components();
+ for (int i = nc - 1; i >= 1; i--) {
+ ir_assignment *assig;
+ if (i == (nc - 1)) {
+ assig = assign(temp, mul(swizzle(ir->operands[0]->clone(ir, NULL), i, 1),
+ swizzle(ir->operands[1]->clone(ir, NULL), i, 1)));
+ } else {
+ assig = assign(temp, fma(swizzle(ir->operands[0]->clone(ir, NULL), i, 1),
+ swizzle(ir->operands[1]->clone(ir, NULL), i, 1),
+ temp));
+ }
+ this->base_ir->insert_before(assig);
+ }
+
+ ir->operation = ir_triop_fma;
+ ir->init_num_operands();
+ ir->operands[0] = swizzle(ir->operands[0], 0, 1);
+ ir->operands[1] = swizzle(ir->operands[1], 0, 1);
+ ir->operands[2] = new(ir) ir_dereference_variable(temp);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::double_lrp(ir_expression *ir)
+{
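+   /* lrp(x, y, a) == fma(a, y, (1.0 - a) * x), with a scalar <a>
+    * broadcast across the components of <x>.
+    */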
+ int swizval;
+ ir_rvalue *op0 = ir->operands[0], *op2 = ir->operands[2];
+ ir_constant *one = new(ir) ir_constant(1.0, op2->type->vector_elements);
+
+ switch (op2->type->vector_elements) {
+ case 1:
+ swizval = SWIZZLE_XXXX;
+ break;
+ default:
+ assert(op0->type->vector_elements == op2->type->vector_elements);
+ swizval = SWIZZLE_XYZW;
+ break;
+ }
+
+ ir->operation = ir_triop_fma;
+ ir->init_num_operands();
+ ir->operands[0] = swizzle(op2, swizval, op0->type->vector_elements);
+ ir->operands[2] = mul(sub(one, op2->clone(ir, NULL)), op0);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dceil_to_dfrac(ir_expression *ir)
+{
+ /*
+ * frtemp = frac(x);
+ * temp = sub(x, frtemp);
+ * result = temp + ((frtemp != 0.0) ? 1.0 : 0.0);
+ */
+ ir_instruction &i = *base_ir;
+ ir_constant *zero = new(ir) ir_constant(0.0, ir->operands[0]->type->vector_elements);
+ ir_constant *one = new(ir) ir_constant(1.0, ir->operands[0]->type->vector_elements);
+ ir_variable *frtemp = new(ir) ir_variable(ir->operands[0]->type, "frtemp",
+ ir_var_temporary);
+
+ i.insert_before(frtemp);
+ i.insert_before(assign(frtemp, fract(ir->operands[0])));
+
+ ir->operation = ir_binop_add;
+ ir->init_num_operands();
+ ir->operands[0] = sub(ir->operands[0]->clone(ir, NULL), frtemp);
+ ir->operands[1] = csel(nequal(frtemp, zero), one, zero->clone(ir, NULL));
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dfloor_to_dfrac(ir_expression *ir)
+{
+ /*
+ * frtemp = frac(x);
+ * result = sub(x, frtemp);
+ */
+ ir->operation = ir_binop_sub;
+ ir->init_num_operands();
+ ir->operands[1] = fract(ir->operands[0]->clone(ir, NULL));
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dround_even_to_dfrac(ir_expression *ir)
+{
+ /*
+ * insane but works
+ * temp = x + 0.5;
+ * frtemp = frac(temp);
+ * t2 = sub(temp, frtemp);
+    * if (frac(x) == 0.5)
+    *    result = frac(t2 * 0.5) == 0 ? t2 : t2 - 1;
+    * else
+    *    result = t2;
+    */
+ ir_instruction &i = *base_ir;
+ ir_variable *frtemp = new(ir) ir_variable(ir->operands[0]->type, "frtemp",
+ ir_var_temporary);
+ ir_variable *temp = new(ir) ir_variable(ir->operands[0]->type, "temp",
+ ir_var_temporary);
+ ir_variable *t2 = new(ir) ir_variable(ir->operands[0]->type, "t2",
+ ir_var_temporary);
+ ir_constant *p5 = new(ir) ir_constant(0.5, ir->operands[0]->type->vector_elements);
+ ir_constant *one = new(ir) ir_constant(1.0, ir->operands[0]->type->vector_elements);
+ ir_constant *zero = new(ir) ir_constant(0.0, ir->operands[0]->type->vector_elements);
+
+ i.insert_before(temp);
+ i.insert_before(assign(temp, add(ir->operands[0], p5)));
+
+ i.insert_before(frtemp);
+ i.insert_before(assign(frtemp, fract(temp)));
+
+ i.insert_before(t2);
+ i.insert_before(assign(t2, sub(temp, frtemp)));
+
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = equal(fract(ir->operands[0]->clone(ir, NULL)),
+ p5->clone(ir, NULL));
+ ir->operands[1] = csel(equal(fract(mul(t2, p5->clone(ir, NULL))),
+ zero),
+ t2,
+ sub(t2, one));
+ ir->operands[2] = new(ir) ir_dereference_variable(t2);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dtrunc_to_dfrac(ir_expression *ir)
+{
+ /*
+ * frtemp = frac(x);
+ * temp = sub(x, frtemp);
+    * result = (x >= 0) ? temp : temp + ((frtemp == 0.0) ? 0 : 1);
+ */
+ ir_rvalue *arg = ir->operands[0];
+ ir_instruction &i = *base_ir;
+
+ ir_constant *zero = new(ir) ir_constant(0.0, arg->type->vector_elements);
+ ir_constant *one = new(ir) ir_constant(1.0, arg->type->vector_elements);
+ ir_variable *frtemp = new(ir) ir_variable(arg->type, "frtemp",
+ ir_var_temporary);
+ ir_variable *temp = new(ir) ir_variable(ir->operands[0]->type, "temp",
+ ir_var_temporary);
+
+ i.insert_before(frtemp);
+ i.insert_before(assign(frtemp, fract(arg)));
+ i.insert_before(temp);
+ i.insert_before(assign(temp, sub(arg->clone(ir, NULL), frtemp)));
+
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = gequal(arg->clone(ir, NULL), zero);
+ ir->operands[1] = new (ir) ir_dereference_variable(temp);
+ ir->operands[2] = add(temp,
+ csel(equal(frtemp, zero->clone(ir, NULL)),
+ zero->clone(ir, NULL),
+ one));
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::dsign_to_csel(ir_expression *ir)
+{
+ /*
+ * temp = x > 0.0 ? 1.0 : 0.0;
+ * result = x < 0.0 ? -1.0 : temp;
+ */
+ ir_rvalue *arg = ir->operands[0];
+ ir_constant *zero = new(ir) ir_constant(0.0, arg->type->vector_elements);
+ ir_constant *one = new(ir) ir_constant(1.0, arg->type->vector_elements);
+ ir_constant *neg_one = new(ir) ir_constant(-1.0, arg->type->vector_elements);
+
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = less(arg->clone(ir, NULL),
+ zero->clone(ir, NULL));
+ ir->operands[1] = neg_one;
+ ir->operands[2] = csel(greater(arg, zero),
+ one,
+ zero->clone(ir, NULL));
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::bit_count_to_math(ir_expression *ir)
+{
+ /* For more details, see:
+ *
+    * http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ */
+ const unsigned elements = ir->operands[0]->type->vector_elements;
+ ir_variable *temp = new(ir) ir_variable(glsl_type::uvec(elements), "temp",
+ ir_var_temporary);
+ ir_constant *c55555555 = new(ir) ir_constant(0x55555555u);
+ ir_constant *c33333333 = new(ir) ir_constant(0x33333333u);
+ ir_constant *c0F0F0F0F = new(ir) ir_constant(0x0F0F0F0Fu);
+ ir_constant *c01010101 = new(ir) ir_constant(0x01010101u);
+ ir_constant *c1 = new(ir) ir_constant(1u);
+ ir_constant *c2 = new(ir) ir_constant(2u);
+ ir_constant *c4 = new(ir) ir_constant(4u);
+ ir_constant *c24 = new(ir) ir_constant(24u);
+
+ base_ir->insert_before(temp);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_UINT) {
+ base_ir->insert_before(assign(temp, ir->operands[0]));
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ base_ir->insert_before(assign(temp, i2u(ir->operands[0])));
+ }
+
+ /* temp = temp - ((temp >> 1) & 0x55555555u); */
+ base_ir->insert_before(assign(temp, sub(temp, bit_and(rshift(temp, c1),
+ c55555555))));
+
+ /* temp = (temp & 0x33333333u) + ((temp >> 2) & 0x33333333u); */
+ base_ir->insert_before(assign(temp, add(bit_and(temp, c33333333),
+ bit_and(rshift(temp, c2),
+ c33333333->clone(ir, NULL)))));
+
+ /* int(((temp + (temp >> 4) & 0xF0F0F0Fu) * 0x1010101u) >> 24); */
+ ir->operation = ir_unop_u2i;
+ ir->init_num_operands();
+ ir->operands[0] = rshift(mul(bit_and(add(temp, rshift(temp, c4)), c0F0F0F0F),
+ c01010101),
+ c24);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::extract_to_shifts(ir_expression *ir)
+{
+ ir_variable *bits =
+ new(ir) ir_variable(ir->operands[0]->type, "bits", ir_var_temporary);
+
+ base_ir->insert_before(bits);
+ base_ir->insert_before(assign(bits, ir->operands[2]));
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_UINT) {
+ ir_constant *c1 =
+ new(ir) ir_constant(1u, ir->operands[0]->type->vector_elements);
+ ir_constant *c32 =
+ new(ir) ir_constant(32u, ir->operands[0]->type->vector_elements);
+ ir_constant *cFFFFFFFF =
+ new(ir) ir_constant(0xFFFFFFFFu, ir->operands[0]->type->vector_elements);
+
+ /* At least some hardware treats (x << y) as (x << (y%32)). This means
+ * we'd get a mask of 0 when bits is 32. Special case it.
+ *
+ * mask = bits == 32 ? 0xffffffff : (1u << bits) - 1u;
+ */
+ ir_expression *mask = csel(equal(bits, c32),
+ cFFFFFFFF,
+ sub(lshift(c1, bits), c1->clone(ir, NULL)));
+
+ /* Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
+ *
+ * If bits is zero, the result will be zero.
+ *
+ * Since (1 << 0) - 1 == 0, we don't need to bother with the conditional
+ * select as in the signed integer case.
+ *
+ * (value >> offset) & mask;
+ */
+ ir->operation = ir_binop_bit_and;
+ ir->init_num_operands();
+ ir->operands[0] = rshift(ir->operands[0], ir->operands[1]);
+ ir->operands[1] = mask;
+ ir->operands[2] = NULL;
+ } else {
+ ir_constant *c0 =
+ new(ir) ir_constant(int(0), ir->operands[0]->type->vector_elements);
+ ir_constant *c32 =
+ new(ir) ir_constant(int(32), ir->operands[0]->type->vector_elements);
+ ir_variable *temp =
+ new(ir) ir_variable(ir->operands[0]->type, "temp", ir_var_temporary);
+
+ /* temp = 32 - bits; */
+ base_ir->insert_before(temp);
+ base_ir->insert_before(assign(temp, sub(c32, bits)));
+
+      /* expr = (value << (temp - offset)) >> temp; */
+ ir_expression *expr =
+ rshift(lshift(ir->operands[0], sub(temp, ir->operands[1])), temp);
+
+ /* Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
+ *
+ * If bits is zero, the result will be zero.
+ *
+ * Due to the (x << (y%32)) behavior mentioned before, the (value <<
+ * (32-0)) doesn't "erase" all of the data as we would like, so finish
+ * up with:
+ *
+ * (bits == 0) ? 0 : e;
+ */
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = equal(c0, bits);
+ ir->operands[1] = c0->clone(ir, NULL);
+ ir->operands[2] = expr;
+ }
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::insert_to_shifts(ir_expression *ir)
+{
+ ir_constant *c1;
+ ir_constant *c32;
+ ir_constant *cFFFFFFFF;
+ ir_variable *offset =
+ new(ir) ir_variable(ir->operands[0]->type, "offset", ir_var_temporary);
+ ir_variable *bits =
+ new(ir) ir_variable(ir->operands[0]->type, "bits", ir_var_temporary);
+ ir_variable *mask =
+ new(ir) ir_variable(ir->operands[0]->type, "mask", ir_var_temporary);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_INT) {
+ c1 = new(ir) ir_constant(int(1), ir->operands[0]->type->vector_elements);
+ c32 = new(ir) ir_constant(int(32), ir->operands[0]->type->vector_elements);
+ cFFFFFFFF = new(ir) ir_constant(int(0xFFFFFFFF), ir->operands[0]->type->vector_elements);
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+
+ c1 = new(ir) ir_constant(1u, ir->operands[0]->type->vector_elements);
+ c32 = new(ir) ir_constant(32u, ir->operands[0]->type->vector_elements);
+ cFFFFFFFF = new(ir) ir_constant(0xFFFFFFFFu, ir->operands[0]->type->vector_elements);
+ }
+
+ base_ir->insert_before(offset);
+ base_ir->insert_before(assign(offset, ir->operands[2]));
+
+ base_ir->insert_before(bits);
+ base_ir->insert_before(assign(bits, ir->operands[3]));
+
+ /* At least some hardware treats (x << y) as (x << (y%32)). This means
+ * we'd get a mask of 0 when bits is 32. Special case it.
+ *
+ * mask = (bits == 32 ? 0xffffffff : (1u << bits) - 1u) << offset;
+ *
+ * Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
+ *
+ * The result will be undefined if offset or bits is negative, or if the
+ * sum of offset and bits is greater than the number of bits used to
+ * store the operand.
+ *
+ * Since it's undefined, there are a couple other ways this could be
+ * implemented. The other way that was considered was to put the csel
+ * around the whole thing:
+ *
+ * final_result = bits == 32 ? insert : ... ;
+ */
+ base_ir->insert_before(mask);
+
+ base_ir->insert_before(assign(mask, csel(equal(bits, c32),
+ cFFFFFFFF,
+ lshift(sub(lshift(c1, bits),
+ c1->clone(ir, NULL)),
+ offset))));
+
+ /* (base & ~mask) | ((insert << offset) & mask) */
+ ir->operation = ir_binop_bit_or;
+ ir->init_num_operands();
+ ir->operands[0] = bit_and(ir->operands[0], bit_not(mask));
+ ir->operands[1] = bit_and(lshift(ir->operands[1], offset), mask);
+ ir->operands[2] = NULL;
+ ir->operands[3] = NULL;
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::reverse_to_shifts(ir_expression *ir)
+{
+ /* For more details, see:
+ *
+ * http://graphics.stanford.edu/~seander/bithacks.html#ReverseParallel
+ */
+ ir_constant *c1 =
+ new(ir) ir_constant(1u, ir->operands[0]->type->vector_elements);
+ ir_constant *c2 =
+ new(ir) ir_constant(2u, ir->operands[0]->type->vector_elements);
+ ir_constant *c4 =
+ new(ir) ir_constant(4u, ir->operands[0]->type->vector_elements);
+ ir_constant *c8 =
+ new(ir) ir_constant(8u, ir->operands[0]->type->vector_elements);
+ ir_constant *c16 =
+ new(ir) ir_constant(16u, ir->operands[0]->type->vector_elements);
+ ir_constant *c33333333 =
+ new(ir) ir_constant(0x33333333u, ir->operands[0]->type->vector_elements);
+ ir_constant *c55555555 =
+ new(ir) ir_constant(0x55555555u, ir->operands[0]->type->vector_elements);
+ ir_constant *c0F0F0F0F =
+ new(ir) ir_constant(0x0F0F0F0Fu, ir->operands[0]->type->vector_elements);
+ ir_constant *c00FF00FF =
+ new(ir) ir_constant(0x00FF00FFu, ir->operands[0]->type->vector_elements);
+ ir_variable *temp =
+ new(ir) ir_variable(glsl_type::uvec(ir->operands[0]->type->vector_elements),
+ "temp", ir_var_temporary);
+ ir_instruction &i = *base_ir;
+
+ i.insert_before(temp);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_UINT) {
+ i.insert_before(assign(temp, ir->operands[0]));
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+ i.insert_before(assign(temp, i2u(ir->operands[0])));
+ }
+
+ /* Swap odd and even bits.
+ *
+ * temp = ((temp >> 1) & 0x55555555u) | ((temp & 0x55555555u) << 1);
+ */
+ i.insert_before(assign(temp, bit_or(bit_and(rshift(temp, c1), c55555555),
+ lshift(bit_and(temp, c55555555->clone(ir, NULL)),
+ c1->clone(ir, NULL)))));
+ /* Swap consecutive pairs.
+ *
+ * temp = ((temp >> 2) & 0x33333333u) | ((temp & 0x33333333u) << 2);
+ */
+ i.insert_before(assign(temp, bit_or(bit_and(rshift(temp, c2), c33333333),
+ lshift(bit_and(temp, c33333333->clone(ir, NULL)),
+ c2->clone(ir, NULL)))));
+
+ /* Swap nibbles.
+ *
+ * temp = ((temp >> 4) & 0x0F0F0F0Fu) | ((temp & 0x0F0F0F0Fu) << 4);
+ */
+ i.insert_before(assign(temp, bit_or(bit_and(rshift(temp, c4), c0F0F0F0F),
+ lshift(bit_and(temp, c0F0F0F0F->clone(ir, NULL)),
+ c4->clone(ir, NULL)))));
+
+ /* The last step is, basically, bswap. Swap the bytes, then swap the
+ * words. When this code is run through GCC on x86, it does generate a
+ * bswap instruction.
+ *
+ * temp = ((temp >> 8) & 0x00FF00FFu) | ((temp & 0x00FF00FFu) << 8);
+ * temp = ( temp >> 16 ) | ( temp << 16);
+ */
+ i.insert_before(assign(temp, bit_or(bit_and(rshift(temp, c8), c00FF00FF),
+ lshift(bit_and(temp, c00FF00FF->clone(ir, NULL)),
+ c8->clone(ir, NULL)))));
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_UINT) {
+ ir->operation = ir_binop_bit_or;
+ ir->init_num_operands();
+ ir->operands[0] = rshift(temp, c16);
+ ir->operands[1] = lshift(temp, c16->clone(ir, NULL));
+ } else {
+ ir->operation = ir_unop_u2i;
+ ir->init_num_operands();
+ ir->operands[0] = bit_or(rshift(temp, c16),
+ lshift(temp, c16->clone(ir, NULL)));
+ }
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::find_lsb_to_float_cast(ir_expression *ir)
+{
+ /* For more details, see:
+ *
+ * http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightFloatCast
+ */
+ const unsigned elements = ir->operands[0]->type->vector_elements;
+ ir_constant *c0 = new(ir) ir_constant(unsigned(0), elements);
+ ir_constant *cminus1 = new(ir) ir_constant(int(-1), elements);
+ ir_constant *c23 = new(ir) ir_constant(int(23), elements);
+ ir_constant *c7F = new(ir) ir_constant(int(0x7F), elements);
+ ir_variable *temp =
+ new(ir) ir_variable(glsl_type::ivec(elements), "temp", ir_var_temporary);
+ ir_variable *lsb_only =
+ new(ir) ir_variable(glsl_type::uvec(elements), "lsb_only", ir_var_temporary);
+ ir_variable *as_float =
+ new(ir) ir_variable(glsl_type::vec(elements), "as_float", ir_var_temporary);
+ ir_variable *lsb =
+ new(ir) ir_variable(glsl_type::ivec(elements), "lsb", ir_var_temporary);
+
+ ir_instruction &i = *base_ir;
+
+ i.insert_before(temp);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_INT) {
+ i.insert_before(assign(temp, ir->operands[0]));
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+ i.insert_before(assign(temp, u2i(ir->operands[0])));
+ }
+
+ /* The int-to-float conversion is lossless because (value & -value) is
+ * either a power of two or zero. We don't use the result in the zero
+ * case. The uint() cast is necessary so that 0x80000000 does not
+ * generate a negative value.
+ *
+ * uint lsb_only = uint(value & -value);
+ * float as_float = float(lsb_only);
+ */
+ i.insert_before(lsb_only);
+ i.insert_before(assign(lsb_only, i2u(bit_and(temp, neg(temp)))));
+
+ i.insert_before(as_float);
+ i.insert_before(assign(as_float, u2f(lsb_only)));
+
+ /* This is basically an open-coded frexp. Implementations that have a
+ * native frexp instruction would be better served by that. This is
+ * optimized versus a full-featured open-coded implementation in two ways:
+ *
+ * - We don't care about a correct result from subnormal numbers (including
+ * 0.0), so the raw exponent can always be safely unbiased.
+ *
+ * - The value cannot be negative, so it does not need to be masked off to
+ * extract the exponent.
+ *
+ * int lsb = (floatBitsToInt(as_float) >> 23) - 0x7f;
+ */
+ i.insert_before(lsb);
+ i.insert_before(assign(lsb, sub(rshift(bitcast_f2i(as_float), c23), c7F)));
+
+ /* Use lsb_only in the comparison instead of temp so that the & (far above)
+ * can possibly generate the result without an explicit comparison.
+ *
+ * (lsb_only == 0) ? -1 : lsb;
+ *
+ * Since our input values are all integers, the unbiased exponent must not
+ * be negative. It will only be negative (-0x7f, in fact) if lsb_only is
+ * 0. Instead of using (lsb_only == 0), we could use (lsb >= 0). Which is
+ * better is likely GPU dependent. Either way, the difference should be
+ * small.
+ */
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = equal(lsb_only, c0);
+ ir->operands[1] = cminus1;
+ ir->operands[2] = new(ir) ir_dereference_variable(lsb);
+
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::find_msb_to_float_cast(ir_expression *ir)
+{
+ /* For more details, see:
+ *
+ * http://graphics.stanford.edu/~seander/bithacks.html#ZerosOnRightFloatCast
+ */
+ const unsigned elements = ir->operands[0]->type->vector_elements;
+ ir_constant *c0 = new(ir) ir_constant(int(0), elements);
+ ir_constant *cminus1 = new(ir) ir_constant(int(-1), elements);
+ ir_constant *c23 = new(ir) ir_constant(int(23), elements);
+ ir_constant *c7F = new(ir) ir_constant(int(0x7F), elements);
+ ir_constant *c000000FF = new(ir) ir_constant(0x000000FFu, elements);
+ ir_constant *cFFFFFF00 = new(ir) ir_constant(0xFFFFFF00u, elements);
+ ir_variable *temp =
+ new(ir) ir_variable(glsl_type::uvec(elements), "temp", ir_var_temporary);
+ ir_variable *as_float =
+ new(ir) ir_variable(glsl_type::vec(elements), "as_float", ir_var_temporary);
+ ir_variable *msb =
+ new(ir) ir_variable(glsl_type::ivec(elements), "msb", ir_var_temporary);
+
+ ir_instruction &i = *base_ir;
+
+ i.insert_before(temp);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_UINT) {
+ i.insert_before(assign(temp, ir->operands[0]));
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+
+ /* findMSB(uint(abs(some_int))) almost always does the right thing.
+ * There are two problem values:
+ *
+ * * 0x80000000. Since abs(0x80000000) == 0x80000000, findMSB returns
+ * 31. However, findMSB(int(0x80000000)) == 30.
+ *
+ * * 0xffffffff. Since abs(0xffffffff) == 1, findMSB returns
+ * 31. Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
+ *
+ * For a value of zero or negative one, -1 will be returned.
+ *
+ * For all negative number cases, including 0x80000000 and 0xffffffff,
+ * the correct value is obtained from findMSB if instead of negating the
+       * (already negative) value the logical-not is used. A conditional
+ * logical-not can be achieved in two instructions.
+ */
+ ir_variable *as_int =
+ new(ir) ir_variable(glsl_type::ivec(elements), "as_int", ir_var_temporary);
+ ir_constant *c31 = new(ir) ir_constant(int(31), elements);
+
+ i.insert_before(as_int);
+ i.insert_before(assign(as_int, ir->operands[0]));
+ i.insert_before(assign(temp, i2u(expr(ir_binop_bit_xor,
+ as_int,
+ rshift(as_int, c31)))));
+ }
+
+ /* The int-to-float conversion is lossless because bits are conditionally
+ * masked off the bottom of temp to ensure the value has at most 24 bits of
+ * data or is zero. We don't use the result in the zero case. The uint()
+ * cast is necessary so that 0x80000000 does not generate a negative value.
+ *
+ * float as_float = float(temp > 255 ? temp & ~255 : temp);
+ */
+ i.insert_before(as_float);
+ i.insert_before(assign(as_float, u2f(csel(greater(temp, c000000FF),
+ bit_and(temp, cFFFFFF00),
+ temp))));
+
+ /* This is basically an open-coded frexp. Implementations that have a
+ * native frexp instruction would be better served by that. This is
+ * optimized versus a full-featured open-coded implementation in two ways:
+ *
+ * - We don't care about a correct result from subnormal numbers (including
+ * 0.0), so the raw exponent can always be safely unbiased.
+ *
+ * - The value cannot be negative, so it does not need to be masked off to
+ * extract the exponent.
+ *
+ * int msb = (floatBitsToInt(as_float) >> 23) - 0x7f;
+ */
+ i.insert_before(msb);
+ i.insert_before(assign(msb, sub(rshift(bitcast_f2i(as_float), c23), c7F)));
+
+ /* Use msb in the comparison instead of temp so that the subtract can
+ * possibly generate the result without an explicit comparison.
+ *
+ * (msb < 0) ? -1 : msb;
+ *
+ * Since our input values are all integers, the unbiased exponent must not
+ * be negative. It will only be negative (-0x7f, in fact) if temp is 0.
+ */
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = less(msb, c0);
+ ir->operands[1] = cminus1;
+ ir->operands[2] = new(ir) ir_dereference_variable(msb);
+
+ this->progress = true;
+}
+
+ir_expression *
+lower_instructions_visitor::_carry(operand a, operand b)
+{
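+   /* Emit an unsigned add-with-carry. When ir_binop_carry itself is
+    * being lowered, open-code it as uint((a + b) < a) so we don't
+    * generate IR that would just need another lowering pass.
+    */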
+ if (lowering(CARRY_TO_ARITH))
+ return i2u(b2i(less(add(a, b),
+ a.val->clone(ralloc_parent(a.val), NULL))));
+ else
+ return carry(a, b);
+}
+
+ir_constant *
+lower_instructions_visitor::_imm_fp(void *mem_ctx,
+ const glsl_type *type,
+ double f,
+ unsigned vector_elements)
+{
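+   /* Build a floating-point immediate matching the base type of <type>
+    * (float16, float, or double) so lowered expressions keep their
+    * operands' precision.
+    */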
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ return new(mem_ctx) ir_constant((float) f, vector_elements);
+ case GLSL_TYPE_DOUBLE:
+ return new(mem_ctx) ir_constant((double) f, vector_elements);
+ case GLSL_TYPE_FLOAT16:
+ return new(mem_ctx) ir_constant(float16_t(f), vector_elements);
+ default:
+ assert(!"unknown float type for immediate");
+ return NULL;
+ }
+}
+
+void
+lower_instructions_visitor::imul_high_to_mul(ir_expression *ir)
+{
+ /* ABCD
+ * * EFGH
+ * ======
+ * (GH * CD) + (GH * AB) << 16 + (EF * CD) << 16 + (EF * AB) << 32
+ *
+ * In GLSL, (a * b) becomes
+ *
+ * uint m1 = (a & 0x0000ffffu) * (b & 0x0000ffffu);
+ * uint m2 = (a & 0x0000ffffu) * (b >> 16);
+ * uint m3 = (a >> 16) * (b & 0x0000ffffu);
+ * uint m4 = (a >> 16) * (b >> 16);
+ *
+ * uint c1;
+ * uint c2;
+ * uint lo_result;
+ * uint hi_result;
+ *
+ * lo_result = uaddCarry(m1, m2 << 16, c1);
+ * hi_result = m4 + c1;
+ * lo_result = uaddCarry(lo_result, m3 << 16, c2);
+ * hi_result = hi_result + c2;
+ * hi_result = hi_result + (m2 >> 16) + (m3 >> 16);
+ */
+ const unsigned elements = ir->operands[0]->type->vector_elements;
+ ir_variable *src1 =
+ new(ir) ir_variable(glsl_type::uvec(elements), "src1", ir_var_temporary);
+ ir_variable *src1h =
+ new(ir) ir_variable(glsl_type::uvec(elements), "src1h", ir_var_temporary);
+ ir_variable *src1l =
+ new(ir) ir_variable(glsl_type::uvec(elements), "src1l", ir_var_temporary);
+ ir_variable *src2 =
+ new(ir) ir_variable(glsl_type::uvec(elements), "src2", ir_var_temporary);
+ ir_variable *src2h =
+ new(ir) ir_variable(glsl_type::uvec(elements), "src2h", ir_var_temporary);
+ ir_variable *src2l =
+ new(ir) ir_variable(glsl_type::uvec(elements), "src2l", ir_var_temporary);
+ ir_variable *t1 =
+ new(ir) ir_variable(glsl_type::uvec(elements), "t1", ir_var_temporary);
+ ir_variable *t2 =
+ new(ir) ir_variable(glsl_type::uvec(elements), "t2", ir_var_temporary);
+ ir_variable *lo =
+ new(ir) ir_variable(glsl_type::uvec(elements), "lo", ir_var_temporary);
+ ir_variable *hi =
+ new(ir) ir_variable(glsl_type::uvec(elements), "hi", ir_var_temporary);
+ ir_variable *different_signs = NULL;
+ ir_constant *c0000FFFF = new(ir) ir_constant(0x0000FFFFu, elements);
+ ir_constant *c16 = new(ir) ir_constant(16u, elements);
+
+ ir_instruction &i = *base_ir;
+
+ i.insert_before(src1);
+ i.insert_before(src2);
+ i.insert_before(src1h);
+ i.insert_before(src2h);
+ i.insert_before(src1l);
+ i.insert_before(src2l);
+
+ if (ir->operands[0]->type->base_type == GLSL_TYPE_UINT) {
+ i.insert_before(assign(src1, ir->operands[0]));
+ i.insert_before(assign(src2, ir->operands[1]));
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+
+ ir_variable *itmp1 =
+ new(ir) ir_variable(glsl_type::ivec(elements), "itmp1", ir_var_temporary);
+ ir_variable *itmp2 =
+ new(ir) ir_variable(glsl_type::ivec(elements), "itmp2", ir_var_temporary);
+ ir_constant *c0 = new(ir) ir_constant(int(0), elements);
+
+ i.insert_before(itmp1);
+ i.insert_before(itmp2);
+ i.insert_before(assign(itmp1, ir->operands[0]));
+ i.insert_before(assign(itmp2, ir->operands[1]));
+
+ different_signs =
+ new(ir) ir_variable(glsl_type::bvec(elements), "different_signs",
+ ir_var_temporary);
+
+ i.insert_before(different_signs);
+ i.insert_before(assign(different_signs, expr(ir_binop_logic_xor,
+ less(itmp1, c0),
+ less(itmp2, c0->clone(ir, NULL)))));
+
+ i.insert_before(assign(src1, i2u(abs(itmp1))));
+ i.insert_before(assign(src2, i2u(abs(itmp2))));
+ }
+
+ i.insert_before(assign(src1l, bit_and(src1, c0000FFFF)));
+ i.insert_before(assign(src2l, bit_and(src2, c0000FFFF->clone(ir, NULL))));
+ i.insert_before(assign(src1h, rshift(src1, c16)));
+ i.insert_before(assign(src2h, rshift(src2, c16->clone(ir, NULL))));
+
+ i.insert_before(lo);
+ i.insert_before(hi);
+ i.insert_before(t1);
+ i.insert_before(t2);
+
+ i.insert_before(assign(lo, mul(src1l, src2l)));
+ i.insert_before(assign(t1, mul(src1l, src2h)));
+ i.insert_before(assign(t2, mul(src1h, src2l)));
+ i.insert_before(assign(hi, mul(src1h, src2h)));
+
+ i.insert_before(assign(hi, add(hi, _carry(lo, lshift(t1, c16->clone(ir, NULL))))));
+ i.insert_before(assign(lo, add(lo, lshift(t1, c16->clone(ir, NULL)))));
+
+ i.insert_before(assign(hi, add(hi, _carry(lo, lshift(t2, c16->clone(ir, NULL))))));
+ i.insert_before(assign(lo, add(lo, lshift(t2, c16->clone(ir, NULL)))));
+
+ if (different_signs == NULL) {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_UINT);
+
+ ir->operation = ir_binop_add;
+ ir->init_num_operands();
+ ir->operands[0] = add(hi, rshift(t1, c16->clone(ir, NULL)));
+ ir->operands[1] = rshift(t2, c16->clone(ir, NULL));
+ } else {
+ assert(ir->operands[0]->type->base_type == GLSL_TYPE_INT);
+
+ i.insert_before(assign(hi, add(add(hi, rshift(t1, c16->clone(ir, NULL))),
+ rshift(t2, c16->clone(ir, NULL)))));
+
+ /* For channels where different_signs is set we have to perform a 64-bit
+ * negation. This is *not* the same as just negating the high 32-bits.
+ * Consider -3 * 2. The high 32-bits is 0, but the desired result is
+ * -1, not -0! Recall -x == ~x + 1.
+ */
+ ir_variable *neg_hi =
+ new(ir) ir_variable(glsl_type::ivec(elements), "neg_hi", ir_var_temporary);
+ ir_constant *c1 = new(ir) ir_constant(1u, elements);
+
+ i.insert_before(neg_hi);
+ i.insert_before(assign(neg_hi, add(bit_not(u2i(hi)),
+ u2i(_carry(bit_not(lo), c1)))));
+
+ ir->operation = ir_triop_csel;
+ ir->init_num_operands();
+ ir->operands[0] = new(ir) ir_dereference_variable(different_signs);
+ ir->operands[1] = new(ir) ir_dereference_variable(neg_hi);
+ ir->operands[2] = u2i(hi);
+ }
+}
+
+void
+lower_instructions_visitor::sqrt_to_abs_sqrt(ir_expression *ir)
+{
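+   /* Rewrite sqrt(x) (or rsq(x)) as sqrt(abs(x)), presumably for
+    * backends that want a defined result for negative inputs rather
+    * than NaN.
+    */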
+ ir->operands[0] = new(ir) ir_expression(ir_unop_abs, ir->operands[0]);
+ this->progress = true;
+}
+
+void
+lower_instructions_visitor::mul64_to_mul_and_mul_high(ir_expression *ir)
+{
+   /* Lower a 32x32 -> 64-bit multiply into
+    *    msb = imul_high(x_lo, y_lo)
+    *    lsb = mul(x_lo, y_lo)
+    * and pack (lsb, msb) into each 64-bit result component.
+    */
+ const unsigned elements = ir->operands[0]->type->vector_elements;
+
+ const ir_expression_operation operation =
+ ir->type->base_type == GLSL_TYPE_UINT64 ? ir_unop_pack_uint_2x32
+ : ir_unop_pack_int_2x32;
+
+ const glsl_type *var_type = ir->type->base_type == GLSL_TYPE_UINT64
+ ? glsl_type::uvec(elements)
+ : glsl_type::ivec(elements);
+
+ const glsl_type *ret_type = ir->type->base_type == GLSL_TYPE_UINT64
+ ? glsl_type::uvec2_type
+ : glsl_type::ivec2_type;
+
+ ir_instruction &i = *base_ir;
+
+ ir_variable *msb =
+ new(ir) ir_variable(var_type, "msb", ir_var_temporary);
+ ir_variable *lsb =
+ new(ir) ir_variable(var_type, "lsb", ir_var_temporary);
+ ir_variable *x =
+ new(ir) ir_variable(var_type, "x", ir_var_temporary);
+ ir_variable *y =
+ new(ir) ir_variable(var_type, "y", ir_var_temporary);
+
+ i.insert_before(x);
+ i.insert_before(assign(x, ir->operands[0]));
+ i.insert_before(y);
+ i.insert_before(assign(y, ir->operands[1]));
+ i.insert_before(msb);
+ i.insert_before(lsb);
+
+ i.insert_before(assign(msb, imul_high(x, y)));
+ i.insert_before(assign(lsb, mul(x, y)));
+
+ ir_rvalue *result[4] = {NULL};
+ for (unsigned elem = 0; elem < elements; elem++) {
+ ir_rvalue *val = new(ir) ir_expression(ir_quadop_vector, ret_type,
+ swizzle(lsb, elem, 1),
+ swizzle(msb, elem, 1), NULL, NULL);
+ result[elem] = expr(operation, val);
+ }
+
+ ir->operation = ir_quadop_vector;
+ ir->init_num_operands();
+ ir->operands[0] = result[0];
+ ir->operands[1] = result[1];
+ ir->operands[2] = result[2];
+ ir->operands[3] = result[3];
+
+ this->progress = true;
+}
+
+ir_visitor_status
+lower_instructions_visitor::visit_leave(ir_expression *ir)
+{
+ switch (ir->operation) {
+ case ir_binop_dot:
+ if (ir->operands[0]->type->is_double())
+ double_dot_to_fma(ir);
+ break;
+ case ir_triop_lrp:
+ if (ir->operands[0]->type->is_double())
+ double_lrp(ir);
+ break;
+ case ir_binop_sub:
+ if (lowering(SUB_TO_ADD_NEG))
+ sub_to_add_neg(ir);
+ break;
+
+ case ir_binop_div:
+ if (ir->operands[1]->type->is_integer_32() && lowering(INT_DIV_TO_MUL_RCP))
+ int_div_to_mul_rcp(ir);
+ else if ((ir->operands[1]->type->is_float_16_32() && lowering(FDIV_TO_MUL_RCP)) ||
+ (ir->operands[1]->type->is_double() && lowering(DDIV_TO_MUL_RCP)))
+ div_to_mul_rcp(ir);
+ break;
+
+ case ir_unop_exp:
+ if (lowering(EXP_TO_EXP2))
+ exp_to_exp2(ir);
+ break;
+
+ case ir_unop_log:
+ if (lowering(LOG_TO_LOG2))
+ log_to_log2(ir);
+ break;
+
+ case ir_binop_mod:
+ if (lowering(MOD_TO_FLOOR) && ir->type->is_float_16_32_64())
+ mod_to_floor(ir);
+ break;
+
+ case ir_binop_pow:
+ if (lowering(POW_TO_EXP2))
+ pow_to_exp2(ir);
+ break;
+
+ case ir_binop_ldexp:
+ if (lowering(LDEXP_TO_ARITH) && ir->type->is_float())
+ ldexp_to_arith(ir);
+ if (lowering(DFREXP_DLDEXP_TO_ARITH) && ir->type->is_double())
+ dldexp_to_arith(ir);
+ break;
+
+ case ir_unop_frexp_exp:
+ if (lowering(DFREXP_DLDEXP_TO_ARITH) && ir->operands[0]->type->is_double())
+ dfrexp_exp_to_arith(ir);
+ break;
+
+ case ir_unop_frexp_sig:
+ if (lowering(DFREXP_DLDEXP_TO_ARITH) && ir->operands[0]->type->is_double())
+ dfrexp_sig_to_arith(ir);
+ break;
+
+ case ir_binop_carry:
+ if (lowering(CARRY_TO_ARITH))
+ carry_to_arith(ir);
+ break;
+
+ case ir_binop_borrow:
+ if (lowering(BORROW_TO_ARITH))
+ borrow_to_arith(ir);
+ break;
+
+ case ir_unop_saturate:
+ if (lowering(SAT_TO_CLAMP))
+ sat_to_clamp(ir);
+ break;
+
+ case ir_unop_trunc:
+ if (lowering(DOPS_TO_DFRAC) && ir->type->is_double())
+ dtrunc_to_dfrac(ir);
+ break;
+
+ case ir_unop_ceil:
+ if (lowering(DOPS_TO_DFRAC) && ir->type->is_double())
+ dceil_to_dfrac(ir);
+ break;
+
+ case ir_unop_floor:
+ if (lowering(DOPS_TO_DFRAC) && ir->type->is_double())
+ dfloor_to_dfrac(ir);
+ break;
+
+ case ir_unop_round_even:
+ if (lowering(DOPS_TO_DFRAC) && ir->type->is_double())
+ dround_even_to_dfrac(ir);
+ break;
+
+ case ir_unop_sign:
+ if (lowering(DOPS_TO_DFRAC) && ir->type->is_double())
+ dsign_to_csel(ir);
+ break;
+
+ case ir_unop_bit_count:
+ if (lowering(BIT_COUNT_TO_MATH))
+ bit_count_to_math(ir);
+ break;
+
+ case ir_triop_bitfield_extract:
+ if (lowering(EXTRACT_TO_SHIFTS))
+ extract_to_shifts(ir);
+ break;
+
+ case ir_quadop_bitfield_insert:
+ if (lowering(INSERT_TO_SHIFTS))
+ insert_to_shifts(ir);
+ break;
+
+ case ir_unop_bitfield_reverse:
+ if (lowering(REVERSE_TO_SHIFTS))
+ reverse_to_shifts(ir);
+ break;
+
+ case ir_unop_find_lsb:
+ if (lowering(FIND_LSB_TO_FLOAT_CAST))
+ find_lsb_to_float_cast(ir);
+ break;
+
+ case ir_unop_find_msb:
+ if (lowering(FIND_MSB_TO_FLOAT_CAST))
+ find_msb_to_float_cast(ir);
+ break;
+
+ case ir_binop_imul_high:
+ if (lowering(IMUL_HIGH_TO_MUL))
+ imul_high_to_mul(ir);
+ break;
+
+ case ir_binop_mul:
+ if (lowering(MUL64_TO_MUL_AND_MUL_HIGH) &&
+ (ir->type->base_type == GLSL_TYPE_INT64 ||
+ ir->type->base_type == GLSL_TYPE_UINT64) &&
+ (ir->operands[0]->type->base_type == GLSL_TYPE_INT ||
+ ir->operands[1]->type->base_type == GLSL_TYPE_UINT))
+ mul64_to_mul_and_mul_high(ir);
+ break;
+
+ case ir_unop_rsq:
+ case ir_unop_sqrt:
+ if (lowering(SQRT_TO_ABS_SQRT))
+ sqrt_to_abs_sqrt(ir);
+ break;
+
+ default:
+ return visit_continue;
+ }
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_int64.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_int64.cpp
new file mode 100644
index 0000000000..43774d6ab3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_int64.cpp
@@ -0,0 +1,391 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_int64.cpp
+ *
+ * Lower 64-bit operations to 32-bit operations. Each 64-bit value is lowered
+ * to a uvec2. For each operation that can be lowered, there is a function
+ * called __builtin_foo with the same number of parameters that takes uvec2
+ * sources and produces uvec2 results. An operation like
+ *
+ * uint64_t(x) * uint64_t(y)
+ *
+ * becomes
+ *
+ * packUint2x32(__builtin_umul64(unpackUint2x32(x), unpackUint2x32(y)));
+ */
+
+#include "main/macros.h"
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "util/hash_table.h"
+#include "builtin_functions.h"
+
+typedef ir_function_signature *(*function_generator)(void *mem_ctx,
+ builtin_available_predicate avail);
+
+using namespace ir_builder;
+
+namespace lower_64bit {
+void expand_source(ir_factory &, ir_rvalue *val, ir_variable **expanded_src);
+
+ir_dereference_variable *compact_destination(ir_factory &,
+ const glsl_type *type,
+ ir_variable *result[4]);
+
+ir_rvalue *lower_op_to_function_call(ir_instruction *base_ir,
+ ir_expression *ir,
+ ir_function_signature *callee);
+};
+
+using namespace lower_64bit;
+
+namespace {
+
+class lower_64bit_visitor : public ir_rvalue_visitor {
+public:
+ lower_64bit_visitor(void *mem_ctx, exec_list *instructions, unsigned lower)
+ : progress(false), lower(lower),
+ function_list(), added_functions(&function_list, mem_ctx)
+ {
+ functions = _mesa_hash_table_create(mem_ctx,
+ _mesa_hash_string,
+ _mesa_key_string_equal);
+
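+      /* Pre-populate the table with any __builtin_* helpers already in
+       * the IR so they are reused rather than generated again.
+       */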
+ foreach_in_list(ir_instruction, node, instructions) {
+ ir_function *const f = node->as_function();
+
+ if (f == NULL || strncmp(f->name, "__builtin_", 10) != 0)
+ continue;
+
+ add_function(f);
+ }
+ }
+
+ ~lower_64bit_visitor()
+ {
+ _mesa_hash_table_destroy(functions, NULL);
+ }
+
+ void handle_rvalue(ir_rvalue **rvalue);
+
+ void add_function(ir_function *f)
+ {
+ _mesa_hash_table_insert(functions, f->name, f);
+ }
+
+ ir_function *find_function(const char *name)
+ {
+ struct hash_entry *const entry =
+ _mesa_hash_table_search(functions, name);
+
+ return entry != NULL ? (ir_function *) entry->data : NULL;
+ }
+
+ bool progress;
+
+private:
+ unsigned lower; /** Bitfield of which operations to lower */
+
+ /** Hashtable containing all of the known functions in the IR */
+ struct hash_table *functions;
+
+public:
+ exec_list function_list;
+
+private:
+ ir_factory added_functions;
+
+ ir_rvalue *handle_op(ir_expression *ir, const char *function_name,
+ function_generator generator);
+};
+
+} /* anonymous namespace */
+
+/**
+ * Determine if a particular type of lowering should occur
+ */
+#define lowering(x) (this->lower & x)
+
+bool
+lower_64bit_integer_instructions(exec_list *instructions,
+ unsigned what_to_lower)
+{
+ if (instructions->is_empty())
+ return false;
+
+ ir_instruction *first_inst = (ir_instruction *) instructions->get_head_raw();
+ void *const mem_ctx = ralloc_parent(first_inst);
+ lower_64bit_visitor v(mem_ctx, instructions, what_to_lower);
+
+ visit_list_elements(&v, instructions);
+
+ if (v.progress && !v.function_list.is_empty()) {
+      /* Move all of the nodes from function_list to the head of the
+       * incoming instruction list.
+       */
+ exec_node *const after = &instructions->head_sentinel;
+ exec_node *const before = instructions->head_sentinel.next;
+ exec_node *const head = v.function_list.head_sentinel.next;
+ exec_node *const tail = v.function_list.tail_sentinel.prev;
+
+ before->next = head;
+ head->prev = before;
+
+ after->prev = tail;
+ tail->next = after;
+ }
+
+ return v.progress;
+}
+
+
+/**
+ * Expand individual 64-bit values to uvec2 values
+ *
+ * Each operation is in one of a few forms.
+ *
+ * vector op vector
+ * vector op scalar
+ * scalar op vector
+ * scalar op scalar
+ *
+ * In the 'vector op vector' case, the two vectors must have the same size.
+ * In a way, the 'scalar op scalar' form is a special case of the 'vector op
+ * vector' form.
+ *
+ * This method generates a new set of uvec2 values for each element of a
+ * single operand. If the operand is a scalar, the uvec2 is replicated
+ * multiple times. A value like
+ *
+ * u64vec3(a) + u64vec3(b)
+ *
+ * becomes
+ *
+ * u64vec3 tmp0 = u64vec3(a) + u64vec3(b);
+ * uvec2 tmp1 = unpackUint2x32(tmp0.x);
+ * uvec2 tmp2 = unpackUint2x32(tmp0.y);
+ * uvec2 tmp3 = unpackUint2x32(tmp0.z);
+ *
+ * and the returned operands array contains ir_variable pointers to
+ *
+ * { tmp1, tmp2, tmp3, tmp1 }
+ */
+void
+lower_64bit::expand_source(ir_factory &body,
+ ir_rvalue *val,
+ ir_variable **expanded_src)
+{
+ assert(val->type->is_integer_64());
+
+ ir_variable *const temp = body.make_temp(val->type, "tmp");
+
+ body.emit(assign(temp, val));
+
+ const ir_expression_operation unpack_opcode =
+ val->type->base_type == GLSL_TYPE_UINT64
+ ? ir_unop_unpack_uint_2x32 : ir_unop_unpack_int_2x32;
+
+ const glsl_type *const type =
+ val->type->base_type == GLSL_TYPE_UINT64
+ ? glsl_type::uvec2_type : glsl_type::ivec2_type;
+
+ unsigned i;
+ for (i = 0; i < val->type->vector_elements; i++) {
+ expanded_src[i] = body.make_temp(type, "expanded_64bit_source");
+
+ body.emit(assign(expanded_src[i],
+ expr(unpack_opcode, swizzle(temp, i, 1))));
+ }
+
+ for (/* empty */; i < 4; i++)
+ expanded_src[i] = expanded_src[0];
+}
+
+/**
+ * Convert a series of uvec2 results into a single 64-bit integer vector
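+ *
+ * For example (informal sketch), a u64vec2 result is rebuilt roughly as
+ *
+ *    u64vec2 tmp;
+ *    tmp.x = packUint2x32(result[0]);
+ *    tmp.y = packUint2x32(result[1]);
+ *
+ * via per-component masked writes into a single temporary.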
+ */
+ir_dereference_variable *
+lower_64bit::compact_destination(ir_factory &body,
+ const glsl_type *type,
+ ir_variable *result[4])
+{
+ const ir_expression_operation pack_opcode =
+ type->base_type == GLSL_TYPE_UINT64
+ ? ir_unop_pack_uint_2x32 : ir_unop_pack_int_2x32;
+
+ ir_variable *const compacted_result =
+ body.make_temp(type, "compacted_64bit_result");
+
+ for (unsigned i = 0; i < type->vector_elements; i++) {
+ body.emit(assign(compacted_result,
+ expr(pack_opcode, result[i]),
+ 1U << i));
+ }
+
+ void *const mem_ctx = ralloc_parent(compacted_result);
+ return new(mem_ctx) ir_dereference_variable(compacted_result);
+}
+
+ir_rvalue *
+lower_64bit::lower_op_to_function_call(ir_instruction *base_ir,
+ ir_expression *ir,
+ ir_function_signature *callee)
+{
+ const unsigned num_operands = ir->num_operands;
+ ir_variable *src[4][4];
+ ir_variable *dst[4];
+ void *const mem_ctx = ralloc_parent(ir);
+ exec_list instructions;
+ unsigned source_components = 0;
+ const glsl_type *const result_type =
+ ir->type->base_type == GLSL_TYPE_UINT64
+ ? glsl_type::uvec2_type : glsl_type::ivec2_type;
+
+ ir_factory body(&instructions, mem_ctx);
+
+ for (unsigned i = 0; i < num_operands; i++) {
+ expand_source(body, ir->operands[i], src[i]);
+
+ if (ir->operands[i]->type->vector_elements > source_components)
+ source_components = ir->operands[i]->type->vector_elements;
+ }
+
+ for (unsigned i = 0; i < source_components; i++) {
+ dst[i] = body.make_temp(result_type, "expanded_64bit_result");
+
+ exec_list parameters;
+
+ for (unsigned j = 0; j < num_operands; j++)
+ parameters.push_tail(new(mem_ctx) ir_dereference_variable(src[j][i]));
+
+ ir_dereference_variable *const return_deref =
+ new(mem_ctx) ir_dereference_variable(dst[i]);
+
+ ir_call *const c = new(mem_ctx) ir_call(callee,
+ return_deref,
+ &parameters);
+
+ body.emit(c);
+ }
+
+ ir_rvalue *const rv = compact_destination(body, ir->type, dst);
+
+ /* Splice all of the nodes from `instructions` in between base_ir and
+ * the instruction before it.
+ */
+ exec_node *const after = base_ir;
+ exec_node *const before = after->prev;
+ exec_node *const head = instructions.head_sentinel.next;
+ exec_node *const tail = instructions.tail_sentinel.prev;
+
+ before->next = head;
+ head->prev = before;
+
+ after->prev = tail;
+ tail->next = after;
+
+ return rv;
+}
+
+ir_rvalue *
+lower_64bit_visitor::handle_op(ir_expression *ir,
+ const char *function_name,
+ function_generator generator)
+{
+ for (unsigned i = 0; i < ir->num_operands; i++)
+ if (!ir->operands[i]->type->is_integer_64())
+ return ir;
+
+ /* Get a handle to the correct ir_function_signature for the core
+ * operation.
+ */
+ ir_function_signature *callee = NULL;
+ ir_function *f = find_function(function_name);
+
+ if (f != NULL) {
+ callee = (ir_function_signature *) f->signatures.get_head();
+ assert(callee != NULL && callee->ir_type == ir_type_function_signature);
+ } else {
+ f = new(base_ir) ir_function(function_name);
+ callee = generator(base_ir, NULL);
+
+ f->add_signature(callee);
+
+ add_function(f);
+ }
+
+ this->progress = true;
+ return lower_op_to_function_call(this->base_ir, ir, callee);
+}
+
+void
+lower_64bit_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (*rvalue == NULL || (*rvalue)->ir_type != ir_type_expression)
+ return;
+
+ ir_expression *const ir = (*rvalue)->as_expression();
+ assert(ir != NULL);
+
+ switch (ir->operation) {
+ case ir_unop_sign:
+ if (lowering(SIGN64)) {
+ *rvalue = handle_op(ir, "__builtin_sign64", generate_ir::sign64);
+ }
+ break;
+
+ case ir_binop_div:
+ if (lowering(DIV64)) {
+ if (ir->type->base_type == GLSL_TYPE_UINT64) {
+ *rvalue = handle_op(ir, "__builtin_udiv64", generate_ir::udiv64);
+ } else {
+ *rvalue = handle_op(ir, "__builtin_idiv64", generate_ir::idiv64);
+ }
+ }
+ break;
+
+ case ir_binop_mod:
+ if (lowering(MOD64)) {
+ if (ir->type->base_type == GLSL_TYPE_UINT64) {
+ *rvalue = handle_op(ir, "__builtin_umod64", generate_ir::umod64);
+ } else {
+ *rvalue = handle_op(ir, "__builtin_imod64", generate_ir::imod64);
+ }
+ }
+ break;
+
+ case ir_binop_mul:
+ if (lowering(MUL64)) {
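+ /* Two's-complement multiplication yields the same low 64 bits for
+ * signed and unsigned operands, so a single helper serves both.
+ */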
+ *rvalue = handle_op(ir, "__builtin_umul64", generate_ir::umul64);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_jumps.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_jumps.cpp
new file mode 100644
index 0000000000..37c2f0a8d1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_jumps.cpp
@@ -0,0 +1,1046 @@
+/*
+ * Copyright © 2010 Luca Barbieri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_jumps.cpp
+ *
+ * This pass lowers jumps (break, continue, and return) to if/else structures.
+ *
+ * It can be asked to:
+ * 1. Pull jumps out of ifs where possible
+ * 2. Remove all "continue"s, replacing them with an "execute flag"
+ * 3. Replace all "break" with a single conditional one at the end of the loop
+ * 4. Replace all "return"s with a single return at the end of the function,
+ * for the main function and/or other functions
+ *
+ * Applying this pass gives several benefits:
+ * 1. All functions can be inlined.
+ * 2. nv40 and other pre-DX10 chips without "continue" can be supported
+ * 3. nv30 and other pre-DX10 chips with no control flow at all are better
+ * supported
+ *
+ * Continues are lowered by adding a per-loop "execute flag", initialized to
+ * true, that when cleared inhibits all execution until the end of the loop.
+ *
+ * Breaks are lowered to continues, plus setting a "break flag" that is
+ * checked at the end of the loop and triggers the single conditional
+ * "break" there.
+ *
+ * Returns are lowered to breaks/continues, plus adding a "return flag" that
+ * causes loops to break again out of their enclosing loops until all the
+ * loops are exited: then the "execute flag" logic will ignore everything
+ * until the end of the function.
+ *
+ * Note that "continue" and "return" can also be implemented by adding
+ * a dummy loop and using break.
+ * However, this is bad for hardware with limited nesting depth, and it
+ * prevents further optimization, so it is not currently done.
+ */
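+
+/* A minimal sketch (not emitted by the pass verbatim) of lowering a
+ * "continue" with an execute flag:
+ *
+ *    loop { if (p) continue; body(); }
+ *
+ * becomes, roughly,
+ *
+ *    loop {
+ *       bool execute_flag = true;
+ *       if (p) execute_flag = false;
+ *       if (execute_flag) body();
+ *    }
+ */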
+
+#include "compiler/glsl_types.h"
+#include <string.h>
+#include "ir.h"
+
+/**
+ * Enum recording the result of analyzing how control flow might exit
+ * an IR node.
+ *
+ * Each possible value of jump_strength indicates a strictly stronger
+ * guarantee on control flow than the previous value.
+ *
+ * The ordering of strengths roughly reflects the way jumps are
+ * lowered: jumps with higher strength tend to be lowered to jumps of
+ * lower strength. Accordingly, strength is used as a heuristic to
+ * determine which lowering to perform first.
+ *
+ * This enum is also used by get_jump_strength() to categorize
+ * instructions as either break, continue, return, or other. When
+ * used in this fashion, strength_always_clears_execute_flag is not
+ * used.
+ *
+ * The control flow analysis made by this optimization pass makes two
+ * simplifying assumptions:
+ *
+ * - It ignores discard instructions, since they are lowered by a
+ * separate pass (lower_discard.cpp).
+ *
+ * - It assumes it is always possible for control to flow from a loop
+ * to the instruction immediately following it. Technically, this
+ * is not true (since all execution paths through the loop might
+ * jump back to the top, or return from the function).
+ *
+ * Both of these simplifying assumptions are safe, since they can never
+ * cause reachable code to be incorrectly classified as unreachable;
+ * they can only do the opposite.
+ */
+enum jump_strength
+{
+ /**
+ * Analysis has produced no guarantee on how control flow might
+ * exit this IR node. It might fall out the bottom (with or
+ * without clearing the execute flag, if present), or it might
+ * continue to the top of the innermost enclosing loop, break out
+ * of it, or return from the function.
+ */
+ strength_none,
+
+ /**
+ * The only way control can fall out the bottom of this node is
+ * through a code path that clears the execute flag. It might also
+ * continue to the top of the innermost enclosing loop, break out
+ * of it, or return from the function.
+ */
+ strength_always_clears_execute_flag,
+
+ /**
+ * Control cannot fall out the bottom of this node. It might
+ * continue to the top of the innermost enclosing loop, break out
+ * of it, or return from the function.
+ */
+ strength_continue,
+
+ /**
+ * Control cannot fall out the bottom of this node, or continue the
+ * top of the innermost enclosing loop. It can only break out of
+ * it or return from the function.
+ */
+ strength_break,
+
+ /**
+ * Control cannot fall out the bottom of this node, continue to the
+ * top of the innermost enclosing loop, or break out of it. It can
+ * only return from the function.
+ */
+ strength_return
+};
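+
+/* Informally: an "if" whose then-branch ends in "return" and whose
+ * else-branch ends in "break" has minimum strength strength_break, since
+ * both paths leave the loop but only one is guaranteed to also leave the
+ * function.
+ */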
+
+namespace {
+
+struct block_record
+{
+ /* minimum jump strength (of lowered IR, not pre-lowering IR)
+ *
+ * If the block ends with a jump, this must be the strength of that
+ * jump; otherwise the jump would be dead and would already have been
+ * deleted.
+ *
+ * If the block doesn't end with a jump, this can still differ from
+ * strength_none if all paths before it lead to some jump (e.g. an if
+ * with a return in one branch and a break in the other, while not
+ * lowering them). Note that identical jumps are usually unified,
+ * though.
+ */
+ jump_strength min_strength;
+
+ /* can anything clear the execute flag? */
+ bool may_clear_execute_flag;
+
+ block_record()
+ {
+ this->min_strength = strength_none;
+ this->may_clear_execute_flag = false;
+ }
+};
+
+struct loop_record
+{
+ ir_function_signature* signature;
+ ir_loop* loop;
+
+ /* used to avoid lowering the break used to represent lowered breaks */
+ unsigned nesting_depth;
+ bool in_if_at_the_end_of_the_loop;
+
+ bool may_set_return_flag;
+
+ ir_variable* break_flag;
+ ir_variable* execute_flag; /* cleared to emulate continue */
+
+ loop_record(ir_function_signature* p_signature = 0, ir_loop* p_loop = 0)
+ {
+ this->signature = p_signature;
+ this->loop = p_loop;
+ this->nesting_depth = 0;
+ this->in_if_at_the_end_of_the_loop = false;
+ this->may_set_return_flag = false;
+ this->break_flag = 0;
+ this->execute_flag = 0;
+ }
+
+ ir_variable* get_execute_flag()
+ {
+ /* also supported for the "function loop" */
+ if(!this->execute_flag) {
+ exec_list& list = this->loop ? this->loop->body_instructions : signature->body;
+ this->execute_flag = new(this->signature) ir_variable(glsl_type::bool_type, "execute_flag", ir_var_temporary);
+ list.push_head(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(execute_flag), new(this->signature) ir_constant(true)));
+ list.push_head(this->execute_flag);
+ }
+ return this->execute_flag;
+ }
+
+ ir_variable* get_break_flag()
+ {
+ assert(this->loop);
+ if(!this->break_flag) {
+ this->break_flag = new(this->signature) ir_variable(glsl_type::bool_type, "break_flag", ir_var_temporary);
+ this->loop->insert_before(this->break_flag);
+ this->loop->insert_before(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(break_flag), new(this->signature) ir_constant(false)));
+ }
+ return this->break_flag;
+ }
+};
+
+struct function_record
+{
+ ir_function_signature* signature;
+ ir_variable* return_flag; /* used to break out of all loops and then jump to the return instruction */
+ ir_variable* return_value;
+ bool lower_return;
+ unsigned nesting_depth;
+
+ function_record(ir_function_signature* p_signature = 0,
+ bool lower_return = false)
+ {
+ this->signature = p_signature;
+ this->return_flag = 0;
+ this->return_value = 0;
+ this->nesting_depth = 0;
+ this->lower_return = lower_return;
+ }
+
+ ir_variable* get_return_flag()
+ {
+ if(!this->return_flag) {
+ this->return_flag = new(this->signature) ir_variable(glsl_type::bool_type, "return_flag", ir_var_temporary);
+ this->signature->body.push_head(new(this->signature) ir_assignment(new(this->signature) ir_dereference_variable(return_flag), new(this->signature) ir_constant(false)));
+ this->signature->body.push_head(this->return_flag);
+ }
+ return this->return_flag;
+ }
+
+ ir_variable* get_return_value()
+ {
+ if(!this->return_value) {
+ assert(!this->signature->return_type->is_void());
+ return_value = new(this->signature) ir_variable(this->signature->return_type, "return_value", ir_var_temporary);
+ this->signature->body.push_head(this->return_value);
+ }
+ return this->return_value;
+ }
+};
+
+struct ir_lower_jumps_visitor : public ir_control_flow_visitor {
+ /* Postconditions: on exit of any visit() function:
+ *
+ * ANALYSIS: this->block.min_strength,
+ * this->block.may_clear_execute_flag, and
+ * this->loop.may_set_return_flag are updated to reflect the
+ * characteristics of the visited statement.
+ *
+ * DEAD_CODE_ELIMINATION: If this->block.min_strength is not
+ * strength_none, the visited node is at the end of its exec_list.
+ * In other words, any unreachable statements that follow the
+ * visited statement in its exec_list have been removed.
+ *
+ * CONTAINED_JUMPS_LOWERED: If the visited statement contains other
+ * statements, then should_lower_jump() is false for all of the
+ * return, break, or continue statements it contains.
+ *
+ * Note that visiting a jump does not lower it. That is the
+ * responsibility of the statement (or function signature) that
+ * contains the jump.
+ */
+
+ using ir_control_flow_visitor::visit;
+
+ bool progress;
+
+ struct function_record function;
+ struct loop_record loop;
+ struct block_record block;
+
+ bool pull_out_jumps;
+ bool lower_continue;
+ bool lower_break;
+ bool lower_sub_return;
+ bool lower_main_return;
+
+ ir_lower_jumps_visitor()
+ : progress(false),
+ pull_out_jumps(false),
+ lower_continue(false),
+ lower_break(false),
+ lower_sub_return(false),
+ lower_main_return(false)
+ {
+ }
+
+ void truncate_after_instruction(exec_node *ir)
+ {
+ if (!ir)
+ return;
+
+ while (!ir->get_next()->is_tail_sentinel()) {
+ ((ir_instruction *)ir->get_next())->remove();
+ this->progress = true;
+ }
+ }
+
+ void move_outer_block_inside(ir_instruction *ir, exec_list *inner_block)
+ {
+ while (!ir->get_next()->is_tail_sentinel()) {
+ ir_instruction *move_ir = (ir_instruction *)ir->get_next();
+
+ move_ir->remove();
+ inner_block->push_tail(move_ir);
+ }
+ }
+
+ /**
+ * Insert the instructions necessary to lower a return statement,
+ * before the given return instruction.
+ */
+ void insert_lowered_return(ir_return *ir)
+ {
+ ir_variable* return_flag = this->function.get_return_flag();
+ if(!this->function.signature->return_type->is_void()) {
+ ir_variable* return_value = this->function.get_return_value();
+ ir->insert_before(
+ new(ir) ir_assignment(
+ new (ir) ir_dereference_variable(return_value),
+ ir->value));
+ }
+ ir->insert_before(
+ new(ir) ir_assignment(
+ new (ir) ir_dereference_variable(return_flag),
+ new (ir) ir_constant(true)));
+ this->loop.may_set_return_flag = true;
+ }
+
+ /**
+ * If the given instruction is a return, lower it to instructions
+ * that store the return value (if there is one), set the return
+ * flag, and then break.
+ *
+ * It is safe to pass NULL to this function.
+ */
+ void lower_return_unconditionally(ir_instruction *ir)
+ {
+ if (get_jump_strength(ir) != strength_return) {
+ return;
+ }
+ insert_lowered_return((ir_return*)ir);
+ ir->replace_with(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
+ }
+
+ /**
+ * Create the necessary instruction to replace a break instruction.
+ */
+ ir_instruction *create_lowered_break()
+ {
+ void *ctx = this->function.signature;
+ return new(ctx) ir_assignment(
+ new(ctx) ir_dereference_variable(this->loop.get_break_flag()),
+ new(ctx) ir_constant(true));
+ }
+
+ /**
+ * If the given instruction is a break, lower it to an instruction
+ * that sets the break flag, without consulting
+ * should_lower_jump().
+ *
+ * It is safe to pass NULL to this function.
+ */
+ void lower_break_unconditionally(ir_instruction *ir)
+ {
+ if (get_jump_strength(ir) != strength_break) {
+ return;
+ }
+ ir->replace_with(create_lowered_break());
+ }
+
+ /**
+ * If the block ends in a conditional or unconditional break, lower
+ * it, even though should_lower_jump() says it needn't be lowered.
+ */
+ void lower_final_breaks(exec_list *block)
+ {
+ ir_instruction *ir = (ir_instruction *) block->get_tail();
+ lower_break_unconditionally(ir);
+ ir_if *ir_if = ir->as_if();
+ if (ir_if) {
+ lower_break_unconditionally(
+ (ir_instruction *) ir_if->then_instructions.get_tail());
+ lower_break_unconditionally(
+ (ir_instruction *) ir_if->else_instructions.get_tail());
+ }
+ }
+
+ virtual void visit(class ir_loop_jump * ir)
+ {
+ /* Eliminate all instructions after each one, since they are
+ * unreachable. This satisfies the DEAD_CODE_ELIMINATION
+ * postcondition.
+ */
+ truncate_after_instruction(ir);
+
+ /* Set this->block.min_strength based on this instruction. This
+ * satisfies the ANALYSIS postcondition. It is not necessary to
+ * update this->block.may_clear_execute_flag or
+ * this->loop.may_set_return_flag, because an unlowered jump
+ * instruction can't change any flags.
+ */
+ this->block.min_strength = ir->is_break() ? strength_break : strength_continue;
+
+ /* The CONTAINED_JUMPS_LOWERED postcondition is already
+ * satisfied, because jump statements can't contain other
+ * statements.
+ */
+ }
+
+ virtual void visit(class ir_return * ir)
+ {
+ /* Eliminate all instructions after each one, since they are
+ * unreachable. This satisfies the DEAD_CODE_ELIMINATION
+ * postcondition.
+ */
+ truncate_after_instruction(ir);
+
+ /* Set this->block.min_strength based on this instruction. This
+ * satisfies the ANALYSIS postcondition. It is not necessary to
+ * update this->block.may_clear_execute_flag or
+ * this->loop.may_set_return_flag, because an unlowered return
+ * instruction can't change any flags.
+ */
+ this->block.min_strength = strength_return;
+
+ /* The CONTAINED_JUMPS_LOWERED postcondition is already
+ * satisfied, because jump statements can't contain other
+ * statements.
+ */
+ }
+
+ virtual void visit(class ir_discard * ir)
+ {
+ /* Nothing needs to be done. The ANALYSIS and
+ * DEAD_CODE_ELIMINATION postconditions are already satisfied,
+ * because discard statements are ignored by this optimization
+ * pass. The CONTAINED_JUMPS_LOWERED postcondition is already
+ * satisfied, because discard statements can't contain other
+ * statements.
+ */
+ (void) ir;
+ }
+
+ virtual void visit(class ir_precision_statement * ir)
+ {
+ /* Nothing needs to be done. */
+ }
+
+ virtual void visit(class ir_typedecl_statement * ir)
+ {
+ /* Nothing needs to be done. */
+ }
+
+ enum jump_strength get_jump_strength(ir_instruction* ir)
+ {
+ if(!ir)
+ return strength_none;
+ else if(ir->ir_type == ir_type_loop_jump) {
+ if(((ir_loop_jump*)ir)->is_break())
+ return strength_break;
+ else
+ return strength_continue;
+ } else if(ir->ir_type == ir_type_return)
+ return strength_return;
+ else
+ return strength_none;
+ }
+
+ bool should_lower_jump(ir_jump* ir)
+ {
+ unsigned strength = get_jump_strength(ir);
+ bool lower;
+ switch(strength)
+ {
+ case strength_none:
+ lower = false; /* don't change this, code relies on it */
+ break;
+ case strength_continue:
+ lower = lower_continue;
+ break;
+ case strength_break:
+ assert(this->loop.loop);
+ /* never lower "canonical break" */
+ if(ir->get_next()->is_tail_sentinel() && (this->loop.nesting_depth == 0
+ || (this->loop.nesting_depth == 1 && this->loop.in_if_at_the_end_of_the_loop)))
+ lower = false;
+ else
+ lower = lower_break;
+ break;
+ case strength_return:
+ /* never lower a return at the end of this->function */
+ if(this->function.nesting_depth == 0 && ir->get_next()->is_tail_sentinel())
+ lower = false;
+ else
+ lower = this->function.lower_return;
+ break;
+ }
+ return lower;
+ }
+
+ block_record visit_block(exec_list* list)
+ {
+ /* Note: since visiting a node may change that node's next
+ * pointer, we can't use visit_exec_list(), because
+ * visit_exec_list() caches the node's next pointer before
+ * visiting it. So we use foreach_in_list() instead.
+ *
+ * foreach_in_list() isn't safe if the node being visited gets
+ * removed, but fortunately this visitor doesn't do that.
+ */
+
+ block_record saved_block = this->block;
+ this->block = block_record();
+ foreach_in_list(ir_instruction, node, list) {
+ node->accept(this);
+ }
+ block_record ret = this->block;
+ this->block = saved_block;
+ return ret;
+ }
+
+ virtual void visit(ir_if *ir)
+ {
+ if(this->loop.nesting_depth == 0 && ir->get_next()->is_tail_sentinel())
+ this->loop.in_if_at_the_end_of_the_loop = true;
+
+ ++this->function.nesting_depth;
+ ++this->loop.nesting_depth;
+
+ block_record block_records[2];
+ ir_jump* jumps[2];
+
+ /* Recursively lower nested jumps. This satisfies the
+ * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
+ * unconditional jumps at the end of ir->then_instructions and
+ * ir->else_instructions, which are handled below.
+ */
+ block_records[0] = visit_block(&ir->then_instructions);
+ block_records[1] = visit_block(&ir->else_instructions);
+
+retry: /* we get here if we put code after the if inside a branch */
+
+ /* Determine which of ir->then_instructions and
+ * ir->else_instructions end with an unconditional jump.
+ */
+ for(unsigned i = 0; i < 2; ++i) {
+ exec_list& list = i ? ir->else_instructions : ir->then_instructions;
+ jumps[i] = 0;
+ if(!list.is_empty() && get_jump_strength((ir_instruction*)list.get_tail()))
+ jumps[i] = (ir_jump*)list.get_tail();
+ }
+
+ /* Loop until we have satisfied the CONTAINED_JUMPS_LOWERED
+ * postcondition by lowering jumps in both then_instructions and
+ * else_instructions.
+ */
+ for(;;) {
+ /* Determine the types of the jumps that terminate
+ * ir->then_instructions and ir->else_instructions.
+ */
+ jump_strength jump_strengths[2];
+
+ for(unsigned i = 0; i < 2; ++i) {
+ if(jumps[i]) {
+ jump_strengths[i] = block_records[i].min_strength;
+ assert(jump_strengths[i] == get_jump_strength(jumps[i]));
+ } else
+ jump_strengths[i] = strength_none;
+ }
+
+ /* If both code paths end in a jump, and the jumps are the
+ * same, and we are pulling out jumps, replace them with a
+ * single jump that comes after the if instruction. The new
+ * jump will be visited next, and it will be lowered if
+ * necessary by the loop or conditional that encloses it.
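+ *
+ * For example (sketch):
+ *
+ *    if (c) { s1; break; } else { s2; break; }
+ *
+ * becomes
+ *
+ *    if (c) { s1; } else { s2; }
+ *    break;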
+ */
+ if(pull_out_jumps && jump_strengths[0] == jump_strengths[1]) {
+ bool unify = true;
+ if(jump_strengths[0] == strength_continue)
+ ir->insert_after(new(ir) ir_loop_jump(ir_loop_jump::jump_continue));
+ else if(jump_strengths[0] == strength_break)
+ ir->insert_after(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
+ /* FINISHME: unify returns with identical expressions */
+ else if(jump_strengths[0] == strength_return && this->function.signature->return_type->is_void())
+ ir->insert_after(new(ir) ir_return(NULL));
+ else
+ unify = false;
+
+ if(unify) {
+ jumps[0]->remove();
+ jumps[1]->remove();
+ this->progress = true;
+
+ /* Update jumps[] to reflect the fact that the jumps
+ * are gone, and update block_records[] to reflect the
+ * fact that control can now flow to the next
+ * instruction.
+ */
+ jumps[0] = 0;
+ jumps[1] = 0;
+ block_records[0].min_strength = strength_none;
+ block_records[1].min_strength = strength_none;
+
+ /* The CONTAINED_JUMPS_LOWERED postcondition is now
+ * satisfied, so we can break out of the loop.
+ */
+ break;
+ }
+ }
+
+ /* lower a jump: if both need to be lowered, start with the strongest
+ * one, so that we might later unify the lowered version with the
+ * other one
+ */
+ bool should_lower[2];
+ for(unsigned i = 0; i < 2; ++i)
+ should_lower[i] = should_lower_jump(jumps[i]);
+
+ int lower;
+ if(should_lower[1] && should_lower[0])
+ lower = jump_strengths[1] > jump_strengths[0];
+ else if(should_lower[0])
+ lower = 0;
+ else if(should_lower[1])
+ lower = 1;
+ else
+ /* Neither code path ends in a jump that needs to be
+ * lowered, so the CONTAINED_JUMPS_LOWERED postcondition
+ * is satisfied and we can break out of the loop.
+ */
+ break;
+
+ if(jump_strengths[lower] == strength_return) {
+ /* To lower a return, we create a return flag (if the
+ * function doesn't have one already) and add instructions
+ * that: 1. store the return value (if this function has a
+ * non-void return) and 2. set the return flag
+ */
+ insert_lowered_return((ir_return*)jumps[lower]);
+ if(this->loop.loop) {
+ /* If we are in a loop, replace the return instruction
+ * with a break instruction, and then loop so that the
+ * break instruction can be lowered if necessary.
+ */
+ ir_loop_jump* lowered = 0;
+ lowered = new(ir) ir_loop_jump(ir_loop_jump::jump_break);
+ /* Note: we must update block_records and jumps to
+ * reflect the fact that the control path has been
+ * altered from a return to a break.
+ */
+ block_records[lower].min_strength = strength_break;
+ jumps[lower]->replace_with(lowered);
+ jumps[lower] = lowered;
+ } else {
+ /* If we are not in a loop, we then proceed as we would
+ * for a continue statement (set the execute flag to
+ * false to prevent the rest of the function from
+ * executing).
+ */
+ goto lower_continue;
+ }
+ this->progress = true;
+ } else if(jump_strengths[lower] == strength_break) {
+ /* To lower a break, we create a break flag (if the loop
+ * doesn't have one already) and add an instruction that
+ * sets it.
+ *
+ * Then we proceed as we would for a continue statement
+ * (set the execute flag to false to prevent the rest of
+ * the loop body from executing).
+ *
+ * The visit() function for the loop will ensure that the
+ * break flag is checked after executing the loop body.
+ */
+ jumps[lower]->insert_before(create_lowered_break());
+ goto lower_continue;
+ } else if(jump_strengths[lower] == strength_continue) {
+lower_continue:
+ /* To lower a continue, we create an execute flag (if the
+ * loop doesn't have one already) and replace the continue
+ * with an instruction that clears it.
+ *
+ * Note that this code path gets exercised when lowering
+ * return statements that are not inside a loop, so
+ * this->loop must be initialized even outside of loops.
+ */
+ ir_variable* execute_flag = this->loop.get_execute_flag();
+ jumps[lower]->replace_with(new(ir) ir_assignment(new (ir) ir_dereference_variable(execute_flag), new (ir) ir_constant(false)));
+ /* Note: we must update block_records and jumps to reflect
+ * the fact that the control path has been altered to an
+ * instruction that clears the execute flag.
+ */
+ jumps[lower] = 0;
+ block_records[lower].min_strength = strength_always_clears_execute_flag;
+ block_records[lower].may_clear_execute_flag = true;
+ this->progress = true;
+
+ /* Let the loop run again, in case the other branch of the
+ * if needs to be lowered too.
+ */
+ }
+ }
+
+ /* move a jump out of the if, if possible */
+ if(pull_out_jumps) {
+ /* If one of the branches ends in a jump, and control cannot
+ * fall out the bottom of the other branch, then we can move
+ * the jump after the if.
+ *
+ * Set move_out to the branch we are moving a jump out of.
+ */
+ int move_out = -1;
+ if(jumps[0] && block_records[1].min_strength >= strength_continue)
+ move_out = 0;
+ else if(jumps[1] && block_records[0].min_strength >= strength_continue)
+ move_out = 1;
+
+ if(move_out >= 0)
+ {
+ jumps[move_out]->remove();
+ ir->insert_after(jumps[move_out]);
+ /* Note: we must update block_records and jumps to reflect
+ * the fact that the jump has been moved out of the if.
+ */
+ jumps[move_out] = 0;
+ block_records[move_out].min_strength = strength_none;
+ this->progress = true;
+ }
+ }
+
+ /* Now satisfy the ANALYSIS postcondition by setting
+ * this->block.min_strength and
+ * this->block.may_clear_execute_flag based on the
+ * characteristics of the two branches.
+ */
+ if(block_records[0].min_strength < block_records[1].min_strength)
+ this->block.min_strength = block_records[0].min_strength;
+ else
+ this->block.min_strength = block_records[1].min_strength;
+ this->block.may_clear_execute_flag = this->block.may_clear_execute_flag || block_records[0].may_clear_execute_flag || block_records[1].may_clear_execute_flag;
+
+ /* Now we need to clean up the instructions that follow the
+ * if.
+ *
+ * If those instructions are unreachable, then satisfy the
+ * DEAD_CODE_ELIMINATION postcondition by eliminating them.
+ * Otherwise that postcondition is already satisfied.
+ */
+ if(this->block.min_strength)
+ truncate_after_instruction(ir);
+ else if(this->block.may_clear_execute_flag)
+ {
+ /* If the "if" instruction might clear the execute flag, then
+ * we need to guard any instructions that follow so that they
+ * are only executed if the execute flag is set.
+ *
+ * If one of the branches of the "if" always clears the
+ * execute flag, and the other branch never clears it, then
+ * this is easy: just move all the instructions following the
+ * "if" into the branch that never clears it.
+ */
+ int move_into = -1;
+ if(block_records[0].min_strength && !block_records[1].may_clear_execute_flag)
+ move_into = 1;
+ else if(block_records[1].min_strength && !block_records[0].may_clear_execute_flag)
+ move_into = 0;
+
+ if(move_into >= 0) {
+ assert(!block_records[move_into].min_strength && !block_records[move_into].may_clear_execute_flag); /* otherwise, we just truncated */
+
+ exec_list* list = move_into ? &ir->else_instructions : &ir->then_instructions;
+ exec_node* next = ir->get_next();
+ if(!next->is_tail_sentinel()) {
+ move_outer_block_inside(ir, list);
+
+ /* If any instructions moved, then we need to visit
+ * them (since they are now inside the "if"). Since
+ * block_records[move_into] is in its default state
+ * (see assertion above), we can safely replace
+ * block_records[move_into] with the result of this
+ * analysis.
+ */
+ exec_list list;
+ list.head_sentinel.next = next;
+ block_records[move_into] = visit_block(&list);
+
+ /*
+ * Then we need to re-start our jump lowering, since one
+ * of the instructions we moved might be a jump that
+ * needs to be lowered.
+ */
+ this->progress = true;
+ goto retry;
+ }
+ } else {
+ /* If we get here, then the simple case didn't apply; we
+ * need to actually guard the instructions that follow.
+ *
+ * To avoid creating unnecessarily-deep nesting, first
+ * look through the instructions that follow and unwrap
+ * any instructions that are already wrapped in the
+ * appropriate guard.
+ */
+ ir_instruction* ir_after;
+ for(ir_after = (ir_instruction*)ir->get_next(); !ir_after->is_tail_sentinel();)
+ {
+ ir_if* ir_if = ir_after->as_if();
+ if(ir_if && ir_if->else_instructions.is_empty()) {
+ ir_dereference_variable* ir_if_cond_deref = ir_if->condition->as_dereference_variable();
+ if(ir_if_cond_deref && ir_if_cond_deref->var == this->loop.execute_flag) {
+ ir_instruction* ir_next = (ir_instruction*)ir_after->get_next();
+ ir_after->insert_before(&ir_if->then_instructions);
+ ir_after->remove();
+ ir_after = ir_next;
+ continue;
+ }
+ }
+ ir_after = (ir_instruction*)ir_after->get_next();
+
+ /* only set this if we find any unprotected instruction */
+ this->progress = true;
+ }
+
+ /* Then, wrap all the instructions that follow in a single
+ * guard.
+ */
+ if(!ir->get_next()->is_tail_sentinel()) {
+ assert(this->loop.execute_flag);
+ ir_if* if_execute = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.execute_flag));
+ move_outer_block_inside(ir, &if_execute->then_instructions);
+ ir->insert_after(if_execute);
+ }
+ }
+ }
+ --this->loop.nesting_depth;
+ --this->function.nesting_depth;
+ }
+
+ virtual void visit(ir_loop *ir)
+ {
+ /* Visit the body of the loop, with a fresh data structure in
+ * this->loop so that the analysis we do here won't bleed into
+ * enclosing loops.
+ *
+ * We assume that all code after a loop is reachable from the
+ * loop (see comments on enum jump_strength), so the
+ * DEAD_CODE_ELIMINATION postcondition is automatically
+ * satisfied, as is the block.min_strength portion of the
+ * ANALYSIS postcondition.
+ *
+ * The block.may_clear_execute_flag portion of the ANALYSIS
+ * postcondition is automatically satisfied because execute
+ * flags do not propagate outside of loops.
+ *
+ * The loop.may_set_return_flag portion of the ANALYSIS
+ * postcondition is handled below.
+ */
+ ++this->function.nesting_depth;
+ loop_record saved_loop = this->loop;
+ this->loop = loop_record(this->function.signature, ir);
+
+ /* Recursively lower nested jumps. This satisfies the
+ * CONTAINED_JUMPS_LOWERED postcondition, except in the case of
+ * an unconditional continue or return at the bottom of the
+ * loop, which are handled below.
+ */
+ block_record body = visit_block(&ir->body_instructions);
+
+ /* If the loop ends in an unconditional continue, eliminate it
+ * because it is redundant.
+ */
+ ir_instruction *ir_last
+ = (ir_instruction *) ir->body_instructions.get_tail();
+ if (get_jump_strength(ir_last) == strength_continue) {
+ ir_last->remove();
+ }
+
+ /* If the loop ends in an unconditional return, and we are
+ * lowering returns, lower it.
+ */
+ if (this->function.lower_return)
+ lower_return_unconditionally(ir_last);
+
+ if(body.min_strength >= strength_break) {
+ /* FINISHME: If the min_strength of the loop body is
+ * strength_break or strength_return, that means that it
+ * isn't a loop at all, since control flow always leaves the
+ * body of the loop via break or return. In principle the
+ * loop could be eliminated in this case. This optimization
+ * is not implemented yet.
+ */
+ }
+
+ if(this->loop.break_flag) {
+ /* We only get here if we are lowering breaks */
+ assert (lower_break);
+
+ /* If a break flag was generated while visiting the body of
+ * the loop, then at least one break was lowered, so we need
+ * to generate an if statement at the end of the loop that
+ * does a "break" if the break flag is set. The break we
+ * generate won't violate the CONTAINED_JUMPS_LOWERED
+ * postcondition, because should_lower_jump() always returns
+ * false for a break that happens at the end of a loop.
+ *
+ * However, if the loop already ends in a conditional or
+ * unconditional break, then we need to lower that break,
+ * because it won't be at the end of the loop anymore.
+ */
+ lower_final_breaks(&ir->body_instructions);
+
+ ir_if* break_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->loop.break_flag));
+ break_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
+ ir->body_instructions.push_tail(break_if);
+ }
+
+ /* If the body of the loop may set the return flag, then at
+ * least one return was lowered to a break, so we need to ensure
+ * that the return flag is checked after the body of the loop is
+ * executed.
+ */
+ if(this->loop.may_set_return_flag) {
+ assert(this->function.return_flag);
+ /* Generate the if statement to check the return flag */
+ ir_if* return_if = new(ir) ir_if(new(ir) ir_dereference_variable(this->function.return_flag));
+ /* Note: we also need to propagate the knowledge that the
+ * return flag may get set to the outer context. This
+ * satisfies the loop.may_set_return_flag part of the
+ * ANALYSIS postcondition.
+ */
+ saved_loop.may_set_return_flag = true;
+ if(saved_loop.loop)
+ /* If this loop is nested inside another one, then the if
+ * statement that we generated should break out of that
+ * loop if the return flag is set. Caller will lower that
+ * break statement if necessary.
+ */
+ return_if->then_instructions.push_tail(new(ir) ir_loop_jump(ir_loop_jump::jump_break));
+ else {
+ /* Otherwise, ensure that the instructions that follow are only
+ * executed if the return flag is clear. We can do that by moving
+ * those instructions into the else clause of the generated if
+ * statement.
+ */
+ move_outer_block_inside(ir, &return_if->else_instructions);
+
+ /* In case the loop is embedded inside an if, add a new return to
+ * the return-flag "then" branch and let a future pass tidy it up.
+ */
+ if (this->function.signature->return_type->is_void())
+ return_if->then_instructions.push_tail(new(ir) ir_return(NULL));
+ else {
+ assert(this->function.return_value);
+ ir_variable* return_value = this->function.return_value;
+ return_if->then_instructions.push_tail(
+ new(ir) ir_return(new(ir) ir_dereference_variable(return_value)));
+ }
+ }
+
+ ir->insert_after(return_if);
+ }
+
+ this->loop = saved_loop;
+ --this->function.nesting_depth;
+ }
+
+ virtual void visit(ir_function_signature *ir)
+ {
+ /* these are not strictly necessary */
+ assert(!this->function.signature);
+ assert(!this->loop.loop);
+
+ bool lower_return;
+ if (strcmp(ir->function_name(), "main") == 0)
+ lower_return = lower_main_return;
+ else
+ lower_return = lower_sub_return;
+
+ function_record saved_function = this->function;
+ loop_record saved_loop = this->loop;
+ this->function = function_record(ir, lower_return);
+ this->loop = loop_record(ir);
+
+ assert(!this->loop.loop);
+
+ /* Visit the body of the function to lower any jumps that occur
+ * in it, except possibly an unconditional return statement at
+ * the end of it.
+ */
+ visit_block(&ir->body);
+
+ /* If the body ended in an unconditional return of non-void,
+ * then we don't need to lower it because it's the one canonical
+ * return.
+ *
+ * If the body ended in a return of void, eliminate it because
+ * it is redundant.
+ */
+ if (ir->return_type->is_void() &&
+ get_jump_strength((ir_instruction *) ir->body.get_tail())) {
+ ir_jump *jump = (ir_jump *) ir->body.get_tail();
+ assert (jump->ir_type == ir_type_return);
+ jump->remove();
+ }
+
+ if(this->function.return_value)
+ ir->body.push_tail(new(ir) ir_return(new (ir) ir_dereference_variable(this->function.return_value)));
+
+ this->loop = saved_loop;
+ this->function = saved_function;
+ }
+
+ virtual void visit(class ir_function * ir)
+ {
+ visit_block(&ir->signatures);
+ }
+};
+
+} /* anonymous namespace */
+
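+/* Typical invocation (illustrative only; the flag choice is hypothetical):
+ *
+ *    do_lower_jumps(ir, true, true, false, true, true);
+ *
+ * pulls jumps out of ifs and lowers sub-function returns, continues, and
+ * breaks, repeating internally until no further progress is made.
+ */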
+bool
+do_lower_jumps(exec_list *instructions, bool pull_out_jumps, bool lower_sub_return, bool lower_main_return, bool lower_continue, bool lower_break)
+{
+ ir_lower_jumps_visitor v;
+ v.pull_out_jumps = pull_out_jumps;
+ v.lower_continue = lower_continue;
+ v.lower_break = lower_break;
+ v.lower_sub_return = lower_sub_return;
+ v.lower_main_return = lower_main_return;
+
+ bool progress_ever = false;
+ do {
+ v.progress = false;
+ visit_exec_list(instructions, &v);
+ progress_ever = v.progress || progress_ever;
+ } while (v.progress);
+
+ return progress_ever;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp
new file mode 100644
index 0000000000..13d3ccbadd
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp
@@ -0,0 +1,441 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_mat_op_to_vec.cpp
+ *
+ * Breaks matrix operation expressions down to a series of vector operations.
+ *
+ * Generally this is how we have to codegen matrix operations for a
+ * GPU, so this gives us the chance to constant fold operations on a
+ * column or row.
+ */
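+
+/* For example (informal), a mat2 addition
+ *
+ *    m = a + b;
+ *
+ * is decomposed into column-wise vector additions:
+ *
+ *    m[0] = a[0] + b[0];
+ *    m[1] = a[1] + b[1];
+ */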
+
+#include "ir.h"
+#include "ir_expression_flattening.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+class ir_mat_op_to_vec_visitor : public ir_hierarchical_visitor {
+public:
+ ir_mat_op_to_vec_visitor()
+ {
+ this->made_progress = false;
+ this->mem_ctx = NULL;
+ }
+
+ ir_visitor_status visit_leave(ir_assignment *);
+
+ ir_dereference *get_column(ir_dereference *val, int col);
+ ir_rvalue *get_element(ir_dereference *val, int col, int row);
+
+ void do_mul_mat_mat(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_mul_mat_vec(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_mul_vec_mat(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_mul_mat_scalar(ir_dereference *result,
+ ir_dereference *a, ir_dereference *b);
+ void do_equal_mat_mat(ir_dereference *result, ir_dereference *a,
+ ir_dereference *b, bool test_equal);
+
+ void *mem_ctx;
+ bool made_progress;
+};
+
+} /* anonymous namespace */
+
+static bool
+mat_op_to_vec_predicate(ir_instruction *ir)
+{
+ ir_expression *expr = ir->as_expression();
+ unsigned int i;
+
+ if (!expr)
+ return false;
+
+ for (i = 0; i < expr->num_operands; i++) {
+ if (expr->operands[i]->type->is_matrix())
+ return true;
+ }
+
+ return false;
+}
+
+bool
+do_mat_op_to_vec(exec_list *instructions)
+{
+ ir_mat_op_to_vec_visitor v;
+
+ /* Pull out any matrix expression to a separate assignment to a
+ * temp. This will make our handling of the breakdown to
+ * operations on the matrix's vector components much easier.
+ */
+ do_expression_flattening(instructions, mat_op_to_vec_predicate);
+
+ visit_list_elements(&v, instructions);
+
+ return v.made_progress;
+}
+
+ir_rvalue *
+ir_mat_op_to_vec_visitor::get_element(ir_dereference *val, int col, int row)
+{
+ val = get_column(val, col);
+
+ return new(mem_ctx) ir_swizzle(val, row, 0, 0, 0, 1);
+}
+
+ir_dereference *
+ir_mat_op_to_vec_visitor::get_column(ir_dereference *val, int col)
+{
+ val = val->clone(mem_ctx, NULL);
+
+ if (val->type->is_matrix()) {
+ val = new(mem_ctx) ir_dereference_array(val,
+ new(mem_ctx) ir_constant(col));
+ }
+
+ return val;
+}
+
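+/* Computes the result column by column:
+ *
+ *    result[c] = a[0] * b[c][0] + ... + a[n-1] * b[c][n-1]
+ *
+ * (n = matrix_columns of a), i.e. each result column is a linear
+ * combination of the columns of a weighted by the entries of b's
+ * column c.
+ */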
+void
+ir_mat_op_to_vec_visitor::do_mul_mat_mat(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
+{
+ unsigned b_col, i;
+ ir_assignment *assign;
+ ir_expression *expr;
+
+ for (b_col = 0; b_col < b->type->matrix_columns; b_col++) {
+ /* first column */
+ expr = new(mem_ctx) ir_expression(ir_binop_mul,
+ get_column(a, 0),
+ get_element(b, b_col, 0));
+
+ /* following columns */
+ for (i = 1; i < a->type->matrix_columns; i++) {
+ ir_expression *mul_expr;
+
+ mul_expr = new(mem_ctx) ir_expression(ir_binop_mul,
+ get_column(a, i),
+ get_element(b, b_col, i));
+ expr = new(mem_ctx) ir_expression(ir_binop_add,
+ expr,
+ mul_expr);
+ }
+
+ assign = new(mem_ctx) ir_assignment(get_column(result, b_col), expr);
+ base_ir->insert_before(assign);
+ }
+}
+
+void
+ir_mat_op_to_vec_visitor::do_mul_mat_vec(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
+{
+ unsigned i;
+ ir_assignment *assign;
+ ir_expression *expr;
+
+ /* first column */
+ expr = new(mem_ctx) ir_expression(ir_binop_mul,
+ get_column(a, 0),
+ get_element(b, 0, 0));
+
+ /* following columns */
+ for (i = 1; i < a->type->matrix_columns; i++) {
+ ir_expression *mul_expr;
+
+ mul_expr = new(mem_ctx) ir_expression(ir_binop_mul,
+ get_column(a, i),
+ get_element(b, 0, i));
+ expr = new(mem_ctx) ir_expression(ir_binop_add, expr, mul_expr);
+ }
+
+ result = result->clone(mem_ctx, NULL);
+ assign = new(mem_ctx) ir_assignment(result, expr);
+ base_ir->insert_before(assign);
+}
+
+void
+ir_mat_op_to_vec_visitor::do_mul_vec_mat(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
+{
+ unsigned i;
+
+ for (i = 0; i < b->type->matrix_columns; i++) {
+ ir_rvalue *column_result;
+ ir_expression *column_expr;
+ ir_assignment *column_assign;
+
+ column_result = result->clone(mem_ctx, NULL);
+ column_result = new(mem_ctx) ir_swizzle(column_result, i, 0, 0, 0, 1);
+
+ column_expr = new(mem_ctx) ir_expression(ir_binop_dot,
+ a->clone(mem_ctx, NULL),
+ get_column(b, i));
+
+ column_assign = new(mem_ctx) ir_assignment(column_result,
+ column_expr);
+ base_ir->insert_before(column_assign);
+ }
+}
+
+void
+ir_mat_op_to_vec_visitor::do_mul_mat_scalar(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b)
+{
+ unsigned i;
+
+ for (i = 0; i < a->type->matrix_columns; i++) {
+ ir_expression *column_expr;
+ ir_assignment *column_assign;
+
+ column_expr = new(mem_ctx) ir_expression(ir_binop_mul,
+ get_column(a, i),
+ b->clone(mem_ctx, NULL));
+
+ column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
+ column_expr);
+ base_ir->insert_before(column_assign);
+ }
+}
+
+void
+ir_mat_op_to_vec_visitor::do_equal_mat_mat(ir_dereference *result,
+ ir_dereference *a,
+ ir_dereference *b,
+ bool test_equal)
+{
+ /* This essentially implements the following GLSL:
+ *
+ * bool equal(mat4 a, mat4 b)
+ * {
+ * return !any(bvec4(a[0] != b[0],
+ * a[1] != b[1],
+ * a[2] != b[2],
+ * a[3] != b[3]));
+ * }
+ *
+ * bool nequal(mat4 a, mat4 b)
+ * {
+ * return any(bvec4(a[0] != b[0],
+ * a[1] != b[1],
+ * a[2] != b[2],
+ * a[3] != b[3]));
+ * }
+ */
+ const unsigned columns = a->type->matrix_columns;
+ const glsl_type *const bvec_type =
+ glsl_type::get_instance(GLSL_TYPE_BOOL, columns, 1);
+
+ ir_variable *const tmp_bvec =
+ new(this->mem_ctx) ir_variable(bvec_type, "mat_cmp_bvec",
+ ir_var_temporary);
+ this->base_ir->insert_before(tmp_bvec);
+
+ for (unsigned i = 0; i < columns; i++) {
+ ir_expression *const cmp =
+ new(this->mem_ctx) ir_expression(ir_binop_any_nequal,
+ get_column(a, i),
+ get_column(b, i));
+
+ ir_dereference *const lhs =
+ new(this->mem_ctx) ir_dereference_variable(tmp_bvec);
+
+ ir_assignment *const assign =
+ new(this->mem_ctx) ir_assignment(lhs, cmp, NULL, (1U << i));
+
+ this->base_ir->insert_before(assign);
+ }
+
+ ir_rvalue *const val = new(this->mem_ctx) ir_dereference_variable(tmp_bvec);
+ uint8_t vec_elems = val->type->vector_elements;
+ ir_expression *any =
+ new(this->mem_ctx) ir_expression(ir_binop_any_nequal, val,
+ new(this->mem_ctx) ir_constant(false,
+ vec_elems));
+
+ if (test_equal)
+ any = new(this->mem_ctx) ir_expression(ir_unop_logic_not, any);
+
+ ir_assignment *const assign =
+ new(mem_ctx) ir_assignment(result->clone(mem_ctx, NULL), any);
+ base_ir->insert_before(assign);
+}
+
+static bool
+has_matrix_operand(const ir_expression *expr, unsigned &columns)
+{
+ for (unsigned i = 0; i < expr->num_operands; i++) {
+ if (expr->operands[i]->type->is_matrix()) {
+ columns = expr->operands[i]->type->matrix_columns;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+ir_visitor_status
+ir_mat_op_to_vec_visitor::visit_leave(ir_assignment *orig_assign)
+{
+ ir_expression *orig_expr = orig_assign->rhs->as_expression();
+ unsigned int i, matrix_columns = 1;
+ ir_dereference *op[2];
+
+ if (!orig_expr)
+ return visit_continue;
+
+ if (!has_matrix_operand(orig_expr, matrix_columns))
+ return visit_continue;
+
+ assert(orig_expr->num_operands <= 2);
+
+ mem_ctx = ralloc_parent(orig_assign);
+
+ ir_dereference_variable *result =
+ orig_assign->lhs->as_dereference_variable();
+ assert(result);
+
+ /* Store the expression operands in temps so we can use them
+ * multiple times.
+ */
+ for (i = 0; i < orig_expr->num_operands; i++) {
+ ir_assignment *assign;
+ ir_dereference *deref = orig_expr->operands[i]->as_dereference();
+
+ /* Avoid making a temporary if we don't need one to avoid aliasing. */
+ if (deref &&
+ deref->variable_referenced() != result->variable_referenced()) {
+ op[i] = deref;
+ continue;
+ }
+
+ /* Otherwise, store the operand in a temporary so it can be used
+ * multiple times without aliasing the result.
+ */
+ ir_variable *var = new(mem_ctx) ir_variable(orig_expr->operands[i]->type,
+ "mat_op_to_vec",
+ ir_var_temporary);
+ base_ir->insert_before(var);
+
+ /* Note that we use this dereference for the assignment. That means
+ * that others that want to use op[i] have to clone the deref.
+ */
+ op[i] = new(mem_ctx) ir_dereference_variable(var);
+ assign = new(mem_ctx) ir_assignment(op[i], orig_expr->operands[i]);
+ base_ir->insert_before(assign);
+ }
+
+ /* OK, time to break down this matrix operation. */
+ switch (orig_expr->operation) {
+ case ir_unop_d2f:
+ case ir_unop_f2d:
+ case ir_unop_f2f16:
+ case ir_unop_f2fmp:
+ case ir_unop_f162f:
+ case ir_unop_neg: {
+ /* Apply the operation to each column. */
+ for (i = 0; i < matrix_columns; i++) {
+ ir_expression *column_expr;
+ ir_assignment *column_assign;
+
+ column_expr = new(mem_ctx) ir_expression(orig_expr->operation,
+ get_column(op[0], i));
+
+ column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
+ column_expr);
+ assert(column_assign->write_mask != 0);
+ base_ir->insert_before(column_assign);
+ }
+ break;
+ }
+ case ir_binop_add:
+ case ir_binop_sub:
+ case ir_binop_div:
+ case ir_binop_mod: {
+ /* For most operations, the matrix version just goes through the
+ * columns and applies the operation to each column.
+ */
+ for (i = 0; i < matrix_columns; i++) {
+ ir_expression *column_expr;
+ ir_assignment *column_assign;
+
+ column_expr = new(mem_ctx) ir_expression(orig_expr->operation,
+ get_column(op[0], i),
+ get_column(op[1], i));
+
+ column_assign = new(mem_ctx) ir_assignment(get_column(result, i),
+ column_expr);
+ assert(column_assign->write_mask != 0);
+ base_ir->insert_before(column_assign);
+ }
+ break;
+ }
+ case ir_binop_mul:
+ if (op[0]->type->is_matrix()) {
+ if (op[1]->type->is_matrix()) {
+ do_mul_mat_mat(result, op[0], op[1]);
+ } else if (op[1]->type->is_vector()) {
+ do_mul_mat_vec(result, op[0], op[1]);
+ } else {
+ assert(op[1]->type->is_scalar());
+ do_mul_mat_scalar(result, op[0], op[1]);
+ }
+ } else {
+ assert(op[1]->type->is_matrix());
+ if (op[0]->type->is_vector()) {
+ do_mul_vec_mat(result, op[0], op[1]);
+ } else {
+ assert(op[0]->type->is_scalar());
+ do_mul_mat_scalar(result, op[1], op[0]);
+ }
+ }
+ break;
+
+ case ir_binop_all_equal:
+ case ir_binop_any_nequal:
+ do_equal_mat_mat(result, op[1], op[0],
+ (orig_expr->operation == ir_binop_all_equal));
+ break;
+
+ default:
+ printf("FINISHME: Handle matrix operation for %s\n",
+ ir_expression_operation_strings[orig_expr->operation]);
+ abort();
+ }
+ orig_assign->remove();
+ this->made_progress = true;
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp
new file mode 100644
index 0000000000..01c50932a9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_named_interface_blocks.cpp
+ *
+ * This lowering pass converts all interface blocks with instance names
+ * into interface blocks without an instance name.
+ *
+ * For example, the following shader:
+ *
+ * out block {
+ * float block_var;
+ * } inst_name;
+ *
+ * main()
+ * {
+ * inst_name.block_var = 0.0;
+ * }
+ *
+ * Is rewritten to:
+ *
+ * out block {
+ * float block_var;
+ * };
+ *
+ * main()
+ * {
+ * block_var = 0.0;
+ * }
+ *
+ * This takes place after the shader code has already been verified with
+ * the interface name in place.
+ *
+ * The linking phase will use the interface block name rather than the
+ * interface's instance name when linking interfaces.
+ *
+ * This modification to the ir allows our currently existing dead code
+ * elimination to work with interface blocks without changes.
+ */
+
+#include "glsl_symbol_table.h"
+#include "ir.h"
+#include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+#include "util/hash_table.h"
+#include "main/mtypes.h"
+
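+/* Informal sketch: for a variable of type block[4][2] whose field idx has
+ * type float, process_array_type() rebuilds the array shape around the
+ * field's type, yielding a float[4][2]-shaped array.
+ */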
+static const glsl_type *
+process_array_type(const glsl_type *type, unsigned idx)
+{
+ const glsl_type *element_type = type->fields.array;
+ if (element_type->is_array()) {
+ const glsl_type *new_array_type = process_array_type(element_type, idx);
+ return glsl_type::get_array_instance(new_array_type, type->length);
+ } else {
+ return glsl_type::get_array_instance(
+ element_type->fields.structure[idx].type, type->length);
+ }
+}
+
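+/* Informal sketch: given a nested array dereference such as inst[i][j],
+ * this rebuilds the same chain of array indices on top of deref_var,
+ * preserving their order.
+ */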
+static ir_rvalue *
+process_array_ir(void * const mem_ctx,
+ ir_dereference_array *deref_array_prev,
+ ir_rvalue *deref_var)
+{
+ ir_dereference_array *deref_array =
+ deref_array_prev->array->as_dereference_array();
+
+ if (deref_array == NULL) {
+ return new(mem_ctx) ir_dereference_array(deref_var,
+ deref_array_prev->array_index);
+ } else {
+ deref_array = (ir_dereference_array *) process_array_ir(mem_ctx,
+ deref_array,
+ deref_var);
+ return new(mem_ctx) ir_dereference_array(deref_array,
+ deref_array_prev->array_index);
+ }
+}
+
+namespace {
+
+class flatten_named_interface_blocks_declarations : public ir_rvalue_visitor
+{
+public:
+ void * const mem_ctx;
+ hash_table *interface_namespace;
+
+ flatten_named_interface_blocks_declarations(void *mem_ctx)
+ : mem_ctx(mem_ctx),
+ interface_namespace(NULL)
+ {
+ }
+
+ void run(exec_list *instructions);
+
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ virtual ir_visitor_status visit_leave(ir_expression *);
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+};
+
+} /* anonymous namespace */
+
+void
+flatten_named_interface_blocks_declarations::run(exec_list *instructions)
+{
+ interface_namespace = _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+
+ /* First pass: adjust instance block variables with an instance name
+ * to not have an instance name.
+ *
+ * The interface block variables are stored in the interface_namespace
+ * hash table so they can be used in the second pass.
+ */
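+ /* Keys take the form "<mode> <block>.<instance>.<field>", e.g.
+ * "out block.inst_name.block_var" for the example in the file header.
+ */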
+ foreach_in_list_safe(ir_instruction, node, instructions) {
+ ir_variable *var = node->as_variable();
+ if (!var || !var->is_interface_instance())
+ continue;
+
+ /* It should be possible to handle uniforms during this pass,
+ * but this would require changes to the other uniform block
+ * support code.
+ */
+ if (var->data.mode == ir_var_uniform ||
+ var->data.mode == ir_var_shader_storage)
+ continue;
+
+ const glsl_type * iface_t = var->type->without_array();
+ exec_node *insert_pos = var;
+
+ assert (iface_t->is_interface());
+
+ for (unsigned i = 0; i < iface_t->length; i++) {
+ const char * field_name = iface_t->fields.structure[i].name;
+ char *iface_field_name =
+ ralloc_asprintf(mem_ctx, "%s %s.%s.%s",
+ var->data.mode == ir_var_shader_in ? "in" : "out",
+ iface_t->name, var->name, field_name);
+
+ hash_entry *entry = _mesa_hash_table_search(interface_namespace,
+ iface_field_name);
+ ir_variable *found_var = entry ? (ir_variable *) entry->data : NULL;
+ if (!found_var) {
+ ir_variable *new_var;
+ char *var_name =
+ ralloc_strdup(mem_ctx, iface_t->fields.structure[i].name);
+ if (!var->type->is_array()) {
+ new_var =
+ new(mem_ctx) ir_variable(iface_t->fields.structure[i].type,
+ var_name,
+ (ir_variable_mode) var->data.mode);
+ } else {
+ const glsl_type *new_array_type =
+ process_array_type(var->type, i);
+ new_var =
+ new(mem_ctx) ir_variable(new_array_type,
+ var_name,
+ (ir_variable_mode) var->data.mode);
+ }
+ new_var->data.location = iface_t->fields.structure[i].location;
+ new_var->data.explicit_location = (new_var->data.location >= 0);
+ new_var->data.offset = iface_t->fields.structure[i].offset;
+ new_var->data.explicit_xfb_offset =
+ (iface_t->fields.structure[i].offset >= 0);
+ new_var->data.xfb_buffer =
+ iface_t->fields.structure[i].xfb_buffer;
+ new_var->data.explicit_xfb_buffer =
+ iface_t->fields.structure[i].explicit_xfb_buffer;
+ new_var->data.interpolation =
+ iface_t->fields.structure[i].interpolation;
+ new_var->data.centroid = iface_t->fields.structure[i].centroid;
+ new_var->data.sample = iface_t->fields.structure[i].sample;
+ new_var->data.patch = iface_t->fields.structure[i].patch;
+ new_var->data.stream = var->data.stream;
+ new_var->data.how_declared = var->data.how_declared;
+ new_var->data.from_named_ifc_block = 1;
+
+ new_var->init_interface_type(var->type);
+ _mesa_hash_table_insert(interface_namespace, iface_field_name,
+ new_var);
+ insert_pos->insert_after(new_var);
+ insert_pos = new_var;
+ }
+ }
+ var->remove();
+ }
+
+ /* Second pass: visit all ir_dereference_record instances, and if they
+    * reference an interface block, then flatten the reference out.
+ */
+ visit_list_elements(this, instructions);
+ _mesa_hash_table_destroy(interface_namespace, NULL);
+ interface_namespace = NULL;
+}
+
+ir_visitor_status
+flatten_named_interface_blocks_declarations::visit_leave(ir_assignment *ir)
+{
+ ir_dereference_record *lhs_rec = ir->lhs->as_dereference_record();
+
+ ir_variable *lhs_var = ir->lhs->variable_referenced();
+ if (lhs_var && lhs_var->get_interface_type()) {
+ lhs_var->data.assigned = 1;
+ }
+
+ if (lhs_rec) {
+ ir_rvalue *lhs_rec_tmp = lhs_rec;
+ handle_rvalue(&lhs_rec_tmp);
+ if (lhs_rec_tmp != lhs_rec) {
+ ir->set_lhs(lhs_rec_tmp);
+ }
+
+ ir_variable *lhs_var = lhs_rec_tmp->variable_referenced();
+ if (lhs_var) {
+ lhs_var->data.assigned = 1;
+ }
+ }
+ return rvalue_visit(ir);
+}
+
+ir_visitor_status
+flatten_named_interface_blocks_declarations::visit_leave(ir_expression *ir)
+{
+ ir_visitor_status status = rvalue_visit(ir);
+
+ if (ir->operation == ir_unop_interpolate_at_centroid ||
+ ir->operation == ir_binop_interpolate_at_offset ||
+ ir->operation == ir_binop_interpolate_at_sample) {
+ const ir_rvalue *val = ir->operands[0];
+
+ /* This disables varying packing for this input. */
+ val->variable_referenced()->data.must_be_shader_input = 1;
+ }
+
+ return status;
+}
+
+void
+flatten_named_interface_blocks_declarations::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (*rvalue == NULL)
+ return;
+
+ ir_dereference_record *ir = (*rvalue)->as_dereference_record();
+ if (ir == NULL)
+ return;
+
+ ir_variable *var = ir->variable_referenced();
+ if (var == NULL)
+ return;
+
+ if (!var->is_interface_instance())
+ return;
+
+   /* It should be possible to handle uniforms during this pass,
+    * but this will require changes to the other uniform block
+    * support code.
+    */
+ if (var->data.mode == ir_var_uniform || var->data.mode == ir_var_shader_storage)
+ return;
+
+ if (var->get_interface_type() != NULL) {
+ char *iface_field_name =
+ ralloc_asprintf(mem_ctx, "%s %s.%s.%s",
+ var->data.mode == ir_var_shader_in ? "in" : "out",
+ var->get_interface_type()->name,
+ var->name,
+ ir->record->type->fields.structure[ir->field_idx].name);
+
+ /* Find the variable in the set of flattened interface blocks */
+ hash_entry *entry = _mesa_hash_table_search(interface_namespace,
+ iface_field_name);
+ assert(entry);
+ ir_variable *found_var = (ir_variable *) entry->data;
+
+ ir_dereference_variable *deref_var =
+ new(mem_ctx) ir_dereference_variable(found_var);
+
+ ir_dereference_array *deref_array =
+ ir->record->as_dereference_array();
+ if (deref_array != NULL) {
+ *rvalue = process_array_ir(mem_ctx, deref_array,
+ (ir_rvalue *)deref_var);
+ } else {
+ *rvalue = deref_var;
+ }
+ }
+}
+
+void
+lower_named_interface_blocks(void *mem_ctx, gl_linked_shader *shader)
+{
+ flatten_named_interface_blocks_declarations v_decl(mem_ctx);
+ v_decl.run(shader->ir);
+}
+
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp
new file mode 100644
index 0000000000..96486c3a71
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_offset_array.cpp
+ *
+ * IR lowering pass to decompose an ir_texture ir_tg4 operation with an
+ * array of offsets into four ir_tg4 operations, each with a single ivec2
+ * offset, select the .w component of each result, and return those four
+ * values packed into a gvec4.
+ *
+ * \author Chris Forbes <chrisf@ijw.co.nz>
+ */
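+
+/* A sketch of the transformation in GLSL terms (names are illustrative):
+ *
+ *     gvec4 g = textureGatherOffsets(s, p, offsets);
+ *
+ * becomes, roughly:
+ *
+ *     gvec4 result;
+ *     result.x = textureGatherOffset(s, p, offsets[0]).w;
+ *     result.y = textureGatherOffset(s, p, offsets[1]).w;
+ *     result.z = textureGatherOffset(s, p, offsets[2]).w;
+ *     result.w = textureGatherOffset(s, p, offsets[3]).w;
+ */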
+
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+
+using namespace ir_builder;
+
+class lower_offset_array_visitor : public ir_rvalue_visitor {
+public:
+ lower_offset_array_visitor()
+ {
+ progress = false;
+ }
+
+ void handle_rvalue(ir_rvalue **rv);
+
+ bool progress;
+};
+
+void
+lower_offset_array_visitor::handle_rvalue(ir_rvalue **rv)
+{
+ if (*rv == NULL || (*rv)->ir_type != ir_type_texture)
+ return;
+
+ ir_texture *ir = (ir_texture *) *rv;
+ if (ir->op != ir_tg4 || !ir->offset || !ir->offset->type->is_array())
+ return;
+
+ void *mem_ctx = ralloc_parent(ir);
+
+ ir_variable *var =
+ new (mem_ctx) ir_variable(ir->type, "result", ir_var_temporary);
+ base_ir->insert_before(var);
+
+ for (int i = 0; i < 4; i++) {
+ ir_texture *tex = ir->clone(mem_ctx, NULL);
+ tex->offset = new (mem_ctx) ir_dereference_array(tex->offset,
+ new (mem_ctx) ir_constant(i));
+
+ base_ir->insert_before(assign(var, swizzle_w(tex), 1 << i));
+ }
+
+ *rv = new (mem_ctx) ir_dereference_variable(var);
+
+ progress = true;
+}
+
+bool
+lower_offset_arrays(exec_list *instructions)
+{
+ lower_offset_array_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp
new file mode 100644
index 0000000000..7a182130b2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright © 2012 Vincent Lejeune
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "util/hash_table.h"
+
+/**
+ * \file lower_output_reads.cpp
+ *
+ * In GLSL, shader output variables (such as varyings) can be both read and
+ * written. However, on some hardware, reading an output register causes
+ * trouble.
+ *
+ * This pass creates temporary shadow copies of every (used) shader output,
+ * and replaces all accesses to use those instead. It also adds code to the
+ * main() function to copy the final values to the actual shader outputs.
+ */
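+
+/* A sketch of the effect (hypothetical vertex shader; the temporary's name
+ * is illustrative, the pass actually reuses the output's own name):
+ *
+ *     out vec4 pos;
+ *     void main() { pos = a; pos = pos * 2.0; }
+ *
+ * is rewritten so every access goes through a shadow temporary:
+ *
+ *     out vec4 pos;
+ *     void main() { vec4 tmp; tmp = a; tmp = tmp * 2.0; pos = tmp; }
+ */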
+
+namespace {
+
+class output_read_remover : public ir_hierarchical_visitor {
+protected:
+ /**
+ * A hash table mapping from the original ir_variable shader outputs
+ * (ir_var_shader_out mode) to the new temporaries to be used instead.
+ */
+ hash_table *replacements;
+
+ unsigned stage;
+public:
+ output_read_remover(unsigned stage);
+ ~output_read_remover();
+ virtual ir_visitor_status visit(class ir_dereference_variable *);
+ virtual ir_visitor_status visit_leave(class ir_emit_vertex *);
+ virtual ir_visitor_status visit_leave(class ir_return *);
+ virtual ir_visitor_status visit_leave(class ir_function_signature *);
+};
+
+} /* anonymous namespace */
+
+/**
+ * Hash function for the output variables - computes the hash of the name.
+ * NOTE: We hash the name string rather than the pointer so that the hash
+ * doesn't depend on any random factors; otherwise output_read_remover
+ * could emit the copy-back assignments in a nondeterministic order.
+ *
+ * NOTE: If you want to reuse this function, take into account that
+ * variable names are generally non-unique.
+ */
+static unsigned
+hash_table_var_hash(const void *key)
+{
+ const ir_variable * var = static_cast<const ir_variable *>(key);
+ return _mesa_hash_string(var->name);
+}
+
+output_read_remover::output_read_remover(unsigned stage)
+{
+ this->stage = stage;
+ replacements = _mesa_hash_table_create(NULL, hash_table_var_hash,
+ _mesa_key_pointer_equal);
+}
+
+output_read_remover::~output_read_remover()
+{
+ _mesa_hash_table_destroy(replacements, NULL);
+}
+
+ir_visitor_status
+output_read_remover::visit(ir_dereference_variable *ir)
+{
+ if (ir->var->data.mode != ir_var_shader_out || ir->var->data.fb_fetch_output)
+ return visit_continue;
+
+ hash_entry *entry = _mesa_hash_table_search(replacements, ir->var);
+ ir_variable *temp = entry ? (ir_variable *) entry->data : NULL;
+
+ /* If we don't have an existing temporary, create one. */
+ if (temp == NULL) {
+ void *var_ctx = ralloc_parent(ir->var);
+ temp = new(var_ctx) ir_variable(ir->var->type, ir->var->name,
+ ir_var_temporary);
+      /* Copy the flags which affect arithmetic precision. */
+ temp->data.invariant = ir->var->data.invariant;
+ temp->data.precise = ir->var->data.precise;
+ temp->data.precision = ir->var->data.precision;
+ _mesa_hash_table_insert(replacements, ir->var, temp);
+ ir->var->insert_after(temp);
+ }
+
+ /* Update the dereference to use the temporary */
+ ir->var = temp;
+
+ return visit_continue;
+}
+
+/**
+ * Create an assignment to copy a temporary value back to the actual output.
+ */
+static ir_assignment *
+copy(void *ctx, ir_variable *output, ir_variable *temp)
+{
+ ir_dereference_variable *lhs = new(ctx) ir_dereference_variable(output);
+ ir_dereference_variable *rhs = new(ctx) ir_dereference_variable(temp);
+ return new(ctx) ir_assignment(lhs, rhs);
+}
+
+/** Insert a copy-back assignment before a "return" statement or a call to
+ * EmitVertex().
+ */
+static void
+emit_return_copy(const void *key, void *data, void *closure)
+{
+ ir_return *ir = (ir_return *) closure;
+ ir->insert_before(copy(ir, (ir_variable *) key, (ir_variable *) data));
+}
+
+/** Insert a copy-back assignment at the end of the main() function */
+static void
+emit_main_copy(const void *key, void *data, void *closure)
+{
+ ir_function_signature *sig = (ir_function_signature *) closure;
+ sig->body.push_tail(copy(sig, (ir_variable *) key, (ir_variable *) data));
+}
+
+ir_visitor_status
+output_read_remover::visit_leave(ir_return *ir)
+{
+ hash_table_call_foreach(replacements, emit_return_copy, ir);
+ return visit_continue;
+}
+
+ir_visitor_status
+output_read_remover::visit_leave(ir_emit_vertex *ir)
+{
+ hash_table_call_foreach(replacements, emit_return_copy, ir);
+ return visit_continue;
+}
+
+ir_visitor_status
+output_read_remover::visit_leave(ir_function_signature *sig)
+{
+ if (strcmp(sig->function_name(), "main") != 0)
+ return visit_continue;
+
+ hash_table_call_foreach(replacements, emit_main_copy, sig);
+ return visit_continue;
+}
+
+void
+lower_output_reads(unsigned stage, exec_list *instructions)
+{
+ /* Due to the possible interactions between multiple tessellation control
+ * shader invocations, we leave output variables as-is.
+ */
+ if (stage == MESA_SHADER_TESS_CTRL)
+ return;
+
+ output_read_remover v(stage);
+ visit_list_elements(&v, instructions);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp
new file mode 100644
index 0000000000..9c418ebae6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp
@@ -0,0 +1,943 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_packed_varyings.cpp
+ *
+ * This lowering pass generates GLSL code that manually packs varyings into
+ * vec4 slots, for the benefit of back-ends that don't support packed varyings
+ * natively.
+ *
+ * For example, the following shader:
+ *
+ * out mat3x2 foo; // location=4, location_frac=0
+ * out vec3 bar[2]; // location=5, location_frac=2
+ *
+ * main()
+ * {
+ * ...
+ * }
+ *
+ * Is rewritten to:
+ *
+ * mat3x2 foo;
+ * vec3 bar[2];
+ * out vec4 packed4; // location=4, location_frac=0
+ * out vec4 packed5; // location=5, location_frac=0
+ * out vec4 packed6; // location=6, location_frac=0
+ *
+ * main()
+ * {
+ * ...
+ * packed4.xy = foo[0];
+ * packed4.zw = foo[1];
+ * packed5.xy = foo[2];
+ * packed5.zw = bar[0].xy;
+ * packed6.x = bar[0].z;
+ * packed6.yzw = bar[1];
+ * }
+ *
+ * This lowering pass properly handles "double parking" of a varying vector
+ * across two varying slots. For example, in the code above, two of the
+ * components of bar[0] are stored in packed5, and the remaining component is
+ * stored in packed6.
+ *
+ * Note that in theory, the extra instructions may cause some loss of
+ * performance. However, hopefully in most cases the performance loss will
+ * either be absorbed by a later optimization pass, or it will be offset by
+ * memory bandwidth savings (because fewer varyings are used).
+ *
+ * This lowering pass also packs flat floats, ints, and uints together, by
+ * using ivec4 as the base type of flat "varyings", and using appropriate
+ * casts to convert floats and uints into ints.
+ *
+ * This lowering pass also handles varyings whose type is a struct or an array
+ * of struct. Structs are packed in order and with no gaps, so there may be a
+ * performance penalty due to structure elements being double-parked.
+ *
+ * Lowering of geometry shader inputs is slightly more complex: since
+ * geometry inputs are always arrays, we need to lower arrays to arrays.  For
+ * example, the following input:
+ *
+ * in struct Foo {
+ * float f;
+ * vec3 v;
+ * vec2 a[2];
+ * } arr[3]; // location=4, location_frac=0
+ *
+ * Would get lowered like this if it occurred in a fragment shader:
+ *
+ * struct Foo {
+ * float f;
+ * vec3 v;
+ * vec2 a[2];
+ * } arr[3];
+ * in vec4 packed4; // location=4, location_frac=0
+ * in vec4 packed5; // location=5, location_frac=0
+ * in vec4 packed6; // location=6, location_frac=0
+ * in vec4 packed7; // location=7, location_frac=0
+ * in vec4 packed8; // location=8, location_frac=0
+ * in vec4 packed9; // location=9, location_frac=0
+ *
+ * main()
+ * {
+ * arr[0].f = packed4.x;
+ * arr[0].v = packed4.yzw;
+ * arr[0].a[0] = packed5.xy;
+ * arr[0].a[1] = packed5.zw;
+ * arr[1].f = packed6.x;
+ * arr[1].v = packed6.yzw;
+ * arr[1].a[0] = packed7.xy;
+ * arr[1].a[1] = packed7.zw;
+ * arr[2].f = packed8.x;
+ * arr[2].v = packed8.yzw;
+ * arr[2].a[0] = packed9.xy;
+ * arr[2].a[1] = packed9.zw;
+ * ...
+ * }
+ *
+ * But it would get lowered like this if it occurred in a geometry shader:
+ *
+ * struct Foo {
+ * float f;
+ * vec3 v;
+ * vec2 a[2];
+ * } arr[3];
+ * in vec4 packed4[3]; // location=4, location_frac=0
+ * in vec4 packed5[3]; // location=5, location_frac=0
+ *
+ * main()
+ * {
+ * arr[0].f = packed4[0].x;
+ * arr[0].v = packed4[0].yzw;
+ * arr[0].a[0] = packed5[0].xy;
+ * arr[0].a[1] = packed5[0].zw;
+ * arr[1].f = packed4[1].x;
+ * arr[1].v = packed4[1].yzw;
+ * arr[1].a[0] = packed5[1].xy;
+ * arr[1].a[1] = packed5[1].zw;
+ * arr[2].f = packed4[2].x;
+ * arr[2].v = packed4[2].yzw;
+ * arr[2].a[0] = packed5[2].xy;
+ * arr[2].a[1] = packed5[2].zw;
+ * ...
+ * }
+ */
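+
+/* For the flat-varying packing described above, a sketch of the generated
+ * conversions (see bitwise_assign_pack() and bitwise_assign_unpack() below):
+ *
+ *     flat out float f;   // packed into an ivec4 slot
+ *     flat out uint  u;
+ *
+ * pack side:     packed.x = floatBitsToInt(f);  packed.y = int(u);
+ * unpack side:   f = intBitsToFloat(packed.x);  u = uint(packed.y);
+ */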
+
+#include "glsl_symbol_table.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "program/prog_instruction.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+namespace {
+
+/**
+ * Visitor that performs varying packing. For each varying declared in the
+ * shader, this visitor determines whether it needs to be packed. If so, it
+ * demotes it to an ordinary global, creates new packed varyings, and
+ * generates assignments to convert between the original varying and the
+ * packed varying.
+ */
+class lower_packed_varyings_visitor
+{
+public:
+ lower_packed_varyings_visitor(void *mem_ctx,
+ unsigned locations_used,
+ const uint8_t *components,
+ ir_variable_mode mode,
+ unsigned gs_input_vertices,
+ exec_list *out_instructions,
+ exec_list *out_variables,
+ bool disable_varying_packing,
+ bool disable_xfb_packing,
+ bool xfb_enabled);
+
+ void run(struct gl_linked_shader *shader);
+
+private:
+ void bitwise_assign_pack(ir_rvalue *lhs, ir_rvalue *rhs);
+ void bitwise_assign_unpack(ir_rvalue *lhs, ir_rvalue *rhs);
+ unsigned lower_rvalue(ir_rvalue *rvalue, unsigned fine_location,
+ ir_variable *unpacked_var, const char *name,
+ bool gs_input_toplevel, unsigned vertex_index);
+ unsigned lower_arraylike(ir_rvalue *rvalue, unsigned array_size,
+ unsigned fine_location,
+ ir_variable *unpacked_var, const char *name,
+ bool gs_input_toplevel, unsigned vertex_index);
+ ir_dereference *get_packed_varying_deref(unsigned location,
+ ir_variable *unpacked_var,
+ const char *name,
+ unsigned vertex_index);
+ bool needs_lowering(ir_variable *var);
+
+ /**
+ * Memory context used to allocate new instructions for the shader.
+ */
+ void * const mem_ctx;
+
+ /**
+ * Number of generic varying slots which are used by this shader. This is
+ * used to allocate temporary intermediate data structures. If any varying
+ * used by this shader has a location greater than or equal to
+ * VARYING_SLOT_VAR0 + locations_used, an assertion will fire.
+ */
+ const unsigned locations_used;
+
+ const uint8_t* components;
+
+ /**
+ * Array of pointers to the packed varyings that have been created for each
+ * generic varying slot. NULL entries in this array indicate varying slots
+ * for which a packed varying has not been created yet.
+ */
+ ir_variable **packed_varyings;
+
+ /**
+ * Type of varying which is being lowered in this pass (either
+ * ir_var_shader_in or ir_var_shader_out).
+ */
+ const ir_variable_mode mode;
+
+ /**
+ * If we are currently lowering geometry shader inputs, the number of input
+ * vertices the geometry shader accepts. Otherwise zero.
+ */
+ const unsigned gs_input_vertices;
+
+ /**
+ * Exec list into which the visitor should insert the packing instructions.
+ * Caller provides this list; it should insert the instructions into the
+ * appropriate place in the shader once the visitor has finished running.
+ */
+ exec_list *out_instructions;
+
+ /**
+ * Exec list into which the visitor should insert any new variables.
+ */
+ exec_list *out_variables;
+
+ bool disable_varying_packing;
+ bool disable_xfb_packing;
+ bool xfb_enabled;
+};
+
+} /* anonymous namespace */
+
+lower_packed_varyings_visitor::lower_packed_varyings_visitor(
+ void *mem_ctx, unsigned locations_used, const uint8_t *components,
+ ir_variable_mode mode,
+ unsigned gs_input_vertices, exec_list *out_instructions,
+ exec_list *out_variables, bool disable_varying_packing,
+ bool disable_xfb_packing, bool xfb_enabled)
+ : mem_ctx(mem_ctx),
+ locations_used(locations_used),
+ components(components),
+ packed_varyings((ir_variable **)
+ rzalloc_array_size(mem_ctx, sizeof(*packed_varyings),
+ locations_used)),
+ mode(mode),
+ gs_input_vertices(gs_input_vertices),
+ out_instructions(out_instructions),
+ out_variables(out_variables),
+ disable_varying_packing(disable_varying_packing),
+ disable_xfb_packing(disable_xfb_packing),
+ xfb_enabled(xfb_enabled)
+{
+}
+
+void
+lower_packed_varyings_visitor::run(struct gl_linked_shader *shader)
+{
+ foreach_in_list(ir_instruction, node, shader->ir) {
+ ir_variable *var = node->as_variable();
+ if (var == NULL)
+ continue;
+
+ if (var->data.mode != this->mode ||
+ var->data.location < VARYING_SLOT_VAR0 ||
+ !this->needs_lowering(var))
+ continue;
+
+ /* This lowering pass is only capable of packing floats and ints
+ * together when their interpolation mode is "flat". Treat integers as
+ * being flat when the interpolation mode is none.
+ */
+ assert(var->data.interpolation == INTERP_MODE_FLAT ||
+ var->data.interpolation == INTERP_MODE_NONE ||
+ !var->type->contains_integer());
+
+ /* Clone the variable for program resource list before
+ * it gets modified and lost.
+ */
+ if (!shader->packed_varyings)
+ shader->packed_varyings = new (shader) exec_list;
+
+ shader->packed_varyings->push_tail(var->clone(shader, NULL));
+
+ /* Change the old varying into an ordinary global. */
+ assert(var->data.mode != ir_var_temporary);
+ var->data.mode = ir_var_auto;
+
+ /* Create a reference to the old varying. */
+ ir_dereference_variable *deref
+ = new(this->mem_ctx) ir_dereference_variable(var);
+
+ /* Recursively pack or unpack it. */
+ this->lower_rvalue(deref, var->data.location * 4 + var->data.location_frac, var,
+ var->name, this->gs_input_vertices != 0, 0);
+ }
+}
+
+#define SWIZZLE_ZWZW MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W)
+
+/**
+ * Make an ir_assignment from \c rhs to \c lhs, performing appropriate
+ * bitcasts if necessary to match up types.
+ *
+ * This function is called when packing varyings.
+ */
+void
+lower_packed_varyings_visitor::bitwise_assign_pack(ir_rvalue *lhs,
+ ir_rvalue *rhs)
+{
+ if (lhs->type->base_type != rhs->type->base_type) {
+ /* Since we only mix types in flat varyings, and we always store flat
+ * varyings as type ivec4, we need only produce conversions from (uint
+ * or float) to int.
+ */
+ assert(lhs->type->base_type == GLSL_TYPE_INT);
+ switch (rhs->type->base_type) {
+ case GLSL_TYPE_UINT:
+ rhs = new(this->mem_ctx)
+ ir_expression(ir_unop_u2i, lhs->type, rhs);
+ break;
+ case GLSL_TYPE_FLOAT:
+ rhs = new(this->mem_ctx)
+ ir_expression(ir_unop_bitcast_f2i, lhs->type, rhs);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ assert(rhs->type->vector_elements <= 2);
+ if (rhs->type->vector_elements == 2) {
+ ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);
+
+ assert(lhs->type->vector_elements == 4);
+ this->out_variables->push_tail(t);
+ this->out_instructions->push_tail(
+ assign(t, u2i(expr(ir_unop_unpack_double_2x32, swizzle_x(rhs->clone(mem_ctx, NULL)))), 0x3));
+ this->out_instructions->push_tail(
+ assign(t, u2i(expr(ir_unop_unpack_double_2x32, swizzle_y(rhs))), 0xc));
+ rhs = deref(t).val;
+ } else {
+ rhs = u2i(expr(ir_unop_unpack_double_2x32, rhs));
+ }
+ break;
+ case GLSL_TYPE_INT64:
+ assert(rhs->type->vector_elements <= 2);
+ if (rhs->type->vector_elements == 2) {
+ ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);
+
+ assert(lhs->type->vector_elements == 4);
+ this->out_variables->push_tail(t);
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_unpack_int_2x32, swizzle_x(rhs->clone(mem_ctx, NULL))), 0x3));
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_unpack_int_2x32, swizzle_y(rhs)), 0xc));
+ rhs = deref(t).val;
+ } else {
+ rhs = expr(ir_unop_unpack_int_2x32, rhs);
+ }
+ break;
+ case GLSL_TYPE_UINT64:
+ assert(rhs->type->vector_elements <= 2);
+ if (rhs->type->vector_elements == 2) {
+ ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);
+
+ assert(lhs->type->vector_elements == 4);
+ this->out_variables->push_tail(t);
+ this->out_instructions->push_tail(
+ assign(t, u2i(expr(ir_unop_unpack_uint_2x32, swizzle_x(rhs->clone(mem_ctx, NULL)))), 0x3));
+ this->out_instructions->push_tail(
+ assign(t, u2i(expr(ir_unop_unpack_uint_2x32, swizzle_y(rhs))), 0xc));
+ rhs = deref(t).val;
+ } else {
+ rhs = u2i(expr(ir_unop_unpack_uint_2x32, rhs));
+ }
+ break;
+ case GLSL_TYPE_SAMPLER:
+ rhs = u2i(expr(ir_unop_unpack_sampler_2x32, rhs));
+ break;
+ case GLSL_TYPE_IMAGE:
+ rhs = u2i(expr(ir_unop_unpack_image_2x32, rhs));
+ break;
+ default:
+ assert(!"Unexpected type conversion while lowering varyings");
+ break;
+ }
+ }
+ this->out_instructions->push_tail(new (this->mem_ctx) ir_assignment(lhs, rhs));
+}
+
+
+/**
+ * Make an ir_assignment from \c rhs to \c lhs, performing appropriate
+ * bitcasts if necessary to match up types.
+ *
+ * This function is called when unpacking varyings.
+ */
+void
+lower_packed_varyings_visitor::bitwise_assign_unpack(ir_rvalue *lhs,
+ ir_rvalue *rhs)
+{
+ if (lhs->type->base_type != rhs->type->base_type) {
+ /* Since we only mix types in flat varyings, and we always store flat
+ * varyings as type ivec4, we need only produce conversions from int to
+ * (uint or float).
+ */
+ assert(rhs->type->base_type == GLSL_TYPE_INT);
+ switch (lhs->type->base_type) {
+ case GLSL_TYPE_UINT:
+ rhs = new(this->mem_ctx)
+ ir_expression(ir_unop_i2u, lhs->type, rhs);
+ break;
+ case GLSL_TYPE_FLOAT:
+ rhs = new(this->mem_ctx)
+ ir_expression(ir_unop_bitcast_i2f, lhs->type, rhs);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ assert(lhs->type->vector_elements <= 2);
+ if (lhs->type->vector_elements == 2) {
+ ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
+ assert(rhs->type->vector_elements == 4);
+ this->out_variables->push_tail(t);
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_pack_double_2x32, i2u(swizzle_xy(rhs->clone(mem_ctx, NULL)))), 0x1));
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_pack_double_2x32, i2u(swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2))), 0x2));
+ rhs = deref(t).val;
+ } else {
+ rhs = expr(ir_unop_pack_double_2x32, i2u(rhs));
+ }
+ break;
+ case GLSL_TYPE_INT64:
+ assert(lhs->type->vector_elements <= 2);
+ if (lhs->type->vector_elements == 2) {
+ ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
+ assert(rhs->type->vector_elements == 4);
+ this->out_variables->push_tail(t);
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_pack_int_2x32, swizzle_xy(rhs->clone(mem_ctx, NULL))), 0x1));
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_pack_int_2x32, swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2)), 0x2));
+ rhs = deref(t).val;
+ } else {
+ rhs = expr(ir_unop_pack_int_2x32, rhs);
+ }
+ break;
+ case GLSL_TYPE_UINT64:
+ assert(lhs->type->vector_elements <= 2);
+ if (lhs->type->vector_elements == 2) {
+ ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
+ assert(rhs->type->vector_elements == 4);
+ this->out_variables->push_tail(t);
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_pack_uint_2x32, i2u(swizzle_xy(rhs->clone(mem_ctx, NULL)))), 0x1));
+ this->out_instructions->push_tail(
+ assign(t, expr(ir_unop_pack_uint_2x32, i2u(swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2))), 0x2));
+ rhs = deref(t).val;
+ } else {
+ rhs = expr(ir_unop_pack_uint_2x32, i2u(rhs));
+ }
+ break;
+ case GLSL_TYPE_SAMPLER:
+ rhs = new(mem_ctx)
+ ir_expression(ir_unop_pack_sampler_2x32, lhs->type, i2u(rhs));
+ break;
+ case GLSL_TYPE_IMAGE:
+ rhs = new(mem_ctx)
+ ir_expression(ir_unop_pack_image_2x32, lhs->type, i2u(rhs));
+ break;
+ default:
+ assert(!"Unexpected type conversion while lowering varyings");
+ break;
+ }
+ }
+ this->out_instructions->push_tail(new(this->mem_ctx) ir_assignment(lhs, rhs));
+}
+
+
+/**
+ * Recursively pack or unpack the given varying (or portion of a varying) by
+ * traversing all of its constituent vectors.
+ *
+ * \param fine_location is the location where the first constituent vector
+ * should be packed--the word "fine" indicates that this location is expressed
+ * in multiples of a float, rather than multiples of a vec4 as is used
+ * elsewhere in Mesa.
+ *
+ * \param gs_input_toplevel should be set to true if we are lowering geometry
+ * shader inputs, and we are currently lowering the whole input variable
+ * (i.e. we are lowering the array whose index selects the vertex).
+ *
+ * \param vertex_index: if we are lowering geometry shader inputs, and the
+ * level of the array that we are currently lowering is *not* the top level,
+ * then this indicates which vertex we are currently lowering. Otherwise it
+ * is ignored.
+ *
+ * \return the location where the next constituent vector (after this one)
+ * should be packed.
+ */
+unsigned
+lower_packed_varyings_visitor::lower_rvalue(ir_rvalue *rvalue,
+ unsigned fine_location,
+ ir_variable *unpacked_var,
+ const char *name,
+ bool gs_input_toplevel,
+ unsigned vertex_index)
+{
+ unsigned dmul = rvalue->type->is_64bit() ? 2 : 1;
+ /* When gs_input_toplevel is set, we should be looking at a geometry shader
+ * input array.
+ */
+ assert(!gs_input_toplevel || rvalue->type->is_array());
+
+ if (rvalue->type->is_struct()) {
+ for (unsigned i = 0; i < rvalue->type->length; i++) {
+ if (i != 0)
+ rvalue = rvalue->clone(this->mem_ctx, NULL);
+ const char *field_name = rvalue->type->fields.structure[i].name;
+ ir_dereference_record *dereference_record = new(this->mem_ctx)
+ ir_dereference_record(rvalue, field_name);
+ char *deref_name
+ = ralloc_asprintf(this->mem_ctx, "%s.%s", name, field_name);
+ fine_location = this->lower_rvalue(dereference_record, fine_location,
+ unpacked_var, deref_name, false,
+ vertex_index);
+ }
+ return fine_location;
+ } else if (rvalue->type->is_array()) {
+ /* Arrays are packed/unpacked by considering each array element in
+ * sequence.
+ */
+ return this->lower_arraylike(rvalue, rvalue->type->array_size(),
+ fine_location, unpacked_var, name,
+ gs_input_toplevel, vertex_index);
+ } else if (rvalue->type->is_matrix()) {
+ /* Matrices are packed/unpacked by considering each column vector in
+ * sequence.
+ */
+ return this->lower_arraylike(rvalue, rvalue->type->matrix_columns,
+ fine_location, unpacked_var, name,
+ false, vertex_index);
+ } else if (rvalue->type->vector_elements * dmul +
+ fine_location % 4 > 4) {
+ /* This vector is going to be "double parked" across two varying slots,
+ * so handle it as two separate assignments. For doubles, a dvec3/dvec4
+       * can end up being spread over 3 slots. However, the second splitting
+       * will happen later; here we just always want to split into 2.
+ */
+ unsigned left_components, right_components;
+ unsigned left_swizzle_values[4] = { 0, 0, 0, 0 };
+ unsigned right_swizzle_values[4] = { 0, 0, 0, 0 };
+ char left_swizzle_name[4] = { 0, 0, 0, 0 };
+ char right_swizzle_name[4] = { 0, 0, 0, 0 };
+
+ left_components = 4 - fine_location % 4;
+ if (rvalue->type->is_64bit()) {
+ /* We might actually end up with 0 left components! */
+ left_components /= 2;
+ }
+ right_components = rvalue->type->vector_elements - left_components;
+
+ for (unsigned i = 0; i < left_components; i++) {
+ left_swizzle_values[i] = i;
+ left_swizzle_name[i] = "xyzw"[i];
+ }
+ for (unsigned i = 0; i < right_components; i++) {
+ right_swizzle_values[i] = i + left_components;
+ right_swizzle_name[i] = "xyzw"[i + left_components];
+ }
+ ir_swizzle *left_swizzle = new(this->mem_ctx)
+ ir_swizzle(rvalue, left_swizzle_values, left_components);
+ ir_swizzle *right_swizzle = new(this->mem_ctx)
+ ir_swizzle(rvalue->clone(this->mem_ctx, NULL), right_swizzle_values,
+ right_components);
+ char *left_name
+ = ralloc_asprintf(this->mem_ctx, "%s.%s", name, left_swizzle_name);
+ char *right_name
+ = ralloc_asprintf(this->mem_ctx, "%s.%s", name, right_swizzle_name);
+ if (left_components)
+ fine_location = this->lower_rvalue(left_swizzle, fine_location,
+ unpacked_var, left_name, false,
+ vertex_index);
+ else
+ /* Top up the fine location to the next slot */
+ fine_location++;
+ return this->lower_rvalue(right_swizzle, fine_location, unpacked_var,
+ right_name, false, vertex_index);
+ } else {
+ /* No special handling is necessary; pack the rvalue into the
+ * varying.
+ */
+ unsigned swizzle_values[4] = { 0, 0, 0, 0 };
+ unsigned components = rvalue->type->vector_elements * dmul;
+ unsigned location = fine_location / 4;
+ unsigned location_frac = fine_location % 4;
+ for (unsigned i = 0; i < components; ++i)
+ swizzle_values[i] = i + location_frac;
+ ir_dereference *packed_deref =
+ this->get_packed_varying_deref(location, unpacked_var, name,
+ vertex_index);
+ if (unpacked_var->data.stream != 0) {
+ assert(unpacked_var->data.stream < 4);
+ ir_variable *packed_var = packed_deref->variable_referenced();
+ for (unsigned i = 0; i < components; ++i) {
+ packed_var->data.stream |=
+ unpacked_var->data.stream << (2 * (location_frac + i));
+ }
+ }
+ ir_swizzle *swizzle = new(this->mem_ctx)
+ ir_swizzle(packed_deref, swizzle_values, components);
+ if (this->mode == ir_var_shader_out) {
+ this->bitwise_assign_pack(swizzle, rvalue);
+ } else {
+ this->bitwise_assign_unpack(rvalue, swizzle);
+ }
+ return fine_location + components;
+ }
+}
+
+/**
+ * Recursively pack or unpack a varying for which we need to iterate over its
+ * constituent elements, accessing each one using an ir_dereference_array.
+ * This takes care of both arrays and matrices, since ir_dereference_array
+ * treats a matrix like an array of its column vectors.
+ *
+ * \param gs_input_toplevel should be set to true if we are lowering geometry
+ * shader inputs, and we are currently lowering the whole input variable
+ * (i.e. we are lowering the array whose index selects the vertex).
+ *
+ * \param vertex_index: if we are lowering geometry shader inputs, and the
+ * level of the array that we are currently lowering is *not* the top level,
+ * then this indicates which vertex we are currently lowering. Otherwise it
+ * is ignored.
+ */
+unsigned
+lower_packed_varyings_visitor::lower_arraylike(ir_rvalue *rvalue,
+ unsigned array_size,
+ unsigned fine_location,
+ ir_variable *unpacked_var,
+ const char *name,
+ bool gs_input_toplevel,
+ unsigned vertex_index)
+{
+ for (unsigned i = 0; i < array_size; i++) {
+ if (i != 0)
+ rvalue = rvalue->clone(this->mem_ctx, NULL);
+ ir_constant *constant = new(this->mem_ctx) ir_constant(i);
+ ir_dereference_array *dereference_array = new(this->mem_ctx)
+ ir_dereference_array(rvalue, constant);
+ if (gs_input_toplevel) {
+ /* Geometry shader inputs are a special case. Instead of storing
+ * each element of the array at a different location, all elements
+ * are at the same location, but with a different vertex index.
+ */
+ (void) this->lower_rvalue(dereference_array, fine_location,
+ unpacked_var, name, false, i);
+ } else {
+ char *subscripted_name
+ = ralloc_asprintf(this->mem_ctx, "%s[%d]", name, i);
+ fine_location =
+ this->lower_rvalue(dereference_array, fine_location,
+ unpacked_var, subscripted_name,
+ false, vertex_index);
+ }
+ }
+ return fine_location;
+}
+
+/**
+ * Retrieve the packed varying corresponding to the given varying location.
+ * If no packed varying has been created for the given varying location yet,
+ * create it and add it to the shader before returning it.
+ *
+ * The newly created varying inherits its interpolation parameters from \c
+ * unpacked_var. Its base type is ivec4 if we are lowering a flat varying,
+ * vec4 otherwise.
+ *
+ * \param vertex_index: if we are lowering geometry shader inputs, then this
+ * indicates which vertex we are currently lowering. Otherwise it is ignored.
+ */
+ir_dereference *
+lower_packed_varyings_visitor::get_packed_varying_deref(
+ unsigned location, ir_variable *unpacked_var, const char *name,
+ unsigned vertex_index)
+{
+ unsigned slot = location - VARYING_SLOT_VAR0;
+ assert(slot < locations_used);
+ if (this->packed_varyings[slot] == NULL) {
+ char *packed_name = ralloc_asprintf(this->mem_ctx, "packed:%s", name);
+ const glsl_type *packed_type;
+ assert(components[slot] != 0);
+ if (unpacked_var->is_interpolation_flat())
+ packed_type = glsl_type::get_instance(GLSL_TYPE_INT, components[slot], 1);
+ else
+ packed_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, components[slot], 1);
+ if (this->gs_input_vertices != 0) {
+ packed_type =
+ glsl_type::get_array_instance(packed_type,
+ this->gs_input_vertices);
+ }
+ ir_variable *packed_var = new(this->mem_ctx)
+ ir_variable(packed_type, packed_name, this->mode);
+ if (this->gs_input_vertices != 0) {
+ /* Prevent update_array_sizes() from messing with the size of the
+ * array.
+ */
+ packed_var->data.max_array_access = this->gs_input_vertices - 1;
+ }
+ packed_var->data.centroid = unpacked_var->data.centroid;
+ packed_var->data.sample = unpacked_var->data.sample;
+ packed_var->data.patch = unpacked_var->data.patch;
+ packed_var->data.interpolation =
+ packed_type->without_array() == glsl_type::ivec4_type
+ ? unsigned(INTERP_MODE_FLAT) : unpacked_var->data.interpolation;
+ packed_var->data.location = location;
+ packed_var->data.precision = unpacked_var->data.precision;
+ packed_var->data.always_active_io = unpacked_var->data.always_active_io;
+ packed_var->data.stream = 1u << 31;
+ unpacked_var->insert_before(packed_var);
+ this->packed_varyings[slot] = packed_var;
+ } else {
+ ir_variable *var = this->packed_varyings[slot];
+
+ /* The slot needs to be marked as always active if any variable that got
+ * packed there was.
+ */
+ var->data.always_active_io |= unpacked_var->data.always_active_io;
+
+ /* For geometry shader inputs, only update the packed variable name the
+ * first time we visit each component.
+ */
+ if (this->gs_input_vertices == 0 || vertex_index == 0) {
+ if (var->is_name_ralloced())
+ ralloc_asprintf_append((char **) &var->name, ",%s", name);
+ else
+ var->name = ralloc_asprintf(var, "%s,%s", var->name, name);
+ }
+ }
+
+ ir_dereference *deref = new(this->mem_ctx)
+ ir_dereference_variable(this->packed_varyings[slot]);
+ if (this->gs_input_vertices != 0) {
+ /* When lowering GS inputs, the packed variable is an array, so we need
+ * to dereference it using vertex_index.
+ */
+ ir_constant *constant = new(this->mem_ctx) ir_constant(vertex_index);
+ deref = new(this->mem_ctx) ir_dereference_array(deref, constant);
+ }
+ return deref;
+}
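+
+/* As an aside, the naming scheme above means that a slot shared by two
+ * varyings "foo" and "bar" ends up as a variable named "packed:foo,bar",
+ * which makes the packing decisions visible in IR dumps.
+ */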
+
+bool
+lower_packed_varyings_visitor::needs_lowering(ir_variable *var)
+{
+ /* Things composed of vec4's, varyings with explicitly assigned
+    * locations, or varyings marked as must_be_shader_input (which might be used
+ * by interpolateAt* functions) shouldn't be lowered. Everything else can be.
+ */
+ if (var->data.explicit_location || var->data.must_be_shader_input)
+ return false;
+
+ const glsl_type *type = var->type;
+
+ /* Some drivers (e.g. panfrost) don't support packing of transform
+ * feedback varyings.
+ */
+ if (disable_xfb_packing && var->data.is_xfb &&
+ !(type->is_array() || type->is_struct() || type->is_matrix()) &&
+ xfb_enabled)
+ return false;
+
+ /* Override disable_varying_packing if the var is only used by transform
+ * feedback. Also override it if transform feedback is enabled and the
+ * variable is an array, struct or matrix as the elements of these types
+ * will always have the same interpolation and therefore are safe to pack.
+ */
+ if (disable_varying_packing && !var->data.is_xfb_only &&
+ !((type->is_array() || type->is_struct() || type->is_matrix()) &&
+ xfb_enabled))
+ return false;
+
+ type = type->without_array();
+ if (type->vector_elements == 4 && !type->is_64bit())
+ return false;
+ return true;
+}
+
+
+/**
+ * Visitor that splices varying packing code before every use of EmitVertex()
+ * in a geometry shader.
+ */
+class lower_packed_varyings_gs_splicer : public ir_hierarchical_visitor
+{
+public:
+ explicit lower_packed_varyings_gs_splicer(void *mem_ctx,
+ const exec_list *instructions);
+
+ virtual ir_visitor_status visit_leave(ir_emit_vertex *ev);
+
+private:
+ /**
+ * Memory context used to allocate new instructions for the shader.
+ */
+ void * const mem_ctx;
+
+ /**
+ * Instructions that should be spliced into place before each EmitVertex()
+ * call.
+ */
+ const exec_list *instructions;
+};
+
+
+lower_packed_varyings_gs_splicer::lower_packed_varyings_gs_splicer(
+ void *mem_ctx, const exec_list *instructions)
+ : mem_ctx(mem_ctx), instructions(instructions)
+{
+}
+
+
+ir_visitor_status
+lower_packed_varyings_gs_splicer::visit_leave(ir_emit_vertex *ev)
+{
+ foreach_in_list(ir_instruction, ir, this->instructions) {
+ ev->insert_before(ir->clone(this->mem_ctx, NULL));
+ }
+ return visit_continue;
+}
+
+/**
+ * Visitor that splices varying packing code before every return.
+ */
+class lower_packed_varyings_return_splicer : public ir_hierarchical_visitor
+{
+public:
+ explicit lower_packed_varyings_return_splicer(void *mem_ctx,
+ const exec_list *instructions);
+
+ virtual ir_visitor_status visit_leave(ir_return *ret);
+
+private:
+ /**
+ * Memory context used to allocate new instructions for the shader.
+ */
+ void * const mem_ctx;
+
+ /**
+ * Instructions that should be spliced into place before each return.
+ */
+ const exec_list *instructions;
+};
+
+
+lower_packed_varyings_return_splicer::lower_packed_varyings_return_splicer(
+ void *mem_ctx, const exec_list *instructions)
+ : mem_ctx(mem_ctx), instructions(instructions)
+{
+}
+
+
+ir_visitor_status
+lower_packed_varyings_return_splicer::visit_leave(ir_return *ret)
+{
+ foreach_in_list(ir_instruction, ir, this->instructions) {
+ ret->insert_before(ir->clone(this->mem_ctx, NULL));
+ }
+ return visit_continue;
+}
+
+void
+lower_packed_varyings(void *mem_ctx, unsigned locations_used,
+ const uint8_t *components,
+ ir_variable_mode mode, unsigned gs_input_vertices,
+ gl_linked_shader *shader, bool disable_varying_packing,
+ bool disable_xfb_packing, bool xfb_enabled)
+{
+ exec_list *instructions = shader->ir;
+ ir_function *main_func = shader->symbols->get_function("main");
+ exec_list void_parameters;
+ ir_function_signature *main_func_sig
+ = main_func->matching_signature(NULL, &void_parameters, false);
+ exec_list new_instructions, new_variables;
+ lower_packed_varyings_visitor visitor(mem_ctx,
+ locations_used,
+ components,
+ mode,
+ gs_input_vertices,
+ &new_instructions,
+ &new_variables,
+ disable_varying_packing,
+ disable_xfb_packing,
+ xfb_enabled);
+ visitor.run(shader);
+ if (mode == ir_var_shader_out) {
+ if (shader->Stage == MESA_SHADER_GEOMETRY) {
+ /* For geometry shaders, outputs need to be lowered before each call
+ * to EmitVertex()
+ */
+ lower_packed_varyings_gs_splicer splicer(mem_ctx, &new_instructions);
+
+ /* Add all the variables in first. */
+ main_func_sig->body.get_head_raw()->insert_before(&new_variables);
+
+ /* Now update all the EmitVertex instances */
+ splicer.run(instructions);
+ } else {
+ /* For other shader types, outputs need to be lowered before each
+ * return statement and at the end of main()
+ */
+
+ lower_packed_varyings_return_splicer splicer(mem_ctx, &new_instructions);
+
+ main_func_sig->body.get_head_raw()->insert_before(&new_variables);
+
+ splicer.run(instructions);
+
+ /* Lower outputs at the end of main() if the last instruction is not
+ * a return statement
+ */
+ if (((ir_instruction*)instructions->get_tail())->ir_type != ir_type_return) {
+ main_func_sig->body.append_list(&new_instructions);
+ }
+ }
+ } else {
+ /* Shader inputs need to be lowered at the beginning of main() */
+ main_func_sig->body.get_head_raw()->insert_before(&new_instructions);
+ main_func_sig->body.get_head_raw()->insert_before(&new_variables);
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp
new file mode 100644
index 0000000000..a41627bd56
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp
@@ -0,0 +1,1311 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+
+namespace {
+
+using namespace ir_builder;
+
+/**
+ * A visitor that lowers built-in floating-point pack/unpack expressions
+ * such as packSnorm2x16.
+ */
+class lower_packing_builtins_visitor : public ir_rvalue_visitor {
+public:
+ /**
+ * \param op_mask is a bitmask of `enum lower_packing_builtins_op`
+ */
+ explicit lower_packing_builtins_visitor(int op_mask)
+ : op_mask(op_mask),
+ progress(false)
+ {
+ factory.instructions = &factory_instructions;
+ }
+
+ virtual ~lower_packing_builtins_visitor()
+ {
+ assert(factory_instructions.is_empty());
+ }
+
+ bool get_progress() { return progress; }
+
+ void handle_rvalue(ir_rvalue **rvalue)
+ {
+ if (!*rvalue)
+ return;
+
+ ir_expression *expr = (*rvalue)->as_expression();
+ if (!expr)
+ return;
+
+ enum lower_packing_builtins_op lowering_op =
+ choose_lowering_op(expr->operation);
+
+ if (lowering_op == LOWER_PACK_UNPACK_NONE)
+ return;
+
+ setup_factory(ralloc_parent(expr));
+
+ ir_rvalue *op0 = expr->operands[0];
+ ralloc_steal(factory.mem_ctx, op0);
+
+ switch (lowering_op) {
+ case LOWER_PACK_SNORM_2x16:
+ *rvalue = lower_pack_snorm_2x16(op0);
+ break;
+ case LOWER_PACK_SNORM_4x8:
+ *rvalue = lower_pack_snorm_4x8(op0);
+ break;
+ case LOWER_PACK_UNORM_2x16:
+ *rvalue = lower_pack_unorm_2x16(op0);
+ break;
+ case LOWER_PACK_UNORM_4x8:
+ *rvalue = lower_pack_unorm_4x8(op0);
+ break;
+ case LOWER_PACK_HALF_2x16:
+ *rvalue = lower_pack_half_2x16(op0);
+ break;
+ case LOWER_UNPACK_SNORM_2x16:
+ *rvalue = lower_unpack_snorm_2x16(op0);
+ break;
+ case LOWER_UNPACK_SNORM_4x8:
+ *rvalue = lower_unpack_snorm_4x8(op0);
+ break;
+ case LOWER_UNPACK_UNORM_2x16:
+ *rvalue = lower_unpack_unorm_2x16(op0);
+ break;
+ case LOWER_UNPACK_UNORM_4x8:
+ *rvalue = lower_unpack_unorm_4x8(op0);
+ break;
+ case LOWER_UNPACK_HALF_2x16:
+ *rvalue = lower_unpack_half_2x16(op0);
+ break;
+ case LOWER_PACK_UNPACK_NONE:
+ case LOWER_PACK_USE_BFI:
+ case LOWER_PACK_USE_BFE:
+ assert(!"not reached");
+ break;
+ }
+
+ teardown_factory();
+ progress = true;
+ }
+
+private:
+ const int op_mask;
+ bool progress;
+ ir_factory factory;
+ exec_list factory_instructions;
+
+ /**
+ * Determine the needed lowering operation by filtering \a expr_op
+ * through \ref op_mask.
+ */
+ enum lower_packing_builtins_op
+ choose_lowering_op(ir_expression_operation expr_op)
+ {
+ /* C++ regards int and enum as fundamentally different types.
+ * So, we can't simply return from each case; we must cast the return
+ * value.
+ */
+ int result;
+
+ switch (expr_op) {
+ case ir_unop_pack_snorm_2x16:
+ result = op_mask & LOWER_PACK_SNORM_2x16;
+ break;
+ case ir_unop_pack_snorm_4x8:
+ result = op_mask & LOWER_PACK_SNORM_4x8;
+ break;
+ case ir_unop_pack_unorm_2x16:
+ result = op_mask & LOWER_PACK_UNORM_2x16;
+ break;
+ case ir_unop_pack_unorm_4x8:
+ result = op_mask & LOWER_PACK_UNORM_4x8;
+ break;
+ case ir_unop_pack_half_2x16:
+ result = op_mask & LOWER_PACK_HALF_2x16;
+ break;
+ case ir_unop_unpack_snorm_2x16:
+ result = op_mask & LOWER_UNPACK_SNORM_2x16;
+ break;
+ case ir_unop_unpack_snorm_4x8:
+ result = op_mask & LOWER_UNPACK_SNORM_4x8;
+ break;
+ case ir_unop_unpack_unorm_2x16:
+ result = op_mask & LOWER_UNPACK_UNORM_2x16;
+ break;
+ case ir_unop_unpack_unorm_4x8:
+ result = op_mask & LOWER_UNPACK_UNORM_4x8;
+ break;
+ case ir_unop_unpack_half_2x16:
+ result = op_mask & LOWER_UNPACK_HALF_2x16;
+ break;
+ default:
+ result = LOWER_PACK_UNPACK_NONE;
+ break;
+ }
+
+ return static_cast<enum lower_packing_builtins_op>(result);
+ }
+
+ void
+ setup_factory(void *mem_ctx)
+ {
+ assert(factory.mem_ctx == NULL);
+ assert(factory.instructions->is_empty());
+
+ factory.mem_ctx = mem_ctx;
+ }
+
+ void
+ teardown_factory()
+ {
+ base_ir->insert_before(factory.instructions);
+ assert(factory.instructions->is_empty());
+ factory.mem_ctx = NULL;
+ }
+
+ template <typename T>
+ ir_constant*
+ constant(T x)
+ {
+ return factory.constant(x);
+ }
+
+ /**
+ * \brief Pack two uint16's into a single uint32.
+ *
+ * Interpret the given uvec2 as a uint16 pair. Pack the pair into a uint32
+ * where the least significant bits specify the first element of the pair.
+ * Return the uint32.
+ */
+ ir_rvalue*
+ pack_uvec2_to_uint(ir_rvalue *uvec2_rval)
+ {
+ assert(uvec2_rval->type == glsl_type::uvec2_type);
+
+ /* uvec2 u = UVEC2_RVAL; */
+ ir_variable *u = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_pack_uvec2_to_uint");
+ factory.emit(assign(u, uvec2_rval));
+
+ if (op_mask & LOWER_PACK_USE_BFI) {
+ return bitfield_insert(bit_and(swizzle_x(u), constant(0xffffu)),
+ swizzle_y(u),
+ constant(16u),
+ constant(16u));
+ }
+
+ /* return (u.y << 16) | (u.x & 0xffff); */
+ return bit_or(lshift(swizzle_y(u), constant(16u)),
+ bit_and(swizzle_x(u), constant(0xffffu)));
+ }
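+
+   /* A worked example (a sketch, not emitted code): for u = uvec2(0x1234,
+    * 0xabcd), (u.y << 16) | (u.x & 0xffff) yields 0xabcd1234, so the first
+    * element lands in the least significant 16 bits.
+    */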
+
+ /**
+ * \brief Pack four uint8's into a single uint32.
+ *
+    * Interpret the given uvec4 as a uint8 4-tuple. Pack the 4-tuple into a
+ * uint32 where the least significant bits specify the first element of the
+ * 4-tuple. Return the uint32.
+ */
+ ir_rvalue*
+ pack_uvec4_to_uint(ir_rvalue *uvec4_rval)
+ {
+ assert(uvec4_rval->type == glsl_type::uvec4_type);
+
+ ir_variable *u = factory.make_temp(glsl_type::uvec4_type,
+ "tmp_pack_uvec4_to_uint");
+
+ if (op_mask & LOWER_PACK_USE_BFI) {
+ /* uvec4 u = UVEC4_RVAL; */
+ factory.emit(assign(u, uvec4_rval));
+
+ return bitfield_insert(bitfield_insert(
+ bitfield_insert(
+ bit_and(swizzle_x(u), constant(0xffu)),
+ swizzle_y(u), constant(8u), constant(8u)),
+ swizzle_z(u), constant(16u), constant(8u)),
+ swizzle_w(u), constant(24u), constant(8u));
+ }
+
+ /* uvec4 u = UVEC4_RVAL & 0xff */
+ factory.emit(assign(u, bit_and(uvec4_rval, constant(0xffu))));
+
+ /* return (u.w << 24) | (u.z << 16) | (u.y << 8) | u.x; */
+ return bit_or(bit_or(lshift(swizzle_w(u), constant(24u)),
+ lshift(swizzle_z(u), constant(16u))),
+ bit_or(lshift(swizzle_y(u), constant(8u)),
+ swizzle_x(u)));
+ }
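+
+   /* A worked example (a sketch): for u = uvec4(0x12, 0x34, 0x56, 0x78),
+    * (u.w << 24) | (u.z << 16) | (u.y << 8) | u.x yields 0x78563412, so
+    * the first element lands in the least significant byte.
+    */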
+
+ /**
+ * \brief Unpack a uint32 into two uint16's.
+ *
+ * Interpret the given uint32 as a uint16 pair where the uint32's least
+ * significant bits specify the pair's first element. Return the uint16
+ * pair as a uvec2.
+ */
+ ir_rvalue*
+ unpack_uint_to_uvec2(ir_rvalue *uint_rval)
+ {
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ /* uint u = UINT_RVAL; */
+ ir_variable *u = factory.make_temp(glsl_type::uint_type,
+ "tmp_unpack_uint_to_uvec2_u");
+ factory.emit(assign(u, uint_rval));
+
+ /* uvec2 u2; */
+ ir_variable *u2 = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_unpack_uint_to_uvec2_u2");
+
+ /* u2.x = u & 0xffffu; */
+ factory.emit(assign(u2, bit_and(u, constant(0xffffu)), WRITEMASK_X));
+
+ /* u2.y = u >> 16u; */
+ factory.emit(assign(u2, rshift(u, constant(16u)), WRITEMASK_Y));
+
+ return deref(u2).val;
+ }
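+
+   /* A worked example (a sketch): unpacking 0xabcd1234 yields
+    * uvec2(0x1234, 0xabcd), the exact inverse of pack_uvec2_to_uint().
+    */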
+
+ /**
+ * \brief Unpack a uint32 into two int16's.
+ *
+ * Specifically each 16-bit value is sign-extended to the full width of an
+ * int32 on return.
+ */
+ ir_rvalue *
+ unpack_uint_to_ivec2(ir_rvalue *uint_rval)
+ {
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ if (!(op_mask & LOWER_PACK_USE_BFE)) {
+ return rshift(lshift(u2i(unpack_uint_to_uvec2(uint_rval)),
+ constant(16u)),
+ constant(16u));
+ }
+
+ ir_variable *i = factory.make_temp(glsl_type::int_type,
+ "tmp_unpack_uint_to_ivec2_i");
+ factory.emit(assign(i, u2i(uint_rval)));
+
+ /* ivec2 i2; */
+ ir_variable *i2 = factory.make_temp(glsl_type::ivec2_type,
+ "tmp_unpack_uint_to_ivec2_i2");
+
+ factory.emit(assign(i2, bitfield_extract(i, constant(0), constant(16)),
+ WRITEMASK_X));
+ factory.emit(assign(i2, bitfield_extract(i, constant(16), constant(16)),
+ WRITEMASK_Y));
+
+ return deref(i2).val;
+ }
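+
+   /* A worked example of the sign extension (a sketch): for the input
+    * 0xffff8000, the non-BFE path computes u2i(uvec2(0x8000, 0xffff)),
+    * shifts each component left and then arithmetically right by 16,
+    * and returns ivec2(-32768, -1).
+    */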
+
+ /**
+ * \brief Unpack a uint32 into four uint8's.
+ *
+ * Interpret the given uint32 as a uint8 4-tuple where the uint32's least
+ * significant bits specify the 4-tuple's first element. Return the uint8
+ * 4-tuple as a uvec4.
+ */
+ ir_rvalue*
+ unpack_uint_to_uvec4(ir_rvalue *uint_rval)
+ {
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ /* uint u = UINT_RVAL; */
+ ir_variable *u = factory.make_temp(glsl_type::uint_type,
+ "tmp_unpack_uint_to_uvec4_u");
+ factory.emit(assign(u, uint_rval));
+
+ /* uvec4 u4; */
+ ir_variable *u4 = factory.make_temp(glsl_type::uvec4_type,
+ "tmp_unpack_uint_to_uvec4_u4");
+
+ /* u4.x = u & 0xffu; */
+ factory.emit(assign(u4, bit_and(u, constant(0xffu)), WRITEMASK_X));
+
+ if (op_mask & LOWER_PACK_USE_BFE) {
+ /* u4.y = bitfield_extract(u, 8, 8); */
+ factory.emit(assign(u4, bitfield_extract(u, constant(8u), constant(8u)),
+ WRITEMASK_Y));
+
+ /* u4.z = bitfield_extract(u, 16, 8); */
+ factory.emit(assign(u4, bitfield_extract(u, constant(16u), constant(8u)),
+ WRITEMASK_Z));
+ } else {
+ /* u4.y = (u >> 8u) & 0xffu; */
+ factory.emit(assign(u4, bit_and(rshift(u, constant(8u)),
+ constant(0xffu)), WRITEMASK_Y));
+
+ /* u4.z = (u >> 16u) & 0xffu; */
+ factory.emit(assign(u4, bit_and(rshift(u, constant(16u)),
+ constant(0xffu)), WRITEMASK_Z));
+ }
+
+ /* u4.w = u >> 24u; */
+ factory.emit(assign(u4, rshift(u, constant(24u)), WRITEMASK_W));
+
+ return deref(u4).val;
+ }
+
+ /**
+ * \brief Unpack a uint32 into four int8's.
+ *
+ * Specifically each 8-bit value is sign-extended to the full width of an
+ * int32 on return.
+ */
+ ir_rvalue *
+ unpack_uint_to_ivec4(ir_rvalue *uint_rval)
+ {
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ if (!(op_mask & LOWER_PACK_USE_BFE)) {
+ return rshift(lshift(u2i(unpack_uint_to_uvec4(uint_rval)),
+ constant(24u)),
+ constant(24u));
+ }
+
+ ir_variable *i = factory.make_temp(glsl_type::int_type,
+ "tmp_unpack_uint_to_ivec4_i");
+ factory.emit(assign(i, u2i(uint_rval)));
+
+ /* ivec4 i4; */
+ ir_variable *i4 = factory.make_temp(glsl_type::ivec4_type,
+ "tmp_unpack_uint_to_ivec4_i4");
+
+ factory.emit(assign(i4, bitfield_extract(i, constant(0), constant(8)),
+ WRITEMASK_X));
+ factory.emit(assign(i4, bitfield_extract(i, constant(8), constant(8)),
+ WRITEMASK_Y));
+ factory.emit(assign(i4, bitfield_extract(i, constant(16), constant(8)),
+ WRITEMASK_Z));
+ factory.emit(assign(i4, bitfield_extract(i, constant(24), constant(8)),
+ WRITEMASK_W));
+
+ return deref(i4).val;
+ }
+
+ /**
+ * \brief Lower a packSnorm2x16 expression.
+ *
+ * \param vec2_rval is packSnorm2x16's input
+ * \return packSnorm2x16's output as a uint rvalue
+ */
+ ir_rvalue*
+ lower_pack_snorm_2x16(ir_rvalue *vec2_rval)
+ {
+ /* From page 88 (94 of pdf) of the GLSL ES 3.00 spec:
+ *
+ * highp uint packSnorm2x16(vec2 v)
+ * --------------------------------
+ * First, converts each component of the normalized floating-point value
+ * v into 16-bit integer values. Then, the results are packed into the
+ * returned 32-bit unsigned integer.
+ *
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packSnorm2x16: round(clamp(c, -1, +1) * 32767.0)
+ *
+ * The first component of the vector will be written to the least
+ * significant bits of the output; the last component will be written to
+ * the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return pack_uvec2_to_uint(
+ * uvec2(ivec2(
+ * round(clamp(VEC2_RVALUE, -1.0f, 1.0f) * 32767.0f))));
+ *
+ * It is necessary to first convert the vec2 to ivec2 rather than directly
+ * converting vec2 to uvec2 because the latter conversion is undefined.
+ * From page 56 (62 of pdf) of the GLSL ES 3.00 spec: "It is undefined to
+ * convert a negative floating point value to an uint".
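+ *
+ * As a concrete example, packSnorm2x16(vec2(-1.0, 0.5)) clamps and scales
+ * the components to (-32767.0, 16383.5), rounds to even to give
+ * (-32767, 16384), and packs the pair to 0x40008001 (the int16 -32767 has
+ * the bit pattern 0x8001).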
+ */
+ assert(vec2_rval->type == glsl_type::vec2_type);
+
+ ir_rvalue *result = pack_uvec2_to_uint(
+ i2u(f2i(round_even(mul(clamp(vec2_rval,
+ constant(-1.0f),
+ constant(1.0f)),
+ constant(32767.0f))))));
+
+ assert(result->type == glsl_type::uint_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower a packSnorm4x8 expression.
+ *
+ * \param vec4_rval is packSnorm4x8's input
+ * \return packSnorm4x8's output as a uint rvalue
+ */
+ ir_rvalue*
+ lower_pack_snorm_4x8(ir_rvalue *vec4_rval)
+ {
+ /* From page 137 (143 of pdf) of the GLSL 4.30 spec:
+ *
+ * highp uint packSnorm4x8(vec4 v)
+ * -------------------------------
+ * First, converts each component of the normalized floating-point value
+ * v into 8-bit integer values. Then, the results are packed into the
+ * returned 32-bit unsigned integer.
+ *
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packSnorm4x8: round(clamp(c, -1, +1) * 127.0)
+ *
+ * The first component of the vector will be written to the least
+ * significant bits of the output; the last component will be written to
+ * the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return pack_uvec4_to_uint(
+ * uvec4(ivec4(
+ * round(clamp(VEC4_RVALUE, -1.0f, 1.0f) * 127.0f))));
+ *
+ * It is necessary to first convert the vec4 to ivec4 rather than directly
+ * converting vec4 to uvec4 because the latter conversion is undefined.
+ * From page 87 (93 of pdf) of the GLSL 4.30 spec: "It is undefined to
+ * convert a negative floating point value to an uint".
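+ *
+ * As a concrete example, packSnorm4x8(vec4(-1.0, 0.0, 0.5, 1.0)) rounds
+ * the scaled components to (-127, 0, 64, 127) and packs them to
+ * 0x7f400081 (the int8 -127 has the bit pattern 0x81).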
+ */
+ assert(vec4_rval->type == glsl_type::vec4_type);
+
+ ir_rvalue *result = pack_uvec4_to_uint(
+ i2u(f2i(round_even(mul(clamp(vec4_rval,
+ constant(-1.0f),
+ constant(1.0f)),
+ constant(127.0f))))));
+
+ assert(result->type == glsl_type::uint_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower an unpackSnorm2x16 expression.
+ *
+ * \param uint_rval is unpackSnorm2x16's input
+ * \return unpackSnorm2x16's output as a vec2 rvalue
+ */
+ ir_rvalue*
+ lower_unpack_snorm_2x16(ir_rvalue *uint_rval)
+ {
+ /* From page 88 (94 of pdf) of the GLSL ES 3.00 spec:
+ *
+ * highp vec2 unpackSnorm2x16 (highp uint p)
+ * -----------------------------------------
+ * First, unpacks a single 32-bit unsigned integer p into a pair of
+ * 16-bit unsigned integers. Then, each component is converted to
+ * a normalized floating-point value to generate the returned
+ * two-component vector.
+ *
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackSnorm2x16: clamp(f / 32767.0, -1,+1)
+ *
+ * The first component of the returned vector will be extracted from the
+ * least significant bits of the input; the last component will be
+ * extracted from the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return clamp(
+ * ((ivec2(unpack_uint_to_uvec2(UINT_RVALUE)) << 16) >> 16) / 32767.0f,
+ * -1.0f, 1.0f);
+ *
+ * The above IR may appear unnecessarily complex, but the intermediate
+ * conversion to ivec2 and the bit shifts are necessary to correctly unpack
+ * negative floats.
+ *
+ * To see why, consider packing and then unpacking vec2(-1.0, 0.0).
+ * packSnorm2x16 encodes -1.0 as the int16 0x8001. During unpacking, we
+ * place that int16 into an int32, which results in the *positive* integer
+ * 0x00008001. The int16's sign bit becomes, in the int32, the rather
+ * unimportant bit 15. We must now extend the int16's sign bit into bits
+ * 16-31, which is accomplished by left-shifting then right-shifting.
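+ *
+ * For example, (0x00008001 << 16) gives 0x80010000, and an arithmetic
+ * right shift by 16 then replicates the sign bit, giving 0xffff8001,
+ * which is the int32 -32767 we want.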
+ */
+
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ ir_rvalue *result =
+ clamp(div(i2f(unpack_uint_to_ivec2(uint_rval)),
+ constant(32767.0f)),
+ constant(-1.0f),
+ constant(1.0f));
+
+ assert(result->type == glsl_type::vec2_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower an unpackSnorm4x8 expression.
+ *
+ * \param uint_rval is unpackSnorm4x8's input
+ * \return unpackSnorm4x8's output as a vec4 rvalue
+ */
+ ir_rvalue*
+ lower_unpack_snorm_4x8(ir_rvalue *uint_rval)
+ {
+ /* From page 137 (143 of pdf) of the GLSL 4.30 spec:
+ *
+ * highp vec4 unpackSnorm4x8 (highp uint p)
+ * ----------------------------------------
+ * First, unpacks a single 32-bit unsigned integer p into four
+ * 8-bit unsigned integers. Then, each component is converted to
+ * a normalized floating-point value to generate the returned
+ * four-component vector.
+ *
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackSnorm4x8: clamp(f / 127.0, -1, +1)
+ *
+ * The first component of the returned vector will be extracted from the
+ * least significant bits of the input; the last component will be
+ * extracted from the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return clamp(
+ * ((ivec4(unpack_uint_to_uvec4(UINT_RVALUE)) << 24) >> 24) / 127.0f,
+ * -1.0f, 1.0f);
+ *
+ * The above IR may appear unnecessarily complex, but the intermediate
+ * conversion to ivec4 and the bit shifts are necessary to correctly unpack
+ * negative floats.
+ *
+ * To see why, consider packing and then unpacking vec4(-1.0, 0.0, 0.0,
+ * 0.0). packSnorm4x8 encodes -1.0 as the int8 0x81. During unpacking, we
+ * place that int8 into an int32, which results in the *positive* integer
+ * 0x00000081. The int8's sign bit becomes, in the int32, the rather
+ * unimportant bit 7. We must now extend the int8's sign bit into bits
+ * 8-31, which is accomplished by left-shifting then right-shifting.
+ */
+
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ ir_rvalue *result =
+ clamp(div(i2f(unpack_uint_to_ivec4(uint_rval)),
+ constant(127.0f)),
+ constant(-1.0f),
+ constant(1.0f));
+
+ assert(result->type == glsl_type::vec4_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower a packUnorm2x16 expression.
+ *
+ * \param vec2_rval is packUnorm2x16's input
+ * \return packUnorm2x16's output as a uint rvalue
+ */
+ ir_rvalue*
+ lower_pack_unorm_2x16(ir_rvalue *vec2_rval)
+ {
+ /* From page 88 (94 of pdf) of the GLSL ES 3.00 spec:
+ *
+ * highp uint packUnorm2x16 (vec2 v)
+ * ---------------------------------
+ * First, converts each component of the normalized floating-point value
+ * v into 16-bit integer values. Then, the results are packed into the
+ * returned 32-bit unsigned integer.
+ *
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packUnorm2x16: round(clamp(c, 0, +1) * 65535.0)
+ *
+ * The first component of the vector will be written to the least
+ * significant bits of the output; the last component will be written to
+ * the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return pack_uvec2_to_uint(uvec2(
+ * round(clamp(VEC2_RVALUE, 0.0f, 1.0f) * 65535.0f)));
+ *
+ * Here it is safe to directly convert the vec2 to uvec2 because the vec2
+ * has been clamped to a non-negative range.
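+ *
+ * For example, packUnorm2x16(vec2(0.0, 1.0)) scales the components to
+ * (0.0, 65535.0) and packs them to 0xffff0000.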
+ */
+
+ assert(vec2_rval->type == glsl_type::vec2_type);
+
+ ir_rvalue *result = pack_uvec2_to_uint(
+ f2u(round_even(mul(saturate(vec2_rval), constant(65535.0f)))));
+
+ assert(result->type == glsl_type::uint_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower a packUnorm4x8 expression.
+ *
+ * \param vec4_rval is packUnorm4x8's input
+ * \return packUnorm4x8's output as a uint rvalue
+ */
+ ir_rvalue*
+ lower_pack_unorm_4x8(ir_rvalue *vec4_rval)
+ {
+ /* From page 137 (143 of pdf) of the GLSL 4.30 spec:
+ *
+ * highp uint packUnorm4x8 (vec4 v)
+ * --------------------------------
+ * First, converts each component of the normalized floating-point value
+ * v into 8-bit integer values. Then, the results are packed into the
+ * returned 32-bit unsigned integer.
+ *
+ * The conversion for component c of v to fixed point is done as
+ * follows:
+ *
+ * packUnorm4x8: round(clamp(c, 0, +1) * 255.0)
+ *
+ * The first component of the vector will be written to the least
+ * significant bits of the output; the last component will be written to
+ * the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return pack_uvec4_to_uint(uvec4(
+ * round(clamp(VEC4_RVALUE, 0.0f, 1.0f) * 255.0f)));
+ *
+ * Here it is safe to directly convert the vec4 to uvec4 because the vec4
+ * has been clamped to a non-negative range.
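+ *
+ * For example, packUnorm4x8(vec4(0.0, 1.0/3.0, 2.0/3.0, 1.0)) scales and
+ * rounds the components to (0, 85, 170, 255) and packs them to 0xffaa5500.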
+ */
+
+ assert(vec4_rval->type == glsl_type::vec4_type);
+
+ ir_rvalue *result = pack_uvec4_to_uint(
+ f2u(round_even(mul(saturate(vec4_rval), constant(255.0f)))));
+
+ assert(result->type == glsl_type::uint_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower an unpackUnorm2x16 expression.
+ *
+ * \param uint_rval is unpackUnorm2x16's input
+ * \return unpackUnorm2x16's output as a vec2 rvalue
+ */
+ ir_rvalue*
+ lower_unpack_unorm_2x16(ir_rvalue *uint_rval)
+ {
+ /* From page 89 (95 of pdf) of the GLSL ES 3.00 spec:
+ *
+ * highp vec2 unpackUnorm2x16 (highp uint p)
+ * -----------------------------------------
+ * First, unpacks a single 32-bit unsigned integer p into a pair of
+ * 16-bit unsigned integers. Then, each component is converted to
+ * a normalized floating-point value to generate the returned
+ * two-component vector.
+ *
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackUnorm2x16: f / 65535.0
+ *
+ * The first component of the returned vector will be extracted from the
+ * least significant bits of the input; the last component will be
+ * extracted from the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return vec2(unpack_uint_to_uvec2(UINT_RVALUE)) / 65535.0;
+ */
+
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ ir_rvalue *result = div(u2f(unpack_uint_to_uvec2(uint_rval)),
+ constant(65535.0f));
+
+ assert(result->type == glsl_type::vec2_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower an unpackUnorm4x8 expression.
+ *
+ * \param uint_rval is unpackUnorm4x8's input
+ * \return unpackUnorm4x8's output as a vec4 rvalue
+ */
+ ir_rvalue*
+ lower_unpack_unorm_4x8(ir_rvalue *uint_rval)
+ {
+ /* From page 137 (143 of pdf) of the GLSL 4.30 spec:
+ *
+ * highp vec4 unpackUnorm4x8 (highp uint p)
+ * ----------------------------------------
+ * First, unpacks a single 32-bit unsigned integer p into four
+ * 8-bit unsigned integers. Then, each component is converted to
+ * a normalized floating-point value to generate the returned
+ * four-component vector.
+ *
+ * The conversion for unpacked fixed-point value f to floating point is
+ * done as follows:
+ *
+ * unpackUnorm4x8: f / 255.0
+ *
+ * The first component of the returned vector will be extracted from the
+ * least significant bits of the input; the last component will be
+ * extracted from the most significant bits.
+ *
+ * This function generates IR that approximates the following pseudo-GLSL:
+ *
+ * return vec4(unpack_uint_to_uvec4(UINT_RVALUE)) / 255.0;
+ */
+
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ ir_rvalue *result = div(u2f(unpack_uint_to_uvec4(uint_rval)),
+ constant(255.0f));
+
+ assert(result->type == glsl_type::vec4_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower the component-wise calculation of packHalf2x16.
+ *
+ * \param f_rval is one component of packHalf2x16's input
+ * \param e_rval is the unshifted exponent bits of f_rval
+ * \param m_rval is the unshifted mantissa bits of f_rval
+ *
+ * \return a uint rvalue that encodes a float16 in its lower 16 bits
+ */
+ ir_rvalue*
+ pack_half_1x16_nosign(ir_rvalue *f_rval,
+ ir_rvalue *e_rval,
+ ir_rvalue *m_rval)
+ {
+ assert(e_rval->type == glsl_type::uint_type);
+ assert(m_rval->type == glsl_type::uint_type);
+
+ /* uint u16; */
+ ir_variable *u16 = factory.make_temp(glsl_type::uint_type,
+ "tmp_pack_half_1x16_u16");
+
+ /* float f = FLOAT_RVAL; */
+ ir_variable *f = factory.make_temp(glsl_type::float_type,
+ "tmp_pack_half_1x16_f");
+ factory.emit(assign(f, f_rval));
+
+ /* uint e = E_RVAL; */
+ ir_variable *e = factory.make_temp(glsl_type::uint_type,
+ "tmp_pack_half_1x16_e");
+ factory.emit(assign(e, e_rval));
+
+ /* uint m = M_RVAL; */
+ ir_variable *m = factory.make_temp(glsl_type::uint_type,
+ "tmp_pack_half_1x16_m");
+ factory.emit(assign(m, m_rval));
+
+ /* Preliminaries
+ * -------------
+ *
+ * For a float16, the bit layout is:
+ *
+ * sign: 15
+ * exponent: 10:14
+ * mantissa: 0:9
+ *
+ * Let f16 be a float16 value. The sign, exponent, and mantissa
+ * determine its value thus:
+ *
+ * if e16 = 0 and m16 = 0, then zero: (-1)^s16 * 0 (1)
+ * if e16 = 0 and m16 != 0, then subnormal: (-1)^s16 * 2^(e16 - 14) * (m16 / 2^10) (2)
+ * if 0 < e16 < 31, then normal: (-1)^s16 * 2^(e16 - 15) * (1 + m16 / 2^10) (3)
+ * if e16 = 31 and m16 = 0, then infinite: (-1)^s16 * inf (4)
+ * if e16 = 31 and m16 != 0, then NaN (5)
+ *
+ * where 0 <= m16 < 2^10.
+ *
+ * For a float32, the bit layout is:
+ *
+ * sign: 31
+ * exponent: 23:30
+ * mantissa: 0:22
+ *
+ * Let f32 be a float32 value. The sign, exponent, and mantissa
+ * determine its value thus:
+ *
+ * if e32 = 0 and m32 = 0, then zero: (-1)^s * 0 (10)
+ * if e32 = 0 and m32 != 0, then subnormal: (-1)^s * 2^(e32 - 126) * (m32 / 2^23) (11)
+ * if 0 < e32 < 255, then normal: (-1)^s * 2^(e32 - 127) * (1 + m32 / 2^23) (12)
+ * if e32 = 255 and m32 = 0, then infinite: (-1)^s * inf (13)
+ * if e32 = 255 and m32 != 0, then NaN (14)
+ *
+ * where 0 <= m32 < 2^23.
+ *
+ * The minimum and maximum normal float16 values are
+ *
+ * min_norm16 = 2^(1 - 15) * (1 + 0 / 2^10) = 2^(-14) (20)
+ * max_norm16 = 2^(30 - 15) * (1 + 1023 / 2^10) (21)
+ *
+ * The step at max_norm16 is
+ *
+ * max_step16 = 2^5 (22)
+ *
+ * Observe that the float16 boundary values in equations 20-21 lie in the
+ * range of normal float32 values.
+ *
+ *
+ * Rounding Behavior
+ * -----------------
+ * Not all float32 values can be exactly represented as a float16. We
+ * round all such intermediate float32 values to the nearest float16; if
+ * the float32 is exactly between two float16 values, we round to the one
+ * with an even mantissa. This rounding behavior has several benefits:
+ *
+ * - It has no sign bias.
+ *
+ * - It reproduces the behavior of real hardware: opcode F32TO16 in Intel's
+ * GPU ISA.
+ *
+ * - By reproducing the behavior of the GPU (at least on Intel hardware),
+ * compile-time evaluation of constant packHalf2x16 GLSL expressions will
+ * result in the same value as if the expression were executed on the
+ * GPU.
+ *
+ * Calculation
+ * -----------
+ * Our task is to compute s16, e16, m16 given f32. Since this function
+ * ignores the sign bit, assume that s32 = s16 = 0. There are several
+ * cases to consider.
+ */
+
+ factory.emit(
+
+ /* Case 1) f32 is NaN
+ *
+ * The resultant f16 will also be NaN.
+ */
+
+ /* if (e32 == 255 && m32 != 0) { */
+ if_tree(logic_and(equal(e, constant(0xffu << 23u)),
+ logic_not(equal(m, constant(0u)))),
+
+ assign(u16, constant(0x7fffu)),
+
+ /* Case 2) f32 lies in the range [0, min_norm16).
+ *
+ * The resultant float16 will be either zero, subnormal, or normal.
+ *
+ * Solving
+ *
+ * f32 = min_norm16 (30)
+ *
+ * gives
+ *
+ * e32 = 113 and m32 = 0 (31)
+ *
+ * Therefore this case occurs if and only if
+ *
+ * e32 < 113 (32)
+ */
+
+ /* } else if (e32 < 113) { */
+ if_tree(less(e, constant(113u << 23u)),
+
+ /* u16 = uint(round_to_even(abs(f32) * float(1u << 24u))); */
+ assign(u16, f2u(round_even(mul(expr(ir_unop_abs, f),
+ constant((float) (1 << 24)))))),
+
+ /* Case 3) f32 lies in the range
+ * [min_norm16, max_norm16 + max_step16).
+ *
+ * The resultant float16 will be either normal or infinite.
+ *
+ * Solving
+ *
+ * f32 = max_norm16 + max_step16 (40)
+ * = 2^15 * (1 + 1023 / 2^10) + 2^5 (41)
+ * = 2^16 (42)
+ * gives
+ *
+ * e32 = 143 and m32 = 0 (43)
+ *
+ * We already solved the boundary condition f32 = min_norm16 above
+ * in equation 31. Therefore this case occurs if and only if
+ *
+ * 113 <= e32 and e32 < 143
+ */
+
+ /* } else if (e32 < 143) { */
+ if_tree(less(e, constant(143u << 23u)),
+
+ /* The addition below handles the case where the mantissa rounds
+ * up to 1024 and bumps the exponent.
+ *
+ * u16 = ((e - (112u << 23u)) >> 13u)
+ *     + round_to_even(float(m) / (1u << 13u));
+ */
+ assign(u16, add(rshift(sub(e, constant(112u << 23u)),
+ constant(13u)),
+ f2u(round_even(
+ div(u2f(m), constant((float) (1 << 13))))))),
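+
+ /* For example, f32 = 1.0f has e = 127u << 23u and m = 0u, so
+  * u16 = ((127u - 112u) << 23u) >> 13u = 15u << 10u = 0x3c00u,
+  * which is the float16 encoding of 1.0.
+  */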
+
+ /* Case 4) f32 lies in the range [max_norm16 + max_step16, inf].
+ *
+ * The resultant float16 will be infinite.
+ *
+ * The cases above caught all float32 values in the range
+ * [0, max_norm16 + max_step16), so this is the fall-through case.
+ */
+
+ /* } else { */
+
+ assign(u16, constant(31u << 10u))))));
+
+ /* } */
+
+ return deref(u16).val;
+ }
+
+ /**
+ * \brief Lower a packHalf2x16 expression.
+ *
+ * \param vec2_rval is packHalf2x16's input
+ * \return packHalf2x16's output as a uint rvalue
+ */
+ ir_rvalue*
+ lower_pack_half_2x16(ir_rvalue *vec2_rval)
+ {
+ /* From page 89 (95 of pdf) of the GLSL ES 3.00 spec:
+ *
+ * highp uint packHalf2x16 (mediump vec2 v)
+ * ----------------------------------------
+ * Returns an unsigned integer obtained by converting the components of
+ * a two-component floating-point vector to the 16-bit floating-point
+ * representation found in the OpenGL ES Specification, and then packing
+ * these two 16-bit integers into a 32-bit unsigned integer.
+ *
+ * The first vector component specifies the 16 least-significant bits
+ * of the result; the second component specifies the 16 most-significant
+ * bits.
+ */
+
+ assert(vec2_rval->type == glsl_type::vec2_type);
+
+ /* vec2 f = VEC2_RVAL; */
+ ir_variable *f = factory.make_temp(glsl_type::vec2_type,
+ "tmp_pack_half_2x16_f");
+ factory.emit(assign(f, vec2_rval));
+
+ /* uvec2 f32 = bitcast_f2u(f); */
+ ir_variable *f32 = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_pack_half_2x16_f32");
+ factory.emit(assign(f32, expr(ir_unop_bitcast_f2u, f)));
+
+ /* uvec2 f16; */
+ ir_variable *f16 = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_pack_half_2x16_f16");
+
+ /* Get f32's unshifted exponent bits.
+ *
+ * uvec2 e = f32 & 0x7f800000u;
+ */
+ ir_variable *e = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_pack_half_2x16_e");
+ factory.emit(assign(e, bit_and(f32, constant(0x7f800000u))));
+
+ /* Get f32's unshifted mantissa bits.
+ *
+ * uvec2 m = f32 & 0x007fffffu;
+ */
+ ir_variable *m = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_pack_half_2x16_m");
+ factory.emit(assign(m, bit_and(f32, constant(0x007fffffu))));
+
+ /* Set f16's exponent and mantissa bits.
+ *
+ * f16.x = pack_half_1x16_nosign(e.x, m.x);
+ * f16.y = pack_half_1x16_nosign(e.y, m.y);
+ */
+ factory.emit(assign(f16, pack_half_1x16_nosign(swizzle_x(f),
+ swizzle_x(e),
+ swizzle_x(m)),
+ WRITEMASK_X));
+ factory.emit(assign(f16, pack_half_1x16_nosign(swizzle_y(f),
+ swizzle_y(e),
+ swizzle_y(m)),
+ WRITEMASK_Y));
+
+ /* Set f16's sign bits.
+ *
+ * f16 |= (f32 & (1u << 31u)) >> 16u;
+ */
+ factory.emit(
+ assign(f16, bit_or(f16,
+ rshift(bit_and(f32, constant(1u << 31u)),
+ constant(16u)))));
+
+
+ /* return (f16.y << 16u) | f16.x; */
+ ir_rvalue *result = bit_or(lshift(swizzle_y(f16),
+ constant(16u)),
+ swizzle_x(f16));
+
+ assert(result->type == glsl_type::uint_type);
+ return result;
+ }
+
+ /**
+ * \brief Lower the component-wise calculation of unpackHalf2x16.
+ *
+ * Given a uint that encodes a float16 in its lower 16 bits, this function
+ * returns a uint that encodes a float32 with the same value. The sign bit
+ * of the float16 is ignored.
+ *
+ * \param e_rval is the unshifted exponent bits of a float16
+ * \param m_rval is the unshifted mantissa bits of a float16
+ * \return a uint rvalue that encodes a float32
+ */
+ ir_rvalue*
+ unpack_half_1x16_nosign(ir_rvalue *e_rval, ir_rvalue *m_rval)
+ {
+ assert(e_rval->type == glsl_type::uint_type);
+ assert(m_rval->type == glsl_type::uint_type);
+
+ /* uint u32; */
+ ir_variable *u32 = factory.make_temp(glsl_type::uint_type,
+ "tmp_unpack_half_1x16_u32");
+
+ /* uint e = E_RVAL; */
+ ir_variable *e = factory.make_temp(glsl_type::uint_type,
+ "tmp_unpack_half_1x16_e");
+ factory.emit(assign(e, e_rval));
+
+ /* uint m = M_RVAL; */
+ ir_variable *m = factory.make_temp(glsl_type::uint_type,
+ "tmp_unpack_half_1x16_m");
+ factory.emit(assign(m, m_rval));
+
+ /* Preliminaries
+ * -------------
+ *
+ * For a float16, the bit layout is:
+ *
+ * sign: 15
+ * exponent: 10:14
+ * mantissa: 0:9
+ *
+ * Let f16 be a float16 value. The sign, exponent, and mantissa
+ * determine its value thus:
+ *
+ * if e16 = 0 and m16 = 0, then zero: (-1)^s16 * 0 (1)
+ * if e16 = 0 and m16 != 0, then subnormal: (-1)^s16 * 2^(e16 - 14) * (m16 / 2^10) (2)
+ * if 0 < e16 < 31, then normal: (-1)^s16 * 2^(e16 - 15) * (1 + m16 / 2^10) (3)
+ * if e16 = 31 and m16 = 0, then infinite: (-1)^s16 * inf (4)
+ * if e16 = 31 and m16 != 0, then NaN (5)
+ *
+ * where 0 <= m16 < 2^10.
+ *
+ * For a float32, the bit layout is:
+ *
+ * sign: 31
+ * exponent: 23:30
+ * mantissa: 0:22
+ *
+ * Let f32 be a float32 value. The sign, exponent, and mantissa
+ * determine its value thus:
+ *
+ * if e32 = 0 and m32 = 0, then zero: (-1)^s * 0 (10)
+ * if e32 = 0 and m32 != 0, then subnormal: (-1)^s * 2^(e32 - 126) * (m32 / 2^23) (11)
+ * if 0 < e32 < 255, then normal: (-1)^s * 2^(e32 - 127) * (1 + m32 / 2^23) (12)
+ * if e32 = 255 and m32 = 0, then infinite: (-1)^s * inf (13)
+ * if e32 = 255 and m32 != 0, then NaN (14)
+ *
+ * where 0 <= m32 < 2^23.
+ *
+ * Calculation
+ * -----------
+ * Our task is to compute s32, e32, m32 given f16. Since this function
+ * ignores the sign bit, assume that s32 = s16 = 0. There are several
+ * cases to consider.
+ */
+
+ factory.emit(
+
+ /* Case 1) f16 is zero or subnormal.
+ *
+ * The simplest method of calculating f32 in this case is
+ *
+ * f32 = f16 (20)
+ * = 2^(-14) * (m16 / 2^10) (21)
+ *            = m16 / 2^24                                 (22)
+ */
+
+ /* if (e16 == 0) { */
+ if_tree(equal(e, constant(0u)),
+
+ /* u32 = bitcast_f2u(float(m) / float(1 << 24)); */
+ assign(u32, expr(ir_unop_bitcast_f2u,
+ div(u2f(m), constant((float)(1 << 24))))),
+
+ /* Case 2) f16 is normal.
+ *
+ * The equation
+ *
+ * f32 = f16 (30)
+ * 2^(e32 - 127) * (1 + m32 / 2^23) = (31)
+ * 2^(e16 - 15) * (1 + m16 / 2^10)
+ *
+ * can be decomposed into two
+ *
+ * 2^(e32 - 127) = 2^(e16 - 15) (32)
+ * 1 + m32 / 2^23 = 1 + m16 / 2^10 (33)
+ *
+ * which solve to
+ *
+ * e32 = e16 + 112 (34)
+ * m32 = m16 * 2^13 (35)
+ */
+
+ /* } else if (e16 < 31)) { */
+ if_tree(less(e, constant(31u << 10u)),
+
+ /* u32 = ((e + (112 << 10)) | m) << 13;
+ */
+ assign(u32, lshift(bit_or(add(e, constant(112u << 10u)), m),
+ constant(13u))),
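+
+ /* For example, the float16 1.0 (0x3c00) has e = 15u << 10u and
+  * m = 0u, so u32 = (127u << 10u) << 13u = 127u << 23u, which is
+  * the float32 bit pattern of 1.0.
+  */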
+
+
+ /* Case 3) f16 is infinite. */
+ if_tree(equal(m, constant(0u)),
+
+ assign(u32, constant(255u << 23u)),
+
+ /* Case 4) f16 is NaN. */
+ /* } else { */
+
+ assign(u32, constant(0x7fffffffu))))));
+
+ /* } */
+
+ return deref(u32).val;
+ }
+
+ /**
+ * \brief Lower an unpackHalf2x16 expression.
+ *
+ * \param uint_rval is unpackHalf2x16's input
+ * \return unpackHalf2x16's output as a vec2 rvalue
+ */
+ ir_rvalue*
+ lower_unpack_half_2x16(ir_rvalue *uint_rval)
+ {
+ /* From page 89 (95 of pdf) of the GLSL ES 3.00 spec:
+ *
+ * mediump vec2 unpackHalf2x16 (highp uint v)
+ * ------------------------------------------
+ * Returns a two-component floating-point vector with components
+ * obtained by unpacking a 32-bit unsigned integer into a pair of 16-bit
+ * values, interpreting those values as 16-bit floating-point numbers
+ * according to the OpenGL ES Specification, and converting them to
+ * 32-bit floating-point values.
+ *
+ * The first component of the vector is obtained from the
+ * 16 least-significant bits of v; the second component is obtained
+ * from the 16 most-significant bits of v.
+ */
+ assert(uint_rval->type == glsl_type::uint_type);
+
+ /* uint u = RVALUE;
+ * uvec2 f16 = uvec2(u & 0xffffu, u >> 16u);
+ */
+ ir_variable *f16 = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_unpack_half_2x16_f16");
+ factory.emit(assign(f16, unpack_uint_to_uvec2(uint_rval)));
+
+ /* uvec2 f32; */
+ ir_variable *f32 = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_unpack_half_2x16_f32");
+
+ /* Get f16's unshifted exponent bits.
+ *
+ * uvec2 e = f16 & 0x7c00u;
+ */
+ ir_variable *e = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_unpack_half_2x16_e");
+ factory.emit(assign(e, bit_and(f16, constant(0x7c00u))));
+
+ /* Get f16's unshifted mantissa bits.
+ *
+ * uvec2 m = f16 & 0x03ffu;
+ */
+ ir_variable *m = factory.make_temp(glsl_type::uvec2_type,
+ "tmp_unpack_half_2x16_m");
+ factory.emit(assign(m, bit_and(f16, constant(0x03ffu))));
+
+ /* Set f32's exponent and mantissa bits.
+ *
+ * f32.x = unpack_half_1x16_nosign(e.x, m.x);
+ * f32.y = unpack_half_1x16_nosign(e.y, m.y);
+ */
+ factory.emit(assign(f32, unpack_half_1x16_nosign(swizzle_x(e),
+ swizzle_x(m)),
+ WRITEMASK_X));
+ factory.emit(assign(f32, unpack_half_1x16_nosign(swizzle_y(e),
+ swizzle_y(m)),
+ WRITEMASK_Y));
+
+ /* Set f32's sign bit.
+ *
+ * f32 |= (f16 & 0x8000u) << 16u;
+ */
+ factory.emit(assign(f32, bit_or(f32,
+ lshift(bit_and(f16,
+ constant(0x8000u)),
+ constant(16u)))));
+
+ /* return bitcast_u2f(f32); */
+ ir_rvalue *result = expr(ir_unop_bitcast_u2f, f32);
+ assert(result->type == glsl_type::vec2_type);
+ return result;
+ }
+};
+
+} // namespace anonymous
+
+/**
+ * \brief Lower the builtin packing functions.
+ *
+ * \param op_mask is a bitmask of `enum lower_packing_builtins_op`.
+ */
+bool
+lower_packing_builtins(exec_list *instructions, int op_mask)
+{
+ lower_packing_builtins_visitor v(op_mask);
+ visit_list_elements(&v, instructions, true);
+ return v.get_progress();
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_precision.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_precision.cpp
new file mode 100644
index 0000000000..332cd50cc7
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_precision.cpp
@@ -0,0 +1,721 @@
+/*
+ * Copyright © 2019 Google, Inc
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_precision.cpp
+ */
+
+#include "main/macros.h"
+#include "compiler/glsl_types.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+#include "util/half_float.h"
+#include "util/set.h"
+#include "util/hash_table.h"
+#include <vector>
+
+namespace {
+
+class find_precision_visitor : public ir_rvalue_enter_visitor {
+public:
+ find_precision_visitor();
+ ~find_precision_visitor();
+
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+ virtual ir_visitor_status visit_enter(ir_call *ir);
+
+ ir_function_signature *map_builtin(ir_function_signature *sig);
+
+ bool progress;
+
+ /* Set of rvalues that can be lowered. This will be filled in by
+ * find_lowerable_rvalues_visitor. Only the root node of a lowerable section
+ * will be added to this set.
+ */
+ struct set *lowerable_rvalues;
+
+ /**
+ * A mapping of builtin signature functions to lowered versions. This is
+ * filled in lazily when a lowered version is needed.
+ */
+ struct hash_table *lowered_builtins;
+ /**
+ * A temporary hash table only used in order to clone functions.
+ */
+ struct hash_table *clone_ht;
+
+ void *lowered_builtin_mem_ctx;
+};
+
+class find_lowerable_rvalues_visitor : public ir_hierarchical_visitor {
+public:
+ enum can_lower_state {
+ UNKNOWN,
+ CANT_LOWER,
+ SHOULD_LOWER,
+ };
+
+ enum parent_relation {
+ /* The parent performs a further operation involving the result from the
+ * child and can be lowered along with it.
+ */
+ COMBINED_OPERATION,
+ /* The parent instruction’s operation is independent of the child type so
+ * the child should be lowered separately.
+ */
+ INDEPENDENT_OPERATION,
+ };
+
+ struct stack_entry {
+ ir_instruction *instr;
+ enum can_lower_state state;
+ /* List of child rvalues that can be lowered. When this stack entry is
+ * popped, if this node itself can’t be lowered then all of the children
+ * are root nodes to lower so we will add them to lowerable_rvalues.
+ * Otherwise if this node can also be lowered then we won’t add the
+ * children because we only want to add the topmost lowerable nodes to
+ * lowerable_rvalues and the children will be lowered as part of lowering
+ * this node.
+ */
+ std::vector<ir_instruction *> lowerable_children;
+ };
+
+ find_lowerable_rvalues_visitor(struct set *result);
+
+ static void stack_enter(class ir_instruction *ir, void *data);
+ static void stack_leave(class ir_instruction *ir, void *data);
+
+ virtual ir_visitor_status visit(ir_constant *ir);
+ virtual ir_visitor_status visit(ir_dereference_variable *ir);
+
+ virtual ir_visitor_status visit_enter(ir_dereference_record *ir);
+ virtual ir_visitor_status visit_enter(ir_dereference_array *ir);
+ virtual ir_visitor_status visit_enter(ir_texture *ir);
+ virtual ir_visitor_status visit_enter(ir_expression *ir);
+
+ virtual ir_visitor_status visit_leave(ir_assignment *ir);
+ virtual ir_visitor_status visit_leave(ir_call *ir);
+
+ static can_lower_state handle_precision(const glsl_type *type,
+ int precision);
+
+ static parent_relation get_parent_relation(ir_instruction *parent,
+ ir_instruction *child);
+
+ std::vector<stack_entry> stack;
+ struct set *lowerable_rvalues;
+
+ void pop_stack_entry();
+ void add_lowerable_children(const stack_entry &entry);
+};
+
+class lower_precision_visitor : public ir_rvalue_visitor {
+public:
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+ virtual ir_visitor_status visit_enter(ir_dereference_record *);
+ virtual ir_visitor_status visit_enter(ir_call *ir);
+ virtual ir_visitor_status visit_enter(ir_texture *ir);
+ virtual ir_visitor_status visit_leave(ir_expression *);
+};
+
+bool
+can_lower_type(const glsl_type *type)
+{
+ /* Don’t lower any expressions involving non-float types except bool and
+ * texture samplers. This will rule out operations that change the type such
+ * as conversion to ints. Instead it will end up lowering the arguments
+ * and adding a final conversion to float32. We want to handle
+ * boolean types so that it will do comparisons as 16-bit.
+ */
+
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_SAMPLER:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+find_lowerable_rvalues_visitor::find_lowerable_rvalues_visitor(struct set *res)
+{
+ lowerable_rvalues = res;
+ callback_enter = stack_enter;
+ callback_leave = stack_leave;
+ data_enter = this;
+ data_leave = this;
+}
+
+void
+find_lowerable_rvalues_visitor::stack_enter(class ir_instruction *ir,
+ void *data)
+{
+ find_lowerable_rvalues_visitor *state =
+ (find_lowerable_rvalues_visitor *) data;
+
+ /* Add a new stack entry for this instruction */
+ stack_entry entry;
+
+ entry.instr = ir;
+ entry.state = state->in_assignee ? CANT_LOWER : UNKNOWN;
+
+ state->stack.push_back(entry);
+}
+
+void
+find_lowerable_rvalues_visitor::add_lowerable_children(const stack_entry &entry)
+{
+ /* We can’t lower this node so if there were any pending children then they
+ * are all root lowerable nodes and we should add them to the set.
+ */
+ for (auto &it : entry.lowerable_children)
+ _mesa_set_add(lowerable_rvalues, it);
+}
+
+void
+find_lowerable_rvalues_visitor::pop_stack_entry()
+{
+ const stack_entry &entry = stack.back();
+
+ if (stack.size() >= 2) {
+ /* Combine this state into the parent state, unless the parent operation
+ * doesn’t have any relation to the child operations
+ */
+ stack_entry &parent = stack.end()[-2];
+ parent_relation rel = get_parent_relation(parent.instr, entry.instr);
+
+ if (rel == COMBINED_OPERATION) {
+ switch (entry.state) {
+ case CANT_LOWER:
+ parent.state = CANT_LOWER;
+ break;
+ case SHOULD_LOWER:
+ if (parent.state == UNKNOWN)
+ parent.state = SHOULD_LOWER;
+ break;
+ case UNKNOWN:
+ break;
+ }
+ }
+ }
+
+ if (entry.state == SHOULD_LOWER) {
+ ir_rvalue *rv = entry.instr->as_rvalue();
+
+ if (rv == NULL) {
+ add_lowerable_children(entry);
+ } else if (stack.size() >= 2) {
+ stack_entry &parent = stack.end()[-2];
+
+ switch (get_parent_relation(parent.instr, rv)) {
+ case COMBINED_OPERATION:
+ /* We only want to add the toplevel lowerable instructions to the
+ * lowerable set. Therefore if there is a parent then instead of
+ * adding this instruction to the set we will queue depending on
+ * the result of the parent instruction.
+ */
+ parent.lowerable_children.push_back(entry.instr);
+ break;
+ case INDEPENDENT_OPERATION:
+ _mesa_set_add(lowerable_rvalues, rv);
+ break;
+ }
+ } else {
+ /* This is a toplevel node so add it directly to the lowerable
+ * set.
+ */
+ _mesa_set_add(lowerable_rvalues, rv);
+ }
+ } else if (entry.state == CANT_LOWER) {
+ add_lowerable_children(entry);
+ }
+
+ stack.pop_back();
+}
+
+void
+find_lowerable_rvalues_visitor::stack_leave(class ir_instruction *ir,
+ void *data)
+{
+ find_lowerable_rvalues_visitor *state =
+ (find_lowerable_rvalues_visitor *) data;
+
+ state->pop_stack_entry();
+}
+
+enum find_lowerable_rvalues_visitor::can_lower_state
+find_lowerable_rvalues_visitor::handle_precision(const glsl_type *type,
+ int precision)
+{
+ if (!can_lower_type(type))
+ return CANT_LOWER;
+
+ switch (precision) {
+ case GLSL_PRECISION_NONE:
+ return UNKNOWN;
+ case GLSL_PRECISION_HIGH:
+ return CANT_LOWER;
+ case GLSL_PRECISION_MEDIUM:
+ case GLSL_PRECISION_LOW:
+ return SHOULD_LOWER;
+ }
+
+ return CANT_LOWER;
+}
+
+enum find_lowerable_rvalues_visitor::parent_relation
+find_lowerable_rvalues_visitor::get_parent_relation(ir_instruction *parent,
+ ir_instruction *child)
+{
+ /* If the parent is a dereference instruction then the only child could be
+ * for example an array dereference and that should be lowered independently
+ * of the parent.
+ */
+ if (parent->as_dereference())
+ return INDEPENDENT_OPERATION;
+
+ * The precision of texture sampling depends on the precision of the sampler.
+ * The rest of the arguments don’t matter so we can treat it as an
+ * independent operation.
+ */
+ if (parent->as_texture())
+ return INDEPENDENT_OPERATION;
+
+ return COMBINED_OPERATION;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit(ir_constant *ir)
+{
+ stack_enter(ir, this);
+
+ if (!can_lower_type(ir->type))
+ stack.back().state = CANT_LOWER;
+
+ stack_leave(ir, this);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit(ir_dereference_variable *ir)
+{
+ stack_enter(ir, this);
+
+ if (stack.back().state == UNKNOWN)
+ stack.back().state = handle_precision(ir->type, ir->precision());
+
+ stack_leave(ir, this);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit_enter(ir_dereference_record *ir)
+{
+ ir_hierarchical_visitor::visit_enter(ir);
+
+ if (stack.back().state == UNKNOWN)
+ stack.back().state = handle_precision(ir->type, ir->precision());
+
+ return visit_continue;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit_enter(ir_dereference_array *ir)
+{
+ ir_hierarchical_visitor::visit_enter(ir);
+
+ if (stack.back().state == UNKNOWN)
+ stack.back().state = handle_precision(ir->type, ir->precision());
+
+ return visit_continue;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit_enter(ir_texture *ir)
+{
+ ir_hierarchical_visitor::visit_enter(ir);
+
+ if (stack.back().state == UNKNOWN) {
+ /* The precision of the sample value depends on the precision of the
+ * sampler.
+ */
+ stack.back().state = handle_precision(ir->type,
+ ir->sampler->precision());
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit_enter(ir_expression *ir)
+{
+ ir_hierarchical_visitor::visit_enter(ir);
+
+ if (!can_lower_type(ir->type))
+ stack.back().state = CANT_LOWER;
+
+ /* Don't lower precision for derivative calculations */
+ if (ir->operation == ir_unop_dFdx ||
+ ir->operation == ir_unop_dFdx_coarse ||
+ ir->operation == ir_unop_dFdx_fine ||
+ ir->operation == ir_unop_dFdy ||
+ ir->operation == ir_unop_dFdy_coarse ||
+ ir->operation == ir_unop_dFdy_fine) {
+ stack.back().state = CANT_LOWER;
+ }
+
+ return visit_continue;
+}
+
+static bool
+is_lowerable_builtin(ir_call *ir,
+ const struct set *lowerable_rvalues)
+{
+ if (!ir->callee->is_builtin())
+ return false;
+
+ assert(ir->callee->return_precision == GLSL_PRECISION_NONE);
+
+ foreach_in_list(ir_rvalue, param, &ir->actual_parameters) {
+ if (!param->as_constant() &&
+ _mesa_set_search(lowerable_rvalues, param) == NULL)
+ return false;
+ }
+
+ return true;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit_leave(ir_call *ir)
+{
+ ir_hierarchical_visitor::visit_leave(ir);
+
+ /* Special case for handling temporary variables generated by the compiler
+ * for function calls. If we assign to one of these using a function call
+ * that has a lowerable return type then we can assume the temporary
+ * variable should have a medium precision too.
+ */
+
+ /* Do nothing if the return type is void. */
+ if (!ir->return_deref)
+ return visit_continue;
+
+ ir_variable *var = ir->return_deref->variable_referenced();
+
+ assert(var->data.mode == ir_var_temporary);
+
+ unsigned return_precision = ir->callee->return_precision;
+
+ /* If the call is to a builtin, then the function won’t have a return
+ * precision and we should determine it from the precision of the arguments.
+ */
+ if (is_lowerable_builtin(ir, lowerable_rvalues))
+ return_precision = GLSL_PRECISION_MEDIUM;
+
+ can_lower_state lower_state =
+ handle_precision(var->type, return_precision);
+
+ if (lower_state == SHOULD_LOWER) {
+ /* There probably shouldn’t be any situations where multiple ir_call
+ * instructions write to the same temporary?
+ */
+ assert(var->data.precision == GLSL_PRECISION_NONE);
+ var->data.precision = GLSL_PRECISION_MEDIUM;
+ } else {
+ var->data.precision = GLSL_PRECISION_HIGH;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+find_lowerable_rvalues_visitor::visit_leave(ir_assignment *ir)
+{
+ ir_hierarchical_visitor::visit_leave(ir);
+
+ /* Special case for handling temporary variables generated by the compiler.
+ * If we assign to one of these using a lowered precision then we can assume
+ * the temporary variable should have a medium precision too.
+ */
+ ir_variable *var = ir->lhs->variable_referenced();
+
+ if (var->data.mode == ir_var_temporary) {
+ if (_mesa_set_search(lowerable_rvalues, ir->rhs)) {
+ /* Only override the precision if this is the first assignment. For
+ * temporaries such as the ones generated for the ?: operator there
+ * can be multiple assignments with different precisions. This way we
+ * get the highest precision of all of the assignments.
+ */
+ if (var->data.precision == GLSL_PRECISION_NONE)
+ var->data.precision = GLSL_PRECISION_MEDIUM;
+ } else if (!ir->rhs->as_constant()) {
+ var->data.precision = GLSL_PRECISION_HIGH;
+ }
+ }
+
+ return visit_continue;
+}
+
+void
+find_lowerable_rvalues(exec_list *instructions,
+ struct set *result)
+{
+ find_lowerable_rvalues_visitor v(result);
+
+ visit_list_elements(&v, instructions);
+
+ assert(v.stack.empty());
+}
+
+static ir_rvalue *
+convert_precision(int op, ir_rvalue *ir)
+{
+ unsigned base_type = (op == ir_unop_f2fmp ?
+ GLSL_TYPE_FLOAT16 : GLSL_TYPE_FLOAT);
+ const glsl_type *desired_type;
+ desired_type = glsl_type::get_instance(base_type,
+ ir->type->vector_elements,
+ ir->type->matrix_columns);
+
+ void *mem_ctx = ralloc_parent(ir);
+ return new(mem_ctx) ir_expression(op, desired_type, ir, NULL);
+}
+
+void
+lower_precision_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ ir_rvalue *ir = *rvalue;
+
+ if (ir == NULL)
+ return;
+
+ if (ir->as_dereference()) {
+ if (!ir->type->is_boolean())
+ *rvalue = convert_precision(ir_unop_f2fmp, ir);
+ } else if (ir->type->is_float()) {
+ ir->type = glsl_type::get_instance(GLSL_TYPE_FLOAT16,
+ ir->type->vector_elements,
+ ir->type->matrix_columns,
+ ir->type->explicit_stride,
+ ir->type->interface_row_major);
+
+ ir_constant *const_ir = ir->as_constant();
+
+ if (const_ir) {
+ ir_constant_data value;
+
+ for (unsigned i = 0; i < ARRAY_SIZE(value.f16); i++)
+ value.f16[i] = _mesa_float_to_half(const_ir->value.f[i]);
+
+ const_ir->value = value;
+ }
+ }
+}
+
+ir_visitor_status
+lower_precision_visitor::visit_enter(ir_dereference_record *ir)
+{
+ /* We don’t want to lower the variable */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+lower_precision_visitor::visit_enter(ir_dereference_array *ir)
+{
+ /* We don’t want to convert the array index or the variable. If the array
+ * index itself is lowerable that will be handled separately.
+ */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+lower_precision_visitor::visit_enter(ir_call *ir)
+{
+ /* We don’t want to convert the arguments. These will be handled separately.
+ */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+lower_precision_visitor::visit_enter(ir_texture *ir)
+{
+ /* We don’t want to convert the arguments. These will be handled separately.
+ */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+lower_precision_visitor::visit_leave(ir_expression *ir)
+{
+ ir_rvalue_visitor::visit_leave(ir);
+
+ /* If the expression is a conversion operation to or from bool then fix the
+ * operation.
+ */
+ switch (ir->operation) {
+ case ir_unop_b2f:
+ ir->operation = ir_unop_b2f16;
+ break;
+ case ir_unop_f2b:
+ ir->operation = ir_unop_f162b;
+ break;
+ default:
+ break;
+ }
+
+ return visit_continue;
+}
+
+void
+find_precision_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ /* find_lowerable_rvalues_visitor has already determined which rvalues
+  * can have their precision lowered and recorded them in
+  * lowerable_rvalues. For each such rvalue we then run
+  * lower_precision_visitor to rewrite the expression tree, adding the
+  * f2fmp conversions that actually lower the precision.
+  */
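+ /* For example, if a + b is a lowerable mediump expression, the tree
+  * becomes f162f(f2fmp(a) + f2fmp(b)): the operands are converted to
+  * float16, the addition runs at reduced precision, and the final f162f
+  * converts the result back to float32.
+  */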
+ if (*rvalue == NULL)
+ return;
+
+ struct set_entry *entry = _mesa_set_search(lowerable_rvalues, *rvalue);
+
+ if (!entry)
+ return;
+
+ _mesa_set_remove(lowerable_rvalues, entry);
+
+ /* If the entire expression is just a variable dereference then trying to
+ * lower it will just directly add pointless to and from conversions without
+ * any actual operation in-between. Although these will eventually get
+ * optimised out, avoiding generating them here also avoids breaking inout
+ * parameters to functions.
+ */
+ if ((*rvalue)->as_dereference())
+ return;
+
+ lower_precision_visitor v;
+
+ (*rvalue)->accept(&v);
+ v.handle_rvalue(rvalue);
+
+ /* We don’t need to add the final conversion if the final type has been
+ * converted to bool
+ */
+ if ((*rvalue)->type->base_type != GLSL_TYPE_BOOL)
+ *rvalue = convert_precision(ir_unop_f162f, *rvalue);
+
+ progress = true;
+}
+
+ir_visitor_status
+find_precision_visitor::visit_enter(ir_call *ir)
+{
+ ir_rvalue_enter_visitor::visit_enter(ir);
+
+ /* If this is a call to a builtin and the find_lowerable_rvalues_visitor
+ * overrode the precision of the temporary return variable, then we can
+ * replace the builtin implementation with a lowered version.
+ */
+
+ if (!ir->callee->is_builtin() ||
+ ir->return_deref == NULL ||
+ ir->return_deref->variable_referenced()->data.precision !=
+ GLSL_PRECISION_MEDIUM)
+ return visit_continue;
+
+ ir->callee = map_builtin(ir->callee);
+ ir->generate_inline(ir);
+ ir->remove();
+
+ return visit_continue_with_parent;
+}
+
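+/* Return a clone of the builtin signature sig whose parameters are marked
+ * mediump and whose body has itself been precision-lowered. Clones are
+ * built lazily, cached in lowered_builtins, and reused so that each
+ * builtin is lowered only once.
+ */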
+ir_function_signature *
+find_precision_visitor::map_builtin(ir_function_signature *sig)
+{
+ if (lowered_builtins == NULL) {
+ lowered_builtins = _mesa_pointer_hash_table_create(NULL);
+ clone_ht = _mesa_pointer_hash_table_create(NULL);
+ lowered_builtin_mem_ctx = ralloc_context(NULL);
+ } else {
+ struct hash_entry *entry = _mesa_hash_table_search(lowered_builtins, sig);
+ if (entry)
+ return (ir_function_signature *) entry->data;
+ }
+
+ ir_function_signature *lowered_sig =
+ sig->clone(lowered_builtin_mem_ctx, clone_ht);
+
+ foreach_in_list(ir_variable, param, &lowered_sig->parameters) {
+ param->data.precision = GLSL_PRECISION_MEDIUM;
+ }
+
+ lower_precision(&lowered_sig->body);
+
+ _mesa_hash_table_clear(clone_ht, NULL);
+
+ _mesa_hash_table_insert(lowered_builtins, sig, lowered_sig);
+
+ return lowered_sig;
+}
+
+find_precision_visitor::find_precision_visitor()
+ : progress(false),
+ lowerable_rvalues(_mesa_pointer_set_create(NULL)),
+ lowered_builtins(NULL),
+ clone_ht(NULL),
+ lowered_builtin_mem_ctx(NULL)
+{
+}
+
+find_precision_visitor::~find_precision_visitor()
+{
+ _mesa_set_destroy(lowerable_rvalues, NULL);
+
+ if (lowered_builtins) {
+ _mesa_hash_table_destroy(lowered_builtins, NULL);
+ _mesa_hash_table_destroy(clone_ht, NULL);
+ ralloc_free(lowered_builtin_mem_ctx);
+ }
+}
+
+}
+
+bool
+lower_precision(exec_list *instructions)
+{
+ find_precision_visitor v;
+
+ find_lowerable_rvalues(instructions, v.lowerable_rvalues);
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp
new file mode 100644
index 0000000000..fb6af0c088
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp
@@ -0,0 +1,517 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_shared_reference.cpp
+ *
+ * IR lower pass to replace dereferences of compute shader shared variables
+ * with intrinsic function calls.
+ *
+ * This relieves drivers of the responsibility of allocating space for the
+ * shared variables in the shared memory region.
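+ *
+ * For example, a write such as s[i] = v, where s is a shared float array,
+ * is rewritten to store v through the __intrinsic_store_shared intrinsic
+ * at the byte offset assigned to s plus the offset of element i, instead
+ * of through a variable dereference.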
+ */
+
+#include "lower_buffer_access.h"
+#include "ir_builder.h"
+#include "linker.h"
+#include "main/macros.h"
+#include "util/list.h"
+#include "glsl_parser_extras.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+namespace {
+
+struct var_offset {
+ struct list_head node;
+ const ir_variable *var;
+ unsigned offset;
+};
+
+class lower_shared_reference_visitor :
+ public lower_buffer_access::lower_buffer_access {
+public:
+
+ lower_shared_reference_visitor(struct gl_linked_shader *shader)
+ : list_ctx(ralloc_context(NULL)), shader(shader), shared_size(0u)
+ {
+ list_inithead(&var_offsets);
+ }
+
+ ~lower_shared_reference_visitor()
+ {
+ ralloc_free(list_ctx);
+ }
+
+ enum {
+ shared_load_access,
+ shared_store_access,
+ shared_atomic_access,
+ } buffer_access_type;
+
+ void insert_buffer_access(void *mem_ctx, ir_dereference *deref,
+ const glsl_type *type, ir_rvalue *offset,
+ unsigned mask, int channel);
+
+ void handle_rvalue(ir_rvalue **rvalue);
+ ir_visitor_status visit_enter(ir_assignment *ir);
+ void handle_assignment(ir_assignment *ir);
+
+ ir_call *lower_shared_atomic_intrinsic(ir_call *ir);
+ ir_call *check_for_shared_atomic_intrinsic(ir_call *ir);
+ ir_visitor_status visit_enter(ir_call *ir);
+
+ unsigned get_shared_offset(const ir_variable *);
+
+ ir_call *shared_load(void *mem_ctx, const struct glsl_type *type,
+ ir_rvalue *offset);
+ ir_call *shared_store(void *mem_ctx, ir_rvalue *deref, ir_rvalue *offset,
+ unsigned write_mask);
+
+ void *list_ctx;
+ struct gl_linked_shader *shader;
+ struct list_head var_offsets;
+ unsigned shared_size;
+ bool progress;
+};
+
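+/* Return the byte offset of var within the shared memory region, assigning
+ * the next std430-aligned offset the first time a variable is seen. For
+ * example, a shared uint followed by a shared vec4 lands at offsets 0 and
+ * 16, since vec4 requires 16-byte alignment under std430.
+ */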
+unsigned
+lower_shared_reference_visitor::get_shared_offset(const ir_variable *var)
+{
+ list_for_each_entry(var_offset, var_entry, &var_offsets, node) {
+ if (var_entry->var == var)
+ return var_entry->offset;
+ }
+
+ struct var_offset *new_entry = rzalloc(list_ctx, struct var_offset);
+ list_add(&new_entry->node, &var_offsets);
+ new_entry->var = var;
+
+ unsigned var_align = var->type->std430_base_alignment(false);
+ new_entry->offset = glsl_align(shared_size, var_align);
+
+ unsigned var_size = var->type->std430_size(false);
+ shared_size = new_entry->offset + var_size;
+
+ return new_entry->offset;
+}
+
+void
+lower_shared_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_dereference *deref = (*rvalue)->as_dereference();
+ if (!deref)
+ return;
+
+ ir_variable *var = deref->variable_referenced();
+ if (!var || var->data.mode != ir_var_shader_shared)
+ return;
+
+ buffer_access_type = shared_load_access;
+
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ ir_rvalue *offset = NULL;
+ unsigned const_offset = get_shared_offset(var);
+ bool row_major;
+ const glsl_type *matrix_type;
+ assert(var->get_interface_type() == NULL);
+ const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;
+
+ setup_buffer_access(mem_ctx, deref,
+ &offset, &const_offset,
+ &row_major, &matrix_type, NULL, packing);
+
+ /* Now that we've calculated the offset to the start of the
+ * dereference, walk over the type and emit loads into a temporary.
+ */
+ const glsl_type *type = (*rvalue)->type;
+ ir_variable *load_var = new(mem_ctx) ir_variable(type,
+ "shared_load_temp",
+ ir_var_temporary);
+ base_ir->insert_before(load_var);
+
+ ir_variable *load_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
+ "shared_load_temp_offset",
+ ir_var_temporary);
+ base_ir->insert_before(load_offset);
+ base_ir->insert_before(assign(load_offset, offset));
+
+ deref = new(mem_ctx) ir_dereference_variable(load_var);
+
+ emit_access(mem_ctx, false, deref, load_offset, const_offset, row_major,
+ matrix_type, packing, 0);
+
+ *rvalue = deref;
+
+ progress = true;
+}
+
+void
+lower_shared_reference_visitor::handle_assignment(ir_assignment *ir)
+{
+ if (!ir || !ir->lhs)
+ return;
+
+ ir_rvalue *rvalue = ir->lhs->as_rvalue();
+ if (!rvalue)
+ return;
+
+ ir_dereference *deref = ir->lhs->as_dereference();
+ if (!deref)
+ return;
+
+ ir_variable *var = ir->lhs->variable_referenced();
+ if (!var || var->data.mode != ir_var_shader_shared)
+ return;
+
+ buffer_access_type = shared_store_access;
+
+ /* We have a write to a shared variable, so declare a temporary and rewrite
+ * the assignment so that the temporary is the LHS.
+ */
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ const glsl_type *type = rvalue->type;
+ ir_variable *store_var = new(mem_ctx) ir_variable(type,
+ "shared_store_temp",
+ ir_var_temporary);
+ base_ir->insert_before(store_var);
+ ir->lhs = new(mem_ctx) ir_dereference_variable(store_var);
+
+ ir_rvalue *offset = NULL;
+ unsigned const_offset = get_shared_offset(var);
+ bool row_major;
+ const glsl_type *matrix_type;
+ assert(var->get_interface_type() == NULL);
+ const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;
+
+ setup_buffer_access(mem_ctx, deref,
+ &offset, &const_offset,
+ &row_major, &matrix_type, NULL, packing);
+
+ deref = new(mem_ctx) ir_dereference_variable(store_var);
+
+ ir_variable *store_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
+ "shared_store_temp_offset",
+ ir_var_temporary);
+ base_ir->insert_before(store_offset);
+ base_ir->insert_before(assign(store_offset, offset));
+
+ /* Now we have to write the value assigned to the temporary back to memory */
+ emit_access(mem_ctx, true, deref, store_offset, const_offset, row_major,
+ matrix_type, packing, ir->write_mask);
+
+ progress = true;
+}
+
+ir_visitor_status
+lower_shared_reference_visitor::visit_enter(ir_assignment *ir)
+{
+ handle_assignment(ir);
+ return rvalue_visit(ir);
+}
+
+void
+lower_shared_reference_visitor::insert_buffer_access(void *mem_ctx,
+ ir_dereference *deref,
+ const glsl_type *type,
+ ir_rvalue *offset,
+ unsigned mask,
+ int /* channel */)
+{
+ if (buffer_access_type == shared_store_access) {
+ ir_call *store = shared_store(mem_ctx, deref, offset, mask);
+ base_ir->insert_after(store);
+ } else {
+ ir_call *load = shared_load(mem_ctx, type, offset);
+ base_ir->insert_before(load);
+ ir_rvalue *value = load->return_deref->as_rvalue()->clone(mem_ctx, NULL);
+ base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
+ value));
+ }
+}
+
+static bool
+compute_shader_enabled(const _mesa_glsl_parse_state *state)
+{
+ return state->stage == MESA_SHADER_COMPUTE;
+}
+
+ir_call *
+lower_shared_reference_visitor::shared_store(void *mem_ctx,
+ ir_rvalue *deref,
+ ir_rvalue *offset,
+ unsigned write_mask)
+{
+ exec_list sig_params;
+
+ ir_variable *offset_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "offset" , ir_var_function_in);
+ sig_params.push_tail(offset_ref);
+
+ ir_variable *val_ref = new(mem_ctx)
+ ir_variable(deref->type, "value" , ir_var_function_in);
+ sig_params.push_tail(val_ref);
+
+ ir_variable *writemask_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "write_mask" , ir_var_function_in);
+ sig_params.push_tail(writemask_ref);
+
+ ir_function_signature *sig = new(mem_ctx)
+ ir_function_signature(glsl_type::void_type, compute_shader_enabled);
+ assert(sig);
+ sig->replace_parameters(&sig_params);
+ sig->intrinsic_id = ir_intrinsic_shared_store;
+
+ ir_function *f = new(mem_ctx) ir_function("__intrinsic_store_shared");
+ f->add_signature(sig);
+
+ exec_list call_params;
+ call_params.push_tail(offset->clone(mem_ctx, NULL));
+ call_params.push_tail(deref->clone(mem_ctx, NULL));
+ call_params.push_tail(new(mem_ctx) ir_constant(write_mask));
+ return new(mem_ctx) ir_call(sig, NULL, &call_params);
+}
+
+ir_call *
+lower_shared_reference_visitor::shared_load(void *mem_ctx,
+ const struct glsl_type *type,
+ ir_rvalue *offset)
+{
+ exec_list sig_params;
+
+ ir_variable *offset_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "offset_ref" , ir_var_function_in);
+ sig_params.push_tail(offset_ref);
+
+ ir_function_signature *sig =
+ new(mem_ctx) ir_function_signature(type, compute_shader_enabled);
+ assert(sig);
+ sig->replace_parameters(&sig_params);
+ sig->intrinsic_id = ir_intrinsic_shared_load;
+
+ ir_function *f = new(mem_ctx) ir_function("__intrinsic_load_shared");
+ f->add_signature(sig);
+
+ ir_variable *result = new(mem_ctx)
+ ir_variable(type, "shared_load_result", ir_var_temporary);
+ base_ir->insert_before(result);
+ ir_dereference_variable *deref_result = new(mem_ctx)
+ ir_dereference_variable(result);
+
+ exec_list call_params;
+ call_params.push_tail(offset->clone(mem_ctx, NULL));
+
+ return new(mem_ctx) ir_call(sig, deref_result, &call_params);
+}
+
+/* Lowers the intrinsic call to a new internal intrinsic that replaces the
+ * access to the shared variable in the first parameter with an offset. This
+ * involves creating the new internal intrinsic (i.e. the new function
+ * signature).
+ */
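+/* A minimal sketch of the transformation, assuming the generic callee is
+ * named __intrinsic_atomic_add: a GLSL-level call such as
+ *
+ *    atomicAdd(shared_counter, 1u)
+ *
+ * becomes a call to the new internal intrinsic, which takes the variable's
+ * byte offset within shared memory instead of the variable itself:
+ *
+ *    __intrinsic_atomic_add_shared(offset, 1u)
+ */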
+ir_call *
+lower_shared_reference_visitor::lower_shared_atomic_intrinsic(ir_call *ir)
+{
+ /* Shared atomics usually have 2 parameters, the shared variable and an
+ * integer argument. The exception is CompSwap, which has an additional
+ * integer parameter.
+ */
+ int param_count = ir->actual_parameters.length();
+ assert(param_count == 2 || param_count == 3);
+
+ /* First argument must be a scalar integer shared variable */
+ exec_node *param = ir->actual_parameters.get_head();
+ ir_instruction *inst = (ir_instruction *) param;
+ assert(inst->ir_type == ir_type_dereference_variable ||
+ inst->ir_type == ir_type_dereference_array ||
+ inst->ir_type == ir_type_dereference_record ||
+ inst->ir_type == ir_type_swizzle);
+
+ ir_rvalue *deref = (ir_rvalue *) inst;
+ assert(deref->type->is_scalar() &&
+ (deref->type->is_integer_32() || deref->type->is_float()));
+
+ ir_variable *var = deref->variable_referenced();
+ assert(var);
+
+ /* Compute the offset to the start of the dereference.
+ */
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ ir_rvalue *offset = NULL;
+ unsigned const_offset = get_shared_offset(var);
+ bool row_major;
+ const glsl_type *matrix_type;
+ assert(var->get_interface_type() == NULL);
+ const enum glsl_interface_packing packing = GLSL_INTERFACE_PACKING_STD430;
+ buffer_access_type = shared_atomic_access;
+
+ setup_buffer_access(mem_ctx, deref,
+ &offset, &const_offset,
+ &row_major, &matrix_type, NULL, packing);
+
+ assert(offset);
+ assert(!row_major);
+ assert(matrix_type == NULL);
+
+ ir_rvalue *deref_offset =
+ add(offset, new(mem_ctx) ir_constant(const_offset));
+
+ /* Create the new internal function signature that will take an offset
+ * instead of a shared variable
+ */
+ exec_list sig_params;
+ ir_variable *sig_param = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "offset" , ir_var_function_in);
+ sig_params.push_tail(sig_param);
+
+ const glsl_type *type = deref->type->get_scalar_type();
+ sig_param = new(mem_ctx)
+ ir_variable(type, "data1", ir_var_function_in);
+ sig_params.push_tail(sig_param);
+
+ if (param_count == 3) {
+ sig_param = new(mem_ctx)
+ ir_variable(type, "data2", ir_var_function_in);
+ sig_params.push_tail(sig_param);
+ }
+
+ ir_function_signature *sig =
+ new(mem_ctx) ir_function_signature(deref->type,
+ compute_shader_enabled);
+ assert(sig);
+ sig->replace_parameters(&sig_params);
+
+ assert(ir->callee->intrinsic_id >= ir_intrinsic_generic_load);
+ assert(ir->callee->intrinsic_id <= ir_intrinsic_generic_atomic_comp_swap);
+ sig->intrinsic_id = MAP_INTRINSIC_TO_TYPE(ir->callee->intrinsic_id, shared);
+
+ char func_name[64];
+ sprintf(func_name, "%s_shared", ir->callee_name());
+ ir_function *f = new(mem_ctx) ir_function(func_name);
+ f->add_signature(sig);
+
+ /* Now, create the call to the internal intrinsic */
+ exec_list call_params;
+ call_params.push_tail(deref_offset);
+ param = ir->actual_parameters.get_head()->get_next();
+ ir_rvalue *param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
+ call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
+ if (param_count == 3) {
+ param = param->get_next();
+ param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
+ call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
+ }
+ ir_dereference_variable *return_deref =
+ ir->return_deref->clone(mem_ctx, NULL);
+ return new(mem_ctx) ir_call(sig, return_deref, &call_params);
+}
+
+ir_call *
+lower_shared_reference_visitor::check_for_shared_atomic_intrinsic(ir_call *ir)
+{
+ exec_list& params = ir->actual_parameters;
+
+ if (params.length() < 2 || params.length() > 3)
+ return ir;
+
+ ir_rvalue *rvalue =
+ ((ir_instruction *) params.get_head())->as_rvalue();
+ if (!rvalue)
+ return ir;
+
+ ir_variable *var = rvalue->variable_referenced();
+ if (!var || var->data.mode != ir_var_shader_shared)
+ return ir;
+
+ const enum ir_intrinsic_id id = ir->callee->intrinsic_id;
+ if (id == ir_intrinsic_generic_atomic_add ||
+ id == ir_intrinsic_generic_atomic_min ||
+ id == ir_intrinsic_generic_atomic_max ||
+ id == ir_intrinsic_generic_atomic_and ||
+ id == ir_intrinsic_generic_atomic_or ||
+ id == ir_intrinsic_generic_atomic_xor ||
+ id == ir_intrinsic_generic_atomic_exchange ||
+ id == ir_intrinsic_generic_atomic_comp_swap) {
+ return lower_shared_atomic_intrinsic(ir);
+ }
+
+ return ir;
+}
+
+ir_visitor_status
+lower_shared_reference_visitor::visit_enter(ir_call *ir)
+{
+ ir_call *new_ir = check_for_shared_atomic_intrinsic(ir);
+ if (new_ir != ir) {
+ progress = true;
+ base_ir->replace_with(new_ir);
+ return visit_continue_with_parent;
+ }
+
+ return rvalue_visit(ir);
+}
+
+} /* unnamed namespace */
+
+void
+lower_shared_reference(struct gl_context *ctx,
+ struct gl_shader_program *prog,
+ struct gl_linked_shader *shader)
+{
+ if (shader->Stage != MESA_SHADER_COMPUTE)
+ return;
+
+ lower_shared_reference_visitor v(shader);
+
+ /* Loop over the instructions lowering references, because taking a deref
+ * of a shared variable array using a shared variable dereference as the
+ * index will produce a collection of instructions, all of which have
+ * cloned shared variable dereferences for that array index.
+ */
+ do {
+ v.progress = false;
+ visit_list_elements(&v, shader->ir);
+ } while (v.progress);
+
+ prog->Comp.SharedSize = v.shared_size;
+
+ /* Section 19.1 (Compute Shader Variables) of the OpenGL 4.5 (Core Profile)
+ * specification says:
+ *
+ * "There is a limit to the total size of all variables declared as
+ * shared in a single program object. This limit, expressed in units of
+ * basic machine units, may be queried as the value of
+ * MAX_COMPUTE_SHARED_MEMORY_SIZE."
+ */
+ if (prog->Comp.SharedSize > ctx->Const.MaxComputeSharedMemorySize) {
+ linker_error(prog, "Too much shared memory used (%u/%u)\n",
+ prog->Comp.SharedSize,
+ ctx->Const.MaxComputeSharedMemorySize);
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp
new file mode 100644
index 0000000000..de178a59b0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2015 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_subroutine.cpp
+ *
+ * Lowers subroutines to an if ladder.
+ */
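+/* A minimal sketch of the lowering, with hypothetical subroutine functions
+ * fn0 and fn1 bound to indices 0 and 1: a call through a subroutine uniform
+ *
+ *    subUniform(args);
+ *
+ * becomes an if ladder that compares the uniform's integer index against
+ * each compatible subroutine's index:
+ *
+ *    if (subUniform == 0)
+ *       fn0(args);
+ *    else if (subUniform == 1)
+ *       fn1(args);
+ */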
+
+#include "compiler/glsl_types.h"
+#include "glsl_parser_extras.h"
+#include "ir.h"
+#include "ir_builder.h"
+
+using namespace ir_builder;
+namespace {
+
+class lower_subroutine_visitor : public ir_hierarchical_visitor {
+public:
+ lower_subroutine_visitor(struct _mesa_glsl_parse_state *state)
+ : state(state)
+ {
+ this->progress = false;
+ }
+
+ ir_visitor_status visit_leave(ir_call *);
+ ir_call *call_clone(ir_call *call, ir_function_signature *callee);
+ bool progress;
+ struct _mesa_glsl_parse_state *state;
+};
+
+} /* anonymous namespace */
+
+bool
+lower_subroutine(exec_list *instructions, struct _mesa_glsl_parse_state *state)
+{
+ lower_subroutine_visitor v(state);
+ visit_list_elements(&v, instructions);
+ return v.progress;
+}
+
+ir_call *
+lower_subroutine_visitor::call_clone(ir_call *call, ir_function_signature *callee)
+{
+ void *mem_ctx = ralloc_parent(call);
+ ir_dereference_variable *new_return_ref = NULL;
+ if (call->return_deref != NULL)
+ new_return_ref = call->return_deref->clone(mem_ctx, NULL);
+
+ exec_list new_parameters;
+
+ foreach_in_list(ir_instruction, ir, &call->actual_parameters) {
+ new_parameters.push_tail(ir->clone(mem_ctx, NULL));
+ }
+
+ return new(mem_ctx) ir_call(callee, new_return_ref, &new_parameters);
+}
+
+ir_visitor_status
+lower_subroutine_visitor::visit_leave(ir_call *ir)
+{
+ if (!ir->sub_var)
+ return visit_continue;
+
+ void *mem_ctx = ralloc_parent(ir);
+ ir_if *last_branch = NULL;
+
+ for (int s = this->state->num_subroutines - 1; s >= 0; s--) {
+ ir_rvalue *var;
+ ir_function *fn = this->state->subroutines[s];
+ ir_constant *lc = new(mem_ctx) ir_constant(fn->subroutine_index);
+
+ bool is_compat = false;
+
+ for (int i = 0; i < fn->num_subroutine_types; i++) {
+ if (ir->sub_var->type->without_array() == fn->subroutine_types[i]) {
+ is_compat = true;
+ break;
+ }
+ }
+ if (is_compat == false)
+ continue;
+
+ if (ir->array_idx != NULL)
+ var = ir->array_idx->clone(mem_ctx, NULL);
+ else
+ var = new(mem_ctx) ir_dereference_variable(ir->sub_var);
+
+ ir_function_signature *sub_sig =
+ fn->exact_matching_signature(this->state,
+ &ir->actual_parameters);
+
+ ir_call *new_call = call_clone(ir, sub_sig);
+ if (!last_branch)
+ last_branch = if_tree(equal(subr_to_int(var), lc), new_call);
+ else
+ last_branch = if_tree(equal(subr_to_int(var), lc), new_call, last_branch);
+ }
+ if (last_branch)
+ ir->insert_before(last_branch);
+ ir->remove();
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp
new file mode 100644
index 0000000000..3e4c7f026c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp
@@ -0,0 +1,461 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_tess_level.cpp
+ *
+ * This pass accounts for the difference between the way gl_TessLevelOuter
+ * and gl_TessLevelInner are declared in standard GLSL (as arrays of
+ * floats), and the way they are frequently implemented in hardware (as a
+ * vec4 and a vec2).
+ *
+ * The declaration of gl_TessLevel* is replaced with a declaration
+ * of gl_TessLevel*MESA, and any references to gl_TessLevel* are
+ * translated to refer to gl_TessLevel*MESA with the appropriate
+ * swizzling of array indices. For instance:
+ *
+ * gl_TessLevelOuter[i]
+ *
+ * is translated into:
+ *
+ *   gl_TessLevelOuterMESA[i]
+ *
+ * where the index now selects a component of the vector rather than an
+ * element of a float array.
+ *
+ * Since not all hardware represents gl_TessLevel* this way internally,
+ * this lowering pass is optional. To enable it, set the LowerTessLevel
+ * flag in gl_shader_compiler_options to true.
+ */
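+/* A minimal sketch, using a hypothetical whole-array write: an assignment
+ * such as
+ *
+ *    gl_TessLevelOuter = float[4](a, b, c, d);
+ *
+ * is unrolled into per-element assignments into the vec4, each lowered to
+ * a write mask (constant index) or a vector_insert (variable index):
+ *
+ *    gl_TessLevelOuterMESA.x = a;
+ *    gl_TessLevelOuterMESA.y = b;
+ *    ...
+ */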
+
+#include "glsl_symbol_table.h"
+#include "ir_rvalue_visitor.h"
+#include "ir.h"
+#include "program/prog_instruction.h" /* For WRITEMASK_* */
+#include "main/mtypes.h"
+
+namespace {
+
+class lower_tess_level_visitor : public ir_rvalue_visitor {
+public:
+ explicit lower_tess_level_visitor(gl_shader_stage shader_stage)
+ : progress(false), old_tess_level_outer_var(NULL),
+ old_tess_level_inner_var(NULL), new_tess_level_outer_var(NULL),
+ new_tess_level_inner_var(NULL), shader_stage(shader_stage)
+ {
+ }
+
+ virtual ir_visitor_status visit(ir_variable *);
+ bool is_tess_level_array(ir_rvalue *ir);
+ ir_rvalue *lower_tess_level_array(ir_rvalue *ir);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ void visit_new_assignment(ir_assignment *ir);
+ virtual ir_visitor_status visit_leave(ir_call *);
+
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+
+ void fix_lhs(ir_assignment *);
+
+ bool progress;
+
+ /**
+ * Pointer to the declaration of gl_TessLevel*, if found.
+ */
+ ir_variable *old_tess_level_outer_var;
+ ir_variable *old_tess_level_inner_var;
+
+ /**
+ * Pointer to the newly-created gl_TessLevel*MESA variables.
+ */
+ ir_variable *new_tess_level_outer_var;
+ ir_variable *new_tess_level_inner_var;
+
+ /**
+ * Type of shader we are compiling (e.g. MESA_SHADER_TESS_CTRL)
+ */
+ const gl_shader_stage shader_stage;
+};
+
+} /* anonymous namespace */
+
+/**
+ * Replace any declaration of gl_TessLevel* as an array of floats with a
+ * declaration of gl_TessLevel*MESA as a vec4.
+ */
+ir_visitor_status
+lower_tess_level_visitor::visit(ir_variable *ir)
+{
+ if ((!ir->name) ||
+ ((strcmp(ir->name, "gl_TessLevelInner") != 0) &&
+ (strcmp(ir->name, "gl_TessLevelOuter") != 0)))
+ return visit_continue;
+
+ assert(ir->type->is_array());
+
+ if (strcmp(ir->name, "gl_TessLevelOuter") == 0) {
+ if (this->old_tess_level_outer_var)
+ return visit_continue;
+
+ old_tess_level_outer_var = ir;
+ assert(ir->type->fields.array == glsl_type::float_type);
+
+ /* Clone the old var so that we inherit all of its properties */
+ new_tess_level_outer_var = ir->clone(ralloc_parent(ir), NULL);
+
+ /* And change the properties that we need to change */
+ new_tess_level_outer_var->name = ralloc_strdup(new_tess_level_outer_var,
+ "gl_TessLevelOuterMESA");
+ new_tess_level_outer_var->type = glsl_type::vec4_type;
+ new_tess_level_outer_var->data.max_array_access = 0;
+
+ ir->replace_with(new_tess_level_outer_var);
+ } else if (strcmp(ir->name, "gl_TessLevelInner") == 0) {
+ if (this->old_tess_level_inner_var)
+ return visit_continue;
+
+ old_tess_level_inner_var = ir;
+ assert(ir->type->fields.array == glsl_type::float_type);
+
+ /* Clone the old var so that we inherit all of its properties */
+ new_tess_level_inner_var = ir->clone(ralloc_parent(ir), NULL);
+
+ /* And change the properties that we need to change */
+ new_tess_level_inner_var->name = ralloc_strdup(new_tess_level_inner_var,
+ "gl_TessLevelInnerMESA");
+ new_tess_level_inner_var->type = glsl_type::vec2_type;
+ new_tess_level_inner_var->data.max_array_access = 0;
+
+ ir->replace_with(new_tess_level_inner_var);
+ } else {
+ assert(0);
+ }
+
+ this->progress = true;
+
+ return visit_continue;
+}
+
+
+/**
+ * Determine whether the given rvalue describes an array of floats that
+ * needs to be lowered to a vec4; that is, determine whether it
+ * matches one of the following patterns:
+ *
+ * - gl_TessLevelOuter
+ * - gl_TessLevelInner
+ */
+bool
+lower_tess_level_visitor::is_tess_level_array(ir_rvalue *ir)
+{
+ if (!ir->type->is_array())
+ return false;
+ if (ir->type->fields.array != glsl_type::float_type)
+ return false;
+
+ if (this->old_tess_level_outer_var) {
+ if (ir->variable_referenced() == this->old_tess_level_outer_var)
+ return true;
+ }
+ if (this->old_tess_level_inner_var) {
+ if (ir->variable_referenced() == this->old_tess_level_inner_var)
+ return true;
+ }
+ return false;
+}
+
+
+/**
+ * If the given ir satisfies is_tess_level_array(), return new ir
+ * representing its lowered equivalent. That is, map:
+ *
+ * - gl_TessLevelOuter => gl_TessLevelOuterMESA
+ * - gl_TessLevelInner => gl_TessLevelInnerMESA
+ *
+ * Otherwise return NULL.
+ */
+ir_rvalue *
+lower_tess_level_visitor::lower_tess_level_array(ir_rvalue *ir)
+{
+ if (!ir->type->is_array())
+ return NULL;
+ if (ir->type->fields.array != glsl_type::float_type)
+ return NULL;
+
+ ir_variable **new_var = NULL;
+
+ if (this->old_tess_level_outer_var) {
+ if (ir->variable_referenced() == this->old_tess_level_outer_var)
+ new_var = &this->new_tess_level_outer_var;
+ }
+ if (this->old_tess_level_inner_var) {
+ if (ir->variable_referenced() == this->old_tess_level_inner_var)
+ new_var = &this->new_tess_level_inner_var;
+ }
+
+ if (new_var == NULL)
+ return NULL;
+
+ assert(ir->as_dereference_variable());
+ return new(ralloc_parent(ir)) ir_dereference_variable(*new_var);
+}
+
+
+void
+lower_tess_level_visitor::handle_rvalue(ir_rvalue **rv)
+{
+ if (*rv == NULL)
+ return;
+
+ ir_dereference_array *const array_deref = (*rv)->as_dereference_array();
+ if (array_deref == NULL)
+ return;
+
+ /* Replace any expression that indexes one of the floats in gl_TessLevel*
+ * with an expression that indexes into one of the vec4's
+ * gl_TessLevel*MESA and accesses the appropriate component.
+ */
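+ /* For example, in rough IR-like notation:
+ *
+ *    gl_TessLevelOuterMESA[i]
+ *
+ * becomes
+ *
+ *    (vector_extract gl_TessLevelOuterMESA i)
+ */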
+ ir_rvalue *lowered_vec4 =
+ this->lower_tess_level_array(array_deref->array);
+ if (lowered_vec4 != NULL) {
+ this->progress = true;
+ void *mem_ctx = ralloc_parent(array_deref);
+
+ ir_expression *const expr =
+ new(mem_ctx) ir_expression(ir_binop_vector_extract,
+ lowered_vec4,
+ array_deref->array_index);
+
+ *rv = expr;
+ }
+}
+
+void
+lower_tess_level_visitor::fix_lhs(ir_assignment *ir)
+{
+ if (ir->lhs->ir_type != ir_type_expression)
+ return;
+ void *mem_ctx = ralloc_parent(ir);
+ ir_expression *const expr = (ir_expression *) ir->lhs;
+
+ /* The expression must be of the form:
+ *
+ * (vector_extract gl_TessLevel*MESA, j).
+ */
+ assert(expr->operation == ir_binop_vector_extract);
+ assert(expr->operands[0]->ir_type == ir_type_dereference_variable);
+ assert((expr->operands[0]->type == glsl_type::vec4_type) ||
+ (expr->operands[0]->type == glsl_type::vec2_type));
+
+ ir_dereference *const new_lhs = (ir_dereference *) expr->operands[0];
+
+ ir_constant *old_index_constant =
+ expr->operands[1]->constant_expression_value(mem_ctx);
+ if (!old_index_constant) {
+ ir->rhs = new(mem_ctx) ir_expression(ir_triop_vector_insert,
+ expr->operands[0]->type,
+ new_lhs->clone(mem_ctx, NULL),
+ ir->rhs,
+ expr->operands[1]);
+ }
+ ir->set_lhs(new_lhs);
+
+ if (old_index_constant) {
+ /* gl_TessLevel* is being accessed via a constant index. Don't bother
+ * creating a vector insert op. Just use a write mask.
+ */
+ ir->write_mask = 1 << old_index_constant->get_int_component(0);
+ } else {
+ ir->write_mask = (1 << expr->operands[0]->type->vector_elements) - 1;
+ }
+}
+
+/**
+ * Replace any assignment having a gl_TessLevel* (undereferenced) as
+ * its LHS or RHS with a sequence of assignments, one for each component of
+ * the array. Each of these assignments is lowered to refer to
+ * gl_TessLevel*MESA as appropriate.
+ */
+ir_visitor_status
+lower_tess_level_visitor::visit_leave(ir_assignment *ir)
+{
+ /* First invoke the base class visitor. This causes handle_rvalue() to be
+ * called on ir->rhs and ir->condition.
+ */
+ ir_rvalue_visitor::visit_leave(ir);
+
+ if (this->is_tess_level_array(ir->lhs) ||
+ this->is_tess_level_array(ir->rhs)) {
+ /* LHS or RHS of the assignment is the entire gl_TessLevel* array.
+ * Since we are reshaping gl_TessLevel* from an array of floats to a
+ * vector, this isn't going to work as a bulk assignment anymore, so
+ * unroll it to element-by-element assignments and lower each of them.
+ *
+ * Note: to unroll into element-by-element assignments, we need to make
+ * clones of the LHS and RHS. This is safe because expressions and
+ * l-values are side-effect free.
+ */
+ void *ctx = ralloc_parent(ir);
+ int array_size = ir->lhs->type->array_size();
+ for (int i = 0; i < array_size; ++i) {
+ ir_dereference_array *new_lhs = new(ctx) ir_dereference_array(
+ ir->lhs->clone(ctx, NULL), new(ctx) ir_constant(i));
+ ir_dereference_array *new_rhs = new(ctx) ir_dereference_array(
+ ir->rhs->clone(ctx, NULL), new(ctx) ir_constant(i));
+ this->handle_rvalue((ir_rvalue **) &new_rhs);
+
+ /* Handle the LHS after creating the new assignment. This must
+ * happen in this order because handle_rvalue may replace the old LHS
+ * with an ir_expression of ir_binop_vector_extract. Since this is
+ * not a valid l-value, this will cause an assertion in the
+ * ir_assignment constructor to fail.
+ *
+ * If this occurs, replace the mangled LHS with a dereference of the
+ * vector, and replace the RHS with an ir_triop_vector_insert.
+ */
+ ir_assignment *const assign = new(ctx) ir_assignment(new_lhs, new_rhs);
+ this->handle_rvalue((ir_rvalue **) &assign->lhs);
+ this->fix_lhs(assign);
+
+ this->base_ir->insert_before(assign);
+ }
+ ir->remove();
+
+ return visit_continue;
+ }
+
+ /* Handle the LHS as if it were an r-value. Normally
+ * rvalue_visit(ir_assignment *) only visits the RHS, but we need to lower
+ * expressions in the LHS as well.
+ *
+ * This may cause the LHS to get replaced with an ir_expression of
+ * ir_binop_vector_extract. If this occurs, replace it with a dereference
+ * of the vector, and replace the RHS with an ir_triop_vector_insert.
+ */
+ handle_rvalue((ir_rvalue **)&ir->lhs);
+ this->fix_lhs(ir);
+
+ return rvalue_visit(ir);
+}
+
+
+/**
+ * Set up base_ir properly and call visit_leave() on a newly created
+ * ir_assignment node. This is used in cases where we have to insert an
+ * ir_assignment in a place where we know the hierarchical visitor won't see
+ * it.
+ */
+void
+lower_tess_level_visitor::visit_new_assignment(ir_assignment *ir)
+{
+ ir_instruction *old_base_ir = this->base_ir;
+ this->base_ir = ir;
+ ir->accept(this);
+ this->base_ir = old_base_ir;
+}
+
+
+/**
+ * If a gl_TessLevel* variable appears as an argument in an ir_call
+ * expression, replace it with a temporary variable, and make sure the ir_call
+ * is preceded and/or followed by assignments that copy the contents of the
+ * temporary variable to and/or from gl_TessLevel*. Each of these
+ * assignments is then lowered to refer to gl_TessLevel*MESA.
+ */
+ir_visitor_status
+lower_tess_level_visitor::visit_leave(ir_call *ir)
+{
+ void *ctx = ralloc_parent(ir);
+
+ const exec_node *formal_param_node = ir->callee->parameters.get_head_raw();
+ const exec_node *actual_param_node = ir->actual_parameters.get_head_raw();
+ while (!actual_param_node->is_tail_sentinel()) {
+ ir_variable *formal_param = (ir_variable *) formal_param_node;
+ ir_rvalue *actual_param = (ir_rvalue *) actual_param_node;
+
+ /* Advance formal_param_node and actual_param_node now so that we can
+ * safely replace actual_param with another node, if necessary, below.
+ */
+ formal_param_node = formal_param_node->next;
+ actual_param_node = actual_param_node->next;
+
+ if (!this->is_tess_level_array(actual_param))
+ continue;
+
+ /* User is trying to pass a whole gl_TessLevel* array to a function
+ * call. Since we are reshaping gl_TessLevel* from an array of floats
+ * to a vec4, this isn't going to work anymore, so use a temporary
+ * array instead.
+ */
+ ir_variable *temp = new(ctx) ir_variable(
+ actual_param->type, "temp_tess_level", ir_var_temporary);
+ this->base_ir->insert_before(temp);
+ actual_param->replace_with(
+ new(ctx) ir_dereference_variable(temp));
+ if (formal_param->data.mode == ir_var_function_in
+ || formal_param->data.mode == ir_var_function_inout) {
+ /* Copy from gl_TessLevel* to the temporary before the call.
+ * Since we are going to insert this copy before the current
+ * instruction, we need to visit it afterwards to make sure it
+ * gets lowered.
+ */
+ ir_assignment *new_assignment = new(ctx) ir_assignment(
+ new(ctx) ir_dereference_variable(temp),
+ actual_param->clone(ctx, NULL));
+ this->base_ir->insert_before(new_assignment);
+ this->visit_new_assignment(new_assignment);
+ }
+ if (formal_param->data.mode == ir_var_function_out
+ || formal_param->data.mode == ir_var_function_inout) {
+ /* Copy from the temporary to gl_TessLevel* after the call.
+ * Since visit_list_elements() has already decided which
+ * instruction it's going to visit next, we need to visit the new
+ * assignment ourselves afterwards to make sure it gets lowered.
+ */
+ ir_assignment *new_assignment = new(ctx) ir_assignment(
+ actual_param->clone(ctx, NULL),
+ new(ctx) ir_dereference_variable(temp));
+ this->base_ir->insert_after(new_assignment);
+ this->visit_new_assignment(new_assignment);
+ }
+ }
+
+ return rvalue_visit(ir);
+}
+
+
+bool
+lower_tess_level(gl_linked_shader *shader)
+{
+ if ((shader->Stage != MESA_SHADER_TESS_CTRL) &&
+ (shader->Stage != MESA_SHADER_TESS_EVAL))
+ return false;
+
+ lower_tess_level_visitor v(shader->Stage);
+
+ visit_list_elements(&v, shader->ir);
+
+ if (v.new_tess_level_outer_var)
+ shader->symbols->add_variable(v.new_tess_level_outer_var);
+ if (v.new_tess_level_inner_var)
+ shader->symbols->add_variable(v.new_tess_level_inner_var);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp
new file mode 100644
index 0000000000..db847f8a8c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_texture_projection.cpp
+ *
+ * IR lower pass to perform the division of texture coordinates by the texture
+ * projector if present.
+ *
+ * Many GPUs have a texture sampling opcode that takes the projector
+ * and does the divide internally, thus the presence of the projector
+ * in the IR. For GPUs that don't, this saves the driver needing the
+ * logic for handling the divide.
+ *
+ * \author Eric Anholt <eric@anholt.net>
+ */
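+/* A minimal sketch of the transformation, in rough IR-like notation: a
+ * projected texture lookup
+ *
+ *    (tex sampler coord ... projector)
+ *
+ * becomes, roughly,
+ *
+ *    projector_tmp = rcp(projector);
+ *    (tex sampler (coord * projector_tmp) ...)
+ *
+ * with any shadow comparator likewise multiplied by projector_tmp and the
+ * projector slot cleared.
+ */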
+
+#include "ir.h"
+
+namespace {
+
+class lower_texture_projection_visitor : public ir_hierarchical_visitor {
+public:
+ lower_texture_projection_visitor()
+ {
+ progress = false;
+ }
+
+ ir_visitor_status visit_leave(ir_texture *ir);
+
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+ir_visitor_status
+lower_texture_projection_visitor::visit_leave(ir_texture *ir)
+{
+ if (!ir->projector)
+ return visit_continue;
+
+ void *mem_ctx = ralloc_parent(ir);
+
+ ir_variable *var = new(mem_ctx) ir_variable(ir->projector->type,
+ "projector", ir_var_temporary);
+ base_ir->insert_before(var);
+ ir_dereference *deref = new(mem_ctx) ir_dereference_variable(var);
+ ir_expression *expr = new(mem_ctx) ir_expression(ir_unop_rcp,
+ ir->projector->type,
+ ir->projector,
+ NULL);
+ ir_assignment *assign = new(mem_ctx) ir_assignment(deref, expr);
+ base_ir->insert_before(assign);
+
+ deref = new(mem_ctx) ir_dereference_variable(var);
+ ir->coordinate = new(mem_ctx) ir_expression(ir_binop_mul,
+ ir->coordinate->type,
+ ir->coordinate,
+ deref);
+
+ if (ir->shadow_comparator) {
+ deref = new(mem_ctx) ir_dereference_variable(var);
+ ir->shadow_comparator = new(mem_ctx) ir_expression(ir_binop_mul,
+ ir->shadow_comparator->type,
+ ir->shadow_comparator,
+ deref);
+ }
+
+ ir->projector = NULL;
+
+ progress = true;
+ return visit_continue;
+}
+
+bool
+do_lower_texture_projection(exec_list *instructions)
+{
+ lower_texture_projection_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp
new file mode 100644
index 0000000000..08d4f72efa
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp
@@ -0,0 +1,1142 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_ubo_reference.cpp
+ *
+ * IR lower pass to replace dereferences of variables in a uniform
+ * buffer object with usage of ir_binop_ubo_load expressions, each of
+ * which can read data up to the size of a vec4.
+ *
+ * This relieves drivers of the responsibility to deal with tricky UBO
+ * layout issues like std140 structures and row_major matrices on
+ * their own.
+ */
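+/* A minimal sketch, assuming a hypothetical std140 block: given
+ *
+ *    uniform Block { vec4 a; vec4 b; } blk;
+ *
+ * a read of blk.b is replaced by, roughly,
+ *
+ *    ubo_load_temp = (ubo_load block_index 16u)
+ *
+ * where 16 is b's std140 byte offset within the block.
+ */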
+
+#include "lower_buffer_access.h"
+#include "ir_builder.h"
+#include "main/macros.h"
+#include "glsl_parser_extras.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+namespace {
+class lower_ubo_reference_visitor :
+ public lower_buffer_access::lower_buffer_access {
+public:
+ lower_ubo_reference_visitor(struct gl_linked_shader *shader,
+ bool clamp_block_indices,
+ bool use_std430_as_default)
+ : shader(shader), clamp_block_indices(clamp_block_indices),
+ struct_field(NULL), variable(NULL)
+ {
+ this->use_std430_as_default = use_std430_as_default;
+ }
+
+ void handle_rvalue(ir_rvalue **rvalue);
+ ir_visitor_status visit_enter(ir_assignment *ir);
+
+ void setup_for_load_or_store(void *mem_ctx,
+ ir_variable *var,
+ ir_rvalue *deref,
+ ir_rvalue **offset,
+ unsigned *const_offset,
+ bool *row_major,
+ const glsl_type **matrix_type,
+ enum glsl_interface_packing packing);
+ uint32_t ssbo_access_params();
+ ir_expression *ubo_load(void *mem_ctx, const struct glsl_type *type,
+ ir_rvalue *offset);
+ ir_call *ssbo_load(void *mem_ctx, const struct glsl_type *type,
+ ir_rvalue *offset);
+
+ bool check_for_buffer_array_copy(ir_assignment *ir);
+ bool check_for_buffer_struct_copy(ir_assignment *ir);
+ void check_for_ssbo_store(ir_assignment *ir);
+ void write_to_memory(void *mem_ctx, ir_dereference *deref, ir_variable *var,
+ ir_variable *write_var, unsigned write_mask);
+ ir_call *ssbo_store(void *mem_ctx, ir_rvalue *deref, ir_rvalue *offset,
+ unsigned write_mask);
+
+ enum {
+ ubo_load_access,
+ ssbo_load_access,
+ ssbo_store_access,
+ ssbo_unsized_array_length_access,
+ ssbo_atomic_access,
+ } buffer_access_type;
+
+ void insert_buffer_access(void *mem_ctx, ir_dereference *deref,
+ const glsl_type *type, ir_rvalue *offset,
+ unsigned mask, int channel);
+
+ ir_visitor_status visit_enter(class ir_expression *);
+ ir_expression *calculate_ssbo_unsized_array_length(ir_expression *expr);
+ void check_ssbo_unsized_array_length_expression(class ir_expression *);
+ void check_ssbo_unsized_array_length_assignment(ir_assignment *ir);
+
+ ir_expression *process_ssbo_unsized_array_length(ir_rvalue **,
+ ir_dereference *,
+ ir_variable *);
+ ir_expression *emit_ssbo_get_buffer_size(void *mem_ctx);
+
+ unsigned calculate_unsized_array_stride(ir_dereference *deref,
+ enum glsl_interface_packing packing);
+
+ ir_call *lower_ssbo_atomic_intrinsic(ir_call *ir);
+ ir_call *check_for_ssbo_atomic_intrinsic(ir_call *ir);
+ ir_visitor_status visit_enter(ir_call *ir);
+ ir_visitor_status visit_enter(ir_texture *ir);
+
+ struct gl_linked_shader *shader;
+ bool clamp_block_indices;
+ const struct glsl_struct_field *struct_field;
+ ir_variable *variable;
+ ir_rvalue *uniform_block;
+ bool progress;
+};
+
+/**
+ * Determine the name of the interface block field
+ *
+ * This is the name of the specific member as it would appear in the
+ * \c gl_uniform_buffer_variable::Name field in the shader's
+ * \c UniformBlocks array.
+ */
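+/* A minimal sketch, assuming a hypothetical instanced block array
+ *
+ *    uniform Block { vec4 v; } blk[4];
+ *
+ * a deref of blk[2].v yields the field name "Block[2]" with
+ * *nonconst_block_index left NULL, while blk[i].v yields "Block[0]" with
+ * *nonconst_block_index set to the rvalue for i.
+ */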
+static const char *
+interface_field_name(void *mem_ctx, char *base_name, ir_rvalue *d,
+ ir_rvalue **nonconst_block_index)
+{
+ *nonconst_block_index = NULL;
+ char *name_copy = NULL;
+ size_t base_length = 0;
+
+ /* Loop back through the IR until we find the uniform block */
+ ir_rvalue *ir = d;
+ while (ir != NULL) {
+ switch (ir->ir_type) {
+ case ir_type_dereference_variable: {
+ /* Exit loop */
+ ir = NULL;
+ break;
+ }
+
+ case ir_type_dereference_record: {
+ ir_dereference_record *r = (ir_dereference_record *) ir;
+ ir = r->record->as_dereference();
+
+ /* If we got here it means any previous array subscripts belong to
+ * block members and not the block itself so skip over them in the
+ * next pass.
+ */
+ d = ir;
+ break;
+ }
+
+ case ir_type_dereference_array: {
+ ir_dereference_array *a = (ir_dereference_array *) ir;
+ ir = a->array->as_dereference();
+ break;
+ }
+
+ case ir_type_swizzle: {
+ ir_swizzle *s = (ir_swizzle *) ir;
+ ir = s->val->as_dereference();
+ /* Skip swizzle in the next pass */
+ d = ir;
+ break;
+ }
+
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+ }
+
+ while (d != NULL) {
+ switch (d->ir_type) {
+ case ir_type_dereference_variable: {
+ ir_dereference_variable *v = (ir_dereference_variable *) d;
+ if (name_copy != NULL &&
+ v->var->is_interface_instance() &&
+ v->var->type->is_array()) {
+ return name_copy;
+ } else {
+ *nonconst_block_index = NULL;
+ return base_name;
+ }
+
+ break;
+ }
+
+ case ir_type_dereference_array: {
+ ir_dereference_array *a = (ir_dereference_array *) d;
+ size_t new_length;
+
+ if (name_copy == NULL) {
+ name_copy = ralloc_strdup(mem_ctx, base_name);
+ base_length = strlen(name_copy);
+ }
+
+ /* For arrays of arrays we start at the innermost array and work our
+ * way out, so we need to insert the subscript at the base of the
+ * name string rather than just attaching it to the end.
+ */
+ new_length = base_length;
+ ir_constant *const_index = a->array_index->as_constant();
+ char *end = ralloc_strdup(NULL, &name_copy[new_length]);
+ if (!const_index) {
+ ir_rvalue *array_index = a->array_index;
+ if (array_index->type != glsl_type::uint_type)
+ array_index = i2u(array_index);
+
+ if (a->array->type->is_array() &&
+ a->array->type->fields.array->is_array()) {
+ ir_constant *base_size = new(mem_ctx)
+ ir_constant(a->array->type->fields.array->arrays_of_arrays_size());
+ array_index = mul(array_index, base_size);
+ }
+
+ if (*nonconst_block_index) {
+ *nonconst_block_index = add(*nonconst_block_index, array_index);
+ } else {
+ *nonconst_block_index = array_index;
+ }
+
+ ralloc_asprintf_rewrite_tail(&name_copy, &new_length, "[0]%s",
+ end);
+ } else {
+ ralloc_asprintf_rewrite_tail(&name_copy, &new_length, "[%d]%s",
+ const_index->get_uint_component(0),
+ end);
+ }
+ ralloc_free(end);
+
+ d = a->array->as_dereference();
+
+ break;
+ }
+
+ default:
+ assert(!"Should not get here.");
+ break;
+ }
+ }
+
+ assert(!"Should not get here.");
+ return NULL;
+}
+
+static ir_rvalue *
+clamp_to_array_bounds(void *mem_ctx, ir_rvalue *index, const glsl_type *type)
+{
+ assert(type->is_array());
+
+ const unsigned array_size = type->arrays_of_arrays_size();
+
+ ir_constant *max_index = new(mem_ctx) ir_constant(array_size - 1);
+ max_index->type = index->type;
+
+ ir_constant *zero = new(mem_ctx) ir_constant(0);
+ zero->type = index->type;
+
+ if (index->type->base_type == GLSL_TYPE_INT)
+ index = max2(index, zero);
+ index = min2(index, max_index);
+
+ return index;
+}
+
+void
+lower_ubo_reference_visitor::setup_for_load_or_store(void *mem_ctx,
+ ir_variable *var,
+ ir_rvalue *deref,
+ ir_rvalue **offset,
+ unsigned *const_offset,
+ bool *row_major,
+ const glsl_type **matrix_type,
+ enum glsl_interface_packing packing)
+{
+ /* Determine the name of the interface block */
+ ir_rvalue *nonconst_block_index;
+ const char *const field_name =
+ interface_field_name(mem_ctx, (char *) var->get_interface_type()->name,
+ deref, &nonconst_block_index);
+
+ if (nonconst_block_index && clamp_block_indices) {
+ nonconst_block_index =
+ clamp_to_array_bounds(mem_ctx, nonconst_block_index, var->type);
+ }
+
+ /* Locate the block by interface name */
+ unsigned num_blocks;
+ struct gl_uniform_block **blocks;
+ if (this->buffer_access_type != ubo_load_access) {
+ num_blocks = shader->Program->info.num_ssbos;
+ blocks = shader->Program->sh.ShaderStorageBlocks;
+ } else {
+ num_blocks = shader->Program->info.num_ubos;
+ blocks = shader->Program->sh.UniformBlocks;
+ }
+ this->uniform_block = NULL;
+ for (unsigned i = 0; i < num_blocks; i++) {
+ if (strcmp(field_name, blocks[i]->Name) == 0) {
+
+ ir_constant *index = new(mem_ctx) ir_constant(i);
+
+ if (nonconst_block_index) {
+ this->uniform_block = add(nonconst_block_index, index);
+ } else {
+ this->uniform_block = index;
+ }
+
+ if (var->is_interface_instance()) {
+ *const_offset = 0;
+ } else {
+ *const_offset = blocks[i]->Uniforms[var->data.location].Offset;
+ }
+
+ break;
+ }
+ }
+
+ assert(this->uniform_block);
+
+ this->struct_field = NULL;
+ setup_buffer_access(mem_ctx, deref, offset, const_offset, row_major,
+ matrix_type, &this->struct_field, packing);
+}
+
+void
+lower_ubo_reference_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_dereference *deref = (*rvalue)->as_dereference();
+ if (!deref)
+ return;
+
+ ir_variable *var = deref->variable_referenced();
+ if (!var || !var->is_in_buffer_block())
+ return;
+
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ ir_rvalue *offset = NULL;
+ unsigned const_offset;
+ bool row_major;
+ const glsl_type *matrix_type;
+
+ enum glsl_interface_packing packing =
+ var->get_interface_type()->
+ get_internal_ifc_packing(use_std430_as_default);
+
+ this->buffer_access_type =
+ var->is_in_shader_storage_block() ?
+ ssbo_load_access : ubo_load_access;
+ this->variable = var;
+
+ /* Compute the offset to the start of the dereference, as well as other
+ * information we need to configure the load.
+ */
+ setup_for_load_or_store(mem_ctx, var, deref,
+ &offset, &const_offset,
+ &row_major, &matrix_type,
+ packing);
+ assert(offset);
+
+ /* Now that we've calculated the offset to the start of the
+ * dereference, walk over the type and emit loads into a temporary.
+ */
+ const glsl_type *type = (*rvalue)->type;
+ ir_variable *load_var = new(mem_ctx) ir_variable(type,
+ "ubo_load_temp",
+ ir_var_temporary);
+ base_ir->insert_before(load_var);
+
+ ir_variable *load_offset = new(mem_ctx) ir_variable(glsl_type::uint_type,
+ "ubo_load_temp_offset",
+ ir_var_temporary);
+ base_ir->insert_before(load_offset);
+ base_ir->insert_before(assign(load_offset, offset));
+
+ deref = new(mem_ctx) ir_dereference_variable(load_var);
+ emit_access(mem_ctx, false, deref, load_offset, const_offset,
+ row_major, matrix_type, packing, 0);
+ *rvalue = deref;
+
+ progress = true;
+}
+
+ir_expression *
+lower_ubo_reference_visitor::ubo_load(void *mem_ctx,
+ const glsl_type *type,
+ ir_rvalue *offset)
+{
+ ir_rvalue *block_ref = this->uniform_block->clone(mem_ctx, NULL);
+ return new(mem_ctx)
+ ir_expression(ir_binop_ubo_load,
+ type,
+ block_ref,
+ offset);
+}
+
+static bool
+shader_storage_buffer_object(const _mesa_glsl_parse_state *state)
+{
+ return state->has_shader_storage_buffer_objects();
+}
+
+uint32_t
+lower_ubo_reference_visitor::ssbo_access_params()
+{
+ assert(variable);
+
+ if (variable->is_interface_instance()) {
+ assert(struct_field);
+
+ return ((struct_field->memory_coherent ? ACCESS_COHERENT : 0) |
+ (struct_field->memory_restrict ? ACCESS_RESTRICT : 0) |
+ (struct_field->memory_volatile ? ACCESS_VOLATILE : 0));
+ } else {
+ return ((variable->data.memory_coherent ? ACCESS_COHERENT : 0) |
+ (variable->data.memory_restrict ? ACCESS_RESTRICT : 0) |
+ (variable->data.memory_volatile ? ACCESS_VOLATILE : 0));
+ }
+}
+
+ir_call *
+lower_ubo_reference_visitor::ssbo_store(void *mem_ctx,
+ ir_rvalue *deref,
+ ir_rvalue *offset,
+ unsigned write_mask)
+{
+ exec_list sig_params;
+
+ ir_variable *block_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "block_ref" , ir_var_function_in);
+ sig_params.push_tail(block_ref);
+
+ ir_variable *offset_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "offset" , ir_var_function_in);
+ sig_params.push_tail(offset_ref);
+
+ ir_variable *val_ref = new(mem_ctx)
+ ir_variable(deref->type, "value" , ir_var_function_in);
+ sig_params.push_tail(val_ref);
+
+ ir_variable *writemask_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "write_mask" , ir_var_function_in);
+ sig_params.push_tail(writemask_ref);
+
+ ir_variable *access_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "access" , ir_var_function_in);
+ sig_params.push_tail(access_ref);
+
+ ir_function_signature *sig = new(mem_ctx)
+ ir_function_signature(glsl_type::void_type, shader_storage_buffer_object);
+ assert(sig);
+ sig->replace_parameters(&sig_params);
+ sig->intrinsic_id = ir_intrinsic_ssbo_store;
+
+ ir_function *f = new(mem_ctx) ir_function("__intrinsic_store_ssbo");
+ f->add_signature(sig);
+
+ exec_list call_params;
+ call_params.push_tail(this->uniform_block->clone(mem_ctx, NULL));
+ call_params.push_tail(offset->clone(mem_ctx, NULL));
+ call_params.push_tail(deref->clone(mem_ctx, NULL));
+ call_params.push_tail(new(mem_ctx) ir_constant(write_mask));
+ call_params.push_tail(new(mem_ctx) ir_constant(ssbo_access_params()));
+ return new(mem_ctx) ir_call(sig, NULL, &call_params);
+}
+
+ir_call *
+lower_ubo_reference_visitor::ssbo_load(void *mem_ctx,
+ const struct glsl_type *type,
+ ir_rvalue *offset)
+{
+ exec_list sig_params;
+
+ ir_variable *block_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "block_ref" , ir_var_function_in);
+ sig_params.push_tail(block_ref);
+
+ ir_variable *offset_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "offset_ref" , ir_var_function_in);
+ sig_params.push_tail(offset_ref);
+
+ ir_variable *access_ref = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "access" , ir_var_function_in);
+ sig_params.push_tail(access_ref);
+
+ ir_function_signature *sig =
+ new(mem_ctx) ir_function_signature(type, shader_storage_buffer_object);
+ assert(sig);
+ sig->replace_parameters(&sig_params);
+ sig->intrinsic_id = ir_intrinsic_ssbo_load;
+
+ ir_function *f = new(mem_ctx) ir_function("__intrinsic_load_ssbo");
+ f->add_signature(sig);
+
+ ir_variable *result = new(mem_ctx)
+ ir_variable(type, "ssbo_load_result", ir_var_temporary);
+ base_ir->insert_before(result);
+ ir_dereference_variable *deref_result = new(mem_ctx)
+ ir_dereference_variable(result);
+
+ exec_list call_params;
+ call_params.push_tail(this->uniform_block->clone(mem_ctx, NULL));
+ call_params.push_tail(offset->clone(mem_ctx, NULL));
+ call_params.push_tail(new(mem_ctx) ir_constant(ssbo_access_params()));
+
+ return new(mem_ctx) ir_call(sig, deref_result, &call_params);
+}
+
+void
+lower_ubo_reference_visitor::insert_buffer_access(void *mem_ctx,
+ ir_dereference *deref,
+ const glsl_type *type,
+ ir_rvalue *offset,
+ unsigned mask,
+ int channel)
+{
+ switch (this->buffer_access_type) {
+ case ubo_load_access:
+ base_ir->insert_before(assign(deref->clone(mem_ctx, NULL),
+ ubo_load(mem_ctx, type, offset),
+ mask));
+ break;
+ case ssbo_load_access: {
+ ir_call *load_ssbo = ssbo_load(mem_ctx, type, offset);
+ base_ir->insert_before(load_ssbo);
+ ir_rvalue *value = load_ssbo->return_deref->as_rvalue()->clone(mem_ctx, NULL);
+ ir_assignment *assignment =
+ assign(deref->clone(mem_ctx, NULL), value, mask);
+ base_ir->insert_before(assignment);
+ break;
+ }
+ case ssbo_store_access:
+ if (channel >= 0) {
+ base_ir->insert_after(ssbo_store(mem_ctx,
+ swizzle(deref, channel, 1),
+ offset, 1));
+ } else {
+ base_ir->insert_after(ssbo_store(mem_ctx, deref, offset, mask));
+ }
+ break;
+ default:
+ unreachable("invalid buffer_access_type in insert_buffer_access");
+ }
+}
+
+void
+lower_ubo_reference_visitor::write_to_memory(void *mem_ctx,
+ ir_dereference *deref,
+ ir_variable *var,
+ ir_variable *write_var,
+ unsigned write_mask)
+{
+ ir_rvalue *offset = NULL;
+ unsigned const_offset;
+ bool row_major;
+ const glsl_type *matrix_type;
+
+ enum glsl_interface_packing packing =
+ var->get_interface_type()->
+ get_internal_ifc_packing(use_std430_as_default);
+
+ this->buffer_access_type = ssbo_store_access;
+ this->variable = var;
+
+ /* Compute the offset to the start of the dereference, as well as other
+ * information we need to configure the write.
+ */
+ setup_for_load_or_store(mem_ctx, var, deref,
+ &offset, &const_offset,
+ &row_major, &matrix_type,
+ packing);
+ assert(offset);
+
+ /* Now emit writes from the temporary to memory */
+ ir_variable *write_offset =
+ new(mem_ctx) ir_variable(glsl_type::uint_type,
+ "ssbo_store_temp_offset",
+ ir_var_temporary);
+
+ base_ir->insert_before(write_offset);
+ base_ir->insert_before(assign(write_offset, offset));
+
+ deref = new(mem_ctx) ir_dereference_variable(write_var);
+ emit_access(mem_ctx, true, deref, write_offset, const_offset,
+ row_major, matrix_type, packing, write_mask);
+}
+
+ir_visitor_status
+lower_ubo_reference_visitor::visit_enter(ir_expression *ir)
+{
+ check_ssbo_unsized_array_length_expression(ir);
+ return rvalue_visit(ir);
+}
+
+ir_expression *
+lower_ubo_reference_visitor::calculate_ssbo_unsized_array_length(ir_expression *expr)
+{
+ if (expr->operation !=
+ ir_expression_operation(ir_unop_ssbo_unsized_array_length))
+ return NULL;
+
+ ir_rvalue *rvalue = expr->operands[0]->as_rvalue();
+ if (!rvalue ||
+ !rvalue->type->is_array() || !rvalue->type->is_unsized_array())
+ return NULL;
+
+ ir_dereference *deref = expr->operands[0]->as_dereference();
+ if (!deref)
+ return NULL;
+
+ ir_variable *var = expr->operands[0]->variable_referenced();
+ if (!var || !var->is_in_shader_storage_block())
+ return NULL;
+ return process_ssbo_unsized_array_length(&rvalue, deref, var);
+}
+
+void
+lower_ubo_reference_visitor::check_ssbo_unsized_array_length_expression(ir_expression *ir)
+{
+ if (ir->operation ==
+ ir_expression_operation(ir_unop_ssbo_unsized_array_length)) {
+ /* Don't replace this unop if it is found alone. It is going to be
+ * removed by the optimization passes or replaced if it is part of
+ * an ir_assignment or another ir_expression.
+ */
+ return;
+ }
+
+ for (unsigned i = 0; i < ir->num_operands; i++) {
+ if (ir->operands[i]->ir_type != ir_type_expression)
+ continue;
+ ir_expression *expr = (ir_expression *) ir->operands[i];
+ ir_expression *temp = calculate_ssbo_unsized_array_length(expr);
+ if (!temp)
+ continue;
+
+ delete expr;
+ ir->operands[i] = temp;
+ }
+}
+
+void
+lower_ubo_reference_visitor::check_ssbo_unsized_array_length_assignment(ir_assignment *ir)
+{
+ if (!ir->rhs || ir->rhs->ir_type != ir_type_expression)
+ return;
+
+ ir_expression *expr = (ir_expression *) ir->rhs;
+ ir_expression *temp = calculate_ssbo_unsized_array_length(expr);
+ if (!temp)
+ return;
+
+ delete expr;
+ ir->rhs = temp;
+ return;
+}
+
+ir_expression *
+lower_ubo_reference_visitor::emit_ssbo_get_buffer_size(void *mem_ctx)
+{
+ ir_rvalue *block_ref = this->uniform_block->clone(mem_ctx, NULL);
+ return new(mem_ctx) ir_expression(ir_unop_get_buffer_size,
+ glsl_type::int_type,
+ block_ref);
+}
+
+unsigned
+lower_ubo_reference_visitor::calculate_unsized_array_stride(ir_dereference *deref,
+ enum glsl_interface_packing packing)
+{
+ unsigned array_stride = 0;
+
+ switch (deref->ir_type) {
+ case ir_type_dereference_variable:
+ {
+ ir_dereference_variable *deref_var = (ir_dereference_variable *)deref;
+ const struct glsl_type *unsized_array_type = NULL;
+ /* An unsized array can be sized by other lowering passes, so pick
+ * the first field of the array which has the data type of the unsized
+ * array.
+ */
+ unsized_array_type = deref_var->var->type->fields.array;
+
+ /* Whether or not the field is row-major (because it might be a
+ * bvec2 or something) does not affect the array itself. We need
+ * to know whether an array element in its entirety is row-major.
+ */
+ const bool array_row_major =
+ is_dereferenced_thing_row_major(deref_var);
+
+ if (packing == GLSL_INTERFACE_PACKING_STD430) {
+ array_stride = unsized_array_type->std430_array_stride(array_row_major);
+ } else {
+ array_stride = unsized_array_type->std140_size(array_row_major);
+ array_stride = glsl_align(array_stride, 16);
+ }
+ break;
+ }
+ case ir_type_dereference_record:
+ {
+ ir_dereference_record *deref_record = (ir_dereference_record *) deref;
+ ir_dereference *interface_deref =
+ deref_record->record->as_dereference();
+ assert(interface_deref != NULL);
+ const struct glsl_type *interface_type = interface_deref->type;
+ unsigned record_length = interface_type->length;
+ /* An unsized array is always the last member of the interface block */
+ const struct glsl_type *unsized_array_type =
+ interface_type->fields.structure[record_length - 1].type->fields.array;
+
+ const bool array_row_major =
+ is_dereferenced_thing_row_major(deref_record);
+
+ if (packing == GLSL_INTERFACE_PACKING_STD430) {
+ array_stride = unsized_array_type->std430_array_stride(array_row_major);
+ } else {
+ array_stride = unsized_array_type->std140_size(array_row_major);
+ array_stride = glsl_align(array_stride, 16);
+ }
+ break;
+ }
+ default:
+ unreachable("Unsupported dereference type");
+ }
+ return array_stride;
+}
+
+ir_expression *
+lower_ubo_reference_visitor::process_ssbo_unsized_array_length(ir_rvalue **rvalue,
+ ir_dereference *deref,
+ ir_variable *var)
+{
+ void *mem_ctx = ralloc_parent(*rvalue);
+
+ ir_rvalue *base_offset = NULL;
+ unsigned const_offset;
+ bool row_major;
+ const glsl_type *matrix_type;
+
+ enum glsl_interface_packing packing =
+ var->get_interface_type()->
+ get_internal_ifc_packing(use_std430_as_default);
+ int unsized_array_stride =
+ calculate_unsized_array_stride(deref, packing);
+
+ this->buffer_access_type = ssbo_unsized_array_length_access;
+ this->variable = var;
+
+ /* Compute the offset to the start of the dereference, as well as other
+ * information we need to calculate the length.
+ */
+ setup_for_load_or_store(mem_ctx, var, deref,
+ &base_offset, &const_offset,
+ &row_major, &matrix_type,
+ packing);
+ /* array.length() =
+ * max((buffer_object_size - offset_of_array) / stride_of_array, 0)
+ */
+ ir_expression *buffer_size = emit_ssbo_get_buffer_size(mem_ctx);
+
+ ir_expression *offset_of_array = new(mem_ctx)
+ ir_expression(ir_binop_add, base_offset,
+ new(mem_ctx) ir_constant(const_offset));
+ ir_expression *offset_of_array_int = new(mem_ctx)
+ ir_expression(ir_unop_u2i, offset_of_array);
+
+ ir_expression *sub = new(mem_ctx)
+ ir_expression(ir_binop_sub, buffer_size, offset_of_array_int);
+ ir_expression *div = new(mem_ctx)
+ ir_expression(ir_binop_div, sub,
+ new(mem_ctx) ir_constant(unsized_array_stride));
+ ir_expression *max = new(mem_ctx)
+ ir_expression(ir_binop_max, div, new(mem_ctx) ir_constant(0));
+
+ return max;
+}
+
+void
+lower_ubo_reference_visitor::check_for_ssbo_store(ir_assignment *ir)
+{
+ if (!ir || !ir->lhs)
+ return;
+
+ ir_rvalue *rvalue = ir->lhs->as_rvalue();
+ if (!rvalue)
+ return;
+
+ ir_dereference *deref = ir->lhs->as_dereference();
+ if (!deref)
+ return;
+
+ ir_variable *var = ir->lhs->variable_referenced();
+ if (!var || !var->is_in_shader_storage_block())
+ return;
+
+ /* We have a write to a buffer variable, so declare a temporary and rewrite
+ * the assignment so that the temporary is the LHS.
+ */
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ const glsl_type *type = rvalue->type;
+ ir_variable *write_var = new(mem_ctx) ir_variable(type,
+ "ssbo_store_temp",
+ ir_var_temporary);
+ base_ir->insert_before(write_var);
+ ir->lhs = new(mem_ctx) ir_dereference_variable(write_var);
+
+ /* Now we have to write the value assigned to the temporary back to memory */
+ write_to_memory(mem_ctx, deref, var, write_var, ir->write_mask);
+ progress = true;
+}
+
+static bool
+is_buffer_backed_variable(ir_variable *var)
+{
+ return var->is_in_buffer_block() ||
+ var->data.mode == ir_var_shader_shared;
+}
+
+bool
+lower_ubo_reference_visitor::check_for_buffer_array_copy(ir_assignment *ir)
+{
+ if (!ir || !ir->lhs || !ir->rhs)
+ return false;
+
+ /* LHS and RHS must be arrays
+ * FIXME: arrays of arrays?
+ */
+ if (!ir->lhs->type->is_array() || !ir->rhs->type->is_array())
+ return false;
+
+ /* RHS must be a buffer-backed variable. This is what can cause the problem
+ * since it would lead to a series of loads that need to live until we
+ * see the writes to the LHS.
+ */
+ ir_variable *rhs_var = ir->rhs->variable_referenced();
+ if (!rhs_var || !is_buffer_backed_variable(rhs_var))
+ return false;
+
+ /* Split the array copy into individual element copies to reduce
+ * register pressure
+ */
+ ir_dereference *rhs_deref = ir->rhs->as_dereference();
+ if (!rhs_deref)
+ return false;
+
+ ir_dereference *lhs_deref = ir->lhs->as_dereference();
+ if (!lhs_deref)
+ return false;
+
+ assert(lhs_deref->type->length == rhs_deref->type->length);
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ for (unsigned i = 0; i < lhs_deref->type->length; i++) {
+ ir_dereference *lhs_i =
+ new(mem_ctx) ir_dereference_array(lhs_deref->clone(mem_ctx, NULL),
+ new(mem_ctx) ir_constant(i));
+
+ ir_dereference *rhs_i =
+ new(mem_ctx) ir_dereference_array(rhs_deref->clone(mem_ctx, NULL),
+ new(mem_ctx) ir_constant(i));
+ ir->insert_after(assign(lhs_i, rhs_i));
+ }
+
+ ir->remove();
+ progress = true;
+ return true;
+}
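+
+/* For illustration: with a hypothetical buffer-backed array b.arr of
+ * length 3, the copy
+ *
+ *    local = b.arr;
+ *
+ * is split by the code above into the element copies
+ *
+ *    local[0] = b.arr[0];
+ *    local[1] = b.arr[1];
+ *    local[2] = b.arr[2];
+ *
+ * so that each load is consumed by its store right away instead of all
+ * loads staying live until the whole array is written.
+ */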
+
+bool
+lower_ubo_reference_visitor::check_for_buffer_struct_copy(ir_assignment *ir)
+{
+ if (!ir || !ir->lhs || !ir->rhs)
+ return false;
+
+ /* LHS and RHS must be records */
+ if (!ir->lhs->type->is_struct() || !ir->rhs->type->is_struct())
+ return false;
+
+ /* RHS must be a buffer-backed variable. This is what can cause the problem
+ * since it would lead to a series of loads that need to live until we
+ * see the writes to the LHS.
+ */
+ ir_variable *rhs_var = ir->rhs->variable_referenced();
+ if (!rhs_var || !is_buffer_backed_variable(rhs_var))
+ return false;
+
+ /* Split the struct copy into individual element copies to reduce
+ * register pressure
+ */
+ ir_dereference *rhs_deref = ir->rhs->as_dereference();
+ if (!rhs_deref)
+ return false;
+
+ ir_dereference *lhs_deref = ir->lhs->as_dereference();
+ if (!lhs_deref)
+ return false;
+
+ assert(lhs_deref->type == rhs_deref->type);
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ for (unsigned i = 0; i < lhs_deref->type->length; i++) {
+ const char *field_name = lhs_deref->type->fields.structure[i].name;
+ ir_dereference *lhs_field =
+ new(mem_ctx) ir_dereference_record(lhs_deref->clone(mem_ctx, NULL),
+ field_name);
+ ir_dereference *rhs_field =
+ new(mem_ctx) ir_dereference_record(rhs_deref->clone(mem_ctx, NULL),
+ field_name);
+ ir->insert_after(assign(lhs_field, rhs_field));
+ }
+
+ ir->remove();
+ progress = true;
+ return true;
+}
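+
+/* For illustration: the same splitting applied to a hypothetical
+ * buffer-backed struct b.s with fields pos and vel:
+ *
+ *    local.pos = b.s.pos;
+ *    local.vel = b.s.vel;
+ */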
+
+ir_visitor_status
+lower_ubo_reference_visitor::visit_enter(ir_assignment *ir)
+{
+ /* Array and struct copies could involve large amounts of load/store
+ * operations. To improve register pressure we want to special-case
+ * these and split them into individual element copies.
+ * This way we avoid emitting all the loads for the RHS first and
+ * all the writes for the LHS second and register usage is more
+ * efficient.
+ */
+ if (check_for_buffer_array_copy(ir))
+ return visit_continue_with_parent;
+
+ if (check_for_buffer_struct_copy(ir))
+ return visit_continue_with_parent;
+
+ check_ssbo_unsized_array_length_assignment(ir);
+ check_for_ssbo_store(ir);
+ return rvalue_visit(ir);
+}
+
+/* Lowers the intrinsic call to a new internal intrinsic that replaces the
+ * access to the buffer variable in the first parameter with an offset
+ * and block index. This involves creating the new internal intrinsic
+ * (i.e. the new function signature).
+ */
+ir_call *
+lower_ubo_reference_visitor::lower_ssbo_atomic_intrinsic(ir_call *ir)
+{
+ /* SSBO atomics usually have 2 parameters, the buffer variable and an
+    * integer argument. The exception is CompSwap, which has an additional
+ * integer parameter.
+ */
+ int param_count = ir->actual_parameters.length();
+ assert(param_count == 2 || param_count == 3);
+
+ /* First argument must be a scalar integer buffer variable */
+ exec_node *param = ir->actual_parameters.get_head();
+ ir_instruction *inst = (ir_instruction *) param;
+ assert(inst->ir_type == ir_type_dereference_variable ||
+ inst->ir_type == ir_type_dereference_array ||
+ inst->ir_type == ir_type_dereference_record ||
+ inst->ir_type == ir_type_swizzle);
+
+ ir_rvalue *deref = (ir_rvalue *) inst;
+ assert(deref->type->is_scalar() &&
+ (deref->type->is_integer_32() || deref->type->is_float()));
+
+ ir_variable *var = deref->variable_referenced();
+ assert(var);
+
+   /* Compute the offset to the start of the dereference and the
+ * block index
+ */
+ void *mem_ctx = ralloc_parent(shader->ir);
+
+ ir_rvalue *offset = NULL;
+ unsigned const_offset;
+ bool row_major;
+ const glsl_type *matrix_type;
+
+ enum glsl_interface_packing packing =
+ var->get_interface_type()->
+ get_internal_ifc_packing(use_std430_as_default);
+
+ this->buffer_access_type = ssbo_atomic_access;
+ this->variable = var;
+
+ setup_for_load_or_store(mem_ctx, var, deref,
+ &offset, &const_offset,
+ &row_major, &matrix_type,
+ packing);
+ assert(offset);
+ assert(!row_major);
+ assert(matrix_type == NULL);
+
+ ir_rvalue *deref_offset =
+ add(offset, new(mem_ctx) ir_constant(const_offset));
+ ir_rvalue *block_index = this->uniform_block->clone(mem_ctx, NULL);
+
+ /* Create the new internal function signature that will take a block
+ * index and offset instead of a buffer variable
+ */
+ exec_list sig_params;
+ ir_variable *sig_param = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "block_ref" , ir_var_function_in);
+ sig_params.push_tail(sig_param);
+
+ sig_param = new(mem_ctx)
+ ir_variable(glsl_type::uint_type, "offset" , ir_var_function_in);
+ sig_params.push_tail(sig_param);
+
+ const glsl_type *type = deref->type->get_scalar_type();
+ sig_param = new(mem_ctx)
+ ir_variable(type, "data1", ir_var_function_in);
+ sig_params.push_tail(sig_param);
+
+ if (param_count == 3) {
+ sig_param = new(mem_ctx)
+ ir_variable(type, "data2", ir_var_function_in);
+ sig_params.push_tail(sig_param);
+ }
+
+ ir_function_signature *sig =
+ new(mem_ctx) ir_function_signature(deref->type,
+ shader_storage_buffer_object);
+ assert(sig);
+ sig->replace_parameters(&sig_params);
+
+ assert(ir->callee->intrinsic_id >= ir_intrinsic_generic_load);
+ assert(ir->callee->intrinsic_id <= ir_intrinsic_generic_atomic_comp_swap);
+ sig->intrinsic_id = MAP_INTRINSIC_TO_TYPE(ir->callee->intrinsic_id, ssbo);
+
+ char func_name[64];
+ sprintf(func_name, "%s_ssbo", ir->callee_name());
+ ir_function *f = new(mem_ctx) ir_function(func_name);
+ f->add_signature(sig);
+
+ /* Now, create the call to the internal intrinsic */
+ exec_list call_params;
+ call_params.push_tail(block_index);
+ call_params.push_tail(deref_offset);
+ param = ir->actual_parameters.get_head()->get_next();
+ ir_rvalue *param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
+ call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
+ if (param_count == 3) {
+ param = param->get_next();
+ param_as_rvalue = ((ir_instruction *) param)->as_rvalue();
+ call_params.push_tail(param_as_rvalue->clone(mem_ctx, NULL));
+ }
+ ir_dereference_variable *return_deref =
+ ir->return_deref->clone(mem_ctx, NULL);
+ return new(mem_ctx) ir_call(sig, return_deref, &call_params);
+}
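+
+/* For illustration: a call such as
+ *
+ *    atomicAdd(ssbo_block.counter, 1u)
+ *
+ * is rewritten by the code above into roughly
+ *
+ *    __intrinsic_atomic_add_ssbo(block_index, offset_of_counter, 1u)
+ *
+ * where the buffer variable has been replaced by a block index and a byte
+ * offset. The "_ssbo" suffix comes from the sprintf() above; the argument
+ * names here are placeholders.
+ */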
+
+ir_call *
+lower_ubo_reference_visitor::check_for_ssbo_atomic_intrinsic(ir_call *ir)
+{
+ exec_list& params = ir->actual_parameters;
+
+ if (params.length() < 2 || params.length() > 3)
+ return ir;
+
+ ir_rvalue *rvalue =
+ ((ir_instruction *) params.get_head())->as_rvalue();
+ if (!rvalue)
+ return ir;
+
+ ir_variable *var = rvalue->variable_referenced();
+ if (!var || !var->is_in_shader_storage_block())
+ return ir;
+
+ const enum ir_intrinsic_id id = ir->callee->intrinsic_id;
+ if (id == ir_intrinsic_generic_atomic_add ||
+ id == ir_intrinsic_generic_atomic_min ||
+ id == ir_intrinsic_generic_atomic_max ||
+ id == ir_intrinsic_generic_atomic_and ||
+ id == ir_intrinsic_generic_atomic_or ||
+ id == ir_intrinsic_generic_atomic_xor ||
+ id == ir_intrinsic_generic_atomic_exchange ||
+ id == ir_intrinsic_generic_atomic_comp_swap) {
+ return lower_ssbo_atomic_intrinsic(ir);
+ }
+
+ return ir;
+}
+
+
+ir_visitor_status
+lower_ubo_reference_visitor::visit_enter(ir_call *ir)
+{
+ ir_call *new_ir = check_for_ssbo_atomic_intrinsic(ir);
+ if (new_ir != ir) {
+ progress = true;
+ base_ir->replace_with(new_ir);
+ return visit_continue_with_parent;
+ }
+
+ return rvalue_visit(ir);
+}
+
+
+ir_visitor_status
+lower_ubo_reference_visitor::visit_enter(ir_texture *ir)
+{
+ ir_dereference *sampler = ir->sampler;
+
+ if (sampler->ir_type == ir_type_dereference_record) {
+ handle_rvalue((ir_rvalue **)&ir->sampler);
+ return visit_continue_with_parent;
+ }
+
+ return rvalue_visit(ir);
+}
+
+
+} /* unnamed namespace */
+
+void
+lower_ubo_reference(struct gl_linked_shader *shader,
+ bool clamp_block_indices, bool use_std430_as_default)
+{
+ lower_ubo_reference_visitor v(shader, clamp_block_indices,
+ use_std430_as_default);
+
+   /* Loop over the instructions lowering references, because taking
+ * a deref of a UBO array using a UBO dereference as the index will
+ * produce a collection of instructions all of which have cloned
+ * UBO dereferences for that array index.
+ */
+ do {
+ v.progress = false;
+ visit_list_elements(&v, shader->ir);
+ } while (v.progress);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp
new file mode 100644
index 0000000000..c22789c39e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp
@@ -0,0 +1,567 @@
+/*
+ * Copyright © 2010 Luca Barbieri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_variable_index_to_cond_assign.cpp
+ *
+ * Turns non-constant indexing of array types into a series of
+ * conditional moves of each element into a temporary.
+ *
+ * Pre-DX10 GPUs often don't have a native way to do this operation,
+ * and this works around that.
+ *
+ * The lowering process proceeds as follows. Each non-constant index
+ * found in an r-value is converted to a canonical form \c array[i]. Each
+ * element of the array is conditionally assigned to a temporary by comparing
+ * \c i to a constant index. This is done by cloning the canonical form and
+ * replacing all occurrences of \c i with a constant. Each remaining occurrence
+ * of the canonical form in the IR is replaced with a dereference of the
+ * temporary variable.
+ *
+ * L-values with non-constant indices are handled similarly. In this case,
+ * the RHS of the assignment is assigned to a temporary. The non-constant
+ * index is replaced with the canonical form (just like for r-values). The
+ * temporary is conditionally assigned to each element of the canonical form
+ * by comparing \c i with each index. The same clone-and-replace scheme is
+ * used.
+ */
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "main/macros.h"
+#include "program/prog_instruction.h" /* For SWIZZLE_XXXX */
+#include "ir_builder.h"
+
+using namespace ir_builder;
+
+/**
+ * Generate a comparison value for a block of indices
+ *
+ * Lowering passes for non-constant indexing of arrays, matrices, or vectors
+ * can use this to generate blocks of index comparison values.
+ *
+ * \param body        ir_factory to which the new instructions are emitted;
+ *                    its memory context is used for all allocations
+ * \param index       \c ir_variable containing the desired index
+ * \param base        Base value for this block of comparisons
+ * \param components  Number of unique index values to compare. This must
+ *                    be in the range [1, 4].
+ *
+ * \returns
+ * An \c ir_variable containing the per-component comparison results. This
+ * must be dereferenced per use.
+ */
+ir_variable *
+compare_index_block(ir_factory &body, ir_variable *index,
+ unsigned base, unsigned components)
+{
+ assert(index->type->is_scalar());
+ assert(index->type->base_type == GLSL_TYPE_INT ||
+ index->type->base_type == GLSL_TYPE_UINT);
+ assert(components >= 1 && components <= 4);
+
+ ir_rvalue *const broadcast_index = components > 1
+ ? swizzle(index, SWIZZLE_XXXX, components)
+ : operand(index).val;
+
+ /* Compare the desired index value with the next block of four indices.
+ */
+ ir_constant_data test_indices_data;
+ memset(&test_indices_data, 0, sizeof(test_indices_data));
+ test_indices_data.i[0] = base;
+ test_indices_data.i[1] = base + 1;
+ test_indices_data.i[2] = base + 2;
+ test_indices_data.i[3] = base + 3;
+
+ ir_constant *const test_indices =
+ new(body.mem_ctx) ir_constant(broadcast_index->type, &test_indices_data);
+
+ ir_rvalue *const condition_val = equal(broadcast_index, test_indices);
+
+ ir_variable *const condition = body.make_temp(condition_val->type,
+ "dereference_condition");
+
+ body.emit(assign(condition, condition_val));
+
+ return condition;
+}
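+
+/* For illustration: for base == 4 and components == 4 this emits roughly
+ *
+ *    bvec4 dereference_condition = equal(index.xxxx, ivec4(4, 5, 6, 7));
+ *
+ * i.e. one comparison per candidate index in the block, ready to be used
+ * one component at a time as a conditional-assignment condition.
+ */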
+
+static inline bool
+is_array_or_matrix(const ir_rvalue *ir)
+{
+ return (ir->type->is_array() || ir->type->is_matrix());
+}
+
+namespace {
+/**
+ * Replace a dereference of a variable with a specified r-value
+ *
+ * Each time a dereference of the specified value is replaced, the r-value
+ * tree is cloned.
+ */
+class deref_replacer : public ir_rvalue_visitor {
+public:
+ deref_replacer(const ir_variable *variable_to_replace, ir_rvalue *value)
+ : variable_to_replace(variable_to_replace), value(value),
+ progress(false)
+ {
+ assert(this->variable_to_replace != NULL);
+ assert(this->value != NULL);
+ }
+
+ virtual void handle_rvalue(ir_rvalue **rvalue)
+ {
+ ir_dereference_variable *const dv = (*rvalue)->as_dereference_variable();
+
+ if (dv != NULL && dv->var == this->variable_to_replace) {
+ this->progress = true;
+ *rvalue = this->value->clone(ralloc_parent(*rvalue), NULL);
+ }
+ }
+
+ const ir_variable *variable_to_replace;
+ ir_rvalue *value;
+ bool progress;
+};
+
+/**
+ * Find a variable index dereference of an array in an rvalue tree
+ */
+class find_variable_index : public ir_hierarchical_visitor {
+public:
+ find_variable_index()
+ : deref(NULL)
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_enter(ir_dereference_array *ir)
+ {
+ if (is_array_or_matrix(ir->array) &&
+ ir->array_index->as_constant() == NULL) {
+ this->deref = ir;
+ return visit_stop;
+ }
+
+ return visit_continue;
+ }
+
+ /**
+ * First array dereference found in the tree that has a non-constant index.
+ */
+ ir_dereference_array *deref;
+};
+
+struct assignment_generator
+{
+ ir_instruction* base_ir;
+ ir_dereference *rvalue;
+ ir_variable *old_index;
+ bool is_write;
+ unsigned int write_mask;
+ ir_variable* var;
+
+ assignment_generator()
+ : base_ir(NULL),
+ rvalue(NULL),
+ old_index(NULL),
+ is_write(false),
+ write_mask(0),
+ var(NULL)
+ {
+ }
+
+ void generate(unsigned i, ir_rvalue* condition, ir_factory &body) const
+ {
+      /* Clone the old r-value in its entirety. Then replace any occurrences of
+ * the old variable index with the new constant index.
+ */
+ ir_dereference *element = this->rvalue->clone(body.mem_ctx, NULL);
+ ir_constant *const index = body.constant(i);
+ deref_replacer r(this->old_index, index);
+ element->accept(&r);
+ assert(r.progress);
+
+ /* Generate a conditional assignment to (or from) the constant indexed
+ * array dereference.
+ */
+ ir_assignment *const assignment = (is_write)
+ ? assign(element, this->var, condition, write_mask)
+ : assign(this->var, element, condition);
+
+ body.emit(assignment);
+ }
+};
+
+struct switch_generator
+{
+ /* make TFunction a template parameter if you need to use other generators */
+ typedef assignment_generator TFunction;
+ const TFunction& generator;
+
+ ir_variable* index;
+ unsigned linear_sequence_max_length;
+ unsigned condition_components;
+
+ void *mem_ctx;
+
+ switch_generator(const TFunction& generator, ir_variable *index,
+ unsigned linear_sequence_max_length,
+ unsigned condition_components)
+ : generator(generator), index(index),
+ linear_sequence_max_length(linear_sequence_max_length),
+ condition_components(condition_components)
+ {
+ this->mem_ctx = ralloc_parent(index);
+ }
+
+ void linear_sequence(unsigned begin, unsigned end, ir_factory &body)
+ {
+ if (begin == end)
+ return;
+
+ /* If the array access is a read, read the first element of this subregion
+ * unconditionally. The remaining tests will possibly overwrite this
+ * value with one of the other array elements.
+ *
+ * This optimization cannot be done for writes because it will cause the
+ * first element of the subregion to be written possibly *in addition* to
+ * one of the other elements.
+ */
+ unsigned first;
+ if (!this->generator.is_write) {
+ this->generator.generate(begin, 0, body);
+ first = begin + 1;
+ } else {
+ first = begin;
+ }
+
+ for (unsigned i = first; i < end; i += 4) {
+ const unsigned comps = MIN2(condition_components, end - i);
+ ir_variable *const cond = compare_index_block(body, index, i, comps);
+
+ if (comps == 1) {
+ this->generator.generate(i,
+ operand(cond).val,
+ body);
+ } else {
+ for (unsigned j = 0; j < comps; j++) {
+ this->generator.generate(i + j,
+ swizzle(cond, j, 1),
+ body);
+ }
+ }
+ }
+ }
+
+ void bisect(unsigned begin, unsigned end, ir_factory &body)
+ {
+ unsigned middle = (begin + end) >> 1;
+
+ assert(index->type->is_integer_32());
+
+ ir_constant *const middle_c = (index->type->base_type == GLSL_TYPE_UINT)
+ ? new(body.mem_ctx) ir_constant((unsigned)middle)
+ : new(body.mem_ctx) ir_constant((int)middle);
+
+ ir_if *if_less = new(body.mem_ctx) ir_if(less(this->index, middle_c));
+
+ ir_factory then_body(&if_less->then_instructions, body.mem_ctx);
+ ir_factory else_body(&if_less->else_instructions, body.mem_ctx);
+ generate(begin, middle, then_body);
+ generate(middle, end, else_body);
+
+ body.emit(if_less);
+ }
+
+ void generate(unsigned begin, unsigned end, ir_factory &body)
+ {
+ unsigned length = end - begin;
+ if (length <= this->linear_sequence_max_length)
+ return linear_sequence(begin, end, body);
+ else
+ return bisect(begin, end, body);
+ }
+};
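+
+/* For illustration: with linear_sequence_max_length == 4, lowering an
+ * access into a 16-element array bisects twice before emitting the
+ * comparisons, roughly
+ *
+ *    if (i < 8) {
+ *       if (i < 4) { compare i against 0..3 }
+ *       else       { compare i against 4..7 }
+ *    } else {
+ *       ... likewise for 8..15 ...
+ *    }
+ *
+ * so each access evaluates a logarithmic number of branches before a
+ * short linear block of comparisons.
+ */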
+
+/**
+ * Visitor class for lowering non-constant array indexing to conditional
+ * assignments.
+ */
+
+class variable_index_to_cond_assign_visitor : public ir_rvalue_visitor {
+public:
+ variable_index_to_cond_assign_visitor(gl_shader_stage stage,
+ bool lower_input,
+ bool lower_output,
+ bool lower_temp,
+ bool lower_uniform)
+ : progress(false), stage(stage), lower_inputs(lower_input),
+ lower_outputs(lower_output), lower_temps(lower_temp),
+ lower_uniforms(lower_uniform)
+ {
+ /* empty */
+ }
+
+ bool progress;
+
+ gl_shader_stage stage;
+ bool lower_inputs;
+ bool lower_outputs;
+ bool lower_temps;
+ bool lower_uniforms;
+
+ bool storage_type_needs_lowering(ir_dereference_array *deref) const
+ {
+ /* If a variable isn't eventually the target of this dereference, then
+ * it must be a constant or some sort of anonymous temporary storage.
+ *
+ * FINISHME: Is this correct? Most drivers treat arrays of constants as
+ * FINISHME: uniforms. It seems like this should do the same.
+ */
+ const ir_variable *const var = deref->array->variable_referenced();
+ if (var == NULL)
+ return this->lower_temps;
+
+ switch (var->data.mode) {
+ case ir_var_auto:
+ case ir_var_temporary:
+ return this->lower_temps;
+
+ case ir_var_uniform:
+ case ir_var_shader_storage:
+ return this->lower_uniforms;
+
+ case ir_var_shader_shared:
+ return false;
+
+ case ir_var_function_in:
+ case ir_var_const_in:
+ return this->lower_temps;
+
+ case ir_var_system_value:
+ /* There are only a few system values that have array types:
+ *
+ * gl_TessLevelInner[]
+ * gl_TessLevelOuter[]
+ * gl_SampleMaskIn[]
+ *
+ * The tessellation factor arrays are lowered to vec4/vec2s
+ * by lower_tess_level() before this pass occurs, so we'll
+ * never see them here.
+ *
+ * The only remaining case is gl_SampleMaskIn[], which has
+ * a length of ceil(ctx->Const.MaxSamples / 32). Most hardware
+ * supports no more than 32 samples, in which case our lowering
+ * produces a single read of gl_SampleMaskIn[0]. Even with 64x
+ * MSAA, the array length is only 2, so the lowering is fairly
+ * efficient. Therefore, lower unconditionally.
+ */
+ return true;
+
+ case ir_var_shader_in:
+      /* The input array size is unknown at compile time for non-patch
+ * inputs in TCS and TES. The arrays are sized to
+ * the implementation-dependent limit "gl_MaxPatchVertices", but
+ * the real size is stored in the "gl_PatchVerticesIn" built-in
+ * uniform.
+ *
+ * The TCS input array size is specified by
+ * glPatchParameteri(GL_PATCH_VERTICES).
+ *
+ * The TES input array size is specified by the "vertices" output
+ * layout qualifier in TCS.
+ */
+ if ((stage == MESA_SHADER_TESS_CTRL ||
+ stage == MESA_SHADER_TESS_EVAL) && !var->data.patch)
+ return false;
+ return this->lower_inputs;
+
+ case ir_var_function_out:
+ /* TCS non-patch outputs can only be indexed with "gl_InvocationID".
+ * Other expressions are not allowed.
+ */
+ if (stage == MESA_SHADER_TESS_CTRL && !var->data.patch)
+ return false;
+ return this->lower_temps;
+
+ case ir_var_shader_out:
+ return this->lower_outputs;
+
+ case ir_var_function_inout:
+ return this->lower_temps;
+ }
+
+ assert(!"Should not get here.");
+ return false;
+ }
+
+ bool needs_lowering(ir_dereference_array *deref) const
+ {
+ if (deref == NULL || deref->array_index->as_constant() ||
+ !is_array_or_matrix(deref->array))
+ return false;
+
+ return this->storage_type_needs_lowering(deref);
+ }
+
+ ir_variable *convert_dereference_array(ir_dereference_array *orig_deref,
+ ir_assignment* orig_assign,
+ ir_dereference *orig_base)
+ {
+ void *const mem_ctx = ralloc_parent(base_ir);
+ exec_list list;
+ ir_factory body(&list, mem_ctx);
+
+ assert(is_array_or_matrix(orig_deref->array));
+
+ const unsigned length = (orig_deref->array->type->is_array())
+ ? orig_deref->array->type->length
+ : orig_deref->array->type->matrix_columns;
+
+ /* Temporary storage for either the result of the dereference of
+ * the array, or the RHS that's being assigned into the
+ * dereference of the array.
+ */
+ ir_variable *var;
+
+ if (orig_assign) {
+ var = body.make_temp(orig_assign->rhs->type,
+ "dereference_array_value");
+
+ body.emit(assign(var, orig_assign->rhs));
+ } else {
+ var = body.make_temp(orig_deref->type,
+ "dereference_array_value");
+ }
+
+ /* Store the index to a temporary to avoid reusing its tree. */
+ ir_variable *index = body.make_temp(orig_deref->array_index->type,
+ "dereference_array_index");
+
+ body.emit(assign(index, orig_deref->array_index));
+
+ orig_deref->array_index = deref(index).val;
+
+ assignment_generator ag;
+ ag.rvalue = orig_base;
+ ag.base_ir = base_ir;
+ ag.old_index = index;
+ ag.var = var;
+ if (orig_assign) {
+ ag.is_write = true;
+ ag.write_mask = orig_assign->write_mask;
+ } else {
+ ag.is_write = false;
+ }
+
+ switch_generator sg(ag, index, 4, 4);
+
+ /* If the original assignment has a condition, respect that original
+       * condition! This is accomplished by wrapping the new conditional
+ * assignments in an if-statement that uses the original condition.
+ */
+ if (orig_assign != NULL && orig_assign->condition != NULL) {
+ /* No need to clone the condition because the IR that it hangs on is
+ * going to be removed from the instruction sequence.
+ */
+ ir_if *if_stmt = new(mem_ctx) ir_if(orig_assign->condition);
+ ir_factory then_body(&if_stmt->then_instructions, body.mem_ctx);
+
+ sg.generate(0, length, then_body);
+ body.emit(if_stmt);
+ } else {
+ sg.generate(0, length, body);
+ }
+
+ base_ir->insert_before(&list);
+ return var;
+ }
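+
+   /* For illustration: a read such as x = arr[i] with a hypothetical
+    * two-element array becomes roughly
+    *
+    *    dereference_array_index = i;
+    *    dereference_array_value = arr[0];   (unconditional first read)
+    *    dereference_array_value = arr[1] if dereference_array_index == 1;
+    *    x = dereference_array_value;
+    *
+    * using the conditional-assignment form emitted by assignment_generator.
+    */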
+
+ virtual void handle_rvalue(ir_rvalue **pir)
+ {
+ if (this->in_assignee)
+ return;
+
+ if (!*pir)
+ return;
+
+ ir_dereference_array* orig_deref = (*pir)->as_dereference_array();
+ if (needs_lowering(orig_deref)) {
+ ir_variable *var =
+ convert_dereference_array(orig_deref, NULL, orig_deref);
+ assert(var);
+ *pir = new(ralloc_parent(base_ir)) ir_dereference_variable(var);
+ this->progress = true;
+ }
+ }
+
+ ir_visitor_status
+ visit_leave(ir_assignment *ir)
+ {
+ ir_rvalue_visitor::visit_leave(ir);
+
+ find_variable_index f;
+ ir->lhs->accept(&f);
+
+ if (f.deref != NULL && storage_type_needs_lowering(f.deref)) {
+ convert_dereference_array(f.deref, ir, ir->lhs);
+ ir->remove();
+ this->progress = true;
+ }
+
+ return visit_continue;
+ }
+};
+
+} /* anonymous namespace */
+
+bool
+lower_variable_index_to_cond_assign(gl_shader_stage stage,
+ exec_list *instructions,
+ bool lower_input,
+ bool lower_output,
+ bool lower_temp,
+ bool lower_uniform)
+{
+ variable_index_to_cond_assign_visitor v(stage,
+ lower_input,
+ lower_output,
+ lower_temp,
+ lower_uniform);
+
+ /* Continue lowering until no progress is made. If there are multiple
+ * levels of indirection (e.g., non-constant indexing of array elements and
+ * matrix columns of an array of matrix), each pass will only lower one
+ * level of indirection.
+ */
+ bool progress_ever = false;
+ do {
+ v.progress = false;
+ visit_list_elements(&v, instructions);
+ progress_ever = v.progress || progress_ever;
+ } while (v.progress);
+
+ return progress_ever;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp
new file mode 100644
index 0000000000..8924426660
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_vec_index_to_cond_assign.cpp
+ *
+ * Turns non-constant indexing of vector types into a series of
+ * conditional moves of each channel into a temporary.
+ *
+ * Most GPUs don't have a native way to do this operation, and this
+ * works around that. For drivers using both this pass and
+ * ir_vec_index_to_swizzle, there's a risk that this pass will happen
+ * before sufficient constant folding to find that the array index is
+ * constant. However, we hope that other optimization passes,
+ * particularly constant folding of assignment conditions and copy
+ * propagation, will result in the same code in the end.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "ir_builder.h"
+
+using namespace ir_builder;
+
+namespace {
+
+/**
+ * Visitor class for lowering non-constant vector indexing to conditional
+ * assignments.
+ */
+
+class ir_vec_index_to_cond_assign_visitor : public ir_hierarchical_visitor {
+public:
+ ir_vec_index_to_cond_assign_visitor()
+ : progress(false)
+ {
+ /* empty */
+ }
+
+ ir_rvalue *convert_vec_index_to_cond_assign(void *mem_ctx,
+ ir_rvalue *orig_vector,
+ ir_rvalue *orig_index,
+ const glsl_type *type);
+
+ ir_rvalue *convert_vector_extract_to_cond_assign(ir_rvalue *ir);
+
+ virtual ir_visitor_status visit_enter(ir_expression *);
+ virtual ir_visitor_status visit_enter(ir_swizzle *);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_return *);
+ virtual ir_visitor_status visit_enter(ir_call *);
+ virtual ir_visitor_status visit_enter(ir_if *);
+
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+ir_rvalue *
+ir_vec_index_to_cond_assign_visitor::convert_vec_index_to_cond_assign(void *mem_ctx,
+ ir_rvalue *orig_vector,
+ ir_rvalue *orig_index,
+ const glsl_type *type)
+{
+ exec_list list;
+ ir_factory body(&list, base_ir);
+
+ /* Store the index to a temporary to avoid reusing its tree. */
+ assert(orig_index->type == glsl_type::int_type ||
+ orig_index->type == glsl_type::uint_type);
+ ir_variable *const index =
+ body.make_temp(orig_index->type, "vec_index_tmp_i");
+
+ body.emit(assign(index, orig_index));
+
+   /* Store the value in a temporary to avoid duplicating its expression tree. */
+ ir_variable *const value =
+ body.make_temp(orig_vector->type, "vec_value_tmp");
+
+ body.emit(assign(value, orig_vector));
+
+
+ /* Temporary where we store whichever value we swizzle out. */
+ ir_variable *const var = body.make_temp(type, "vec_index_tmp_v");
+
+ /* Generate a single comparison condition "mask" for all of the components
+ * in the vector.
+ */
+ ir_variable *const cond =
+ compare_index_block(body, index, 0, orig_vector->type->vector_elements);
+
+ /* Generate a conditional move of each vector element to the temp. */
+ for (unsigned i = 0; i < orig_vector->type->vector_elements; i++)
+ body.emit(assign(var, swizzle(value, i, 1), swizzle(cond, i, 1)));
+
+ /* Put all of the new instructions in the IR stream before the old
+ * instruction.
+ */
+ base_ir->insert_before(&list);
+
+ this->progress = true;
+ return deref(var).val;
+}
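+
+/* For illustration: reading v[i] from a hypothetical vec3 v becomes
+ * roughly
+ *
+ *    vec_index_tmp_i = i;
+ *    vec_value_tmp = v;
+ *    cond = equal(vec_index_tmp_i.xxx, ivec3(0, 1, 2));
+ *    vec_index_tmp_v = vec_value_tmp.x if cond.x;
+ *    vec_index_tmp_v = vec_value_tmp.y if cond.y;
+ *    vec_index_tmp_v = vec_value_tmp.z if cond.z;
+ *
+ * with cond the per-component comparison mask built by
+ * compare_index_block().
+ */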
+
+ir_rvalue *
+ir_vec_index_to_cond_assign_visitor::convert_vector_extract_to_cond_assign(ir_rvalue *ir)
+{
+ ir_expression *const expr = ir->as_expression();
+
+ if (expr == NULL)
+ return ir;
+
+ if (expr->operation == ir_unop_interpolate_at_centroid ||
+ expr->operation == ir_binop_interpolate_at_offset ||
+ expr->operation == ir_binop_interpolate_at_sample) {
+ /* Lower interpolateAtXxx(some_vec[idx], ...) to
+ * interpolateAtXxx(some_vec, ...)[idx] before lowering to conditional
+ * assignments, to maintain the rule that the interpolant is an l-value
+ * referring to a (part of a) shader input.
+ *
+ * This is required when idx is dynamic (otherwise it gets lowered to
+ * a swizzle).
+ */
+ ir_expression *const interpolant = expr->operands[0]->as_expression();
+ if (!interpolant || interpolant->operation != ir_binop_vector_extract)
+ return ir;
+
+ ir_rvalue *vec_input = interpolant->operands[0];
+ ir_expression *const vec_interpolate =
+ new(base_ir) ir_expression(expr->operation, vec_input->type,
+ vec_input, expr->operands[1]);
+
+ return convert_vec_index_to_cond_assign(ralloc_parent(ir),
+ vec_interpolate,
+ interpolant->operands[1],
+ ir->type);
+ }
+
+ if (expr->operation != ir_binop_vector_extract)
+ return ir;
+
+ return convert_vec_index_to_cond_assign(ralloc_parent(ir),
+ expr->operands[0],
+ expr->operands[1],
+ ir->type);
+}
+
+ir_visitor_status
+ir_vec_index_to_cond_assign_visitor::visit_enter(ir_expression *ir)
+{
+ for (unsigned i = 0; i < ir->num_operands; i++)
+ ir->operands[i] = convert_vector_extract_to_cond_assign(ir->operands[i]);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_vec_index_to_cond_assign_visitor::visit_enter(ir_swizzle *ir)
+{
+   /* Can't be hit from normal GLSL, since you can't swizzle a scalar (which
+    * is what the result of indexing a vector is). But maybe at some point
+    * we'll end up using swizzling of scalars for vector construction.
+ */
+ ir->val = convert_vector_extract_to_cond_assign(ir->val);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_vec_index_to_cond_assign_visitor::visit_leave(ir_assignment *ir)
+{
+ ir->rhs = convert_vector_extract_to_cond_assign(ir->rhs);
+
+ if (ir->condition)
+ ir->condition = convert_vector_extract_to_cond_assign(ir->condition);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_vec_index_to_cond_assign_visitor::visit_enter(ir_call *ir)
+{
+ foreach_in_list_safe(ir_rvalue, param, &ir->actual_parameters) {
+ ir_rvalue *new_param = convert_vector_extract_to_cond_assign(param);
+
+ if (new_param != param) {
+ param->replace_with(new_param);
+ }
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_vec_index_to_cond_assign_visitor::visit_enter(ir_return *ir)
+{
+ if (ir->value)
+ ir->value = convert_vector_extract_to_cond_assign(ir->value);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_vec_index_to_cond_assign_visitor::visit_enter(ir_if *ir)
+{
+ ir->condition = convert_vector_extract_to_cond_assign(ir->condition);
+
+ return visit_continue;
+}
+
+bool
+do_vec_index_to_cond_assign(exec_list *instructions)
+{
+ ir_vec_index_to_cond_assign_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp
new file mode 100644
index 0000000000..fdbad16a34
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_vec_index_to_swizzle.cpp
+ *
+ * Turns constant indexing of vector types into swizzles. This lets
+ * other swizzle-aware optimization passes catch these constructs,
+ * and spares codegen backends from having to handle this case.
+ */
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "main/macros.h"
+
+namespace {
+
+class ir_vec_index_to_swizzle_visitor : public ir_rvalue_visitor {
+public:
+ ir_vec_index_to_swizzle_visitor()
+ {
+ progress = false;
+ }
+
+ ir_rvalue *convert_vector_extract_to_swizzle(ir_rvalue *val);
+
+ virtual void handle_rvalue(ir_rvalue **);
+
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+void
+ir_vec_index_to_swizzle_visitor::handle_rvalue(ir_rvalue **rv)
+{
+ if (*rv == NULL)
+ return;
+
+ ir_expression *const expr = (*rv)->as_expression();
+ if (expr == NULL || expr->operation != ir_binop_vector_extract)
+ return;
+
+ void *mem_ctx = ralloc_parent(expr);
+ ir_constant *const idx =
+ expr->operands[1]->constant_expression_value(mem_ctx);
+ if (idx == NULL)
+ return;
+
+ this->progress = true;
+
+ /* Page 40 of the GLSL 1.20 spec says:
+ *
+ * "When indexing with non-constant expressions, behavior is undefined
+ * if the index is negative, or greater than or equal to the size of
+ * the vector."
+ *
+ * The quoted spec text mentions non-constant expressions, but this code
+ * operates on constants. These constants are the result of non-constant
+ * expressions that have been optimized to constants. The common case here
+ * is a loop counter from an unrolled loop that is used to index a vector.
+ *
+ * The ir_swizzle constructor gets angry if the index is negative or too
+    * large. For simplicity's sake, just clamp the index to [0, size-1].
+ */
+ const int i = CLAMP(idx->value.i[0], 0,
+ (int) expr->operands[0]->type->vector_elements - 1);
+
+ *rv = new(mem_ctx) ir_swizzle(expr->operands[0], i, 0, 0, 0, 1);
+}
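+
+/* For illustration: once the index folds to a constant, v[2] becomes the
+ * swizzle v.z, and an out-of-range constant such as v[7] on a vec4 is
+ * clamped and becomes v.w.
+ */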
+
+bool
+do_vec_index_to_swizzle(exec_list *instructions)
+{
+ ir_vec_index_to_swizzle_visitor v;
+
+ v.run(instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector.cpp
new file mode 100644
index 0000000000..4024644b06
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector.cpp
@@ -0,0 +1,228 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_vector.cpp
+ * IR lowering pass to remove some types of ir_quadop_vector
+ *
+ * \author Ian Romanick <ian.d.romanick@intel.com>
+ */
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+
+namespace {
+
+class lower_vector_visitor : public ir_rvalue_visitor {
+public:
+ lower_vector_visitor() : dont_lower_swz(false), progress(false)
+ {
+ /* empty */
+ }
+
+ void handle_rvalue(ir_rvalue **rvalue);
+
+ /**
+ * Should SWZ-like expressions be lowered?
+ */
+ bool dont_lower_swz;
+
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+/**
+ * Determine if an IR expression tree looks like an extended swizzle
+ *
+ * Extended swizzles consist of access of a single vector source (with possible
+ * per component negation) and the constants -1, 0, or 1.
+ */
+static bool
+is_extended_swizzle(ir_expression *ir)
+{
+ /* Track any variables that are accessed by this expression.
+ */
+ ir_variable *var = NULL;
+
+ assert(ir->operation == ir_quadop_vector);
+
+ for (unsigned i = 0; i < ir->type->vector_elements; i++) {
+ ir_rvalue *op = ir->operands[i];
+
+ while (op != NULL) {
+ switch (op->ir_type) {
+ case ir_type_constant: {
+ const ir_constant *const c = op->as_constant();
+
+ if (!c->is_one() && !c->is_zero() && !c->is_negative_one())
+ return false;
+
+ op = NULL;
+ break;
+ }
+
+ case ir_type_dereference_variable: {
+ ir_dereference_variable *const d = (ir_dereference_variable *) op;
+
+ if ((var != NULL) && (var != d->var))
+ return false;
+
+ var = d->var;
+ op = NULL;
+ break;
+ }
+
+ case ir_type_expression: {
+ ir_expression *const ex = (ir_expression *) op;
+
+ if (ex->operation != ir_unop_neg)
+ return false;
+
+ op = ex->operands[0];
+ break;
+ }
+
+ case ir_type_swizzle:
+ op = ((ir_swizzle *) op)->val;
+ break;
+
+ default:
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+void
+lower_vector_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_expression *expr = (*rvalue)->as_expression();
+ if ((expr == NULL) || (expr->operation != ir_quadop_vector))
+ return;
+
+ if (this->dont_lower_swz && is_extended_swizzle(expr))
+ return;
+
+ /* FINISHME: Is this the right thing to use for the ralloc context?
+ */
+ void *const mem_ctx = expr;
+
+ assert(expr->type->vector_elements == expr->num_operands);
+
+ /* Generate a temporary with the same type as the ir_quadop_operation.
+ */
+ ir_variable *const temp =
+ new(mem_ctx) ir_variable(expr->type, "vecop_tmp", ir_var_temporary);
+
+ this->base_ir->insert_before(temp);
+
+ /* Counter of the number of components collected so far.
+ */
+ unsigned assigned;
+
+ /* Write-mask in the destination that receives counted by 'assigned'.
+ */
+ unsigned write_mask;
+
+
+ /* Generate upto four assignments to that variable. Try to group component
+ * assignments together:
+ *
+ * - All constant components can be assigned at once.
+ * - All assigments of components from a single variable with the same
+ * unary operator can be assigned at once.
+ */
+ ir_constant_data d = { { 0 } };
+
+ assigned = 0;
+ write_mask = 0;
+ for (unsigned i = 0; i < expr->type->vector_elements; i++) {
+ const ir_constant *const c = expr->operands[i]->as_constant();
+
+ if (c == NULL)
+ continue;
+
+ switch (expr->type->base_type) {
+ case GLSL_TYPE_UINT: d.u[assigned] = c->value.u[0]; break;
+ case GLSL_TYPE_INT: d.i[assigned] = c->value.i[0]; break;
+ case GLSL_TYPE_FLOAT: d.f[assigned] = c->value.f[0]; break;
+ case GLSL_TYPE_BOOL: d.b[assigned] = c->value.b[0]; break;
+ default: assert(!"Should not get here."); break;
+ }
+
+ write_mask |= (1U << i);
+ assigned++;
+ }
+
+ assert((write_mask == 0) == (assigned == 0));
+
+ /* If there were constant values, generate an assignment.
+ */
+ if (assigned > 0) {
+ ir_constant *const c =
+ new(mem_ctx) ir_constant(glsl_type::get_instance(expr->type->base_type,
+ assigned, 1),
+ &d);
+ ir_dereference *const lhs = new(mem_ctx) ir_dereference_variable(temp);
+ ir_assignment *const assign =
+ new(mem_ctx) ir_assignment(lhs, c, NULL, write_mask);
+
+ this->base_ir->insert_before(assign);
+ }
+
+ /* FINISHME: This should try to coalesce assignments.
+ */
+ for (unsigned i = 0; i < expr->type->vector_elements; i++) {
+ if (expr->operands[i]->ir_type == ir_type_constant)
+ continue;
+
+ ir_dereference *const lhs = new(mem_ctx) ir_dereference_variable(temp);
+ ir_assignment *const assign =
+ new(mem_ctx) ir_assignment(lhs, expr->operands[i], NULL, (1U << i));
+
+ this->base_ir->insert_before(assign);
+ assigned++;
+ }
+
+ assert(assigned == expr->type->vector_elements);
+
+ *rvalue = new(mem_ctx) ir_dereference_variable(temp);
+ this->progress = true;
+}
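+
+/* For illustration: a hypothetical expression vec4(a, b, 0.0, 1.0) is
+ * lowered by the code above to roughly
+ *
+ *    vecop_tmp.zw = vec2(0.0, 1.0);   (all constants in one write)
+ *    vecop_tmp.x = a;
+ *    vecop_tmp.y = b;
+ *
+ * followed by a dereference of vecop_tmp in place of the expression.
+ */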
+
+bool
+lower_quadop_vector(exec_list *instructions, bool dont_lower_swz)
+{
+ lower_vector_visitor v;
+
+ v.dont_lower_swz = dont_lower_swz;
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp
new file mode 100644
index 0000000000..0c09630fa0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+namespace {
+
+class vector_deref_visitor : public ir_rvalue_enter_visitor {
+public:
+ vector_deref_visitor(void *mem_ctx, gl_shader_stage shader_stage)
+ : progress(false), shader_stage(shader_stage),
+ factory(&factory_instructions, mem_ctx)
+ {
+ }
+
+ virtual ~vector_deref_visitor()
+ {
+ }
+
+ virtual void handle_rvalue(ir_rvalue **rv);
+ virtual ir_visitor_status visit_enter(ir_assignment *ir);
+
+ bool progress;
+ gl_shader_stage shader_stage;
+ exec_list factory_instructions;
+ ir_factory factory;
+};
+
+} /* anonymous namespace */
+
+ir_visitor_status
+vector_deref_visitor::visit_enter(ir_assignment *ir)
+{
+ if (!ir->lhs || ir->lhs->ir_type != ir_type_dereference_array)
+ return ir_rvalue_enter_visitor::visit_enter(ir);
+
+ ir_dereference_array *const deref = (ir_dereference_array *) ir->lhs;
+ if (!deref->array->type->is_vector())
+ return ir_rvalue_enter_visitor::visit_enter(ir);
+
+ /* SSBOs and shared variables are backed by memory and may be accessed by
+ * multiple threads simultaneously. It's not safe to lower a single
+ * component store to a load-vec-store because it may race with writes to
+ * other components.
+ */
+ ir_variable *var = deref->variable_referenced();
+ if (var->data.mode == ir_var_shader_storage ||
+ var->data.mode == ir_var_shader_shared)
+ return ir_rvalue_enter_visitor::visit_enter(ir);
+
+ ir_rvalue *const new_lhs = deref->array;
+
+ void *mem_ctx = ralloc_parent(ir);
+ ir_constant *old_index_constant =
+ deref->array_index->constant_expression_value(mem_ctx);
+ if (!old_index_constant) {
+ if (shader_stage == MESA_SHADER_TESS_CTRL &&
+ deref->variable_referenced()->data.mode == ir_var_shader_out) {
+ /* Tessellation control shader outputs act as if they have memory
+ * backing them and if we have writes from multiple threads
+ * targeting the same vec4 (this can happen for patch outputs), the
+ * load-vec-store pattern of ir_triop_vector_insert doesn't work.
+ * Instead, we have to lower to a series of conditional write-masked
+ * assignments.
+ */
+ ir_variable *const src_temp =
+ factory.make_temp(ir->rhs->type, "scalar_tmp");
+
+ /* The newly created variable declaration goes before the assignment
+ * because we're going to set it as the new LHS.
+ */
+ ir->insert_before(factory.instructions);
+ ir->set_lhs(new(mem_ctx) ir_dereference_variable(src_temp));
+
+ ir_variable *const arr_index =
+ factory.make_temp(deref->array_index->type, "index_tmp");
+ factory.emit(assign(arr_index, deref->array_index));
+
+ for (unsigned i = 0; i < new_lhs->type->vector_elements; i++) {
+ ir_constant *const cmp_index =
+ ir_constant::zero(factory.mem_ctx, deref->array_index->type);
+ cmp_index->value.u[0] = i;
+
+ ir_rvalue *const lhs_clone = new_lhs->clone(factory.mem_ctx, NULL);
+ ir_dereference_variable *const src_temp_deref =
+ new(mem_ctx) ir_dereference_variable(src_temp);
+
+ if (new_lhs->ir_type != ir_type_swizzle) {
+ assert(lhs_clone->as_dereference());
+ ir_assignment *cond_assign =
+ new(mem_ctx) ir_assignment(lhs_clone->as_dereference(),
+ src_temp_deref,
+ equal(arr_index, cmp_index),
+ WRITEMASK_X << i);
+ factory.emit(cond_assign);
+ } else {
+ ir_assignment *cond_assign =
+ new(mem_ctx) ir_assignment(swizzle(lhs_clone, i, 1),
+ src_temp_deref,
+ equal(arr_index, cmp_index));
+ factory.emit(cond_assign);
+ }
+ }
+ ir->insert_after(factory.instructions);
+ } else {
+ ir->rhs = new(mem_ctx) ir_expression(ir_triop_vector_insert,
+ new_lhs->type,
+ new_lhs->clone(mem_ctx, NULL),
+ ir->rhs,
+ deref->array_index);
+ ir->write_mask = (1 << new_lhs->type->vector_elements) - 1;
+ ir->set_lhs(new_lhs);
+ }
+ } else if (new_lhs->ir_type != ir_type_swizzle) {
+ ir->set_lhs(new_lhs);
+ ir->write_mask = 1 << old_index_constant->get_uint_component(0);
+ } else {
+ /* If the "new" LHS is a swizzle, use the set_lhs helper to instead
+ * swizzle the RHS.
+ */
+ unsigned component[1] = { old_index_constant->get_uint_component(0) };
+ ir->set_lhs(new(mem_ctx) ir_swizzle(new_lhs, component, 1));
+ }
+
+ return ir_rvalue_enter_visitor::visit_enter(ir);
+}
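+
+/* For illustration: outside the TCS special case, a write v[i] = s with a
+ * non-constant index i becomes roughly
+ *
+ *    v = vector_insert(v, s, i);
+ *
+ * with a full write mask, while a constant index j instead narrows the
+ * write mask of the original assignment to component j.
+ */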
+
+void
+vector_deref_visitor::handle_rvalue(ir_rvalue **rv)
+{
+ if (*rv == NULL || (*rv)->ir_type != ir_type_dereference_array)
+ return;
+
+ ir_dereference_array *const deref = (ir_dereference_array *) *rv;
+ if (!deref->array->type->is_vector())
+ return;
+
+ /* Back-ends need to be able to handle derefs on vectors for SSBOs, UBOs,
+ * and shared variables. They have to handle it for writes anyway so we
+ * may as well require it for reads.
+ */
+ ir_variable *var = deref->variable_referenced();
+ if (var && (var->data.mode == ir_var_shader_storage ||
+ var->data.mode == ir_var_shader_shared ||
+ (var->data.mode == ir_var_uniform &&
+ var->get_interface_type())))
+ return;
+
+ void *mem_ctx = ralloc_parent(deref);
+ *rv = new(mem_ctx) ir_expression(ir_binop_vector_extract,
+ deref->array,
+ deref->array_index);
+}
+
+bool
+lower_vector_derefs(gl_linked_shader *shader)
+{
+ vector_deref_visitor v(shader->ir, shader->Stage);
+
+ visit_list_elements(&v, shader->ir);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp
new file mode 100644
index 0000000000..ceaa5887c8
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp
@@ -0,0 +1,147 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "ir.h"
+#include "ir_builder.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+
+using namespace ir_builder;
+
+namespace {
+
+class vector_insert_visitor : public ir_rvalue_visitor {
+public:
+ vector_insert_visitor(bool lower_nonconstant_index)
+ : progress(false), lower_nonconstant_index(lower_nonconstant_index)
+ {
+ factory.instructions = &factory_instructions;
+ }
+
+ virtual ~vector_insert_visitor()
+ {
+ assert(factory_instructions.is_empty());
+ }
+
+ virtual void handle_rvalue(ir_rvalue **rv);
+
+ ir_factory factory;
+ exec_list factory_instructions;
+ bool progress;
+ bool lower_nonconstant_index;
+};
+
+} /* anonymous namespace */
+
+void
+vector_insert_visitor::handle_rvalue(ir_rvalue **rv)
+{
+ if (*rv == NULL || (*rv)->ir_type != ir_type_expression)
+ return;
+
+ ir_expression *const expr = (ir_expression *) *rv;
+
+ if (likely(expr->operation != ir_triop_vector_insert))
+ return;
+
+ factory.mem_ctx = ralloc_parent(expr);
+
+ ir_constant *const idx =
+ expr->operands[2]->constant_expression_value(factory.mem_ctx);
+ if (idx != NULL) {
+ /* Replace (vector_insert (vec) (scalar) (index)) with a dereference of
+ * a new temporary. The new temporary gets assigned as
+ *
+ * t = vec
+ * t.mask = scalar
+ *
+ * where mask is the component selected by index.
+ */
+ ir_variable *const temp =
+ factory.make_temp(expr->operands[0]->type, "vec_tmp");
+
+ const int mask = 1 << idx->value.i[0];
+
+ factory.emit(assign(temp, expr->operands[0]));
+ factory.emit(assign(temp, expr->operands[1], mask));
+
+ this->progress = true;
+ *rv = new(factory.mem_ctx) ir_dereference_variable(temp);
+ } else if (this->lower_nonconstant_index) {
+ /* Replace (vector_insert (vec) (scalar) (index)) with a dereference of
+ * a new temporary. The new temporary gets assigned as
+ *
+ * t = vec
+ * if (index == 0)
+ * t.x = scalar
+ * if (index == 1)
+ * t.y = scalar
+ * if (index == 2)
+ * t.z = scalar
+ * if (index == 3)
+ * t.w = scalar
+ */
+ ir_variable *const temp =
+ factory.make_temp(expr->operands[0]->type, "vec_tmp");
+
+ ir_variable *const src_temp =
+ factory.make_temp(expr->operands[1]->type, "src_temp");
+
+ factory.emit(assign(temp, expr->operands[0]));
+ factory.emit(assign(src_temp, expr->operands[1]));
+
+ assert(expr->operands[2]->type == glsl_type::int_type ||
+ expr->operands[2]->type == glsl_type::uint_type);
+
+ for (unsigned i = 0; i < expr->type->vector_elements; i++) {
+ ir_constant *const cmp_index =
+ ir_constant::zero(factory.mem_ctx, expr->operands[2]->type);
+ cmp_index->value.u[0] = i;
+
+ ir_variable *const cmp_result =
+ factory.make_temp(glsl_type::bool_type, "index_condition");
+
+ factory.emit(assign(cmp_result,
+ equal(expr->operands[2]->clone(factory.mem_ctx,
+ NULL),
+ cmp_index)));
+
+ factory.emit(if_tree(cmp_result,
+ assign(temp, src_temp, WRITEMASK_X << i)));
+ }
+
+ this->progress = true;
+ *rv = new(factory.mem_ctx) ir_dereference_variable(temp);
+ }
+
+ base_ir->insert_before(factory.instructions);
+}
+
+bool
+lower_vector_insert(exec_list *instructions, bool lower_nonconstant_index)
+{
+ vector_insert_visitor v(lower_nonconstant_index);
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp
new file mode 100644
index 0000000000..3b641caa01
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_vertex_id.cpp
+ *
+ * There exists hardware, such as i965, that does not implement the OpenGL
+ * semantic for gl_VertexID; such hardware does not include the value of
+ * basevertex in the gl_VertexID value. To implement the OpenGL semantic,
+ * we have to convert gl_VertexID to gl_VertexIDMESA + gl_BaseVertexMESA.
+ */
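+
+/* A sketch of the resulting IR, expressed as GLSL:
+ *
+ *    int __VertexID = gl_VertexIDMESA + gl_BaseVertex;
+ *
+ * is inserted at the top of main(), and every read of gl_VertexID is
+ * redirected to __VertexID.
+ */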
+
+#include "glsl_symbol_table.h"
+#include "ir_hierarchical_visitor.h"
+#include "ir.h"
+#include "ir_builder.h"
+#include "linker.h"
+#include "program/prog_statevars.h"
+#include "builtin_functions.h"
+#include "main/mtypes.h"
+
+namespace {
+
+class lower_vertex_id_visitor : public ir_hierarchical_visitor {
+public:
+ explicit lower_vertex_id_visitor(ir_function_signature *main_sig,
+ exec_list *ir_list)
+ : progress(false), VertexID(NULL), gl_VertexID(NULL),
+ gl_BaseVertex(NULL), main_sig(main_sig), ir_list(ir_list)
+ {
+ foreach_in_list(ir_instruction, ir, ir_list) {
+ ir_variable *const var = ir->as_variable();
+
+ if (var != NULL && var->data.mode == ir_var_system_value &&
+ var->data.location == SYSTEM_VALUE_BASE_VERTEX) {
+ gl_BaseVertex = var;
+ break;
+ }
+ }
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ bool progress;
+
+private:
+ ir_variable *VertexID;
+ ir_variable *gl_VertexID;
+ ir_variable *gl_BaseVertex;
+
+ ir_function_signature *main_sig;
+ exec_list *ir_list;
+};
+
+} /* anonymous namespace */
+
+ir_visitor_status
+lower_vertex_id_visitor::visit(ir_dereference_variable *ir)
+{
+ if (ir->var->data.mode != ir_var_system_value ||
+ ir->var->data.location != SYSTEM_VALUE_VERTEX_ID)
+ return visit_continue;
+
+ if (VertexID == NULL) {
+ const glsl_type *const int_t = glsl_type::int_type;
+ void *const mem_ctx = ralloc_parent(ir);
+
+ VertexID = new(mem_ctx) ir_variable(int_t, "__VertexID",
+ ir_var_temporary);
+ ir_list->push_head(VertexID);
+
+ gl_VertexID = new(mem_ctx) ir_variable(int_t, "gl_VertexIDMESA",
+ ir_var_system_value);
+ gl_VertexID->data.how_declared = ir_var_declared_implicitly;
+ gl_VertexID->data.read_only = true;
+ gl_VertexID->data.location = SYSTEM_VALUE_VERTEX_ID_ZERO_BASE;
+ gl_VertexID->data.explicit_location = true;
+ gl_VertexID->data.explicit_index = 0;
+ ir_list->push_head(gl_VertexID);
+
+ if (gl_BaseVertex == NULL) {
+ gl_BaseVertex = new(mem_ctx) ir_variable(int_t, "gl_BaseVertex",
+ ir_var_system_value);
+ gl_BaseVertex->data.how_declared = ir_var_hidden;
+ gl_BaseVertex->data.read_only = true;
+ gl_BaseVertex->data.location = SYSTEM_VALUE_BASE_VERTEX;
+ gl_BaseVertex->data.explicit_location = true;
+ gl_BaseVertex->data.explicit_index = 0;
+ ir_list->push_head(gl_BaseVertex);
+ }
+
+ ir_instruction *const inst =
+ ir_builder::assign(VertexID,
+ ir_builder::add(gl_VertexID, gl_BaseVertex));
+
+ main_sig->body.push_head(inst);
+ }
+
+ ir->var = VertexID;
+ progress = true;
+
+ return visit_continue;
+}
+
+bool
+lower_vertex_id(gl_linked_shader *shader)
+{
+ /* gl_VertexID only exists in the vertex shader.
+ */
+ if (shader->Stage != MESA_SHADER_VERTEX)
+ return false;
+
+ ir_function_signature *const main_sig =
+ _mesa_get_main_function_signature(shader->symbols);
+ if (main_sig == NULL) {
+ assert(main_sig != NULL);
+ return false;
+ }
+
+ lower_vertex_id_visitor v(main_sig, shader->ir);
+
+ v.run(shader->ir);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp
new file mode 100644
index 0000000000..d460bbd5ca
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp
@@ -0,0 +1,222 @@
+/*
+ * Copyright © 2019 Collabora Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file lower_xfb_varying.cpp
+ *
+ * Lowers a transform-feedback varying: a new shader output is created as a
+ * copy of the original value, and the copy is spliced in before every
+ * return from main().
+ */
+
+#include "ir.h"
+#include "main/mtypes.h"
+#include "glsl_symbol_table.h"
+#include "util/strndup.h"
+
+namespace {
+
+/**
+ * Visitor that splices varying packing code before every return.
+ */
+class lower_xfb_var_splicer : public ir_hierarchical_visitor
+{
+public:
+ explicit lower_xfb_var_splicer(void *mem_ctx,
+ const exec_list *instructions);
+
+ virtual ir_visitor_status visit_leave(ir_return *ret);
+ virtual ir_visitor_status visit_leave(ir_function_signature *sig);
+
+private:
+ /**
+ * Memory context used to allocate new instructions for the shader.
+ */
+ void * const mem_ctx;
+
+ /**
+ * Instructions that should be spliced into place before each return.
+ */
+ const exec_list *instructions;
+};
+
+} /* anonymous namespace */
+
+
+lower_xfb_var_splicer::lower_xfb_var_splicer(void *mem_ctx, const exec_list *instructions)
+ : mem_ctx(mem_ctx), instructions(instructions)
+{
+}
+
+ir_visitor_status
+lower_xfb_var_splicer::visit_leave(ir_return *ret)
+{
+ foreach_in_list(ir_instruction, ir, this->instructions) {
+ ret->insert_before(ir->clone(this->mem_ctx, NULL));
+ }
+ return visit_continue;
+}
+
+/** Insert a copy-back assignment at the end of the main() function */
+ir_visitor_status
+lower_xfb_var_splicer::visit_leave(ir_function_signature *sig)
+{
+ if (strcmp(sig->function_name(), "main") != 0)
+ return visit_continue;
+
+ if (((ir_instruction*)sig->body.get_tail())->ir_type == ir_type_return)
+ return visit_continue;
+
+ foreach_in_list(ir_instruction, ir, this->instructions) {
+ sig->body.push_tail(ir->clone(this->mem_ctx, NULL));
+ }
+
+ return visit_continue;
+}
+
+static char*
+get_field_name(const char *name)
+{
+ const char *first_dot = strchr(name, '.');
+ const char *first_square_bracket = strchr(name, '[');
+ int name_size = 0;
+
+ if (!first_square_bracket && !first_dot)
+ name_size = strlen(name);
+ else if ((!first_square_bracket ||
+ (first_dot && first_dot < first_square_bracket)))
+ name_size = first_dot - name;
+ else
+ name_size = first_square_bracket - name;
+
+ return strndup(name, name_size);
+}
+
+/* Generate a new name given the old xfb declaration string, by replacing
+ * dots with '_', brackets with '@', and appending "-xfb". */
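+/* For example, "block.member[1]" becomes "block_member@1@-xfb". */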
+static char *
+generate_new_name(void *mem_ctx, const char *name)
+{
+ char *new_name;
+ unsigned i = 0;
+
+ new_name = ralloc_strdup(mem_ctx, name);
+ while (new_name[i]) {
+ if (new_name[i] == '.') {
+ new_name[i] = '_';
+ } else if (new_name[i] == '[' || new_name[i] == ']') {
+ new_name[i] = '@';
+ }
+ i++;
+ }
+
+ if (!ralloc_strcat(&new_name, "-xfb")) {
+ ralloc_free(new_name);
+ return NULL;
+ }
+
+ return new_name;
+}
+
+/* Get the dereference for the given variable name. The function is called
+ * recursively to parse array indices and struct members. */
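+/* For example, "s.inner[2]" builds up
+ * array_deref(record_deref(var_deref(s), "inner"), 2)
+ * over three recursive calls. */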
+static bool
+get_deref(void *ctx,
+ const char *name,
+ struct gl_linked_shader *shader,
+ ir_dereference **deref,
+ const glsl_type **type)
+{
+ if (name[0] == '\0') {
+ /* End */
+ return (*deref != NULL);
+ } else if (name[0] == '[') {
+ /* Array index */
+ char *endptr = NULL;
+ unsigned index;
+
+ index = strtol(name + 1, &endptr, 10);
+ assert(*type != NULL && (*type)->is_array() && endptr[0] == ']');
+ *deref = new(ctx) ir_dereference_array(*deref, new(ctx) ir_constant(index));
+ *type = (*type)->without_array();
+ return get_deref(ctx, endptr + 1, shader, deref, type);
+ } else if (name[0] == '.') {
+ /* Struct member */
+ char *field = get_field_name(name + 1);
+
+ assert(*type != NULL && (*type)->is_struct() && field != NULL);
+ *deref = new(ctx) ir_dereference_record(*deref, field);
+ *type = (*type)->field_type(field);
+ assert(*type != glsl_type::error_type);
+ name += 1 + strlen(field);
+ free(field);
+ return get_deref(ctx, name, shader, deref, type);
+ } else {
+ /* Top level variable */
+ char *field = get_field_name(name);
+ ir_variable *toplevel_var;
+
+ toplevel_var = shader->symbols->get_variable(field);
+ name += strlen(field);
+ free(field);
+ if (toplevel_var == NULL) {
+ return false;
+ }
+
+ *deref = new (ctx) ir_dereference_variable(toplevel_var);
+ *type = toplevel_var->type;
+ return get_deref(ctx, name, shader, deref, type);
+ }
+}
+
+ir_variable *
+lower_xfb_varying(void *mem_ctx,
+ struct gl_linked_shader *shader,
+ const char *old_var_name)
+{
+ exec_list new_instructions;
+ char *new_var_name;
+ ir_dereference *deref = NULL;
+ const glsl_type *type = NULL;
+
+ if (!get_deref(mem_ctx, old_var_name, shader, &deref, &type)) {
+ if (deref) {
+ delete deref;
+ }
+ return NULL;
+ }
+
+ new_var_name = generate_new_name(mem_ctx, old_var_name);
+ ir_variable *new_variable
+ = new(mem_ctx) ir_variable(type, new_var_name, ir_var_shader_out);
+ new_variable->data.assigned = true;
+ new_variable->data.used = true;
+ shader->ir->push_head(new_variable);
+ ralloc_free(new_var_name);
+
+ ir_dereference *lhs = new(mem_ctx) ir_dereference_variable(new_variable);
+ ir_assignment *new_assignment = new(mem_ctx) ir_assignment(lhs, deref);
+ new_instructions.push_tail(new_assignment);
+
+ lower_xfb_var_splicer splicer(mem_ctx, &new_instructions);
+ visit_list_elements(&splicer, shader->ir);
+
+ return new_variable;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/main.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/main.cpp
new file mode 100644
index 0000000000..c826c279fa
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/main.cpp
@@ -0,0 +1,107 @@
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <getopt.h>
+
+/** @file main.cpp
+ *
+ * This file is the main() routine and scaffolding for producing
+ * builtin_compiler (which doesn't include builtins itself and is used
+ * to generate the profile information for builtin_function.cpp), and
+ * for glsl_compiler (which does include builtins and can be used to
+ * compile GLSL code offline and examine the resulting GLSL IR).
+ */
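+
+/* Example invocation of the glsl_compiler build described above
+ * (shader.frag stands in for any input file):
+ *
+ *    glsl_compiler --version 110 --dump-lir shader.frag
+ */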
+
+#include "main/mtypes.h"
+#include "standalone.h"
+
+static struct standalone_options options;
+
+const struct option compiler_opts[] = {
+ { "dump-ast", no_argument, &options.dump_ast, 1 },
+ { "dump-hir", no_argument, &options.dump_hir, 1 },
+ { "dump-lir", no_argument, &options.dump_lir, 1 },
+ { "dump-builder", no_argument, &options.dump_builder, 1 },
+ { "link", no_argument, &options.do_link, 1 },
+ { "just-log", no_argument, &options.just_log, 1 },
+ { "lower-precision", no_argument, &options.lower_precision, 1 },
+ { "version", required_argument, NULL, 'v' },
+ { NULL, 0, NULL, 0 }
+};
+
+/**
+ * \brief Print proper usage and exit with failure.
+ */
+static void
+usage_fail(const char *name)
+{
+ const char *header =
+ "usage: %s [options] <file.vert | file.tesc | file.tese | file.geom | file.frag | file.comp>\n"
+ "\n"
+ "Possible options are:\n";
+ printf(header, name);
+ for (const struct option *o = compiler_opts; o->name != 0; ++o) {
+ printf(" --%s", o->name);
+ if (o->has_arg == required_argument)
+ printf(" (mandatory)");
+ printf("\n");
+ }
+ exit(EXIT_FAILURE);
+}
+
+int
+main(int argc, char * const* argv)
+{
+ int status = EXIT_SUCCESS;
+
+ int c;
+ int idx = 0;
+ while ((c = getopt_long(argc, argv, "", compiler_opts, &idx)) != -1) {
+ switch (c) {
+ case 'v':
+ options.glsl_version = strtol(optarg, NULL, 10);
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (argc <= optind)
+ usage_fail(argv[0]);
+
+ struct gl_shader_program *whole_program;
+ static struct gl_context local_ctx;
+
+ whole_program = standalone_compile_shader(&options, argc - optind,
+ &argv[optind], &local_ctx);
+
+ if (!whole_program)
+ usage_fail(argv[0]);
+
+ standalone_compiler_cleanup(whole_program);
+
+ return status;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h
new file mode 100644
index 0000000000..9f970710a5
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef OPT_ADD_NEG_TO_SUB_H
+#define OPT_ADD_NEG_TO_SUB_H
+
+#include "ir.h"
+#include "ir_hierarchical_visitor.h"
+
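+/**
+ * Rewrites additions of a negation into subtractions:
+ *
+ *    (add a (neg b))  ->  (sub a b)
+ *    (add (neg a) b)  ->  (sub b a)
+ */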
+class add_neg_to_sub_visitor : public ir_hierarchical_visitor {
+public:
+ add_neg_to_sub_visitor()
+ {
+ /* empty */
+ }
+
+ ir_visitor_status visit_leave(ir_expression *ir)
+ {
+ if (ir->operation != ir_binop_add)
+ return visit_continue;
+
+ for (unsigned i = 0; i < 2; i++) {
+ ir_expression *const op = ir->operands[i]->as_expression();
+
+ if (op != NULL && op->operation == ir_unop_neg) {
+ ir->operation = ir_binop_sub;
+
+ /* This ensures that -a + b becomes b - a. */
+ if (i == 0)
+ ir->operands[0] = ir->operands[1];
+
+ ir->operands[1] = op->operands[0];
+ break;
+ }
+ }
+
+ return visit_continue;
+ }
+};
+
+#endif /* OPT_ADD_NEG_TO_SUB_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp
new file mode 100644
index 0000000000..7cef4fc6ef
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp
@@ -0,0 +1,1061 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_algebraic.cpp
+ *
+ * Takes advantage of associativity, commutativity, and other algebraic
+ * properties to simplify expressions.
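+ *
+ * A few of the identities applied below: x + 0 -> x, x * 1 -> x,
+ * !(a < b) -> a >= b, and min(max(x, 0.0), 1.0) -> sat(x).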
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "ir_builder.h"
+#include "compiler/glsl_types.h"
+#include "main/mtypes.h"
+
+using namespace ir_builder;
+
+namespace {
+
+/**
+ * Visitor class that simplifies expressions using algebraic identities.
+ */
+
+class ir_algebraic_visitor : public ir_rvalue_visitor {
+public:
+ ir_algebraic_visitor(bool native_integers,
+ const struct gl_shader_compiler_options *options)
+ : options(options)
+ {
+ this->progress = false;
+ this->mem_ctx = NULL;
+ this->native_integers = native_integers;
+ }
+
+ virtual ~ir_algebraic_visitor()
+ {
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *ir);
+
+ ir_rvalue *handle_expression(ir_expression *ir);
+ void handle_rvalue(ir_rvalue **rvalue);
+ bool reassociate_constant(ir_expression *ir1,
+ int const_index,
+ ir_constant *constant,
+ ir_expression *ir2);
+ void reassociate_operands(ir_expression *ir1,
+ int op1,
+ ir_expression *ir2,
+ int op2);
+ ir_rvalue *swizzle_if_required(ir_expression *expr,
+ ir_rvalue *operand);
+
+ const struct gl_shader_compiler_options *options;
+ void *mem_ctx;
+
+ bool native_integers;
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+ir_visitor_status
+ir_algebraic_visitor::visit_enter(ir_assignment *ir)
+{
+ ir_variable *var = ir->lhs->variable_referenced();
+ if (var->data.invariant || var->data.precise) {
+ /* If we're assigning to an invariant or precise variable, just bail.
+ * Most of the algebraic optimizations aren't precision-safe.
+ *
+ * FINISHME: Find out which optimizations are precision-safe and enable
+ * them only for invariant or precise trees.
+ */
+ return visit_continue_with_parent;
+ } else {
+ return visit_continue;
+ }
+}
+
+static inline bool
+is_vec_zero(ir_constant *ir)
+{
+ return (ir == NULL) ? false : ir->is_zero();
+}
+
+static inline bool
+is_vec_one(ir_constant *ir)
+{
+ return (ir == NULL) ? false : ir->is_one();
+}
+
+static inline bool
+is_vec_two(ir_constant *ir)
+{
+ return (ir == NULL) ? false : ir->is_value(2.0, 2);
+}
+
+static inline bool
+is_vec_four(ir_constant *ir)
+{
+ return (ir == NULL) ? false : ir->is_value(4.0, 4);
+}
+
+static inline bool
+is_vec_negative_one(ir_constant *ir)
+{
+ return (ir == NULL) ? false : ir->is_negative_one();
+}
+
+static inline bool
+is_valid_vec_const(ir_constant *ir)
+{
+ if (ir == NULL)
+ return false;
+
+ if (!ir->type->is_scalar() && !ir->type->is_vector())
+ return false;
+
+ return true;
+}
+
+static inline bool
+is_less_than_one(ir_constant *ir)
+{
+ assert(ir->type->is_float());
+
+ if (!is_valid_vec_const(ir))
+ return false;
+
+ unsigned component = 0;
+ for (int c = 0; c < ir->type->vector_elements; c++) {
+ if (ir->get_float_component(c) < 1.0f)
+ component++;
+ }
+
+ return (component == ir->type->vector_elements);
+}
+
+static inline bool
+is_greater_than_zero(ir_constant *ir)
+{
+ assert(ir->type->is_float());
+
+ if (!is_valid_vec_const(ir))
+ return false;
+
+ unsigned component = 0;
+ for (int c = 0; c < ir->type->vector_elements; c++) {
+ if (ir->get_float_component(c) > 0.0f)
+ component++;
+ }
+
+ return (component == ir->type->vector_elements);
+}
+
+static void
+update_type(ir_expression *ir)
+{
+ if (ir->operands[0]->type->is_vector())
+ ir->type = ir->operands[0]->type;
+ else
+ ir->type = ir->operands[1]->type;
+}
+
+/* Recognize (v.x + v.y) + (v.z + v.w) as dot(v, 1.0) */
+static ir_expression *
+try_replace_with_dot(ir_expression *expr0, ir_expression *expr1, void *mem_ctx)
+{
+ if (expr0 && expr0->operation == ir_binop_add &&
+ expr0->type->is_float() &&
+ expr1 && expr1->operation == ir_binop_add &&
+ expr1->type->is_float()) {
+ ir_swizzle *x = expr0->operands[0]->as_swizzle();
+ ir_swizzle *y = expr0->operands[1]->as_swizzle();
+ ir_swizzle *z = expr1->operands[0]->as_swizzle();
+ ir_swizzle *w = expr1->operands[1]->as_swizzle();
+
+ if (!x || x->mask.num_components != 1 ||
+ !y || y->mask.num_components != 1 ||
+ !z || z->mask.num_components != 1 ||
+ !w || w->mask.num_components != 1) {
+ return NULL;
+ }
+
+ bool swiz_seen[4] = {false, false, false, false};
+ swiz_seen[x->mask.x] = true;
+ swiz_seen[y->mask.x] = true;
+ swiz_seen[z->mask.x] = true;
+ swiz_seen[w->mask.x] = true;
+
+ if (!swiz_seen[0] || !swiz_seen[1] ||
+ !swiz_seen[2] || !swiz_seen[3]) {
+ return NULL;
+ }
+
+ if (x->val->equals(y->val) &&
+ x->val->equals(z->val) &&
+ x->val->equals(w->val)) {
+ return dot(x->val, new(mem_ctx) ir_constant(1.0f, 4));
+ }
+ }
+ return NULL;
+}
+
+void
+ir_algebraic_visitor::reassociate_operands(ir_expression *ir1,
+ int op1,
+ ir_expression *ir2,
+ int op2)
+{
+ ir_rvalue *temp = ir2->operands[op2];
+ ir2->operands[op2] = ir1->operands[op1];
+ ir1->operands[op1] = temp;
+
+ /* Update the type of ir2. The type of ir1 won't have changed --
+ * base types matched, and at least one of the operands of the 2
+ * binops is still a vector if any of them were.
+ */
+ update_type(ir2);
+
+ this->progress = true;
+}
+
+/**
+ * Reassociates a constant down a tree of adds or multiplies.
+ *
+ * Consider (2 * (a * (b * 0.5))). We want to end up with a * b.
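+ *
+ * Reassociation swaps the outer 2 with the inner b, yielding
+ * (b * (a * (2 * 0.5))); a later constant-folding pass collapses 2 * 0.5,
+ * and the multiply-by-one rule removes the resulting 1, leaving b * a
+ * (the desired a * b, up to operand order).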
+ */
+bool
+ir_algebraic_visitor::reassociate_constant(ir_expression *ir1, int const_index,
+ ir_constant *constant,
+ ir_expression *ir2)
+{
+ if (!ir2 || ir1->operation != ir2->operation)
+ return false;
+
+ /* Don't want to even think about matrices. */
+ if (ir1->operands[0]->type->is_matrix() ||
+ ir1->operands[1]->type->is_matrix() ||
+ ir2->operands[0]->type->is_matrix() ||
+ ir2->operands[1]->type->is_matrix())
+ return false;
+
+ void *mem_ctx = ralloc_parent(ir2);
+
+ ir_constant *ir2_const[2];
+ ir2_const[0] = ir2->operands[0]->constant_expression_value(mem_ctx);
+ ir2_const[1] = ir2->operands[1]->constant_expression_value(mem_ctx);
+
+ if (ir2_const[0] && ir2_const[1])
+ return false;
+
+ if (ir2_const[0]) {
+ reassociate_operands(ir1, const_index, ir2, 1);
+ return true;
+ } else if (ir2_const[1]) {
+ reassociate_operands(ir1, const_index, ir2, 0);
+ return true;
+ }
+
+ if (reassociate_constant(ir1, const_index, constant,
+ ir2->operands[0]->as_expression())) {
+ update_type(ir2);
+ return true;
+ }
+
+ if (reassociate_constant(ir1, const_index, constant,
+ ir2->operands[1]->as_expression())) {
+ update_type(ir2);
+ return true;
+ }
+
+ return false;
+}
+
+/* When eliminating an expression and just returning one of its operands,
+ * we may need to swizzle that operand out to a vector if the expression was
+ * vector type.
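+ *
+ * For example, if (float s) + (vec4 0.0) folds to the scalar s while the
+ * expression type is vec4, the swizzle broadcasts s back to a vec4.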
+ */
+ir_rvalue *
+ir_algebraic_visitor::swizzle_if_required(ir_expression *expr,
+ ir_rvalue *operand)
+{
+ if (expr->type->is_vector() && operand->type->is_scalar()) {
+ return new(mem_ctx) ir_swizzle(operand, 0, 0, 0, 0,
+ expr->type->vector_elements);
+ } else
+ return operand;
+}
+
+ir_rvalue *
+ir_algebraic_visitor::handle_expression(ir_expression *ir)
+{
+ ir_constant *op_const[4] = {NULL, NULL, NULL, NULL};
+ ir_expression *op_expr[4] = {NULL, NULL, NULL, NULL};
+
+ if (ir->operation == ir_binop_mul &&
+ ir->operands[0]->type->is_matrix() &&
+ ir->operands[1]->type->is_vector()) {
+ ir_expression *matrix_mul = ir->operands[0]->as_expression();
+
+ if (matrix_mul && matrix_mul->operation == ir_binop_mul &&
+ matrix_mul->operands[0]->type->is_matrix() &&
+ matrix_mul->operands[1]->type->is_matrix()) {
+
+ return mul(matrix_mul->operands[0],
+ mul(matrix_mul->operands[1], ir->operands[1]));
+ }
+ }
+
+ assert(ir->num_operands <= 4);
+ for (unsigned i = 0; i < ir->num_operands; i++) {
+ if (ir->operands[i]->type->is_matrix())
+ return ir;
+
+ op_const[i] =
+ ir->operands[i]->constant_expression_value(ralloc_parent(ir));
+ op_expr[i] = ir->operands[i]->as_expression();
+ }
+
+ if (this->mem_ctx == NULL)
+ this->mem_ctx = ralloc_parent(ir);
+
+ switch (ir->operation) {
+ case ir_unop_bit_not:
+ if (op_expr[0] && op_expr[0]->operation == ir_unop_bit_not)
+ return op_expr[0]->operands[0];
+ break;
+
+ case ir_unop_abs:
+ if (op_expr[0] == NULL)
+ break;
+
+ switch (op_expr[0]->operation) {
+ case ir_unop_abs:
+ case ir_unop_neg:
+ return abs(op_expr[0]->operands[0]);
+ default:
+ break;
+ }
+ break;
+
+ case ir_unop_neg:
+ if (op_expr[0] == NULL)
+ break;
+
+ if (op_expr[0]->operation == ir_unop_neg) {
+ return op_expr[0]->operands[0];
+ }
+ break;
+
+ case ir_unop_exp:
+ if (op_expr[0] == NULL)
+ break;
+
+ if (op_expr[0]->operation == ir_unop_log) {
+ return op_expr[0]->operands[0];
+ }
+ break;
+
+ case ir_unop_log:
+ if (op_expr[0] == NULL)
+ break;
+
+ if (op_expr[0]->operation == ir_unop_exp) {
+ return op_expr[0]->operands[0];
+ }
+ break;
+
+ case ir_unop_exp2:
+ if (op_expr[0] == NULL)
+ break;
+
+ if (op_expr[0]->operation == ir_unop_log2) {
+ return op_expr[0]->operands[0];
+ }
+
+ if (!options->EmitNoPow && op_expr[0]->operation == ir_binop_mul) {
+ for (int log2_pos = 0; log2_pos < 2; log2_pos++) {
+ ir_expression *log2_expr =
+ op_expr[0]->operands[log2_pos]->as_expression();
+
+ if (log2_expr && log2_expr->operation == ir_unop_log2) {
+ return new(mem_ctx) ir_expression(ir_binop_pow,
+ ir->type,
+ log2_expr->operands[0],
+ op_expr[0]->operands[1 - log2_pos]);
+ }
+ }
+ }
+ break;
+
+ case ir_unop_log2:
+ if (op_expr[0] == NULL)
+ break;
+
+ if (op_expr[0]->operation == ir_unop_exp2) {
+ return op_expr[0]->operands[0];
+ }
+ break;
+
+ case ir_unop_f2i:
+ case ir_unop_f2u:
+ if (op_expr[0] && op_expr[0]->operation == ir_unop_trunc) {
+ return new(mem_ctx) ir_expression(ir->operation,
+ ir->type,
+ op_expr[0]->operands[0]);
+ }
+ break;
+
+ case ir_unop_logic_not: {
+ enum ir_expression_operation new_op = ir_unop_logic_not;
+
+ if (op_expr[0] == NULL)
+ break;
+
+ switch (op_expr[0]->operation) {
+ case ir_binop_less: new_op = ir_binop_gequal; break;
+ case ir_binop_gequal: new_op = ir_binop_less; break;
+ case ir_binop_equal: new_op = ir_binop_nequal; break;
+ case ir_binop_nequal: new_op = ir_binop_equal; break;
+ case ir_binop_all_equal: new_op = ir_binop_any_nequal; break;
+ case ir_binop_any_nequal: new_op = ir_binop_all_equal; break;
+
+ default:
+ /* The default case handler is here to silence a warning from GCC.
+ */
+ break;
+ }
+
+ if (new_op != ir_unop_logic_not) {
+ return new(mem_ctx) ir_expression(new_op,
+ ir->type,
+ op_expr[0]->operands[0],
+ op_expr[0]->operands[1]);
+ }
+
+ break;
+ }
+
+ case ir_unop_saturate:
+ if (op_expr[0] && op_expr[0]->operation == ir_binop_add) {
+ ir_expression *b2f_0 = op_expr[0]->operands[0]->as_expression();
+ ir_expression *b2f_1 = op_expr[0]->operands[1]->as_expression();
+
+ if (b2f_0 && b2f_0->operation == ir_unop_b2f &&
+ b2f_1 && b2f_1->operation == ir_unop_b2f) {
+ return b2f(logic_or(b2f_0->operands[0], b2f_1->operands[0]));
+ }
+ }
+ break;
+
+ /* This macro CANNOT use the do { } while(true) mechanism because
+ * then the breaks apply to the loop instead of the switch!
+ */
+#define HANDLE_PACK_UNPACK_INVERSE(inverse_operation) \
+ { \
+ ir_expression *const op = ir->operands[0]->as_expression(); \
+ if (op == NULL) \
+ break; \
+ if (op->operation == (inverse_operation)) \
+ return op->operands[0]; \
+ break; \
+ }
+
+ case ir_unop_unpack_uint_2x32:
+ HANDLE_PACK_UNPACK_INVERSE(ir_unop_pack_uint_2x32);
+ case ir_unop_pack_uint_2x32:
+ HANDLE_PACK_UNPACK_INVERSE(ir_unop_unpack_uint_2x32);
+ case ir_unop_unpack_int_2x32:
+ HANDLE_PACK_UNPACK_INVERSE(ir_unop_pack_int_2x32);
+ case ir_unop_pack_int_2x32:
+ HANDLE_PACK_UNPACK_INVERSE(ir_unop_unpack_int_2x32);
+ case ir_unop_unpack_double_2x32:
+ HANDLE_PACK_UNPACK_INVERSE(ir_unop_pack_double_2x32);
+ case ir_unop_pack_double_2x32:
+ HANDLE_PACK_UNPACK_INVERSE(ir_unop_unpack_double_2x32);
+
+#undef HANDLE_PACK_UNPACK_INVERSE
+
+ case ir_binop_add:
+ if (is_vec_zero(op_const[0]))
+ return ir->operands[1];
+ if (is_vec_zero(op_const[1]))
+ return ir->operands[0];
+
+ /* Replace (x + (-x)) with constant 0 */
+ for (int i = 0; i < 2; i++) {
+ if (op_expr[i]) {
+ if (op_expr[i]->operation == ir_unop_neg) {
+ ir_rvalue *other = ir->operands[(i + 1) % 2];
+ if (other && op_expr[i]->operands[0]->equals(other)) {
+ return ir_constant::zero(ir, ir->type);
+ }
+ }
+ }
+ }
+
+ /* Reassociate addition of constants so that we can do constant
+ * folding.
+ */
+ if (op_const[0] && !op_const[1])
+ reassociate_constant(ir, 0, op_const[0], op_expr[1]);
+ if (op_const[1] && !op_const[0])
+ reassociate_constant(ir, 1, op_const[1], op_expr[0]);
+
+ /* Recognize (v.x + v.y) + (v.z + v.w) as dot(v, 1.0) */
+ if (options->OptimizeForAOS) {
+ ir_expression *expr = try_replace_with_dot(op_expr[0], op_expr[1],
+ mem_ctx);
+ if (expr)
+ return expr;
+ }
+
+ /* Replace (-x + y) * a + x and commutative variations with lrp(x, y, a).
+ *
+ * (-x + y) * a + x
+ * (x * -a) + (y * a) + x
+ * x + (x * -a) + (y * a)
+ * x * (1 - a) + y * a
+ * lrp(x, y, a)
+ */
+ for (int mul_pos = 0; mul_pos < 2; mul_pos++) {
+ ir_expression *mul = op_expr[mul_pos];
+
+ if (!mul || mul->operation != ir_binop_mul)
+ continue;
+
+ /* Multiply found on one of the operands. Now check for an
+ * inner addition operation.
+ */
+ for (int inner_add_pos = 0; inner_add_pos < 2; inner_add_pos++) {
+ ir_expression *inner_add =
+ mul->operands[inner_add_pos]->as_expression();
+
+ if (!inner_add || inner_add->operation != ir_binop_add)
+ continue;
+
+ /* Inner addition found on one of the operands. Now check for
+ * one of the operands of the inner addition to be the negative
+ * of x_operand.
+ */
+ for (int neg_pos = 0; neg_pos < 2; neg_pos++) {
+ ir_expression *neg =
+ inner_add->operands[neg_pos]->as_expression();
+
+ if (!neg || neg->operation != ir_unop_neg)
+ continue;
+
+ ir_rvalue *x_operand = ir->operands[1 - mul_pos];
+
+ if (!neg->operands[0]->equals(x_operand))
+ continue;
+
+ ir_rvalue *y_operand = inner_add->operands[1 - neg_pos];
+ ir_rvalue *a_operand = mul->operands[1 - inner_add_pos];
+
+ if (!x_operand->type->is_float_16_32_64() ||
+ x_operand->type != y_operand->type ||
+ x_operand->type != a_operand->type)
+ continue;
+
+ return lrp(x_operand, y_operand, a_operand);
+ }
+ }
+ }
+
+ break;
+
+ case ir_binop_sub:
+ if (is_vec_zero(op_const[0]))
+ return neg(ir->operands[1]);
+ if (is_vec_zero(op_const[1]))
+ return ir->operands[0];
+ break;
+
+ case ir_binop_mul:
+ if (is_vec_one(op_const[0]))
+ return ir->operands[1];
+ if (is_vec_one(op_const[1]))
+ return ir->operands[0];
+
+ if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1]))
+ return ir_constant::zero(ir, ir->type);
+
+ if (is_vec_negative_one(op_const[0]))
+ return neg(ir->operands[1]);
+ if (is_vec_negative_one(op_const[1]))
+ return neg(ir->operands[0]);
+
+ if (op_expr[0] && op_expr[0]->operation == ir_unop_b2f &&
+ op_expr[1] && op_expr[1]->operation == ir_unop_b2f) {
+ return b2f(logic_and(op_expr[0]->operands[0], op_expr[1]->operands[0]));
+ }
+
+ /* Reassociate multiplication of constants so that we can do
+ * constant folding.
+ */
+ if (op_const[0] && !op_const[1])
+ reassociate_constant(ir, 0, op_const[0], op_expr[1]);
+ if (op_const[1] && !op_const[0])
+ reassociate_constant(ir, 1, op_const[1], op_expr[0]);
+
+ /* Optimizes
+ *
+ *    (mul (floor (add (abs x) 0.5)) (sign x))
+ *
+ * into
+ *
+ * (trunc (add x (mul (sign x) 0.5)))
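+ *
+ * Both round half away from zero; e.g. x = -2.7 gives
+ * floor(|-2.7| + 0.5) * sign(-2.7) = -3 and trunc(-2.7 - 0.5) = -3.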
+ */
+ for (int i = 0; i < 2; i++) {
+ ir_expression *sign_expr = ir->operands[i]->as_expression();
+ ir_expression *floor_expr = ir->operands[1 - i]->as_expression();
+
+ if (!sign_expr || sign_expr->operation != ir_unop_sign ||
+ !floor_expr || floor_expr->operation != ir_unop_floor)
+ continue;
+
+ ir_expression *add_expr = floor_expr->operands[0]->as_expression();
+ if (!add_expr || add_expr->operation != ir_binop_add)
+ continue;
+
+ for (int j = 0; j < 2; j++) {
+ ir_expression *abs_expr = add_expr->operands[j]->as_expression();
+ if (!abs_expr || abs_expr->operation != ir_unop_abs)
+ continue;
+
+ ir_constant *point_five = add_expr->operands[1 - j]->as_constant();
+ if (!point_five || !point_five->is_value(0.5, 0))
+ continue;
+
+ if (abs_expr->operands[0]->equals(sign_expr->operands[0])) {
+ return trunc(add(abs_expr->operands[0],
+ mul(sign_expr, point_five)));
+ }
+ }
+ }
+ break;
+
+ case ir_binop_div:
+ if (is_vec_one(op_const[0]) && (
+ ir->type->is_float() || ir->type->is_double())) {
+ return new(mem_ctx) ir_expression(ir_unop_rcp,
+ ir->operands[1]->type,
+ ir->operands[1],
+ NULL);
+ }
+ if (is_vec_one(op_const[1]))
+ return ir->operands[0];
+ break;
+
+ case ir_binop_dot:
+ if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1]))
+ return ir_constant::zero(mem_ctx, ir->type);
+
+ for (int i = 0; i < 2; i++) {
+ if (!op_const[i])
+ continue;
+
+ unsigned components[4] = { 0 }, count = 0;
+
+ for (unsigned c = 0; c < op_const[i]->type->vector_elements; c++) {
+            if (op_const[i]->get_float_component(c) == 0.0f)
+ continue;
+
+ components[count] = c;
+ count++;
+ }
+
+ /* No channels had zero values; bail. */
+ if (count >= op_const[i]->type->vector_elements)
+ break;
+
+ ir_expression_operation op = count == 1 ?
+ ir_binop_mul : ir_binop_dot;
+
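+         /* For example, with c the constant operand, dot(v, vec4(1, 0, 0, 2))
+          * becomes dot(v.xw, c.xw), and dot(v, vec4(0, 3, 0, 0)) collapses
+          * to v.y * 3.
+          */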
+ /* Swizzle both operands to remove the channels that were zero. */
+ return new(mem_ctx)
+ ir_expression(op, ir->type,
+ new(mem_ctx) ir_swizzle(ir->operands[0],
+ components, count),
+ new(mem_ctx) ir_swizzle(ir->operands[1],
+ components, count));
+ }
+ break;
+
+ case ir_binop_less:
+ case ir_binop_gequal:
+ case ir_binop_equal:
+ case ir_binop_nequal:
+ for (int add_pos = 0; add_pos < 2; add_pos++) {
+ ir_expression *add = op_expr[add_pos];
+
+ if (!add || add->operation != ir_binop_add)
+ continue;
+
+ ir_constant *zero = op_const[1 - add_pos];
+ if (!is_vec_zero(zero))
+ continue;
+
+         /* We are allowed to add a scalar to a vector or matrix. In that
+          * case, just exit early.
+ */
+ if (add->operands[0]->type != add->operands[1]->type)
+ continue;
+
+         /* Depending on the position of the zero, we optimize
+          * (0 cmp x+y) into (-x cmp y) or (x+y cmp 0) into (x cmp -y).
+ */
+ if (add_pos == 1) {
+ return new(mem_ctx) ir_expression(ir->operation,
+ neg(add->operands[0]),
+ add->operands[1]);
+ } else {
+ return new(mem_ctx) ir_expression(ir->operation,
+ add->operands[0],
+ neg(add->operands[1]));
+ }
+ }
+ break;
+
+ case ir_binop_all_equal:
+ case ir_binop_any_nequal:
+ if (ir->operands[0]->type->is_scalar() &&
+ ir->operands[1]->type->is_scalar())
+ return new(mem_ctx) ir_expression(ir->operation == ir_binop_all_equal
+ ? ir_binop_equal : ir_binop_nequal,
+ ir->operands[0],
+ ir->operands[1]);
+ break;
+
+ case ir_binop_rshift:
+ case ir_binop_lshift:
+      /* 0 >> x == 0 and 0 << x == 0 */
+ if (is_vec_zero(op_const[0]))
+ return ir->operands[0];
+      /* x >> 0 == x and x << 0 == x */
+ if (is_vec_zero(op_const[1]))
+ return ir->operands[0];
+ break;
+
+ case ir_binop_logic_and:
+ if (is_vec_one(op_const[0])) {
+ return ir->operands[1];
+ } else if (is_vec_one(op_const[1])) {
+ return ir->operands[0];
+ } else if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1])) {
+ return ir_constant::zero(mem_ctx, ir->type);
+ } else if (op_expr[0] && op_expr[0]->operation == ir_unop_logic_not &&
+ op_expr[1] && op_expr[1]->operation == ir_unop_logic_not) {
+ /* De Morgan's Law:
+ * (not A) and (not B) === not (A or B)
+ */
+ return logic_not(logic_or(op_expr[0]->operands[0],
+ op_expr[1]->operands[0]));
+ } else if (ir->operands[0]->equals(ir->operands[1])) {
+ /* (a && a) == a */
+ return ir->operands[0];
+ }
+ break;
+
+ case ir_binop_logic_xor:
+ if (is_vec_zero(op_const[0])) {
+ return ir->operands[1];
+ } else if (is_vec_zero(op_const[1])) {
+ return ir->operands[0];
+ } else if (is_vec_one(op_const[0])) {
+ return logic_not(ir->operands[1]);
+ } else if (is_vec_one(op_const[1])) {
+ return logic_not(ir->operands[0]);
+ } else if (ir->operands[0]->equals(ir->operands[1])) {
+ /* (a ^^ a) == false */
+ return ir_constant::zero(mem_ctx, ir->type);
+ }
+ break;
+
+ case ir_binop_logic_or:
+ if (is_vec_zero(op_const[0])) {
+ return ir->operands[1];
+ } else if (is_vec_zero(op_const[1])) {
+ return ir->operands[0];
+ } else if (is_vec_one(op_const[0]) || is_vec_one(op_const[1])) {
+ ir_constant_data data;
+
+ for (unsigned i = 0; i < 16; i++)
+ data.b[i] = true;
+
+ return new(mem_ctx) ir_constant(ir->type, &data);
+ } else if (op_expr[0] && op_expr[0]->operation == ir_unop_logic_not &&
+ op_expr[1] && op_expr[1]->operation == ir_unop_logic_not) {
+ /* De Morgan's Law:
+ * (not A) or (not B) === not (A and B)
+ */
+ return logic_not(logic_and(op_expr[0]->operands[0],
+ op_expr[1]->operands[0]));
+ } else if (ir->operands[0]->equals(ir->operands[1])) {
+ /* (a || a) == a */
+ return ir->operands[0];
+ }
+ break;
+
+ case ir_binop_pow:
+ /* 1^x == 1 */
+ if (is_vec_one(op_const[0]))
+ return op_const[0];
+
+ /* x^1 == x */
+ if (is_vec_one(op_const[1]))
+ return ir->operands[0];
+
+ /* pow(2,x) == exp2(x) */
+ if (is_vec_two(op_const[0]))
+ return expr(ir_unop_exp2, ir->operands[1]);
+
+ if (is_vec_two(op_const[1])) {
+ ir_variable *x = new(ir) ir_variable(ir->operands[1]->type, "x",
+ ir_var_temporary);
+ base_ir->insert_before(x);
+ base_ir->insert_before(assign(x, ir->operands[0]));
+ return mul(x, x);
+ }
+
+ if (is_vec_four(op_const[1])) {
+ ir_variable *x = new(ir) ir_variable(ir->operands[1]->type, "x",
+ ir_var_temporary);
+ base_ir->insert_before(x);
+ base_ir->insert_before(assign(x, ir->operands[0]));
+
+ ir_variable *squared = new(ir) ir_variable(ir->operands[1]->type,
+ "squared",
+ ir_var_temporary);
+ base_ir->insert_before(squared);
+ base_ir->insert_before(assign(squared, mul(x, x)));
+ return mul(squared, squared);
+ }
+
+ break;
+
+ case ir_binop_min:
+ case ir_binop_max:
+ if (!ir->type->is_float() || options->EmitNoSat)
+ break;
+
+      /* Replace min(max) operations and their commutative combinations
+       * with a single saturate operation.
+ */
+ for (int op = 0; op < 2; op++) {
+ ir_expression *inner_expr = op_expr[op];
+ ir_constant *outer_const = op_const[1 - op];
+ ir_expression_operation op_cond = (ir->operation == ir_binop_max) ?
+ ir_binop_min : ir_binop_max;
+
+ if (!inner_expr || !outer_const || (inner_expr->operation != op_cond))
+ continue;
+
+ /* One of these has to be a constant */
+ if (!inner_expr->operands[0]->as_constant() &&
+ !inner_expr->operands[1]->as_constant())
+ break;
+
+         /* Found a min(max) combination. Now check whether its operands
+          * meet the conditions for reducing it to a single saturate operation.
+ */
+ for (int minmax_op = 0; minmax_op < 2; minmax_op++) {
+ ir_rvalue *x = inner_expr->operands[minmax_op];
+ ir_rvalue *y = inner_expr->operands[1 - minmax_op];
+
+ ir_constant *inner_const = y->as_constant();
+ if (!inner_const)
+ continue;
+
+ /* min(max(x, 0.0), 1.0) is sat(x) */
+ if (ir->operation == ir_binop_min &&
+ inner_const->is_zero() &&
+ outer_const->is_one())
+ return saturate(x);
+
+ /* max(min(x, 1.0), 0.0) is sat(x) */
+ if (ir->operation == ir_binop_max &&
+ inner_const->is_one() &&
+ outer_const->is_zero())
+ return saturate(x);
+
+ /* min(max(x, 0.0), b) where b < 1.0 is sat(min(x, b)) */
+ if (ir->operation == ir_binop_min &&
+ inner_const->is_zero() &&
+ is_less_than_one(outer_const))
+ return saturate(expr(ir_binop_min, x, outer_const));
+
+ /* max(min(x, b), 0.0) where b < 1.0 is sat(min(x, b)) */
+ if (ir->operation == ir_binop_max &&
+ is_less_than_one(inner_const) &&
+ outer_const->is_zero())
+ return saturate(expr(ir_binop_min, x, inner_const));
+
+ /* max(min(x, 1.0), b) where b > 0.0 is sat(max(x, b)) */
+ if (ir->operation == ir_binop_max &&
+ inner_const->is_one() &&
+ is_greater_than_zero(outer_const))
+ return saturate(expr(ir_binop_max, x, outer_const));
+
+ /* min(max(x, b), 1.0) where b > 0.0 is sat(max(x, b)) */
+ if (ir->operation == ir_binop_min &&
+ is_greater_than_zero(inner_const) &&
+ outer_const->is_one())
+ return saturate(expr(ir_binop_max, x, inner_const));
+ }
+ }
+
+ break;
+
+ case ir_unop_rcp:
+ if (op_expr[0] && op_expr[0]->operation == ir_unop_rcp)
+ return op_expr[0]->operands[0];
+
+ if (op_expr[0] && (op_expr[0]->operation == ir_unop_exp2 ||
+ op_expr[0]->operation == ir_unop_exp)) {
+ return new(mem_ctx) ir_expression(op_expr[0]->operation, ir->type,
+ neg(op_expr[0]->operands[0]));
+ }
+
+ /* While ir_to_mesa.cpp will lower sqrt(x) to rcp(rsq(x)), it does so at
+ * its IR level, so we can always apply this transformation.
+ */
+ if (op_expr[0] && op_expr[0]->operation == ir_unop_rsq)
+ return sqrt(op_expr[0]->operands[0]);
+
+ /* As far as we know, all backends are OK with rsq. */
+ if (op_expr[0] && op_expr[0]->operation == ir_unop_sqrt) {
+ return rsq(op_expr[0]->operands[0]);
+ }
+
+ break;
+
+ case ir_triop_fma:
+ /* Operands are op0 * op1 + op2. */
+ if (is_vec_zero(op_const[0]) || is_vec_zero(op_const[1])) {
+ return ir->operands[2];
+ } else if (is_vec_zero(op_const[2])) {
+ return mul(ir->operands[0], ir->operands[1]);
+ } else if (is_vec_one(op_const[0])) {
+ return add(ir->operands[1], ir->operands[2]);
+ } else if (is_vec_one(op_const[1])) {
+ return add(ir->operands[0], ir->operands[2]);
+ }
+ break;
+
+ case ir_triop_lrp:
+ /* Operands are (x, y, a). */
+ if (is_vec_zero(op_const[2])) {
+ return ir->operands[0];
+ } else if (is_vec_one(op_const[2])) {
+ return ir->operands[1];
+ } else if (ir->operands[0]->equals(ir->operands[1])) {
+ return ir->operands[0];
+ } else if (is_vec_zero(op_const[0])) {
+ return mul(ir->operands[1], ir->operands[2]);
+ } else if (is_vec_zero(op_const[1])) {
+ unsigned op2_components = ir->operands[2]->type->vector_elements;
+ ir_constant *one;
+
+ switch (ir->type->base_type) {
+ case GLSL_TYPE_FLOAT16:
+ one = new(mem_ctx) ir_constant(float16_t::one(), op2_components);
+ break;
+ case GLSL_TYPE_FLOAT:
+ one = new(mem_ctx) ir_constant(1.0f, op2_components);
+ break;
+ case GLSL_TYPE_DOUBLE:
+ one = new(mem_ctx) ir_constant(1.0, op2_components);
+ break;
+ default:
+ one = NULL;
+ unreachable("unexpected type");
+ }
+
+ return mul(ir->operands[0], add(one, neg(ir->operands[2])));
+ }
+ break;
+
+ case ir_triop_csel:
+ if (is_vec_one(op_const[0]))
+ return ir->operands[1];
+ if (is_vec_zero(op_const[0]))
+ return ir->operands[2];
+ break;
+
+ /* Remove interpolateAt* instructions for demoted inputs. They are
+ * assigned a constant expression to facilitate this.
+ */
+ case ir_unop_interpolate_at_centroid:
+ case ir_binop_interpolate_at_offset:
+ case ir_binop_interpolate_at_sample:
+ if (op_const[0])
+ return ir->operands[0];
+ break;
+
+ default:
+ break;
+ }
+
+ return ir;
+}
+
+void
+ir_algebraic_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_expression *expr = (*rvalue)->as_expression();
+ if (!expr || expr->operation == ir_quadop_vector)
+ return;
+
+ ir_rvalue *new_rvalue = handle_expression(expr);
+ if (new_rvalue == *rvalue)
+ return;
+
+ /* If the expr used to be some vec OP scalar returning a vector, and the
+ * optimization gave us back a scalar, we still need to turn it into a
+ * vector.
+ */
+ *rvalue = swizzle_if_required(expr, new_rvalue);
+
+ this->progress = true;
+}
+
+bool
+do_algebraic(exec_list *instructions, bool native_integers,
+ const struct gl_shader_compiler_options *options)
+{
+ ir_algebraic_visitor v(native_integers, options);
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp
new file mode 100644
index 0000000000..7d928b9356
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp
@@ -0,0 +1,505 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_array_splitting.cpp
+ *
+ * If an array is always dereferenced with a constant index, then
+ * split it apart into its elements, making it more amenable to other
+ * optimization passes.
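+ *
+ * For example, a local
+ *
+ *    float a[2];  ... a[0] ... a[1] ...
+ *
+ * becomes the two scalars a_0 and a_1, with each constant-indexed access
+ * rewritten to use the matching scalar.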
+ *
+ * This skips uniform/varying arrays, which would need careful
+ * handling due to their ir->location fields tying them to the GL API
+ * and other shader stages.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "compiler/glsl_types.h"
+
+static bool debug = false;
+
+namespace {
+
+namespace opt_array_splitting {
+
+class variable_entry : public exec_node
+{
+public:
+ variable_entry(ir_variable *var)
+ {
+ this->var = var;
+ this->split = true;
+ this->declaration = false;
+ this->components = NULL;
+ this->mem_ctx = NULL;
+ if (var->type->is_array())
+ this->size = var->type->length;
+ else
+ this->size = var->type->matrix_columns;
+ }
+
+ ir_variable *var; /* The key: the variable's pointer. */
+ unsigned size; /* array length or matrix columns */
+
+ /** Whether this array should be split or not. */
+ bool split;
+
+   /* Whether the variable's declaration appears in the instruction
+    * stream. We can't split function arguments, which never get this
+    * flag set.
+ */
+ bool declaration;
+
+ ir_variable **components;
+
+ /** ralloc_parent(this->var) -- the shader's talloc context. */
+ void *mem_ctx;
+};
+
+} /* namespace */
+
+using namespace opt_array_splitting;
+
+/**
+ * This class does a walk over the tree, coming up with the set of
+ * variables that could be split by looking to see if they are arrays
+ * that are only ever constant-index dereferenced.
+ */
+class ir_array_reference_visitor : public ir_hierarchical_visitor {
+public:
+ ir_array_reference_visitor(void)
+ {
+ this->mem_ctx = ralloc_context(NULL);
+ this->variable_list.make_empty();
+ this->in_whole_array_copy = false;
+ }
+
+ ~ir_array_reference_visitor(void)
+ {
+ ralloc_free(mem_ctx);
+ }
+
+ bool get_split_list(exec_list *instructions, bool linked);
+
+ virtual ir_visitor_status visit(ir_variable *);
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+ virtual ir_visitor_status visit_enter(ir_function_signature *);
+
+ variable_entry *get_variable_entry(ir_variable *var);
+
+ /* List of variable_entry */
+ exec_list variable_list;
+
+ void *mem_ctx;
+
+ bool in_whole_array_copy;
+};
+
+} /* namespace */
+
+variable_entry *
+ir_array_reference_visitor::get_variable_entry(ir_variable *var)
+{
+ assert(var);
+
+ if (var->data.mode != ir_var_auto &&
+ var->data.mode != ir_var_temporary)
+ return NULL;
+
+ if (!(var->type->is_array() || var->type->is_matrix()))
+ return NULL;
+
+ /* If the array hasn't been sized yet, we can't split it. After
+ * linking, this should be resolved.
+ */
+ if (var->type->is_unsized_array())
+ return NULL;
+
+   /* FIXME: arrays of arrays are not handled correctly by this pass, so we
+    * skip them for now. While the pass will create functioning code, it
+    * actually produces worse code.
+ *
+ * For example the array:
+ *
+ * int[3][2] a;
+ *
+ * ends up being split up into:
+ *
+ * int[3][2] a_0;
+ * int[3][2] a_1;
+ * int[3][2] a_2;
+ *
+ * And we end up referencing each of these new arrays for example:
+ *
+ * a[0][1] will be turned into a_0[0][1]
+ * a[1][0] will be turned into a_1[1][0]
+ * a[2][0] will be turned into a_2[2][0]
+ */
+ if (var->type->is_array() && var->type->fields.array->is_array())
+ return NULL;
+
+ foreach_in_list(variable_entry, entry, &this->variable_list) {
+ if (entry->var == var)
+ return entry;
+ }
+
+ variable_entry *entry = new(mem_ctx) variable_entry(var);
+ this->variable_list.push_tail(entry);
+ return entry;
+}
+
+
+ir_visitor_status
+ir_array_reference_visitor::visit(ir_variable *ir)
+{
+ variable_entry *entry = this->get_variable_entry(ir);
+
+ if (entry)
+ entry->declaration = true;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_array_reference_visitor::visit_enter(ir_assignment *ir)
+{
+ in_whole_array_copy =
+ ir->lhs->type->is_array() && ir->whole_variable_written();
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_array_reference_visitor::visit_leave(ir_assignment *)
+{
+ in_whole_array_copy = false;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_array_reference_visitor::visit(ir_dereference_variable *ir)
+{
+ variable_entry *entry = this->get_variable_entry(ir->var);
+
+ /* Allow whole-array assignments on the LHS. We can split those
+ * by "unrolling" the assignment into component-wise assignments.
+ */
+ if (in_assignee && in_whole_array_copy)
+ return visit_continue;
+
+ /* If we made it to here without seeing an ir_dereference_array,
+ * then the dereference of this array didn't have a constant index
+ * (see the visit_continue_with_parent below), so we can't split
+ * the variable.
+ */
+ if (entry)
+ entry->split = false;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_array_reference_visitor::visit_enter(ir_dereference_array *ir)
+{
+ ir_dereference_variable *deref = ir->array->as_dereference_variable();
+ if (!deref)
+ return visit_continue;
+
+ variable_entry *entry = this->get_variable_entry(deref->var);
+
+ /* If the access to the array has a variable index, we wouldn't
+ * know which split variable this dereference should go to.
+ */
+ if (!ir->array_index->as_constant()) {
+ if (entry)
+ entry->split = false;
+ /* This variable indexing could come from a different array dereference
+ * that also has variable indexing, that is, something like a[b[a[b[0]]]].
+       * If we return visit_continue_with_parent here for the first appearance
+ * of a, then we can miss that b also has indirect indexing (if this is
+ * the only place in the program where such indirect indexing into b
+ * happens), so keep going.
+ */
+ return visit_continue;
+ }
+
+ /* If the index is also array dereference, visit index. */
+ if (ir->array_index->as_dereference_array())
+ visit_enter(ir->array_index->as_dereference_array());
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_array_reference_visitor::visit_enter(ir_function_signature *ir)
+{
+ /* We don't have logic for array-splitting function arguments,
+ * so just look at the body instructions and not the parameter
+ * declarations.
+ */
+ visit_list_elements(this, &ir->body);
+ return visit_continue_with_parent;
+}
+
+bool
+ir_array_reference_visitor::get_split_list(exec_list *instructions,
+ bool linked)
+{
+ visit_list_elements(this, instructions);
+
+ /* If the shaders aren't linked yet, we can't mess with global
+ * declarations, which need to be matched by name across shaders.
+ */
+ if (!linked) {
+ foreach_in_list(ir_instruction, node, instructions) {
+ ir_variable *var = node->as_variable();
+ if (var) {
+ variable_entry *entry = get_variable_entry(var);
+ if (entry)
+ entry->remove();
+ }
+ }
+ }
+
+ /* Trim out variables we found that we can't split. */
+ foreach_in_list_safe(variable_entry, entry, &variable_list) {
+ if (debug) {
+ printf("array %s@%p: decl %d, split %d\n",
+ entry->var->name, (void *) entry->var, entry->declaration,
+ entry->split);
+ }
+
+ if (!(entry->declaration && entry->split)) {
+ entry->remove();
+ }
+ }
+
+ return !variable_list.is_empty();
+}
+
+/**
+ * This class rewrites the dereferences of arrays that have been split
+ * to use the newly created ir_variables for each component.
+ */
+class ir_array_splitting_visitor : public ir_rvalue_visitor {
+public:
+ ir_array_splitting_visitor(exec_list *vars)
+ {
+ this->variable_list = vars;
+ }
+
+ virtual ~ir_array_splitting_visitor()
+ {
+ }
+
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+
+ void split_deref(ir_dereference **deref);
+ void handle_rvalue(ir_rvalue **rvalue);
+ variable_entry *get_splitting_entry(ir_variable *var);
+
+ exec_list *variable_list;
+};
+
+variable_entry *
+ir_array_splitting_visitor::get_splitting_entry(ir_variable *var)
+{
+ assert(var);
+
+ foreach_in_list(variable_entry, entry, this->variable_list) {
+ if (entry->var == var) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+void
+ir_array_splitting_visitor::split_deref(ir_dereference **deref)
+{
+ ir_dereference_array *deref_array = (*deref)->as_dereference_array();
+ if (!deref_array)
+ return;
+
+ ir_dereference_variable *deref_var = deref_array->array->as_dereference_variable();
+ if (!deref_var)
+ return;
+ ir_variable *var = deref_var->var;
+
+ variable_entry *entry = get_splitting_entry(var);
+ if (!entry)
+ return;
+
+ ir_constant *constant = deref_array->array_index->as_constant();
+ assert(constant);
+
+ if (constant->value.i[0] >= 0 && constant->value.i[0] < (int)entry->size) {
+ *deref = new(entry->mem_ctx)
+ ir_dereference_variable(entry->components[constant->value.i[0]]);
+ } else {
+ /* There was a constant array access beyond the end of the
+ * array. This might have happened due to constant folding
+ * after the initial parse. This produces an undefined value,
+ * but shouldn't crash. Just give them an uninitialized
+ * variable.
+ */
+ ir_variable *temp = new(entry->mem_ctx) ir_variable(deref_array->type,
+ "undef",
+ ir_var_temporary);
+ entry->components[0]->insert_before(temp);
+ *deref = new(entry->mem_ctx) ir_dereference_variable(temp);
+ }
+}
+
+void
+ir_array_splitting_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_dereference *deref = (*rvalue)->as_dereference();
+
+ if (!deref)
+ return;
+
+ split_deref(&deref);
+ *rvalue = deref;
+}
+
+ir_visitor_status
+ir_array_splitting_visitor::visit_leave(ir_assignment *ir)
+{
+ /* The normal rvalue visitor skips the LHS of assignments, but we
+ * need to process those just the same.
+ */
+ ir_rvalue *lhs = ir->lhs;
+
+ /* "Unroll" any whole array assignments, creating assignments for
+ * each array element. Then, do splitting on each new assignment.
+ */
+ if (lhs->type->is_array() && ir->whole_variable_written() &&
+ get_splitting_entry(ir->whole_variable_written())) {
+ void *mem_ctx = ralloc_parent(ir);
+
+ for (unsigned i = 0; i < lhs->type->length; i++) {
+ ir_rvalue *lhs_i =
+ new(mem_ctx) ir_dereference_array(ir->lhs->clone(mem_ctx, NULL),
+ new(mem_ctx) ir_constant(i));
+ ir_rvalue *rhs_i =
+ new(mem_ctx) ir_dereference_array(ir->rhs->clone(mem_ctx, NULL),
+ new(mem_ctx) ir_constant(i));
+ ir_rvalue *condition_i =
+ ir->condition ? ir->condition->clone(mem_ctx, NULL) : NULL;
+
+ ir_assignment *assign_i =
+ new(mem_ctx) ir_assignment(lhs_i, rhs_i, condition_i);
+
+ ir->insert_before(assign_i);
+ assign_i->accept(this);
+ }
+ ir->remove();
+ return visit_continue;
+ }
+
+ handle_rvalue(&lhs);
+ ir->lhs = lhs->as_dereference();
+
+ ir->lhs->accept(this);
+
+ handle_rvalue(&ir->rhs);
+ ir->rhs->accept(this);
+
+ if (ir->condition) {
+ handle_rvalue(&ir->condition);
+ ir->condition->accept(this);
+ }
+
+ return visit_continue;
+}
+
+bool
+optimize_split_arrays(exec_list *instructions, bool linked)
+{
+ ir_array_reference_visitor refs;
+ if (!refs.get_split_list(instructions, linked))
+ return false;
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ /* Replace the decls of the arrays to be split with their split
+ * components.
+ */
+ foreach_in_list(variable_entry, entry, &refs.variable_list) {
+ const struct glsl_type *type = entry->var->type;
+ const struct glsl_type *subtype;
+
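+      /* Matrices are split into their column vectors, other arrays into
+       * their element type.
+       */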
+ if (type->is_matrix())
+ subtype = type->column_type();
+ else
+ subtype = type->fields.array;
+
+ entry->mem_ctx = ralloc_parent(entry->var);
+
+ entry->components = ralloc_array(mem_ctx, ir_variable *, entry->size);
+
+ for (unsigned int i = 0; i < entry->size; i++) {
+ const char *name = ralloc_asprintf(mem_ctx, "%s_%d",
+ entry->var->name, i);
+ ir_variable *new_var =
+ new(entry->mem_ctx) ir_variable(subtype, name, ir_var_temporary);
+
+ /* Do not lose memory/format qualifiers when arrays of images are
+ * split.
+ */
+ new_var->data.memory_read_only = entry->var->data.memory_read_only;
+ new_var->data.memory_write_only = entry->var->data.memory_write_only;
+ new_var->data.memory_coherent = entry->var->data.memory_coherent;
+ new_var->data.memory_volatile = entry->var->data.memory_volatile;
+ new_var->data.memory_restrict = entry->var->data.memory_restrict;
+ new_var->data.image_format = entry->var->data.image_format;
+
+ entry->components[i] = new_var;
+ entry->var->insert_before(entry->components[i]);
+ }
+
+ entry->var->remove();
+ }
+
+ ir_array_splitting_visitor split(&refs.variable_list);
+ visit_list_elements(&split, instructions);
+
+ if (debug)
+ _mesa_print_ir(stdout, instructions, NULL);
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp
new file mode 100644
index 0000000000..6d8a23460d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp
@@ -0,0 +1,92 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_conditional_discard.cpp
+ *
+ * Replace
+ *
+ * if (cond) discard;
+ *
+ * with
+ *
+ * (discard <condition>)
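+ *
+ * If the discard already carries a condition (because an inner
+ * conditional discard was converted first), the two conditions are
+ * merged with a logical AND: (discard (&& <outer_cond> <inner_cond>)).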
+ */
+
+#include "compiler/glsl_types.h"
+#include "ir.h"
+
+namespace {
+
+class opt_conditional_discard_visitor : public ir_hierarchical_visitor {
+public:
+ opt_conditional_discard_visitor()
+ {
+ progress = false;
+ }
+
+ ir_visitor_status visit_leave(ir_if *);
+
+ bool progress;
+};
+
+} /* anonymous namespace */
+
+bool
+opt_conditional_discard(exec_list *instructions)
+{
+ opt_conditional_discard_visitor v;
+ v.run(instructions);
+ return v.progress;
+}
+
+ir_visitor_status
+opt_conditional_discard_visitor::visit_leave(ir_if *ir)
+{
+ /* Look for "if (...) discard" with no else clause or extra statements. */
+ if (ir->then_instructions.is_empty() ||
+ !ir->then_instructions.get_head_raw()->next->is_tail_sentinel() ||
+ !((ir_instruction *) ir->then_instructions.get_head_raw())->as_discard() ||
+ !ir->else_instructions.is_empty())
+ return visit_continue;
+
+ /* Move the condition and replace the ir_if with the ir_discard. */
+ ir_discard *discard = (ir_discard *) ir->then_instructions.get_head_raw();
+ if (!discard->condition)
+ discard->condition = ir->condition;
+ else {
+ void *ctx = ralloc_parent(ir);
+ discard->condition = new(ctx) ir_expression(ir_binop_logic_and,
+ ir->condition,
+ discard->condition);
+ }
+ ir->replace_with(discard);
+
+ progress = true;
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp
new file mode 100644
index 0000000000..3b9394d135
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_constant_folding.cpp
+ * Replace constant-valued expressions with references to constant values.
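+ *
+ * For example, an expression tree like (* (+ 1.0 2.0) 4.0) is replaced
+ * with a single ir_constant holding 12.0.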
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+/**
+ * Visitor class for replacing expressions with ir_constant values.
+ */
+
+class ir_constant_folding_visitor : public ir_rvalue_visitor {
+public:
+ ir_constant_folding_visitor()
+ {
+ this->progress = false;
+ }
+
+ virtual ~ir_constant_folding_visitor()
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_enter(ir_discard *ir);
+ virtual ir_visitor_status visit_enter(ir_assignment *ir);
+ virtual ir_visitor_status visit_enter(ir_call *ir);
+
+ virtual void handle_rvalue(ir_rvalue **rvalue);
+
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+bool
+ir_constant_fold(ir_rvalue **rvalue)
+{
+ if (*rvalue == NULL || (*rvalue)->ir_type == ir_type_constant)
+ return false;
+
+   /* Note that we do rvalue visiting on leaving. So if an
+ * expression has a non-constant operand, no need to go looking
+ * down it to find if it's constant. This cuts the time of this
+ * pass down drastically.
+ */
+ ir_expression *expr = (*rvalue)->as_expression();
+ if (expr) {
+ for (unsigned int i = 0; i < expr->num_operands; i++) {
+ if (!expr->operands[i]->as_constant())
+ return false;
+ }
+ }
+
+ /* Ditto for swizzles. */
+ ir_swizzle *swiz = (*rvalue)->as_swizzle();
+ if (swiz && !swiz->val->as_constant())
+ return false;
+
+ /* Ditto for array dereferences */
+ ir_dereference_array *array_ref = (*rvalue)->as_dereference_array();
+ if (array_ref && (!array_ref->array->as_constant() ||
+ !array_ref->array_index->as_constant()))
+ return false;
+
+ /* No constant folding can be performed on variable dereferences. We need
+ * to explicitly avoid them, as calling constant_expression_value() on a
+ * variable dereference will return a clone of var->constant_value. This
+ * would make us propagate the value into the tree, which isn't our job.
+ */
+ ir_dereference_variable *var_ref = (*rvalue)->as_dereference_variable();
+ if (var_ref)
+ return false;
+
+ ir_constant *constant =
+ (*rvalue)->constant_expression_value(ralloc_parent(*rvalue));
+ if (constant) {
+ *rvalue = constant;
+ return true;
+ }
+ return false;
+}
+
+void
+ir_constant_folding_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (ir_constant_fold(rvalue))
+ this->progress = true;
+}
+
+ir_visitor_status
+ir_constant_folding_visitor::visit_enter(ir_discard *ir)
+{
+ if (ir->condition) {
+ ir->condition->accept(this);
+ handle_rvalue(&ir->condition);
+
+ ir_constant *const_val = ir->condition->as_constant();
+ /* If the condition is constant, either remove the condition or
+       * remove the never-executed discard.
+ */
+ if (const_val) {
+ if (const_val->value.b[0])
+ ir->condition = NULL;
+ else
+ ir->remove();
+ this->progress = true;
+ }
+ }
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_constant_folding_visitor::visit_enter(ir_assignment *ir)
+{
+ ir->rhs->accept(this);
+ handle_rvalue(&ir->rhs);
+
+ if (ir->condition) {
+ ir->condition->accept(this);
+ handle_rvalue(&ir->condition);
+
+ ir_constant *const_val = ir->condition->as_constant();
+ /* If the condition is constant, either remove the condition or
+ * remove the never-executed assignment.
+ */
+ if (const_val) {
+ if (const_val->value.b[0])
+ ir->condition = NULL;
+ else
+ ir->remove();
+ this->progress = true;
+ }
+ }
+
+ /* Don't descend into the LHS because we want it to stay as a
+    * variable dereference. FINISHME: We probably should descend into it
+    * to fold array indices, though.
+ */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_constant_folding_visitor::visit_enter(ir_call *ir)
+{
+ /* Attempt to constant fold parameters */
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_rvalue *param_rval = (ir_rvalue *) actual_node;
+ ir_variable *sig_param = (ir_variable *) formal_node;
+
+ if (sig_param->data.mode == ir_var_function_in
+ || sig_param->data.mode == ir_var_const_in) {
+ ir_rvalue *new_param = param_rval;
+
+ handle_rvalue(&new_param);
+ if (new_param != param_rval) {
+ param_rval->replace_with(new_param);
+ }
+ }
+ }
+
+ /* Next, see if the call can be replaced with an assignment of a constant */
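+   /* For example, if all arguments to a built-in like max() fold to
+    * constants, the call becomes a plain assignment of the resulting
+    * constant to the return deref.
+    */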
+ ir_constant *const_val = ir->constant_expression_value(ralloc_parent(ir));
+
+ if (const_val != NULL) {
+ ir_assignment *assignment =
+ new(ralloc_parent(ir)) ir_assignment(ir->return_deref, const_val);
+ ir->replace_with(assignment);
+ }
+
+ return visit_continue_with_parent;
+}
+
+bool
+do_constant_folding(exec_list *instructions)
+{
+ ir_constant_folding_visitor constant_folding;
+
+ visit_list_elements(&constant_folding, instructions);
+
+ return constant_folding.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp
new file mode 100644
index 0000000000..674208348b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp
@@ -0,0 +1,539 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_constant_propagation.cpp
+ *
+ * Tracks assignments of constants to channels of variables, and
+ * replaces usage of those constant channels with direct usage of the constants.
+ *
+ * This can lead to constant folding and algebraic optimizations in
+ * those later expressions, while causing no increase in instruction
+ * count (due to constants being generally free to load from a
+ * constant push buffer or as instruction immediate values) and
+ * possibly reducing register pressure.
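+ *
+ * For example, in
+ *
+ *    v.xy = vec2(1.0, 2.0);
+ *    gl_FragColor = vec4(v.x, v.y, 0.0, 1.0);
+ *
+ * the reads of v.x and v.y can be replaced with the constants 1.0 and
+ * 2.0 directly.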
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_basic_block.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+namespace {
+
+class acp_entry : public exec_node
+{
+public:
+ /* override operator new from exec_node */
+ DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(acp_entry)
+
+ acp_entry(ir_variable *var, unsigned write_mask, ir_constant *constant)
+ {
+ assert(var);
+ assert(constant);
+ this->var = var;
+ this->write_mask = write_mask;
+ this->constant = constant;
+ this->initial_values = write_mask;
+ }
+
+ acp_entry(const acp_entry *src)
+ {
+ this->var = src->var;
+ this->write_mask = src->write_mask;
+ this->constant = src->constant;
+ this->initial_values = src->initial_values;
+ }
+
+ ir_variable *var;
+ ir_constant *constant;
+ unsigned write_mask;
+
+ /** Mask of values initially available in the constant. */
+ unsigned initial_values;
+};
+
+
+class ir_constant_propagation_visitor : public ir_rvalue_visitor {
+public:
+ ir_constant_propagation_visitor()
+ {
+ progress = false;
+ killed_all = false;
+ mem_ctx = ralloc_context(0);
+ this->lin_ctx = linear_alloc_parent(this->mem_ctx, 0);
+ this->acp = new(mem_ctx) exec_list;
+ this->kills = _mesa_pointer_hash_table_create(mem_ctx);
+ }
+ ~ir_constant_propagation_visitor()
+ {
+ ralloc_free(mem_ctx);
+ }
+
+ virtual ir_visitor_status visit_enter(class ir_loop *);
+ virtual ir_visitor_status visit_enter(class ir_function_signature *);
+ virtual ir_visitor_status visit_enter(class ir_function *);
+ virtual ir_visitor_status visit_leave(class ir_assignment *);
+ virtual ir_visitor_status visit_enter(class ir_call *);
+ virtual ir_visitor_status visit_enter(class ir_if *);
+
+ void add_constant(ir_assignment *ir);
+ void constant_folding(ir_rvalue **rvalue);
+ void constant_propagation(ir_rvalue **rvalue);
+ void kill(ir_variable *ir, unsigned write_mask);
+ void handle_if_block(exec_list *instructions, hash_table *kills, bool *killed_all);
+ void handle_loop(class ir_loop *, bool keep_acp);
+ void handle_rvalue(ir_rvalue **rvalue);
+
+ /** List of acp_entry: The available constants to propagate */
+ exec_list *acp;
+
+ /**
+ * Hash table of killed entries: maps variables to the mask of killed channels.
+ */
+ hash_table *kills;
+
+ bool progress;
+
+ bool killed_all;
+
+ void *mem_ctx;
+ void *lin_ctx;
+};
+
+
+void
+ir_constant_propagation_visitor::constant_folding(ir_rvalue **rvalue)
+{
+ if (this->in_assignee || *rvalue == NULL)
+ return;
+
+ if (ir_constant_fold(rvalue))
+ this->progress = true;
+
+ ir_dereference_variable *var_ref = (*rvalue)->as_dereference_variable();
+ if (var_ref && !var_ref->type->is_array()) {
+ ir_constant *constant =
+ var_ref->constant_expression_value(ralloc_parent(var_ref));
+ if (constant) {
+ *rvalue = constant;
+ this->progress = true;
+ }
+ }
+}
+
+void
+ir_constant_propagation_visitor::constant_propagation(ir_rvalue **rvalue)
+{
+ if (this->in_assignee || !*rvalue)
+ return;
+
+ const glsl_type *type = (*rvalue)->type;
+ if (!type->is_scalar() && !type->is_vector())
+ return;
+
+ ir_swizzle *swiz = NULL;
+ ir_dereference_variable *deref = (*rvalue)->as_dereference_variable();
+ if (!deref) {
+ swiz = (*rvalue)->as_swizzle();
+ if (!swiz)
+ return;
+
+ deref = swiz->val->as_dereference_variable();
+ if (!deref)
+ return;
+ }
+
+ ir_constant_data data;
+ memset(&data, 0, sizeof(data));
+
+ for (unsigned int i = 0; i < type->components(); i++) {
+ int channel;
+ acp_entry *found = NULL;
+
+ if (swiz) {
+ switch (i) {
+ case 0: channel = swiz->mask.x; break;
+ case 1: channel = swiz->mask.y; break;
+ case 2: channel = swiz->mask.z; break;
+ case 3: channel = swiz->mask.w; break;
+ default: assert(!"shouldn't be reached"); channel = 0; break;
+ }
+ } else {
+ channel = i;
+ }
+
+ foreach_in_list(acp_entry, entry, this->acp) {
+ if (entry->var == deref->var && entry->write_mask & (1 << channel)) {
+ found = entry;
+ break;
+ }
+ }
+
+ if (!found)
+ return;
+
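+      /* The constant only stores the channels that were written when the
+       * entry was created, so map 'channel' to its position among the
+       * initially-written channels.
+       */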
+ int rhs_channel = 0;
+ for (int j = 0; j < 4; j++) {
+ if (j == channel)
+ break;
+ if (found->initial_values & (1 << j))
+ rhs_channel++;
+ }
+
+ switch (type->base_type) {
+ case GLSL_TYPE_FLOAT:
+ data.f[i] = found->constant->value.f[rhs_channel];
+ break;
+ case GLSL_TYPE_FLOAT16:
+ data.f16[i] = found->constant->value.f16[rhs_channel];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ data.d[i] = found->constant->value.d[rhs_channel];
+ break;
+ case GLSL_TYPE_INT:
+ data.i[i] = found->constant->value.i[rhs_channel];
+ break;
+ case GLSL_TYPE_UINT:
+ data.u[i] = found->constant->value.u[rhs_channel];
+ break;
+ case GLSL_TYPE_BOOL:
+ data.b[i] = found->constant->value.b[rhs_channel];
+ break;
+ case GLSL_TYPE_UINT64:
+ data.u64[i] = found->constant->value.u64[rhs_channel];
+ break;
+ case GLSL_TYPE_INT64:
+ data.i64[i] = found->constant->value.i64[rhs_channel];
+ break;
+ default:
+ assert(!"not reached");
+ break;
+ }
+ }
+
+ *rvalue = new(ralloc_parent(deref)) ir_constant(type, &data);
+ this->progress = true;
+}
+
+void
+ir_constant_propagation_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ constant_propagation(rvalue);
+ constant_folding(rvalue);
+}
+
+ir_visitor_status
+ir_constant_propagation_visitor::visit_enter(ir_function_signature *ir)
+{
+ /* Treat entry into a function signature as a completely separate
+ * block. Any instructions at global scope will be shuffled into
+ * main() at link time, so they're irrelevant to us.
+ */
+ exec_list *orig_acp = this->acp;
+ hash_table *orig_kills = this->kills;
+ bool orig_killed_all = this->killed_all;
+
+ this->acp = new(mem_ctx) exec_list;
+ this->kills = _mesa_pointer_hash_table_create(mem_ctx);
+ this->killed_all = false;
+
+ visit_list_elements(this, &ir->body);
+
+ this->kills = orig_kills;
+ this->acp = orig_acp;
+ this->killed_all = orig_killed_all;
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_constant_propagation_visitor::visit_leave(ir_assignment *ir)
+{
+ constant_folding(&ir->rhs);
+
+ if (this->in_assignee)
+ return visit_continue;
+
+ unsigned kill_mask = ir->write_mask;
+ if (ir->lhs->as_dereference_array()) {
+ /* The LHS of the assignment uses an array indexing operator (e.g. v[i]
+ * = ...;). Since we only try to constant propagate vectors and
+ * scalars, this means that either (a) array indexing is being used to
+    * select a vector component, or (b) the variable in question is neither
+    * a scalar nor a vector, so we don't care about it. In the former case,
+ * we want to kill the whole vector, since in general we can't predict
+ * which vector component will be selected by array indexing. In the
+ * latter case, it doesn't matter what we do, so go ahead and kill the
+ * whole variable anyway.
+ *
+ * Note that if the array index is constant (e.g. v[2] = ...;), we could
+ * in principle be smarter, but we don't need to, because a future
+ * optimization pass will convert it to a simple assignment with the
+ * correct mask.
+ */
+ kill_mask = ~0;
+ }
+ kill(ir->lhs->variable_referenced(), kill_mask);
+
+ add_constant(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_constant_propagation_visitor::visit_enter(ir_function *ir)
+{
+ (void) ir;
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_constant_propagation_visitor::visit_enter(ir_call *ir)
+{
+ /* Do constant propagation on call parameters, but skip any out params */
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_variable *sig_param = (ir_variable *) formal_node;
+ ir_rvalue *param = (ir_rvalue *) actual_node;
+ if (sig_param->data.mode != ir_var_function_out
+ && sig_param->data.mode != ir_var_function_inout) {
+ ir_rvalue *new_param = param;
+ handle_rvalue(&new_param);
+ if (new_param != param)
+ param->replace_with(new_param);
+ else
+ param->accept(this);
+ }
+ }
+
+   /* Since we're unlinked, we don't (necessarily) know the side effects of
+    * this call. So kill all entries.
+ */
+ acp->make_empty();
+ this->killed_all = true;
+
+ return visit_continue_with_parent;
+}
+
+void
+ir_constant_propagation_visitor::handle_if_block(exec_list *instructions, hash_table *kills, bool *killed_all)
+{
+ exec_list *orig_acp = this->acp;
+ hash_table *orig_kills = this->kills;
+ bool orig_killed_all = this->killed_all;
+
+ this->acp = new(mem_ctx) exec_list;
+ this->kills = kills;
+ this->killed_all = false;
+
+   /* Populate the initial acp with a copy of the original */
+ foreach_in_list(acp_entry, a, orig_acp) {
+ this->acp->push_tail(new(this->lin_ctx) acp_entry(a));
+ }
+
+ visit_list_elements(this, instructions);
+
+ *killed_all = this->killed_all;
+ this->kills = orig_kills;
+ this->acp = orig_acp;
+ this->killed_all = orig_killed_all;
+}
+
+ir_visitor_status
+ir_constant_propagation_visitor::visit_enter(ir_if *ir)
+{
+ ir->condition->accept(this);
+ handle_rvalue(&ir->condition);
+
+ hash_table *new_kills = _mesa_pointer_hash_table_create(mem_ctx);
+ bool then_killed_all = false;
+ bool else_killed_all = false;
+
+ handle_if_block(&ir->then_instructions, new_kills, &then_killed_all);
+ handle_if_block(&ir->else_instructions, new_kills, &else_killed_all);
+
+ if (then_killed_all || else_killed_all) {
+ acp->make_empty();
+ killed_all = true;
+ } else {
+ hash_table_foreach(new_kills, htk)
+ kill((ir_variable *) htk->key, (uintptr_t) htk->data);
+ }
+
+ _mesa_hash_table_destroy(new_kills, NULL);
+
+ /* handle_if_block() already descended into the children. */
+ return visit_continue_with_parent;
+}
+
+void
+ir_constant_propagation_visitor::handle_loop(ir_loop *ir, bool keep_acp)
+{
+ exec_list *orig_acp = this->acp;
+ hash_table *orig_kills = this->kills;
+ bool orig_killed_all = this->killed_all;
+
+ this->acp = new(mem_ctx) exec_list;
+ this->kills = _mesa_pointer_hash_table_create(mem_ctx);
+ this->killed_all = false;
+
+ if (keep_acp) {
+ foreach_in_list(acp_entry, a, orig_acp) {
+ this->acp->push_tail(new(this->lin_ctx) acp_entry(a));
+ }
+ }
+
+ visit_list_elements(this, &ir->body_instructions);
+
+ if (this->killed_all) {
+ orig_acp->make_empty();
+ }
+
+ hash_table *new_kills = this->kills;
+ this->kills = orig_kills;
+ this->acp = orig_acp;
+ this->killed_all = this->killed_all || orig_killed_all;
+
+ hash_table_foreach(new_kills, htk) {
+ kill((ir_variable *) htk->key, (uintptr_t) htk->data);
+ }
+}
+
+ir_visitor_status
+ir_constant_propagation_visitor::visit_enter(ir_loop *ir)
+{
+ /* Make a conservative first pass over the loop with an empty ACP set.
+ * This also removes any killed entries from the original ACP set.
+ */
+ handle_loop(ir, false);
+
+ /* Then, run it again with the real ACP set, minus any killed entries.
+ * This takes care of propagating values from before the loop into it.
+ */
+ handle_loop(ir, true);
+
+ /* already descended into the children. */
+ return visit_continue_with_parent;
+}
+
+void
+ir_constant_propagation_visitor::kill(ir_variable *var, unsigned write_mask)
+{
+ assert(var != NULL);
+
+ /* We don't track non-vectors. */
+ if (!var->type->is_vector() && !var->type->is_scalar())
+ return;
+
+ /* Remove any entries currently in the ACP for this kill. */
+ foreach_in_list_safe(acp_entry, entry, this->acp) {
+ if (entry->var == var) {
+ entry->write_mask &= ~write_mask;
+ if (entry->write_mask == 0)
+ entry->remove();
+ }
+ }
+
+ /* Add this writemask of the variable to the hash table of killed
+ * variables in this block.
+ */
+ hash_entry *kill_hash_entry = _mesa_hash_table_search(this->kills, var);
+ if (kill_hash_entry) {
+ uintptr_t new_write_mask = ((uintptr_t) kill_hash_entry->data) | write_mask;
+ kill_hash_entry->data = (void *) new_write_mask;
+ return;
+ }
+ /* Not already in the hash table. Make new entry. */
+ _mesa_hash_table_insert(this->kills, var, (void *) uintptr_t(write_mask));
+}
+
+/**
+ * Adds an entry to the available constant list if it's a plain assignment
+ * of a constant to a variable.
+ */
+void
+ir_constant_propagation_visitor::add_constant(ir_assignment *ir)
+{
+ acp_entry *entry;
+
+ if (ir->condition)
+ return;
+
+ if (!ir->write_mask)
+ return;
+
+ ir_dereference_variable *deref = ir->lhs->as_dereference_variable();
+ ir_constant *constant = ir->rhs->as_constant();
+
+ if (!deref || !constant)
+ return;
+
+ /* Only do constant propagation on vectors. Constant matrices,
+ * arrays, or structures would require more work elsewhere.
+ */
+ if (!deref->var->type->is_vector() && !deref->var->type->is_scalar())
+ return;
+
+   /* We can't do constant propagation on buffer variables, since the
+    * underlying memory storage is shared across multiple threads and we
+    * can't be sure that the variable's value isn't modified between this
+    * assignment and the next instruction where its value is read.
+ */
+ if (deref->var->data.mode == ir_var_shader_storage ||
+ deref->var->data.mode == ir_var_shader_shared)
+ return;
+
+ entry = new(this->lin_ctx) acp_entry(deref->var, ir->write_mask, constant);
+ this->acp->push_tail(entry);
+}
+
+} /* unnamed namespace */
+
+/**
+ * Does a constant propagation pass on the code present in the instruction stream.
+ */
+bool
+do_constant_propagation(exec_list *instructions)
+{
+ ir_constant_propagation_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp
new file mode 100644
index 0000000000..3f2b6a04b9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp
@@ -0,0 +1,242 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_constant_variable.cpp
+ *
+ * Marks variables assigned a single constant value over the course
+ * of the program as constant.
+ *
+ * The goal here is to trigger further constant folding and then dead
+ * code elimination. This is common with vector/matrix constructors
+ * and calls to builtin functions.
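+ *
+ * For example, in
+ *
+ *    vec4 color = vec4(1.0, 0.0, 0.0, 1.0);
+ *
+ * color is written exactly once with a constant value, so later passes
+ * can treat it as a constant.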
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+namespace {
+
+struct assignment_entry {
+ int assignment_count;
+ ir_variable *var;
+ ir_constant *constval;
+ bool our_scope;
+};
+
+class ir_constant_variable_visitor : public ir_hierarchical_visitor {
+public:
+ using ir_hierarchical_visitor::visit;
+ using ir_hierarchical_visitor::visit_enter;
+
+ virtual ir_visitor_status visit_enter(ir_dereference_variable *);
+ virtual ir_visitor_status visit(ir_variable *);
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_call *);
+
+ struct hash_table *ht;
+};
+
+} /* unnamed namespace */
+
+static struct assignment_entry *
+get_assignment_entry(ir_variable *var, struct hash_table *ht)
+{
+ struct hash_entry *hte = _mesa_hash_table_search(ht, var);
+ struct assignment_entry *entry;
+
+ if (hte) {
+ entry = (struct assignment_entry *) hte->data;
+ } else {
+ entry = (struct assignment_entry *) calloc(1, sizeof(*entry));
+ entry->var = var;
+ _mesa_hash_table_insert(ht, var, entry);
+ }
+
+ return entry;
+}
+
+ir_visitor_status
+ir_constant_variable_visitor::visit(ir_variable *ir)
+{
+ struct assignment_entry *entry = get_assignment_entry(ir, this->ht);
+ entry->our_scope = true;
+ return visit_continue;
+}
+
+/* Skip derefs of variables so that we can detect declarations. */
+ir_visitor_status
+ir_constant_variable_visitor::visit_enter(ir_dereference_variable *ir)
+{
+ (void)ir;
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_constant_variable_visitor::visit_enter(ir_assignment *ir)
+{
+ ir_constant *constval;
+ struct assignment_entry *entry;
+
+ entry = get_assignment_entry(ir->lhs->variable_referenced(), this->ht);
+ assert(entry);
+ entry->assignment_count++;
+
+ /* If there's more than one assignment, don't bother - we won't do anything
+ * with this variable anyway, and continuing just wastes memory cloning
+ * constant expressions.
+ */
+ if (entry->assignment_count > 1)
+ return visit_continue;
+
+ /* If it's already constant, don't do the work. */
+ if (entry->var->constant_value)
+ return visit_continue;
+
+ /* OK, now find if we actually have all the right conditions for
+ * this to be a constant value assigned to the var.
+ */
+ if (ir->condition)
+ return visit_continue;
+
+ ir_variable *var = ir->whole_variable_written();
+ if (!var)
+ return visit_continue;
+
+ /* Ignore buffer variables, since the underlying storage is shared
+ * and we can't be sure that this variable won't be written by another
+ * thread.
+ */
+ if (var->data.mode == ir_var_shader_storage ||
+ var->data.mode == ir_var_shader_shared)
+ return visit_continue;
+
+ constval = ir->rhs->constant_expression_value(ralloc_parent(ir));
+ if (!constval)
+ return visit_continue;
+
+ /* Mark this entry as having a constant assignment (if the
+ * assignment count doesn't go >1). do_constant_variable will fix
+ * up the variable with the constant value later.
+ */
+ entry->constval = constval;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_constant_variable_visitor::visit_enter(ir_call *ir)
+{
+ /* Mark any out parameters as assigned to */
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_rvalue *param_rval = (ir_rvalue *) actual_node;
+ ir_variable *param = (ir_variable *) formal_node;
+
+ if (param->data.mode == ir_var_function_out ||
+ param->data.mode == ir_var_function_inout) {
+ ir_variable *var = param_rval->variable_referenced();
+ struct assignment_entry *entry;
+
+ assert(var);
+ entry = get_assignment_entry(var, this->ht);
+ entry->assignment_count++;
+ }
+
+ /* We don't know if the variable passed to this function has been
+ * assigned a value or if it is undefined, so for now we always assume
+ * it has been assigned a value. Once functions have been inlined any
+ * further potential optimisations will be taken care of.
+ */
+ struct assignment_entry *entry;
+ entry = get_assignment_entry(param, this->ht);
+ entry->assignment_count++;
+ }
+
+ /* Mark the return storage as having been assigned to */
+ if (ir->return_deref != NULL) {
+ ir_variable *var = ir->return_deref->variable_referenced();
+ struct assignment_entry *entry;
+
+ assert(var);
+ entry = get_assignment_entry(var, this->ht);
+ entry->assignment_count++;
+ }
+
+ return visit_continue;
+}
+
+/**
+ * Does a constant-variable marking pass on the code present in the instruction stream.
+ */
+bool
+do_constant_variable(exec_list *instructions)
+{
+ bool progress = false;
+ ir_constant_variable_visitor v;
+
+ v.ht = _mesa_pointer_hash_table_create(NULL);
+ v.run(instructions);
+
+ hash_table_foreach(v.ht, hte) {
+ struct assignment_entry *entry = (struct assignment_entry *) hte->data;
+
+ if (entry->assignment_count == 1 && entry->constval && entry->our_scope) {
+ entry->var->constant_value = entry->constval;
+ progress = true;
+ }
+ hte->data = NULL;
+ free(entry);
+ }
+ _mesa_hash_table_destroy(v.ht, NULL);
+
+ return progress;
+}
+
+bool
+do_constant_variable_unlinked(exec_list *instructions)
+{
+ bool progress = false;
+
+ foreach_in_list(ir_instruction, ir, instructions) {
+ ir_function *f = ir->as_function();
+ if (f) {
+ foreach_in_list(ir_function_signature, sig, &f->signatures) {
+ if (do_constant_variable(&sig->body))
+ progress = true;
+ }
+ }
+ }
+
+ return progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp
new file mode 100644
index 0000000000..081909903e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp
@@ -0,0 +1,756 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_copy_propagation_elements.cpp
+ *
+ * Replaces usage of recently-copied components of variables with the
+ * previous copy of the variable.
+ *
+ * This should reduce the number of MOV instructions in the generated
+ * programs and help trigger other optimizations that live at the GLSL
+ * level.
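+ *
+ * For example, in
+ *
+ *    a.xyz = b.xyz;
+ *    gl_FragColor = vec4(a.x, a.y, a.z, 1.0);
+ *
+ * the reads of a's components can be rewritten to read b's components
+ * directly, leaving the copy dead.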
+ */
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_basic_block.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+#include "util/set.h"
+
+static bool debug = false;
+
+namespace {
+
+class acp_entry
+{
+public:
+ DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(acp_entry)
+
+ /* If set, rhs_full indicates that this ACP entry represents a
+ * whole-variable copy. The rhs_element[] array will still be filled,
+    * to allow swizzling from its components in case the variable
+    * was a vector (and to simplify some of the erase() and
+    * write_elements() logic).
+ */
+
+ ir_variable *rhs_full;
+ ir_variable *rhs_element[4];
+ unsigned rhs_channel[4];
+
+ /* Set of variables that use the variable associated with this acp_entry as
+ * RHS. This has the "reverse references" of rhs_full/rhs_element. It is
+ * used to speed up invalidating those references when the acp_entry
+ * changes.
+ */
+ set *dsts;
+};
+
+class copy_propagation_state {
+public:
+ DECLARE_RZALLOC_CXX_OPERATORS(copy_propagation_state);
+
+ static
+ copy_propagation_state* create(void *mem_ctx)
+ {
+ return new (mem_ctx) copy_propagation_state(NULL);
+ }
+
+ copy_propagation_state* clone()
+ {
+ return new (ralloc_parent(this)) copy_propagation_state(this);
+ }
+
+ void erase_all()
+ {
+ /* Individual elements were allocated from a linear allocator, so will
+ * be destroyed when the state is destroyed.
+ */
+ _mesa_hash_table_clear(acp, NULL);
+ fallback = NULL;
+ }
+
+ void erase(ir_variable *var, unsigned write_mask)
+ {
+ acp_entry *entry = pull_acp(var);
+ entry->rhs_full = NULL;
+
+ for (int i = 0; i < 4; i++) {
+ if (!entry->rhs_element[i])
+ continue;
+ if ((write_mask & (1 << i)) == 0)
+ continue;
+
+ ir_variable *to_remove = entry->rhs_element[i];
+ entry->rhs_element[i] = NULL;
+ remove_unused_var_from_dsts(entry, var, to_remove);
+ }
+
+ /* TODO: Check write mask, and possibly not clear everything. */
+
+ /* For any usage of our variable on the RHS, clear it out. */
+ set_foreach(entry->dsts, set_entry) {
+ ir_variable *dst_var = (ir_variable *)set_entry->key;
+ acp_entry *dst_entry = pull_acp(dst_var);
+ for (int i = 0; i < 4; i++) {
+ if (dst_entry->rhs_element[i] == var)
+ dst_entry->rhs_element[i] = NULL;
+ }
+ if (dst_entry->rhs_full == var)
+ dst_entry->rhs_full = NULL;
+ _mesa_set_remove(entry->dsts, set_entry);
+ }
+ }
+
+ acp_entry *read(ir_variable *var)
+ {
+ for (copy_propagation_state *s = this; s != NULL; s = s->fallback) {
+ hash_entry *ht_entry = _mesa_hash_table_search(s->acp, var);
+ if (ht_entry)
+ return (acp_entry *) ht_entry->data;
+ }
+ return NULL;
+ }
+
+ void write_elements(ir_variable *lhs, ir_variable *rhs, unsigned write_mask, int swizzle[4])
+ {
+ acp_entry *lhs_entry = pull_acp(lhs);
+ lhs_entry->rhs_full = NULL;
+
+ for (int i = 0; i < 4; i++) {
+ if ((write_mask & (1 << i)) == 0)
+ continue;
+ ir_variable *to_remove = lhs_entry->rhs_element[i];
+ lhs_entry->rhs_element[i] = rhs;
+ lhs_entry->rhs_channel[i] = swizzle[i];
+
+ remove_unused_var_from_dsts(lhs_entry, lhs, to_remove);
+ }
+
+ acp_entry *rhs_entry = pull_acp(rhs);
+ _mesa_set_add(rhs_entry->dsts, lhs);
+ }
+
+ void write_full(ir_variable *lhs, ir_variable *rhs)
+ {
+ acp_entry *lhs_entry = pull_acp(lhs);
+ if (lhs_entry->rhs_full == rhs)
+ return;
+
+ if (lhs_entry->rhs_full) {
+ remove_from_dsts(lhs_entry->rhs_full, lhs);
+ } else if (lhs->type->is_vector()) {
+ for (int i = 0; i < 4; i++) {
+ if (lhs_entry->rhs_element[i])
+ remove_from_dsts(lhs_entry->rhs_element[i], lhs);
+ }
+ }
+
+ lhs_entry->rhs_full = rhs;
+ acp_entry *rhs_entry = pull_acp(rhs);
+ _mesa_set_add(rhs_entry->dsts, lhs);
+
+ if (lhs->type->is_vector()) {
+ for (int i = 0; i < 4; i++) {
+ lhs_entry->rhs_element[i] = rhs;
+ lhs_entry->rhs_channel[i] = i;
+ }
+ }
+ }
+
+ void remove_unused_var_from_dsts(acp_entry *lhs_entry, ir_variable *lhs, ir_variable *var)
+ {
+ if (!var)
+ return;
+
+ /* If lhs still uses var, don't remove anything. */
+ for (int j = 0; j < 4; j++) {
+ if (lhs_entry->rhs_element[j] == var)
+ return;
+ }
+
+ acp_entry *element = pull_acp(var);
+ assert(element);
+ _mesa_set_remove_key(element->dsts, lhs);
+ }
+
+private:
+ explicit copy_propagation_state(copy_propagation_state *fallback)
+ {
+ this->fallback = fallback;
+ /* Use 'this' as context for the table, no explicit destruction
+ * needed later.
+ */
+ acp = _mesa_pointer_hash_table_create(this);
+ lin_ctx = linear_alloc_parent(this, 0);
+ }
+
+ acp_entry *pull_acp(ir_variable *var)
+ {
+ hash_entry *ht_entry = _mesa_hash_table_search(acp, var);
+ if (ht_entry)
+ return (acp_entry *) ht_entry->data;
+
+ /* If not found, create one and copy data from fallback if available. */
+ acp_entry *entry = new(lin_ctx) acp_entry();
+ _mesa_hash_table_insert(acp, var, entry);
+
+ bool found = false;
+ for (copy_propagation_state *s = fallback; s != NULL; s = s->fallback) {
+ hash_entry *fallback_ht_entry = _mesa_hash_table_search(s->acp, var);
+ if (fallback_ht_entry) {
+ acp_entry *fallback_entry = (acp_entry *) fallback_ht_entry->data;
+ *entry = *fallback_entry;
+ entry->dsts = _mesa_set_clone(fallback_entry->dsts, this);
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ entry->dsts = _mesa_pointer_set_create(this);
+ }
+
+ return entry;
+ }
+
+ void
+ remove_from_dsts(ir_variable *var, ir_variable *to_remove)
+ {
+ acp_entry *entry = pull_acp(var);
+ assert(entry);
+ _mesa_set_remove_key(entry->dsts, to_remove);
+ }
+
+ /** Available Copy to Propagate table, from variable to the entry
+ * containing the current sources that can be used. */
+ hash_table *acp;
+
+ /** When a state is cloned, entries are copied on demand from fallback. */
+ copy_propagation_state *fallback;
+
+ void *lin_ctx;
+};
+
+class kill_entry : public exec_node
+{
+public:
+ /* override operator new from exec_node */
+ DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(kill_entry)
+
+ kill_entry(ir_variable *var, int write_mask)
+ {
+ this->var = var;
+ this->write_mask = write_mask;
+ }
+
+ ir_variable *var;
+ unsigned int write_mask;
+};
+
+class ir_copy_propagation_elements_visitor : public ir_rvalue_visitor {
+public:
+ ir_copy_propagation_elements_visitor()
+ {
+ this->progress = false;
+ this->killed_all = false;
+ this->mem_ctx = ralloc_context(NULL);
+ this->lin_ctx = linear_alloc_parent(this->mem_ctx, 0);
+ this->shader_mem_ctx = NULL;
+ this->kills = new(mem_ctx) exec_list;
+ this->state = copy_propagation_state::create(mem_ctx);
+ }
+ ~ir_copy_propagation_elements_visitor()
+ {
+ ralloc_free(mem_ctx);
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+
+ void handle_loop(ir_loop *, bool keep_acp);
+ virtual ir_visitor_status visit_enter(class ir_loop *);
+ virtual ir_visitor_status visit_enter(class ir_function_signature *);
+ virtual ir_visitor_status visit_leave(class ir_assignment *);
+ virtual ir_visitor_status visit_enter(class ir_call *);
+ virtual ir_visitor_status visit_enter(class ir_if *);
+ virtual ir_visitor_status visit_leave(class ir_swizzle *);
+
+ void handle_rvalue(ir_rvalue **rvalue);
+
+ void add_copy(ir_assignment *ir);
+ void kill(kill_entry *k);
+ void handle_if_block(exec_list *instructions, exec_list *kills, bool *killed_all);
+
+ copy_propagation_state *state;
+
+ /**
+ * List of kill_entry: The variables whose values were killed in this
+ * block.
+ */
+ exec_list *kills;
+
+ bool progress;
+
+ bool killed_all;
+
+ /* Context for our local data structures. */
+ void *mem_ctx;
+ void *lin_ctx;
+ /* Context for allocating new shader nodes. */
+ void *shader_mem_ctx;
+};
+
+} /* unnamed namespace */
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit(ir_dereference_variable *ir)
+{
+ if (this->in_assignee)
+ return visit_continue;
+
+ const acp_entry *entry = state->read(ir->var);
+ if (entry && entry->rhs_full) {
+ ir->var = (ir_variable *) entry->rhs_full;
+ progress = true;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit_enter(ir_function_signature *ir)
+{
+ /* Treat entry into a function signature as a completely separate
+ * block. Any instructions at global scope will be shuffled into
+ * main() at link time, so they're irrelevant to us.
+ */
+ exec_list *orig_kills = this->kills;
+ bool orig_killed_all = this->killed_all;
+
+ this->kills = new(mem_ctx) exec_list;
+ this->killed_all = false;
+
+ copy_propagation_state *orig_state = state;
+ this->state = copy_propagation_state::create(mem_ctx);
+
+ visit_list_elements(this, &ir->body);
+
+ delete this->state;
+ this->state = orig_state;
+
+ ralloc_free(this->kills);
+ this->kills = orig_kills;
+ this->killed_all = orig_killed_all;
+
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit_leave(ir_assignment *ir)
+{
+ ir_dereference_variable *lhs = ir->lhs->as_dereference_variable();
+ ir_variable *var = ir->lhs->variable_referenced();
+
+ kill_entry *k;
+
+ if (lhs && var->type->is_vector())
+ k = new(this->lin_ctx) kill_entry(var, ir->write_mask);
+ else
+ k = new(this->lin_ctx) kill_entry(var, ~0);
+
+ kill(k);
+
+ add_copy(ir);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit_leave(ir_swizzle *)
+{
+ /* Don't visit the values of swizzles since they are handled while
+ * visiting the swizzle itself.
+ */
+ return visit_continue;
+}
+
+/**
+ * Replaces dereferences of ACP LHS variables with their RHS source variables.
+ *
+ * This is where the actual copy propagation occurs. Note that the
+ * rewriting of ir_dereference means that the ir_dereference instance
+ * must not be shared by multiple IR operations!
+ */
+void
+ir_copy_propagation_elements_visitor::handle_rvalue(ir_rvalue **ir)
+{
+ int swizzle_chan[4];
+ ir_dereference_variable *deref_var;
+ ir_variable *source[4] = {NULL, NULL, NULL, NULL};
+ int source_chan[4] = {0, 0, 0, 0};
+ int chans;
+ bool noop_swizzle = true;
+
+ if (!*ir)
+ return;
+
+ ir_swizzle *swizzle = (*ir)->as_swizzle();
+ if (swizzle) {
+ deref_var = swizzle->val->as_dereference_variable();
+ if (!deref_var)
+ return;
+
+ swizzle_chan[0] = swizzle->mask.x;
+ swizzle_chan[1] = swizzle->mask.y;
+ swizzle_chan[2] = swizzle->mask.z;
+ swizzle_chan[3] = swizzle->mask.w;
+ chans = swizzle->type->vector_elements;
+ } else {
+ deref_var = (*ir)->as_dereference_variable();
+ if (!deref_var)
+ return;
+
+ swizzle_chan[0] = 0;
+ swizzle_chan[1] = 1;
+ swizzle_chan[2] = 2;
+ swizzle_chan[3] = 3;
+ chans = deref_var->type->vector_elements;
+ }
+
+ if (this->in_assignee)
+ return;
+
+ ir_variable *var = deref_var->var;
+
+ /* Try to find ACP entries covering swizzle_chan[], hoping they're
+ * the same source variable.
+ */
+
+ const acp_entry *entry = state->read(var);
+ if (entry) {
+ for (int c = 0; c < chans; c++) {
+ unsigned index = swizzle_chan[c];
+ ir_variable *src = entry->rhs_element[index];
+ if (!src)
+ continue;
+ source[c] = src;
+ source_chan[c] = entry->rhs_channel[index];
+ if (source_chan[c] != swizzle_chan[c])
+ noop_swizzle = false;
+ }
+ }
+
+ /* Make sure all channels are copying from the same source variable. */
+ if (!source[0])
+ return;
+ for (int c = 1; c < chans; c++) {
+ if (source[c] != source[0])
+ return;
+ }
+
+ if (!shader_mem_ctx)
+ shader_mem_ctx = ralloc_parent(deref_var);
+
+ /* Don't pointlessly replace the rvalue with itself (or a noop swizzle
+ * of itself, which would just be deleted by opt_noop_swizzle).
+ */
+ if (source[0] == var && noop_swizzle)
+ return;
+
+ if (debug) {
+ printf("Copy propagation from:\n");
+ (*ir)->print();
+ }
+
+ deref_var = new(shader_mem_ctx) ir_dereference_variable(source[0]);
+ *ir = new(shader_mem_ctx) ir_swizzle(deref_var,
+ source_chan[0],
+ source_chan[1],
+ source_chan[2],
+ source_chan[3],
+ chans);
+ progress = true;
+
+ if (debug) {
+ printf("to:\n");
+ (*ir)->print();
+ printf("\n");
+ }
+}
+
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit_enter(ir_call *ir)
+{
+ /* Do copy propagation on call parameters, but skip any out params */
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_variable *sig_param = (ir_variable *) formal_node;
+ ir_rvalue *ir = (ir_rvalue *) actual_node;
+ if (sig_param->data.mode != ir_var_function_out
+ && sig_param->data.mode != ir_var_function_inout) {
+ ir->accept(this);
+ }
+ }
+
+ if (!ir->callee->is_intrinsic()) {
+ state->erase_all();
+ this->killed_all = true;
+ } else {
+ if (ir->return_deref) {
+ kill(new(this->lin_ctx) kill_entry(ir->return_deref->var, ~0));
+ }
+
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_variable *sig_param = (ir_variable *) formal_node;
+ if (sig_param->data.mode == ir_var_function_out ||
+ sig_param->data.mode == ir_var_function_inout) {
+ ir_rvalue *ir = (ir_rvalue *) actual_node;
+ ir_variable *var = ir->variable_referenced();
+ kill(new(this->lin_ctx) kill_entry(var, ~0));
+ }
+ }
+ }
+
+ return visit_continue_with_parent;
+}
+
+void
+ir_copy_propagation_elements_visitor::handle_if_block(exec_list *instructions, exec_list *kills, bool *killed_all)
+{
+ exec_list *orig_kills = this->kills;
+ bool orig_killed_all = this->killed_all;
+
+ this->kills = kills;
+ this->killed_all = false;
+
+ /* Populate the initial acp with a copy of the original */
+ copy_propagation_state *orig_state = state;
+ this->state = orig_state->clone();
+
+ visit_list_elements(this, instructions);
+
+ delete this->state;
+ this->state = orig_state;
+
+ *killed_all = this->killed_all;
+ this->kills = orig_kills;
+ this->killed_all = orig_killed_all;
+}
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit_enter(ir_if *ir)
+{
+ ir->condition->accept(this);
+
+ exec_list *new_kills = new(mem_ctx) exec_list;
+ bool then_killed_all = false;
+ bool else_killed_all = false;
+
+ handle_if_block(&ir->then_instructions, new_kills, &then_killed_all);
+ handle_if_block(&ir->else_instructions, new_kills, &else_killed_all);
+
+ if (then_killed_all || else_killed_all) {
+ state->erase_all();
+ killed_all = true;
+ } else {
+ foreach_in_list_safe(kill_entry, k, new_kills)
+ kill(k);
+ }
+
+ ralloc_free(new_kills);
+
+ /* handle_if_block() already descended into the children. */
+ return visit_continue_with_parent;
+}
+
+void
+ir_copy_propagation_elements_visitor::handle_loop(ir_loop *ir, bool keep_acp)
+{
+ exec_list *orig_kills = this->kills;
+ bool orig_killed_all = this->killed_all;
+
+ this->kills = new(mem_ctx) exec_list;
+ this->killed_all = false;
+
+ copy_propagation_state *orig_state = state;
+
+ if (keep_acp) {
+ /* Populate the initial acp with a copy of the original */
+ this->state = orig_state->clone();
+ } else {
+ this->state = copy_propagation_state::create(mem_ctx);
+ }
+
+ visit_list_elements(this, &ir->body_instructions);
+
+ delete this->state;
+ this->state = orig_state;
+
+ if (this->killed_all)
+ this->state->erase_all();
+
+ exec_list *new_kills = this->kills;
+ this->kills = orig_kills;
+ this->killed_all = this->killed_all || orig_killed_all;
+
+ foreach_in_list_safe(kill_entry, k, new_kills) {
+ kill(k);
+ }
+
+ ralloc_free(new_kills);
+}
+
+ir_visitor_status
+ir_copy_propagation_elements_visitor::visit_enter(ir_loop *ir)
+{
+ handle_loop(ir, false);
+ handle_loop(ir, true);
+
+ /* already descended into the children. */
+ return visit_continue_with_parent;
+}
+
+/* Remove any entries currently in the ACP for this kill. */
+void
+ir_copy_propagation_elements_visitor::kill(kill_entry *k)
+{
+ state->erase(k->var, k->write_mask);
+
+ /* If we were on a list, remove ourselves before inserting */
+ if (k->next)
+ k->remove();
+
+ this->kills->push_tail(k);
+}
+
+/**
+ * Adds directly-copied channels between vector variables to the available
+ * copy propagation list.
+ */
+void
+ir_copy_propagation_elements_visitor::add_copy(ir_assignment *ir)
+{
+ if (ir->condition)
+ return;
+
+ {
+ ir_variable *lhs_var = ir->whole_variable_written();
+ ir_dereference_variable *rhs = ir->rhs->as_dereference_variable();
+
+ if (lhs_var != NULL && rhs && rhs->var != NULL && lhs_var != rhs->var) {
+ if (lhs_var->data.mode == ir_var_shader_storage ||
+ lhs_var->data.mode == ir_var_shader_shared ||
+ rhs->var->data.mode == ir_var_shader_storage ||
+ rhs->var->data.mode == ir_var_shader_shared ||
+ lhs_var->data.precise != rhs->var->data.precise) {
+ return;
+ }
+ state->write_full(lhs_var, rhs->var);
+ return;
+ }
+ }
+
+ int orig_swizzle[4] = {0, 1, 2, 3};
+ int swizzle[4];
+
+ ir_dereference_variable *lhs = ir->lhs->as_dereference_variable();
+ if (!lhs || !(lhs->type->is_scalar() || lhs->type->is_vector()))
+ return;
+
+ if (lhs->var->data.mode == ir_var_shader_storage ||
+ lhs->var->data.mode == ir_var_shader_shared)
+ return;
+
+ ir_dereference_variable *rhs = ir->rhs->as_dereference_variable();
+ if (!rhs) {
+ ir_swizzle *swiz = ir->rhs->as_swizzle();
+ if (!swiz)
+ return;
+
+ rhs = swiz->val->as_dereference_variable();
+ if (!rhs)
+ return;
+
+ orig_swizzle[0] = swiz->mask.x;
+ orig_swizzle[1] = swiz->mask.y;
+ orig_swizzle[2] = swiz->mask.z;
+ orig_swizzle[3] = swiz->mask.w;
+ }
+
+ if (rhs->var->data.mode == ir_var_shader_storage ||
+ rhs->var->data.mode == ir_var_shader_shared)
+ return;
+
+ /* Move the swizzle channels out to the positions they match in the
+ * destination. We don't want to have to rewrite the swizzle[]
+ * array every time we clear a bit of the write_mask.
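+    *
+    * For example, for a.zw = b.xy this sets swizzle[2] = 0 (x) and
+    * swizzle[3] = 1 (y).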
+ */
+ int j = 0;
+ for (int i = 0; i < 4; i++) {
+ if (ir->write_mask & (1 << i))
+ swizzle[i] = orig_swizzle[j++];
+ }
+
+ int write_mask = ir->write_mask;
+ if (lhs->var == rhs->var) {
+ /* If this is a copy from the variable to itself, then we need
+ * to be sure not to include the updated channels from this
+ * instruction in the set of new source channels to be
+ * copy-propagated from.
+ */
+ for (int i = 0; i < 4; i++) {
+ if (ir->write_mask & (1 << orig_swizzle[i]))
+ write_mask &= ~(1 << i);
+ }
+ }
+
+ if (lhs->var->data.precise != rhs->var->data.precise)
+ return;
+
+ state->write_elements(lhs->var, rhs->var, write_mask, swizzle);
+}
+
+bool
+do_copy_propagation_elements(exec_list *instructions)
+{
+ ir_copy_propagation_elements_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp
new file mode 100644
index 0000000000..0d4e3a8f00
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_optimization.h"
+
+/**
+ * Pre-linking, optimize unused built-in variables
+ *
+ * Uniforms, constants, system values, inputs (vertex shader only), and
+ * outputs (fragment shader only) that are not used can be removed.
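+ *
+ * For example, a vertex shader that never reads gl_Color does not need
+ * the implicitly declared gl_Color input.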
+ */
+void
+optimize_dead_builtin_variables(exec_list *instructions,
+ enum ir_variable_mode other)
+{
+ foreach_in_list_safe(ir_variable, var, instructions) {
+ if (var->ir_type != ir_type_variable || var->data.used)
+ continue;
+
+ if (var->data.mode != ir_var_uniform
+ && var->data.mode != ir_var_auto
+ && var->data.mode != ir_var_system_value
+ && var->data.mode != other)
+ continue;
+
+   /* So that linker rules can later be enforced, we cannot eliminate
+ * variables that were redeclared in the shader code.
+ */
+ if ((var->data.mode == other || var->data.mode == ir_var_system_value)
+ && var->data.how_declared != ir_var_declared_implicitly)
+ continue;
+
+ if (!is_gl_identifier(var->name))
+ continue;
+
+ /* gl_ModelViewProjectionMatrix and gl_Vertex are special because they
+ * are used by ftransform. No other built-in variable is used by a
+ * built-in function. The forward declarations of these variables in
+ * the built-in function shader does not have the "state slot"
+ * information, so removing these variables from the user shader will
+ * cause problems later.
+ *
+ * Matrix uniforms with "Transpose" are not eliminated because there's
+ * an optimization pass that can turn references to the regular matrix
+ * into references to the transpose matrix. Eliminating the transpose
+ * matrix would cause that pass to generate references to undeclared
+ * variables (thank you, ir_validate).
+ *
+ * It doesn't seem worth the effort to track when the transpose could be
+ * eliminated (i.e., when the non-transpose was eliminated).
+ */
+ if (strcmp(var->name, "gl_ModelViewProjectionMatrix") == 0
+ || strcmp(var->name, "gl_Vertex") == 0
+ || strstr(var->name, "Transpose") != NULL)
+ continue;
+
+ var->remove();
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp
new file mode 100644
index 0000000000..3efe658232
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2013 Marek Olšák <maraeo@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_dead_builtin_varyings.cpp
+ *
+ * This eliminates the built-in shader outputs which are either not written
+ * at all or not used by the next stage. It also eliminates unused elements
+ * of gl_TexCoord inputs, which reduces the overall varying usage.
+ * The varyings handled here are the primary and secondary color, the fog,
+ * and the texture coordinates (gl_TexCoord).
+ *
+ * This pass is necessary because the Mesa GLSL linker cannot eliminate
+ * built-in varyings the way it eliminates user-defined varyings: the
+ * built-in varyings have pre-assigned locations. Also, the elimination
+ * of unused gl_TexCoord elements requires its own lowering pass anyway.
+ *
+ * It's implemented by replacing all occurrences of dead varyings with
+ * temporary variables, which creates dead code. It is recommended to run
+ * a dead-code elimination pass after this.
+ *
+ * If any texture coordinate slots can be eliminated, the gl_TexCoord array is
+ * broken down into separate vec4 variables with locations equal to
+ * VARYING_SLOT_TEX0 + i.
+ *
+ * The same is done for the gl_FragData fragment shader output.
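+ *
+ * Illustrative example (hypothetical shaders): if the vertex shader
+ * writes gl_TexCoord[0] and gl_TexCoord[2] but the fragment shader
+ * only reads gl_TexCoord[0], element 0 becomes a real varying named
+ * "gl_out_TexCoord0" at VARYING_SLOT_TEX0 while element 2 becomes the
+ * temporary "gl_out_TexCoord2_dummy", whose dead store a later
+ * dead-code pass can then delete.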
+ */
+
+#include "ir.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "ir_print_visitor.h"
+#include "compiler/glsl_types.h"
+#include "link_varyings.h"
+#include "main/mtypes.h"
+#include "util/u_string.h"
+
+namespace {
+
+/**
+ * This obtains detailed information about built-in varyings from shader code.
+ */
+class varying_info_visitor : public ir_hierarchical_visitor {
+public:
+ /* "mode" can be either ir_var_shader_in or ir_var_shader_out */
+ varying_info_visitor(ir_variable_mode mode, bool find_frag_outputs = false)
+ : lower_texcoord_array(true),
+ texcoord_array(NULL),
+ texcoord_usage(0),
+ find_frag_outputs(find_frag_outputs),
+ lower_fragdata_array(true),
+ fragdata_array(NULL),
+ fragdata_usage(0),
+ color_usage(0),
+ tfeedback_color_usage(0),
+ fog(NULL),
+ has_fog(false),
+ tfeedback_has_fog(false),
+ mode(mode)
+ {
+ memset(color, 0, sizeof(color));
+ memset(backcolor, 0, sizeof(backcolor));
+ }
+
+ virtual ir_visitor_status visit_enter(ir_dereference_array *ir)
+ {
+ ir_variable *var = ir->variable_referenced();
+
+ if (!var || var->data.mode != this->mode || !var->type->is_array() ||
+ !is_gl_identifier(var->name))
+ return visit_continue;
+
+ /* Only match gl_FragData[], not gl_SecondaryFragDataEXT[] or
+ * gl_LastFragData[].
+ */
+ if (this->find_frag_outputs && strcmp(var->name, "gl_FragData") == 0) {
+ this->fragdata_array = var;
+
+ ir_constant *index = ir->array_index->as_constant();
+ if (index == NULL) {
+ /* This is variable indexing. */
+ this->fragdata_usage |= (1 << var->type->array_size()) - 1;
+ this->lower_fragdata_array = false;
+ }
+ else {
+ this->fragdata_usage |= 1 << index->get_uint_component(0);
+ /* Don't lower fragdata array if the output variable
+ * is not a float variable (or float vector), since the differing
+ * data types would lead to wrong register assignments.
+ */
+ if (var->type->gl_type != GL_FLOAT &&
+ var->type->gl_type != GL_FLOAT_VEC2 &&
+ var->type->gl_type != GL_FLOAT_VEC3 &&
+ var->type->gl_type != GL_FLOAT_VEC4)
+ this->lower_fragdata_array = false;
+ }
+
+ /* Don't visit the leaves of ir_dereference_array. */
+ return visit_continue_with_parent;
+ }
+
+ if (!this->find_frag_outputs && var->data.location == VARYING_SLOT_TEX0) {
+ this->texcoord_array = var;
+
+ ir_constant *index = ir->array_index->as_constant();
+ if (index == NULL) {
+ /* There is variable indexing, so we can't lower the texcoord array.
+ */
+ this->texcoord_usage |= (1 << var->type->array_size()) - 1;
+ this->lower_texcoord_array = false;
+ }
+ else {
+ this->texcoord_usage |= 1 << index->get_uint_component(0);
+ }
+
+ /* Don't visit the leaves of ir_dereference_array. */
+ return visit_continue_with_parent;
+ }
+
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ ir_variable *var = ir->variable_referenced();
+
+ if (var->data.mode != this->mode || !var->type->is_array())
+ return visit_continue;
+
+ if (this->find_frag_outputs && var->data.location == FRAG_RESULT_DATA0 &&
+ var->data.index == 0) {
+ /* This is a whole array dereference. */
+ this->fragdata_usage |= (1 << var->type->array_size()) - 1;
+ this->lower_fragdata_array = false;
+ return visit_continue;
+ }
+
+ if (!this->find_frag_outputs && var->data.location == VARYING_SLOT_TEX0) {
+ /* This is a whole array dereference like "gl_TexCoord = x;",
+ * there's probably no point in lowering that.
+ */
+ this->texcoord_usage |= (1 << var->type->array_size()) - 1;
+ this->lower_texcoord_array = false;
+ }
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit(ir_variable *var)
+ {
+ if (var->data.mode != this->mode)
+ return visit_continue;
+
+ /* Nothing to do here for fragment outputs. */
+ if (this->find_frag_outputs)
+ return visit_continue;
+
+ /* Handle colors and fog. */
+ switch (var->data.location) {
+ case VARYING_SLOT_COL0:
+ this->color[0] = var;
+ this->color_usage |= 1;
+ break;
+ case VARYING_SLOT_COL1:
+ this->color[1] = var;
+ this->color_usage |= 2;
+ break;
+ case VARYING_SLOT_BFC0:
+ this->backcolor[0] = var;
+ this->color_usage |= 1;
+ break;
+ case VARYING_SLOT_BFC1:
+ this->backcolor[1] = var;
+ this->color_usage |= 2;
+ break;
+ case VARYING_SLOT_FOGC:
+ this->fog = var;
+ this->has_fog = true;
+ break;
+ }
+
+ return visit_continue;
+ }
+
+ void get(exec_list *ir,
+ unsigned num_tfeedback_decls,
+ tfeedback_decl *tfeedback_decls)
+ {
+ /* Handle the transform feedback varyings. */
+ for (unsigned i = 0; i < num_tfeedback_decls; i++) {
+ if (!tfeedback_decls[i].is_varying())
+ continue;
+
+ unsigned location = tfeedback_decls[i].get_location();
+
+ switch (location) {
+ case VARYING_SLOT_COL0:
+ case VARYING_SLOT_BFC0:
+ this->tfeedback_color_usage |= 1;
+ break;
+ case VARYING_SLOT_COL1:
+ case VARYING_SLOT_BFC1:
+ this->tfeedback_color_usage |= 2;
+ break;
+ case VARYING_SLOT_FOGC:
+ this->tfeedback_has_fog = true;
+ break;
+ default:
+ if (location >= VARYING_SLOT_TEX0 &&
+ location <= VARYING_SLOT_TEX7) {
+ this->lower_texcoord_array = false;
+ }
+ }
+ }
+
+ /* Process the shader. */
+ visit_list_elements(this, ir);
+
+ if (!this->texcoord_array) {
+ this->lower_texcoord_array = false;
+ }
+ if (!this->fragdata_array) {
+ this->lower_fragdata_array = false;
+ }
+ }
+
+ bool lower_texcoord_array;
+ ir_variable *texcoord_array;
+ unsigned texcoord_usage; /* bitmask */
+
+ bool find_frag_outputs; /* false if it's looking for varyings */
+ bool lower_fragdata_array;
+ ir_variable *fragdata_array;
+ unsigned fragdata_usage; /* bitmask */
+
+ ir_variable *color[2];
+ ir_variable *backcolor[2];
+ unsigned color_usage; /* bitmask */
+ unsigned tfeedback_color_usage; /* bitmask */
+
+ ir_variable *fog;
+ bool has_fog;
+ bool tfeedback_has_fog;
+
+ ir_variable_mode mode;
+};
+
+
+/**
+ * This replaces unused varyings with temporary variables.
+ *
+ * If "ir" is the producer, the "external" usage should come from
+ * the consumer. It also works the other way around. If either one is
+ * missing, set the "external" usage to a full mask.
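+ *
+ * A full mask is the conservative fallback: every element is assumed
+ * to be consumed externally, so nothing is demoted to a dummy
+ * temporary.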
+ */
+class replace_varyings_visitor : public ir_rvalue_visitor {
+public:
+ replace_varyings_visitor(struct gl_linked_shader *sha,
+ const varying_info_visitor *info,
+ unsigned external_texcoord_usage,
+ unsigned external_color_usage,
+ bool external_has_fog)
+ : shader(sha), info(info), new_fog(NULL)
+ {
+ void *const ctx = shader->ir;
+
+ memset(this->new_fragdata, 0, sizeof(this->new_fragdata));
+ memset(this->new_texcoord, 0, sizeof(this->new_texcoord));
+ memset(this->new_color, 0, sizeof(this->new_color));
+ memset(this->new_backcolor, 0, sizeof(this->new_backcolor));
+
+ const char *mode_str =
+ info->mode == ir_var_shader_in ? "in" : "out";
+
+ /* Handle texcoord outputs.
+ *
+ * We're going to break down the gl_TexCoord array into separate
+ * variables. First, add declarations of the new variables that all
+ * occurrences of gl_TexCoord will be replaced with.
+ */
+ if (info->lower_texcoord_array) {
+ prepare_array(shader->ir, this->new_texcoord,
+ ARRAY_SIZE(this->new_texcoord),
+ VARYING_SLOT_TEX0, "TexCoord", mode_str,
+ info->texcoord_usage, external_texcoord_usage);
+ }
+
+ /* Handle gl_FragData in the same way as gl_TexCoord. */
+ if (info->lower_fragdata_array) {
+ prepare_array(shader->ir, this->new_fragdata,
+ ARRAY_SIZE(this->new_fragdata),
+ FRAG_RESULT_DATA0, "FragData", mode_str,
+ info->fragdata_usage, (1 << MAX_DRAW_BUFFERS) - 1);
+ }
+
+ /* Create dummy variables which will replace set-but-unused color and
+ * fog outputs.
+ */
+ external_color_usage |= info->tfeedback_color_usage;
+
+ for (int i = 0; i < 2; i++) {
+ char name[32];
+
+ if (!(external_color_usage & (1 << i))) {
+ if (info->color[i]) {
+ snprintf(name, 32, "gl_%s_FrontColor%i_dummy", mode_str, i);
+ this->new_color[i] =
+ new (ctx) ir_variable(glsl_type::vec4_type, name,
+ ir_var_temporary);
+ }
+
+ if (info->backcolor[i]) {
+ snprintf(name, 32, "gl_%s_BackColor%i_dummy", mode_str, i);
+ this->new_backcolor[i] =
+ new (ctx) ir_variable(glsl_type::vec4_type, name,
+ ir_var_temporary);
+ }
+ }
+ }
+
+ if (!external_has_fog && !info->tfeedback_has_fog &&
+ info->fog) {
+ char name[32];
+
+ snprintf(name, 32, "gl_%s_FogFragCoord_dummy", mode_str);
+ this->new_fog = new (ctx) ir_variable(glsl_type::float_type, name,
+ ir_var_temporary);
+ }
+
+ /* Now do the replacing. */
+ visit_list_elements(this, shader->ir);
+ }
+
+ void prepare_array(exec_list *ir,
+ ir_variable **new_var,
+ int max_elements, unsigned start_location,
+ const char *var_name, const char *mode_str,
+ unsigned usage, unsigned external_usage)
+ {
+ void *const ctx = ir;
+
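+ /* Walk from the highest element down: each declaration is inserted
+ * before the current list head, so iterating backwards leaves the
+ * new variables in ascending element order.
+ */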
+ for (int i = max_elements-1; i >= 0; i--) {
+ if (usage & (1 << i)) {
+ char name[32];
+
+ if (!(external_usage & (1 << i))) {
+ /* This varying is unused in the next stage. Declare
+ * a temporary instead of an output. */
+ snprintf(name, 32, "gl_%s_%s%i_dummy", mode_str, var_name, i);
+ new_var[i] =
+ new (ctx) ir_variable(glsl_type::vec4_type, name,
+ ir_var_temporary);
+ }
+ else {
+ snprintf(name, 32, "gl_%s_%s%i", mode_str, var_name, i);
+ new_var[i] =
+ new(ctx) ir_variable(glsl_type::vec4_type, name,
+ this->info->mode);
+ new_var[i]->data.location = start_location + i;
+ new_var[i]->data.explicit_location = true;
+ new_var[i]->data.explicit_index = 0;
+ }
+
+ ir->get_head_raw()->insert_before(new_var[i]);
+ }
+ }
+ }
+
+ virtual ir_visitor_status visit(ir_variable *var)
+ {
+ /* Remove the gl_TexCoord array. */
+ if (this->info->lower_texcoord_array &&
+ var == this->info->texcoord_array) {
+ var->remove();
+ }
+
+ /* Remove the gl_FragData array. */
+ if (this->info->lower_fragdata_array &&
+ var == this->info->fragdata_array) {
+
+ /* Clone variable for program resource list before it is removed. */
+ if (!shader->fragdata_arrays)
+ shader->fragdata_arrays = new (shader) exec_list;
+
+ shader->fragdata_arrays->push_tail(var->clone(shader, NULL));
+
+ var->remove();
+ }
+
+ /* Replace set-but-unused color and fog outputs with dummy variables. */
+ for (int i = 0; i < 2; i++) {
+ if (var == this->info->color[i] && this->new_color[i]) {
+ var->replace_with(this->new_color[i]);
+ }
+ if (var == this->info->backcolor[i] &&
+ this->new_backcolor[i]) {
+ var->replace_with(this->new_backcolor[i]);
+ }
+ }
+
+ if (var == this->info->fog && this->new_fog) {
+ var->replace_with(this->new_fog);
+ }
+
+ return visit_continue;
+ }
+
+ virtual void handle_rvalue(ir_rvalue **rvalue)
+ {
+ if (!*rvalue)
+ return;
+
+ void *ctx = ralloc_parent(*rvalue);
+
+ /* Replace an array dereference gl_TexCoord[i] with a single
+ * variable dereference representing gl_TexCoord[i].
+ */
+ if (this->info->lower_texcoord_array) {
+ /* gl_TexCoord[i] occurrence */
+ ir_dereference_array *const da = (*rvalue)->as_dereference_array();
+
+ if (da && da->variable_referenced() ==
+ this->info->texcoord_array) {
+ unsigned i = da->array_index->as_constant()->get_uint_component(0);
+
+ *rvalue = new(ctx) ir_dereference_variable(this->new_texcoord[i]);
+ return;
+ }
+ }
+
+ /* Same for gl_FragData. */
+ if (this->info->lower_fragdata_array) {
+ /* gl_FragData[i] occurrence */
+ ir_dereference_array *const da = (*rvalue)->as_dereference_array();
+
+ if (da && da->variable_referenced() == this->info->fragdata_array) {
+ unsigned i = da->array_index->as_constant()->get_uint_component(0);
+
+ *rvalue = new(ctx) ir_dereference_variable(this->new_fragdata[i]);
+ return;
+ }
+ }
+
+ /* Replace set-but-unused color and fog outputs with dummy variables. */
+ ir_dereference_variable *const dv = (*rvalue)->as_dereference_variable();
+ if (!dv)
+ return;
+
+ ir_variable *var = dv->variable_referenced();
+
+ for (int i = 0; i < 2; i++) {
+ if (var == this->info->color[i] && this->new_color[i]) {
+ *rvalue = new(ctx) ir_dereference_variable(this->new_color[i]);
+ return;
+ }
+ if (var == this->info->backcolor[i] &&
+ this->new_backcolor[i]) {
+ *rvalue = new(ctx) ir_dereference_variable(this->new_backcolor[i]);
+ return;
+ }
+ }
+
+ if (var == this->info->fog && this->new_fog) {
+ *rvalue = new(ctx) ir_dereference_variable(this->new_fog);
+ }
+ }
+
+ virtual ir_visitor_status visit_leave(ir_assignment *ir)
+ {
+ handle_rvalue(&ir->rhs);
+ handle_rvalue(&ir->condition);
+
+ /* We have to use set_lhs when changing the LHS of an assignment. */
+ ir_rvalue *lhs = ir->lhs;
+
+ handle_rvalue(&lhs);
+ if (lhs != ir->lhs) {
+ ir->set_lhs(lhs);
+ }
+
+ return visit_continue;
+ }
+
+private:
+ struct gl_linked_shader *shader;
+ const varying_info_visitor *info;
+ ir_variable *new_fragdata[MAX_DRAW_BUFFERS];
+ ir_variable *new_texcoord[MAX_TEXTURE_COORD_UNITS];
+ ir_variable *new_color[2];
+ ir_variable *new_backcolor[2];
+ ir_variable *new_fog;
+};
+
+} /* anonymous namespace */
+
+static void
+lower_texcoord_array(struct gl_linked_shader *shader, const varying_info_visitor *info)
+{
+ replace_varyings_visitor(shader, info,
+ (1 << MAX_TEXTURE_COORD_UNITS) - 1,
+ 1 | 2, true);
+}
+
+static void
+lower_fragdata_array(struct gl_linked_shader *shader)
+{
+ varying_info_visitor info(ir_var_shader_out, true);
+ info.get(shader->ir, 0, NULL);
+
+ replace_varyings_visitor(shader, &info, 0, 0, 0);
+}
+
+
+void
+do_dead_builtin_varyings(struct gl_context *ctx,
+ gl_linked_shader *producer,
+ gl_linked_shader *consumer,
+ unsigned num_tfeedback_decls,
+ tfeedback_decl *tfeedback_decls)
+{
+ /* Lower the gl_FragData array to separate variables. */
+ if (consumer && consumer->Stage == MESA_SHADER_FRAGMENT &&
+ !ctx->Const.ShaderCompilerOptions[MESA_SHADER_FRAGMENT].NirOptions) {
+ lower_fragdata_array(consumer);
+ }
+
+ /* Lowering of built-in varyings has no effect with the core context and
+ * GLES2, because they are not available there.
+ */
+ if (ctx->API == API_OPENGL_CORE ||
+ ctx->API == API_OPENGLES2) {
+ return;
+ }
+
+ /* Information about built-in varyings. */
+ varying_info_visitor producer_info(ir_var_shader_out);
+ varying_info_visitor consumer_info(ir_var_shader_in);
+
+ if (producer) {
+ producer_info.get(producer->ir, num_tfeedback_decls, tfeedback_decls);
+
+ if (producer->Stage == MESA_SHADER_TESS_CTRL)
+ producer_info.lower_texcoord_array = false;
+
+ if (!consumer) {
+ /* At least eliminate unused gl_TexCoord elements. */
+ if (producer_info.lower_texcoord_array) {
+ lower_texcoord_array(producer, &producer_info);
+ }
+ return;
+ }
+ }
+
+ if (consumer) {
+ consumer_info.get(consumer->ir, 0, NULL);
+
+ if (consumer->Stage != MESA_SHADER_FRAGMENT)
+ consumer_info.lower_texcoord_array = false;
+
+ if (!producer) {
+ /* At least eliminate unused gl_TexCoord elements. */
+ if (consumer_info.lower_texcoord_array) {
+ lower_texcoord_array(consumer, &consumer_info);
+ }
+ return;
+ }
+ }
+
+ /* Eliminate the outputs unused by the consumer. */
+ if (producer_info.lower_texcoord_array ||
+ producer_info.color_usage ||
+ producer_info.has_fog) {
+ replace_varyings_visitor(producer,
+ &producer_info,
+ consumer_info.texcoord_usage,
+ consumer_info.color_usage,
+ consumer_info.has_fog);
+ }
+
+ /* The gl_TexCoord fragment shader inputs can be initialized
+ * by GL_COORD_REPLACE, so we can't eliminate them.
+ *
+ * This doesn't prevent elimination of the gl_TexCoord elements which
+ * are not read by the fragment shader. We want to eliminate those anyway.
+ */
+ if (consumer->Stage == MESA_SHADER_FRAGMENT) {
+ producer_info.texcoord_usage = (1 << MAX_TEXTURE_COORD_UNITS) - 1;
+ }
+
+ /* Eliminate the inputs uninitialized by the producer. */
+ if (consumer_info.lower_texcoord_array ||
+ consumer_info.color_usage ||
+ consumer_info.has_fog) {
+ replace_varyings_visitor(consumer,
+ &consumer_info,
+ producer_info.texcoord_usage,
+ producer_info.color_usage,
+ producer_info.has_fog);
+ }
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp
new file mode 100644
index 0000000000..3e571fc7dd
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp
@@ -0,0 +1,203 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_dead_code.cpp
+ *
+ * Eliminates dead assignments and variable declarations from the code.
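+ *
+ * A minimal sketch (hypothetical GLSL): a variable such as
+ *
+ *   float unused = someValue;   // written, never read
+ *
+ * ends up with referenced_count == assigned_count in the refcount
+ * visitor below, so both the assignment and the declaration go away.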
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_variable_refcount.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+static bool debug = false;
+
+/**
+ * Do a dead code pass over instructions and everything that instructions
+ * references.
+ *
+ * Note that this will remove assignments to globals, so it is not suitable
+ * for usage on an unlinked instruction stream.
+ */
+bool
+do_dead_code(exec_list *instructions, bool uniform_locations_assigned)
+{
+ ir_variable_refcount_visitor v;
+ bool progress = false;
+
+ v.run(instructions);
+
+ hash_table_foreach(v.ht, e) {
+ ir_variable_refcount_entry *entry = (ir_variable_refcount_entry *)e->data;
+
+ /* Since each assignment is a reference, the referenced count must be
+ * greater than or equal to the assignment count. If they are equal,
+ * then all of the references are assignments, and the variable is
+ * dead.
+ *
+ * Note that if the variable is neither assigned nor referenced, both
+ * counts will be zero and will be caught by the equality test.
+ */
+ assert(entry->referenced_count >= entry->assigned_count);
+
+ if (debug) {
+ printf("%s@%p: %d refs, %d assigns, %sdeclared in our scope\n",
+ entry->var->name, (void *) entry->var,
+ entry->referenced_count, entry->assigned_count,
+ entry->declaration ? "" : "not ");
+ }
+
+ if ((entry->referenced_count > entry->assigned_count)
+ || !entry->declaration)
+ continue;
+
+ /* Section 7.4.1 (Shader Interface Matching) of the OpenGL 4.5
+ * (Core Profile) spec says:
+ *
+ * "With separable program objects, interfaces between shader
+ * stages may involve the outputs from one program object and the
+ * inputs from a second program object. For such interfaces, it is
+ * not possible to detect mismatches at link time, because the
+ * programs are linked separately. When each such program is
+ * linked, all inputs or outputs interfacing with another program
+ * stage are treated as active."
+ */
+ if (entry->var->data.always_active_io)
+ continue;
+
+ if (!entry->assign_list.is_empty()) {
+ /* Remove all the dead assignments to the variable we found.
+ * Don't do so if it's a shader or function output, though.
+ */
+ if (entry->var->data.mode != ir_var_function_out &&
+ entry->var->data.mode != ir_var_function_inout &&
+ entry->var->data.mode != ir_var_shader_out &&
+ entry->var->data.mode != ir_var_shader_storage) {
+
+ while (!entry->assign_list.is_empty()) {
+ struct assignment_entry *assignment_entry =
+ exec_node_data(struct assignment_entry,
+ entry->assign_list.get_head_raw(), link);
+
+ assignment_entry->assign->remove();
+
+ if (debug) {
+ printf("Removed assignment to %s@%p\n",
+ entry->var->name, (void *) entry->var);
+ }
+
+ assignment_entry->link.remove();
+ free(assignment_entry);
+ }
+ progress = true;
+ }
+ }
+
+ if (entry->assign_list.is_empty()) {
+ /* If there are no assignments or references to the variable left,
+ * then we can remove its declaration.
+ */
+
+ /* uniform initializers are precious, and could get used by another
+ * stage. Also, once uniform locations have been assigned, the
+ * declaration cannot be deleted.
+ */
+ if (entry->var->data.mode == ir_var_uniform ||
+ entry->var->data.mode == ir_var_shader_storage) {
+ if (uniform_locations_assigned || entry->var->constant_initializer)
+ continue;
+
+ /* Section 2.11.6 (Uniform Variables) of the OpenGL ES 3.0.3 spec
+ * says:
+ *
+ * "All members of a named uniform block declared with a
+ * shared or std140 layout qualifier are considered active,
+ * even if they are not referenced in any shader in the
+ * program. The uniform block itself is also considered
+ * active, even if no member of the block is referenced."
+ *
+ * If the variable is in a uniform block with one of those
+ * layouts, do not eliminate it.
+ */
+ if (entry->var->is_in_buffer_block()) {
+ if (entry->var->get_interface_type_packing() !=
+ GLSL_INTERFACE_PACKING_PACKED) {
+ /* Set used to false so it doesn't get set as referenced by
+ * the shader in the program resource list. This will also
+ * help avoid the state being unnecessarily flushed for the
+ * shader stage.
+ */
+ entry->var->data.used = false;
+ continue;
+ }
+ }
+
+ if (entry->var->type->is_subroutine())
+ continue;
+ }
+
+ entry->var->remove();
+ progress = true;
+
+ if (debug) {
+ printf("Removed declaration of %s@%p\n",
+ entry->var->name, (void *) entry->var);
+ }
+ }
+ }
+
+ return progress;
+}
+
+/**
+ * Does a dead code pass on the functions present in the instruction stream.
+ *
+ * This is suitable for use while the program is not linked, as it will
+ * ignore variable declarations (and the assignments to them) for variables
+ * with global scope.
+ */
+bool
+do_dead_code_unlinked(exec_list *instructions)
+{
+ bool progress = false;
+
+ foreach_in_list(ir_instruction, ir, instructions) {
+ ir_function *f = ir->as_function();
+ if (f) {
+ foreach_in_list(ir_function_signature, sig, &f->signatures) {
+ /* The setting of the uniform_locations_assigned flag here is
+ * irrelevant. If there is a uniform declaration encountered
+ * inside the body of the function, something has already gone
+ * terribly, terribly wrong.
+ */
+ if (do_dead_code(&sig->body, false))
+ progress = true;
+ }
+ }
+ }
+
+ return progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp
new file mode 100644
index 0000000000..b2d35bbaff
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp
@@ -0,0 +1,358 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_dead_code_local.cpp
+ *
+ * Eliminates local dead assignments from the code.
+ *
+ * This operates on basic blocks, tracking assignments and finding if
+ * they're used before the variable is completely reassigned.
+ *
+ * Compare this to opt_dead_code.cpp, which operates globally looking
+ * for assignments to variables that are never read.
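+ *
+ * For example (hypothetical), given
+ *
+ *   v.xyzw = a;
+ *   v.xy   = b;
+ *   use(v);
+ *
+ * the .xy channels of the first write are dead, so it is rewritten to
+ * only cover .zw before any instruction reads v.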
+ */
+
+#include "ir.h"
+#include "ir_basic_block.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+
+static bool debug = false;
+
+namespace {
+
+class assignment_entry : public exec_node
+{
+public:
+ /* override operator new from exec_node */
+ DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(assignment_entry)
+
+ assignment_entry(ir_variable *lhs, ir_assignment *ir)
+ {
+ assert(lhs);
+ assert(ir);
+ this->lhs = lhs;
+ this->ir = ir;
+ this->unused = ir->write_mask;
+ }
+
+ ir_variable *lhs;
+ ir_assignment *ir;
+
+ /* bitmask of xyzw channels written that haven't been used so far. */
+ int unused;
+};
+
+class kill_for_derefs_visitor : public ir_hierarchical_visitor {
+public:
+ using ir_hierarchical_visitor::visit;
+
+ kill_for_derefs_visitor(exec_list *assignments)
+ {
+ this->assignments = assignments;
+ }
+
+ void use_channels(ir_variable *const var, int used)
+ {
+ foreach_in_list_safe(assignment_entry, entry, this->assignments) {
+ if (entry->lhs == var) {
+ if (var->type->is_scalar() || var->type->is_vector()) {
+ if (debug)
+ printf("used %s (0x%01x - 0x%01x)\n", entry->lhs->name,
+ entry->unused, used & 0xf);
+ entry->unused &= ~used;
+ if (!entry->unused)
+ entry->remove();
+ } else {
+ if (debug)
+ printf("used %s\n", entry->lhs->name);
+ entry->remove();
+ }
+ }
+ }
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ use_channels(ir->var, ~0);
+
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit(ir_swizzle *ir)
+ {
+ ir_dereference_variable *deref = ir->val->as_dereference_variable();
+ if (!deref)
+ return visit_continue;
+
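+ /* Collect the channels this swizzle actually reads; e.g. v.zx sets
+ * bits 2 and 0, so only those channels are cleared from 'unused'.
+ */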
+ int used = 0;
+ used |= 1 << ir->mask.x;
+ if (ir->mask.num_components > 1)
+ used |= 1 << ir->mask.y;
+ if (ir->mask.num_components > 2)
+ used |= 1 << ir->mask.z;
+ if (ir->mask.num_components > 3)
+ used |= 1 << ir->mask.w;
+
+ use_channels(deref->var, used);
+
+ return visit_continue_with_parent;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_emit_vertex *)
+ {
+ /* For the purpose of dead code elimination, emitting a vertex counts as
+ * "reading" all of the currently assigned output variables.
+ */
+ foreach_in_list_safe(assignment_entry, entry, this->assignments) {
+ if (entry->lhs->data.mode == ir_var_shader_out) {
+ if (debug)
+ printf("kill %s\n", entry->lhs->name);
+ entry->remove();
+ }
+ }
+
+ return visit_continue;
+ }
+
+private:
+ exec_list *assignments;
+};
+
+class array_index_visit : public ir_hierarchical_visitor {
+public:
+ array_index_visit(ir_hierarchical_visitor *v)
+ {
+ this->visitor = v;
+ }
+
+ virtual ir_visitor_status visit_enter(class ir_dereference_array *ir)
+ {
+ ir->array_index->accept(visitor);
+ return visit_continue;
+ }
+
+ static void run(ir_instruction *ir, ir_hierarchical_visitor *v)
+ {
+ array_index_visit top_visit(v);
+ ir->accept(& top_visit);
+ }
+
+ ir_hierarchical_visitor *visitor;
+};
+
+} /* unnamed namespace */
+
+/**
+ * Adds an entry to the available copy list if it's a plain assignment
+ * of a variable to a variable.
+ */
+static bool
+process_assignment(void *lin_ctx, ir_assignment *ir, exec_list *assignments)
+{
+ ir_variable *var = NULL;
+ bool progress = false;
+ kill_for_derefs_visitor v(assignments);
+
+ if (ir->condition == NULL) {
+ /* If this is an assignment of the form "foo = foo;", remove the whole
+ * instruction and be done with it.
+ */
+ const ir_variable *const lhs_var = ir->whole_variable_written();
+ if (lhs_var != NULL && lhs_var == ir->rhs->whole_variable_referenced()) {
+ ir->remove();
+ return true;
+ }
+ }
+
+ /* Kill assignment entries for things used to produce this assignment. */
+ ir->rhs->accept(&v);
+ if (ir->condition) {
+ ir->condition->accept(&v);
+ }
+
+ /* Kill assignment entries used as array indices. */
+ array_index_visit::run(ir->lhs, &v);
+ var = ir->lhs->variable_referenced();
+ assert(var);
+
+ /* Now, check if we did a whole-variable assignment. */
+ if (!ir->condition) {
+ ir_dereference_variable *deref_var = ir->lhs->as_dereference_variable();
+
+ /* If it's a vector type, we can do per-channel elimination of
+ * use of the RHS.
+ */
+ if (deref_var && (deref_var->var->type->is_scalar() ||
+ deref_var->var->type->is_vector())) {
+
+ if (debug)
+ printf("looking for %s.0x%01x to remove\n", var->name,
+ ir->write_mask);
+
+ foreach_in_list_safe(assignment_entry, entry, assignments) {
+ if (entry->lhs != var)
+ continue;
+
+ /* Skip if the assignment we're trying to eliminate isn't a plain
+ * variable deref. */
+ if (entry->ir->lhs->ir_type != ir_type_dereference_variable)
+ continue;
+
+ int remove = entry->unused & ir->write_mask;
+ if (debug) {
+ printf("%s 0x%01x - 0x%01x = 0x%01x\n",
+ var->name,
+ entry->ir->write_mask,
+ remove, entry->ir->write_mask & ~remove);
+ }
+ if (remove) {
+ progress = true;
+
+ if (debug) {
+ printf("rewriting:\n ");
+ entry->ir->print();
+ printf("\n");
+ }
+
+ entry->ir->write_mask &= ~remove;
+ entry->unused &= ~remove;
+ if (entry->ir->write_mask == 0) {
+ /* Delete the dead assignment. */
+ entry->ir->remove();
+ entry->remove();
+ } else {
+ void *mem_ctx = ralloc_parent(entry->ir);
+ /* Reswizzle the RHS arguments according to the new
+ * write_mask.
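+ *
+ * E.g. if the original mask was .xyz and .y is being removed,
+ * the surviving x and z channels must read RHS components 0 and
+ * 2, so the RHS becomes rhs.xz (components = {0, 2}).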
+ */
+ unsigned components[4];
+ unsigned channels = 0;
+ unsigned next = 0;
+
+ for (int i = 0; i < 4; i++) {
+ if ((entry->ir->write_mask | remove) & (1 << i)) {
+ if (!(remove & (1 << i)))
+ components[channels++] = next;
+ next++;
+ }
+ }
+
+ entry->ir->rhs = new(mem_ctx) ir_swizzle(entry->ir->rhs,
+ components,
+ channels);
+ if (debug) {
+ printf("to:\n ");
+ entry->ir->print();
+ printf("\n");
+ }
+ }
+ }
+ }
+ } else if (ir->whole_variable_written() != NULL) {
+ /* We did a whole-variable assignment. So, any instruction in
+ * the assignment list with the same LHS is dead.
+ */
+ if (debug)
+ printf("looking for %s to remove\n", var->name);
+ foreach_in_list_safe(assignment_entry, entry, assignments) {
+ if (entry->lhs == var) {
+ if (debug)
+ printf("removing %s\n", var->name);
+ entry->ir->remove();
+ entry->remove();
+ progress = true;
+ }
+ }
+ }
+ }
+
+ /* Add this instruction to the assignment list available to be removed. */
+ assignment_entry *entry = new(lin_ctx) assignment_entry(var, ir);
+ assignments->push_tail(entry);
+
+ if (debug) {
+ printf("add %s\n", var->name);
+
+ printf("current entries\n");
+ foreach_in_list(assignment_entry, entry, assignments) {
+ printf(" %s (0x%01x)\n", entry->lhs->name, entry->unused);
+ }
+ }
+
+ return progress;
+}
+
+static void
+dead_code_local_basic_block(ir_instruction *first,
+ ir_instruction *last,
+ void *data)
+{
+ ir_instruction *ir, *ir_next;
+ /* List of assignment_entry */
+ exec_list assignments;
+ bool *out_progress = (bool *)data;
+ bool progress = false;
+
+ void *ctx = ralloc_context(NULL);
+ void *lin_ctx = linear_alloc_parent(ctx, 0);
+
+ /* Safe looping, since process_assignment() may remove the instruction
+ * currently being processed.
+ */
+ for (ir = first, ir_next = (ir_instruction *)first->next;;
+ ir = ir_next, ir_next = (ir_instruction *)ir->next) {
+ ir_assignment *ir_assign = ir->as_assignment();
+
+ if (debug) {
+ ir->print();
+ printf("\n");
+ }
+
+ if (ir_assign) {
+ progress = process_assignment(lin_ctx, ir_assign, &assignments) ||
+ progress;
+ } else {
+ kill_for_derefs_visitor kill(&assignments);
+ ir->accept(&kill);
+ }
+
+ if (ir == last)
+ break;
+ }
+ *out_progress = progress;
+ ralloc_free(ctx);
+}
+
+/**
+ * Does a local dead-code elimination pass on the code present in the
+ * instruction stream.
+ */
+bool
+do_dead_code_local(exec_list *instructions)
+{
+ bool progress = false;
+
+ call_for_basic_blocks(instructions, dead_code_local_basic_block, &progress);
+
+ return progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp
new file mode 100644
index 0000000000..2e90b650fa
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_dead_functions.cpp
+ *
+ * Eliminates unused functions from the linked program.
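+ *
+ * Reachability is shallow: "main" and every signature targeted by some
+ * ir_call are marked used, and everything else is dropped. A helper
+ * only called from another dead helper therefore disappears on a later
+ * run of this pass, once its caller has been removed.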
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_expression_flattening.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+class signature_entry : public exec_node
+{
+public:
+ signature_entry(ir_function_signature *sig)
+ {
+ this->signature = sig;
+ this->used = false;
+ }
+
+ ir_function_signature *signature;
+ bool used;
+};
+
+class ir_dead_functions_visitor : public ir_hierarchical_visitor {
+public:
+ ir_dead_functions_visitor()
+ {
+ this->mem_ctx = ralloc_context(NULL);
+ }
+
+ ~ir_dead_functions_visitor()
+ {
+ ralloc_free(this->mem_ctx);
+ }
+
+ virtual ir_visitor_status visit_enter(ir_function_signature *);
+ virtual ir_visitor_status visit_enter(ir_call *);
+
+ signature_entry *get_signature_entry(ir_function_signature *var);
+
+ /* List of signature_entry */
+ exec_list signature_list;
+ void *mem_ctx;
+};
+
+} /* unnamed namespace */
+
+signature_entry *
+ir_dead_functions_visitor::get_signature_entry(ir_function_signature *sig)
+{
+ foreach_in_list(signature_entry, entry, &this->signature_list) {
+ if (entry->signature == sig)
+ return entry;
+ }
+
+ signature_entry *entry = new(mem_ctx) signature_entry(sig);
+ this->signature_list.push_tail(entry);
+ return entry;
+}
+
+
+ir_visitor_status
+ir_dead_functions_visitor::visit_enter(ir_function_signature *ir)
+{
+ signature_entry *entry = this->get_signature_entry(ir);
+
+ if (strcmp(ir->function_name(), "main") == 0) {
+ entry->used = true;
+ }
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+ir_dead_functions_visitor::visit_enter(ir_call *ir)
+{
+ signature_entry *entry = this->get_signature_entry(ir->callee);
+
+ entry->used = true;
+
+ return visit_continue;
+}
+
+bool
+do_dead_functions(exec_list *instructions)
+{
+ ir_dead_functions_visitor v;
+ bool progress = false;
+
+ visit_list_elements(&v, instructions);
+
+ /* Now that we've figured out which function signatures are used, remove
+ * the unused ones, and remove function definitions that have no more
+ * signatures.
+ */
+ foreach_in_list_safe(signature_entry, entry, &v.signature_list) {
+ if (!entry->used) {
+ entry->signature->remove();
+ delete entry->signature;
+ progress = true;
+ }
+ delete(entry);
+ }
+
+ /* We don't just do this above, when we nuke a signature, because of
+ * const pointers.
+ */
+ foreach_in_list_safe(ir_instruction, ir, instructions) {
+ ir_function *func = ir->as_function();
+
+ if (func && func->signatures.is_empty()) {
+ /* At this point (post-linking), the symbol table is no
+ * longer in use, so not removing the function from the
+ * symbol table should be OK.
+ */
+ func->remove();
+ delete func;
+ progress = true;
+ }
+ }
+
+ return progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp
new file mode 100644
index 0000000000..66d55ae5c1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_flatten_nested_if_blocks.cpp
+ *
+ * Flattens nested if blocks such as:
+ *
+ * if (x) {
+ * if (y) {
+ * ...
+ * }
+ * }
+ *
+ * into a single if block with a combined condition:
+ *
+ * if (x && y) {
+ * ...
+ * }
+ */
+
+#include "ir.h"
+#include "ir_builder.h"
+
+using namespace ir_builder;
+
+namespace {
+
+class nested_if_flattener : public ir_hierarchical_visitor {
+public:
+ nested_if_flattener()
+ {
+ progress = false;
+ }
+
+ ir_visitor_status visit_leave(ir_if *);
+ ir_visitor_status visit_enter(ir_assignment *);
+
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+/* We only care about the top level "if" instructions, so don't
+ * descend into expressions.
+ */
+ir_visitor_status
+nested_if_flattener::visit_enter(ir_assignment *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+bool
+opt_flatten_nested_if_blocks(exec_list *instructions)
+{
+ nested_if_flattener v;
+
+ v.run(instructions);
+ return v.progress;
+}
+
+
+ir_visitor_status
+nested_if_flattener::visit_leave(ir_if *ir)
+{
+ /* Only handle a single ir_if within the then clause of an ir_if. No extra
+ * instructions, no else clauses, nothing.
+ */
+ if (ir->then_instructions.is_empty() || !ir->else_instructions.is_empty())
+ return visit_continue;
+
+ ir_if *inner = ((ir_instruction *) ir->then_instructions.get_head_raw())->as_if();
+ if (!inner || !inner->next->is_tail_sentinel() ||
+ !inner->else_instructions.is_empty())
+ return visit_continue;
+
+ ir->condition = logic_and(ir->condition, inner->condition);
+ inner->then_instructions.move_nodes_to(&ir->then_instructions);
+
+ progress = true;
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp
new file mode 100644
index 0000000000..04c6170b84
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_flip_matrices.cpp
+ *
+ * Convert (matrix * vector) operations to (vector * matrixTranspose),
+ * which can be done using dot products rather than multiplies and adds.
+ * On some hardware, this is more efficient.
+ *
+ * This currently only does the conversion for built-in matrices which
+ * already have transposed equivalents. Namely, gl_ModelViewProjectionMatrix
+ * and gl_TextureMatrix.
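+ *
+ * E.g. (gl_ModelViewProjectionMatrix * pos) is rewritten as
+ * (pos * gl_ModelViewProjectionMatrixTranspose); the results are
+ * identical because M * v == v * transpose(M) in GLSL.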
+ */
+#include "ir.h"
+#include "ir_optimization.h"
+#include "main/macros.h"
+
+namespace {
+class matrix_flipper : public ir_hierarchical_visitor {
+public:
+ matrix_flipper(exec_list *instructions)
+ {
+ progress = false;
+ mvp_transpose = NULL;
+ texmat_transpose = NULL;
+
+ foreach_in_list(ir_instruction, ir, instructions) {
+ ir_variable *var = ir->as_variable();
+ if (!var)
+ continue;
+ if (strcmp(var->name, "gl_ModelViewProjectionMatrixTranspose") == 0)
+ mvp_transpose = var;
+ if (strcmp(var->name, "gl_TextureMatrixTranspose") == 0)
+ texmat_transpose = var;
+ }
+ }
+
+ ir_visitor_status visit_enter(ir_expression *ir);
+
+ bool progress;
+
+private:
+ ir_variable *mvp_transpose;
+ ir_variable *texmat_transpose;
+};
+}
+
+ir_visitor_status
+matrix_flipper::visit_enter(ir_expression *ir)
+{
+ if (ir->operation != ir_binop_mul ||
+ !ir->operands[0]->type->is_matrix() ||
+ !ir->operands[1]->type->is_vector())
+ return visit_continue;
+
+ ir_variable *mat_var = ir->operands[0]->variable_referenced();
+ if (!mat_var)
+ return visit_continue;
+
+ if (mvp_transpose &&
+ strcmp(mat_var->name, "gl_ModelViewProjectionMatrix") == 0) {
+#ifndef NDEBUG
+ ir_dereference_variable *deref = ir->operands[0]->as_dereference_variable();
+ assert(deref && deref->var == mat_var);
+#endif
+
+ void *mem_ctx = ralloc_parent(ir);
+
+ ir->operands[0] = ir->operands[1];
+ ir->operands[1] = new(mem_ctx) ir_dereference_variable(mvp_transpose);
+
+ progress = true;
+ } else if (texmat_transpose &&
+ strcmp(mat_var->name, "gl_TextureMatrix") == 0) {
+ ir_dereference_array *array_ref = ir->operands[0]->as_dereference_array();
+ assert(array_ref != NULL);
+ ir_dereference_variable *var_ref = array_ref->array->as_dereference_variable();
+ assert(var_ref && var_ref->var == mat_var);
+
+ ir->operands[0] = ir->operands[1];
+ ir->operands[1] = array_ref;
+
+ var_ref->var = texmat_transpose;
+
+ texmat_transpose->data.max_array_access =
+ MAX2(texmat_transpose->data.max_array_access, mat_var->data.max_array_access);
+
+ progress = true;
+ }
+
+ return visit_continue;
+}
+
+bool
+opt_flip_matrices(struct exec_list *instructions)
+{
+ matrix_flipper v(instructions);
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp
new file mode 100644
index 0000000000..590bd707be
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp
@@ -0,0 +1,466 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_function_inlining.cpp
+ *
+ * Replaces calls to functions with the body of the function.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_function_inlining.h"
+#include "ir_expression_flattening.h"
+#include "compiler/glsl_types.h"
+#include "util/hash_table.h"
+
+static void
+do_variable_replacement(exec_list *instructions,
+ ir_variable *orig,
+ ir_dereference *repl);
+
+namespace {
+
+class ir_function_inlining_visitor : public ir_hierarchical_visitor {
+public:
+ ir_function_inlining_visitor()
+ {
+ progress = false;
+ }
+
+ virtual ~ir_function_inlining_visitor()
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_enter(ir_expression *);
+ virtual ir_visitor_status visit_enter(ir_call *);
+ virtual ir_visitor_status visit_enter(ir_return *);
+ virtual ir_visitor_status visit_enter(ir_texture *);
+ virtual ir_visitor_status visit_enter(ir_swizzle *);
+
+ bool progress;
+};
+
+class ir_save_lvalue_visitor : public ir_hierarchical_visitor {
+public:
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+};
+
+} /* unnamed namespace */
+
+bool
+do_function_inlining(exec_list *instructions)
+{
+ ir_function_inlining_visitor v;
+
+ v.run(instructions);
+
+ return v.progress;
+}
+
+static void
+replace_return_with_assignment(ir_instruction *ir, void *data)
+{
+ void *ctx = ralloc_parent(ir);
+ ir_dereference *orig_deref = (ir_dereference *) data;
+ ir_return *ret = ir->as_return();
+
+ if (ret) {
+ if (ret->value) {
+ ir_rvalue *lhs = orig_deref->clone(ctx, NULL);
+ ret->replace_with(new(ctx) ir_assignment(lhs, ret->value));
+ } else {
+ /* An un-valued return has to be the last return, or we shouldn't
+ * have reached here (see can_inline()).
+ */
+ assert(ret->next->is_tail_sentinel());
+ ret->remove();
+ }
+ }
+}
+
+/* Save the given lvalue before the given instruction.
+ *
+ * This is done by adding temporary variables into which the current value
+ * of any array indices are saved, and then modifying the dereference chain
+ * in-place to point to those temporary variables.
+ *
+ * The hierarchical visitor is only used to traverse the left-hand-side chain
+ * of derefs.
+ */
+ir_visitor_status
+ir_save_lvalue_visitor::visit_enter(ir_dereference_array *deref)
+{
+ if (deref->array_index->ir_type != ir_type_constant) {
+ void *ctx = ralloc_parent(deref);
+ ir_variable *index;
+ ir_assignment *assignment;
+
+ index = new(ctx) ir_variable(deref->array_index->type, "saved_idx", ir_var_temporary);
+ base_ir->insert_before(index);
+
+ assignment = new(ctx) ir_assignment(new(ctx) ir_dereference_variable(index),
+ deref->array_index);
+ base_ir->insert_before(assignment);
+
+ deref->array_index = new(ctx) ir_dereference_variable(index);
+ }
+
+ deref->array->accept(this);
+ return visit_stop;
+}
+
+static bool
+should_replace_variable(ir_variable *sig_param, ir_rvalue *param)
+{
+ /* For opaque types, we want the inlined variable references
+ * referencing the passed in variable, since that will have
+ * the location information, which an assignment of an opaque
+ * variable wouldn't.
+ */
+ return sig_param->type->contains_opaque() &&
+ param->is_dereference() &&
+ sig_param->data.mode == ir_var_function_in;
+}
+
+void
+ir_call::generate_inline(ir_instruction *next_ir)
+{
+ void *ctx = ralloc_parent(this);
+ ir_variable **parameters;
+ unsigned num_parameters;
+ int i;
+ struct hash_table *ht;
+
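+ /* Maps each variable in the callee's body to its clone in the inlined
+ * code; ir_instruction::clone() consults this table while copying.
+ */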
+ ht = _mesa_pointer_hash_table_create(NULL);
+
+ num_parameters = this->callee->parameters.length();
+ parameters = new ir_variable *[num_parameters];
+
+ /* Generate the declarations for the parameters to our inlined code,
+ * and set up the mapping of real function body variables to ours.
+ */
+ i = 0;
+ foreach_two_lists(formal_node, &this->callee->parameters,
+ actual_node, &this->actual_parameters) {
+ ir_variable *sig_param = (ir_variable *) formal_node;
+ ir_rvalue *param = (ir_rvalue *) actual_node;
+
+ /* Generate a new variable for the parameter. */
+ if (should_replace_variable(sig_param, param)) {
+ /* Actual replacement happens below */
+ parameters[i] = NULL;
+ } else {
+ parameters[i] = sig_param->clone(ctx, ht);
+ parameters[i]->data.mode = ir_var_temporary;
+
+ /* Remove the read-only decoration because we're going to write
+ * directly to this variable. If the cloned variable is left
+ * read-only and the inlined function is inside a loop, the loop
+ * analysis code will get confused.
+ */
+ parameters[i]->data.read_only = false;
+ next_ir->insert_before(parameters[i]);
+ }
+
+ /* Section 6.1.1 (Function Calling Conventions) of the OpenGL Shading
+ * Language 4.5 spec says:
+ *
+ * "All arguments are evaluated at call time, exactly once, in order,
+ * from left to right. [...] Evaluation of an out parameter results
+ * in an l-value that is used to copy out a value when the function
+ * returns."
+ *
+ * I.e., we have to take temporary copies of any relevant array indices
+ * before the function body is executed.
+ *
+ * This ensures that
+ * (a) if an array index expression refers to a variable that is
+ * modified by the execution of the function body, we use the
+ * original value as intended, and
+ * (b) if an array index expression has side effects, those side effects
+ * are only executed once and at the right time.
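+ *
+ * For instance (hypothetical call): inlining f(arr[i++]) with an
+ * inout parameter saves the index into a "saved_idx" temporary so
+ * the copy-in and copy-out hit the same element and i++ runs once.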
+ */
+ if (parameters[i]) {
+ if (sig_param->data.mode == ir_var_function_in ||
+ sig_param->data.mode == ir_var_const_in) {
+ ir_assignment *assign;
+
+ assign = new(ctx) ir_assignment(new(ctx) ir_dereference_variable(parameters[i]),
+ param);
+ next_ir->insert_before(assign);
+ } else {
+ assert(sig_param->data.mode == ir_var_function_out ||
+ sig_param->data.mode == ir_var_function_inout);
+ assert(param->is_lvalue());
+
+ ir_save_lvalue_visitor v;
+ v.base_ir = next_ir;
+
+ param->accept(&v);
+
+ if (sig_param->data.mode == ir_var_function_inout) {
+ ir_assignment *assign;
+
+ assign = new(ctx) ir_assignment(new(ctx) ir_dereference_variable(parameters[i]),
+ param->clone(ctx, NULL)->as_rvalue());
+ next_ir->insert_before(assign);
+ }
+ }
+ }
+
+ ++i;
+ }
+
+ exec_list new_instructions;
+
+ /* Generate the inlined body of the function to a new list */
+ foreach_in_list(ir_instruction, ir, &callee->body) {
+ ir_instruction *new_ir = ir->clone(ctx, ht);
+
+ new_instructions.push_tail(new_ir);
+ visit_tree(new_ir, replace_return_with_assignment, this->return_deref);
+ }
+
+ /* If any opaque types were passed in, replace any deref of the
+ * opaque variable with a deref of the argument.
+ */
+ foreach_two_lists(formal_node, &this->callee->parameters,
+ actual_node, &this->actual_parameters) {
+ ir_rvalue *const param = (ir_rvalue *) actual_node;
+ ir_variable *sig_param = (ir_variable *) formal_node;
+
+ if (should_replace_variable(sig_param, param)) {
+ ir_dereference *deref = param->as_dereference();
+
+ do_variable_replacement(&new_instructions, sig_param, deref);
+ }
+ }
+
+ /* Now push those new instructions in. */
+ next_ir->insert_before(&new_instructions);
+
+ /* Copy back the value of any 'out' parameters from the function body
+ * variables to our own.
+ */
+ i = 0;
+ foreach_two_lists(formal_node, &this->callee->parameters,
+ actual_node, &this->actual_parameters) {
+ ir_rvalue *const param = (ir_rvalue *) actual_node;
+ const ir_variable *const sig_param = (ir_variable *) formal_node;
+
+ /* Move our param variable into the actual param if it's an 'out' type. */
+ if (parameters[i] && (sig_param->data.mode == ir_var_function_out ||
+ sig_param->data.mode == ir_var_function_inout)) {
+ ir_assignment *assign;
+
+ assign = new(ctx) ir_assignment(param,
+ new(ctx) ir_dereference_variable(parameters[i]));
+ next_ir->insert_before(assign);
+ }
+
+ ++i;
+ }
+
+ delete [] parameters;
+
+ _mesa_hash_table_destroy(ht, NULL);
+}
+
+
+ir_visitor_status
+ir_function_inlining_visitor::visit_enter(ir_expression *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+
+ir_visitor_status
+ir_function_inlining_visitor::visit_enter(ir_return *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+
+ir_visitor_status
+ir_function_inlining_visitor::visit_enter(ir_texture *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+
+ir_visitor_status
+ir_function_inlining_visitor::visit_enter(ir_swizzle *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+
+ir_visitor_status
+ir_function_inlining_visitor::visit_enter(ir_call *ir)
+{
+ if (can_inline(ir)) {
+ ir->generate_inline(ir);
+ ir->remove();
+ this->progress = true;
+ }
+
+ return visit_continue;
+}
+
+
+/**
+ * Replaces references to the "orig" variable with a clone of "repl."
+ *
+ * From the spec, opaque types can appear in the tree as function
+ * (non-out) parameters and as the result of array indexing and
+ * structure field selection. In our builtin implementation, they
+ * also appear in the sampler field of an ir_tex instruction.
+ */
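+
+/* A minimal sketch (illustrative names, not from this file): when inlining
+ *
+ *    vec4 f(sampler2D s, vec2 uv) { return texture(s, uv); }
+ *
+ * at a call site f(tex, p), each dereference of the formal "s" in the
+ * cloned body is replaced with a dereference of the actual argument "tex".
+ */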
+
+class ir_variable_replacement_visitor : public ir_hierarchical_visitor {
+public:
+ ir_variable_replacement_visitor(ir_variable *orig, ir_dereference *repl)
+ {
+ this->orig = orig;
+ this->repl = repl;
+ }
+
+ virtual ~ir_variable_replacement_visitor()
+ {
+ }
+
+ virtual ir_visitor_status visit_leave(ir_call *);
+ virtual ir_visitor_status visit_leave(ir_dereference_array *);
+ virtual ir_visitor_status visit_leave(ir_dereference_record *);
+ virtual ir_visitor_status visit_leave(ir_texture *);
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+ virtual ir_visitor_status visit_leave(ir_expression *);
+ virtual ir_visitor_status visit_leave(ir_return *);
+
+ void replace_deref(ir_dereference **deref);
+ void replace_rvalue(ir_rvalue **rvalue);
+
+ ir_variable *orig;
+ ir_dereference *repl;
+};
+
+void
+ir_variable_replacement_visitor::replace_deref(ir_dereference **deref)
+{
+ ir_dereference_variable *deref_var = (*deref)->as_dereference_variable();
+ if (deref_var && deref_var->var == this->orig) {
+ *deref = this->repl->clone(ralloc_parent(*deref), NULL);
+ }
+}
+
+void
+ir_variable_replacement_visitor::replace_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_dereference *deref = (*rvalue)->as_dereference();
+
+ if (!deref)
+ return;
+
+ replace_deref(&deref);
+ *rvalue = deref;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_texture *ir)
+{
+ replace_deref(&ir->sampler);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_assignment *ir)
+{
+ replace_deref(&ir->lhs);
+ replace_rvalue(&ir->rhs);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_expression *ir)
+{
+ for (uint8_t i = 0; i < ir->num_operands; i++)
+ replace_rvalue(&ir->operands[i]);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_return *ir)
+{
+ replace_rvalue(&ir->value);
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_dereference_array *ir)
+{
+ replace_rvalue(&ir->array);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_dereference_record *ir)
+{
+ replace_rvalue(&ir->record);
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_variable_replacement_visitor::visit_leave(ir_call *ir)
+{
+ foreach_in_list_safe(ir_rvalue, param, &ir->actual_parameters) {
+ ir_rvalue *new_param = param;
+ replace_rvalue(&new_param);
+
+ if (new_param != param) {
+ param->replace_with(new_param);
+ }
+ }
+ return visit_continue;
+}
+
+static void
+do_variable_replacement(exec_list *instructions,
+ ir_variable *orig,
+ ir_dereference *repl)
+{
+ ir_variable_replacement_visitor v(orig, repl);
+
+ visit_list_elements(&v, instructions);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp
new file mode 100644
index 0000000000..136ef87729
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_if_simplification.cpp
+ *
+ * Moves constant branches of if statements out to the surrounding
+ * instruction stream, and inverts if conditionals to avoid empty
+ * "then" blocks.
+ */
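+
+/* Two sketches of what this pass does (names are illustrative):
+ *
+ *    if (true) { a(); } else { b(); }   ===>   a();
+ *    if (c)    { }      else { b(); }   ===>   if (!c) { b(); }
+ */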
+
+#include "ir.h"
+
+namespace {
+
+class ir_if_simplification_visitor : public ir_hierarchical_visitor {
+public:
+ ir_if_simplification_visitor()
+ {
+ this->made_progress = false;
+ }
+
+ ir_visitor_status visit_leave(ir_if *);
+ ir_visitor_status visit_enter(ir_assignment *);
+
+ bool made_progress;
+};
+
+} /* unnamed namespace */
+
+/* We only care about the top level "if" instructions, so don't
+ * descend into expressions.
+ */
+ir_visitor_status
+ir_if_simplification_visitor::visit_enter(ir_assignment *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+bool
+do_if_simplification(exec_list *instructions)
+{
+ ir_if_simplification_visitor v;
+
+ v.run(instructions);
+ return v.made_progress;
+}
+
+
+ir_visitor_status
+ir_if_simplification_visitor::visit_leave(ir_if *ir)
+{
+ /* If the if statement has nothing on either side, remove it. */
+ if (ir->then_instructions.is_empty() &&
+ ir->else_instructions.is_empty()) {
+ ir->remove();
+ this->made_progress = true;
+ return visit_continue;
+ }
+
+ /* FINISHME: Ideally there would be a way to note that the condition results
+ * FINISHME: in a constant before processing both of the other subtrees.
+ * FINISHME: This can probably be done with some flags, but it would take
+ * FINISHME: some work to get right.
+ */
+ ir_constant *condition_constant =
+ ir->condition->constant_expression_value(ralloc_parent(ir));
+ if (condition_constant) {
+ /* Move the contents of the one branch of the conditional
+ * that matters out.
+ */
+ if (condition_constant->value.b[0]) {
+ ir->insert_before(&ir->then_instructions);
+ } else {
+ ir->insert_before(&ir->else_instructions);
+ }
+ ir->remove();
+ this->made_progress = true;
+ return visit_continue;
+ }
+
+ /* Turn:
+ *
+ * if (cond) {
+ * } else {
+ * do_work();
+ * }
+ *
+    * into:
+ *
+ * if (!cond)
+ * do_work();
+ *
+ * which avoids control flow for "else" (which is usually more
+ * expensive than normal operations), and the "not" can usually be
+ * folded into the generation of "cond" anyway.
+ */
+ if (ir->then_instructions.is_empty()) {
+ ir->condition = new(ralloc_parent(ir->condition))
+ ir_expression(ir_unop_logic_not, ir->condition);
+ ir->else_instructions.move_nodes_to(&ir->then_instructions);
+ this->made_progress = true;
+ }
+
+ return visit_continue;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_minmax.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_minmax.cpp
new file mode 100644
index 0000000000..36fe0a9f05
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_minmax.cpp
@@ -0,0 +1,507 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_minmax.cpp
+ *
+ * Drop operands from an expression tree of only min/max operations if they
+ * can be proven to not contribute to the final result.
+ *
+ * The algorithm is similar to alpha-beta pruning on a minmax search.
+ */
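+
+/* A minimal sketch: in min(min(x, 3.0), 5.0) the outer constant 5.0 can
+ * never affect the result, because the inner min already bounds the value
+ * to <= 3.0, so the tree prunes to min(x, 3.0).
+ */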
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "ir_builder.h"
+#include "program/prog_instruction.h"
+#include "compiler/glsl_types.h"
+#include "main/macros.h"
+#include "util/half_float.h"
+
+using namespace ir_builder;
+
+namespace {
+
+enum compare_components_result {
+ LESS,
+ LESS_OR_EQUAL,
+ EQUAL,
+ GREATER_OR_EQUAL,
+ GREATER,
+ MIXED
+};
+
+class minmax_range {
+public:
+ minmax_range(ir_constant *low = NULL, ir_constant *high = NULL)
+ {
+ this->low = low;
+ this->high = high;
+ }
+
+   /* low is the lower limit of the range and high is the upper limit. A
+    * NULL low means negative infinity (unbounded below) and a NULL high
+    * means positive infinity (unbounded above). Because NULL means a
+    * different thing on each side, the two limits cannot be compared
+    * without knowing which one is which.
+    */
+ ir_constant *low;
+ ir_constant *high;
+};
+
+class ir_minmax_visitor : public ir_rvalue_enter_visitor {
+public:
+ ir_minmax_visitor()
+ : progress(false)
+ {
+ }
+
+ ir_rvalue *prune_expression(ir_expression *expr, minmax_range baserange);
+
+ void handle_rvalue(ir_rvalue **rvalue);
+
+ bool progress;
+};
+
+/*
+ * Returns LESS if all vector components of `a' are strictly lower than those
+ * of `b', GREATER if all are strictly greater, MIXED if some components are
+ * strictly lower while others are strictly greater, LESS_OR_EQUAL or
+ * GREATER_OR_EQUAL if some components are equal and the rest compare the
+ * same way, or EQUAL if every component is equal.
+ */
+static enum compare_components_result
+compare_components(ir_constant *a, ir_constant *b)
+{
+ assert(a != NULL);
+ assert(b != NULL);
+
+ assert(a->type->base_type == b->type->base_type);
+
+ unsigned a_inc = a->type->is_scalar() ? 0 : 1;
+ unsigned b_inc = b->type->is_scalar() ? 0 : 1;
+ unsigned components = MAX2(a->type->components(), b->type->components());
+
+ bool foundless = false;
+ bool foundgreater = false;
+ bool foundequal = false;
+
+ for (unsigned i = 0, c0 = 0, c1 = 0;
+ i < components;
+ c0 += a_inc, c1 += b_inc, ++i) {
+ switch (a->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if (a->value.u[c0] < b->value.u[c1])
+ foundless = true;
+ else if (a->value.u[c0] > b->value.u[c1])
+ foundgreater = true;
+ else
+ foundequal = true;
+ break;
+ case GLSL_TYPE_INT:
+ if (a->value.i[c0] < b->value.i[c1])
+ foundless = true;
+ else if (a->value.i[c0] > b->value.i[c1])
+ foundgreater = true;
+ else
+ foundequal = true;
+ break;
+ case GLSL_TYPE_FLOAT16: {
+ float af = _mesa_half_to_float(a->value.f16[c0]);
+ float bf = _mesa_half_to_float(b->value.f16[c1]);
+ if (af < bf)
+ foundless = true;
+ else if (af > bf)
+ foundgreater = true;
+ else
+ foundequal = true;
+ break;
+ }
+ case GLSL_TYPE_FLOAT:
+ if (a->value.f[c0] < b->value.f[c1])
+ foundless = true;
+ else if (a->value.f[c0] > b->value.f[c1])
+ foundgreater = true;
+ else
+ foundequal = true;
+ break;
+ case GLSL_TYPE_DOUBLE:
+ if (a->value.d[c0] < b->value.d[c1])
+ foundless = true;
+ else if (a->value.d[c0] > b->value.d[c1])
+ foundgreater = true;
+ else
+ foundequal = true;
+ break;
+ default:
+ unreachable("not reached");
+ }
+ }
+
+ if (foundless && foundgreater) {
+ /* Some components are strictly lower, others are strictly greater */
+ return MIXED;
+ }
+
+ if (foundequal) {
+ /* It is not mixed, but it is not strictly lower or greater */
+ if (foundless)
+ return LESS_OR_EQUAL;
+ if (foundgreater)
+ return GREATER_OR_EQUAL;
+ return EQUAL;
+ }
+
+ /* All components are strictly lower or strictly greater */
+ return foundless ? LESS : GREATER;
+}
+
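+/* Returns the element-wise min (if ismin) or max of two constants; used when
+ * a component-wise comparison is MIXED and neither constant dominates.
+ */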
+static ir_constant *
+combine_constant(bool ismin, ir_constant *a, ir_constant *b)
+{
+ void *mem_ctx = ralloc_parent(a);
+ ir_constant *c = a->clone(mem_ctx, NULL);
+ for (unsigned i = 0; i < c->type->components(); i++) {
+ switch (c->type->base_type) {
+ case GLSL_TYPE_UINT:
+ if ((ismin && b->value.u[i] < c->value.u[i]) ||
+ (!ismin && b->value.u[i] > c->value.u[i]))
+ c->value.u[i] = b->value.u[i];
+ break;
+ case GLSL_TYPE_INT:
+ if ((ismin && b->value.i[i] < c->value.i[i]) ||
+ (!ismin && b->value.i[i] > c->value.i[i]))
+ c->value.i[i] = b->value.i[i];
+ break;
+ case GLSL_TYPE_FLOAT16: {
+ float bf = _mesa_half_to_float(b->value.f16[i]);
+ float cf = _mesa_half_to_float(c->value.f16[i]);
+ if ((ismin && bf < cf) || (!ismin && bf > cf))
+ c->value.f16[i] = b->value.f16[i];
+ break;
+ }
+ case GLSL_TYPE_FLOAT:
+ if ((ismin && b->value.f[i] < c->value.f[i]) ||
+ (!ismin && b->value.f[i] > c->value.f[i]))
+ c->value.f[i] = b->value.f[i];
+ break;
+ case GLSL_TYPE_DOUBLE:
+ if ((ismin && b->value.d[i] < c->value.d[i]) ||
+ (!ismin && b->value.d[i] > c->value.d[i]))
+ c->value.d[i] = b->value.d[i];
+ break;
+ default:
+ assert(!"not reached");
+ }
+ }
+ return c;
+}
+
+static ir_constant *
+smaller_constant(ir_constant *a, ir_constant *b)
+{
+ assert(a != NULL);
+ assert(b != NULL);
+
+ enum compare_components_result ret = compare_components(a, b);
+ if (ret == MIXED)
+ return combine_constant(true, a, b);
+ else if (ret < EQUAL)
+ return a;
+ else
+ return b;
+}
+
+static ir_constant *
+larger_constant(ir_constant *a, ir_constant *b)
+{
+ assert(a != NULL);
+ assert(b != NULL);
+
+ enum compare_components_result ret = compare_components(a, b);
+ if (ret == MIXED)
+ return combine_constant(false, a, b);
+ else if (ret < EQUAL)
+ return b;
+ else
+ return a;
+}
+
+/* Combines two ranges by doing an element-wise min() / max() depending on the
+ * operation.
+ */
+static minmax_range
+combine_range(minmax_range r0, minmax_range r1, bool ismin)
+{
+ minmax_range ret;
+
+ if (!r0.low) {
+ ret.low = ismin ? r0.low : r1.low;
+ } else if (!r1.low) {
+ ret.low = ismin ? r1.low : r0.low;
+ } else {
+ ret.low = ismin ? smaller_constant(r0.low, r1.low) :
+ larger_constant(r0.low, r1.low);
+ }
+
+ if (!r0.high) {
+ ret.high = ismin ? r1.high : r0.high;
+ } else if (!r1.high) {
+ ret.high = ismin ? r0.high : r1.high;
+ } else {
+ ret.high = ismin ? smaller_constant(r0.high, r1.high) :
+ larger_constant(r0.high, r1.high);
+ }
+
+ return ret;
+}
+
+/* Returns a range so that lower limit is the larger of the two lower limits,
+ * and higher limit is the smaller of the two higher limits.
+ */
+static minmax_range
+range_intersection(minmax_range r0, minmax_range r1)
+{
+ minmax_range ret;
+
+ if (!r0.low)
+ ret.low = r1.low;
+ else if (!r1.low)
+ ret.low = r0.low;
+ else
+ ret.low = larger_constant(r0.low, r1.low);
+
+ if (!r0.high)
+ ret.high = r1.high;
+ else if (!r1.high)
+ ret.high = r0.high;
+ else
+ ret.high = smaller_constant(r0.high, r1.high);
+
+ return ret;
+}
+
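+/* Conservatively computes the [low, high] range of a min/max expression
+ * tree. For example (sketch), min(x, 3.0) yields (-inf, 3.0] and
+ * max(min(x, 3.0), 1.0) yields [1.0, 3.0].
+ */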
+static minmax_range
+get_range(ir_rvalue *rval)
+{
+ ir_expression *expr = rval->as_expression();
+ if (expr && (expr->operation == ir_binop_min ||
+ expr->operation == ir_binop_max)) {
+ minmax_range r0 = get_range(expr->operands[0]);
+ minmax_range r1 = get_range(expr->operands[1]);
+ return combine_range(r0, r1, expr->operation == ir_binop_min);
+ }
+
+ ir_constant *c = rval->as_constant();
+ if (c) {
+ return minmax_range(c, c);
+ }
+
+ return minmax_range();
+}
+
+/**
+ * Prunes a min/max expression considering the base range of the parent
+ * min/max expression.
+ *
+ * @param baserange the range that the parents of this min/max expression
+ * in the min/max tree will clamp its value to.
+ */
+ir_rvalue *
+ir_minmax_visitor::prune_expression(ir_expression *expr, minmax_range baserange)
+{
+ assert(expr->operation == ir_binop_min ||
+ expr->operation == ir_binop_max);
+
+ bool ismin = expr->operation == ir_binop_min;
+ minmax_range limits[2];
+
+ /* Recurse to get the ranges for each of the subtrees of this
+ * expression. We need to do this as a separate step because we need to
+ * know the ranges of each of the subtrees before we prune either one.
+ * Consider something like this:
+ *
+ * max
+ * / \
+ * max max
+ * / \ / \
+ * 3 a b 2
+ *
+ * We would like to prune away the max on the bottom-right, but to do so
+ * we need to know the range of the expression on the left beforehand,
+ * and there's no guarantee that we will visit either subtree in a
+ * particular order.
+ */
+ for (unsigned i = 0; i < 2; ++i)
+ limits[i] = get_range(expr->operands[i]);
+
+ for (unsigned i = 0; i < 2; ++i) {
+ bool is_redundant = false;
+
+ enum compare_components_result cr = LESS;
+ if (ismin) {
+ /* If this operand will always be greater than the other one, it's
+ * redundant.
+ */
+ if (limits[i].low && limits[1 - i].high) {
+ cr = compare_components(limits[i].low, limits[1 - i].high);
+ if (cr >= EQUAL && cr != MIXED)
+ is_redundant = true;
+ }
+ /* If this operand is always greater than baserange, then even if
+ * it's smaller than the other one it'll get clamped, so it's
+ * redundant.
+ */
+ if (!is_redundant && limits[i].low && baserange.high) {
+ cr = compare_components(limits[i].low, baserange.high);
+ if (cr > EQUAL && cr != MIXED)
+ is_redundant = true;
+ }
+ } else {
+ /* If this operand will always be lower than the other one, it's
+ * redundant.
+ */
+ if (limits[i].high && limits[1 - i].low) {
+ cr = compare_components(limits[i].high, limits[1 - i].low);
+ if (cr <= EQUAL)
+ is_redundant = true;
+ }
+ /* If this operand is always lower than baserange, then even if
+ * it's greater than the other one it'll get clamped, so it's
+ * redundant.
+ */
+ if (!is_redundant && limits[i].high && baserange.low) {
+ cr = compare_components(limits[i].high, baserange.low);
+ if (cr < EQUAL)
+ is_redundant = true;
+ }
+ }
+
+ if (is_redundant) {
+ progress = true;
+
+ /* Recurse if necessary. */
+ ir_expression *op_expr = expr->operands[1 - i]->as_expression();
+ if (op_expr && (op_expr->operation == ir_binop_min ||
+ op_expr->operation == ir_binop_max)) {
+ return prune_expression(op_expr, baserange);
+ }
+
+ return expr->operands[1 - i];
+ } else if (cr == MIXED) {
+ /* If we have mixed vector operands, we can try to resolve the minmax
+ * expression by doing a component-wise minmax:
+ *
+ * min min
+ * / \ / \
+ * min a ===> [1,1] a
+ * / \
+ * [1,3] [3,1]
+ *
+ */
+ ir_constant *a = expr->operands[0]->as_constant();
+ ir_constant *b = expr->operands[1]->as_constant();
+ if (a && b)
+ return combine_constant(ismin, a, b);
+ }
+ }
+
+   /* Now recurse into the operands, giving each one the proper baserange:
+    * the intersection of our own baserange with the other operand's range,
+    * after dropping the limit that this min/max cannot enforce on the
+    * result (the low limit for a min, the high limit for a max).
+    */
+ for (unsigned i = 0; i < 2; ++i) {
+ ir_expression *op_expr = expr->operands[i]->as_expression();
+ if (op_expr && (op_expr->operation == ir_binop_min ||
+ op_expr->operation == ir_binop_max)) {
+ /* We can only compute a new baserange for this operand if we managed
+ * to compute a valid range for the other operand.
+ */
+ if (ismin)
+ limits[1 - i].low = NULL;
+ else
+ limits[1 - i].high = NULL;
+ minmax_range base = range_intersection(limits[1 - i], baserange);
+ expr->operands[i] = prune_expression(op_expr, base);
+ }
+ }
+
+ /* If we got here we could not discard any of the operands of the minmax
+ * expression, but we can still try to resolve the expression if both
+ * operands are constant. We do this after the loop above, to make sure
+ * that if our operands are minmax expressions we have tried to prune them
+ * first (hopefully reducing them to constants).
+ */
+ ir_constant *a = expr->operands[0]->as_constant();
+ ir_constant *b = expr->operands[1]->as_constant();
+ if (a && b)
+ return combine_constant(ismin, a, b);
+
+ return expr;
+}
+
+static ir_rvalue *
+swizzle_if_required(ir_expression *expr, ir_rvalue *rval)
+{
+ if (expr->type->is_vector() && rval->type->is_scalar()) {
+ return swizzle(rval, SWIZZLE_XXXX, expr->type->vector_elements);
+ } else {
+ return rval;
+ }
+}
+
+void
+ir_minmax_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_expression *expr = (*rvalue)->as_expression();
+ if (!expr || (expr->operation != ir_binop_min &&
+ expr->operation != ir_binop_max))
+ return;
+
+ ir_rvalue *new_rvalue = prune_expression(expr, minmax_range());
+ if (new_rvalue == *rvalue)
+ return;
+
+ /* If the expression type is a vector and the optimization leaves a scalar
+ * as the result, we need to turn it into a vector.
+ */
+ *rvalue = swizzle_if_required(expr, new_rvalue);
+
+ progress = true;
+}
+
+}
+
+bool
+do_minmax_prune(exec_list *instructions)
+{
+ ir_minmax_visitor v;
+
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp
new file mode 100644
index 0000000000..8045d51033
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp
@@ -0,0 +1,337 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_rebalance_tree.cpp
+ *
+ * Rebalances a reduction expression tree.
+ *
+ * For reduction operations (e.g., x + y + z + w) we generate an expression
+ * tree like
+ *
+ * +
+ * / \
+ * + w
+ * / \
+ * + z
+ * / \
+ * x y
+ *
+ * which we can rebalance into
+ *
+ * +
+ * / \
+ * / \
+ * + +
+ * / \ / \
+ * x y z w
+ *
+ * to get a better instruction scheduling.
+ *
+ * See "Tree Rebalancing in Optimal Editor Time and Space" by Quentin F. Stout
+ * and Bette L. Warren.
+ *
+ * Also see http://penguin.ewu.edu/~trolfe/DSWpaper/ for a very readable
+ * explanation of the tree_to_vine() (rightward rotation) and
+ * vine_to_tree() (leftward rotation) algorithms.
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "main/macros.h" /* for MAX2 */
+
+/* The DSW algorithm generates a degenerate tree (really, a linked list) in
+ * tree_to_vine(). We'd rather not leave a binary expression with only one
+ * operand, so trivial modifications (the ternary operators below) are needed
+ * to ensure that we only rotate around the ir_expression nodes of the tree.
+ */
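+
+/* Sketch of the two phases on ((x + y) + z) + w:
+ *
+ *    tree_to_vine():  x + (y + (z + w))     -- a right-leaning "vine"
+ *    vine_to_tree():  (x + y) + (z + w)     -- the balanced result
+ */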
+static unsigned
+tree_to_vine(ir_expression *root)
+{
+ unsigned size = 0;
+ ir_rvalue *vine_tail = root;
+ ir_rvalue *remainder = root->operands[1];
+
+ while (remainder != NULL) {
+ ir_expression *remainder_temp = remainder->as_expression();
+ ir_expression *remainder_left = remainder_temp ?
+ remainder_temp->operands[0]->as_expression() : NULL;
+
+ if (remainder_left == NULL) {
+ /* move vine_tail down one */
+ vine_tail = remainder;
+ remainder = remainder->as_expression() ?
+ ((ir_expression *)remainder)->operands[1] : NULL;
+ size++;
+ } else {
+ /* rotate */
+ ir_expression *tempptr = remainder_left;
+ ((ir_expression *)remainder)->operands[0] = tempptr->operands[1];
+ tempptr->operands[1] = remainder;
+ remainder = tempptr;
+ ((ir_expression *)vine_tail)->operands[1] = tempptr;
+ }
+ }
+
+ return size;
+}
+
+static void
+compression(ir_expression *root, unsigned count)
+{
+ ir_expression *scanner = root;
+
+ for (unsigned i = 0; i < count; i++) {
+ ir_expression *child = (ir_expression *)scanner->operands[1];
+ scanner->operands[1] = child->operands[1];
+ scanner = (ir_expression *)scanner->operands[1];
+ child->operands[1] = scanner->operands[0];
+ scanner->operands[0] = child;
+ }
+}
+
+static void
+vine_to_tree(ir_expression *root, unsigned size)
+{
+ int n = size - 1;
+ for (int m = n / 2; m > 0; m = n / 2) {
+ compression(root, m);
+ n -= m + 1;
+ }
+}
+
+namespace {
+
+class ir_rebalance_visitor : public ir_rvalue_enter_visitor {
+public:
+ ir_rebalance_visitor()
+ {
+ progress = false;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *ir);
+
+ void handle_rvalue(ir_rvalue **rvalue);
+
+ bool progress;
+};
+
+struct is_reduction_data {
+ ir_expression_operation operation;
+ const glsl_type *type;
+ unsigned num_expr;
+ bool is_reduction;
+ bool contains_constant;
+};
+
+} /* anonymous namespace */
+
+ir_visitor_status
+ir_rebalance_visitor::visit_enter(ir_assignment *ir)
+{
+ ir_variable *var = ir->lhs->variable_referenced();
+ if (var->data.invariant || var->data.precise) {
+ /* If we're assigning to an invariant variable, just bail. Tree
+ * rebalancing (reassociation) isn't precision-safe.
+ */
+ return visit_continue_with_parent;
+ } else {
+ return visit_continue;
+ }
+}
+
+static bool
+is_reduction_operation(ir_expression_operation operation)
+{
+ switch (operation) {
+ case ir_binop_add:
+ case ir_binop_mul:
+ case ir_binop_bit_and:
+ case ir_binop_bit_xor:
+ case ir_binop_bit_or:
+ case ir_binop_logic_and:
+ case ir_binop_logic_xor:
+ case ir_binop_logic_or:
+ case ir_binop_min:
+ case ir_binop_max:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Note that this function does not attempt to recognize that reduction trees
+ * are already balanced.
+ *
+ * We return false from this function for a number of reasons other than an
+ * expression tree not being a mathematical reduction. Namely,
+ *
+ * - if the tree contains multiple constants that we may be able to combine.
+ * - if the tree contains matrices:
+ * - they might contain vec4's with many constant components that we can
+ * simplify after splitting.
+ * - applying the matrix chain ordering optimization is more than just
+ * balancing an expression tree.
+ * - if the tree contains operations on multiple types.
+ * - if the tree contains ir_dereference_{array,record}, since foo[a+b] + c
+ * would trick the visiting pass.
+ */
+static void
+is_reduction(ir_instruction *ir, void *data)
+{
+ struct is_reduction_data *ird = (struct is_reduction_data *)data;
+ if (!ird->is_reduction)
+ return;
+
+ /* We don't want to balance a tree that contains multiple constants, since
+ * we'll be able to constant fold them if they're not in separate subtrees.
+ */
+ if (ir->as_constant()) {
+ if (ird->contains_constant) {
+ ird->is_reduction = false;
+ }
+ ird->contains_constant = true;
+ return;
+ }
+
+ /* Array/record dereferences have subtrees that are not part of the expr
+ * tree we're balancing. Skip trees containing them.
+ */
+ if (ir->ir_type == ir_type_dereference_array ||
+ ir->ir_type == ir_type_dereference_record) {
+ ird->is_reduction = false;
+ return;
+ }
+
+ ir_expression *expr = ir->as_expression();
+ if (!expr)
+ return;
+
+ /* Non-constant matrices might still contain constant vec4 that we can
+ * constant fold once split up. Handling matrices will need some more
+ * work.
+ */
+ if (expr->type->is_matrix() ||
+ expr->operands[0]->type->is_matrix() ||
+ (expr->operands[1] && expr->operands[1]->type->is_matrix())) {
+ ird->is_reduction = false;
+ return;
+ }
+
+ if (ird->type != NULL && ird->type != expr->type) {
+ ird->is_reduction = false;
+ return;
+ }
+ ird->type = expr->type;
+
+ ird->num_expr++;
+ if (is_reduction_operation(expr->operation)) {
+ if (ird->operation != 0 && ird->operation != expr->operation)
+ ird->is_reduction = false;
+ ird->operation = expr->operation;
+ } else {
+ ird->is_reduction = false;
+ }
+}
+
+static ir_rvalue *
+handle_expression(ir_expression *expr)
+{
+ struct is_reduction_data ird;
+ ird.operation = (ir_expression_operation)0;
+ ird.type = NULL;
+ ird.num_expr = 0;
+ ird.is_reduction = true;
+ ird.contains_constant = false;
+
+ visit_tree(expr, is_reduction, (void *)&ird);
+
+ if (ird.is_reduction && ird.num_expr > 2) {
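+      /* Wrap the tree in a dummy "0.0 + expr" pseudo-root so the rotations
+       * always have a parent to reattach subtrees to; the rebalanced tree
+       * is then taken back out of the pseudo-root's second operand.
+       */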
+ ir_constant z = ir_constant(0.0f);
+ ir_expression pseudo_root = ir_expression(ir_binop_add, &z, expr);
+
+ unsigned size = tree_to_vine(&pseudo_root);
+ vine_to_tree(&pseudo_root, size);
+
+ expr = (ir_expression *)pseudo_root.operands[1];
+ }
+ return expr;
+}
+
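+/* Rebalancing can pair a scalar operand with a vector one, so recompute each
+ * expression node's type bottom-up from its operands' vector widths.
+ */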
+static void
+update_types(ir_instruction *ir, void *)
+{
+ ir_expression *expr = ir->as_expression();
+ if (!expr)
+ return;
+
+ const glsl_type *const new_type =
+ glsl_type::get_instance(expr->type->base_type,
+ MAX2(expr->operands[0]->type->vector_elements,
+ expr->operands[1]->type->vector_elements),
+ 1);
+ assert(new_type != glsl_type::error_type);
+ expr->type = new_type;
+}
+
+void
+ir_rebalance_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_expression *expr = (*rvalue)->as_expression();
+ if (!expr || !is_reduction_operation(expr->operation))
+ return;
+
+ ir_rvalue *new_rvalue = handle_expression(expr);
+
+ /* If we failed to rebalance the tree (e.g., because it wasn't a reduction,
+ * or some other set of cases) new_rvalue will point to the same root as
+ * before.
+ *
+ * Similarly, if the tree rooted at *rvalue was a reduction and was already
+ * balanced, the algorithm will rearrange the tree but will ultimately
+ * return an identical tree, so this check will handle that as well and
+ * will not set progress = true.
+ */
+ if (new_rvalue == *rvalue)
+ return;
+
+ visit_tree(new_rvalue, NULL, NULL, update_types);
+
+ *rvalue = new_rvalue;
+ this->progress = true;
+}
+
+bool
+do_rebalance_tree(exec_list *instructions)
+{
+ ir_rebalance_visitor v;
+
+ v.run(instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp
new file mode 100644
index 0000000000..ee384d0f23
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_redundant_jumps.cpp
+ * Remove certain types of redundant jumps
+ */
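+
+/* Sketches of the two cases handled below:
+ *
+ *    if (c) { a(); break; } else { b(); break; }
+ *       ===>   if (c) { a(); } else { b(); }  break;
+ *
+ *    loop { a(); continue; }   ===>   loop { a(); }
+ */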
+
+#include "ir.h"
+
+namespace {
+
+class redundant_jumps_visitor : public ir_hierarchical_visitor {
+public:
+ redundant_jumps_visitor()
+ {
+ this->progress = false;
+ }
+
+ virtual ir_visitor_status visit_leave(ir_if *);
+ virtual ir_visitor_status visit_leave(ir_loop *);
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+/* We only care about the top level instructions, so don't descend
+ * into expressions.
+ */
+ir_visitor_status
+redundant_jumps_visitor::visit_enter(ir_assignment *)
+{
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+redundant_jumps_visitor::visit_leave(ir_if *ir)
+{
+ /* If the last instruction in both branches is a 'break' or a 'continue',
+    * pull it out of the branches and insert it after the if-statement. Note
+ * that both must be the same type (either 'break' or 'continue').
+ */
+ ir_instruction *const last_then =
+ (ir_instruction *) ir->then_instructions.get_tail();
+ ir_instruction *const last_else =
+ (ir_instruction *) ir->else_instructions.get_tail();
+
+ if ((last_then == NULL) || (last_else == NULL))
+ return visit_continue;
+
+ if ((last_then->ir_type != ir_type_loop_jump)
+ || (last_else->ir_type != ir_type_loop_jump))
+ return visit_continue;
+
+ ir_loop_jump *const then_jump = (ir_loop_jump *) last_then;
+ ir_loop_jump *const else_jump = (ir_loop_jump *) last_else;
+
+ if (then_jump->mode != else_jump->mode)
+ return visit_continue;
+
+ then_jump->remove();
+ else_jump->remove();
+ this->progress = true;
+
+ ir->insert_after(then_jump);
+
+   /* If both branches of the if-statement are now empty, remove the
+ * if-statement.
+ */
+ if (ir->then_instructions.is_empty() && ir->else_instructions.is_empty())
+ ir->remove();
+
+ return visit_continue;
+}
+
+
+ir_visitor_status
+redundant_jumps_visitor::visit_leave(ir_loop *ir)
+{
+ /* If the last instruction of a loop body is a 'continue', remove it.
+ */
+ ir_instruction *const last =
+ (ir_instruction *) ir->body_instructions.get_tail();
+
+ if (last && (last->ir_type == ir_type_loop_jump)
+ && (((ir_loop_jump *) last)->mode == ir_loop_jump::jump_continue)) {
+ last->remove();
+ this->progress = true;
+ }
+
+ return visit_continue;
+}
+
+
+bool
+optimize_redundant_jumps(exec_list *instructions)
+{
+ redundant_jumps_visitor v;
+
+ v.run(instructions);
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp
new file mode 100644
index 0000000000..c573d07c26
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp
@@ -0,0 +1,377 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_structure_splitting.cpp
+ *
+ * If a structure is only ever referenced by its components, then
+ * split those components out to individual variables so they can be
+ * handled normally by other optimization passes.
+ *
+ * This skips structures like uniforms, which need to be accessible as
+ * structures for their access by the GL.
+ */
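+
+/* A minimal sketch (illustrative names): given
+ *
+ *    struct S { float a; vec3 b; } s;
+ *
+ * where "s" is only ever accessed as s.a or s.b, the pass declares new
+ * variables "s_a" and "s_b" and rewrites every field dereference to use
+ * the matching split variable.
+ */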
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+static bool debug = false;
+
+class variable_entry : public exec_node
+{
+public:
+ variable_entry(ir_variable *var)
+ {
+ this->var = var;
+ this->whole_structure_access = 0;
+ this->declaration = false;
+ this->components = NULL;
+ this->mem_ctx = NULL;
+ }
+
+ ir_variable *var; /* The key: the variable's pointer. */
+
+   /** Number of times the variable is accessed as a whole structure,
+    *  including assignments. */
+ unsigned whole_structure_access;
+
+   /* Whether the variable's declaration appears in the instruction stream.
+    * We can't do splitting on function arguments, which never have this
+    * flag set.
+    */
+ bool declaration;
+
+ ir_variable **components;
+
+ /** ralloc_parent(this->var) -- the shader's ralloc context. */
+ void *mem_ctx;
+};
+
+
+class ir_structure_reference_visitor : public ir_hierarchical_visitor {
+public:
+ ir_structure_reference_visitor(void)
+ {
+ this->mem_ctx = ralloc_context(NULL);
+ this->variable_list.make_empty();
+ }
+
+ ~ir_structure_reference_visitor(void)
+ {
+ ralloc_free(mem_ctx);
+ }
+
+ virtual ir_visitor_status visit(ir_variable *);
+ virtual ir_visitor_status visit(ir_dereference_variable *);
+ virtual ir_visitor_status visit_enter(ir_dereference_record *);
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_function_signature *);
+
+ variable_entry *get_variable_entry(ir_variable *var);
+
+ /* List of variable_entry */
+ exec_list variable_list;
+
+ void *mem_ctx;
+};
+
+variable_entry *
+ir_structure_reference_visitor::get_variable_entry(ir_variable *var)
+{
+ assert(var);
+
+ if (!var->type->is_struct() ||
+ var->data.mode == ir_var_uniform || var->data.mode == ir_var_shader_storage ||
+ var->data.mode == ir_var_shader_in || var->data.mode == ir_var_shader_out)
+ return NULL;
+
+ foreach_in_list(variable_entry, entry, &this->variable_list) {
+ if (entry->var == var)
+ return entry;
+ }
+
+ variable_entry *entry = new(mem_ctx) variable_entry(var);
+ this->variable_list.push_tail(entry);
+ return entry;
+}
+
+
+ir_visitor_status
+ir_structure_reference_visitor::visit(ir_variable *ir)
+{
+ variable_entry *entry = this->get_variable_entry(ir);
+
+ if (entry)
+ entry->declaration = true;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_structure_reference_visitor::visit(ir_dereference_variable *ir)
+{
+ ir_variable *const var = ir->variable_referenced();
+ variable_entry *entry = this->get_variable_entry(var);
+
+ if (entry)
+ entry->whole_structure_access++;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_structure_reference_visitor::visit_enter(ir_dereference_record *ir)
+{
+ (void) ir;
+ /* Don't descend into the ir_dereference_variable below. */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_structure_reference_visitor::visit_enter(ir_assignment *ir)
+{
+ /* If there are no structure references yet, no need to bother with
+ * processing the expression tree.
+ */
+ if (this->variable_list.is_empty())
+ return visit_continue_with_parent;
+
+ if (ir->lhs->as_dereference_variable() &&
+ ir->rhs->as_dereference_variable() &&
+ !ir->condition) {
+ /* We'll split copies of a structure to copies of components, so don't
+ * descend to the ir_dereference_variables.
+ */
+ return visit_continue_with_parent;
+ }
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_structure_reference_visitor::visit_enter(ir_function_signature *ir)
+{
+ /* We don't have logic for structure-splitting function arguments,
+ * so just look at the body instructions and not the parameter
+ * declarations.
+ */
+ visit_list_elements(this, &ir->body);
+ return visit_continue_with_parent;
+}
+
+class ir_structure_splitting_visitor : public ir_rvalue_visitor {
+public:
+ ir_structure_splitting_visitor(exec_list *vars)
+ {
+ this->variable_list = vars;
+ }
+
+ virtual ~ir_structure_splitting_visitor()
+ {
+ }
+
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+
+ void split_deref(ir_dereference **deref);
+ void handle_rvalue(ir_rvalue **rvalue);
+ variable_entry *get_splitting_entry(ir_variable *var);
+
+ exec_list *variable_list;
+};
+
+variable_entry *
+ir_structure_splitting_visitor::get_splitting_entry(ir_variable *var)
+{
+ assert(var);
+
+ if (!var->type->is_struct())
+ return NULL;
+
+ foreach_in_list(variable_entry, entry, this->variable_list) {
+ if (entry->var == var) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+void
+ir_structure_splitting_visitor::split_deref(ir_dereference **deref)
+{
+ if ((*deref)->ir_type != ir_type_dereference_record)
+ return;
+
+ ir_dereference_record *deref_record = (ir_dereference_record *)*deref;
+ ir_dereference_variable *deref_var = deref_record->record->as_dereference_variable();
+ if (!deref_var)
+ return;
+
+ variable_entry *entry = get_splitting_entry(deref_var->var);
+ if (!entry)
+ return;
+
+ int i = deref_record->field_idx;
+ assert(i >= 0);
+ assert((unsigned) i < entry->var->type->length);
+
+ *deref = new(entry->mem_ctx) ir_dereference_variable(entry->components[i]);
+}
+
+void
+ir_structure_splitting_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_dereference *deref = (*rvalue)->as_dereference();
+
+ if (!deref)
+ return;
+
+ split_deref(&deref);
+ *rvalue = deref;
+}
+
+ir_visitor_status
+ir_structure_splitting_visitor::visit_leave(ir_assignment *ir)
+{
+ ir_dereference_variable *lhs_deref = ir->lhs->as_dereference_variable();
+ ir_dereference_variable *rhs_deref = ir->rhs->as_dereference_variable();
+ variable_entry *lhs_entry = lhs_deref ? get_splitting_entry(lhs_deref->var) : NULL;
+ variable_entry *rhs_entry = rhs_deref ? get_splitting_entry(rhs_deref->var) : NULL;
+ const glsl_type *type = ir->rhs->type;
+
+ if ((lhs_entry || rhs_entry) && !ir->condition) {
+ for (unsigned int i = 0; i < type->length; i++) {
+ ir_dereference *new_lhs, *new_rhs;
+ void *mem_ctx = lhs_entry ? lhs_entry->mem_ctx : rhs_entry->mem_ctx;
+
+ if (lhs_entry) {
+ new_lhs = new(mem_ctx) ir_dereference_variable(lhs_entry->components[i]);
+ } else {
+ new_lhs = new(mem_ctx)
+ ir_dereference_record(ir->lhs->clone(mem_ctx, NULL),
+ type->fields.structure[i].name);
+ }
+
+ if (rhs_entry) {
+ new_rhs = new(mem_ctx) ir_dereference_variable(rhs_entry->components[i]);
+ } else {
+ new_rhs = new(mem_ctx)
+ ir_dereference_record(ir->rhs->clone(mem_ctx, NULL),
+ type->fields.structure[i].name);
+ }
+
+ ir->insert_before(new(mem_ctx) ir_assignment(new_lhs, new_rhs));
+ }
+ ir->remove();
+ } else {
+ handle_rvalue(&ir->rhs);
+ split_deref(&ir->lhs);
+ }
+
+ handle_rvalue(&ir->condition);
+
+ return visit_continue;
+}
+
+} /* unnamed namespace */
+
+bool
+do_structure_splitting(exec_list *instructions)
+{
+ ir_structure_reference_visitor refs;
+
+ visit_list_elements(&refs, instructions);
+
+ /* Trim out variables we can't split. */
+ foreach_in_list_safe(variable_entry, entry, &refs.variable_list) {
+ if (debug) {
+ printf("structure %s@%p: decl %d, whole_access %d\n",
+ entry->var->name, (void *) entry->var, entry->declaration,
+ entry->whole_structure_access);
+ }
+
+ if (!entry->declaration || entry->whole_structure_access) {
+ entry->remove();
+ }
+ }
+
+ if (refs.variable_list.is_empty())
+ return false;
+
+ void *mem_ctx = ralloc_context(NULL);
+
+ /* Replace the decls of the structures to be split with their split
+ * components.
+ */
+ foreach_in_list_safe(variable_entry, entry, &refs.variable_list) {
+ const struct glsl_type *type = entry->var->type;
+
+ entry->mem_ctx = ralloc_parent(entry->var);
+
+ entry->components = ralloc_array(mem_ctx, ir_variable *, type->length);
+
+ for (unsigned int i = 0; i < entry->var->type->length; i++) {
+ const char *name = ralloc_asprintf(mem_ctx, "%s_%s", entry->var->name,
+ type->fields.structure[i].name);
+ ir_variable *new_var =
+ new(entry->mem_ctx) ir_variable(type->fields.structure[i].type,
+ name,
+ (ir_variable_mode) entry->var->data.mode);
+
+ if (type->fields.structure[i].type->without_array()->is_image()) {
+ /* Do not lose memory/format qualifiers for images declared inside
+ * structures as allowed by ARB_bindless_texture.
+ */
+ new_var->data.memory_read_only =
+ type->fields.structure[i].memory_read_only;
+ new_var->data.memory_write_only =
+ type->fields.structure[i].memory_write_only;
+ new_var->data.memory_coherent =
+ type->fields.structure[i].memory_coherent;
+ new_var->data.memory_volatile =
+ type->fields.structure[i].memory_volatile;
+ new_var->data.memory_restrict =
+ type->fields.structure[i].memory_restrict;
+ new_var->data.image_format =
+ type->fields.structure[i].image_format;
+ }
+
+ entry->components[i] = new_var;
+ entry->var->insert_before(entry->components[i]);
+ }
+
+ entry->var->remove();
+ }
+
+ ir_structure_splitting_visitor split(&refs.variable_list);
+ visit_list_elements(&split, instructions);
+
+ ralloc_free(mem_ctx);
+
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp
new file mode 100644
index 0000000000..2fbe362188
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp
@@ -0,0 +1,119 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_swizzle.cpp
+ * Optimize swizzle operations.
+ *
+ * First, compact a sequence of swizzled swizzles into a single swizzle.
+ *
+ * If the final resulting swizzle doesn't change the order or count of
+ * components, then remove the swizzle so that other optimization passes see
+ * the value behind it.
+ */
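+
+/* Sketches of both steps (illustrative):
+ *
+ *    v.xyzw.yy   ===>   v.yy    (masks composed into one swizzle)
+ *    u.xyz       ===>   u       (identity swizzle on a vec3 removed)
+ */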
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+class ir_opt_swizzle_visitor : public ir_rvalue_visitor {
+public:
+ ir_opt_swizzle_visitor()
+ {
+ this->progress = false;
+ }
+
+ void handle_rvalue(ir_rvalue **rvalue);
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+void
+ir_opt_swizzle_visitor::handle_rvalue(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return;
+
+ ir_swizzle *swiz = (*rvalue)->as_swizzle();
+
+ if (!swiz)
+ return;
+
+ ir_swizzle *swiz2;
+
+ while ((swiz2 = swiz->val->as_swizzle()) != NULL) {
+ int mask2[4];
+
+ memset(&mask2, 0, sizeof(mask2));
+ if (swiz2->mask.num_components >= 1)
+ mask2[0] = swiz2->mask.x;
+ if (swiz2->mask.num_components >= 2)
+ mask2[1] = swiz2->mask.y;
+ if (swiz2->mask.num_components >= 3)
+ mask2[2] = swiz2->mask.z;
+ if (swiz2->mask.num_components >= 4)
+ mask2[3] = swiz2->mask.w;
+
+ if (swiz->mask.num_components >= 1)
+ swiz->mask.x = mask2[swiz->mask.x];
+ if (swiz->mask.num_components >= 2)
+ swiz->mask.y = mask2[swiz->mask.y];
+ if (swiz->mask.num_components >= 3)
+ swiz->mask.z = mask2[swiz->mask.z];
+ if (swiz->mask.num_components >= 4)
+ swiz->mask.w = mask2[swiz->mask.w];
+
+ swiz->val = swiz2->val;
+
+ this->progress = true;
+ }
+
+ if (swiz->type != swiz->val->type)
+ return;
+
+ int elems = swiz->val->type->vector_elements;
+ if (swiz->mask.x != 0)
+ return;
+ if (elems >= 2 && swiz->mask.y != 1)
+ return;
+ if (elems >= 3 && swiz->mask.z != 2)
+ return;
+ if (elems >= 4 && swiz->mask.w != 3)
+ return;
+
+ this->progress = true;
+ *rvalue = swiz->val;
+}
+
+bool
+optimize_swizzles(exec_list *instructions)
+{
+ ir_opt_swizzle_visitor v;
+ visit_list_elements(&v, instructions);
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp
new file mode 100644
index 0000000000..6b5d93af66
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_tree_grafting.cpp
+ *
+ * Takes assignments to variables that are dereferenced only once and
+ * pastes the RHS expression into where the variable is dereferenced.
+ *
+ * In the process of various operations like function inlining and
+ * ternary op handling, we'll end up with our expression trees having
+ * been chopped up into a series of assignments of short expressions
+ * to temps. Other passes like ir_algebraic.cpp would prefer to see
+ * the deepest expression trees they can to try to optimize them.
+ *
+ * This is a lot like copy propagation. In comparison, copy
+ * propagation only acts on plain copies, not arbitrary expressions on
+ * the RHS. Generally, we wouldn't want to go pasting some
+ * complicated expression everywhere it got used, though, so we don't
+ * handle expressions in that pass.
+ *
+ * The hard part is making sure we don't move an expression across
+ * some other assignments that would change the value of the
+ * expression. So we split this into two passes: First, find the
+ * variables in our scope which are written to once and read once, and
+ * then go through basic blocks seeing if we find an opportunity to
+ * move those expressions safely.
+ */
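+
+/* A minimal sketch (illustrative names): with "t" written once and read
+ * once within the basic block,
+ *
+ *    float t = a * b + c;
+ *    result = t * 2.0;      ===>   result = (a * b + c) * 2.0;
+ */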
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_variable_refcount.h"
+#include "ir_basic_block.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+static bool debug = false;
+
+class ir_tree_grafting_visitor : public ir_hierarchical_visitor {
+public:
+ ir_tree_grafting_visitor(ir_assignment *graft_assign,
+ ir_variable *graft_var)
+ {
+ this->progress = false;
+ this->graft_assign = graft_assign;
+ this->graft_var = graft_var;
+ }
+
+ virtual ir_visitor_status visit_leave(class ir_assignment *);
+ virtual ir_visitor_status visit_enter(class ir_call *);
+ virtual ir_visitor_status visit_enter(class ir_expression *);
+ virtual ir_visitor_status visit_enter(class ir_function *);
+ virtual ir_visitor_status visit_enter(class ir_function_signature *);
+ virtual ir_visitor_status visit_enter(class ir_if *);
+ virtual ir_visitor_status visit_enter(class ir_loop *);
+ virtual ir_visitor_status visit_enter(class ir_swizzle *);
+ virtual ir_visitor_status visit_enter(class ir_texture *);
+
+ ir_visitor_status check_graft(ir_instruction *ir, ir_variable *var);
+
+ bool do_graft(ir_rvalue **rvalue);
+
+ bool progress;
+ ir_variable *graft_var;
+ ir_assignment *graft_assign;
+};
+
+struct find_deref_info {
+ ir_variable *var;
+ bool found;
+};
+
+void
+dereferences_variable_callback(ir_instruction *ir, void *data)
+{
+ struct find_deref_info *info = (struct find_deref_info *)data;
+ ir_dereference_variable *deref = ir->as_dereference_variable();
+
+ if (deref && deref->var == info->var)
+ info->found = true;
+}
+
+static bool
+dereferences_variable(ir_instruction *ir, ir_variable *var)
+{
+ struct find_deref_info info;
+
+ info.var = var;
+ info.found = false;
+
+ visit_tree(ir, dereferences_variable_callback, &info);
+
+ return info.found;
+}
+
+bool
+ir_tree_grafting_visitor::do_graft(ir_rvalue **rvalue)
+{
+ if (!*rvalue)
+ return false;
+
+ ir_dereference_variable *deref = (*rvalue)->as_dereference_variable();
+
+ if (!deref || deref->var != this->graft_var)
+ return false;
+
+ if (debug) {
+ fprintf(stderr, "GRAFTING:\n");
+ this->graft_assign->fprint(stderr);
+ fprintf(stderr, "\n");
+ fprintf(stderr, "TO:\n");
+ (*rvalue)->fprint(stderr);
+ fprintf(stderr, "\n");
+ }
+
+ this->graft_assign->remove();
+ *rvalue = this->graft_assign->rhs;
+
+ this->progress = true;
+ return true;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_loop *ir)
+{
+ (void)ir;
+ /* Do not traverse into the body of the loop since that is a
+ * different basic block.
+ */
+ return visit_stop;
+}
+
+/**
+ * Check if we can continue grafting after writing to a variable. If the
+ * expression we're trying to graft references the variable, we must stop.
+ *
+ * \param ir An instruction that writes to a variable.
+ * \param var The variable being updated.
+ */
+ir_visitor_status
+ir_tree_grafting_visitor::check_graft(ir_instruction *ir, ir_variable *var)
+{
+ if (dereferences_variable(this->graft_assign->rhs, var)) {
+ if (debug) {
+ fprintf(stderr, "graft killed by: ");
+ ir->fprint(stderr);
+ fprintf(stderr, "\n");
+ }
+ return visit_stop;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_leave(ir_assignment *ir)
+{
+ if (do_graft(&ir->rhs) ||
+ do_graft(&ir->condition))
+ return visit_stop;
+
+ /* If this assignment updates a variable used in the assignment
+ * we're trying to graft, then we're done.
+ */
+ return check_graft(ir, ir->lhs->variable_referenced());
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_function *ir)
+{
+ (void) ir;
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_function_signature *ir)
+{
+ (void)ir;
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_call *ir)
+{
+ foreach_two_lists(formal_node, &ir->callee->parameters,
+ actual_node, &ir->actual_parameters) {
+ ir_variable *sig_param = (ir_variable *) formal_node;
+ ir_rvalue *ir = (ir_rvalue *) actual_node;
+ ir_rvalue *new_ir = ir;
+
+ if (sig_param->data.mode != ir_var_function_in
+ && sig_param->data.mode != ir_var_const_in) {
+ if (check_graft(ir, sig_param) == visit_stop)
+ return visit_stop;
+ continue;
+ }
+
+ if (do_graft(&new_ir)) {
+ ir->replace_with(new_ir);
+ return visit_stop;
+ }
+ }
+
+ if (ir->return_deref && check_graft(ir, ir->return_deref->var) == visit_stop)
+ return visit_stop;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_expression *ir)
+{
+ for (unsigned int i = 0; i < ir->num_operands; i++) {
+ if (do_graft(&ir->operands[i]))
+ return visit_stop;
+ }
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_if *ir)
+{
+ if (do_graft(&ir->condition))
+ return visit_stop;
+
+ /* Do not traverse into the body of the if-statement since that is a
+ * different basic block.
+ */
+ return visit_continue_with_parent;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_swizzle *ir)
+{
+ if (do_graft(&ir->val))
+ return visit_stop;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_tree_grafting_visitor::visit_enter(ir_texture *ir)
+{
+ if (do_graft(&ir->coordinate) ||
+ do_graft(&ir->projector) ||
+ do_graft(&ir->offset) ||
+ do_graft(&ir->shadow_comparator))
+ return visit_stop;
+
+ switch (ir->op) {
+ case ir_tex:
+ case ir_lod:
+ case ir_query_levels:
+ case ir_texture_samples:
+ case ir_samples_identical:
+ break;
+ case ir_txb:
+ if (do_graft(&ir->lod_info.bias))
+ return visit_stop;
+ break;
+ case ir_txf:
+ case ir_txl:
+ case ir_txs:
+ if (do_graft(&ir->lod_info.lod))
+ return visit_stop;
+ break;
+ case ir_txf_ms:
+ if (do_graft(&ir->lod_info.sample_index))
+ return visit_stop;
+ break;
+ case ir_txd:
+ if (do_graft(&ir->lod_info.grad.dPdx) ||
+ do_graft(&ir->lod_info.grad.dPdy))
+ return visit_stop;
+ break;
+ case ir_tg4:
+ if (do_graft(&ir->lod_info.component))
+ return visit_stop;
+ break;
+ }
+
+ return visit_continue;
+}
+
+struct tree_grafting_info {
+ ir_variable_refcount_visitor *refs;
+ bool progress;
+};
+
+static bool
+try_tree_grafting(ir_assignment *start,
+ ir_variable *lhs_var,
+ ir_instruction *bb_last)
+{
+ ir_tree_grafting_visitor v(start, lhs_var);
+
+ if (debug) {
+ fprintf(stderr, "trying to graft: ");
+ lhs_var->fprint(stderr);
+ fprintf(stderr, "\n");
+ }
+
+ for (ir_instruction *ir = (ir_instruction *)start->next;
+ ir != bb_last->next;
+ ir = (ir_instruction *)ir->next) {
+
+ if (debug) {
+ fprintf(stderr, "- ");
+ ir->fprint(stderr);
+ fprintf(stderr, "\n");
+ }
+
+ ir_visitor_status s = ir->accept(&v);
+ if (s == visit_stop)
+ return v.progress;
+ }
+
+ return false;
+}
+
+static void
+tree_grafting_basic_block(ir_instruction *bb_first,
+ ir_instruction *bb_last,
+ void *data)
+{
+ struct tree_grafting_info *info = (struct tree_grafting_info *)data;
+ ir_instruction *ir, *next;
+
+ for (ir = bb_first, next = (ir_instruction *)ir->next;
+ ir != bb_last->next;
+ ir = next, next = (ir_instruction *)ir->next) {
+ ir_assignment *assign = ir->as_assignment();
+
+ if (!assign)
+ continue;
+
+ ir_variable *lhs_var = assign->whole_variable_written();
+ if (!lhs_var)
+ continue;
+
+ if (lhs_var->data.mode == ir_var_function_out ||
+ lhs_var->data.mode == ir_var_function_inout ||
+ lhs_var->data.mode == ir_var_shader_out ||
+ lhs_var->data.mode == ir_var_shader_storage ||
+ lhs_var->data.mode == ir_var_shader_shared)
+ continue;
+
+ if (lhs_var->data.precise)
+ continue;
+
+ /* Do not graft sampler and image variables. This is a workaround to
+ * st/glsl_to_tgsi being unable to handle expression parameters to image
+ * intrinsics.
+ *
+ * Note that if this is ever fixed, we still need to skip grafting when
+ * any image layout qualifiers (including the image format) are set,
+ * since we must not lose those.
+ */
+ if (lhs_var->type->is_sampler() || lhs_var->type->is_image())
+ continue;
+
+ ir_variable_refcount_entry *entry = info->refs->get_variable_entry(lhs_var);
+
+ if (!entry->declaration ||
+ entry->assigned_count != 1 ||
+ entry->referenced_count != 2)
+ continue;
+
+      /* Found a possibly graftable assignment.  Now walk through the
+       * rest of the basic block to see whether the dereference occurs
+       * there and whether anything interferes with grafting the
+       * expression's value in its place.
+       */
+ info->progress |= try_tree_grafting(assign, lhs_var, bb_last);
+ }
+}
+
+} /* unnamed namespace */
+
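+/* A hypothetical GLSL-level illustration (not from the upstream sources):
+ * given a temporary that is declared in this basic block, assigned exactly
+ * once, and read exactly once,
+ *
+ *    float t = dot(n, l);
+ *    color = diffuse * t;
+ *
+ * tree grafting deletes the assignment and grafts its right-hand side into
+ * the use:
+ *
+ *    color = diffuse * dot(n, l);
+ */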
+/**
+ * Does a tree-grafting pass on the code present in the instruction stream.
+ */
+bool
+do_tree_grafting(exec_list *instructions)
+{
+ ir_variable_refcount_visitor refs;
+ struct tree_grafting_info info;
+
+ info.progress = false;
+ info.refs = &refs;
+
+ visit_list_elements(info.refs, instructions);
+
+ call_for_basic_blocks(instructions, tree_grafting_basic_block, &info);
+
+ return info.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp
new file mode 100644
index 0000000000..88318cd8a6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp
@@ -0,0 +1,407 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file opt_vectorize.cpp
+ *
+ * Combines scalar assignments of the same expression (modulo swizzle) to
+ * multiple channels of the same variable into a single vectorized expression
+ * and assignment.
+ *
+ * Many generated shaders contain scalarized code. That is, they contain
+ *
+ * r1.x = log2(v0.x);
+ * r1.y = log2(v0.y);
+ * r1.z = log2(v0.z);
+ *
+ * rather than
+ *
+ * r1.xyz = log2(v0.xyz);
+ *
+ * We look for consecutive assignments of the same expression (modulo swizzle)
+ * to each channel of the same variable.
+ *
+ * For instance, we want to convert these three scalar operations
+ *
+ * (assign (x) (var_ref r1) (expression float log2 (swiz x (var_ref v0))))
+ * (assign (y) (var_ref r1) (expression float log2 (swiz y (var_ref v0))))
+ * (assign (z) (var_ref r1) (expression float log2 (swiz z (var_ref v0))))
+ *
+ * into a single vector operation
+ *
+ * (assign (xyz) (var_ref r1) (expression vec3 log2 (swiz xyz (var_ref v0))))
+ */
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+#include "program/prog_instruction.h"
+
+namespace {
+
+class ir_vectorize_visitor : public ir_hierarchical_visitor {
+public:
+ void clear()
+ {
+ assignment[0] = NULL;
+ assignment[1] = NULL;
+ assignment[2] = NULL;
+ assignment[3] = NULL;
+ current_assignment = NULL;
+ last_assignment = NULL;
+ channels = 0;
+ has_swizzle = false;
+ }
+
+ ir_vectorize_visitor()
+ {
+ clear();
+ progress = false;
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *);
+ virtual ir_visitor_status visit_enter(ir_swizzle *);
+ virtual ir_visitor_status visit_enter(ir_dereference_array *);
+ virtual ir_visitor_status visit_enter(ir_expression *);
+ virtual ir_visitor_status visit_enter(ir_if *);
+ virtual ir_visitor_status visit_enter(ir_loop *);
+ virtual ir_visitor_status visit_enter(ir_texture *);
+
+ virtual ir_visitor_status visit_leave(ir_assignment *);
+
+ void try_vectorize();
+
+ ir_assignment *assignment[4];
+ ir_assignment *current_assignment, *last_assignment;
+ unsigned channels;
+ bool has_swizzle;
+
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+/**
+ * Rewrites the swizzles and types of a right-hand side of an assignment.
+ *
+ * From the example above, this function would be called (by visit_tree()) on
+ * the nodes of the tree (expression float log2 (swiz z (var_ref v0))),
+ * rewriting it into (expression vec3 log2 (swiz xyz (var_ref v0))).
+ *
+ * The function operates on ir_expressions (and its operands) and ir_swizzles.
+ * For expressions it sets a new type and swizzles any non-expression and non-
+ * swizzle scalar operands into appropriately sized vector arguments. For
+ * example, if combining
+ *
+ * (assign (x) (var_ref r1) (expression float + (swiz x (var_ref v0) (var_ref v1))))
+ * (assign (y) (var_ref r1) (expression float + (swiz y (var_ref v0) (var_ref v1))))
+ *
+ * where v1 is a scalar, rewrite_swizzle() would insert a swizzle on
+ * (var_ref v1) such that the final result was
+ *
+ * (assign (xy) (var_ref r1) (expression vec2 + (swiz xy (var_ref v0))
+ * (swiz xx (var_ref v1))))
+ *
+ * For swizzles, it sets a new type, and if the variable being swizzled is a
+ * vector it overwrites the swizzle mask with the ir_swizzle_mask passed as the
+ * data parameter. If the swizzled variable is scalar, then the swizzle was
+ * added by an earlier call to rewrite_swizzle() on an expression, so the
+ * mask should not be modified.
+ */
+static void
+rewrite_swizzle(ir_instruction *ir, void *data)
+{
+ ir_swizzle_mask *mask = (ir_swizzle_mask *)data;
+
+ switch (ir->ir_type) {
+ case ir_type_swizzle: {
+ ir_swizzle *swz = (ir_swizzle *)ir;
+ if (swz->val->type->is_vector()) {
+ swz->mask = *mask;
+ }
+ swz->type = glsl_type::get_instance(swz->type->base_type,
+ mask->num_components, 1);
+ break;
+ }
+ case ir_type_expression: {
+ ir_expression *expr = (ir_expression *)ir;
+ expr->type = glsl_type::get_instance(expr->type->base_type,
+ mask->num_components, 1);
+ for (unsigned i = 0; i < 4; i++) {
+ if (expr->operands[i]) {
+ ir_rvalue *rval = expr->operands[i]->as_rvalue();
+ if (rval && rval->type->is_scalar() &&
+ !rval->as_expression() && !rval->as_swizzle()) {
+ expr->operands[i] = new(ir) ir_swizzle(rval, 0, 0, 0, 0,
+ mask->num_components);
+ }
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
+ * Attempt to vectorize the previously saved assignments, and clear them from
+ * consideration.
+ *
+ * If the assignments are able to be combined, it modifies in-place the last
+ * assignment seen to be an equivalent vector form of the scalar assignments.
+ * It then removes the other now obsolete scalar assignments.
+ */
+void
+ir_vectorize_visitor::try_vectorize()
+{
+ if (this->last_assignment && this->channels > 1) {
+ ir_swizzle_mask mask = {0, 0, 0, 0, channels, 0};
+
+ this->last_assignment->write_mask = 0;
+
+ for (unsigned i = 0, j = 0; i < 4; i++) {
+ if (this->assignment[i]) {
+ this->last_assignment->write_mask |= 1 << i;
+
+ if (this->assignment[i] != this->last_assignment) {
+ this->assignment[i]->remove();
+ }
+
+ switch (j) {
+ case 0: mask.x = i; break;
+ case 1: mask.y = i; break;
+ case 2: mask.z = i; break;
+ case 3: mask.w = i; break;
+ }
+
+ j++;
+ }
+ }
+
+ visit_tree(this->last_assignment->rhs, rewrite_swizzle, &mask);
+
+ this->progress = true;
+ }
+ clear();
+}
+
+/**
+ * Returns whether the write mask is a single channel.
+ */
+static bool
+single_channel_write_mask(unsigned write_mask)
+{
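+   /* A nonzero mask with no bit left after clearing its lowest set bit
+    * (write_mask & (write_mask - 1)) has exactly one bit set, i.e. it
+    * names exactly one channel.
+    */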
+ return write_mask != 0 && (write_mask & (write_mask - 1)) == 0;
+}
+
+/**
+ * Translates a single-channel write mask to the corresponding swizzle.
+ */
+static unsigned
+write_mask_to_swizzle(unsigned write_mask)
+{
+ switch (write_mask) {
+ case WRITEMASK_X: return SWIZZLE_X;
+ case WRITEMASK_Y: return SWIZZLE_Y;
+ case WRITEMASK_Z: return SWIZZLE_Z;
+ case WRITEMASK_W: return SWIZZLE_W;
+ }
+ unreachable("not reached");
+}
+
+/**
+ * Returns whether a single-channeled write mask matches a swizzle.
+ */
+static bool
+write_mask_matches_swizzle(unsigned write_mask,
+ const ir_swizzle *swz)
+{
+ return ((write_mask == WRITEMASK_X && swz->mask.x == SWIZZLE_X) ||
+ (write_mask == WRITEMASK_Y && swz->mask.x == SWIZZLE_Y) ||
+ (write_mask == WRITEMASK_Z && swz->mask.x == SWIZZLE_Z) ||
+ (write_mask == WRITEMASK_W && swz->mask.x == SWIZZLE_W));
+}
+
+/**
+ * Upon entering an ir_assignment, attempt to vectorize the currently tracked
+ * assignments if the current assignment is not suitable. Keep a pointer to
+ * the current assignment.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_assignment *ir)
+{
+ ir_dereference *lhs = this->last_assignment != NULL ?
+ this->last_assignment->lhs : NULL;
+ ir_rvalue *rhs = this->last_assignment != NULL ?
+ this->last_assignment->rhs : NULL;
+
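+   /* Flush the channels gathered so far if this assignment cannot join
+    * them: a conditional write, four channels already gathered, a
+    * multi-channel write mask, a channel we have already seen, or an
+    * LHS/RHS that does not match the previous assignment (modulo swizzle)
+    * all end the run of combinable scalar assignments.
+    */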
+ if (ir->condition ||
+ this->channels >= 4 ||
+ !single_channel_write_mask(ir->write_mask) ||
+ this->assignment[write_mask_to_swizzle(ir->write_mask)] != NULL ||
+ (lhs && !ir->lhs->equals(lhs)) ||
+ (rhs && !ir->rhs->equals(rhs, ir_type_swizzle))) {
+ try_vectorize();
+ }
+
+ this->current_assignment = ir;
+
+ return visit_continue;
+}
+
+/**
+ * Upon entering an ir_swizzle, set ::has_swizzle if we're visiting from an
+ * ir_assignment (i.e., ::current_assignment is set) and the swizzle mask
+ * matches the current assignment's write mask.
+ *
+ * If the write mask doesn't match the swizzle mask, remove the current
+ * assignment from further consideration.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_swizzle *ir)
+{
+ if (this->current_assignment) {
+ if (write_mask_matches_swizzle(this->current_assignment->write_mask, ir)) {
+ this->has_swizzle = true;
+ } else {
+ this->current_assignment = NULL;
+ }
+ }
+ return visit_continue;
+}
+
+/* Upon entering an ir_dereference_array, remove the current assignment from
+ * further consideration. Since the index of an array dereference must be
+ * scalar, we are not able to vectorize it.
+ *
+ * FINISHME: If all of the scalar indices are identical we could vectorize.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_dereference_array *)
+{
+ this->current_assignment = NULL;
+ return visit_continue_with_parent;
+}
+
+/**
+ * Upon entering an ir_expression, remove the current assignment from further
+ * consideration if the expression operates horizontally on vectors.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_expression *ir)
+{
+ if (ir->is_horizontal()) {
+ this->current_assignment = NULL;
+ return visit_continue_with_parent;
+ }
+ return visit_continue;
+}
+
+/* Since there is no statement to visit between the "then" and "else"
+ * instructions, try to vectorize before, in between, and after them to avoid
+ * combining statements from different basic blocks.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_if *ir)
+{
+ try_vectorize();
+
+ visit_list_elements(this, &ir->then_instructions);
+ try_vectorize();
+
+ visit_list_elements(this, &ir->else_instructions);
+ try_vectorize();
+
+ return visit_continue_with_parent;
+}
+
+/* Since there is no statement to visit between the instructions in the body of
+ * the loop and the instructions after it, try to vectorize before and after the
+ * body to avoid combining statements from different basic blocks.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_loop *ir)
+{
+ try_vectorize();
+
+ visit_list_elements(this, &ir->body_instructions);
+ try_vectorize();
+
+ return visit_continue_with_parent;
+}
+
+/**
+ * Upon entering an ir_texture, remove the current assignment from
+ * further consideration. Vectorizing multiple texture lookups into one
+ * is wrong.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_enter(ir_texture *)
+{
+ this->current_assignment = NULL;
+ return visit_continue_with_parent;
+}
+
+/**
+ * Upon leaving an ir_assignment, save a pointer to it in ::assignment[] if
+ * the swizzle mask(s) found were appropriate. Also save a pointer in
+ * ::last_assignment so that we can compare future assignments with it.
+ *
+ * Finally, clear ::current_assignment and ::has_swizzle.
+ */
+ir_visitor_status
+ir_vectorize_visitor::visit_leave(ir_assignment *ir)
+{
+ if (this->has_swizzle && this->current_assignment) {
+ assert(this->current_assignment == ir);
+
+ unsigned channel = write_mask_to_swizzle(this->current_assignment->write_mask);
+ this->assignment[channel] = ir;
+ this->channels++;
+
+ this->last_assignment = this->current_assignment;
+ }
+ this->current_assignment = NULL;
+ this->has_swizzle = false;
+ return visit_continue;
+}
+
+/**
+ * Combines scalar assignments of the same expression (modulo swizzle) to
+ * multiple channels of the same variable into a single vectorized expression
+ * and assignment.
+ */
+bool
+do_vectorize(exec_list *instructions)
+{
+ ir_vectorize_visitor v;
+
+ v.run(instructions);
+
+ /* Try to vectorize the last assignments seen. */
+ v.try_vectorize();
+
+ return v.progress;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/program.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/program.h
new file mode 100644
index 0000000000..0106e1dd6f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/program.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_PROGRAM_H
+#define GLSL_PROGRAM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+struct gl_shader;
+struct gl_shader_program;
+
+extern void
+_mesa_glsl_compile_shader(struct gl_context *ctx, struct gl_shader *shader,
+ bool dump_ast, bool dump_hir, bool force_recompile);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+extern void
+link_shaders(struct gl_context *ctx, struct gl_shader_program *prog);
+
+extern void
+build_program_resource_list(struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ bool add_packed_varyings_only);
+
+extern long
+parse_program_resource_name(const GLchar *name,
+ const GLchar **out_base_name_end);
+
+#endif /* GLSL_PROGRAM_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp
new file mode 100644
index 0000000000..b3f1d810cd
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp
@@ -0,0 +1,125 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file propagate_invariance.cpp
+ * Propagate the "invariant" and "precise" qualifiers to variables used to
+ * compute invariant or precise values.
+ *
+ * The GLSL spec (depending on what version you read) says, among the
+ * conditions for getting bit-for-bit the same values on an invariant output:
+ *
+ * "All operations in the consuming expressions and any intermediate
+ * expressions must be the same, with the same order of operands and same
+ * associativity, to give the same order of evaluation."
+ *
+ * This effectively means that if a variable is used to compute an invariant
+ * value then that variable becomes invariant. The same should apply to the
+ * "precise" qualifier.
+ */
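+
+/* A hypothetical GLSL-level illustration (not from the upstream sources):
+ *
+ *    invariant gl_Position;
+ *    vec4 tmp = mvp * position;
+ *    gl_Position = tmp;
+ *
+ * Since tmp is used to compute an invariant output, the pass marks tmp
+ * itself as invariant, and the do/while loop below then propagates the
+ * qualifier on to mvp and position as well.
+ */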
+
+#include "ir.h"
+#include "ir_visitor.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_optimization.h"
+#include "compiler/glsl_types.h"
+
+namespace {
+
+class ir_invariance_propagation_visitor : public ir_hierarchical_visitor {
+public:
+ ir_invariance_propagation_visitor()
+ {
+ this->progress = false;
+ this->dst_var = NULL;
+ }
+
+ virtual ~ir_invariance_propagation_visitor()
+ {
+ /* empty */
+ }
+
+ virtual ir_visitor_status visit_enter(ir_assignment *ir);
+ virtual ir_visitor_status visit_leave(ir_assignment *ir);
+ virtual ir_visitor_status visit(ir_dereference_variable *ir);
+
+ ir_variable *dst_var;
+ bool progress;
+};
+
+} /* unnamed namespace */
+
+ir_visitor_status
+ir_invariance_propagation_visitor::visit_enter(ir_assignment *ir)
+{
+ assert(this->dst_var == NULL);
+ ir_variable *var = ir->lhs->variable_referenced();
+ if (var->data.invariant || var->data.precise) {
+ this->dst_var = var;
+ return visit_continue;
+ } else {
+ return visit_continue_with_parent;
+ }
+}
+
+ir_visitor_status
+ir_invariance_propagation_visitor::visit_leave(ir_assignment *)
+{
+ this->dst_var = NULL;
+
+ return visit_continue;
+}
+
+ir_visitor_status
+ir_invariance_propagation_visitor::visit(ir_dereference_variable *ir)
+{
+ if (this->dst_var == NULL)
+ return visit_continue;
+
+ if (this->dst_var->data.invariant) {
+ if (!ir->var->data.invariant)
+ this->progress = true;
+
+ ir->var->data.invariant = true;
+ }
+
+ if (this->dst_var->data.precise) {
+ if (!ir->var->data.precise)
+ this->progress = true;
+
+ ir->var->data.precise = true;
+ }
+
+ return visit_continue;
+}
+
+void
+propagate_invariance(exec_list *instructions)
+{
+ ir_invariance_propagation_visitor visitor;
+
+ do {
+ visitor.progress = false;
+ visit_list_elements(&visitor, instructions);
+ } while (visitor.progress);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.cpp
new file mode 100644
index 0000000000..12baf1d3ed
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.cpp
@@ -0,0 +1,220 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+#include "s_expression.h"
+
+s_symbol::s_symbol(const char *str, size_t n)
+{
+ /* Assume the given string is already nul-terminated and in memory that
+ * will live as long as this node.
+ */
+ assert(str[n] == '\0');
+ this->str = str;
+}
+
+s_list::s_list()
+{
+}
+
+static void
+skip_whitespace(const char *&src, char *&symbol_buffer)
+{
+ size_t n = strspn(src, " \v\t\r\n");
+ src += n;
+ symbol_buffer += n;
+ /* Also skip Scheme-style comments: semi-colon 'til end of line */
+ if (src[0] == ';') {
+ n = strcspn(src, "\n");
+ src += n;
+ symbol_buffer += n;
+ skip_whitespace(src, symbol_buffer);
+ }
+}
+
+static s_expression *
+read_atom(void *ctx, const char *&src, char *&symbol_buffer)
+{
+ s_expression *expr = NULL;
+
+ skip_whitespace(src, symbol_buffer);
+
+ size_t n = strcspn(src, "( \v\t\r\n);");
+ if (n == 0)
+ return NULL; // no atom
+
+ // Check for the special symbol '+INF', which means +Infinity. Note: C99
+ // requires strtof to parse '+INF' as +Infinity, but we still support some
+ // non-C99-compliant compilers (e.g. MSVC).
+ if (n == 4 && strncmp(src, "+INF", 4) == 0) {
+ expr = new(ctx) s_float(INFINITY);
+ } else {
+ // Check if the atom is a number.
+ char *float_end = NULL;
+ float f = _mesa_strtof(src, &float_end);
+ if (float_end != src) {
+ char *int_end = NULL;
+ int i = strtol(src, &int_end, 10);
+ // If strtof matched more characters, it must have a decimal part
+ if (float_end > int_end)
+ expr = new(ctx) s_float(f);
+ else
+ expr = new(ctx) s_int(i);
+ } else {
+ // Not a number; return a symbol.
+ symbol_buffer[n] = '\0';
+ expr = new(ctx) s_symbol(symbol_buffer, n);
+ }
+ }
+
+ src += n;
+ symbol_buffer += n;
+
+ return expr;
+}
+
+static s_expression *
+__read_expression(void *ctx, const char *&src, char *&symbol_buffer)
+{
+ s_expression *atom = read_atom(ctx, src, symbol_buffer);
+ if (atom != NULL)
+ return atom;
+
+ skip_whitespace(src, symbol_buffer);
+ if (src[0] == '(') {
+ ++src;
+ ++symbol_buffer;
+
+ s_list *list = new(ctx) s_list;
+ s_expression *expr;
+
+ while ((expr = __read_expression(ctx, src, symbol_buffer)) != NULL) {
+ list->subexpressions.push_tail(expr);
+ }
+ skip_whitespace(src, symbol_buffer);
+ if (src[0] != ')') {
+ printf("Unclosed expression (check your parenthesis).\n");
+ return NULL;
+ }
+ ++src;
+ ++symbol_buffer;
+ return list;
+ }
+ return NULL;
+}
+
+s_expression *
+s_expression::read_expression(void *ctx, const char *&src)
+{
+ assert(src != NULL);
+
+ /* When we encounter a Symbol, we need to save a nul-terminated copy of
+ * the string. However, ralloc_strndup'ing every individual Symbol is
+ * extremely expensive. We could avoid this by simply overwriting the
+ * next character (guaranteed to be whitespace, parens, or semicolon) with
+    * a nul-byte.  But overwriting a paren or semicolon would mess up parsing.
+ *
+ * So, just copy the whole buffer ahead of time. Walk both, leaving the
+ * original source string unmodified, and altering the copy to contain the
+ * necessary nul-bytes whenever we encounter a symbol.
+ */
+ char *symbol_buffer = ralloc_strdup(ctx, src);
+ return __read_expression(ctx, src, symbol_buffer);
+}
+
+void s_int::print()
+{
+ printf("%d", this->val);
+}
+
+void s_float::print()
+{
+ printf("%f", this->val);
+}
+
+void s_symbol::print()
+{
+ printf("%s", this->str);
+}
+
+void s_list::print()
+{
+ printf("(");
+ foreach_in_list(s_expression, expr, &this->subexpressions) {
+ expr->print();
+ if (!expr->next->is_tail_sentinel())
+ printf(" ");
+ }
+ printf(")");
+}
+
+// --------------------------------------------------
+
+bool
+s_pattern::match(s_expression *expr)
+{
+ switch (type)
+ {
+ case EXPR: *p_expr = expr; break;
+ case LIST: if (expr->is_list()) *p_list = (s_list *) expr; break;
+ case SYMBOL: if (expr->is_symbol()) *p_symbol = (s_symbol *) expr; break;
+ case NUMBER: if (expr->is_number()) *p_number = (s_number *) expr; break;
+ case INT: if (expr->is_int()) *p_int = (s_int *) expr; break;
+ case STRING:
+ s_symbol *sym = SX_AS_SYMBOL(expr);
+ if (sym != NULL && strcmp(sym->value(), literal) == 0)
+ return true;
+ return false;
+ };
+
+ return *p_expr == expr;
+}
+
+bool
+s_match(s_expression *top, unsigned n, s_pattern *pattern, bool partial)
+{
+ s_list *list = SX_AS_LIST(top);
+ if (list == NULL)
+ return false;
+
+ unsigned i = 0;
+ foreach_in_list(s_expression, expr, &list->subexpressions) {
+ if (i >= n)
+ return partial; /* More actual items than the pattern expected */
+
+ if (expr == NULL || !pattern[i].match(expr))
+ return false;
+
+ i++;
+ }
+
+ if (i < n)
+      return false; /* Fewer actual items than the pattern expected */
+
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.h
new file mode 100644
index 0000000000..38caabbf89
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/s_expression.h
@@ -0,0 +1,178 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef S_EXPRESSION_H
+#define S_EXPRESSION_H
+
+#include "util/strtod.h"
+#include "list.h"
+
+/* Type-safe downcasting macros (also safe to pass NULL) */
+#define SX_AS_(t,x) ((x) && ((s_expression*) x)->is_##t()) ? ((s_##t*) (x)) \
+ : NULL
+#define SX_AS_LIST(x) SX_AS_(list, x)
+#define SX_AS_SYMBOL(x) SX_AS_(symbol, x)
+#define SX_AS_NUMBER(x) SX_AS_(number, x)
+#define SX_AS_INT(x) SX_AS_(int, x)
+
+/* Pattern matching macros */
+#define MATCH(list, pat) s_match(list, ARRAY_SIZE(pat), pat, false)
+#define PARTIAL_MATCH(list, pat) s_match(list, ARRAY_SIZE(pat), pat, true)
+
+/* For our purposes, S-Expressions are:
+ * - <int>
+ * - <float>
+ * - symbol
+ * - (expr1 expr2 ... exprN) where exprN is an S-Expression
+ *
+ * Unlike LISP/Scheme, we do not support (foo . bar) pairs.
+ */
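+
+/* A usage sketch (hypothetical input, shown for illustration only):
+ *
+ *    const char *src = "(constant float (1.0))";
+ *    s_expression *expr = s_expression::read_expression(ctx, src);
+ *
+ * yields an s_list whose subexpressions are the symbol "constant", the
+ * symbol "float", and a nested s_list holding the s_float 1.0.
+ */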
+class s_expression : public exec_node
+{
+public:
+ /**
+ * Read an S-Expression from the given string.
+ * Advances the supplied pointer to just after the expression read.
+ *
+ * Any allocation will be performed with 'ctx' as the ralloc owner.
+ */
+ static s_expression *read_expression(void *ctx, const char *&src);
+
+ /**
+ * Print out an S-Expression. Useful for debugging.
+ */
+ virtual void print() = 0;
+
+ virtual bool is_list() const { return false; }
+ virtual bool is_symbol() const { return false; }
+ virtual bool is_number() const { return false; }
+ virtual bool is_int() const { return false; }
+
+protected:
+ s_expression() { }
+};
+
+/* Atoms */
+
+class s_number : public s_expression
+{
+public:
+ bool is_number() const { return true; }
+
+ virtual float fvalue() = 0;
+
+protected:
+ s_number() { }
+};
+
+class s_int : public s_number
+{
+public:
+ s_int(int x) : val(x) { }
+
+ bool is_int() const { return true; }
+
+ float fvalue() { return float(this->val); }
+ int value() { return this->val; }
+
+ void print();
+
+private:
+ int val;
+};
+
+class s_float : public s_number
+{
+public:
+ s_float(float x) : val(x) { }
+
+ float fvalue() { return this->val; }
+
+ void print();
+
+private:
+ float val;
+};
+
+class s_symbol : public s_expression
+{
+public:
+ s_symbol(const char *, size_t);
+
+ bool is_symbol() const { return true; }
+
+ const char *value() { return this->str; }
+
+ void print();
+
+private:
+ const char *str;
+};
+
+/* Lists of expressions: (expr1 ... exprN) */
+class s_list : public s_expression
+{
+public:
+ s_list();
+
+ virtual bool is_list() const { return true; }
+
+ void print();
+
+ exec_list subexpressions;
+};
+
+// ------------------------------------------------------------
+
+/**
+ * Part of a pattern to match - essentially a record holding a pointer to the
+ * storage for the component to match, along with the appropriate type.
+ */
+class s_pattern {
+public:
+ s_pattern(s_expression *&s) : p_expr(&s), type(EXPR) { }
+ s_pattern(s_list *&s) : p_list(&s), type(LIST) { }
+ s_pattern(s_symbol *&s) : p_symbol(&s), type(SYMBOL) { }
+ s_pattern(s_number *&s) : p_number(&s), type(NUMBER) { }
+ s_pattern(s_int *&s) : p_int(&s), type(INT) { }
+ s_pattern(const char *str) : literal(str), type(STRING) { }
+
+ bool match(s_expression *expr);
+
+private:
+ union {
+ s_expression **p_expr;
+ s_list **p_list;
+ s_symbol **p_symbol;
+ s_number **p_number;
+ s_int **p_int;
+ const char *literal;
+ };
+ enum { EXPR, LIST, SYMBOL, NUMBER, INT, STRING } type;
+};
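+
+/* A usage sketch (hypothetical, for illustration): to match the list
+ * (var_ref <name>) and capture the symbol,
+ *
+ *    s_symbol *name;
+ *    s_pattern pat[] = { "var_ref", name };
+ *    if (MATCH(expr, pat))
+ *       printf("%s\n", name->value());
+ */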
+
+bool
+s_match(s_expression *top, unsigned n, s_pattern *pattern, bool partial);
+
+#endif /* S_EXPRESSION_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.cpp
new file mode 100644
index 0000000000..d4aacc1cb9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.cpp
@@ -0,0 +1,1340 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file serialize.cpp
+ *
+ * GLSL serialization
+ *
+ * Supports serializing and deserializing glsl programs using a blob.
+ */
+
+#include "compiler/glsl_types.h"
+#include "compiler/shader_info.h"
+#include "ir_uniform.h"
+#include "main/mtypes.h"
+#include "main/shaderobj.h"
+#include "program/program.h"
+#include "string_to_uint_map.h"
+#include "util/bitscan.h"
+
+
+static void
+write_subroutines(struct blob *metadata, struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (!sh)
+ continue;
+
+ struct gl_program *glprog = sh->Program;
+
+ blob_write_uint32(metadata, glprog->sh.NumSubroutineUniforms);
+ blob_write_uint32(metadata, glprog->sh.MaxSubroutineFunctionIndex);
+ blob_write_uint32(metadata, glprog->sh.NumSubroutineFunctions);
+ for (unsigned j = 0; j < glprog->sh.NumSubroutineFunctions; j++) {
+ int num_types = glprog->sh.SubroutineFunctions[j].num_compat_types;
+
+ blob_write_string(metadata, glprog->sh.SubroutineFunctions[j].name);
+ blob_write_uint32(metadata, glprog->sh.SubroutineFunctions[j].index);
+ blob_write_uint32(metadata, num_types);
+
+ for (int k = 0; k < num_types; k++) {
+ encode_type_to_blob(metadata,
+ glprog->sh.SubroutineFunctions[j].types[k]);
+ }
+ }
+ }
+}
+
+static void
+read_subroutines(struct blob_reader *metadata, struct gl_shader_program *prog)
+{
+ struct gl_subroutine_function *subs;
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (!sh)
+ continue;
+
+ struct gl_program *glprog = sh->Program;
+
+ glprog->sh.NumSubroutineUniforms = blob_read_uint32(metadata);
+ glprog->sh.MaxSubroutineFunctionIndex = blob_read_uint32(metadata);
+ glprog->sh.NumSubroutineFunctions = blob_read_uint32(metadata);
+
+ subs = rzalloc_array(prog, struct gl_subroutine_function,
+ glprog->sh.NumSubroutineFunctions);
+ glprog->sh.SubroutineFunctions = subs;
+
+ for (unsigned j = 0; j < glprog->sh.NumSubroutineFunctions; j++) {
+ subs[j].name = ralloc_strdup(prog, blob_read_string (metadata));
+ subs[j].index = (int) blob_read_uint32(metadata);
+ subs[j].num_compat_types = (int) blob_read_uint32(metadata);
+
+ subs[j].types = rzalloc_array(prog, const struct glsl_type *,
+ subs[j].num_compat_types);
+ for (int k = 0; k < subs[j].num_compat_types; k++) {
+ subs[j].types[k] = decode_type_from_blob(metadata);
+ }
+ }
+ }
+}
+
+static void
+write_buffer_block(struct blob *metadata, struct gl_uniform_block *b)
+{
+ blob_write_string(metadata, b->Name);
+ blob_write_uint32(metadata, b->NumUniforms);
+ blob_write_uint32(metadata, b->Binding);
+ blob_write_uint32(metadata, b->UniformBufferSize);
+ blob_write_uint32(metadata, b->stageref);
+
+ for (unsigned j = 0; j < b->NumUniforms; j++) {
+ blob_write_string(metadata, b->Uniforms[j].Name);
+ blob_write_string(metadata, b->Uniforms[j].IndexName);
+ encode_type_to_blob(metadata, b->Uniforms[j].Type);
+ blob_write_uint32(metadata, b->Uniforms[j].Offset);
+ }
+}
+
+static void
+write_buffer_blocks(struct blob *metadata, struct gl_shader_program *prog)
+{
+ blob_write_uint32(metadata, prog->data->NumUniformBlocks);
+ blob_write_uint32(metadata, prog->data->NumShaderStorageBlocks);
+
+ for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
+ write_buffer_block(metadata, &prog->data->UniformBlocks[i]);
+ }
+
+ for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
+ write_buffer_block(metadata, &prog->data->ShaderStorageBlocks[i]);
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (!sh)
+ continue;
+
+ struct gl_program *glprog = sh->Program;
+
+ blob_write_uint32(metadata, glprog->info.num_ubos);
+ blob_write_uint32(metadata, glprog->info.num_ssbos);
+
+ for (unsigned j = 0; j < glprog->info.num_ubos; j++) {
+ uint32_t offset =
+ glprog->sh.UniformBlocks[j] - prog->data->UniformBlocks;
+ blob_write_uint32(metadata, offset);
+ }
+
+ for (unsigned j = 0; j < glprog->info.num_ssbos; j++) {
+ uint32_t offset = glprog->sh.ShaderStorageBlocks[j] -
+ prog->data->ShaderStorageBlocks;
+ blob_write_uint32(metadata, offset);
+ }
+ }
+}
+
+static void
+read_buffer_block(struct blob_reader *metadata, struct gl_uniform_block *b,
+ struct gl_shader_program *prog)
+{
+ b->Name = ralloc_strdup(prog->data, blob_read_string (metadata));
+ b->NumUniforms = blob_read_uint32(metadata);
+ b->Binding = blob_read_uint32(metadata);
+ b->UniformBufferSize = blob_read_uint32(metadata);
+ b->stageref = blob_read_uint32(metadata);
+
+ b->Uniforms =
+ rzalloc_array(prog->data, struct gl_uniform_buffer_variable,
+ b->NumUniforms);
+ for (unsigned j = 0; j < b->NumUniforms; j++) {
+ b->Uniforms[j].Name = ralloc_strdup(prog->data,
+ blob_read_string (metadata));
+
+ char *index_name = blob_read_string(metadata);
+ if (strcmp(b->Uniforms[j].Name, index_name) == 0) {
+ b->Uniforms[j].IndexName = b->Uniforms[j].Name;
+ } else {
+ b->Uniforms[j].IndexName = ralloc_strdup(prog->data, index_name);
+ }
+
+ b->Uniforms[j].Type = decode_type_from_blob(metadata);
+ b->Uniforms[j].Offset = blob_read_uint32(metadata);
+ }
+}
+
+static void
+read_buffer_blocks(struct blob_reader *metadata,
+ struct gl_shader_program *prog)
+{
+ prog->data->NumUniformBlocks = blob_read_uint32(metadata);
+ prog->data->NumShaderStorageBlocks = blob_read_uint32(metadata);
+
+ prog->data->UniformBlocks =
+ rzalloc_array(prog->data, struct gl_uniform_block,
+ prog->data->NumUniformBlocks);
+
+ prog->data->ShaderStorageBlocks =
+ rzalloc_array(prog->data, struct gl_uniform_block,
+ prog->data->NumShaderStorageBlocks);
+
+ for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
+ read_buffer_block(metadata, &prog->data->UniformBlocks[i], prog);
+ }
+
+ for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
+ read_buffer_block(metadata, &prog->data->ShaderStorageBlocks[i], prog);
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (!sh)
+ continue;
+
+ struct gl_program *glprog = sh->Program;
+
+ glprog->info.num_ubos = blob_read_uint32(metadata);
+ glprog->info.num_ssbos = blob_read_uint32(metadata);
+
+ glprog->sh.UniformBlocks =
+ rzalloc_array(glprog, gl_uniform_block *, glprog->info.num_ubos);
+ glprog->sh.ShaderStorageBlocks =
+ rzalloc_array(glprog, gl_uniform_block *, glprog->info.num_ssbos);
+
+ for (unsigned j = 0; j < glprog->info.num_ubos; j++) {
+ uint32_t offset = blob_read_uint32(metadata);
+ glprog->sh.UniformBlocks[j] = prog->data->UniformBlocks + offset;
+ }
+
+ for (unsigned j = 0; j < glprog->info.num_ssbos; j++) {
+ uint32_t offset = blob_read_uint32(metadata);
+ glprog->sh.ShaderStorageBlocks[j] =
+ prog->data->ShaderStorageBlocks + offset;
+ }
+ }
+}
+
+static void
+write_atomic_buffers(struct blob *metadata, struct gl_shader_program *prog)
+{
+ blob_write_uint32(metadata, prog->data->NumAtomicBuffers);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i]) {
+ struct gl_program *glprog = prog->_LinkedShaders[i]->Program;
+ blob_write_uint32(metadata, glprog->info.num_abos);
+ }
+ }
+
+ for (unsigned i = 0; i < prog->data->NumAtomicBuffers; i++) {
+ blob_write_uint32(metadata, prog->data->AtomicBuffers[i].Binding);
+ blob_write_uint32(metadata, prog->data->AtomicBuffers[i].MinimumSize);
+ blob_write_uint32(metadata, prog->data->AtomicBuffers[i].NumUniforms);
+
+ blob_write_bytes(metadata, prog->data->AtomicBuffers[i].StageReferences,
+ sizeof(prog->data->AtomicBuffers[i].StageReferences));
+
+ for (unsigned j = 0; j < prog->data->AtomicBuffers[i].NumUniforms; j++) {
+ blob_write_uint32(metadata, prog->data->AtomicBuffers[i].Uniforms[j]);
+ }
+ }
+}
+
+static void
+read_atomic_buffers(struct blob_reader *metadata,
+ struct gl_shader_program *prog)
+{
+ prog->data->NumAtomicBuffers = blob_read_uint32(metadata);
+ prog->data->AtomicBuffers =
+ rzalloc_array(prog, gl_active_atomic_buffer,
+ prog->data->NumAtomicBuffers);
+
+ struct gl_active_atomic_buffer **stage_buff_list[MESA_SHADER_STAGES];
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (prog->_LinkedShaders[i]) {
+ struct gl_program *glprog = prog->_LinkedShaders[i]->Program;
+
+ glprog->info.num_abos = blob_read_uint32(metadata);
+ glprog->sh.AtomicBuffers =
+ rzalloc_array(glprog, gl_active_atomic_buffer *,
+ glprog->info.num_abos);
+ stage_buff_list[i] = glprog->sh.AtomicBuffers;
+ }
+ }
+
+ for (unsigned i = 0; i < prog->data->NumAtomicBuffers; i++) {
+ prog->data->AtomicBuffers[i].Binding = blob_read_uint32(metadata);
+ prog->data->AtomicBuffers[i].MinimumSize = blob_read_uint32(metadata);
+ prog->data->AtomicBuffers[i].NumUniforms = blob_read_uint32(metadata);
+
+ blob_copy_bytes(metadata,
+ (uint8_t *) &prog->data->AtomicBuffers[i].StageReferences,
+ sizeof(prog->data->AtomicBuffers[i].StageReferences));
+
+ prog->data->AtomicBuffers[i].Uniforms = rzalloc_array(prog, unsigned,
+ prog->data->AtomicBuffers[i].NumUniforms);
+
+ for (unsigned j = 0; j < prog->data->AtomicBuffers[i].NumUniforms; j++) {
+ prog->data->AtomicBuffers[i].Uniforms[j] = blob_read_uint32(metadata);
+ }
+
+ for (unsigned j = 0; j < MESA_SHADER_STAGES; j++) {
+ if (prog->data->AtomicBuffers[i].StageReferences[j]) {
+ *stage_buff_list[j] = &prog->data->AtomicBuffers[i];
+ stage_buff_list[j]++;
+ }
+ }
+ }
+}
+
+static void
+write_xfb(struct blob *metadata, struct gl_shader_program *shProg)
+{
+ struct gl_program *prog = shProg->last_vert_prog;
+
+ if (!prog) {
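+      /* No vertex-pipeline program (e.g. a compute-only program): write ~0u
+       * as a sentinel that read_xfb() recognizes and skips.
+       */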
+ blob_write_uint32(metadata, ~0u);
+ return;
+ }
+
+ struct gl_transform_feedback_info *ltf = prog->sh.LinkedTransformFeedback;
+
+ blob_write_uint32(metadata, prog->info.stage);
+
+ /* Data set by glTransformFeedbackVaryings. */
+ blob_write_uint32(metadata, shProg->TransformFeedback.BufferMode);
+ blob_write_bytes(metadata, shProg->TransformFeedback.BufferStride,
+ sizeof(shProg->TransformFeedback.BufferStride));
+ blob_write_uint32(metadata, shProg->TransformFeedback.NumVarying);
+ for (unsigned i = 0; i < shProg->TransformFeedback.NumVarying; i++)
+ blob_write_string(metadata, shProg->TransformFeedback.VaryingNames[i]);
+
+ blob_write_uint32(metadata, ltf->NumOutputs);
+ blob_write_uint32(metadata, ltf->ActiveBuffers);
+ blob_write_uint32(metadata, ltf->NumVarying);
+
+ blob_write_bytes(metadata, ltf->Outputs,
+ sizeof(struct gl_transform_feedback_output) *
+ ltf->NumOutputs);
+
+ for (int i = 0; i < ltf->NumVarying; i++) {
+ blob_write_string(metadata, ltf->Varyings[i].Name);
+ blob_write_uint32(metadata, ltf->Varyings[i].Type);
+ blob_write_uint32(metadata, ltf->Varyings[i].BufferIndex);
+ blob_write_uint32(metadata, ltf->Varyings[i].Size);
+ blob_write_uint32(metadata, ltf->Varyings[i].Offset);
+ }
+
+ blob_write_bytes(metadata, ltf->Buffers,
+ sizeof(struct gl_transform_feedback_buffer) *
+ MAX_FEEDBACK_BUFFERS);
+}
+
+static void
+read_xfb(struct blob_reader *metadata, struct gl_shader_program *shProg)
+{
+ unsigned xfb_stage = blob_read_uint32(metadata);
+
+ if (xfb_stage == ~0u)
+ return;
+
+ if (shProg->TransformFeedback.VaryingNames) {
+ for (unsigned i = 0; i < shProg->TransformFeedback.NumVarying; ++i)
+ free(shProg->TransformFeedback.VaryingNames[i]);
+ }
+
+ /* Data set by glTransformFeedbackVaryings. */
+ shProg->TransformFeedback.BufferMode = blob_read_uint32(metadata);
+ blob_copy_bytes(metadata, &shProg->TransformFeedback.BufferStride,
+ sizeof(shProg->TransformFeedback.BufferStride));
+ shProg->TransformFeedback.NumVarying = blob_read_uint32(metadata);
+
+ shProg->TransformFeedback.VaryingNames = (char **)
+ realloc(shProg->TransformFeedback.VaryingNames,
+ shProg->TransformFeedback.NumVarying * sizeof(GLchar *));
+   /* Note: the VaryingNames array is managed with malloc/strdup/free, not ralloc. */
+ for (unsigned i = 0; i < shProg->TransformFeedback.NumVarying; i++)
+ shProg->TransformFeedback.VaryingNames[i] =
+ strdup(blob_read_string(metadata));
+
+ struct gl_program *prog = shProg->_LinkedShaders[xfb_stage]->Program;
+ struct gl_transform_feedback_info *ltf =
+ rzalloc(prog, struct gl_transform_feedback_info);
+
+ prog->sh.LinkedTransformFeedback = ltf;
+ shProg->last_vert_prog = prog;
+
+ ltf->NumOutputs = blob_read_uint32(metadata);
+ ltf->ActiveBuffers = blob_read_uint32(metadata);
+ ltf->NumVarying = blob_read_uint32(metadata);
+
+ ltf->Outputs = rzalloc_array(prog, struct gl_transform_feedback_output,
+ ltf->NumOutputs);
+
+ blob_copy_bytes(metadata, (uint8_t *) ltf->Outputs,
+ sizeof(struct gl_transform_feedback_output) *
+ ltf->NumOutputs);
+
+ ltf->Varyings = rzalloc_array(prog,
+ struct gl_transform_feedback_varying_info,
+ ltf->NumVarying);
+
+ for (int i = 0; i < ltf->NumVarying; i++) {
+ ltf->Varyings[i].Name = ralloc_strdup(prog, blob_read_string(metadata));
+ ltf->Varyings[i].Type = blob_read_uint32(metadata);
+ ltf->Varyings[i].BufferIndex = blob_read_uint32(metadata);
+ ltf->Varyings[i].Size = blob_read_uint32(metadata);
+ ltf->Varyings[i].Offset = blob_read_uint32(metadata);
+ }
+
+ blob_copy_bytes(metadata, (uint8_t *) ltf->Buffers,
+ sizeof(struct gl_transform_feedback_buffer) *
+ MAX_FEEDBACK_BUFFERS);
+}
+
+static bool
+has_uniform_storage(struct gl_shader_program *prog, unsigned idx)
+{
+ if (!prog->data->UniformStorage[idx].builtin &&
+ !prog->data->UniformStorage[idx].is_shader_storage &&
+ prog->data->UniformStorage[idx].block_index == -1)
+ return true;
+
+ return false;
+}
+
+static void
+write_uniforms(struct blob *metadata, struct gl_shader_program *prog)
+{
+ blob_write_uint32(metadata, prog->SamplersValidated);
+ blob_write_uint32(metadata, prog->data->NumUniformStorage);
+ blob_write_uint32(metadata, prog->data->NumUniformDataSlots);
+
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ encode_type_to_blob(metadata, prog->data->UniformStorage[i].type);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].array_elements);
+ if (prog->data->UniformStorage[i].name) {
+ blob_write_string(metadata, prog->data->UniformStorage[i].name);
+ } else {
+ blob_write_string(metadata, "");
+ }
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].builtin);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].remap_location);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].block_index);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].atomic_buffer_index);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].offset);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].array_stride);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].hidden);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].is_shader_storage);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].active_shader_mask);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].matrix_stride);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].row_major);
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].is_bindless);
+ blob_write_uint32(metadata,
+ prog->data->UniformStorage[i].num_compatible_subroutines);
+ blob_write_uint32(metadata,
+ prog->data->UniformStorage[i].top_level_array_size);
+ blob_write_uint32(metadata,
+ prog->data->UniformStorage[i].top_level_array_stride);
+
+ if (has_uniform_storage(prog, i)) {
+ blob_write_uint32(metadata, prog->data->UniformStorage[i].storage -
+ prog->data->UniformDataSlots);
+ }
+
+ blob_write_bytes(metadata, prog->data->UniformStorage[i].opaque,
+ sizeof(prog->data->UniformStorage[i].opaque));
+ }
+
+   /* Here we cache all uniform values. We do this to retain values for
+    * uniforms with initialisers and also for hidden uniforms such as
+    * lowered constant arrays. We could store only the values we need, but
+    * for now we just store everything.
+    */
+ blob_write_uint32(metadata, prog->data->NumHiddenUniforms);
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ if (has_uniform_storage(prog, i)) {
+ unsigned vec_size =
+ prog->data->UniformStorage[i].type->component_slots() *
+ MAX2(prog->data->UniformStorage[i].array_elements, 1);
+ unsigned slot =
+ prog->data->UniformStorage[i].storage -
+ prog->data->UniformDataSlots;
+ blob_write_bytes(metadata, &prog->data->UniformDataDefaults[slot],
+ sizeof(union gl_constant_value) * vec_size);
+ }
+ }
+}
+
+static void
+read_uniforms(struct blob_reader *metadata, struct gl_shader_program *prog)
+{
+ struct gl_uniform_storage *uniforms;
+ union gl_constant_value *data;
+
+ prog->SamplersValidated = blob_read_uint32(metadata);
+ prog->data->NumUniformStorage = blob_read_uint32(metadata);
+ prog->data->NumUniformDataSlots = blob_read_uint32(metadata);
+
+ uniforms = rzalloc_array(prog->data, struct gl_uniform_storage,
+ prog->data->NumUniformStorage);
+ prog->data->UniformStorage = uniforms;
+
+ data = rzalloc_array(uniforms, union gl_constant_value,
+ prog->data->NumUniformDataSlots);
+ prog->data->UniformDataSlots = data;
+ prog->data->UniformDataDefaults =
+ rzalloc_array(uniforms, union gl_constant_value,
+ prog->data->NumUniformDataSlots);
+
+ prog->UniformHash = new string_to_uint_map;
+
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ uniforms[i].type = decode_type_from_blob(metadata);
+ uniforms[i].array_elements = blob_read_uint32(metadata);
+ uniforms[i].name = ralloc_strdup(prog, blob_read_string (metadata));
+ uniforms[i].builtin = blob_read_uint32(metadata);
+ uniforms[i].remap_location = blob_read_uint32(metadata);
+ uniforms[i].block_index = blob_read_uint32(metadata);
+ uniforms[i].atomic_buffer_index = blob_read_uint32(metadata);
+ uniforms[i].offset = blob_read_uint32(metadata);
+ uniforms[i].array_stride = blob_read_uint32(metadata);
+ uniforms[i].hidden = blob_read_uint32(metadata);
+ uniforms[i].is_shader_storage = blob_read_uint32(metadata);
+ uniforms[i].active_shader_mask = blob_read_uint32(metadata);
+ uniforms[i].matrix_stride = blob_read_uint32(metadata);
+ uniforms[i].row_major = blob_read_uint32(metadata);
+ uniforms[i].is_bindless = blob_read_uint32(metadata);
+ uniforms[i].num_compatible_subroutines = blob_read_uint32(metadata);
+ uniforms[i].top_level_array_size = blob_read_uint32(metadata);
+ uniforms[i].top_level_array_stride = blob_read_uint32(metadata);
+ prog->UniformHash->put(i, uniforms[i].name);
+
+ if (has_uniform_storage(prog, i)) {
+ uniforms[i].storage = data + blob_read_uint32(metadata);
+ }
+
+ memcpy(uniforms[i].opaque,
+ blob_read_bytes(metadata, sizeof(uniforms[i].opaque)),
+ sizeof(uniforms[i].opaque));
+ }
+
+ /* Restore uniform values. */
+ prog->data->NumHiddenUniforms = blob_read_uint32(metadata);
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ if (has_uniform_storage(prog, i)) {
+ unsigned vec_size =
+ prog->data->UniformStorage[i].type->component_slots() *
+ MAX2(prog->data->UniformStorage[i].array_elements, 1);
+ unsigned slot =
+ prog->data->UniformStorage[i].storage -
+ prog->data->UniformDataSlots;
+ blob_copy_bytes(metadata,
+ (uint8_t *) &prog->data->UniformDataSlots[slot],
+ sizeof(union gl_constant_value) * vec_size);
+
+ assert(vec_size + prog->data->UniformStorage[i].storage <=
+ data + prog->data->NumUniformDataSlots);
+ }
+ }
+
+ memcpy(prog->data->UniformDataDefaults, prog->data->UniformDataSlots,
+ sizeof(union gl_constant_value) * prog->data->NumUniformDataSlots);
+}
+
+enum uniform_remap_type
+{
+ remap_type_inactive_explicit_location,
+ remap_type_null_ptr,
+ remap_type_uniform_offset,
+ remap_type_uniform_offsets_equal,
+};
+
+static void
+write_uniform_remap_table(struct blob *metadata,
+ unsigned num_entries,
+ gl_uniform_storage *uniform_storage,
+ gl_uniform_storage **remap_table)
+{
+ blob_write_uint32(metadata, num_entries);
+
+ for (unsigned i = 0; i < num_entries; i++) {
+ gl_uniform_storage *entry = remap_table[i];
+ uint32_t offset = entry - uniform_storage;
+
+ if (entry == INACTIVE_UNIFORM_EXPLICIT_LOCATION) {
+ blob_write_uint32(metadata, remap_type_inactive_explicit_location);
+ } else if (entry == NULL) {
+ blob_write_uint32(metadata, remap_type_null_ptr);
+ } else if (i+1 < num_entries && entry == remap_table[i+1]) {
+ blob_write_uint32(metadata, remap_type_uniform_offsets_equal);
+
+ /* If many offsets are equal, write only one offset and the number
+ * of consecutive entries being equal.
+ */
+ unsigned count = 1;
+ for (unsigned j = i + 1; j < num_entries; j++) {
+ if (entry != remap_table[j])
+ break;
+
+ count++;
+ }
+
+ blob_write_uint32(metadata, offset);
+ blob_write_uint32(metadata, count);
+ i += count - 1;
+ } else {
+ blob_write_uint32(metadata, remap_type_uniform_offset);
+
+ blob_write_uint32(metadata, offset);
+ }
+ }
+}
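+
+/* For illustration (hypothetical table): remap_table = {A, A, A, NULL, B},
+ * with A and B pointing into uniform_storage, is written as
+ *
+ *    offsets_equal, offset(A), count = 3,
+ *    null_ptr,
+ *    uniform_offset, offset(B)
+ *
+ * so a run of equal pointers is stored as a single (offset, count) pair.
+ */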
+
+static void
+write_uniform_remap_tables(struct blob *metadata,
+ struct gl_shader_program *prog)
+{
+ write_uniform_remap_table(metadata, prog->NumUniformRemapTable,
+ prog->data->UniformStorage,
+ prog->UniformRemapTable);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (sh) {
+ write_uniform_remap_table(metadata,
+ sh->Program->sh.NumSubroutineUniformRemapTable,
+ prog->data->UniformStorage,
+ sh->Program->sh.SubroutineUniformRemapTable);
+ }
+ }
+}
+
+static struct gl_uniform_storage **
+read_uniform_remap_table(struct blob_reader *metadata,
+ struct gl_shader_program *prog,
+ unsigned *num_entries,
+ gl_uniform_storage *uniform_storage)
+{
+ unsigned num = blob_read_uint32(metadata);
+ *num_entries = num;
+
+ struct gl_uniform_storage **remap_table =
+ rzalloc_array(prog, struct gl_uniform_storage *, num);
+
+ for (unsigned i = 0; i < num; i++) {
+ enum uniform_remap_type type =
+ (enum uniform_remap_type) blob_read_uint32(metadata);
+
+ if (type == remap_type_inactive_explicit_location) {
+ remap_table[i] = INACTIVE_UNIFORM_EXPLICIT_LOCATION;
+ } else if (type == remap_type_null_ptr) {
+ remap_table[i] = NULL;
+ } else if (type == remap_type_uniform_offsets_equal) {
+ uint32_t uni_offset = blob_read_uint32(metadata);
+ uint32_t count = blob_read_uint32(metadata);
+ struct gl_uniform_storage *entry = uniform_storage + uni_offset;
+
+ for (unsigned j = 0; j < count; j++)
+ remap_table[i+j] = entry;
+ i += count - 1;
+ } else {
+ uint32_t uni_offset = blob_read_uint32(metadata);
+ remap_table[i] = uniform_storage + uni_offset;
+ }
+ }
+ return remap_table;
+}
+
+static void
+read_uniform_remap_tables(struct blob_reader *metadata,
+ struct gl_shader_program *prog)
+{
+ prog->UniformRemapTable =
+ read_uniform_remap_table(metadata, prog, &prog->NumUniformRemapTable,
+ prog->data->UniformStorage);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (sh) {
+ struct gl_program *glprog = sh->Program;
+
+ glprog->sh.SubroutineUniformRemapTable =
+ read_uniform_remap_table(metadata, prog,
+ &glprog->sh.NumSubroutineUniformRemapTable,
+ prog->data->UniformStorage);
+ }
+ }
+}
+
+struct whte_closure
+{
+ struct blob *blob;
+ size_t num_entries;
+};
+
+static void
+write_hash_table_entry(const char *key, unsigned value, void *closure)
+{
+ struct whte_closure *whte = (struct whte_closure *) closure;
+
+ blob_write_string(whte->blob, key);
+ blob_write_uint32(whte->blob, value);
+
+ whte->num_entries++;
+}
+
+static void
+write_hash_table(struct blob *metadata, struct string_to_uint_map *hash)
+{
+ size_t offset;
+ struct whte_closure whte;
+
+ whte.blob = metadata;
+ whte.num_entries = 0;
+
+ offset = metadata->size;
+
+   /* Write a placeholder for the number of entries. */
+   blob_write_uint32(metadata, 0);
+
+ hash->iterate(write_hash_table_entry, &whte);
+
+   /* Overwrite the placeholder with the number of entries written. */
+   blob_overwrite_uint32(metadata, offset, whte.num_entries);
+}
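+
+/* A sketch of the resulting layout (illustrative only): serializing the
+ * map { "pos" -> 0, "uv" -> 2 } yields
+ *
+ *    2,            // entry count, patched in via blob_overwrite_uint32()
+ *    "pos\0", 0,
+ *    "uv\0",  2
+ *
+ * The count is written last, presumably because string_to_uint_map is
+ * only exposed through iteration here.
+ */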
+
+static void
+read_hash_table(struct blob_reader *metadata, struct string_to_uint_map *hash)
+{
+ size_t i, num_entries;
+ const char *key;
+ uint32_t value;
+
+   num_entries = blob_read_uint32(metadata);
+
+ for (i = 0; i < num_entries; i++) {
+ key = blob_read_string(metadata);
+ value = blob_read_uint32(metadata);
+
+ hash->put(value, key);
+ }
+}
+
+static void
+write_hash_tables(struct blob *metadata, struct gl_shader_program *prog)
+{
+ write_hash_table(metadata, prog->AttributeBindings);
+ write_hash_table(metadata, prog->FragDataBindings);
+ write_hash_table(metadata, prog->FragDataIndexBindings);
+}
+
+static void
+read_hash_tables(struct blob_reader *metadata, struct gl_shader_program *prog)
+{
+ read_hash_table(metadata, prog->AttributeBindings);
+ read_hash_table(metadata, prog->FragDataBindings);
+ read_hash_table(metadata, prog->FragDataIndexBindings);
+}
+
+static void
+write_shader_subroutine_index(struct blob *metadata,
+ struct gl_linked_shader *sh,
+ struct gl_program_resource *res)
+{
+ assert(sh);
+
+ for (unsigned j = 0; j < sh->Program->sh.NumSubroutineFunctions; j++) {
+ if (strcmp(((gl_subroutine_function *)res->Data)->name,
+ sh->Program->sh.SubroutineFunctions[j].name) == 0) {
+ blob_write_uint32(metadata, j);
+ break;
+ }
+ }
+}
+
+static void
+get_shader_var_and_pointer_sizes(size_t *s_var_size, size_t *s_var_ptrs,
+ const gl_shader_variable *var)
+{
+ *s_var_size = sizeof(gl_shader_variable);
+ *s_var_ptrs =
+ sizeof(var->type) +
+ sizeof(var->interface_type) +
+ sizeof(var->outermost_struct_type) +
+ sizeof(var->name);
+}
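+
+/* Note: the resource (de)serialization below relies on the four pointer
+ * members (type, interface_type, outermost_struct_type, name) being laid
+ * out at the very start of gl_shader_variable, so that everything after
+ * the first s_var_ptrs bytes can be copied as one contiguous range.
+ */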
+
+enum uniform_type
+{
+ uniform_remapped,
+ uniform_not_remapped
+};
+
+static void
+write_program_resource_data(struct blob *metadata,
+ struct gl_shader_program *prog,
+ struct gl_program_resource *res)
+{
+ struct gl_linked_shader *sh;
+
+   switch (res->Type) {
+ case GL_PROGRAM_INPUT:
+ case GL_PROGRAM_OUTPUT: {
+ const gl_shader_variable *var = (gl_shader_variable *)res->Data;
+
+ encode_type_to_blob(metadata, var->type);
+ encode_type_to_blob(metadata, var->interface_type);
+ encode_type_to_blob(metadata, var->outermost_struct_type);
+
+ if (var->name) {
+ blob_write_string(metadata, var->name);
+ } else {
+ blob_write_string(metadata, "");
+ }
+
+ size_t s_var_size, s_var_ptrs;
+ get_shader_var_and_pointer_sizes(&s_var_size, &s_var_ptrs, var);
+
+ /* Write gl_shader_variable skipping over the pointers */
+ blob_write_bytes(metadata, ((char *)var) + s_var_ptrs,
+ s_var_size - s_var_ptrs);
+ break;
+ }
+ case GL_UNIFORM_BLOCK:
+ for (unsigned i = 0; i < prog->data->NumUniformBlocks; i++) {
+ if (strcmp(((gl_uniform_block *)res->Data)->Name,
+ prog->data->UniformBlocks[i].Name) == 0) {
+ blob_write_uint32(metadata, i);
+ break;
+ }
+ }
+ break;
+ case GL_SHADER_STORAGE_BLOCK:
+ for (unsigned i = 0; i < prog->data->NumShaderStorageBlocks; i++) {
+ if (strcmp(((gl_uniform_block *)res->Data)->Name,
+ prog->data->ShaderStorageBlocks[i].Name) == 0) {
+ blob_write_uint32(metadata, i);
+ break;
+ }
+ }
+ break;
+ case GL_BUFFER_VARIABLE:
+ case GL_VERTEX_SUBROUTINE_UNIFORM:
+ case GL_GEOMETRY_SUBROUTINE_UNIFORM:
+ case GL_FRAGMENT_SUBROUTINE_UNIFORM:
+ case GL_COMPUTE_SUBROUTINE_UNIFORM:
+ case GL_TESS_CONTROL_SUBROUTINE_UNIFORM:
+ case GL_TESS_EVALUATION_SUBROUTINE_UNIFORM:
+ case GL_UNIFORM:
+ if (((gl_uniform_storage *)res->Data)->builtin ||
+ res->Type != GL_UNIFORM) {
+ blob_write_uint32(metadata, uniform_not_remapped);
+ for (unsigned i = 0; i < prog->data->NumUniformStorage; i++) {
+ if (strcmp(((gl_uniform_storage *)res->Data)->name,
+ prog->data->UniformStorage[i].name) == 0) {
+ blob_write_uint32(metadata, i);
+ break;
+ }
+ }
+ } else {
+ blob_write_uint32(metadata, uniform_remapped);
+ blob_write_uint32(metadata, ((gl_uniform_storage *)res->Data)->remap_location);
+ }
+ break;
+ case GL_ATOMIC_COUNTER_BUFFER:
+ for (unsigned i = 0; i < prog->data->NumAtomicBuffers; i++) {
+ if (((gl_active_atomic_buffer *)res->Data)->Binding ==
+ prog->data->AtomicBuffers[i].Binding) {
+ blob_write_uint32(metadata, i);
+ break;
+ }
+ }
+ break;
+ case GL_TRANSFORM_FEEDBACK_BUFFER:
+ for (unsigned i = 0; i < MAX_FEEDBACK_BUFFERS; i++) {
+ if (((gl_transform_feedback_buffer *)res->Data)->Binding ==
+ prog->last_vert_prog->sh.LinkedTransformFeedback->Buffers[i].Binding) {
+ blob_write_uint32(metadata, i);
+ break;
+ }
+ }
+ break;
+ case GL_TRANSFORM_FEEDBACK_VARYING:
+ for (int i = 0; i < prog->last_vert_prog->sh.LinkedTransformFeedback->NumVarying; i++) {
+ if (strcmp(((gl_transform_feedback_varying_info *)res->Data)->Name,
+ prog->last_vert_prog->sh.LinkedTransformFeedback->Varyings[i].Name) == 0) {
+ blob_write_uint32(metadata, i);
+ break;
+ }
+ }
+ break;
+ case GL_VERTEX_SUBROUTINE:
+ case GL_TESS_CONTROL_SUBROUTINE:
+ case GL_TESS_EVALUATION_SUBROUTINE:
+ case GL_GEOMETRY_SUBROUTINE:
+ case GL_FRAGMENT_SUBROUTINE:
+ case GL_COMPUTE_SUBROUTINE:
+ sh =
+ prog->_LinkedShaders[_mesa_shader_stage_from_subroutine(res->Type)];
+ write_shader_subroutine_index(metadata, sh, res);
+ break;
+ default:
+ assert(!"Support for writing resource not yet implemented.");
+ }
+}
+
+static void
+read_program_resource_data(struct blob_reader *metadata,
+ struct gl_shader_program *prog,
+ struct gl_program_resource *res)
+{
+ struct gl_linked_shader *sh;
+
+   switch (res->Type) {
+ case GL_PROGRAM_INPUT:
+ case GL_PROGRAM_OUTPUT: {
+ gl_shader_variable *var = ralloc(prog, struct gl_shader_variable);
+
+ var->type = decode_type_from_blob(metadata);
+ var->interface_type = decode_type_from_blob(metadata);
+ var->outermost_struct_type = decode_type_from_blob(metadata);
+
+ var->name = ralloc_strdup(prog, blob_read_string(metadata));
+
+ size_t s_var_size, s_var_ptrs;
+ get_shader_var_and_pointer_sizes(&s_var_size, &s_var_ptrs, var);
+
+ blob_copy_bytes(metadata, ((uint8_t *) var) + s_var_ptrs,
+ s_var_size - s_var_ptrs);
+
+ res->Data = var;
+ break;
+ }
+ case GL_UNIFORM_BLOCK:
+ res->Data = &prog->data->UniformBlocks[blob_read_uint32(metadata)];
+ break;
+ case GL_SHADER_STORAGE_BLOCK:
+ res->Data = &prog->data->ShaderStorageBlocks[blob_read_uint32(metadata)];
+ break;
+ case GL_BUFFER_VARIABLE:
+ case GL_VERTEX_SUBROUTINE_UNIFORM:
+ case GL_GEOMETRY_SUBROUTINE_UNIFORM:
+ case GL_FRAGMENT_SUBROUTINE_UNIFORM:
+ case GL_COMPUTE_SUBROUTINE_UNIFORM:
+ case GL_TESS_CONTROL_SUBROUTINE_UNIFORM:
+ case GL_TESS_EVALUATION_SUBROUTINE_UNIFORM:
+ case GL_UNIFORM: {
+ enum uniform_type type = (enum uniform_type) blob_read_uint32(metadata);
+ if (type == uniform_not_remapped) {
+ res->Data = &prog->data->UniformStorage[blob_read_uint32(metadata)];
+ } else {
+ res->Data = prog->UniformRemapTable[blob_read_uint32(metadata)];
+ }
+ break;
+ }
+ case GL_ATOMIC_COUNTER_BUFFER:
+ res->Data = &prog->data->AtomicBuffers[blob_read_uint32(metadata)];
+ break;
+ case GL_TRANSFORM_FEEDBACK_BUFFER:
+ res->Data = &prog->last_vert_prog->
+ sh.LinkedTransformFeedback->Buffers[blob_read_uint32(metadata)];
+ break;
+ case GL_TRANSFORM_FEEDBACK_VARYING:
+ res->Data = &prog->last_vert_prog->
+ sh.LinkedTransformFeedback->Varyings[blob_read_uint32(metadata)];
+ break;
+ case GL_VERTEX_SUBROUTINE:
+ case GL_TESS_CONTROL_SUBROUTINE:
+ case GL_TESS_EVALUATION_SUBROUTINE:
+ case GL_GEOMETRY_SUBROUTINE:
+ case GL_FRAGMENT_SUBROUTINE:
+ case GL_COMPUTE_SUBROUTINE:
+ sh =
+ prog->_LinkedShaders[_mesa_shader_stage_from_subroutine(res->Type)];
+ res->Data =
+ &sh->Program->sh.SubroutineFunctions[blob_read_uint32(metadata)];
+ break;
+ default:
+ assert(!"Support for reading resource not yet implemented.");
+ }
+}
+
+static void
+write_program_resource_list(struct blob *metadata,
+ struct gl_shader_program *prog)
+{
+ blob_write_uint32(metadata, prog->data->NumProgramResourceList);
+
+ for (unsigned i = 0; i < prog->data->NumProgramResourceList; i++) {
+ blob_write_uint32(metadata, prog->data->ProgramResourceList[i].Type);
+ write_program_resource_data(metadata, prog,
+ &prog->data->ProgramResourceList[i]);
+ blob_write_bytes(metadata,
+ &prog->data->ProgramResourceList[i].StageReferences,
+ sizeof(prog->data->ProgramResourceList[i].StageReferences));
+ }
+}
+
+static void
+read_program_resource_list(struct blob_reader *metadata,
+ struct gl_shader_program *prog)
+{
+ prog->data->NumProgramResourceList = blob_read_uint32(metadata);
+
+ prog->data->ProgramResourceList =
+ ralloc_array(prog->data, gl_program_resource,
+ prog->data->NumProgramResourceList);
+
+ for (unsigned i = 0; i < prog->data->NumProgramResourceList; i++) {
+ prog->data->ProgramResourceList[i].Type = blob_read_uint32(metadata);
+ read_program_resource_data(metadata, prog,
+ &prog->data->ProgramResourceList[i]);
+ blob_copy_bytes(metadata,
+ (uint8_t *) &prog->data->ProgramResourceList[i].StageReferences,
+ sizeof(prog->data->ProgramResourceList[i].StageReferences));
+ }
+}
+
+static void
+write_shader_parameters(struct blob *metadata,
+ struct gl_program_parameter_list *params)
+{
+ blob_write_uint32(metadata, params->NumParameters);
+ uint32_t i = 0;
+
+ while (i < params->NumParameters) {
+ struct gl_program_parameter *param = &params->Parameters[i];
+ blob_write_uint32(metadata, param->Type);
+ blob_write_string(metadata, param->Name);
+ blob_write_uint32(metadata, param->Size);
+ blob_write_uint32(metadata, param->Padded);
+ blob_write_uint32(metadata, param->DataType);
+ blob_write_bytes(metadata, param->StateIndexes,
+ sizeof(param->StateIndexes));
+ blob_write_uint32(metadata, param->UniformStorageIndex);
+ blob_write_uint32(metadata, param->MainUniformStorageIndex);
+
+ i++;
+ }
+
+ blob_write_bytes(metadata, params->ParameterValues,
+ sizeof(gl_constant_value) * params->NumParameterValues);
+
+ blob_write_uint32(metadata, params->StateFlags);
+}
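+
+/* Per-parameter blob layout, as written above (summary only):
+ *
+ *    Type, Name\0, Size, Padded, DataType, StateIndexes[STATE_LENGTH],
+ *    UniformStorageIndex, MainUniformStorageIndex
+ *
+ * followed once by NumParameterValues gl_constant_values and StateFlags.
+ * read_shader_parameters() below must consume the fields in this order.
+ */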
+
+static void
+read_shader_parameters(struct blob_reader *metadata,
+ struct gl_program_parameter_list *params)
+{
+ gl_state_index16 state_indexes[STATE_LENGTH];
+ uint32_t i = 0;
+ uint32_t num_parameters = blob_read_uint32(metadata);
+
+ _mesa_reserve_parameter_storage(params, num_parameters);
+ while (i < num_parameters) {
+ gl_register_file type = (gl_register_file) blob_read_uint32(metadata);
+ const char *name = blob_read_string(metadata);
+ unsigned size = blob_read_uint32(metadata);
+ bool padded = blob_read_uint32(metadata);
+ unsigned data_type = blob_read_uint32(metadata);
+ blob_copy_bytes(metadata, (uint8_t *) state_indexes,
+ sizeof(state_indexes));
+
+ _mesa_add_parameter(params, type, name, size, data_type,
+ NULL, state_indexes, padded);
+
+ gl_program_parameter *param = &params->Parameters[i];
+ param->UniformStorageIndex = blob_read_uint32(metadata);
+ param->MainUniformStorageIndex = blob_read_uint32(metadata);
+
+ i++;
+ }
+
+ blob_copy_bytes(metadata, (uint8_t *) params->ParameterValues,
+ sizeof(gl_constant_value) * params->NumParameterValues);
+
+ params->StateFlags = blob_read_uint32(metadata);
+}
+
+static void
+write_shader_metadata(struct blob *metadata, gl_linked_shader *shader)
+{
+ assert(shader->Program);
+ struct gl_program *glprog = shader->Program;
+ unsigned i;
+
+ blob_write_uint64(metadata, glprog->DualSlotInputs);
+ blob_write_bytes(metadata, glprog->TexturesUsed,
+ sizeof(glprog->TexturesUsed));
+ blob_write_uint64(metadata, glprog->SamplersUsed);
+
+ blob_write_bytes(metadata, glprog->SamplerUnits,
+ sizeof(glprog->SamplerUnits));
+ blob_write_bytes(metadata, glprog->sh.SamplerTargets,
+ sizeof(glprog->sh.SamplerTargets));
+ blob_write_uint32(metadata, glprog->ShadowSamplers);
+ blob_write_uint32(metadata, glprog->ExternalSamplersUsed);
+ blob_write_uint32(metadata, glprog->sh.ShaderStorageBlocksWriteAccess);
+
+ blob_write_bytes(metadata, glprog->sh.ImageAccess,
+ sizeof(glprog->sh.ImageAccess));
+ blob_write_bytes(metadata, glprog->sh.ImageUnits,
+ sizeof(glprog->sh.ImageUnits));
+
+ size_t ptr_size = sizeof(GLvoid *);
+
+ blob_write_uint32(metadata, glprog->sh.NumBindlessSamplers);
+ blob_write_uint32(metadata, glprog->sh.HasBoundBindlessSampler);
+ for (i = 0; i < glprog->sh.NumBindlessSamplers; i++) {
+ blob_write_bytes(metadata, &glprog->sh.BindlessSamplers[i],
+ sizeof(struct gl_bindless_sampler) - ptr_size);
+ }
+
+ blob_write_uint32(metadata, glprog->sh.NumBindlessImages);
+ blob_write_uint32(metadata, glprog->sh.HasBoundBindlessImage);
+ for (i = 0; i < glprog->sh.NumBindlessImages; i++) {
+ blob_write_bytes(metadata, &glprog->sh.BindlessImages[i],
+ sizeof(struct gl_bindless_image) - ptr_size);
+ }
+
+ blob_write_bytes(metadata, &glprog->sh.fs.BlendSupport,
+ sizeof(glprog->sh.fs.BlendSupport));
+
+ write_shader_parameters(metadata, glprog->Parameters);
+
+ assert((glprog->driver_cache_blob == NULL) ==
+ (glprog->driver_cache_blob_size == 0));
+ blob_write_uint32(metadata, (uint32_t)glprog->driver_cache_blob_size);
+ if (glprog->driver_cache_blob_size > 0) {
+ blob_write_bytes(metadata, glprog->driver_cache_blob,
+ glprog->driver_cache_blob_size);
+ }
+}
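+
+/* Note on the `- ptr_size` arithmetic above: each gl_bindless_sampler /
+ * gl_bindless_image is serialized minus sizeof(GLvoid *), which assumes
+ * the struct's single pointer member is its last field; the reader below
+ * restores the structs the same way and leaves that pointer zeroed by
+ * rzalloc_array().
+ */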
+
+static void
+read_shader_metadata(struct blob_reader *metadata,
+ struct gl_program *glprog,
+ gl_linked_shader *linked)
+{
+ unsigned i;
+
+ glprog->DualSlotInputs = blob_read_uint64(metadata);
+ blob_copy_bytes(metadata, (uint8_t *) glprog->TexturesUsed,
+ sizeof(glprog->TexturesUsed));
+ glprog->SamplersUsed = blob_read_uint64(metadata);
+
+ blob_copy_bytes(metadata, (uint8_t *) glprog->SamplerUnits,
+ sizeof(glprog->SamplerUnits));
+ blob_copy_bytes(metadata, (uint8_t *) glprog->sh.SamplerTargets,
+ sizeof(glprog->sh.SamplerTargets));
+ glprog->ShadowSamplers = blob_read_uint32(metadata);
+ glprog->ExternalSamplersUsed = blob_read_uint32(metadata);
+ glprog->sh.ShaderStorageBlocksWriteAccess = blob_read_uint32(metadata);
+
+ blob_copy_bytes(metadata, (uint8_t *) glprog->sh.ImageAccess,
+ sizeof(glprog->sh.ImageAccess));
+ blob_copy_bytes(metadata, (uint8_t *) glprog->sh.ImageUnits,
+ sizeof(glprog->sh.ImageUnits));
+
+ size_t ptr_size = sizeof(GLvoid *);
+
+ glprog->sh.NumBindlessSamplers = blob_read_uint32(metadata);
+ glprog->sh.HasBoundBindlessSampler = blob_read_uint32(metadata);
+ if (glprog->sh.NumBindlessSamplers > 0) {
+ glprog->sh.BindlessSamplers =
+ rzalloc_array(glprog, gl_bindless_sampler,
+ glprog->sh.NumBindlessSamplers);
+
+ for (i = 0; i < glprog->sh.NumBindlessSamplers; i++) {
+ blob_copy_bytes(metadata, (uint8_t *) &glprog->sh.BindlessSamplers[i],
+ sizeof(struct gl_bindless_sampler) - ptr_size);
+ }
+ }
+
+ glprog->sh.NumBindlessImages = blob_read_uint32(metadata);
+ glprog->sh.HasBoundBindlessImage = blob_read_uint32(metadata);
+ if (glprog->sh.NumBindlessImages > 0) {
+ glprog->sh.BindlessImages =
+ rzalloc_array(glprog, gl_bindless_image,
+ glprog->sh.NumBindlessImages);
+
+ for (i = 0; i < glprog->sh.NumBindlessImages; i++) {
+ blob_copy_bytes(metadata, (uint8_t *) &glprog->sh.BindlessImages[i],
+ sizeof(struct gl_bindless_image) - ptr_size);
+ }
+ }
+
+ blob_copy_bytes(metadata, (uint8_t *) &glprog->sh.fs.BlendSupport,
+ sizeof(glprog->sh.fs.BlendSupport));
+
+ glprog->Parameters = _mesa_new_parameter_list();
+ read_shader_parameters(metadata, glprog->Parameters);
+
+ glprog->driver_cache_blob_size = (size_t)blob_read_uint32(metadata);
+ if (glprog->driver_cache_blob_size > 0) {
+ glprog->driver_cache_blob =
+ (uint8_t*)ralloc_size(glprog, glprog->driver_cache_blob_size);
+ blob_copy_bytes(metadata, glprog->driver_cache_blob,
+ glprog->driver_cache_blob_size);
+ }
+}
+
+static void
+get_shader_info_and_pointer_sizes(size_t *s_info_size, size_t *s_info_ptrs,
+ shader_info *info)
+{
+ *s_info_size = sizeof(shader_info);
+ *s_info_ptrs = sizeof(info->name) + sizeof(info->label);
+}
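+
+/* As with gl_shader_variable above, this assumes the two pointer members
+ * (name and label) sit at the start of shader_info, so the remainder of
+ * the struct can be stored and restored as raw bytes.
+ */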
+
+static void
+create_linked_shader_and_program(struct gl_context *ctx,
+ gl_shader_stage stage,
+ struct gl_shader_program *prog,
+ struct blob_reader *metadata)
+{
+ struct gl_program *glprog;
+
+ struct gl_linked_shader *linked = rzalloc(NULL, struct gl_linked_shader);
+ linked->Stage = stage;
+
+ glprog = ctx->Driver.NewProgram(ctx, stage, prog->Name, false);
+ glprog->info.stage = stage;
+ linked->Program = glprog;
+
+ read_shader_metadata(metadata, glprog, linked);
+
+ glprog->info.name = ralloc_strdup(glprog, blob_read_string(metadata));
+ glprog->info.label = ralloc_strdup(glprog, blob_read_string(metadata));
+
+ size_t s_info_size, s_info_ptrs;
+ get_shader_info_and_pointer_sizes(&s_info_size, &s_info_ptrs,
+ &glprog->info);
+
+ /* Restore shader info */
+ blob_copy_bytes(metadata, ((uint8_t *) &glprog->info) + s_info_ptrs,
+ s_info_size - s_info_ptrs);
+
+ _mesa_reference_shader_program_data(ctx, &glprog->sh.data, prog->data);
+ _mesa_reference_program(ctx, &linked->Program, glprog);
+ prog->_LinkedShaders[stage] = linked;
+}
+
+extern "C" void
+serialize_glsl_program(struct blob *blob, struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ blob_write_bytes(blob, prog->data->sha1, sizeof(prog->data->sha1));
+
+ write_uniforms(blob, prog);
+
+ write_hash_tables(blob, prog);
+
+ blob_write_uint32(blob, prog->data->Version);
+ blob_write_uint32(blob, prog->IsES);
+ blob_write_uint32(blob, prog->data->linked_stages);
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (sh) {
+ write_shader_metadata(blob, sh);
+
+ if (sh->Program->info.name)
+ blob_write_string(blob, sh->Program->info.name);
+ else
+ blob_write_string(blob, "");
+
+ if (sh->Program->info.label)
+ blob_write_string(blob, sh->Program->info.label);
+ else
+ blob_write_string(blob, "");
+
+ size_t s_info_size, s_info_ptrs;
+ get_shader_info_and_pointer_sizes(&s_info_size, &s_info_ptrs,
+ &sh->Program->info);
+
+ /* Store shader info */
+ blob_write_bytes(blob,
+ ((char *) &sh->Program->info) + s_info_ptrs,
+ s_info_size - s_info_ptrs);
+ }
+ }
+
+ write_xfb(blob, prog);
+
+ write_uniform_remap_tables(blob, prog);
+
+ write_atomic_buffers(blob, prog);
+
+ write_buffer_blocks(blob, prog);
+
+ write_subroutines(blob, prog);
+
+ write_program_resource_list(blob, prog);
+}
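+
+/* Overall metadata layout produced by serialize_glsl_program() (summary,
+ * not normative):
+ *
+ *    sha1 | uniforms | hash tables | Version, IsES, linked_stages |
+ *    per-stage: shader metadata, name, label, shader_info tail |
+ *    xfb | uniform remap tables | atomic buffers | buffer blocks |
+ *    subroutines | program resource list
+ *
+ * deserialize_glsl_program() below must read the fields in exactly this
+ * order.
+ */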
+
+extern "C" bool
+deserialize_glsl_program(struct blob_reader *blob, struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ /* Fixed function programs generated by Mesa can't be serialized. */
+ if (prog->Name == 0)
+ return false;
+
+ assert(prog->data->UniformStorage == NULL);
+
+ blob_copy_bytes(blob, prog->data->sha1, sizeof(prog->data->sha1));
+
+ read_uniforms(blob, prog);
+
+ read_hash_tables(blob, prog);
+
+ prog->data->Version = blob_read_uint32(blob);
+ prog->IsES = blob_read_uint32(blob);
+ prog->data->linked_stages = blob_read_uint32(blob);
+
+ unsigned mask = prog->data->linked_stages;
+ while (mask) {
+ const int j = u_bit_scan(&mask);
+ create_linked_shader_and_program(ctx, (gl_shader_stage) j, prog,
+ blob);
+ }
+
+ read_xfb(blob, prog);
+
+ read_uniform_remap_tables(blob, prog);
+
+ read_atomic_buffers(blob, prog);
+
+ read_buffer_blocks(blob, prog);
+
+ read_subroutines(blob, prog);
+
+ read_program_resource_list(blob, prog);
+
+ return !blob->overrun;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.h
new file mode 100644
index 0000000000..789e307e99
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/serialize.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_SERIALIZE
+#define GLSL_SERIALIZE
+
+#include <stdbool.h>
+
+struct blob;
+struct blob_reader;
+struct gl_context;
+struct gl_shader_program;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void
+serialize_glsl_program(struct blob *blob, struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+bool
+deserialize_glsl_program(struct blob_reader *blob, struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* GLSL_SERIALIZE */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.cpp
new file mode 100644
index 0000000000..230dbaaa0a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.cpp
@@ -0,0 +1,263 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file shader_cache.cpp
+ *
+ * GLSL shader cache implementation
+ *
+ * This uses disk_cache.c to write out a serialization of the various
+ * state that is required in order to successfully load and use a binary
+ * written out by a driver's backend. This state is referred to as
+ * "metadata" throughout the implementation.
+ *
+ * The hash key for GLSL metadata is a hash of the hashes of each GLSL
+ * source string, as well as some API settings that change the final
+ * program, such as SSO, attribute bindings, frag data bindings, etc.
+ *
+ * In order to avoid caching any actual IR we use the put_key/get_key
+ * support in the disk_cache to put the SHA-1 hash for each successfully
+ * compiled shader into the cache, and optimistically return early from
+ * glCompileShader (if an identical shader was successfully compiled in
+ * the past), in the hope that the final linked shader will be found in
+ * the cache. If anything goes wrong (shader variant not found, backend
+ * cache item is corrupt, etc.) we fall back to compiling and linking the
+ * IR as usual.
+ */
+
+#include "compiler/shader_info.h"
+#include "glsl_symbol_table.h"
+#include "glsl_parser_extras.h"
+#include "ir.h"
+#include "ir_optimization.h"
+#include "ir_rvalue_visitor.h"
+#include "ir_uniform.h"
+#include "linker.h"
+#include "link_varyings.h"
+#include "program.h"
+#include "serialize.h"
+#include "shader_cache.h"
+#include "util/mesa-sha1.h"
+#include "string_to_uint_map.h"
+#include "main/mtypes.h"
+
+extern "C" {
+#include "main/enums.h"
+#include "main/shaderobj.h"
+#include "program/program.h"
+}
+
+static void
+compile_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
+{
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ _mesa_glsl_compile_shader(ctx, prog->Shaders[i], false, false, true);
+ }
+}
+
+static void
+create_binding_str(const char *key, unsigned value, void *closure)
+{
+ char **bindings_str = (char **) closure;
+ ralloc_asprintf_append(bindings_str, "%s:%u,", key, value);
+}
+
+void
+shader_cache_write_program_metadata(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ struct disk_cache *cache = ctx->Cache;
+ if (!cache)
+ return;
+
+   /* Exit early when we are dealing with a fixed-function shader that has
+    * no source file to generate a source from, or with a SPIR-V shader.
+    *
+    * TODO: In the future we should use another method to generate a key
+    * for fixed-function programs and SPIR-V shaders.
+    */
+ static const char zero[sizeof(prog->data->sha1)] = {0};
+ if (memcmp(prog->data->sha1, zero, sizeof(prog->data->sha1)) == 0)
+ return;
+
+ struct blob metadata;
+ blob_init(&metadata);
+
+ if (ctx->Driver.ShaderCacheSerializeDriverBlob) {
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *sh = prog->_LinkedShaders[i];
+ if (sh)
+ ctx->Driver.ShaderCacheSerializeDriverBlob(ctx, sh->Program);
+ }
+ }
+
+ serialize_glsl_program(&metadata, ctx, prog);
+
+ struct cache_item_metadata cache_item_metadata;
+ cache_item_metadata.type = CACHE_ITEM_TYPE_GLSL;
+ cache_item_metadata.keys =
+ (cache_key *) malloc(prog->NumShaders * sizeof(cache_key));
+ cache_item_metadata.num_keys = prog->NumShaders;
+
+ if (!cache_item_metadata.keys)
+ goto fail;
+
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ memcpy(cache_item_metadata.keys[i], prog->Shaders[i]->sha1,
+ sizeof(cache_key));
+ }
+
+ disk_cache_put(cache, prog->data->sha1, metadata.data, metadata.size,
+ &cache_item_metadata);
+
+ char sha1_buf[41];
+ if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
+ _mesa_sha1_format(sha1_buf, prog->data->sha1);
+ fprintf(stderr, "putting program metadata in cache: %s\n", sha1_buf);
+ }
+
+fail:
+ free(cache_item_metadata.keys);
+ blob_finish(&metadata);
+}
+
+bool
+shader_cache_read_program_metadata(struct gl_context *ctx,
+ struct gl_shader_program *prog)
+{
+ /* Fixed function programs generated by Mesa, or SPIR-V shaders, are not
+ * cached. So don't try to read metadata for them from the cache.
+ */
+ if (prog->Name == 0 || prog->data->spirv)
+ return false;
+
+ struct disk_cache *cache = ctx->Cache;
+ if (!cache)
+ return false;
+
+ /* Include bindings when creating sha1. These bindings change the resulting
+ * binary so they are just as important as the shader source.
+ */
+ char *buf = ralloc_strdup(NULL, "vb: ");
+ prog->AttributeBindings->iterate(create_binding_str, &buf);
+ ralloc_strcat(&buf, "fb: ");
+ prog->FragDataBindings->iterate(create_binding_str, &buf);
+ ralloc_strcat(&buf, "fbi: ");
+ prog->FragDataIndexBindings->iterate(create_binding_str, &buf);
+ ralloc_asprintf_append(&buf, "tf: %d ", prog->TransformFeedback.BufferMode);
+ for (unsigned int i = 0; i < prog->TransformFeedback.NumVarying; i++) {
+ ralloc_asprintf_append(&buf, "%s ",
+ prog->TransformFeedback.VaryingNames[i]);
+ }
+
+ /* SSO has an effect on the linked program so include this when generating
+ * the sha also.
+ */
+ ralloc_asprintf_append(&buf, "sso: %s\n",
+ prog->SeparateShader ? "T" : "F");
+
+ /* A shader might end up producing different output depending on the glsl
+ * version supported by the compiler. For example a different path might be
+ * taken by the preprocessor, so add the version to the hash input.
+ */
+ ralloc_asprintf_append(&buf, "api: %d glsl: %d fglsl: %d\n",
+ ctx->API, ctx->Const.GLSLVersion,
+ ctx->Const.ForceGLSLVersion);
+
+ /* We run the preprocessor on shaders after hashing them, so we need to
+ * add any extension override vars to the hash. If we don't do this the
+ * preprocessor could result in different output and we could load the
+ * wrong shader.
+ */
+ char *ext_override = getenv("MESA_EXTENSION_OVERRIDE");
+ if (ext_override) {
+ ralloc_asprintf_append(&buf, "ext:%s", ext_override);
+ }
+
+ /* DRI config options may also change the output from the compiler so
+ * include them as an input to sha1 creation.
+ */
+ char sha1buf[41];
+ _mesa_sha1_format(sha1buf, ctx->Const.dri_config_options_sha1);
+ ralloc_strcat(&buf, sha1buf);
+
+ for (unsigned i = 0; i < prog->NumShaders; i++) {
+ struct gl_shader *sh = prog->Shaders[i];
+ _mesa_sha1_format(sha1buf, sh->sha1);
+ ralloc_asprintf_append(&buf, "%s: %s\n",
+ _mesa_shader_stage_to_abbrev(sh->Stage), sha1buf);
+ }
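+
+   /* For illustration only: for a typical VS+FS program the key buffer now
+    * looks roughly like
+    *
+    *    vb: aPos:0,fb: fragColor:0,fbi: tf: 35980 sso: F
+    *    api: 3 glsl: 450 fglsl: 0
+    *    <dri-config-sha1>VS: <vs-source-sha1>
+    *    FS: <fs-source-sha1>
+    *
+    * (the identifiers here are hypothetical); it is hashed into the
+    * program's cache key below.
+    */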
+ disk_cache_compute_key(cache, buf, strlen(buf), prog->data->sha1);
+ ralloc_free(buf);
+
+ size_t size;
+ uint8_t *buffer = (uint8_t *) disk_cache_get(cache, prog->data->sha1,
+ &size);
+ if (buffer == NULL) {
+      /* Cached program not found. We may have seen the individual shaders
+       * before and skipped compiling them, but they may not have been used
+       * together in this combination before. Fall back to linking the
+       * shaders, but re-compile them first.
+       *
+       * We could probably compile only the shaders that were skipped here,
+       * but we need to be careful because the source may also have changed
+       * since the last compile, so for now we just recompile everything.
+       */
+ compile_shaders(ctx, prog);
+ return false;
+ }
+
+ if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
+ _mesa_sha1_format(sha1buf, prog->data->sha1);
+ fprintf(stderr, "loading shader program meta data from cache: %s\n",
+ sha1buf);
+ }
+
+ struct blob_reader metadata;
+ blob_reader_init(&metadata, buffer, size);
+
+ bool deserialized = deserialize_glsl_program(&metadata, ctx, prog);
+
+ if (!deserialized || metadata.current != metadata.end || metadata.overrun) {
+      /* Something has gone wrong; discard the item from the cache and
+       * rebuild from source.
+       */
+ assert(!"Invalid GLSL shader disk cache item!");
+
+ if (ctx->_Shader->Flags & GLSL_CACHE_INFO) {
+ fprintf(stderr, "Error reading program from cache (invalid GLSL "
+ "cache item)\n");
+ }
+
+ disk_cache_remove(cache, prog->data->sha1);
+ compile_shaders(ctx, prog);
+ free(buffer);
+ return false;
+ }
+
+ /* This is used to flag a shader retrieved from cache */
+ prog->data->LinkStatus = LINKING_SKIPPED;
+
+   free(buffer);
+
+ return true;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.h
new file mode 100644
index 0000000000..b3603d956d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/shader_cache.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SHADER_CACHE_H
+#define SHADER_CACHE_H
+
+#include "util/disk_cache.h"
+
+struct gl_context;
+struct gl_shader_program;
+
+void
+shader_cache_write_program_metadata(struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+bool
+shader_cache_read_program_metadata(struct gl_context *ctx,
+ struct gl_shader_program *prog);
+
+#endif /* SHADER_CACHE_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.cpp
new file mode 100644
index 0000000000..9a7f7d58ed
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.cpp
@@ -0,0 +1,620 @@
+/*
+ * Copyright © 2008, 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <getopt.h>
+
+/** @file standalone.cpp
+ *
+ * Standalone compiler helper library. Used by the standalone glsl_compiler
+ * and also available to drivers to implement their own standalone compiler
+ * with a driver backend.
+ */
+
+#include "ast.h"
+#include "glsl_parser_extras.h"
+#include "ir_optimization.h"
+#include "program.h"
+#include "loop_analysis.h"
+#include "standalone_scaffolding.h"
+#include "standalone.h"
+#include "string_to_uint_map.h"
+#include "util/set.h"
+#include "linker.h"
+#include "glsl_parser_extras.h"
+#include "ir_builder_print_visitor.h"
+#include "builtin_functions.h"
+#include "opt_add_neg_to_sub.h"
+#include "main/mtypes.h"
+#include "program/program.h"
+
+class dead_variable_visitor : public ir_hierarchical_visitor {
+public:
+ dead_variable_visitor()
+ {
+ variables = _mesa_pointer_set_create(NULL);
+ }
+
+ virtual ~dead_variable_visitor()
+ {
+ _mesa_set_destroy(variables, NULL);
+ }
+
+ virtual ir_visitor_status visit(ir_variable *ir)
+ {
+ /* If the variable is auto or temp, add it to the set of variables that
+ * are candidates for removal.
+ */
+ if (ir->data.mode != ir_var_auto && ir->data.mode != ir_var_temporary)
+ return visit_continue;
+
+ _mesa_set_add(variables, ir);
+
+ return visit_continue;
+ }
+
+ virtual ir_visitor_status visit(ir_dereference_variable *ir)
+ {
+ struct set_entry *entry = _mesa_set_search(variables, ir->var);
+
+ /* If a variable is dereferenced at all, remove it from the set of
+ * variables that are candidates for removal.
+ */
+ if (entry != NULL)
+ _mesa_set_remove(variables, entry);
+
+ return visit_continue;
+ }
+
+ void remove_dead_variables()
+ {
+ set_foreach(variables, entry) {
+ ir_variable *ir = (ir_variable *) entry->key;
+
+ assert(ir->ir_type == ir_type_variable);
+ ir->remove();
+ }
+ }
+
+private:
+ set *variables;
+};
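+
+/* Usage sketch: this is a two-pass mark-and-sweep. visit(ir_variable)
+ * marks every auto/temporary variable as a removal candidate,
+ * visit(ir_dereference_variable) unmarks anything that is actually used,
+ * and remove_dead_variables() then deletes whatever is left.
+ */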
+
+static void
+init_gl_program(struct gl_program *prog, bool is_arb_asm, gl_shader_stage stage)
+{
+ prog->RefCount = 1;
+ prog->Format = GL_PROGRAM_FORMAT_ASCII_ARB;
+ prog->is_arb_asm = is_arb_asm;
+ prog->info.stage = stage;
+}
+
+static struct gl_program *
+new_program(UNUSED struct gl_context *ctx, gl_shader_stage stage,
+ UNUSED GLuint id, bool is_arb_asm)
+{
+ struct gl_program *prog = rzalloc(NULL, struct gl_program);
+ init_gl_program(prog, is_arb_asm, stage);
+ return prog;
+}
+
+static const struct standalone_options *options;
+
+static void
+initialize_context(struct gl_context *ctx, gl_api api)
+{
+ initialize_context_to_defaults(ctx, api);
+ _mesa_glsl_builtin_functions_init_or_ref();
+
+ /* The standalone compiler needs to claim support for almost
+ * everything in order to compile the built-in functions.
+ */
+ ctx->Const.GLSLVersion = options->glsl_version;
+ ctx->Extensions.ARB_ES3_compatibility = true;
+ ctx->Extensions.ARB_ES3_1_compatibility = true;
+ ctx->Extensions.ARB_ES3_2_compatibility = true;
+ ctx->Const.MaxComputeWorkGroupCount[0] = 65535;
+ ctx->Const.MaxComputeWorkGroupCount[1] = 65535;
+ ctx->Const.MaxComputeWorkGroupCount[2] = 65535;
+ ctx->Const.MaxComputeWorkGroupSize[0] = 1024;
+ ctx->Const.MaxComputeWorkGroupSize[1] = 1024;
+ ctx->Const.MaxComputeWorkGroupSize[2] = 64;
+ ctx->Const.MaxComputeWorkGroupInvocations = 1024;
+ ctx->Const.MaxComputeSharedMemorySize = 32768;
+ ctx->Const.MaxComputeVariableGroupSize[0] = 512;
+ ctx->Const.MaxComputeVariableGroupSize[1] = 512;
+ ctx->Const.MaxComputeVariableGroupSize[2] = 64;
+ ctx->Const.MaxComputeVariableGroupInvocations = 512;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxOutputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicBuffers = 8;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxAtomicCounters = 8;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxImageUniforms = 8;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxUniformBlocks = 12;
+
+ switch (ctx->Const.GLSLVersion) {
+ case 100:
+ ctx->Const.MaxClipPlanes = 0;
+ ctx->Const.MaxCombinedTextureImageUnits = 8;
+ ctx->Const.MaxDrawBuffers = 2;
+ ctx->Const.MinProgramTexelOffset = 0;
+ ctx->Const.MaxProgramTexelOffset = 0;
+ ctx->Const.MaxLights = 0;
+ ctx->Const.MaxTextureCoordUnits = 0;
+ ctx->Const.MaxTextureUnits = 8;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs = 8;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 0;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents = 128 * 4;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxCombinedUniformComponents = 128 * 4;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 32;
+
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits =
+ ctx->Const.MaxCombinedTextureImageUnits;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents = 16 * 4;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxCombinedUniformComponents = 16 * 4;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents =
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxOutputComponents = 0; /* not used */
+
+ ctx->Const.MaxVarying = ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents / 4;
+ break;
+ case 110:
+ case 120:
+ ctx->Const.MaxClipPlanes = 6;
+ ctx->Const.MaxCombinedTextureImageUnits = 2;
+ ctx->Const.MaxDrawBuffers = 1;
+ ctx->Const.MinProgramTexelOffset = 0;
+ ctx->Const.MaxProgramTexelOffset = 0;
+ ctx->Const.MaxLights = 8;
+ ctx->Const.MaxTextureCoordUnits = 2;
+ ctx->Const.MaxTextureUnits = 2;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 0;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents = 512;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxCombinedUniformComponents = 512;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 32;
+
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits =
+ ctx->Const.MaxCombinedTextureImageUnits;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents = 64;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxCombinedUniformComponents = 64;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents =
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxOutputComponents = 0; /* not used */
+
+ ctx->Const.MaxVarying = ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents / 4;
+ break;
+ case 130:
+ case 140:
+ ctx->Const.MaxClipPlanes = 8;
+ ctx->Const.MaxCombinedTextureImageUnits = 16;
+ ctx->Const.MaxDrawBuffers = 8;
+ ctx->Const.MinProgramTexelOffset = -8;
+ ctx->Const.MaxProgramTexelOffset = 7;
+ ctx->Const.MaxLights = 8;
+ ctx->Const.MaxTextureCoordUnits = 8;
+ ctx->Const.MaxTextureUnits = 2;
+ ctx->Const.MaxUniformBufferBindings = 84;
+ ctx->Const.MaxVertexStreams = 4;
+ ctx->Const.MaxTransformFeedbackBuffers = 4;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 64;
+
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents =
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxOutputComponents = 0; /* not used */
+
+ ctx->Const.MaxVarying = ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents / 4;
+ break;
+ case 150:
+ case 330:
+ case 400:
+ case 410:
+ case 420:
+ case 430:
+ case 440:
+ case 450:
+ case 460:
+ ctx->Const.MaxClipPlanes = 8;
+ ctx->Const.MaxDrawBuffers = 8;
+ ctx->Const.MinProgramTexelOffset = -8;
+ ctx->Const.MaxProgramTexelOffset = 7;
+ ctx->Const.MaxLights = 8;
+ ctx->Const.MaxTextureCoordUnits = 8;
+ ctx->Const.MaxTextureUnits = 2;
+ ctx->Const.MaxUniformBufferBindings = 84;
+ ctx->Const.MaxVertexStreams = 4;
+ ctx->Const.MaxTransformFeedbackBuffers = 4;
+ ctx->Const.MaxShaderStorageBufferBindings = 4;
+ ctx->Const.MaxShaderStorageBlockSize = 4096;
+ ctx->Const.MaxAtomicBufferBindings = 4;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 64;
+
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxInputComponents =
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents;
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents = 128;
+
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents =
+ ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxOutputComponents;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxOutputComponents = 0; /* not used */
+
+ ctx->Const.MaxCombinedTextureImageUnits =
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits
+ + ctx->Const.Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits
+ + ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits;
+
+ ctx->Const.MaxGeometryOutputVertices = 256;
+ ctx->Const.MaxGeometryTotalOutputComponents = 1024;
+
+ ctx->Const.MaxVarying = 60 / 4;
+ break;
+ case 300:
+ ctx->Const.MaxClipPlanes = 8;
+ ctx->Const.MaxCombinedTextureImageUnits = 32;
+ ctx->Const.MaxDrawBuffers = 4;
+ ctx->Const.MinProgramTexelOffset = -8;
+ ctx->Const.MaxProgramTexelOffset = 7;
+ ctx->Const.MaxLights = 0;
+ ctx->Const.MaxTextureCoordUnits = 0;
+ ctx->Const.MaxTextureUnits = 0;
+ ctx->Const.MaxUniformBufferBindings = 84;
+ ctx->Const.MaxVertexStreams = 4;
+ ctx->Const.MaxTransformFeedbackBuffers = 4;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxCombinedUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 16 * 4;
+
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents = 224;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxCombinedUniformComponents = 224;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 15 * 4;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxOutputComponents = 0; /* not used */
+
+ ctx->Const.MaxVarying = ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents / 4;
+ break;
+ }
+
+ ctx->Const.GenerateTemporaryNames = true;
+ ctx->Const.MaxPatchVertices = 32;
+
+ /* GL_ARB_explicit_uniform_location, GL_MAX_UNIFORM_LOCATIONS */
+ ctx->Const.MaxUserAssignableUniformLocations =
+ 4 * MESA_SHADER_STAGES * MAX_UNIFORMS;
+
+ ctx->Driver.NewProgram = new_program;
+}
+
+/* Returned string will have 'ctx' as its ralloc owner. */
+static char *
+load_text_file(void *ctx, const char *file_name)
+{
+ char *text = NULL;
+ size_t size;
+ size_t total_read = 0;
+ FILE *fp = fopen(file_name, "rb");
+
+ if (!fp) {
+ return NULL;
+ }
+
+ fseek(fp, 0L, SEEK_END);
+ size = ftell(fp);
+ fseek(fp, 0L, SEEK_SET);
+
+ text = (char *) ralloc_size(ctx, size + 1);
+ if (text != NULL) {
+ do {
+ size_t bytes = fread(text + total_read,
+ 1, size - total_read, fp);
+ if (bytes < size - total_read) {
+            /* 'text' was allocated with ralloc_size(), so release it with
+             * ralloc_free() rather than free(). */
+            ralloc_free(text);
+ text = NULL;
+ goto error;
+ }
+
+ if (bytes == 0) {
+ break;
+ }
+
+ total_read += bytes;
+ } while (total_read < size);
+
+ text[total_read] = '\0';
+ error:;
+ }
+
+ fclose(fp);
+
+ return text;
+}
+
+static void
+compile_shader(struct gl_context *ctx, struct gl_shader *shader)
+{
+ struct _mesa_glsl_parse_state *state =
+ new(shader) _mesa_glsl_parse_state(ctx, shader->Stage, shader);
+
+ _mesa_glsl_compile_shader(ctx, shader, options->dump_ast,
+ options->dump_hir, true);
+
+ /* Print out the resulting IR */
+ if (!state->error && options->dump_lir) {
+ _mesa_print_ir(stdout, shader->ir, state);
+ }
+
+ return;
+}
+
+extern "C" struct gl_shader_program *
+standalone_compile_shader(const struct standalone_options *_options,
+ unsigned num_files, char* const* files, struct gl_context *ctx)
+{
+ int status = EXIT_SUCCESS;
+ bool glsl_es = false;
+
+ options = _options;
+
+ switch (options->glsl_version) {
+ case 100:
+ case 300:
+ glsl_es = true;
+ break;
+ case 110:
+ case 120:
+ case 130:
+ case 140:
+ case 150:
+ case 330:
+ case 400:
+ case 410:
+ case 420:
+ case 430:
+ case 440:
+ case 450:
+ case 460:
+ glsl_es = false;
+ break;
+ default:
+ fprintf(stderr, "Unrecognized GLSL version `%d'\n", options->glsl_version);
+ return NULL;
+ }
+
+ if (glsl_es) {
+ initialize_context(ctx, API_OPENGLES2);
+ } else {
+ initialize_context(ctx, options->glsl_version > 130 ? API_OPENGL_CORE : API_OPENGL_COMPAT);
+ }
+
+ if (options->lower_precision) {
+ for (unsigned i = MESA_SHADER_VERTEX; i <= MESA_SHADER_FRAGMENT; i++) {
+ struct gl_shader_compiler_options *options =
+ &ctx->Const.ShaderCompilerOptions[i];
+ options->LowerPrecision = true;
+ }
+ }
+
+ struct gl_shader_program *whole_program;
+
+   whole_program = rzalloc(NULL, struct gl_shader_program);
+ assert(whole_program != NULL);
+ whole_program->data = rzalloc(whole_program, struct gl_shader_program_data);
+ assert(whole_program->data != NULL);
+ whole_program->data->InfoLog = ralloc_strdup(whole_program->data, "");
+
+ /* Created just to avoid segmentation faults */
+ whole_program->AttributeBindings = new string_to_uint_map;
+ whole_program->FragDataBindings = new string_to_uint_map;
+ whole_program->FragDataIndexBindings = new string_to_uint_map;
+
+ for (unsigned i = 0; i < num_files; i++) {
+ whole_program->Shaders =
+ reralloc(whole_program, whole_program->Shaders,
+ struct gl_shader *, whole_program->NumShaders + 1);
+ assert(whole_program->Shaders != NULL);
+
+ struct gl_shader *shader = rzalloc(whole_program, gl_shader);
+
+ whole_program->Shaders[whole_program->NumShaders] = shader;
+ whole_program->NumShaders++;
+
+ const unsigned len = strlen(files[i]);
+ if (len < 6)
+ goto fail;
+
+ const char *const ext = & files[i][len - 5];
+ /* TODO add support to read a .shader_test */
+ if (strncmp(".vert", ext, 5) == 0 || strncmp(".glsl", ext, 5) == 0)
+ shader->Type = GL_VERTEX_SHADER;
+ else if (strncmp(".tesc", ext, 5) == 0)
+ shader->Type = GL_TESS_CONTROL_SHADER;
+ else if (strncmp(".tese", ext, 5) == 0)
+ shader->Type = GL_TESS_EVALUATION_SHADER;
+ else if (strncmp(".geom", ext, 5) == 0)
+ shader->Type = GL_GEOMETRY_SHADER;
+ else if (strncmp(".frag", ext, 5) == 0)
+ shader->Type = GL_FRAGMENT_SHADER;
+ else if (strncmp(".comp", ext, 5) == 0)
+ shader->Type = GL_COMPUTE_SHADER;
+ else
+ goto fail;
+ shader->Stage = _mesa_shader_enum_to_shader_stage(shader->Type);
+
+ shader->Source = load_text_file(whole_program, files[i]);
+ if (shader->Source == NULL) {
+ printf("File \"%s\" does not exist.\n", files[i]);
+ exit(EXIT_FAILURE);
+ }
+
+ compile_shader(ctx, shader);
+
+ if (strlen(shader->InfoLog) > 0) {
+ if (!options->just_log)
+ printf("Info log for %s:\n", files[i]);
+
+ printf("%s", shader->InfoLog);
+ if (!options->just_log)
+ printf("\n");
+ }
+
+ if (!shader->CompileStatus) {
+ status = EXIT_FAILURE;
+ break;
+ }
+ }
+
+ if (status == EXIT_SUCCESS) {
+ _mesa_clear_shader_program_data(ctx, whole_program);
+
+ if (options->do_link) {
+ link_shaders(ctx, whole_program);
+ } else {
+ const gl_shader_stage stage = whole_program->Shaders[0]->Stage;
+
+ whole_program->data->LinkStatus = LINKING_SUCCESS;
+ whole_program->_LinkedShaders[stage] =
+ link_intrastage_shaders(whole_program /* mem_ctx */,
+ ctx,
+ whole_program,
+ whole_program->Shaders,
+ 1,
+ true);
+
+         /* Par-linking (linking a single stage on its own) can fail, for
+          * example, if there are undefined external references.
+          */
+ if (whole_program->_LinkedShaders[stage] != NULL) {
+ assert(whole_program->data->LinkStatus);
+
+ struct gl_shader_compiler_options *const compiler_options =
+ &ctx->Const.ShaderCompilerOptions[stage];
+
+ exec_list *const ir =
+ whole_program->_LinkedShaders[stage]->ir;
+
+ bool progress;
+ do {
+ progress = do_function_inlining(ir);
+
+ progress = do_common_optimization(ir,
+ false,
+ false,
+ compiler_options,
+ true)
+ && progress;
+         } while (progress);
+ }
+ }
+
+ status = (whole_program->data->LinkStatus) ? EXIT_SUCCESS : EXIT_FAILURE;
+
+ if (strlen(whole_program->data->InfoLog) > 0) {
+ printf("\n");
+ if (!options->just_log)
+ printf("Info log for linking:\n");
+ printf("%s", whole_program->data->InfoLog);
+ if (!options->just_log)
+ printf("\n");
+ }
+
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *shader = whole_program->_LinkedShaders[i];
+
+ if (!shader)
+ continue;
+
+ add_neg_to_sub_visitor v;
+ visit_list_elements(&v, shader->ir);
+
+ dead_variable_visitor dv;
+ visit_list_elements(&dv, shader->ir);
+ dv.remove_dead_variables();
+ }
+
+ if (options->dump_builder) {
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ struct gl_linked_shader *shader = whole_program->_LinkedShaders[i];
+
+ if (!shader)
+ continue;
+
+ _mesa_print_builder_for_ir(stdout, shader->ir);
+ }
+ }
+ }
+
+ return whole_program;
+
+fail:
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (whole_program->_LinkedShaders[i])
+ ralloc_free(whole_program->_LinkedShaders[i]->Program);
+ }
+
+ ralloc_free(whole_program);
+ return NULL;
+}
+
+extern "C" void
+standalone_compiler_cleanup(struct gl_shader_program *whole_program)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (whole_program->_LinkedShaders[i])
+ ralloc_free(whole_program->_LinkedShaders[i]->Program);
+ }
+
+ delete whole_program->AttributeBindings;
+ delete whole_program->FragDataBindings;
+ delete whole_program->FragDataIndexBindings;
+
+ ralloc_free(whole_program);
+ _mesa_glsl_builtin_functions_decref();
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.h
new file mode 100644
index 0000000000..2c2d923816
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright © 2016 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef GLSL_STANDALONE_H
+#define GLSL_STANDALONE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct standalone_options {
+ int glsl_version;
+ int dump_ast;
+ int dump_hir;
+ int dump_lir;
+ int dump_builder;
+ int do_link;
+ int just_log;
+ int lower_precision;
+};
+
+struct gl_shader_program;
+
+struct gl_shader_program * standalone_compile_shader(
+ const struct standalone_options *options,
+ unsigned num_files, char* const* files,
+ struct gl_context *ctx);
+
+void standalone_compiler_cleanup(struct gl_shader_program *prog);
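+
+/* A minimal usage sketch (hypothetical shader file name; assumes `ctx`
+ * points at a context prepared elsewhere, e.g. with
+ * initialize_context_to_defaults() from standalone_scaffolding.h):
+ *
+ *    struct standalone_options opts = {0};
+ *    opts.glsl_version = 330;
+ *    opts.do_link = 1;
+ *
+ *    char *files[] = { (char *) "shader.vert" };
+ *    struct gl_shader_program *prog =
+ *       standalone_compile_shader(&opts, 1, files, ctx);
+ *    if (prog)
+ *       standalone_compiler_cleanup(prog);
+ */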
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLSL_STANDALONE_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp
new file mode 100644
index 0000000000..c3c96494a8
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp
@@ -0,0 +1,289 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* This file implements stripped-down versions of functions that
+ * normally exist outside of the glsl folder, so that they can be used
+ * when running the GLSL compiler standalone (for unit testing or
+ * compiling builtins). The matching declarations live in
+ * standalone_scaffolding.h.
+ */
+
+#include "standalone_scaffolding.h"
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include "util/ralloc.h"
+#include "util/strtod.h"
+#include "main/mtypes.h"
+
+void
+_mesa_warning(struct gl_context *ctx, const char *fmt, ...)
+{
+ va_list vargs;
+ (void) ctx;
+
+ va_start(vargs, fmt);
+
+ /* This output is not thread-safe, but that's good enough for the
+ * standalone compiler.
+ */
+ fprintf(stderr, "Mesa warning: ");
+ vfprintf(stderr, fmt, vargs);
+ fprintf(stderr, "\n");
+
+ va_end(vargs);
+}
+
+void
+_mesa_problem(struct gl_context *ctx, const char *fmt, ...)
+{
+ va_list vargs;
+ (void) ctx;
+
+ va_start(vargs, fmt);
+
+ /* This output is not thread-safe, but that's good enough for the
+ * standalone compiler.
+ */
+ fprintf(stderr, "Mesa problem: ");
+ vfprintf(stderr, fmt, vargs);
+ fprintf(stderr, "\n");
+
+ va_end(vargs);
+}
+
+void
+_mesa_reference_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program_data **ptr,
+ struct gl_shader_program_data *data)
+{
+ (void) ctx;
+ *ptr = data;
+}
+
+void
+_mesa_reference_shader(struct gl_context *ctx, struct gl_shader **ptr,
+ struct gl_shader *sh)
+{
+ (void) ctx;
+ *ptr = sh;
+}
+
+void
+_mesa_reference_program_(struct gl_context *ctx, struct gl_program **ptr,
+ struct gl_program *prog)
+{
+ (void) ctx;
+ *ptr = prog;
+}
+
+void
+_mesa_shader_debug(struct gl_context *, GLenum, GLuint *,
+ const char *)
+{
+}
+
+struct gl_shader *
+_mesa_new_shader(GLuint name, gl_shader_stage stage)
+{
+ struct gl_shader *shader;
+
+ assert(stage == MESA_SHADER_FRAGMENT || stage == MESA_SHADER_VERTEX);
+ shader = rzalloc(NULL, struct gl_shader);
+ if (shader) {
+ shader->Stage = stage;
+ shader->Name = name;
+ shader->RefCount = 1;
+ }
+ return shader;
+}
+
+GLbitfield
+_mesa_program_state_flags(UNUSED const gl_state_index16 state[STATE_LENGTH])
+{
+ return 0;
+}
+
+char *
+_mesa_program_state_string(UNUSED const gl_state_index16 state[STATE_LENGTH])
+{
+ return NULL;
+}
+
+void
+_mesa_delete_shader(struct gl_context *, struct gl_shader *sh)
+{
+ free((void *)sh->Source);
+ free(sh->Label);
+ ralloc_free(sh);
+}
+
+void
+_mesa_delete_linked_shader(struct gl_context *,
+ struct gl_linked_shader *sh)
+{
+ ralloc_free(sh);
+}
+
+void
+_mesa_clear_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program *shProg)
+{
+ for (unsigned i = 0; i < MESA_SHADER_STAGES; i++) {
+ if (shProg->_LinkedShaders[i] != NULL) {
+ _mesa_delete_linked_shader(ctx, shProg->_LinkedShaders[i]);
+ shProg->_LinkedShaders[i] = NULL;
+ }
+ }
+
+ shProg->data->NumUniformStorage = 0;
+ shProg->data->UniformStorage = NULL;
+ shProg->NumUniformRemapTable = 0;
+ shProg->UniformRemapTable = NULL;
+ shProg->UniformHash = NULL;
+
+ ralloc_free(shProg->data->InfoLog);
+ shProg->data->InfoLog = ralloc_strdup(shProg->data, "");
+
+ ralloc_free(shProg->data->UniformBlocks);
+ shProg->data->UniformBlocks = NULL;
+ shProg->data->NumUniformBlocks = 0;
+
+ ralloc_free(shProg->data->ShaderStorageBlocks);
+ shProg->data->ShaderStorageBlocks = NULL;
+ shProg->data->NumShaderStorageBlocks = 0;
+
+ ralloc_free(shProg->data->AtomicBuffers);
+ shProg->data->AtomicBuffers = NULL;
+ shProg->data->NumAtomicBuffers = 0;
+}
+
+void initialize_context_to_defaults(struct gl_context *ctx, gl_api api)
+{
+ memset(ctx, 0, sizeof(*ctx));
+
+ ctx->API = api;
+
+ ctx->Extensions.dummy_false = false;
+ ctx->Extensions.dummy_true = true;
+ ctx->Extensions.ARB_blend_func_extended = true;
+ ctx->Extensions.ARB_compute_shader = true;
+ ctx->Extensions.ARB_compute_variable_group_size = true;
+ ctx->Extensions.ARB_conservative_depth = true;
+ ctx->Extensions.ARB_draw_instanced = true;
+ ctx->Extensions.ARB_ES2_compatibility = true;
+ ctx->Extensions.ARB_ES3_compatibility = true;
+ ctx->Extensions.ARB_explicit_attrib_location = true;
+ ctx->Extensions.ARB_fragment_coord_conventions = true;
+ ctx->Extensions.ARB_fragment_layer_viewport = true;
+ ctx->Extensions.ARB_gpu_shader5 = true;
+ ctx->Extensions.ARB_gpu_shader_fp64 = true;
+ ctx->Extensions.ARB_gpu_shader_int64 = true;
+ ctx->Extensions.ARB_sample_shading = true;
+ ctx->Extensions.ARB_shader_bit_encoding = true;
+ ctx->Extensions.ARB_shader_draw_parameters = true;
+ ctx->Extensions.ARB_shader_stencil_export = true;
+ ctx->Extensions.ARB_shader_storage_buffer_object = true;
+ ctx->Extensions.ARB_shader_texture_lod = true;
+ ctx->Extensions.ARB_shading_language_420pack = true;
+ ctx->Extensions.ARB_shading_language_packing = true;
+ ctx->Extensions.ARB_tessellation_shader = true;
+ ctx->Extensions.ARB_texture_cube_map_array = true;
+ ctx->Extensions.ARB_texture_gather = true;
+ ctx->Extensions.ARB_texture_multisample = true;
+ ctx->Extensions.ARB_texture_query_levels = true;
+ ctx->Extensions.ARB_texture_query_lod = true;
+ ctx->Extensions.ARB_uniform_buffer_object = true;
+ ctx->Extensions.ARB_viewport_array = true;
+ ctx->Extensions.ARB_cull_distance = true;
+ ctx->Extensions.ARB_bindless_texture = true;
+
+ ctx->Extensions.KHR_blend_equation_advanced = true;
+
+ ctx->Extensions.OES_EGL_image_external = true;
+ ctx->Extensions.OES_standard_derivatives = true;
+
+ ctx->Extensions.EXT_gpu_shader4 = true;
+ ctx->Extensions.EXT_shader_integer_mix = true;
+ ctx->Extensions.EXT_texture_array = true;
+
+ ctx->Extensions.MESA_shader_integer_functions = true;
+
+ ctx->Extensions.NV_texture_rectangle = true;
+
+ ctx->Const.GLSLVersion = 120;
+
+ /* 1.20 minimums. */
+ ctx->Const.MaxLights = 8;
+ ctx->Const.MaxClipPlanes = 6;
+ ctx->Const.MaxTextureUnits = 2;
+ ctx->Const.MaxTextureCoordUnits = 2;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxAttribs = 16;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformComponents = 512;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxOutputComponents = 32;
+ ctx->Const.MaxVarying = 8; /* == gl_MaxVaryingFloats / 4 */
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = 0;
+ ctx->Const.MaxCombinedTextureImageUnits = 2;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = 2;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformComponents = 64;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxInputComponents = 32;
+
+ ctx->Const.MaxDrawBuffers = 1;
+ ctx->Const.MaxComputeWorkGroupCount[0] = 65535;
+ ctx->Const.MaxComputeWorkGroupCount[1] = 65535;
+ ctx->Const.MaxComputeWorkGroupCount[2] = 65535;
+ ctx->Const.MaxComputeWorkGroupSize[0] = 1024;
+ ctx->Const.MaxComputeWorkGroupSize[1] = 1024;
+ ctx->Const.MaxComputeWorkGroupSize[2] = 64;
+ ctx->Const.MaxComputeWorkGroupInvocations = 1024;
+ ctx->Const.MaxComputeVariableGroupSize[0] = 512;
+ ctx->Const.MaxComputeVariableGroupSize[1] = 512;
+ ctx->Const.MaxComputeVariableGroupSize[2] = 64;
+ ctx->Const.MaxComputeVariableGroupInvocations = 512;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxTextureImageUnits = 16;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxUniformComponents = 1024;
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxInputComponents = 0; /* not used */
+ ctx->Const.Program[MESA_SHADER_COMPUTE].MaxOutputComponents = 0; /* not used */
+
+ ctx->Const.MaxVertexStreams = 4;
+ ctx->Const.MaxTransformFeedbackBuffers = 4;
+ ctx->Const.MaxShaderStorageBufferBindings = 4;
+ ctx->Const.MaxShaderStorageBlockSize = 4096;
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxShaderStorageBlocks = 8;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxShaderStorageBlocks = 8;
+
+ ctx->Const.Program[MESA_SHADER_VERTEX].MaxUniformBlocks = 12;
+ ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxUniformBlocks = 12;
+
+ /* Set up default shader compiler options. */
+ struct gl_shader_compiler_options options;
+ memset(&options, 0, sizeof(options));
+ options.MaxUnrollIterations = 32;
+ options.MaxIfDepth = UINT_MAX;
+
+ for (int sh = 0; sh < MESA_SHADER_STAGES; ++sh)
+ memcpy(&ctx->Const.ShaderCompilerOptions[sh], &options, sizeof(options));
+
+ _mesa_locale_init();
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h
new file mode 100644
index 0000000000..d7d1a9ea7f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/* This file declares stripped-down versions of functions that
+ * normally exist outside of the glsl folder, so that they can be used
+ * when running the GLSL compiler standalone (for unit testing or
+ * compiling builtins).
+ */
+
+#ifndef STANDALONE_SCAFFOLDING_H
+#define STANDALONE_SCAFFOLDING_H
+
+#include <assert.h>
+#include "main/menums.h"
+#include "program/prog_statevars.h"
+
+extern "C" void
+_mesa_warning(struct gl_context *ctx, const char *fmtString, ... );
+
+extern "C" void
+_mesa_problem(struct gl_context *ctx, const char *fmtString, ... );
+
+extern "C" void
+_mesa_reference_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program_data **ptr,
+ struct gl_shader_program_data *data);
+
+extern "C" void
+_mesa_reference_shader(struct gl_context *ctx, struct gl_shader **ptr,
+ struct gl_shader *sh);
+
+extern "C" void
+_mesa_reference_program_(struct gl_context *ctx, struct gl_program **ptr,
+ struct gl_program *prog);
+
+extern "C" struct gl_shader *
+_mesa_new_shader(GLuint name, gl_shader_stage stage);
+
+extern "C" void
+_mesa_delete_shader(struct gl_context *ctx, struct gl_shader *sh);
+
+extern "C" void
+_mesa_delete_linked_shader(struct gl_context *ctx,
+ struct gl_linked_shader *sh);
+
+extern "C" void
+_mesa_clear_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program *);
+
+extern "C" void
+_mesa_shader_debug(struct gl_context *ctx, GLenum type, GLuint *id,
+ const char *msg);
+
+extern "C" GLbitfield
+_mesa_program_state_flags(const gl_state_index16 state[STATE_LENGTH]);
+
+
+extern "C" char *
+_mesa_program_state_string(const gl_state_index16 state[STATE_LENGTH]);
+
+static inline gl_shader_stage
+_mesa_shader_enum_to_shader_stage(GLenum v)
+{
+ switch (v) {
+ case GL_VERTEX_SHADER:
+ return MESA_SHADER_VERTEX;
+ case GL_FRAGMENT_SHADER:
+ return MESA_SHADER_FRAGMENT;
+ case GL_GEOMETRY_SHADER:
+ return MESA_SHADER_GEOMETRY;
+ case GL_TESS_CONTROL_SHADER:
+ return MESA_SHADER_TESS_CTRL;
+ case GL_TESS_EVALUATION_SHADER:
+ return MESA_SHADER_TESS_EVAL;
+ case GL_COMPUTE_SHADER:
+ return MESA_SHADER_COMPUTE;
+ default:
+ assert(!"bad value in _mesa_shader_enum_to_shader_stage()");
+ return MESA_SHADER_VERTEX;
+ }
+}
+
+/**
+ * Initialize the given gl_context structure to a reasonable set of
+ * defaults representing the minimum capabilities required by the
+ * OpenGL spec.
+ *
+ * This is used when compiling builtin functions and in testing, when
+ * we don't have a connection to an actual driver.
+ */
+void initialize_context_to_defaults(struct gl_context *ctx, gl_api api);
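+
+/* For example (a sketch; API_OPENGL_COMPAT is one of the gl_api values):
+ *
+ *    struct gl_context local_ctx;
+ *    initialize_context_to_defaults(&local_ctx, API_OPENGL_COMPAT);
+ */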
+
+
+#endif /* STANDALONE_SCAFFOLDING_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp
new file mode 100644
index 0000000000..35fb76bf78
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file string_to_uint_map.cpp
+ * \brief Dumb wrappers so that C code can create and destroy maps.
+ *
+ * \author Ian Romanick <ian.d.romanick@intel.com>
+ */
+#include "string_to_uint_map.h"
+
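+/* A usage sketch from C (hypothetical caller; the map's methods themselves
+ * are only callable from C++):
+ *
+ *    struct string_to_uint_map *m = string_to_uint_map_ctor();
+ *    ...
+ *    string_to_uint_map_dtor(m);
+ */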
+extern "C" struct string_to_uint_map *
+string_to_uint_map_ctor()
+{
+ return new string_to_uint_map;
+}
+
+extern "C" void
+string_to_uint_map_dtor(struct string_to_uint_map *map)
+{
+ delete map;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.h
new file mode 100644
index 0000000000..6f9251f90c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/string_to_uint_map.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef STRING_TO_UINT_MAP_H
+#define STRING_TO_UINT_MAP_H
+
+#include <string.h>
+#include <limits.h>
+#include "util/hash_table.h"
+
+struct string_to_uint_map;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct string_to_uint_map *
+string_to_uint_map_ctor();
+
+void
+string_to_uint_map_dtor(struct string_to_uint_map *);
+
+
+#ifdef __cplusplus
+}
+
+struct string_map_iterate_wrapper_closure {
+ void (*callback)(const char *key, unsigned value, void *closure);
+ void *closure;
+};
+
+/**
+ * Map from a string (name) to an unsigned integer value
+ *
+ * \note
+ * Because of the way this class interacts with the \c hash_table
+ * implementation, a value of \c UINT_MAX cannot be stored in the map.
+ */
+struct string_to_uint_map {
+public:
+ string_to_uint_map()
+ {
+ this->ht = _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+ }
+
+ ~string_to_uint_map()
+ {
+ hash_table_call_foreach(this->ht, delete_key, NULL);
+ _mesa_hash_table_destroy(this->ht, NULL);
+ }
+
+ /**
+ * Remove all mappings from this map.
+ */
+ void clear()
+ {
+ hash_table_call_foreach(this->ht, delete_key, NULL);
+ _mesa_hash_table_clear(this->ht, NULL);
+ }
+
+ /**
+ * Runs a passed callback for the hash
+ */
+ void iterate(void (*func)(const char *, unsigned, void *), void *closure)
+ {
+ struct string_map_iterate_wrapper_closure *wrapper;
+
+ wrapper = (struct string_map_iterate_wrapper_closure *)
+ malloc(sizeof(struct string_map_iterate_wrapper_closure));
+ if (wrapper == NULL)
+ return;
+
+ wrapper->callback = func;
+ wrapper->closure = closure;
+
+ hash_table_call_foreach(this->ht, subtract_one_wrapper, wrapper);
+ free(wrapper);
+ }
+
+ /**
+ * Get the value associated with a particular key
+ *
+ * \return
+ * If \c key is found in the map, \c true is returned. Otherwise \c false
+ * is returned.
+ *
+ * \note
+ * If \c key is not found in the table, \c value is not modified.
+ */
+ bool get(unsigned &value, const char *key)
+ {
+ hash_entry *entry = _mesa_hash_table_search(this->ht,
+ (const void *) key);
+
+ if (!entry)
+ return false;
+
+ const intptr_t v = (intptr_t) entry->data;
+ value = (unsigned)(v - 1);
+ return true;
+ }
+
+ void put(unsigned value, const char *key)
+ {
+      /* The low-level hash table structure returns NULL if key is not in the
+       * hash table. However, users of this map might want to store zero as a
+       * valid value in the table. Bias the value by +1 so that a
+       * user-specified zero is stored as 1. This enables ::get to tell the
+       * difference between a user-specified zero (stored internally as 1)
+       * and a key that is not in the table at all (for which
+       * _mesa_hash_table_search returns NULL).
+       *
+       * The net effect is that we can't store UINT_MAX in the table, because
+       * UINT_MAX + 1 wraps around to 0.
+       */
+ assert(value != UINT_MAX);
+ char *dup_key = strdup(key);
+
+ struct hash_entry *entry = _mesa_hash_table_search(this->ht, dup_key);
+ if (entry) {
+ entry->data = (void *) (intptr_t) (value + 1);
+ } else {
+ _mesa_hash_table_insert(this->ht, dup_key,
+ (void *) (intptr_t) (value + 1));
+ }
+
+ if (entry)
+ free(dup_key);
+ }
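+
+   /* A minimal usage sketch; the +1 bias is invisible to callers, so a
+    * stored zero round-trips correctly:
+    *
+    *    string_to_uint_map m;
+    *    m.put(0, "gl_Vertex");     // stored internally as 1
+    *
+    *    unsigned v = 42;
+    *    if (m.get(v, "gl_Vertex"))
+    *       assert(v == 0);         // get() subtracted the bias
+    */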
+
+private:
+ static void delete_key(const void *key, void *data, void *closure)
+ {
+ (void) data;
+ (void) closure;
+
+ free((char *)key);
+ }
+
+ static void subtract_one_wrapper(const void *key, void *data, void *closure)
+ {
+ struct string_map_iterate_wrapper_closure *wrapper =
+ (struct string_map_iterate_wrapper_closure *) closure;
+ unsigned value = (intptr_t) data;
+
+ value -= 1;
+
+ wrapper->callback((const char *) key, value, wrapper->closure);
+ }
+
+ struct hash_table *ht;
+};
+
+#endif /* __cplusplus */
+#endif /* STRING_TO_UINT_MAP_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/test_optpass.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/test_optpass.h
new file mode 100644
index 0000000000..477a8f2113
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/test_optpass.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef TEST_OPTPASS_H
+#define TEST_OPTPASS_H
+
+int test_optpass(int argc, char **argv);
+
+#endif /* TEST_OPTPASS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/xxd.py b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/xxd.py
new file mode 100644
index 0000000000..f8f57d7712
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/xxd.py
@@ -0,0 +1,111 @@
+# encoding=utf-8
+# Copyright © 2018 Intel Corporation
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice (including the next
+# paragraph) shall be included in all copies or substantial portions of the
+# Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# Converts a file to a C/C++ #include containing a string
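+#
+# Example invocation (hypothetical file names):
+#
+#     python xxd.py float64.glsl float64.glsl.h -n float64_source
+#
+# which writes a header along the lines of:
+#
+#     static const char float64_source[] =
+#        "..."
+#        ;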
+
+from __future__ import unicode_literals
+import argparse
+import io
+import os
+import string
+import sys
+
+
+def get_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('input', help="Name of input file")
+ parser.add_argument('output', help="Name of output file")
+ parser.add_argument("-n", "--name",
+ help="Name of C variable")
+ args = parser.parse_args()
+ return args
+
+
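+# Map a file name to a valid C identifier by replacing any character that is
+# not alphanumeric or '_' with '_', e.g. "float64.glsl" -> "float64_glsl".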
+def filename_to_C_identifier(n):
+ if n[0] != '_' and not n[0].isalpha():
+ n = "_" + n[1:]
+
+ return "".join([c if c.isalnum() or c == "_" else "_" for c in n])
+
+
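+# Append one input byte to the generated C string literal: newline and
+# carriage return also break the generated source line, tab, quote and
+# backslash get C escapes, printable ASCII is copied through, and anything
+# else becomes a \xHH escape.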
+def emit_byte(f, b):
+ if ord(b) == ord('\n'):
+ f.write(b"\\n\"\n \"")
+ return
+ elif ord(b) == ord('\r'):
+ f.write(b"\\r\"\n \"")
+ return
+ elif ord(b) == ord('\t'):
+ f.write(b"\\t")
+ return
+ elif ord(b) == ord('"'):
+ f.write(b"\\\"")
+ return
+ elif ord(b) == ord('\\'):
+ f.write(b"\\\\")
+ return
+
+ if ord(b) >= ord(' ') and ord(b) <= ord('~'):
+ f.write(b)
+ else:
+ hi = ord(b) >> 4
+ lo = ord(b) & 0x0f
+ f.write("\\x{:x}{:x}".format(hi, lo).encode('utf-8'))
+
+
+def process_file(args):
+ with io.open(args.input, "rb") as infile:
+ try:
+ with io.open(args.output, "wb") as outfile:
+                # If a name was not specified on the command line, pick one
+                # based on the name of the input file.
+ if args.name is not None:
+ name = args.name
+ else:
+ name = filename_to_C_identifier(args.input)
+
+ outfile.write("static const char {}[] =\n \"".format(name).encode('utf-8'))
+
+ while True:
+ byte = infile.read(1)
+ if byte == b"":
+ break
+
+ emit_byte(outfile, byte)
+
+ outfile.write(b"\"\n ;\n")
+ except Exception:
+ # In the event that anything goes wrong, delete the output file,
+            # then re-raise the exception. Deleting the output file should
+ # ensure that the build system doesn't try to use the stale,
+ # half-generated file.
+ os.unlink(args.output)
+ raise
+
+
+def main():
+ args = get_args()
+ process_file(args)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.cpp
new file mode 100644
index 0000000000..445659599c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.cpp
@@ -0,0 +1,2954 @@
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include "main/macros.h"
+#include "compiler/glsl/glsl_parser_extras.h"
+#include "glsl_types.h"
+#include "util/hash_table.h"
+#include "util/u_string.h"
+
+
+mtx_t glsl_type::hash_mutex = _MTX_INITIALIZER_NP;
+hash_table *glsl_type::explicit_matrix_types = NULL;
+hash_table *glsl_type::array_types = NULL;
+hash_table *glsl_type::struct_types = NULL;
+hash_table *glsl_type::interface_types = NULL;
+hash_table *glsl_type::function_types = NULL;
+hash_table *glsl_type::subroutine_types = NULL;
+
+/* There might be multiple users of the types (e.g. an application using
+ * OpenGL and Vulkan simultaneously, or one using multiple Vulkan instances).
+ * This counter makes sure we don't release the types while a user still
+ * holds a reference.
+ */
+static uint32_t glsl_type_users = 0;
+
+glsl_type::glsl_type(GLenum gl_type,
+ glsl_base_type base_type, unsigned vector_elements,
+ unsigned matrix_columns, const char *name,
+ unsigned explicit_stride, bool row_major) :
+ gl_type(gl_type),
+ base_type(base_type), sampled_type(GLSL_TYPE_VOID),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ interface_packing(0), interface_row_major(row_major), packed(0),
+ vector_elements(vector_elements), matrix_columns(matrix_columns),
+ length(0), explicit_stride(explicit_stride)
+{
+ /* Values of these types must fit in the two bits of
+ * glsl_type::sampled_type.
+ */
+ STATIC_ASSERT((unsigned(GLSL_TYPE_UINT) & 3) == unsigned(GLSL_TYPE_UINT));
+ STATIC_ASSERT((unsigned(GLSL_TYPE_INT) & 3) == unsigned(GLSL_TYPE_INT));
+ STATIC_ASSERT((unsigned(GLSL_TYPE_FLOAT) & 3) == unsigned(GLSL_TYPE_FLOAT));
+
+ ASSERT_BITFIELD_SIZE(glsl_type, base_type, GLSL_TYPE_ERROR);
+ ASSERT_BITFIELD_SIZE(glsl_type, sampled_type, GLSL_TYPE_ERROR);
+ ASSERT_BITFIELD_SIZE(glsl_type, sampler_dimensionality,
+ GLSL_SAMPLER_DIM_SUBPASS_MS);
+
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ assert(name != NULL);
+ this->name = ralloc_strdup(this->mem_ctx, name);
+
+   /* Either both dimensions are zero, or neither is.
+    */
+ assert((vector_elements == 0) == (matrix_columns == 0));
+ memset(& fields, 0, sizeof(fields));
+}
+
+glsl_type::glsl_type(GLenum gl_type, glsl_base_type base_type,
+ enum glsl_sampler_dim dim, bool shadow, bool array,
+ glsl_base_type type, const char *name) :
+ gl_type(gl_type),
+ base_type(base_type), sampled_type(type),
+ sampler_dimensionality(dim), sampler_shadow(shadow),
+ sampler_array(array), interface_packing(0),
+ interface_row_major(0), packed(0),
+ length(0), explicit_stride(0)
+{
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ assert(name != NULL);
+ this->name = ralloc_strdup(this->mem_ctx, name);
+
+ memset(& fields, 0, sizeof(fields));
+
+ matrix_columns = vector_elements = 1;
+}
+
+glsl_type::glsl_type(const glsl_struct_field *fields, unsigned num_fields,
+ const char *name, bool packed) :
+ gl_type(0),
+ base_type(GLSL_TYPE_STRUCT), sampled_type(GLSL_TYPE_VOID),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ interface_packing(0), interface_row_major(0), packed(packed),
+ vector_elements(0), matrix_columns(0),
+ length(num_fields), explicit_stride(0)
+{
+ unsigned int i;
+
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ assert(name != NULL);
+ this->name = ralloc_strdup(this->mem_ctx, name);
+ /* Zero-fill to prevent spurious Valgrind errors when serializing NIR
+ * due to uninitialized unused bits in bit fields. */
+ this->fields.structure = rzalloc_array(this->mem_ctx,
+ glsl_struct_field, length);
+
+ for (i = 0; i < length; i++) {
+ this->fields.structure[i] = fields[i];
+ this->fields.structure[i].name = ralloc_strdup(this->fields.structure,
+ fields[i].name);
+ }
+}
+
+glsl_type::glsl_type(const glsl_struct_field *fields, unsigned num_fields,
+ enum glsl_interface_packing packing,
+ bool row_major, const char *name) :
+ gl_type(0),
+ base_type(GLSL_TYPE_INTERFACE), sampled_type(GLSL_TYPE_VOID),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ interface_packing((unsigned) packing),
+ interface_row_major((unsigned) row_major), packed(0),
+ vector_elements(0), matrix_columns(0),
+ length(num_fields), explicit_stride(0)
+{
+ unsigned int i;
+
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ assert(name != NULL);
+ this->name = ralloc_strdup(this->mem_ctx, name);
+ this->fields.structure = rzalloc_array(this->mem_ctx,
+ glsl_struct_field, length);
+ for (i = 0; i < length; i++) {
+ this->fields.structure[i] = fields[i];
+ this->fields.structure[i].name = ralloc_strdup(this->fields.structure,
+ fields[i].name);
+ }
+}
+
+glsl_type::glsl_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params) :
+ gl_type(0),
+ base_type(GLSL_TYPE_FUNCTION), sampled_type(GLSL_TYPE_VOID),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ interface_packing(0), interface_row_major(0), packed(0),
+ vector_elements(0), matrix_columns(0),
+ length(num_params), explicit_stride(0)
+{
+ unsigned int i;
+
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ this->fields.parameters = rzalloc_array(this->mem_ctx,
+ glsl_function_param, num_params + 1);
+
+ /* We store the return type as the first parameter */
+ this->fields.parameters[0].type = return_type;
+ this->fields.parameters[0].in = false;
+ this->fields.parameters[0].out = true;
+
+ /* We store the i'th parameter in slot i+1 */
+ for (i = 0; i < length; i++) {
+ this->fields.parameters[i + 1].type = params[i].type;
+ this->fields.parameters[i + 1].in = params[i].in;
+ this->fields.parameters[i + 1].out = params[i].out;
+ }
+}
+
+glsl_type::glsl_type(const char *subroutine_name) :
+ gl_type(0),
+ base_type(GLSL_TYPE_SUBROUTINE), sampled_type(GLSL_TYPE_VOID),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ interface_packing(0), interface_row_major(0), packed(0),
+ vector_elements(1), matrix_columns(1),
+ length(0), explicit_stride(0)
+{
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ assert(subroutine_name != NULL);
+ this->name = ralloc_strdup(this->mem_ctx, subroutine_name);
+}
+
+glsl_type::~glsl_type()
+{
+ ralloc_free(this->mem_ctx);
+}
+
+bool
+glsl_type::contains_sampler() const
+{
+ if (this->is_array()) {
+ return this->fields.array->contains_sampler();
+ } else if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_sampler())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_sampler();
+ }
+}
+
+bool
+glsl_type::contains_array() const
+{
+ if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_array())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_array();
+ }
+}
+
+bool
+glsl_type::contains_integer() const
+{
+ if (this->is_array()) {
+ return this->fields.array->contains_integer();
+ } else if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_integer())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_integer();
+ }
+}
+
+bool
+glsl_type::contains_double() const
+{
+ if (this->is_array()) {
+ return this->fields.array->contains_double();
+ } else if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_double())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_double();
+ }
+}
+
+bool
+glsl_type::contains_64bit() const
+{
+ if (this->is_array()) {
+ return this->fields.array->contains_64bit();
+ } else if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_64bit())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_64bit();
+ }
+}
+
+bool
+glsl_type::contains_opaque() const {
+ switch (base_type) {
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ return true;
+ case GLSL_TYPE_ARRAY:
+ return fields.array->contains_opaque();
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
+ for (unsigned int i = 0; i < length; i++) {
+ if (fields.structure[i].type->contains_opaque())
+ return true;
+ }
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool
+glsl_type::contains_subroutine() const
+{
+ if (this->is_array()) {
+ return this->fields.array->contains_subroutine();
+ } else if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_subroutine())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_subroutine();
+ }
+}
+
+gl_texture_index
+glsl_type::sampler_index() const
+{
+ const glsl_type *const t = (this->is_array()) ? this->fields.array : this;
+
+ assert(t->is_sampler() || t->is_image());
+
+ switch (t->sampler_dimensionality) {
+ case GLSL_SAMPLER_DIM_1D:
+ return (t->sampler_array) ? TEXTURE_1D_ARRAY_INDEX : TEXTURE_1D_INDEX;
+ case GLSL_SAMPLER_DIM_2D:
+ return (t->sampler_array) ? TEXTURE_2D_ARRAY_INDEX : TEXTURE_2D_INDEX;
+ case GLSL_SAMPLER_DIM_3D:
+ return TEXTURE_3D_INDEX;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return (t->sampler_array) ? TEXTURE_CUBE_ARRAY_INDEX : TEXTURE_CUBE_INDEX;
+ case GLSL_SAMPLER_DIM_RECT:
+ return TEXTURE_RECT_INDEX;
+ case GLSL_SAMPLER_DIM_BUF:
+ return TEXTURE_BUFFER_INDEX;
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return TEXTURE_EXTERNAL_INDEX;
+ case GLSL_SAMPLER_DIM_MS:
+ return (t->sampler_array) ? TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX : TEXTURE_2D_MULTISAMPLE_INDEX;
+ default:
+ assert(!"Should not get here.");
+ return TEXTURE_BUFFER_INDEX;
+ }
+}
+
+bool
+glsl_type::contains_image() const
+{
+ if (this->is_array()) {
+ return this->fields.array->contains_image();
+ } else if (this->is_struct() || this->is_interface()) {
+ for (unsigned int i = 0; i < this->length; i++) {
+ if (this->fields.structure[i].type->contains_image())
+ return true;
+ }
+ return false;
+ } else {
+ return this->is_image();
+ }
+}
+
+const glsl_type *glsl_type::get_base_type() const
+{
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ return uint_type;
+ case GLSL_TYPE_UINT16:
+ return uint16_t_type;
+ case GLSL_TYPE_UINT8:
+ return uint8_t_type;
+ case GLSL_TYPE_INT:
+ return int_type;
+ case GLSL_TYPE_INT16:
+ return int16_t_type;
+ case GLSL_TYPE_INT8:
+ return int8_t_type;
+ case GLSL_TYPE_FLOAT:
+ return float_type;
+ case GLSL_TYPE_FLOAT16:
+ return float16_t_type;
+ case GLSL_TYPE_DOUBLE:
+ return double_type;
+ case GLSL_TYPE_BOOL:
+ return bool_type;
+ case GLSL_TYPE_UINT64:
+ return uint64_t_type;
+ case GLSL_TYPE_INT64:
+ return int64_t_type;
+ default:
+ return error_type;
+ }
+}
+
+
+const glsl_type *glsl_type::get_scalar_type() const
+{
+ const glsl_type *type = this;
+
+ /* Handle arrays */
+ while (type->base_type == GLSL_TYPE_ARRAY)
+ type = type->fields.array;
+
+ const glsl_type *scalar_type = type->get_base_type();
+ if (scalar_type == error_type)
+ return type;
+
+ return scalar_type;
+}
+
+
+const glsl_type *glsl_type::get_bare_type() const
+{
+ switch (this->base_type) {
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ return get_instance(this->base_type, this->vector_elements,
+ this->matrix_columns);
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
+ glsl_struct_field *bare_fields = new glsl_struct_field[this->length];
+ for (unsigned i = 0; i < this->length; i++) {
+ bare_fields[i].type = this->fields.structure[i].type->get_bare_type();
+ bare_fields[i].name = this->fields.structure[i].name;
+ }
+ const glsl_type *bare_type =
+ get_struct_instance(bare_fields, this->length, this->name);
+ delete[] bare_fields;
+ return bare_type;
+ }
+
+ case GLSL_TYPE_ARRAY:
+ return get_array_instance(this->fields.array->get_bare_type(),
+ this->length);
+
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_SUBROUTINE:
+ case GLSL_TYPE_FUNCTION:
+ case GLSL_TYPE_ERROR:
+ return this;
+ }
+
+ unreachable("Invalid base type");
+}
+
+const glsl_type *glsl_type::get_float16_type() const
+{
+ assert(this->base_type == GLSL_TYPE_FLOAT);
+
+ return get_instance(GLSL_TYPE_FLOAT16,
+ this->vector_elements,
+ this->matrix_columns,
+ this->explicit_stride,
+ this->interface_row_major);
+}
+
+static void
+hash_free_type_function(struct hash_entry *entry)
+{
+ glsl_type *type = (glsl_type *) entry->data;
+
+ if (type->is_array())
+ free((void*)entry->key);
+
+ delete type;
+}
+
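+/* Each user of the type system brackets its lifetime with this pair, e.g.:
+ *
+ *    glsl_type_singleton_init_or_ref();
+ *    ... create and use glsl_type instances ...
+ *    glsl_type_singleton_decref();
+ */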
+void
+glsl_type_singleton_init_or_ref()
+{
+ mtx_lock(&glsl_type::hash_mutex);
+ glsl_type_users++;
+ mtx_unlock(&glsl_type::hash_mutex);
+}
+
+void
+glsl_type_singleton_decref()
+{
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ /* Do not release glsl_types if they are still used. */
+ if (--glsl_type_users) {
+ mtx_unlock(&glsl_type::hash_mutex);
+ return;
+ }
+
+ if (glsl_type::explicit_matrix_types != NULL) {
+ _mesa_hash_table_destroy(glsl_type::explicit_matrix_types,
+ hash_free_type_function);
+ glsl_type::explicit_matrix_types = NULL;
+ }
+
+ if (glsl_type::array_types != NULL) {
+ _mesa_hash_table_destroy(glsl_type::array_types, hash_free_type_function);
+ glsl_type::array_types = NULL;
+ }
+
+ if (glsl_type::struct_types != NULL) {
+ _mesa_hash_table_destroy(glsl_type::struct_types, hash_free_type_function);
+ glsl_type::struct_types = NULL;
+ }
+
+ if (glsl_type::interface_types != NULL) {
+ _mesa_hash_table_destroy(glsl_type::interface_types, hash_free_type_function);
+ glsl_type::interface_types = NULL;
+ }
+
+ if (glsl_type::function_types != NULL) {
+ _mesa_hash_table_destroy(glsl_type::function_types, hash_free_type_function);
+ glsl_type::function_types = NULL;
+ }
+
+ if (glsl_type::subroutine_types != NULL) {
+ _mesa_hash_table_destroy(glsl_type::subroutine_types, hash_free_type_function);
+ glsl_type::subroutine_types = NULL;
+ }
+
+ mtx_unlock(&glsl_type::hash_mutex);
+}
+
+
+glsl_type::glsl_type(const glsl_type *array, unsigned length,
+ unsigned explicit_stride) :
+ base_type(GLSL_TYPE_ARRAY), sampled_type(GLSL_TYPE_VOID),
+ sampler_dimensionality(0), sampler_shadow(0), sampler_array(0),
+ interface_packing(0), interface_row_major(0), packed(0),
+ vector_elements(0), matrix_columns(0),
+ length(length), name(NULL), explicit_stride(explicit_stride)
+{
+ this->fields.array = array;
+ /* Inherit the gl type of the base. The GL type is used for
+ * uniform/statevar handling in Mesa and the arrayness of the type
+ * is represented by the size rather than the type.
+ */
+ this->gl_type = array->gl_type;
+
+   /* Allow a maximum of 10 characters for the array size, enough for the
+    * decimal digits of 32-bit ~0 (4294967295). The extra 3 are for the
+    * '[', ']', and terminating NUL.
+    */
+ const unsigned name_length = strlen(array->name) + 10 + 3;
+
+ this->mem_ctx = ralloc_context(NULL);
+ assert(this->mem_ctx != NULL);
+
+ char *const n = (char *) ralloc_size(this->mem_ctx, name_length);
+
+ if (length == 0)
+ snprintf(n, name_length, "%s[]", array->name);
+ else {
+ /* insert outermost dimensions in the correct spot
+ * otherwise the dimension order will be backwards
+ */
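+      /* e.g. wrapping vec4[3] in a two-element array yields "vec4[2][3]",
+       * not "vec4[3][2]".
+       */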
+ const char *pos = strchr(array->name, '[');
+ if (pos) {
+ int idx = pos - array->name;
+ snprintf(n, idx+1, "%s", array->name);
+ snprintf(n + idx, name_length - idx, "[%u]%s",
+ length, array->name + idx);
+ } else {
+ snprintf(n, name_length, "%s[%u]", array->name, length);
+ }
+ }
+
+ this->name = n;
+}
+
+const glsl_type *
+glsl_type::vec(unsigned components, const glsl_type *const ts[])
+{
+ unsigned n = components;
+
+ if (components == 8)
+ n = 5;
+ else if (components == 16)
+ n = 6;
+
+ if (n == 0 || n > 6)
+ return error_type;
+
+ return ts[n - 1];
+}
+
+#define VECN(components, sname, vname) \
+const glsl_type * \
+glsl_type:: vname (unsigned components) \
+{ \
+ static const glsl_type *const ts[] = { \
+ sname ## _type, vname ## 2_type, \
+ vname ## 3_type, vname ## 4_type, \
+ vname ## 8_type, vname ## 16_type, \
+ }; \
+ return glsl_type::vec(components, ts); \
+}
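+
+/* For example, VECN(components, float, vec) defines glsl_type::vec(unsigned),
+ * mapping a component count of 1..4, 8 or 16 to float_type, vec2_type,
+ * vec3_type, vec4_type, vec8_type or vec16_type via glsl_type::vec() above.
+ */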
+
+VECN(components, float, vec)
+VECN(components, float16_t, f16vec)
+VECN(components, double, dvec)
+VECN(components, int, ivec)
+VECN(components, uint, uvec)
+VECN(components, bool, bvec)
+VECN(components, int64_t, i64vec)
+VECN(components, uint64_t, u64vec)
+VECN(components, int16_t, i16vec)
+VECN(components, uint16_t, u16vec)
+VECN(components, int8_t, i8vec)
+VECN(components, uint8_t, u8vec)
+
+const glsl_type *
+glsl_type::get_instance(unsigned base_type, unsigned rows, unsigned columns,
+ unsigned explicit_stride, bool row_major)
+{
+ if (base_type == GLSL_TYPE_VOID) {
+ assert(explicit_stride == 0 && !row_major);
+ return void_type;
+ }
+
+ /* Matrix and vector types with explicit strides have to be looked up in a
+ * table so they're handled separately.
+ */
+ if (explicit_stride > 0) {
+ const glsl_type *bare_type = get_instance(base_type, rows, columns);
+
+ assert(columns > 1 || !row_major);
+
+ char name[128];
+ snprintf(name, sizeof(name), "%sx%uB%s", bare_type->name,
+ explicit_stride, row_major ? "RM" : "");
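+      /* e.g. a row-major mat2 with a 32-byte explicit stride is keyed as
+       * "mat2x32BRM".
+       */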
+
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ if (explicit_matrix_types == NULL) {
+ explicit_matrix_types =
+ _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+ }
+
+ const struct hash_entry *entry =
+ _mesa_hash_table_search(explicit_matrix_types, name);
+ if (entry == NULL) {
+ const glsl_type *t = new glsl_type(bare_type->gl_type,
+ (glsl_base_type)base_type,
+ rows, columns, name,
+ explicit_stride, row_major);
+
+ entry = _mesa_hash_table_insert(explicit_matrix_types,
+ t->name, (void *)t);
+ }
+
+ assert(((glsl_type *) entry->data)->base_type == base_type);
+ assert(((glsl_type *) entry->data)->vector_elements == rows);
+ assert(((glsl_type *) entry->data)->matrix_columns == columns);
+ assert(((glsl_type *) entry->data)->explicit_stride == explicit_stride);
+
+ const glsl_type *t = (const glsl_type *) entry->data;
+
+ mtx_unlock(&glsl_type::hash_mutex);
+
+ return t;
+ }
+
+ assert(!row_major);
+
+ /* Treat GLSL vectors as Nx1 matrices.
+ */
+ if (columns == 1) {
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ return uvec(rows);
+ case GLSL_TYPE_INT:
+ return ivec(rows);
+ case GLSL_TYPE_FLOAT:
+ return vec(rows);
+ case GLSL_TYPE_FLOAT16:
+ return f16vec(rows);
+ case GLSL_TYPE_DOUBLE:
+ return dvec(rows);
+ case GLSL_TYPE_BOOL:
+ return bvec(rows);
+ case GLSL_TYPE_UINT64:
+ return u64vec(rows);
+ case GLSL_TYPE_INT64:
+ return i64vec(rows);
+ case GLSL_TYPE_UINT16:
+ return u16vec(rows);
+ case GLSL_TYPE_INT16:
+ return i16vec(rows);
+ case GLSL_TYPE_UINT8:
+ return u8vec(rows);
+ case GLSL_TYPE_INT8:
+ return i8vec(rows);
+ default:
+ return error_type;
+ }
+ } else {
+ if ((base_type != GLSL_TYPE_FLOAT &&
+ base_type != GLSL_TYPE_DOUBLE &&
+ base_type != GLSL_TYPE_FLOAT16) || (rows == 1))
+ return error_type;
+
+ /* GLSL matrix types are named mat{COLUMNS}x{ROWS}. Only the following
+ * combinations are valid:
+ *
+ * 1 2 3 4
+ * 1
+ * 2 x x x
+ * 3 x x x
+ * 4 x x x
+ */
+#define IDX(c,r) (((c-1)*3) + (r-1))
+
+ switch (base_type) {
+ case GLSL_TYPE_DOUBLE: {
+ switch (IDX(columns, rows)) {
+ case IDX(2,2): return dmat2_type;
+ case IDX(2,3): return dmat2x3_type;
+ case IDX(2,4): return dmat2x4_type;
+ case IDX(3,2): return dmat3x2_type;
+ case IDX(3,3): return dmat3_type;
+ case IDX(3,4): return dmat3x4_type;
+ case IDX(4,2): return dmat4x2_type;
+ case IDX(4,3): return dmat4x3_type;
+ case IDX(4,4): return dmat4_type;
+ default: return error_type;
+ }
+ }
+ case GLSL_TYPE_FLOAT: {
+ switch (IDX(columns, rows)) {
+ case IDX(2,2): return mat2_type;
+ case IDX(2,3): return mat2x3_type;
+ case IDX(2,4): return mat2x4_type;
+ case IDX(3,2): return mat3x2_type;
+ case IDX(3,3): return mat3_type;
+ case IDX(3,4): return mat3x4_type;
+ case IDX(4,2): return mat4x2_type;
+ case IDX(4,3): return mat4x3_type;
+ case IDX(4,4): return mat4_type;
+ default: return error_type;
+ }
+ }
+ case GLSL_TYPE_FLOAT16: {
+ switch (IDX(columns, rows)) {
+ case IDX(2,2): return f16mat2_type;
+ case IDX(2,3): return f16mat2x3_type;
+ case IDX(2,4): return f16mat2x4_type;
+ case IDX(3,2): return f16mat3x2_type;
+ case IDX(3,3): return f16mat3_type;
+ case IDX(3,4): return f16mat3x4_type;
+ case IDX(4,2): return f16mat4x2_type;
+ case IDX(4,3): return f16mat4x3_type;
+ case IDX(4,4): return f16mat4_type;
+ default: return error_type;
+ }
+ }
+ default: return error_type;
+ }
+ }
+
+ assert(!"Should not get here.");
+ return error_type;
+}
+
+const glsl_type *
+glsl_type::get_sampler_instance(enum glsl_sampler_dim dim,
+ bool shadow,
+ bool array,
+ glsl_base_type type)
+{
+ switch (type) {
+ case GLSL_TYPE_FLOAT:
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ if (shadow)
+ return (array ? sampler1DArrayShadow_type : sampler1DShadow_type);
+ else
+ return (array ? sampler1DArray_type : sampler1D_type);
+ case GLSL_SAMPLER_DIM_2D:
+ if (shadow)
+ return (array ? sampler2DArrayShadow_type : sampler2DShadow_type);
+ else
+ return (array ? sampler2DArray_type : sampler2D_type);
+ case GLSL_SAMPLER_DIM_3D:
+ if (shadow || array)
+ return error_type;
+ else
+ return sampler3D_type;
+ case GLSL_SAMPLER_DIM_CUBE:
+ if (shadow)
+ return (array ? samplerCubeArrayShadow_type : samplerCubeShadow_type);
+ else
+ return (array ? samplerCubeArray_type : samplerCube_type);
+ case GLSL_SAMPLER_DIM_RECT:
+ if (array)
+ return error_type;
+ if (shadow)
+ return sampler2DRectShadow_type;
+ else
+ return sampler2DRect_type;
+ case GLSL_SAMPLER_DIM_BUF:
+ if (shadow || array)
+ return error_type;
+ else
+ return samplerBuffer_type;
+ case GLSL_SAMPLER_DIM_MS:
+ if (shadow)
+ return error_type;
+ return (array ? sampler2DMSArray_type : sampler2DMS_type);
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ if (shadow || array)
+ return error_type;
+ else
+ return samplerExternalOES_type;
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return error_type;
+ }
+ case GLSL_TYPE_INT:
+ if (shadow)
+ return error_type;
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return (array ? isampler1DArray_type : isampler1D_type);
+ case GLSL_SAMPLER_DIM_2D:
+ return (array ? isampler2DArray_type : isampler2D_type);
+ case GLSL_SAMPLER_DIM_3D:
+ if (array)
+ return error_type;
+ return isampler3D_type;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return (array ? isamplerCubeArray_type : isamplerCube_type);
+ case GLSL_SAMPLER_DIM_RECT:
+ if (array)
+ return error_type;
+ return isampler2DRect_type;
+ case GLSL_SAMPLER_DIM_BUF:
+ if (array)
+ return error_type;
+ return isamplerBuffer_type;
+ case GLSL_SAMPLER_DIM_MS:
+ return (array ? isampler2DMSArray_type : isampler2DMS_type);
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return error_type;
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return error_type;
+ }
+ case GLSL_TYPE_UINT:
+ if (shadow)
+ return error_type;
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return (array ? usampler1DArray_type : usampler1D_type);
+ case GLSL_SAMPLER_DIM_2D:
+ return (array ? usampler2DArray_type : usampler2D_type);
+ case GLSL_SAMPLER_DIM_3D:
+ if (array)
+ return error_type;
+ return usampler3D_type;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return (array ? usamplerCubeArray_type : usamplerCube_type);
+ case GLSL_SAMPLER_DIM_RECT:
+ if (array)
+ return error_type;
+ return usampler2DRect_type;
+ case GLSL_SAMPLER_DIM_BUF:
+ if (array)
+ return error_type;
+ return usamplerBuffer_type;
+ case GLSL_SAMPLER_DIM_MS:
+ return (array ? usampler2DMSArray_type : usampler2DMS_type);
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return error_type;
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return error_type;
+ }
+ default:
+ return error_type;
+ }
+
+ unreachable("switch statement above should be complete");
+}
+
+const glsl_type *
+glsl_type::get_image_instance(enum glsl_sampler_dim dim,
+ bool array, glsl_base_type type)
+{
+ switch (type) {
+ case GLSL_TYPE_FLOAT:
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return (array ? image1DArray_type : image1D_type);
+ case GLSL_SAMPLER_DIM_2D:
+ return (array ? image2DArray_type : image2D_type);
+ case GLSL_SAMPLER_DIM_3D:
+ return image3D_type;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return (array ? imageCubeArray_type : imageCube_type);
+ case GLSL_SAMPLER_DIM_RECT:
+ if (array)
+ return error_type;
+ else
+ return image2DRect_type;
+ case GLSL_SAMPLER_DIM_BUF:
+ if (array)
+ return error_type;
+ else
+ return imageBuffer_type;
+ case GLSL_SAMPLER_DIM_MS:
+ return (array ? image2DMSArray_type : image2DMS_type);
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ return subpassInput_type;
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return subpassInputMS_type;
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return error_type;
+ }
+ case GLSL_TYPE_INT:
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return (array ? iimage1DArray_type : iimage1D_type);
+ case GLSL_SAMPLER_DIM_2D:
+ return (array ? iimage2DArray_type : iimage2D_type);
+ case GLSL_SAMPLER_DIM_3D:
+ if (array)
+ return error_type;
+ return iimage3D_type;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return (array ? iimageCubeArray_type : iimageCube_type);
+ case GLSL_SAMPLER_DIM_RECT:
+ if (array)
+ return error_type;
+ return iimage2DRect_type;
+ case GLSL_SAMPLER_DIM_BUF:
+ if (array)
+ return error_type;
+ return iimageBuffer_type;
+ case GLSL_SAMPLER_DIM_MS:
+ return (array ? iimage2DMSArray_type : iimage2DMS_type);
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ return isubpassInput_type;
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return isubpassInputMS_type;
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return error_type;
+ }
+ case GLSL_TYPE_UINT:
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ return (array ? uimage1DArray_type : uimage1D_type);
+ case GLSL_SAMPLER_DIM_2D:
+ return (array ? uimage2DArray_type : uimage2D_type);
+ case GLSL_SAMPLER_DIM_3D:
+ if (array)
+ return error_type;
+ return uimage3D_type;
+ case GLSL_SAMPLER_DIM_CUBE:
+ return (array ? uimageCubeArray_type : uimageCube_type);
+ case GLSL_SAMPLER_DIM_RECT:
+ if (array)
+ return error_type;
+ return uimage2DRect_type;
+ case GLSL_SAMPLER_DIM_BUF:
+ if (array)
+ return error_type;
+ return uimageBuffer_type;
+ case GLSL_SAMPLER_DIM_MS:
+ return (array ? uimage2DMSArray_type : uimage2DMS_type);
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ return usubpassInput_type;
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return usubpassInputMS_type;
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ return error_type;
+ }
+ default:
+ return error_type;
+ }
+
+ unreachable("switch statement above should be complete");
+}
+
+const glsl_type *
+glsl_type::get_array_instance(const glsl_type *base,
+ unsigned array_size,
+ unsigned explicit_stride)
+{
+ /* Generate a name using the base type pointer in the key. This is
+ * done because the name of the base type may not be unique across
+ * shaders. For example, two shaders may have different record types
+ * named 'foo'.
+ */
+ char key[128];
+ snprintf(key, sizeof(key), "%p[%u]x%uB", (void *) base, array_size,
+ explicit_stride);
+
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ if (array_types == NULL) {
+ array_types = _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+ }
+
+ const struct hash_entry *entry = _mesa_hash_table_search(array_types, key);
+ if (entry == NULL) {
+ const glsl_type *t = new glsl_type(base, array_size, explicit_stride);
+
+ entry = _mesa_hash_table_insert(array_types,
+ strdup(key),
+ (void *) t);
+ }
+
+ assert(((glsl_type *) entry->data)->base_type == GLSL_TYPE_ARRAY);
+ assert(((glsl_type *) entry->data)->length == array_size);
+ assert(((glsl_type *) entry->data)->fields.array == base);
+
+ glsl_type *t = (glsl_type *) entry->data;
+
+ mtx_unlock(&glsl_type::hash_mutex);
+
+ return t;
+}
+
+bool
+glsl_type::compare_no_precision(const glsl_type *b) const
+{
+ if (this == b)
+ return true;
+
+ if (this->is_array()) {
+ if (!b->is_array() || this->length != b->length)
+ return false;
+
+ const glsl_type *b_no_array = b->fields.array;
+
+ return this->fields.array->compare_no_precision(b_no_array);
+ }
+
+ if (this->is_struct()) {
+ if (!b->is_struct())
+ return false;
+ } else if (this->is_interface()) {
+ if (!b->is_interface())
+ return false;
+ } else {
+ return false;
+ }
+
+ return record_compare(b,
+ true, /* match_name */
+ true, /* match_locations */
+ false /* match_precision */);
+}
+
+bool
+glsl_type::record_compare(const glsl_type *b, bool match_name,
+ bool match_locations, bool match_precision) const
+{
+ if (this->length != b->length)
+ return false;
+
+ if (this->interface_packing != b->interface_packing)
+ return false;
+
+ if (this->interface_row_major != b->interface_row_major)
+ return false;
+
+ /* From the GLSL 4.20 specification (Sec 4.2):
+ *
+ * "Structures must have the same name, sequence of type names, and
+ * type definitions, and field names to be considered the same type."
+ *
+ * GLSL ES behaves the same (Ver 1.00 Sec 4.2.4, Ver 3.00 Sec 4.2.5).
+ *
+ * Section 7.4.1 (Shader Interface Matching) of the OpenGL 4.30 spec says:
+ *
+ * "Variables or block members declared as structures are considered
+ * to match in type if and only if structure members match in name,
+ * type, qualification, and declaration order."
+ */
+ if (match_name)
+ if (strcmp(this->name, b->name) != 0)
+ return false;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ if (match_precision) {
+ if (this->fields.structure[i].type != b->fields.structure[i].type)
+ return false;
+ } else {
+ const glsl_type *ta = this->fields.structure[i].type;
+ const glsl_type *tb = b->fields.structure[i].type;
+ if (!ta->compare_no_precision(tb))
+ return false;
+ }
+ if (strcmp(this->fields.structure[i].name,
+ b->fields.structure[i].name) != 0)
+ return false;
+ if (this->fields.structure[i].matrix_layout
+ != b->fields.structure[i].matrix_layout)
+ return false;
+ if (match_locations && this->fields.structure[i].location
+ != b->fields.structure[i].location)
+ return false;
+ if (this->fields.structure[i].offset
+ != b->fields.structure[i].offset)
+ return false;
+ if (this->fields.structure[i].interpolation
+ != b->fields.structure[i].interpolation)
+ return false;
+ if (this->fields.structure[i].centroid
+ != b->fields.structure[i].centroid)
+ return false;
+ if (this->fields.structure[i].sample
+ != b->fields.structure[i].sample)
+ return false;
+ if (this->fields.structure[i].patch
+ != b->fields.structure[i].patch)
+ return false;
+ if (this->fields.structure[i].memory_read_only
+ != b->fields.structure[i].memory_read_only)
+ return false;
+ if (this->fields.structure[i].memory_write_only
+ != b->fields.structure[i].memory_write_only)
+ return false;
+ if (this->fields.structure[i].memory_coherent
+ != b->fields.structure[i].memory_coherent)
+ return false;
+ if (this->fields.structure[i].memory_volatile
+ != b->fields.structure[i].memory_volatile)
+ return false;
+ if (this->fields.structure[i].memory_restrict
+ != b->fields.structure[i].memory_restrict)
+ return false;
+ if (this->fields.structure[i].image_format
+ != b->fields.structure[i].image_format)
+ return false;
+ if (match_precision &&
+ this->fields.structure[i].precision
+ != b->fields.structure[i].precision)
+ return false;
+ if (this->fields.structure[i].explicit_xfb_buffer
+ != b->fields.structure[i].explicit_xfb_buffer)
+ return false;
+ if (this->fields.structure[i].xfb_buffer
+ != b->fields.structure[i].xfb_buffer)
+ return false;
+ if (this->fields.structure[i].xfb_stride
+ != b->fields.structure[i].xfb_stride)
+ return false;
+ }
+
+ return true;
+}
+
+
+bool
+glsl_type::record_key_compare(const void *a, const void *b)
+{
+ const glsl_type *const key1 = (glsl_type *) a;
+ const glsl_type *const key2 = (glsl_type *) b;
+
+ return strcmp(key1->name, key2->name) == 0 &&
+ key1->record_compare(key2, true);
+}
+
+
+/**
+ * Generate an integer hash value for a glsl_type structure type.
+ */
+unsigned
+glsl_type::record_key_hash(const void *a)
+{
+ const glsl_type *const key = (glsl_type *) a;
+ uintptr_t hash = key->length;
+ unsigned retval;
+
+ for (unsigned i = 0; i < key->length; i++) {
+ /* Mix in each field's type pointer, cast to uintptr_t. */
+ hash = (hash * 13) + (uintptr_t) key->fields.structure[i].type;
+ }
+
+ if (sizeof(hash) == 8)
+ retval = (hash & 0xffffffff) ^ ((uint64_t) hash >> 32);
+ else
+ retval = hash;
+
+ return retval;
+}
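+
+/* Fold sketch (assumes 64-bit pointers): XORing the two halves lets both
+ * halves of the accumulated hash influence the 32-bit result, e.g.
+ *
+ *    uint64_t h = 0x1122334455667788ull;
+ *    unsigned r = (h & 0xffffffff) ^ (h >> 32);  // 0x55667788 ^ 0x11223344
+ */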
+
+
+const glsl_type *
+glsl_type::get_struct_instance(const glsl_struct_field *fields,
+ unsigned num_fields,
+ const char *name,
+ bool packed)
+{
+ const glsl_type key(fields, num_fields, name, packed);
+
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ if (struct_types == NULL) {
+ struct_types = _mesa_hash_table_create(NULL, record_key_hash,
+ record_key_compare);
+ }
+
+ const struct hash_entry *entry = _mesa_hash_table_search(struct_types,
+ &key);
+ if (entry == NULL) {
+ const glsl_type *t = new glsl_type(fields, num_fields, name, packed);
+
+ entry = _mesa_hash_table_insert(struct_types, t, (void *) t);
+ }
+
+ assert(((glsl_type *) entry->data)->base_type == GLSL_TYPE_STRUCT);
+ assert(((glsl_type *) entry->data)->length == num_fields);
+ assert(strcmp(((glsl_type *) entry->data)->name, name) == 0);
+ assert(((glsl_type *) entry->data)->packed == packed);
+
+ glsl_type *t = (glsl_type *) entry->data;
+
+ mtx_unlock(&glsl_type::hash_mutex);
+
+ return t;
+}
+
+
+const glsl_type *
+glsl_type::get_interface_instance(const glsl_struct_field *fields,
+ unsigned num_fields,
+ enum glsl_interface_packing packing,
+ bool row_major,
+ const char *block_name)
+{
+ const glsl_type key(fields, num_fields, packing, row_major, block_name);
+
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ if (interface_types == NULL) {
+ interface_types = _mesa_hash_table_create(NULL, record_key_hash,
+ record_key_compare);
+ }
+
+ const struct hash_entry *entry = _mesa_hash_table_search(interface_types,
+ &key);
+ if (entry == NULL) {
+ const glsl_type *t = new glsl_type(fields, num_fields,
+ packing, row_major, block_name);
+
+ entry = _mesa_hash_table_insert(interface_types, t, (void *) t);
+ }
+
+ assert(((glsl_type *) entry->data)->base_type == GLSL_TYPE_INTERFACE);
+ assert(((glsl_type *) entry->data)->length == num_fields);
+ assert(strcmp(((glsl_type *) entry->data)->name, block_name) == 0);
+
+ glsl_type *t = (glsl_type *) entry->data;
+
+ mtx_unlock(&glsl_type::hash_mutex);
+
+ return t;
+}
+
+const glsl_type *
+glsl_type::get_subroutine_instance(const char *subroutine_name)
+{
+ const glsl_type key(subroutine_name);
+
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ if (subroutine_types == NULL) {
+ subroutine_types = _mesa_hash_table_create(NULL, record_key_hash,
+ record_key_compare);
+ }
+
+ const struct hash_entry *entry = _mesa_hash_table_search(subroutine_types,
+ &key);
+ if (entry == NULL) {
+ const glsl_type *t = new glsl_type(subroutine_name);
+
+ entry = _mesa_hash_table_insert(subroutine_types, t, (void *) t);
+ }
+
+ assert(((glsl_type *) entry->data)->base_type == GLSL_TYPE_SUBROUTINE);
+ assert(strcmp(((glsl_type *) entry->data)->name, subroutine_name) == 0);
+
+ glsl_type *t = (glsl_type *) entry->data;
+
+ mtx_unlock(&glsl_type::hash_mutex);
+
+ return t;
+}
+
+
+static bool
+function_key_compare(const void *a, const void *b)
+{
+ const glsl_type *const key1 = (glsl_type *) a;
+ const glsl_type *const key2 = (glsl_type *) b;
+
+ if (key1->length != key2->length)
+ return false;
+
+ return memcmp(key1->fields.parameters, key2->fields.parameters,
+ (key1->length + 1) * sizeof(*key1->fields.parameters)) == 0;
+}
+
+
+static uint32_t
+function_key_hash(const void *a)
+{
+ const glsl_type *const key = (glsl_type *) a;
+ return _mesa_hash_data(key->fields.parameters,
+ (key->length + 1) * sizeof(*key->fields.parameters));
+}
+
+const glsl_type *
+glsl_type::get_function_instance(const glsl_type *return_type,
+ const glsl_function_param *params,
+ unsigned num_params)
+{
+ const glsl_type key(return_type, params, num_params);
+
+ mtx_lock(&glsl_type::hash_mutex);
+ assert(glsl_type_users > 0);
+
+ if (function_types == NULL) {
+ function_types = _mesa_hash_table_create(NULL, function_key_hash,
+ function_key_compare);
+ }
+
+ struct hash_entry *entry = _mesa_hash_table_search(function_types, &key);
+ if (entry == NULL) {
+ const glsl_type *t = new glsl_type(return_type, params, num_params);
+
+ entry = _mesa_hash_table_insert(function_types, t, (void *) t);
+ }
+
+ const glsl_type *t = (const glsl_type *)entry->data;
+
+ assert(t->base_type == GLSL_TYPE_FUNCTION);
+ assert(t->length == num_params);
+
+ mtx_unlock(&glsl_type::hash_mutex);
+
+ return t;
+}
+
+
+const glsl_type *
+glsl_type::get_mul_type(const glsl_type *type_a, const glsl_type *type_b)
+{
+ if (type_a->is_matrix() && type_b->is_matrix()) {
+ /* Matrix multiply. The columns of A must match the rows of B. Given
+ * the other previously tested constraints, this means the vector type
+ * of a row from A must be the same as the vector type of a column from
+ * B.
+ */
+ if (type_a->row_type() == type_b->column_type()) {
+ /* The resulting matrix has the number of columns of matrix B and
+ * the number of rows of matrix A. We get the row count of A by
+ * looking at the size of a vector that makes up a column. The
+ * transpose (size of a row) is done for B.
+ */
+ const glsl_type *const type =
+ get_instance(type_a->base_type,
+ type_a->column_type()->vector_elements,
+ type_b->row_type()->vector_elements);
+ assert(type != error_type);
+
+ return type;
+ }
+ } else if (type_a == type_b) {
+ return type_a;
+ } else if (type_a->is_matrix()) {
+ /* A is a matrix and B is a column vector. Columns of A must match
+ * rows of B. Given the other previously tested constraints, this
+ * means the vector type of a row from A must be the same as the vector
+ * type of B.
+ */
+ if (type_a->row_type() == type_b) {
+ /* The resulting vector has a number of elements equal to
+ * the number of rows of matrix A. */
+ const glsl_type *const type =
+ get_instance(type_a->base_type,
+ type_a->column_type()->vector_elements,
+ 1);
+ assert(type != error_type);
+
+ return type;
+ }
+ } else {
+ assert(type_b->is_matrix());
+
+ /* A is a row vector and B is a matrix. Columns of A must match rows
+ * of B. Given the other previously tested constraints, this means
+ * the type of A must be the same as the vector type of a column from
+ * B.
+ */
+ if (type_a == type_b->column_type()) {
+ /* The resulting vector has a number of elements equal to
+ * the number of columns of matrix B. */
+ const glsl_type *const type =
+ get_instance(type_a->base_type,
+ type_b->row_type()->vector_elements,
+ 1);
+ assert(type != error_type);
+
+ return type;
+ }
+ }
+
+ return error_type;
+}
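+
+/* Worked example of the sizing rules above (illustrative): in GLSL,
+ * matCxR has C columns of R-component vectors, so for mat2x3 * mat4x2
+ * the row type of A (vec2) matches the column type of B (vec2), and the
+ * product takes B's column count and A's row count:
+ *
+ *    assert(glsl_type::get_mul_type(glsl_type::mat2x3_type,
+ *                                   glsl_type::mat4x2_type)
+ *           == glsl_type::mat4x3_type);  // 4 columns of vec3
+ */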
+
+
+const glsl_type *
+glsl_type::field_type(const char *name) const
+{
+ if (this->base_type != GLSL_TYPE_STRUCT
+ && this->base_type != GLSL_TYPE_INTERFACE)
+ return error_type;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ if (strcmp(name, this->fields.structure[i].name) == 0)
+ return this->fields.structure[i].type;
+ }
+
+ return error_type;
+}
+
+
+int
+glsl_type::field_index(const char *name) const
+{
+ if (this->base_type != GLSL_TYPE_STRUCT
+ && this->base_type != GLSL_TYPE_INTERFACE)
+ return -1;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ if (strcmp(name, this->fields.structure[i].name) == 0)
+ return i;
+ }
+
+ return -1;
+}
+
+
+unsigned
+glsl_type::component_slots() const
+{
+ switch (this->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_BOOL:
+ return this->components();
+
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ return 2 * this->components();
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
+ unsigned size = 0;
+
+ for (unsigned i = 0; i < this->length; i++)
+ size += this->fields.structure[i].type->component_slots();
+
+ return size;
+ }
+
+ case GLSL_TYPE_ARRAY:
+ return this->length * this->fields.array->component_slots();
+
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ return 2;
+
+ case GLSL_TYPE_SUBROUTINE:
+ return 1;
+
+ case GLSL_TYPE_FUNCTION:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ break;
+ }
+
+ return 0;
+}
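+
+/* For instance (illustrative): a mat3 occupies 9 component slots, while a
+ * dvec3 occupies 6 because 64-bit components count twice:
+ *
+ *    assert(glsl_type::mat3_type->component_slots() == 9);
+ *    assert(glsl_type::dvec3_type->component_slots() == 6);
+ */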
+
+unsigned
+glsl_type::struct_location_offset(unsigned length) const
+{
+ unsigned offset = 0;
+ const glsl_type *t = this->without_array();
+ if (t->is_struct()) {
+ assert(length <= t->length);
+
+ for (unsigned i = 0; i < length; i++) {
+ const glsl_type *st = t->fields.structure[i].type;
+ const glsl_type *wa = st->without_array();
+ if (wa->is_struct()) {
+ unsigned r_offset = wa->struct_location_offset(wa->length);
+ offset += st->is_array() ?
+ st->arrays_of_arrays_size() * r_offset : r_offset;
+ } else if (st->is_array() && st->fields.array->is_array()) {
+ unsigned outer_array_size = st->length;
+ const glsl_type *base_type = st->fields.array;
+
+ /* For arrays of arrays the outer arrays take up a uniform
+ * slot for each element. The innermost array elements share a
+ * single slot so we ignore the innermost array when calculating
+ * the offset.
+ */
+ while (base_type->fields.array->is_array()) {
+ outer_array_size = outer_array_size * base_type->length;
+ base_type = base_type->fields.array;
+ }
+ offset += outer_array_size;
+ } else {
+ /* We don't worry about arrays here because unless the array
+ * contains a structure or another array it only takes up a single
+ * uniform slot.
+ */
+ offset += 1;
+ }
+ }
+ }
+ return offset;
+}
+
+unsigned
+glsl_type::uniform_locations() const
+{
+ unsigned size = 0;
+
+ switch (this->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_SUBROUTINE:
+ return 1;
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
+ for (unsigned i = 0; i < this->length; i++)
+ size += this->fields.structure[i].type->uniform_locations();
+ return size;
+ case GLSL_TYPE_ARRAY:
+ return this->length * this->fields.array->uniform_locations();
+ default:
+ return 0;
+ }
+}
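+
+/* Example (illustrative): every element of an array takes its own
+ * location, so a vec2[3] uniform consumes three locations:
+ *
+ *    const glsl_type *arr =
+ *       glsl_type::get_array_instance(glsl_type::vec2_type, 3);
+ *    assert(arr->uniform_locations() == 3);
+ */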
+
+unsigned
+glsl_type::varying_count() const
+{
+ unsigned size = 0;
+
+ switch (this->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ return 1;
+
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
+ for (unsigned i = 0; i < this->length; i++)
+ size += this->fields.structure[i].type->varying_count();
+ return size;
+ case GLSL_TYPE_ARRAY:
+ /* Don't count innermost array elements */
+ if (this->without_array()->is_struct() ||
+ this->without_array()->is_interface() ||
+ this->fields.array->is_array())
+ return this->length * this->fields.array->varying_count();
+ else
+ return this->fields.array->varying_count();
+ default:
+ assert(!"unsupported varying type");
+ return 0;
+ }
+}
+
+bool
+glsl_type::can_implicitly_convert_to(const glsl_type *desired,
+ _mesa_glsl_parse_state *state) const
+{
+ if (this == desired)
+ return true;
+
+ /* GLSL 1.10 and ESSL do not allow implicit conversions. If there is no
+ * state, we're doing intra-stage function linking where these checks have
+ * already been done.
+ */
+ if (state && !state->has_implicit_conversions())
+ return false;
+
+ /* There is no conversion among matrix types. */
+ if (this->matrix_columns > 1 || desired->matrix_columns > 1)
+ return false;
+
+ /* Vector size must match. */
+ if (this->vector_elements != desired->vector_elements)
+ return false;
+
+ /* int and uint can be converted to float. */
+ if (desired->is_float() && this->is_integer_32())
+ return true;
+
+ /* With GLSL 4.0, ARB_gpu_shader5, or MESA_shader_integer_functions, int
+ * can be converted to uint. Note that state may be NULL here, when
+ * resolving function calls in the linker. By this time, all the
+ * state-dependent checks have already happened though, so allow anything
+ * that's allowed in any shader version.
+ */
+ if ((!state || state->has_implicit_uint_to_int_conversion()) &&
+ desired->base_type == GLSL_TYPE_UINT && this->base_type == GLSL_TYPE_INT)
+ return true;
+
+ /* No implicit conversions from double. */
+ if ((!state || state->has_double()) && this->is_double())
+ return false;
+
+ /* Conversions from different types to double. */
+ if ((!state || state->has_double()) && desired->is_double()) {
+ if (this->is_float())
+ return true;
+ if (this->is_integer_32())
+ return true;
+ }
+
+ return false;
+}
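+
+/* Sketch of the rules above with a NULL state, as during linking
+ * (illustrative): int widens to float implicitly, but never the reverse:
+ *
+ *    assert(glsl_type::int_type->can_implicitly_convert_to(
+ *              glsl_type::float_type, NULL));
+ *    assert(!glsl_type::float_type->can_implicitly_convert_to(
+ *              glsl_type::int_type, NULL));
+ */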
+
+unsigned
+glsl_type::std140_base_alignment(bool row_major) const
+{
+ unsigned N = is_64bit() ? 8 : 4;
+
+ /* (1) If the member is a scalar consuming <N> basic machine units, the
+ * base alignment is <N>.
+ *
+ * (2) If the member is a two- or four-component vector with components
+ * consuming <N> basic machine units, the base alignment is 2<N> or
+ * 4<N>, respectively.
+ *
+ * (3) If the member is a three-component vector with components consuming
+ * <N> basic machine units, the base alignment is 4<N>.
+ */
+ if (this->is_scalar() || this->is_vector()) {
+ switch (this->vector_elements) {
+ case 1:
+ return N;
+ case 2:
+ return 2 * N;
+ case 3:
+ case 4:
+ return 4 * N;
+ }
+ }
+
+ /* (4) If the member is an array of scalars or vectors, the base alignment
+ * and array stride are set to match the base alignment of a single
+ * array element, according to rules (1), (2), and (3), and rounded up
+ * to the base alignment of a vec4. The array may have padding at the
+ * end; the base offset of the member following the array is rounded up
+ * to the next multiple of the base alignment.
+ *
+ * (6) If the member is an array of <S> column-major matrices with <C>
+ * columns and <R> rows, the matrix is stored identically to a row of
+ * <S>*<C> column vectors with <R> components each, according to rule
+ * (4).
+ *
+ * (8) If the member is an array of <S> row-major matrices with <C> columns
+ * and <R> rows, the matrix is stored identically to a row of <S>*<R>
+ * row vectors with <C> components each, according to rule (4).
+ *
+ * (10) If the member is an array of <S> structures, the <S> elements of
+ * the array are laid out in order, according to rule (9).
+ */
+ if (this->is_array()) {
+ if (this->fields.array->is_scalar() ||
+ this->fields.array->is_vector() ||
+ this->fields.array->is_matrix()) {
+ return MAX2(this->fields.array->std140_base_alignment(row_major), 16);
+ } else {
+ assert(this->fields.array->is_struct() ||
+ this->fields.array->is_array());
+ return this->fields.array->std140_base_alignment(row_major);
+ }
+ }
+
+ /* (5) If the member is a column-major matrix with <C> columns and
+ * <R> rows, the matrix is stored identically to an array of
+ * <C> column vectors with <R> components each, according to
+ * rule (4).
+ *
+ * (7) If the member is a row-major matrix with <C> columns and <R>
+ * rows, the matrix is stored identically to an array of <R>
+ * row vectors with <C> components each, according to rule (4).
+ */
+ if (this->is_matrix()) {
+ const struct glsl_type *vec_type, *array_type;
+ int c = this->matrix_columns;
+ int r = this->vector_elements;
+
+ if (row_major) {
+ vec_type = get_instance(base_type, c, 1);
+ array_type = glsl_type::get_array_instance(vec_type, r);
+ } else {
+ vec_type = get_instance(base_type, r, 1);
+ array_type = glsl_type::get_array_instance(vec_type, c);
+ }
+
+ return array_type->std140_base_alignment(false);
+ }
+
+ /* (9) If the member is a structure, the base alignment of the
+ * structure is <N>, where <N> is the largest base alignment
+ * value of any of its members, and rounded up to the base
+ * alignment of a vec4. The individual members of this
+ * sub-structure are then assigned offsets by applying this set
+ * of rules recursively, where the base offset of the first
+ * member of the sub-structure is equal to the aligned offset
+ * of the structure. The structure may have padding at the end;
+ * the base offset of the member following the sub-structure is
+ * rounded up to the next multiple of the base alignment of the
+ * structure.
+ */
+ if (this->is_struct()) {
+ unsigned base_alignment = 16;
+ for (unsigned i = 0; i < this->length; i++) {
+ bool field_row_major = row_major;
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(this->fields.structure[i].matrix_layout);
+ if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ }
+
+ const struct glsl_type *field_type = this->fields.structure[i].type;
+ base_alignment = MAX2(base_alignment,
+ field_type->std140_base_alignment(field_row_major));
+ }
+ return base_alignment;
+ }
+
+ assert(!"not reached");
+ return -1;
+}
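+
+/* Worked std140 examples (illustrative): rule (3) aligns a vec3 like a
+ * vec4, and rule (4) rounds a float array's element alignment up to 16:
+ *
+ *    assert(glsl_type::vec3_type->std140_base_alignment(false) == 16);
+ *    const glsl_type *arr =
+ *       glsl_type::get_array_instance(glsl_type::float_type, 4);
+ *    assert(arr->std140_base_alignment(false) == 16);
+ */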
+
+unsigned
+glsl_type::std140_size(bool row_major) const
+{
+ unsigned N = is_64bit() ? 8 : 4;
+
+ /* (1) If the member is a scalar consuming <N> basic machine units, the
+ * base alignment is <N>.
+ *
+ * (2) If the member is a two- or four-component vector with components
+ * consuming <N> basic machine units, the base alignment is 2<N> or
+ * 4<N>, respectively.
+ *
+ * (3) If the member is a three-component vector with components consuming
+ * <N> basic machine units, the base alignment is 4<N>.
+ */
+ if (this->is_scalar() || this->is_vector()) {
+ assert(this->explicit_stride == 0);
+ return this->vector_elements * N;
+ }
+
+ /* (5) If the member is a column-major matrix with <C> columns and
+ * <R> rows, the matrix is stored identically to an array of
+ * <C> column vectors with <R> components each, according to
+ * rule (4).
+ *
+ * (6) If the member is an array of <S> column-major matrices with <C>
+ * columns and <R> rows, the matrix is stored identically to a row of
+ * <S>*<C> column vectors with <R> components each, according to rule
+ * (4).
+ *
+ * (7) If the member is a row-major matrix with <C> columns and <R>
+ * rows, the matrix is stored identically to an array of <R>
+ * row vectors with <C> components each, according to rule (4).
+ *
+ * (8) If the member is an array of <S> row-major matrices with <C> columns
+ * and <R> rows, the matrix is stored identically to a row of <S>*<R>
+ * row vectors with <C> components each, according to rule (4).
+ */
+ if (this->without_array()->is_matrix()) {
+ const struct glsl_type *element_type;
+ const struct glsl_type *vec_type;
+ unsigned int array_len;
+
+ if (this->is_array()) {
+ element_type = this->without_array();
+ array_len = this->arrays_of_arrays_size();
+ } else {
+ element_type = this;
+ array_len = 1;
+ }
+
+ if (row_major) {
+ vec_type = get_instance(element_type->base_type,
+ element_type->matrix_columns, 1);
+
+ array_len *= element_type->vector_elements;
+ } else {
+ vec_type = get_instance(element_type->base_type,
+ element_type->vector_elements, 1);
+ array_len *= element_type->matrix_columns;
+ }
+ const glsl_type *array_type = glsl_type::get_array_instance(vec_type,
+ array_len);
+
+ return array_type->std140_size(false);
+ }
+
+ /* (4) If the member is an array of scalars or vectors, the base alignment
+ * and array stride are set to match the base alignment of a single
+ * array element, according to rules (1), (2), and (3), and rounded up
+ * to the base alignment of a vec4. The array may have padding at the
+ * end; the base offset of the member following the array is rounded up
+ * to the next multiple of the base alignment.
+ *
+ * (10) If the member is an array of <S> structures, the <S> elements of
+ * the array are laid out in order, according to rule (9).
+ */
+ if (this->is_array()) {
+ unsigned stride;
+ if (this->without_array()->is_struct()) {
+ stride = this->without_array()->std140_size(row_major);
+ } else {
+ unsigned element_base_align =
+ this->without_array()->std140_base_alignment(row_major);
+ stride = MAX2(element_base_align, 16);
+ }
+
+ unsigned size = this->arrays_of_arrays_size() * stride;
+ assert(this->explicit_stride == 0 ||
+ size == this->length * this->explicit_stride);
+ return size;
+ }
+
+ /* (9) If the member is a structure, the base alignment of the
+ * structure is <N>, where <N> is the largest base alignment
+ * value of any of its members, and rounded up to the base
+ * alignment of a vec4. The individual members of this
+ * sub-structure are then assigned offsets by applying this set
+ * of rules recursively, where the base offset of the first
+ * member of the sub-structure is equal to the aligned offset
+ * of the structure. The structure may have padding at the end;
+ * the base offset of the member following the sub-structure is
+ * rounded up to the next multiple of the base alignment of the
+ * structure.
+ */
+ if (this->is_struct() || this->is_interface()) {
+ unsigned size = 0;
+ unsigned max_align = 0;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ bool field_row_major = row_major;
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(this->fields.structure[i].matrix_layout);
+ if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ }
+
+ const struct glsl_type *field_type = this->fields.structure[i].type;
+ unsigned align = field_type->std140_base_alignment(field_row_major);
+
+ /* Ignore unsized arrays when calculating size */
+ if (field_type->is_unsized_array())
+ continue;
+
+ size = glsl_align(size, align);
+ size += field_type->std140_size(field_row_major);
+
+ max_align = MAX2(align, max_align);
+
+ if (field_type->is_struct() && (i + 1 < this->length))
+ size = glsl_align(size, 16);
+ }
+ size = glsl_align(size, MAX2(max_align, 16));
+ return size;
+ }
+
+ assert(!"not reached");
+ return -1;
+}
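+
+/* Continuing the example above (illustrative): each float element of a
+ * float[4] occupies a 16-byte stride under std140, so the array spans
+ * 64 bytes:
+ *
+ *    const glsl_type *arr =
+ *       glsl_type::get_array_instance(glsl_type::float_type, 4);
+ *    assert(arr->std140_size(false) == 64);
+ */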
+
+const glsl_type *
+glsl_type::get_explicit_std140_type(bool row_major) const
+{
+ if (this->is_vector() || this->is_scalar()) {
+ return this;
+ } else if (this->is_matrix()) {
+ const glsl_type *vec_type;
+ if (row_major)
+ vec_type = get_instance(this->base_type, this->matrix_columns, 1);
+ else
+ vec_type = get_instance(this->base_type, this->vector_elements, 1);
+ unsigned elem_size = vec_type->std140_size(false);
+ unsigned stride = glsl_align(elem_size, 16);
+ return get_instance(this->base_type, this->vector_elements,
+ this->matrix_columns, stride, row_major);
+ } else if (this->is_array()) {
+ unsigned elem_size = this->fields.array->std140_size(row_major);
+ const glsl_type *elem_type =
+ this->fields.array->get_explicit_std140_type(row_major);
+ unsigned stride = glsl_align(elem_size, 16);
+ return get_array_instance(elem_type, this->length, stride);
+ } else if (this->is_struct() || this->is_interface()) {
+ glsl_struct_field *fields = new glsl_struct_field[this->length];
+ unsigned offset = 0;
+ for (unsigned i = 0; i < length; i++) {
+ fields[i] = this->fields.structure[i];
+
+ bool field_row_major = row_major;
+ if (fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ } else if (fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ }
+ fields[i].type =
+ fields[i].type->get_explicit_std140_type(field_row_major);
+
+ unsigned fsize = fields[i].type->std140_size(field_row_major);
+ unsigned falign = fields[i].type->std140_base_alignment(field_row_major);
+ /* From the GLSL 460 spec section "Uniform and Shader Storage Block
+ * Layout Qualifiers":
+ *
+ * "The actual offset of a member is computed as follows: If
+ * offset was declared, start with that offset, otherwise start
+ * with the next available offset. If the resulting offset is not
+ * a multiple of the actual alignment, increase it to the first
+ * offset that is a multiple of the actual alignment. This results
+ * in the actual offset the member will have."
+ */
+ if (fields[i].offset >= 0) {
+ assert((unsigned)fields[i].offset >= offset);
+ offset = fields[i].offset;
+ }
+ offset = glsl_align(offset, falign);
+ fields[i].offset = offset;
+ offset += fsize;
+ }
+
+ const glsl_type *type;
+ if (this->is_struct())
+ type = get_struct_instance(fields, this->length, this->name);
+ else
+ type = get_interface_instance(fields, this->length,
+ (enum glsl_interface_packing)this->interface_packing,
+ this->interface_row_major,
+ this->name);
+
+ delete[] fields;
+ return type;
+ } else {
+ unreachable("Invalid type for UBO or SSBO");
+ }
+}
+
+unsigned
+glsl_type::std430_base_alignment(bool row_major) const
+{
+ unsigned N = is_64bit() ? 8 : 4;
+
+ /* (1) If the member is a scalar consuming <N> basic machine units, the
+ * base alignment is <N>.
+ *
+ * (2) If the member is a two- or four-component vector with components
+ * consuming <N> basic machine units, the base alignment is 2<N> or
+ * 4<N>, respectively.
+ *
+ * (3) If the member is a three-component vector with components consuming
+ * <N> basic machine units, the base alignment is 4<N>.
+ */
+ if (this->is_scalar() || this->is_vector()) {
+ switch (this->vector_elements) {
+ case 1:
+ return N;
+ case 2:
+ return 2 * N;
+ case 3:
+ case 4:
+ return 4 * N;
+ }
+ }
+
+ /* OpenGL 4.30 spec, section 7.6.2.2 "Standard Uniform Block Layout":
+ *
+ * "When using the std430 storage layout, shader storage blocks will be
+ * laid out in buffer storage identically to uniform and shader storage
+ * blocks using the std140 layout, except that the base alignment and
+ * stride of arrays of scalars and vectors in rule 4 and of structures
+ * in rule 9 are not rounded up a multiple of the base alignment of a vec4.
+ */
+
+ /* (4) If the member is an array of scalars or vectors, the base alignment
+ * and array stride are set to match the base alignment of a single
+ * array element, according to rules (1), (2), and (3). Unlike std140,
+ * the result is not rounded up to the base alignment of a vec4.
+ */
+ if (this->is_array())
+ return this->fields.array->std430_base_alignment(row_major);
+
+ /* (5) If the member is a column-major matrix with <C> columns and
+ * <R> rows, the matrix is stored identically to an array of
+ * <C> column vectors with <R> components each, according to
+ * rule (4).
+ *
+ * (7) If the member is a row-major matrix with <C> columns and <R>
+ * rows, the matrix is stored identically to an array of <R>
+ * row vectors with <C> components each, according to rule (4).
+ */
+ if (this->is_matrix()) {
+ const struct glsl_type *vec_type, *array_type;
+ int c = this->matrix_columns;
+ int r = this->vector_elements;
+
+ if (row_major) {
+ vec_type = get_instance(base_type, c, 1);
+ array_type = glsl_type::get_array_instance(vec_type, r);
+ } else {
+ vec_type = get_instance(base_type, r, 1);
+ array_type = glsl_type::get_array_instance(vec_type, c);
+ }
+
+ return array_type->std430_base_alignment(false);
+ }
+
+ /* (9) If the member is a structure, the base alignment of the
+ * structure is <N>, where <N> is the largest base alignment
+ * value of any of its members, and rounded up to the base
+ * alignment of a vec4. The individual members of this
+ * sub-structure are then assigned offsets by applying this set
+ * of rules recursively, where the base offset of the first
+ * member of the sub-structure is equal to the aligned offset
+ * of the structure. The structure may have padding at the end;
+ * the base offset of the member following the sub-structure is
+ * rounded up to the next multiple of the base alignment of the
+ * structure.
+ */
+ if (this->is_struct()) {
+ unsigned base_alignment = 0;
+ for (unsigned i = 0; i < this->length; i++) {
+ bool field_row_major = row_major;
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(this->fields.structure[i].matrix_layout);
+ if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ }
+
+ const struct glsl_type *field_type = this->fields.structure[i].type;
+ base_alignment = MAX2(base_alignment,
+ field_type->std430_base_alignment(field_row_major));
+ }
+ assert(base_alignment > 0);
+ return base_alignment;
+ }
+ assert(!"not reached");
+ return -1;
+}
+
+unsigned
+glsl_type::std430_array_stride(bool row_major) const
+{
+ unsigned N = is_64bit() ? 8 : 4;
+
+ /* Notice that the array stride of a vec3 is not 3 * N but 4 * N.
+ * See OpenGL 4.30 spec, section 7.6.2.2 "Standard Uniform Block Layout"
+ *
+ * (3) If the member is a three-component vector with components consuming
+ * <N> basic machine units, the base alignment is 4<N>.
+ */
+ if (this->is_vector() && this->vector_elements == 3)
+ return 4 * N;
+
+ /* By default use std430_size(row_major) */
+ unsigned stride = this->std430_size(row_major);
+ assert(this->explicit_stride == 0 || this->explicit_stride == stride);
+ return stride;
+}
+
+/* Note that the value returned by this method is only correct if the
+ * explicit offset and stride values are set, so only with SPIR-V shaders.
+ * Should not be used with GLSL shaders.
+ */
+
+unsigned
+glsl_type::explicit_size(bool align_to_stride) const
+{
+ if (this->is_struct() || this->is_interface()) {
+ if (this->length > 0) {
+ unsigned size = 0;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ assert(this->fields.structure[i].offset >= 0);
+ unsigned last_byte = this->fields.structure[i].offset +
+ this->fields.structure[i].type->explicit_size();
+ size = MAX2(size, last_byte);
+ }
+
+ return size;
+ } else {
+ return 0;
+ }
+ } else if (this->is_array()) {
+ /* From ARB_program_interface_query spec:
+ *
+ * "For the property of BUFFER_DATA_SIZE, then the implementation-dependent
+ * minimum total buffer object size, in basic machine units, required to
+ * hold all active variables associated with an active uniform block, shader
+ * storage block, or atomic counter buffer is written to <params>. If the
+ * final member of an active shader storage block is array with no declared
+ * size, the minimum buffer size is computed assuming the array was declared
+ * as an array with one element."
+ *
+ */
+ if (this->is_unsized_array())
+ return this->explicit_stride;
+
+ assert(this->length > 0);
+ unsigned elem_size = align_to_stride ? this->explicit_stride : this->fields.array->explicit_size();
+ assert(this->explicit_stride >= elem_size);
+
+ return this->explicit_stride * (this->length - 1) + elem_size;
+ } else if (this->is_matrix()) {
+ const struct glsl_type *elem_type;
+ unsigned length;
+
+ if (this->interface_row_major) {
+ elem_type = get_instance(this->base_type,
+ this->matrix_columns, 1);
+ length = this->vector_elements;
+ } else {
+ elem_type = get_instance(this->base_type,
+ this->vector_elements, 1);
+ length = this->matrix_columns;
+ }
+
+ unsigned elem_size = align_to_stride ? this->explicit_stride : elem_type->explicit_size();
+
+ assert(this->explicit_stride);
+ return this->explicit_stride * (length - 1) + elem_size;
+ }
+
+ unsigned N = this->bit_size() / 8;
+
+ return this->vector_elements * N;
+}
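+
+/* Sizing sketch (illustrative; assumes a SPIR-V-style array with an
+ * explicit 16-byte stride): the trailing element is not padded out to the
+ * full stride, giving stride * (len - 1) + elem_size:
+ *
+ *    const glsl_type *arr =
+ *       glsl_type::get_array_instance(glsl_type::float_type, 4, 16);
+ *    assert(arr->explicit_size() == 16 * 3 + 4);  // 52, not 64
+ */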
+
+unsigned
+glsl_type::std430_size(bool row_major) const
+{
+ unsigned N = is_64bit() ? 8 : 4;
+
+ /* OpenGL 4.30 spec, section 7.6.2.2 "Standard Uniform Block Layout":
+ *
+ * "When using the std430 storage layout, shader storage blocks will be
+ * laid out in buffer storage identically to uniform and shader storage
+ * blocks using the std140 layout, except that the base alignment and
+ * stride of arrays of scalars and vectors in rule 4 and of structures
+ * in rule 9 are not rounded up a multiple of the base alignment of a vec4.
+ */
+ if (this->is_scalar() || this->is_vector()) {
+ assert(this->explicit_stride == 0);
+ return this->vector_elements * N;
+ }
+
+ if (this->without_array()->is_matrix()) {
+ const struct glsl_type *element_type;
+ const struct glsl_type *vec_type;
+ unsigned int array_len;
+
+ if (this->is_array()) {
+ element_type = this->without_array();
+ array_len = this->arrays_of_arrays_size();
+ } else {
+ element_type = this;
+ array_len = 1;
+ }
+
+ if (row_major) {
+ vec_type = get_instance(element_type->base_type,
+ element_type->matrix_columns, 1);
+
+ array_len *= element_type->vector_elements;
+ } else {
+ vec_type = get_instance(element_type->base_type,
+ element_type->vector_elements, 1);
+ array_len *= element_type->matrix_columns;
+ }
+ const glsl_type *array_type = glsl_type::get_array_instance(vec_type,
+ array_len);
+
+ return array_type->std430_size(false);
+ }
+
+ if (this->is_array()) {
+ unsigned stride;
+ if (this->without_array()->is_struct())
+ stride = this->without_array()->std430_size(row_major);
+ else
+ stride = this->without_array()->std430_base_alignment(row_major);
+
+ unsigned size = this->arrays_of_arrays_size() * stride;
+ assert(this->explicit_stride == 0 ||
+ size == this->length * this->explicit_stride);
+ return size;
+ }
+
+ if (this->is_struct() || this->is_interface()) {
+ unsigned size = 0;
+ unsigned max_align = 0;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ bool field_row_major = row_major;
+ const enum glsl_matrix_layout matrix_layout =
+ glsl_matrix_layout(this->fields.structure[i].matrix_layout);
+ if (matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ } else if (matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ }
+
+ const struct glsl_type *field_type = this->fields.structure[i].type;
+ unsigned align = field_type->std430_base_alignment(field_row_major);
+ size = glsl_align(size, align);
+ size += field_type->std430_size(field_row_major);
+
+ max_align = MAX2(align, max_align);
+ }
+ size = glsl_align(size, max_align);
+ return size;
+ }
+
+ assert(!"not reached");
+ return -1;
+}
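+
+/* Contrast with std140 (illustrative): the same float[4] packs tightly
+ * under std430 because rule (4) no longer rounds the array stride up to
+ * vec4 alignment:
+ *
+ *    const glsl_type *arr =
+ *       glsl_type::get_array_instance(glsl_type::float_type, 4);
+ *    assert(arr->std430_size(false) == 16);  // stride 4, vs. 64 in std140
+ */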
+
+const glsl_type *
+glsl_type::get_explicit_std430_type(bool row_major) const
+{
+ if (this->is_vector() || this->is_scalar()) {
+ return this;
+ } else if (this->is_matrix()) {
+ const glsl_type *vec_type;
+ if (row_major)
+ vec_type = get_instance(this->base_type, this->matrix_columns, 1);
+ else
+ vec_type = get_instance(this->base_type, this->vector_elements, 1);
+ unsigned stride = vec_type->std430_array_stride(false);
+ return get_instance(this->base_type, this->vector_elements,
+ this->matrix_columns, stride, row_major);
+ } else if (this->is_array()) {
+ const glsl_type *elem_type =
+ this->fields.array->get_explicit_std430_type(row_major);
+ unsigned stride = this->fields.array->std430_array_stride(row_major);
+ return get_array_instance(elem_type, this->length, stride);
+ } else if (this->is_struct() || this->is_interface()) {
+ glsl_struct_field *fields = new glsl_struct_field[this->length];
+ unsigned offset = 0;
+ for (unsigned i = 0; i < length; i++) {
+ fields[i] = this->fields.structure[i];
+
+ bool field_row_major = row_major;
+ if (fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_COLUMN_MAJOR) {
+ field_row_major = false;
+ } else if (fields[i].matrix_layout == GLSL_MATRIX_LAYOUT_ROW_MAJOR) {
+ field_row_major = true;
+ }
+ fields[i].type =
+ fields[i].type->get_explicit_std430_type(field_row_major);
+
+ unsigned fsize = fields[i].type->std430_size(field_row_major);
+ unsigned falign = fields[i].type->std430_base_alignment(field_row_major);
+ /* From the GLSL 460 spec section "Uniform and Shader Storage Block
+ * Layout Qualifiers":
+ *
+ * "The actual offset of a member is computed as follows: If
+ * offset was declared, start with that offset, otherwise start
+ * with the next available offset. If the resulting offset is not
+ * a multiple of the actual alignment, increase it to the first
+ * offset that is a multiple of the actual alignment. This results
+ * in the actual offset the member will have."
+ */
+ if (fields[i].offset >= 0) {
+ assert((unsigned)fields[i].offset >= offset);
+ offset = fields[i].offset;
+ }
+ offset = glsl_align(offset, falign);
+ fields[i].offset = offset;
+ offset += fsize;
+ }
+
+ const glsl_type *type;
+ if (this->is_struct())
+ type = get_struct_instance(fields, this->length, this->name);
+ else
+ type = get_interface_instance(fields, this->length,
+ (enum glsl_interface_packing)this->interface_packing,
+ this->interface_row_major,
+ this->name);
+
+ delete[] fields;
+ return type;
+ } else {
+ unreachable("Invalid type for SSBO");
+ }
+}
+
+const glsl_type *
+glsl_type::get_explicit_interface_type(bool supports_std430) const
+{
+ enum glsl_interface_packing packing =
+ this->get_internal_ifc_packing(supports_std430);
+ if (packing == GLSL_INTERFACE_PACKING_STD140) {
+ return this->get_explicit_std140_type(this->interface_row_major);
+ } else {
+ assert(packing == GLSL_INTERFACE_PACKING_STD430);
+ return this->get_explicit_std430_type(this->interface_row_major);
+ }
+}
+
+/* This differs from get_explicit_std430_type() in that it:
+ * - can size arrays slightly smaller ("stride * (len - 1) + elem_size" instead
+ * of "stride * len")
+ * - consumes a glsl_type_size_align_func which allows 8 and 16-bit values to be
+ * packed more tightly
+ * - overrides any struct field offsets, whereas get_explicit_std430_type()
+ * tries to respect any existing ones
+ */
+const glsl_type *
+glsl_type::get_explicit_type_for_size_align(glsl_type_size_align_func type_info,
+ unsigned *size, unsigned *alignment) const
+{
+ if (this->is_scalar() || this->is_vector()) {
+ type_info(this, size, alignment);
+ return this;
+ } else if (this->is_array()) {
+ unsigned elem_size, elem_align;
+ const struct glsl_type *explicit_element =
+ this->fields.array->get_explicit_type_for_size_align(type_info, &elem_size, &elem_align);
+
+ unsigned stride = align(elem_size, elem_align);
+
+ *size = stride * (this->length - 1) + elem_size;
+ *alignment = elem_align;
+ return glsl_type::get_array_instance(explicit_element, this->length, stride);
+ } else if (this->is_struct()) {
+ struct glsl_struct_field *fields = (struct glsl_struct_field *)
+ malloc(sizeof(struct glsl_struct_field) * this->length);
+
+ *size = 0;
+ *alignment = 0;
+ for (unsigned i = 0; i < this->length; i++) {
+ fields[i] = this->fields.structure[i];
+ assert(fields[i].matrix_layout != GLSL_MATRIX_LAYOUT_ROW_MAJOR);
+
+ unsigned field_size, field_align;
+ fields[i].type =
+ fields[i].type->get_explicit_type_for_size_align(type_info, &field_size, &field_align);
+ fields[i].offset = align(*size, field_align);
+
+ *size = fields[i].offset + field_size;
+ *alignment = MAX2(*alignment, field_align);
+ }
+
+ const glsl_type *type = glsl_type::get_struct_instance(fields, this->length, this->name, false);
+ free(fields);
+ return type;
+ } else if (this->is_matrix()) {
+ unsigned col_size, col_align;
+ type_info(this->column_type(), &col_size, &col_align);
+ unsigned stride = align(col_size, col_align);
+
+ *size = this->matrix_columns * stride;
+ *alignment = col_align;
+ return glsl_type::get_instance(this->base_type, this->vector_elements,
+ this->matrix_columns, stride, false);
+ } else {
+ unreachable("Unhandled type.");
+ }
+}
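+
+/* Usage sketch with a hypothetical callback (names are illustrative): a
+ * driver supplies its own scalar/vector packing rules and receives a type
+ * annotated with explicit offsets and strides:
+ *
+ *    static void
+ *    tight_pack(const glsl_type *t, unsigned *size, unsigned *align)
+ *    {
+ *       *align = t->bit_size() / 8;          // natural scalar alignment
+ *       *size = t->components() * (*align);  // no vec4 round-up
+ *    }
+ *
+ *    unsigned sz, al;
+ *    const glsl_type *packed =
+ *       some_struct_type->get_explicit_type_for_size_align(tight_pack,
+ *                                                          &sz, &al);
+ */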
+
+unsigned
+glsl_type::count_vec4_slots(bool is_gl_vertex_input, bool is_bindless) const
+{
+ /* From page 31 (page 37 of the PDF) of the GLSL 1.50 spec:
+ *
+ * "A scalar input counts the same amount against this limit as a vec4,
+ * so applications may want to consider packing groups of four
+ * unrelated float inputs together into a vector to better utilize the
+ * capabilities of the underlying hardware. A matrix input will use up
+ * multiple locations. The number of locations used will equal the
+ * number of columns in the matrix."
+ *
+ * The spec does not explicitly say how arrays are counted. However, it
+ * should be safe to assume the total number of slots consumed by an array
+ * is the number of entries in the array multiplied by the number of slots
+ * consumed by a single element of the array.
+ *
+ * The spec says nothing about how structs are counted, because vertex
+ * attributes are not allowed to be (or contain) structs. However, Mesa
+ * allows varying structs; the number of varying slots taken up by a
+ * varying struct is simply the sum of the number of slots taken up by
+ * each member.
+ *
+ * Doubles are counted differently for vertex inputs than for everything
+ * else. Vertex inputs from ARB_vertex_attrib_64bit take one location no
+ * matter what size they are; otherwise dvec3 and dvec4 take two
+ * locations.
+ */
+ switch (this->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_BOOL:
+ return this->matrix_columns;
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ if (this->vector_elements > 2 && !is_gl_vertex_input)
+ return this->matrix_columns * 2;
+ else
+ return this->matrix_columns;
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
+ unsigned size = 0;
+
+ for (unsigned i = 0; i < this->length; i++) {
+ const glsl_type *member_type = this->fields.structure[i].type;
+ size += member_type->count_vec4_slots(is_gl_vertex_input, is_bindless);
+ }
+
+ return size;
+ }
+
+ case GLSL_TYPE_ARRAY: {
+ const glsl_type *element = this->fields.array;
+ return this->length * element->count_vec4_slots(is_gl_vertex_input,
+ is_bindless);
+ }
+
+ case GLSL_TYPE_SAMPLER:
+ case GLSL_TYPE_IMAGE:
+ if (!is_bindless)
+ return 0;
+ else
+ return 1;
+
+ case GLSL_TYPE_SUBROUTINE:
+ return 1;
+
+ case GLSL_TYPE_FUNCTION:
+ case GLSL_TYPE_ATOMIC_UINT:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ break;
+ }
+
+ assert(!"Unexpected type in count_attribute_slots()");
+
+ return 0;
+}
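+
+/* Example of the double-precision rule above (illustrative): a dvec4
+ * varying needs two vec4 slots, but as a vertex input it needs only one:
+ *
+ *    assert(glsl_type::dvec4_type->count_vec4_slots(false, false) == 2);
+ *    assert(glsl_type::dvec4_type->count_vec4_slots(true, false) == 1);
+ */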
+
+unsigned
+glsl_type::count_dword_slots(bool is_bindless) const
+{
+ switch (this->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_BOOL:
+ return this->components();
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_FLOAT16:
+ return DIV_ROUND_UP(this->components(), 2);
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ return DIV_ROUND_UP(this->components(), 4);
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_SAMPLER:
+ if (!is_bindless)
+ return 0;
+ /* FALLTHROUGH */
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ return this->components() * 2;
+ case GLSL_TYPE_ARRAY:
+ return this->fields.array->count_dword_slots(is_bindless) *
+ this->length;
+
+ case GLSL_TYPE_INTERFACE:
+ case GLSL_TYPE_STRUCT: {
+ unsigned size = 0;
+ for (unsigned i = 0; i < this->length; i++) {
+ size += this->fields.structure[i].type->count_dword_slots(is_bindless);
+ }
+ return size;
+ }
+
+ case GLSL_TYPE_ATOMIC_UINT:
+ return 0;
+ case GLSL_TYPE_SUBROUTINE:
+ return 1;
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_FUNCTION:
+ default:
+ unreachable("invalid type in st_glsl_type_dword_size()");
+ }
+
+ return 0;
+}
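+
+/* Examples (illustrative): opaque types take no dwords unless bindless,
+ * while 64-bit components take two dwords each:
+ *
+ *    assert(glsl_type::sampler2D_type->count_dword_slots(false) == 0);
+ *    assert(glsl_type::dvec2_type->count_dword_slots(false) == 4);
+ */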
+
+int
+glsl_type::coordinate_components() const
+{
+ enum glsl_sampler_dim dim = (enum glsl_sampler_dim)sampler_dimensionality;
+ int size = glsl_get_sampler_dim_coordinate_components(dim);
+
+ /* Array textures need an additional component for the array index, except
+ * for cubemap array images that behave like a 2D array of interleaved
+ * cubemap faces.
+ */
+ if (sampler_array &&
+ !(is_image() && sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE))
+ size += 1;
+
+ return size;
+}
+
+/**
+ * Declarations of type flyweights (glsl_type::_foo_type) and
+ * convenience pointers (glsl_type::foo_type).
+ * @{
+ */
+#define DECL_TYPE(NAME, ...) \
+ const glsl_type glsl_type::_##NAME##_type = glsl_type(__VA_ARGS__, #NAME); \
+ const glsl_type *const glsl_type::NAME##_type = &glsl_type::_##NAME##_type;
+
+#define STRUCT_TYPE(NAME)
+
+#include "compiler/builtin_type_macros.h"
+/** @} */
+
+static void
+get_struct_type_field_and_pointer_sizes(size_t *s_field_size,
+ size_t *s_field_ptrs)
+{
+ *s_field_size = sizeof(glsl_struct_field);
+ *s_field_ptrs =
+ sizeof(((glsl_struct_field *)0)->type) +
+ sizeof(((glsl_struct_field *)0)->name);
+}
+
+union packed_type {
+ uint32_t u32;
+ struct {
+ unsigned base_type:5;
+ unsigned interface_row_major:1;
+ unsigned vector_elements:3;
+ unsigned matrix_columns:3;
+ unsigned explicit_stride:20;
+ } basic;
+ struct {
+ unsigned base_type:5;
+ unsigned dimensionality:4;
+ unsigned shadow:1;
+ unsigned array:1;
+ unsigned sampled_type:2;
+ unsigned _pad:19;
+ } sampler;
+ struct {
+ unsigned base_type:5;
+ unsigned length:13;
+ unsigned explicit_stride:14;
+ } array;
+ struct {
+ unsigned base_type:5;
+ unsigned interface_packing_or_packed:2;
+ unsigned interface_row_major:1;
+ unsigned length:24;
+ } strct;
+};
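+
+/* Encoding sketch (illustrative; assumes the compiler packs these
+ * bit-fields LSB-first into one 32-bit word): a plain vec4 fits entirely
+ * in the packed word, so no extra uint32s follow it in the blob:
+ *
+ *    union packed_type p;
+ *    p.u32 = 0;
+ *    p.basic.base_type = GLSL_TYPE_FLOAT;
+ *    p.basic.vector_elements = 4;
+ *    p.basic.matrix_columns = 1;
+ */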
+
+void
+encode_type_to_blob(struct blob *blob, const glsl_type *type)
+{
+ if (!type) {
+ blob_write_uint32(blob, 0);
+ return;
+ }
+
+ STATIC_ASSERT(sizeof(union packed_type) == 4);
+ union packed_type encoded;
+ encoded.u32 = 0;
+ encoded.basic.base_type = type->base_type;
+
+ switch (type->base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_BOOL:
+ encoded.basic.interface_row_major = type->interface_row_major;
+ assert(type->matrix_columns < 8);
+ if (type->vector_elements <= 4)
+ encoded.basic.vector_elements = type->vector_elements;
+ else if (type->vector_elements == 8)
+ encoded.basic.vector_elements = 5;
+ else if (type->vector_elements == 16)
+ encoded.basic.vector_elements = 6;
+ encoded.basic.matrix_columns = type->matrix_columns;
+ encoded.basic.explicit_stride = MIN2(type->explicit_stride, 0xfffff);
+ blob_write_uint32(blob, encoded.u32);
+ /* If we don't have enough bits for explicit_stride, store it
+ * separately.
+ */
+ if (encoded.basic.explicit_stride == 0xfffff)
+ blob_write_uint32(blob, type->explicit_stride);
+ return;
+ case GLSL_TYPE_SAMPLER:
+ encoded.sampler.dimensionality = type->sampler_dimensionality;
+ encoded.sampler.shadow = type->sampler_shadow;
+ encoded.sampler.array = type->sampler_array;
+ encoded.sampler.sampled_type = type->sampled_type;
+ break;
+ case GLSL_TYPE_SUBROUTINE:
+ blob_write_uint32(blob, encoded.u32);
+ blob_write_string(blob, type->name);
+ return;
+ case GLSL_TYPE_IMAGE:
+ encoded.sampler.dimensionality = type->sampler_dimensionality;
+ encoded.sampler.array = type->sampler_array;
+ encoded.sampler.sampled_type = type->sampled_type;
+ break;
+ case GLSL_TYPE_ATOMIC_UINT:
+ break;
+ case GLSL_TYPE_ARRAY:
+ encoded.array.length = MIN2(type->length, 0x1fff);
+ encoded.array.explicit_stride = MIN2(type->explicit_stride, 0x3fff);
+ blob_write_uint32(blob, encoded.u32);
+ /* If we don't have enough bits for length or explicit_stride, store it
+ * separately.
+ */
+ if (encoded.array.length == 0x1fff)
+ blob_write_uint32(blob, type->length);
+ if (encoded.array.explicit_stride == 0x3fff)
+ blob_write_uint32(blob, type->explicit_stride);
+ encode_type_to_blob(blob, type->fields.array);
+ return;
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE:
+ encoded.strct.length = MIN2(type->length, 0xffffff);
+ if (type->is_interface()) {
+ encoded.strct.interface_packing_or_packed = type->interface_packing;
+ encoded.strct.interface_row_major = type->interface_row_major;
+ } else {
+ encoded.strct.interface_packing_or_packed = type->packed;
+ }
+ blob_write_uint32(blob, encoded.u32);
+ blob_write_string(blob, type->name);
+
+ /* If we don't have enough bits for length, store it separately. */
+ if (encoded.strct.length == 0xffffff)
+ blob_write_uint32(blob, type->length);
+
+ size_t s_field_size, s_field_ptrs;
+ get_struct_type_field_and_pointer_sizes(&s_field_size, &s_field_ptrs);
+
+ for (unsigned i = 0; i < type->length; i++) {
+ encode_type_to_blob(blob, type->fields.structure[i].type);
+ blob_write_string(blob, type->fields.structure[i].name);
+
+ /* Write the struct field skipping the pointers */
+ blob_write_bytes(blob,
+ ((char *)&type->fields.structure[i]) + s_field_ptrs,
+ s_field_size - s_field_ptrs);
+ }
+ return;
+ case GLSL_TYPE_VOID:
+ break;
+ case GLSL_TYPE_ERROR:
+ default:
+ assert(!"Cannot encode type!");
+ encoded.u32 = 0;
+ break;
+ }
+
+ blob_write_uint32(blob, encoded.u32);
+}
+
+const glsl_type *
+decode_type_from_blob(struct blob_reader *blob)
+{
+ union packed_type encoded;
+ encoded.u32 = blob_read_uint32(blob);
+
+ if (encoded.u32 == 0) {
+ return NULL;
+ }
+
+ glsl_base_type base_type = (glsl_base_type)encoded.basic.base_type;
+
+ switch (base_type) {
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_FLOAT:
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_BOOL: {
+ unsigned explicit_stride = encoded.basic.explicit_stride;
+ if (explicit_stride == 0xfffff)
+ explicit_stride = blob_read_uint32(blob);
+ uint32_t vector_elements = encoded.basic.vector_elements;
+ if (vector_elements == 5)
+ vector_elements = 8;
+ else if (vector_elements == 6)
+ vector_elements = 16;
+ return glsl_type::get_instance(base_type, vector_elements,
+ encoded.basic.matrix_columns,
+ explicit_stride,
+ encoded.basic.interface_row_major);
+ }
+ case GLSL_TYPE_SAMPLER:
+ return glsl_type::get_sampler_instance((enum glsl_sampler_dim)encoded.sampler.dimensionality,
+ encoded.sampler.shadow,
+ encoded.sampler.array,
+ (glsl_base_type) encoded.sampler.sampled_type);
+ case GLSL_TYPE_SUBROUTINE:
+ return glsl_type::get_subroutine_instance(blob_read_string(blob));
+ case GLSL_TYPE_IMAGE:
+ return glsl_type::get_image_instance((enum glsl_sampler_dim)encoded.sampler.dimensionality,
+ encoded.sampler.array,
+ (glsl_base_type) encoded.sampler.sampled_type);
+ case GLSL_TYPE_ATOMIC_UINT:
+ return glsl_type::atomic_uint_type;
+ case GLSL_TYPE_ARRAY: {
+ unsigned length = encoded.array.length;
+ if (length == 0x1fff)
+ length = blob_read_uint32(blob);
+ unsigned explicit_stride = encoded.array.explicit_stride;
+ if (explicit_stride == 0x3fff)
+ explicit_stride = blob_read_uint32(blob);
+ return glsl_type::get_array_instance(decode_type_from_blob(blob),
+ length, explicit_stride);
+ }
+ case GLSL_TYPE_STRUCT:
+ case GLSL_TYPE_INTERFACE: {
+ char *name = blob_read_string(blob);
+ unsigned num_fields = encoded.strct.length;
+ if (num_fields == 0xffffff)
+ num_fields = blob_read_uint32(blob);
+
+ size_t s_field_size, s_field_ptrs;
+ get_struct_type_field_and_pointer_sizes(&s_field_size, &s_field_ptrs);
+
+ glsl_struct_field *fields =
+ (glsl_struct_field *) malloc(s_field_size * num_fields);
+ for (unsigned i = 0; i < num_fields; i++) {
+ fields[i].type = decode_type_from_blob(blob);
+ fields[i].name = blob_read_string(blob);
+
+ blob_copy_bytes(blob, ((uint8_t *) &fields[i]) + s_field_ptrs,
+ s_field_size - s_field_ptrs);
+ }
+
+ const glsl_type *t;
+ if (base_type == GLSL_TYPE_INTERFACE) {
+ enum glsl_interface_packing packing =
+ (glsl_interface_packing) encoded.strct.interface_packing_or_packed;
+ bool row_major = encoded.strct.interface_row_major;
+ t = glsl_type::get_interface_instance(fields, num_fields, packing,
+ row_major, name);
+ } else {
+ unsigned packed = encoded.strct.interface_packing_or_packed;
+ t = glsl_type::get_struct_instance(fields, num_fields, name, packed);
+ }
+
+ free(fields);
+ return t;
+ }
+ case GLSL_TYPE_VOID:
+ return glsl_type::void_type;
+ case GLSL_TYPE_ERROR:
+ default:
+ assert(!"Cannot decode type!");
+ return NULL;
+ }
+}
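+
+/* Round-trip sketch (illustrative; assumes the type singleton is alive so
+ * decoded types resolve to the same flyweights):
+ *
+ *    struct blob b;
+ *    blob_init(&b);
+ *    encode_type_to_blob(&b, glsl_type::mat3_type);
+ *
+ *    struct blob_reader r;
+ *    blob_reader_init(&r, b.data, b.size);
+ *    assert(decode_type_from_blob(&r) == glsl_type::mat3_type);
+ *    blob_finish(&b);
+ */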
+
+unsigned
+glsl_type::cl_alignment() const
+{
+ /* Vectors, unlike arrays, are aligned to their size. */
+ if (this->is_scalar() || this->is_vector())
+ return this->cl_size();
+ else if (this->is_array())
+ return this->without_array()->cl_alignment();
+ else if (this->is_struct()) {
+ /* Packed structs are 1-byte aligned regardless of their size. */
+ if (this->packed)
+ return 1;
+
+ unsigned res = 1;
+ for (unsigned i = 0; i < this->length; ++i) {
+ struct glsl_struct_field &field = this->fields.structure[i];
+ res = MAX2(res, field.type->cl_alignment());
+ }
+ return res;
+ }
+ return 1;
+}
+
+unsigned
+glsl_type::cl_size() const
+{
+ if (this->is_scalar()) {
+ return glsl_base_type_get_bit_size(this->base_type) / 8;
+ } else if (this->is_vector()) {
+ unsigned vec_elemns = this->vector_elements == 3 ? 4 : this->vector_elements;
+ return vec_elemns * glsl_base_type_get_bit_size(this->base_type) / 8;
+ } else if (this->is_array()) {
+ unsigned size = this->without_array()->cl_size();
+ return size * this->length;
+ } else if (this->is_struct()) {
+ unsigned size = 0;
+ for (unsigned i = 0; i < this->length; ++i) {
+ struct glsl_struct_field &field = this->fields.structure[i];
+ /* if a struct is packed, members don't get aligned */
+ if (!this->packed)
+ size = align(size, field.type->cl_alignment());
+ size += field.type->cl_size();
+ }
+ return size;
+ }
+ return 1;
+}
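+
+/* For example (illustrative): under these OpenCL rules a vec3 is sized
+ * and aligned like a vec4:
+ *
+ *    assert(glsl_type::vec3_type->cl_size() == 16);
+ *    assert(glsl_type::vec3_type->cl_alignment() == 16);
+ */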
+
+extern "C" {
+
+int
+glsl_get_sampler_dim_coordinate_components(enum glsl_sampler_dim dim)
+{
+ switch (dim) {
+ case GLSL_SAMPLER_DIM_1D:
+ case GLSL_SAMPLER_DIM_BUF:
+ return 1;
+ case GLSL_SAMPLER_DIM_2D:
+ case GLSL_SAMPLER_DIM_RECT:
+ case GLSL_SAMPLER_DIM_MS:
+ case GLSL_SAMPLER_DIM_EXTERNAL:
+ case GLSL_SAMPLER_DIM_SUBPASS:
+ case GLSL_SAMPLER_DIM_SUBPASS_MS:
+ return 2;
+ case GLSL_SAMPLER_DIM_3D:
+ case GLSL_SAMPLER_DIM_CUBE:
+ return 3;
+ default:
+ unreachable("Unknown sampler dim");
+ }
+}
+
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.h
new file mode 100644
index 0000000000..f709bdd702
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl_types.h
@@ -0,0 +1,1380 @@
+/* -*- c++ -*- */
+/*
+ * Copyright © 2009 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef GLSL_TYPES_H
+#define GLSL_TYPES_H
+
+#include <string.h>
+#include <assert.h>
+
+#include "shader_enums.h"
+#include "c11/threads.h"
+#include "util/blob.h"
+#include "util/format/u_format.h"
+#include "util/macros.h"
+
+#ifdef __cplusplus
+#include "main/config.h"
+#endif
+
+struct glsl_type;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _mesa_glsl_parse_state;
+struct glsl_symbol_table;
+
+extern void
+glsl_type_singleton_init_or_ref();
+
+extern void
+glsl_type_singleton_decref();
+
+extern void
+_mesa_glsl_initialize_types(struct _mesa_glsl_parse_state *state);
+
+void encode_type_to_blob(struct blob *blob, const struct glsl_type *type);
+
+const struct glsl_type *decode_type_from_blob(struct blob_reader *blob);
+
+typedef void (*glsl_type_size_align_func)(const struct glsl_type *type,
+ unsigned *size, unsigned *align);
+
+enum glsl_base_type {
+ /* Note: GLSL_TYPE_UINT, GLSL_TYPE_INT, and GLSL_TYPE_FLOAT must be 0, 1,
+ * and 2 so that they will fit in the 2 bits of glsl_type::sampled_type.
+ */
+ GLSL_TYPE_UINT = 0,
+ GLSL_TYPE_INT,
+ GLSL_TYPE_FLOAT,
+ GLSL_TYPE_FLOAT16,
+ GLSL_TYPE_DOUBLE,
+ GLSL_TYPE_UINT8,
+ GLSL_TYPE_INT8,
+ GLSL_TYPE_UINT16,
+ GLSL_TYPE_INT16,
+ GLSL_TYPE_UINT64,
+ GLSL_TYPE_INT64,
+ GLSL_TYPE_BOOL,
+ GLSL_TYPE_SAMPLER,
+ GLSL_TYPE_IMAGE,
+ GLSL_TYPE_ATOMIC_UINT,
+ GLSL_TYPE_STRUCT,
+ GLSL_TYPE_INTERFACE,
+ GLSL_TYPE_ARRAY,
+ GLSL_TYPE_VOID,
+ GLSL_TYPE_SUBROUTINE,
+ GLSL_TYPE_FUNCTION,
+ GLSL_TYPE_ERROR
+};
+
+/* Return the bit size of a type. Note that this differs from
+ * glsl_get_bit_size in that it returns 32 bits for bools, whereas at
+ * the NIR level we would want to return 1 bit for bools.
+ */
+static unsigned glsl_base_type_bit_size(enum glsl_base_type type)
+{
+ switch (type) {
+ case GLSL_TYPE_BOOL:
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_FLOAT: /* TODO handle mediump */
+ case GLSL_TYPE_SUBROUTINE:
+ return 32;
+
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ return 16;
+
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ return 8;
+
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_SAMPLER:
+ return 64;
+
+ default:
+ /* For GLSL_TYPE_STRUCT etc. it is OK to return 0. This usually
+ * happens when this function is reached through the is_64bit and
+ * is_16bit helpers.
+ */
+ return 0;
+ }
+
+ return 0;
+}
+
+static inline bool glsl_base_type_is_16bit(enum glsl_base_type type)
+{
+ return glsl_base_type_bit_size(type) == 16;
+}
+
+static inline bool glsl_base_type_is_64bit(enum glsl_base_type type)
+{
+ return glsl_base_type_bit_size(type) == 64;
+}
+
+static inline bool glsl_base_type_is_integer(enum glsl_base_type type)
+{
+ return type == GLSL_TYPE_UINT8 ||
+ type == GLSL_TYPE_INT8 ||
+ type == GLSL_TYPE_UINT16 ||
+ type == GLSL_TYPE_INT16 ||
+ type == GLSL_TYPE_UINT ||
+ type == GLSL_TYPE_INT ||
+ type == GLSL_TYPE_UINT64 ||
+ type == GLSL_TYPE_INT64 ||
+ type == GLSL_TYPE_BOOL ||
+ type == GLSL_TYPE_SAMPLER ||
+ type == GLSL_TYPE_IMAGE;
+}
+
+static inline unsigned int
+glsl_base_type_get_bit_size(const enum glsl_base_type base_type)
+{
+ switch (base_type) {
+ case GLSL_TYPE_BOOL:
+ return 1;
+
+ case GLSL_TYPE_INT:
+ case GLSL_TYPE_UINT:
+ case GLSL_TYPE_FLOAT: /* TODO handle mediump */
+ case GLSL_TYPE_SUBROUTINE:
+ return 32;
+
+ case GLSL_TYPE_FLOAT16:
+ case GLSL_TYPE_UINT16:
+ case GLSL_TYPE_INT16:
+ return 16;
+
+ case GLSL_TYPE_UINT8:
+ case GLSL_TYPE_INT8:
+ return 8;
+
+ case GLSL_TYPE_DOUBLE:
+ case GLSL_TYPE_INT64:
+ case GLSL_TYPE_UINT64:
+ case GLSL_TYPE_IMAGE:
+ case GLSL_TYPE_SAMPLER:
+ return 64;
+
+ default:
+ unreachable("unknown base type");
+ }
+
+ return 0;
+}
+
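+/* Example (illustrative): the two bit-size helpers above deliberately
+ * disagree on booleans: glsl_base_type_bit_size(GLSL_TYPE_BOOL) returns 32,
+ * the GLSL storage size, while glsl_base_type_get_bit_size(GLSL_TYPE_BOOL)
+ * returns 1, the NIR-level size.
+ */
+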
+static inline enum glsl_base_type
+glsl_unsigned_base_type_of(enum glsl_base_type type)
+{
+ switch (type) {
+ case GLSL_TYPE_INT:
+ return GLSL_TYPE_UINT;
+ case GLSL_TYPE_INT8:
+ return GLSL_TYPE_UINT8;
+ case GLSL_TYPE_INT16:
+ return GLSL_TYPE_UINT16;
+ case GLSL_TYPE_INT64:
+ return GLSL_TYPE_UINT64;
+ default:
+ assert(type == GLSL_TYPE_UINT ||
+ type == GLSL_TYPE_UINT8 ||
+ type == GLSL_TYPE_UINT16 ||
+ type == GLSL_TYPE_UINT64);
+ return type;
+ }
+}
+
+enum glsl_sampler_dim {
+ GLSL_SAMPLER_DIM_1D = 0,
+ GLSL_SAMPLER_DIM_2D,
+ GLSL_SAMPLER_DIM_3D,
+ GLSL_SAMPLER_DIM_CUBE,
+ GLSL_SAMPLER_DIM_RECT,
+ GLSL_SAMPLER_DIM_BUF,
+ GLSL_SAMPLER_DIM_EXTERNAL,
+ GLSL_SAMPLER_DIM_MS,
+ GLSL_SAMPLER_DIM_SUBPASS, /* for vulkan input attachments */
+ GLSL_SAMPLER_DIM_SUBPASS_MS, /* for multisampled vulkan input attachments */
+};
+
+int
+glsl_get_sampler_dim_coordinate_components(enum glsl_sampler_dim dim);
+
+enum glsl_matrix_layout {
+ /**
+ * The layout of the matrix is inherited from the object containing the
+ * matrix (the top level structure or the uniform block).
+ */
+ GLSL_MATRIX_LAYOUT_INHERITED,
+
+ /**
+ * Explicit column-major layout
+ *
+ * If a uniform block doesn't have an explicit layout set, it will default
+ * to this layout.
+ */
+ GLSL_MATRIX_LAYOUT_COLUMN_MAJOR,
+
+ /**
+ * Row-major layout
+ */
+ GLSL_MATRIX_LAYOUT_ROW_MAJOR
+};
+
+enum {
+ GLSL_PRECISION_NONE = 0,
+ GLSL_PRECISION_HIGH,
+ GLSL_PRECISION_MEDIUM,
+ GLSL_PRECISION_LOW
+};
+
+#ifdef __cplusplus
+} /* extern "C" */
+
+#include "GL/gl.h"
+#include "util/ralloc.h"
+#include "main/menums.h" /* for gl_texture_index, C++'s enum rules are broken */
+
+struct glsl_type {
+ GLenum gl_type;
+ glsl_base_type base_type:8;
+
+ glsl_base_type sampled_type:8; /**< Type of data returned using this
+ * sampler or image. Only \c
+ * GLSL_TYPE_FLOAT, \c GLSL_TYPE_INT,
+ * and \c GLSL_TYPE_UINT are valid.
+ */
+
+ unsigned sampler_dimensionality:4; /**< \see glsl_sampler_dim */
+ unsigned sampler_shadow:1;
+ unsigned sampler_array:1;
+ unsigned interface_packing:2;
+ unsigned interface_row_major:1;
+
+ /**
+ * For \c GLSL_TYPE_STRUCT this specifies if the struct is packed or not.
+ *
+ * Only used for Compute kernels
+ */
+ unsigned packed:1;
+
+private:
+ glsl_type() : mem_ctx(NULL)
+ {
+ // Dummy constructor, just for the sake of ASSERT_BITFIELD_SIZE.
+ }
+
+public:
+ /**
+ * \name Vector and matrix element counts
+ *
+ * For scalars, each of these values will be 1. For non-numeric types
+ * these will be 0.
+ */
+ /*@{*/
+ uint8_t vector_elements; /**< 1, 2, 3, or 4 vector elements. */
+ uint8_t matrix_columns; /**< 1, 2, 3, or 4 matrix columns. */
+ /*@}*/
+
+ /**
+ * For \c GLSL_TYPE_ARRAY, this is the length of the array. For
+ * \c GLSL_TYPE_STRUCT or \c GLSL_TYPE_INTERFACE, it is the number of
+ * elements in the structure and the number of values pointed to by
+ * \c fields.structure (below).
+ */
+ unsigned length;
+
+ /**
+ * Name of the data type
+ *
+ * Will never be \c NULL.
+ */
+ const char *name;
+
+ /**
+ * Explicit array, matrix, or vector stride. This is used to communicate
+ * explicit array layouts from SPIR-V. Should be 0 if the type has no
+ * explicit stride.
+ */
+ unsigned explicit_stride;
+
+ /**
+ * Subtype of composite data types.
+ */
+ union {
+ const struct glsl_type *array; /**< Type of array elements. */
+ struct glsl_function_param *parameters; /**< Parameters to function. */
+ struct glsl_struct_field *structure; /**< List of struct fields. */
+ } fields;
+
+ /**
+ * \name Pointers to various public type singletons
+ */
+ /*@{*/
+#undef DECL_TYPE
+#define DECL_TYPE(NAME, ...) \
+ static const glsl_type *const NAME##_type;
+#undef STRUCT_TYPE
+#define STRUCT_TYPE(NAME) \
+ static const glsl_type *const struct_##NAME##_type;
+#include "compiler/builtin_type_macros.h"
+ /*@}*/
+
+ /**
+ * Convenience accessors for vector types (shorter than get_instance()).
+ * @{
+ */
+ static const glsl_type *vec(unsigned components, const glsl_type *const ts[]);
+ static const glsl_type *vec(unsigned components);
+ static const glsl_type *f16vec(unsigned components);
+ static const glsl_type *dvec(unsigned components);
+ static const glsl_type *ivec(unsigned components);
+ static const glsl_type *uvec(unsigned components);
+ static const glsl_type *bvec(unsigned components);
+ static const glsl_type *i64vec(unsigned components);
+ static const glsl_type *u64vec(unsigned components);
+ static const glsl_type *i16vec(unsigned components);
+ static const glsl_type *u16vec(unsigned components);
+ static const glsl_type *i8vec(unsigned components);
+ static const glsl_type *u8vec(unsigned components);
+ /**@}*/
+
+ /**
+ * For numeric and boolean derived types returns the basic scalar type
+ *
+ * If the type is a numeric or boolean scalar, vector, or matrix type,
+ * this function gets the scalar type of the individual components. For
+ * all other types, including arrays of numeric or boolean types, the
+ * error type is returned.
+ */
+ const glsl_type *get_base_type() const;
+
+ /**
+ * Get the basic scalar type which this type aggregates.
+ *
+ * If the type is a numeric or boolean scalar, vector, or matrix, or an
+ * array of any of those, this function gets the scalar type of the
+ * individual components. For structs and arrays of structs, this function
+ * returns the struct type. For samplers and arrays of samplers, this
+ * function returns the sampler type.
+ */
+ const glsl_type *get_scalar_type() const;
+
+ /**
+ * Gets the "bare" type without any decorations or layout information.
+ */
+ const glsl_type *get_bare_type() const;
+
+ /**
+ * Gets the float16 version of this type.
+ */
+ const glsl_type *get_float16_type() const;
+
+ /**
+ * Get the instance of a built-in scalar, vector, or matrix type
+ */
+ static const glsl_type *get_instance(unsigned base_type, unsigned rows,
+ unsigned columns,
+ unsigned explicit_stride = 0,
+ bool row_major = false);
+
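+ /* Example (illustrative): scalar, vector, and matrix instances are
+ * flyweights, so repeated lookups return the same pointer:
+ *
+ *    const glsl_type *v4 = glsl_type::get_instance(GLSL_TYPE_FLOAT, 4, 1);
+ *    assert(v4 == glsl_type::vec4_type);
+ */
+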
+ /**
+ * Get the instance of a sampler type
+ */
+ static const glsl_type *get_sampler_instance(enum glsl_sampler_dim dim,
+ bool shadow,
+ bool array,
+ glsl_base_type type);
+
+ static const glsl_type *get_image_instance(enum glsl_sampler_dim dim,
+ bool array, glsl_base_type type);
+
+ /**
+ * Get the instance of an array type
+ */
+ static const glsl_type *get_array_instance(const glsl_type *base,
+ unsigned elements,
+ unsigned explicit_stride = 0);
+
+ /**
+ * Get the instance of a record type
+ */
+ static const glsl_type *get_struct_instance(const glsl_struct_field *fields,
+ unsigned num_fields,
+ const char *name,
+ bool packed = false);
+
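+ /* Example (illustrative): building a two-field struct type. The fields
+ * array is copied internally, so a local array is fine:
+ *
+ *    glsl_struct_field f[2] = {
+ *       glsl_struct_field(glsl_type::vec4_type, "pos"),
+ *       glsl_struct_field(glsl_type::vec4_type, "color"),
+ *    };
+ *    const glsl_type *t = glsl_type::get_struct_instance(f, 2, "light");
+ */
+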
+ /**
+ * Get the instance of an interface block type
+ */
+ static const glsl_type *get_interface_instance(const glsl_struct_field *fields,
+ unsigned num_fields,
+ enum glsl_interface_packing packing,
+ bool row_major,
+ const char *block_name);
+
+ /**
+ * Get the instance of a subroutine type
+ */
+ static const glsl_type *get_subroutine_instance(const char *subroutine_name);
+
+ /**
+ * Get the instance of a function type
+ */
+ static const glsl_type *get_function_instance(const struct glsl_type *return_type,
+ const glsl_function_param *parameters,
+ unsigned num_params);
+
+ /**
+ * Get the type resulting from a multiplication of \p type_a * \p type_b
+ */
+ static const glsl_type *get_mul_type(const glsl_type *type_a,
+ const glsl_type *type_b);
+
+ /**
+ * Query the total number of scalars that make up a scalar, vector or matrix
+ */
+ unsigned components() const
+ {
+ return vector_elements * matrix_columns;
+ }
+
+ /**
+ * Calculate the number of component slots required to hold this type
+ *
+ * This is used to determine how many uniform or varying locations a type
+ * might occupy.
+ */
+ unsigned component_slots() const;
+
+ /**
+ * Calculate offset between the base location of the struct in
+ * uniform storage and a struct member.
+ * For the initial call, length is the index of the member to find the
+ * offset for.
+ */
+ unsigned struct_location_offset(unsigned length) const;
+
+ /**
+ * Calculate the number of unique values from glGetUniformLocation for the
+ * elements of the type.
+ *
+ * This is used to allocate slots in the UniformRemapTable; the number of
+ * locations may not match the storage space actually used by the driver.
+ */
+ unsigned uniform_locations() const;
+
+ /**
+ * Used to count the number of varyings contained in the type ignoring
+ * innermost array elements.
+ */
+ unsigned varying_count() const;
+
+ /**
+ * Calculate the number of vec4 slots required to hold this type.
+ *
+ * This is the underlying recursive type_size function for
+ * count_attribute_slots() (vertex inputs and varyings) but also for
+ * gallium's !PIPE_CAP_PACKED_UNIFORMS case.
+ */
+ unsigned count_vec4_slots(bool is_gl_vertex_input, bool bindless) const;
+
+ /**
+ * Calculate the number of dword slots required to hold this type.
+ *
+ * This is the underlying recursive type_size function for
+ * gallium's PIPE_CAP_PACKED_UNIFORMS case.
+ */
+ unsigned count_dword_slots(bool bindless) const;
+
+ /**
+ * Calculate the number of attribute slots required to hold this type
+ *
+ * This implements the language rules of GLSL 1.50 for counting the number
+ * of slots used by a vertex attribute. It also determines the number of
+ * varying slots the type will use up in the absence of varying packing
+ * (and thus, it can be used to measure the number of varying slots used by
+ * the varyings that are generated by lower_packed_varyings).
+ *
+ * For vertex shader attributes - doubles only take one slot.
+ * For inter-shader varyings - dvec3/dvec4 take two slots.
+ *
+ * Vulkan doesn’t make this distinction so the argument should always be
+ * false.
+ */
+ unsigned count_attribute_slots(bool is_gl_vertex_input) const {
+ return count_vec4_slots(is_gl_vertex_input, true);
+ }
+
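+ /* Example (illustrative): for a dvec4, count_attribute_slots(true) is 1,
+ * since vertex inputs pack doubles into a single slot, while
+ * count_attribute_slots(false) is 2, since dvec3/dvec4 varyings take two
+ * slots.
+ */
+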
+ /**
+ * Alignment in bytes of the start of this type in a std140 uniform
+ * block.
+ */
+ unsigned std140_base_alignment(bool row_major) const;
+
+ /** Size in bytes of this type in a std140 uniform block.
+ *
+ * Note that this is not GL_UNIFORM_SIZE (which is the number of
+ * elements in the array)
+ */
+ unsigned std140_size(bool row_major) const;
+
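+ /* Example (illustrative): under std140 a vec3 has a base alignment of 16,
+ * so for "struct { vec3 a; float b; }" the float packs into the tail of the
+ * vec3's 16-byte slot and std140_size() is 16 rather than 32.
+ */
+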
+ /**
+ * Gets an explicitly laid out type with the std140 layout.
+ */
+ const glsl_type *get_explicit_std140_type(bool row_major) const;
+
+ /**
+ * Alignment in bytes of the start of this type in a std430 shader
+ * storage block.
+ */
+ unsigned std430_base_alignment(bool row_major) const;
+
+ /**
+ * Calculate array stride in bytes of this type in a std430 shader storage
+ * block.
+ */
+ unsigned std430_array_stride(bool row_major) const;
+
+ /**
+ * Size in bytes of this type in a std430 shader storage block.
+ *
+ * Note that this is not GL_BUFFER_SIZE
+ */
+ unsigned std430_size(bool row_major) const;
+
+ /**
+ * Gets an explicitly laid out type with the std430 layout.
+ */
+ const glsl_type *get_explicit_std430_type(bool row_major) const;
+
+ /**
+ * Gets an explicitly laid out interface type.
+ */
+ const glsl_type *get_explicit_interface_type(bool supports_std430) const;
+
+ /** Returns an explicitly laid out type given a type and size/align func
+ *
+ * The size/align func is only called for scalar and vector types and the
+ * returned type is otherwise laid out in the natural way as follows:
+ *
+ * - Arrays and matrices have a stride of ALIGN(elem_size, elem_align).
+ *
+ * - Structure types have their elements in-order and as tightly packed as
+ * possible following the alignment required by the size/align func.
+ *
+ * - All composite types (structures, matrices, and arrays) have an
+ *   alignment equal to the highest alignment of any member of the composite.
+ *
+ * The types returned by this function are likely not suitable for most UBO
+ * or SSBO layout because they do not add the extra array and substructure
+ * alignment that is required by std140 and std430.
+ */
+ const glsl_type *get_explicit_type_for_size_align(glsl_type_size_align_func type_info,
+ unsigned *size, unsigned *align) const;
+
+ /**
+ * Alignment in bytes of the start of this type in OpenCL memory.
+ */
+ unsigned cl_alignment() const;
+
+ /**
+ * Size in bytes of this type in OpenCL memory
+ */
+ unsigned cl_size() const;
+
+ /**
+ * Size in bytes of this type based on its explicit data.
+ *
+ * When using SPIR-V shaders (ARB_gl_spirv), memory layouts are expressed
+ * through explicit offset, stride and matrix layout, so the size
+ * can and should be computed using those values.
+ *
+ * Note that the value returned by this method is only correct if such
+ * values are set, so only with SPIR-V shaders. Should not be used with
+ * GLSL shaders.
+ */
+ unsigned explicit_size(bool align_to_stride=false) const;
+
+ /**
+ * \brief Can this type be implicitly converted to another?
+ *
+ * \return True if the types are identical or if this type can be converted
+ * to \c desired according to Section 4.1.10 of the GLSL spec.
+ *
+ * \verbatim
+ * From page 25 (31 of the pdf) of the GLSL 1.50 spec, Section 4.1.10
+ * Implicit Conversions:
+ *
+ * In some situations, an expression and its type will be implicitly
+ * converted to a different type. The following table shows all allowed
+ * implicit conversions:
+ *
+ * Type of expression | Can be implicitly converted to
+ * --------------------------------------------------
+ * int float
+ * uint
+ *
+ * ivec2 vec2
+ * uvec2
+ *
+ * ivec3 vec3
+ * uvec3
+ *
+ * ivec4 vec4
+ * uvec4
+ *
+ * There are no implicit array or structure conversions. For example,
+ * an array of int cannot be implicitly converted to an array of float.
+ * There are no implicit conversions between signed and unsigned
+ * integers.
+ * \endverbatim
+ */
+ bool can_implicitly_convert_to(const glsl_type *desired,
+ _mesa_glsl_parse_state *state) const;
+
+ /**
+ * Query whether or not a type is a scalar (non-vector and non-matrix).
+ */
+ bool is_scalar() const
+ {
+ return (vector_elements == 1)
+ && (base_type >= GLSL_TYPE_UINT)
+ && (base_type <= GLSL_TYPE_IMAGE);
+ }
+
+ /**
+ * Query whether or not a type is a vector
+ */
+ bool is_vector() const
+ {
+ return (vector_elements > 1)
+ && (matrix_columns == 1)
+ && (base_type >= GLSL_TYPE_UINT)
+ && (base_type <= GLSL_TYPE_BOOL);
+ }
+
+ /**
+ * Query whether or not a type is a matrix
+ */
+ bool is_matrix() const
+ {
+ /* GLSL only has float matrices. */
+ return (matrix_columns > 1) && (base_type == GLSL_TYPE_FLOAT ||
+ base_type == GLSL_TYPE_DOUBLE ||
+ base_type == GLSL_TYPE_FLOAT16);
+ }
+
+ /**
+ * Query whether or not a type is a non-array numeric type
+ */
+ bool is_numeric() const
+ {
+ return (base_type >= GLSL_TYPE_UINT) && (base_type <= GLSL_TYPE_INT64);
+ }
+
+ /**
+ * Query whether or not a type is an integer.
+ */
+ bool is_integer() const
+ {
+ return glsl_base_type_is_integer(base_type);
+ }
+
+ /**
+ * Query whether or not a type is a 32-bit integer.
+ */
+ bool is_integer_32() const
+ {
+ return (base_type == GLSL_TYPE_UINT) || (base_type == GLSL_TYPE_INT);
+ }
+
+ /**
+ * Query whether or not a type is a 64-bit integer.
+ */
+ bool is_integer_64() const
+ {
+ return base_type == GLSL_TYPE_UINT64 || base_type == GLSL_TYPE_INT64;
+ }
+
+ /**
+ * Query whether or not a type is a 32-bit or 64-bit integer
+ */
+ bool is_integer_32_64() const
+ {
+ return is_integer_32() || is_integer_64();
+ }
+
+ /**
+ * Query whether or not type is an integral type, or for struct and array
+ * types, contains an integral type.
+ */
+ bool contains_integer() const;
+
+ /**
+ * Query whether or not type is a double type, or for struct, interface and
+ * array types, contains a double type.
+ */
+ bool contains_double() const;
+
+ /**
+ * Query whether or not type is a 64-bit type, or for struct, interface and
+ * array types, contains a 64-bit type.
+ */
+ bool contains_64bit() const;
+
+ /**
+ * Query whether or not a type is a float type
+ */
+ bool is_float() const
+ {
+ return base_type == GLSL_TYPE_FLOAT;
+ }
+
+ /**
+ * Query whether or not a type is a half-float or float type
+ */
+ bool is_float_16_32() const
+ {
+ return base_type == GLSL_TYPE_FLOAT16 || is_float();
+ }
+
+ /**
+ * Query whether or not a type is a half-float, float or double
+ */
+ bool is_float_16_32_64() const
+ {
+ return base_type == GLSL_TYPE_FLOAT16 || is_float() || is_double();
+ }
+
+ /**
+ * Query whether or not a type is a double type
+ */
+ bool is_double() const
+ {
+ return base_type == GLSL_TYPE_DOUBLE;
+ }
+
+ /**
+ * Query whether a 64-bit type takes two slots.
+ */
+ bool is_dual_slot() const
+ {
+ return is_64bit() && vector_elements > 2;
+ }
+
+ /**
+ * Query whether or not a type is 64-bit
+ */
+ bool is_64bit() const
+ {
+ return glsl_base_type_is_64bit(base_type);
+ }
+
+ /**
+ * Query whether or not a type is 16-bit
+ */
+ bool is_16bit() const
+ {
+ return glsl_base_type_is_16bit(base_type);
+ }
+
+ /**
+ * Query whether or not a type is 32-bit
+ */
+ bool is_32bit() const
+ {
+ return base_type == GLSL_TYPE_UINT ||
+ base_type == GLSL_TYPE_INT ||
+ base_type == GLSL_TYPE_FLOAT;
+ }
+
+ /**
+ * Query whether or not a type is a non-array boolean type
+ */
+ bool is_boolean() const
+ {
+ return base_type == GLSL_TYPE_BOOL;
+ }
+
+ /**
+ * Query whether or not a type is a sampler
+ */
+ bool is_sampler() const
+ {
+ return base_type == GLSL_TYPE_SAMPLER;
+ }
+
+ /**
+ * Query whether or not type is a sampler, or for struct, interface and
+ * array types, contains a sampler.
+ */
+ bool contains_sampler() const;
+
+ /**
+ * Query whether or not type is an array or for struct, interface and
+ * array types, contains an array.
+ */
+ bool contains_array() const;
+
+ /**
+ * Get the Mesa texture target index for a sampler type.
+ */
+ gl_texture_index sampler_index() const;
+
+ /**
+ * Query whether or not type is an image, or for struct, interface and
+ * array types, contains an image.
+ */
+ bool contains_image() const;
+
+ /**
+ * Query whether or not a type is an image
+ */
+ bool is_image() const
+ {
+ return base_type == GLSL_TYPE_IMAGE;
+ }
+
+ /**
+ * Query whether or not a type is an array
+ */
+ bool is_array() const
+ {
+ return base_type == GLSL_TYPE_ARRAY;
+ }
+
+ bool is_array_of_arrays() const
+ {
+ return is_array() && fields.array->is_array();
+ }
+
+ /**
+ * Query whether or not a type is a record
+ */
+ bool is_struct() const
+ {
+ return base_type == GLSL_TYPE_STRUCT;
+ }
+
+ /**
+ * Query whether or not a type is an interface
+ */
+ bool is_interface() const
+ {
+ return base_type == GLSL_TYPE_INTERFACE;
+ }
+
+ /**
+ * Query whether or not a type is the void type singleton.
+ */
+ bool is_void() const
+ {
+ return base_type == GLSL_TYPE_VOID;
+ }
+
+ /**
+ * Query whether or not a type is the error type singleton.
+ */
+ bool is_error() const
+ {
+ return base_type == GLSL_TYPE_ERROR;
+ }
+
+ /**
+ * Query whether or not a type is a subroutine
+ */
+ bool is_subroutine() const
+ {
+ return base_type == GLSL_TYPE_SUBROUTINE;
+ }
+ bool contains_subroutine() const;
+
+ /**
+ * Query if a type is unnamed/anonymous (named by the parser)
+ */
+ bool is_anonymous() const
+ {
+ return !strncmp(name, "#anon", 5);
+ }
+
+ /**
+ * Get the type stripped of any arrays
+ *
+ * \return
+ * Pointer to the innermost non-array element type for array types, or a
+ * pointer to the type itself for non-array types.
+ */
+ const glsl_type *without_array() const
+ {
+ const glsl_type *t = this;
+
+ while (t->is_array())
+ t = t->fields.array;
+
+ return t;
+ }
+
+ /**
+ * Return the total number of elements in an array including the elements
+ * in arrays of arrays.
+ */
+ unsigned arrays_of_arrays_size() const
+ {
+ if (!is_array())
+ return 0;
+
+ unsigned size = length;
+ const glsl_type *base_type = fields.array;
+
+ while (base_type->is_array()) {
+ size = size * base_type->length;
+ base_type = base_type->fields.array;
+ }
+ return size;
+ }
+
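+ /* Example (illustrative): for "float a[3][4]", an array of 3 arrays of 4
+ * floats, this returns 3 * 4 = 12, while array_size() reports only the
+ * outer dimension, 3.
+ */
+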
+ /**
+ * Return bit size for this type.
+ */
+ unsigned bit_size() const
+ {
+ return glsl_base_type_bit_size(this->base_type);
+ }
+
+
+ /**
+ * Query whether or not a type is an atomic_uint.
+ */
+ bool is_atomic_uint() const
+ {
+ return base_type == GLSL_TYPE_ATOMIC_UINT;
+ }
+
+ /**
+ * Return the amount of atomic counter storage required for a type.
+ */
+ unsigned atomic_size() const
+ {
+ if (is_atomic_uint())
+ return ATOMIC_COUNTER_SIZE;
+ else if (is_array())
+ return length * fields.array->atomic_size();
+ else
+ return 0;
+ }
+
+ /**
+ * Return whether a type contains any atomic counters.
+ */
+ bool contains_atomic() const
+ {
+ return atomic_size() > 0;
+ }
+
+ /**
+ * Return whether a type contains any opaque types.
+ */
+ bool contains_opaque() const;
+
+ /**
+ * Query the full type of a matrix row
+ *
+ * \return
+ * If the type is not a matrix, \c glsl_type::error_type is returned.
+ * Otherwise a type matching the rows of the matrix is returned.
+ */
+ const glsl_type *row_type() const
+ {
+ if (!is_matrix())
+ return error_type;
+
+ if (explicit_stride && !interface_row_major)
+ return get_instance(base_type, matrix_columns, 1, explicit_stride);
+ else
+ return get_instance(base_type, matrix_columns, 1);
+ }
+
+ /**
+ * Query the full type of a matrix column
+ *
+ * \return
+ * If the type is not a matrix, \c glsl_type::error_type is returned.
+ * Otherwise a type matching the columns of the matrix is returned.
+ */
+ const glsl_type *column_type() const
+ {
+ if (!is_matrix())
+ return error_type;
+
+ if (explicit_stride && interface_row_major)
+ return get_instance(base_type, vector_elements, 1, explicit_stride);
+ else
+ return get_instance(base_type, vector_elements, 1);
+ }
+
+ /**
+ * Get the type of a structure field
+ *
+ * \return
+ * Pointer to the type of the named field. If the type is not a structure
+ * or the named field does not exist, \c glsl_type::error_type is returned.
+ */
+ const glsl_type *field_type(const char *name) const;
+
+ /**
+ * Get the location of a field within a record type
+ */
+ int field_index(const char *name) const;
+
+ /**
+ * Query the number of elements in an array type
+ *
+ * \return
+ * The number of elements in the array for array types or -1 for non-array
+ * types. If the number of elements in the array has not yet been declared,
+ * zero is returned.
+ */
+ int array_size() const
+ {
+ return is_array() ? length : -1;
+ }
+
+ /**
+ * Query whether the array size for all dimensions has been declared.
+ */
+ bool is_unsized_array() const
+ {
+ return is_array() && length == 0;
+ }
+
+ /**
+ * Return the number of coordinate components needed for this
+ * sampler or image type.
+ *
+ * This is based purely on the sampler's dimensionality. For example, this
+ * returns 1 for sampler1D, and 3 for sampler2DArray.
+ *
+ * Note that this is often different from the actual coordinate type used in
+ * a texturing built-in function, since those pack additional values (such
+ * as the shadow comparator or projector) into the coordinate type.
+ */
+ int coordinate_components() const;
+
+ /**
+ * Compares whether this type matches another type without taking into
+ * account the precision in structures.
+ *
+ * This is applied recursively so that structures containing structure
+ * members can also ignore the precision.
+ */
+ bool compare_no_precision(const glsl_type *b) const;
+
+ /**
+ * Compare a record type against another record type.
+ *
+ * This is useful for matching record types declared on the same shader
+ * stage as well as across different shader stages.
+ * The option to not match name is needed for matching record types
+ * declared across different shader stages.
+ * The option to not match locations is to deal with places where the
+ * same struct is defined in a block which has a location set on it.
+ */
+ bool record_compare(const glsl_type *b, bool match_name,
+ bool match_locations = true,
+ bool match_precision = true) const;
+
+ /**
+ * Get the type interface packing.
+ */
+ enum glsl_interface_packing get_interface_packing() const
+ {
+ return (enum glsl_interface_packing)interface_packing;
+ }
+
+ /**
+ * Get the type interface packing used internally. For shared and packed
+ * layouts this is implementation-defined.
+ */
+ enum glsl_interface_packing get_internal_ifc_packing(bool std430_supported) const
+ {
+ enum glsl_interface_packing packing = this->get_interface_packing();
+ if (packing == GLSL_INTERFACE_PACKING_STD140 ||
+ (!std430_supported &&
+ (packing == GLSL_INTERFACE_PACKING_SHARED ||
+ packing == GLSL_INTERFACE_PACKING_PACKED))) {
+ return GLSL_INTERFACE_PACKING_STD140;
+ } else {
+ assert(packing == GLSL_INTERFACE_PACKING_STD430 ||
+ (std430_supported &&
+ (packing == GLSL_INTERFACE_PACKING_SHARED ||
+ packing == GLSL_INTERFACE_PACKING_PACKED)));
+ return GLSL_INTERFACE_PACKING_STD430;
+ }
+ }
+
+ /**
+ * Check if the type interface is row major
+ */
+ bool get_interface_row_major() const
+ {
+ return (bool) interface_row_major;
+ }
+
+ ~glsl_type();
+
+private:
+
+ static mtx_t hash_mutex;
+
+ /**
+ * ralloc context for the type itself.
+ */
+ void *mem_ctx;
+
+ /** Constructor for vector and matrix types */
+ glsl_type(GLenum gl_type,
+ glsl_base_type base_type, unsigned vector_elements,
+ unsigned matrix_columns, const char *name,
+ unsigned explicit_stride = 0, bool row_major = false);
+
+ /** Constructor for sampler or image types */
+ glsl_type(GLenum gl_type, glsl_base_type base_type,
+ enum glsl_sampler_dim dim, bool shadow, bool array,
+ glsl_base_type type, const char *name);
+
+ /** Constructor for record types */
+ glsl_type(const glsl_struct_field *fields, unsigned num_fields,
+ const char *name, bool packed = false);
+
+ /** Constructor for interface types */
+ glsl_type(const glsl_struct_field *fields, unsigned num_fields,
+ enum glsl_interface_packing packing,
+ bool row_major, const char *name);
+
+ /** Constructor for function types */
+ glsl_type(const glsl_type *return_type,
+ const glsl_function_param *params, unsigned num_params);
+
+ /** Constructors for array types */
+ glsl_type(const glsl_type *array, unsigned length, unsigned explicit_stride);
+
+ /** Constructor for subroutine types */
+ glsl_type(const char *name);
+
+ /** Hash table containing the known explicit matrix and vector types. */
+ static struct hash_table *explicit_matrix_types;
+
+ /** Hash table containing the known array types. */
+ static struct hash_table *array_types;
+
+ /** Hash table containing the known struct types. */
+ static struct hash_table *struct_types;
+
+ /** Hash table containing the known interface types. */
+ static struct hash_table *interface_types;
+
+ /** Hash table containing the known subroutine types. */
+ static struct hash_table *subroutine_types;
+
+ /** Hash table containing the known function types. */
+ static struct hash_table *function_types;
+
+ static bool record_key_compare(const void *a, const void *b);
+ static unsigned record_key_hash(const void *key);
+
+ /**
+ * \name Built-in type flyweights
+ */
+ /*@{*/
+#undef DECL_TYPE
+#define DECL_TYPE(NAME, ...) static const glsl_type _##NAME##_type;
+#undef STRUCT_TYPE
+#define STRUCT_TYPE(NAME) static const glsl_type _struct_##NAME##_type;
+#include "compiler/builtin_type_macros.h"
+ /*@}*/
+
+ /**
+ * \name Friend functions.
+ *
+ * These functions are friends because they must have C linkage and they
+ * need to call various private methods or access various private static
+ * data.
+ */
+ /*@{*/
+ friend void glsl_type_singleton_init_or_ref(void);
+ friend void glsl_type_singleton_decref(void);
+ friend void _mesa_glsl_initialize_types(struct _mesa_glsl_parse_state *);
+ /*@}*/
+};
+
+#undef DECL_TYPE
+#undef STRUCT_TYPE
+#endif /* __cplusplus */
+
+struct glsl_struct_field {
+ const struct glsl_type *type;
+ const char *name;
+
+ /**
+ * For interface blocks, gl_varying_slot corresponding to the input/output
+ * if this is a built-in input/output (i.e. a member of the built-in
+ * gl_PerVertex interface block); -1 otherwise.
+ *
+ * Ignored for structs.
+ */
+ int location;
+
+ /**
+ * For interface blocks, members may have an explicit byte offset
+ * specified; -1 otherwise. Also used for xfb_offset layout qualifier.
+ *
+ * Unless used for xfb_offset this field is ignored for structs.
+ */
+ int offset;
+
+ /**
+ * For interface blocks, members may define a transform feedback buffer;
+ * -1 otherwise.
+ */
+ int xfb_buffer;
+
+ /**
+ * For interface blocks, members may define a transform feedback stride;
+ * -1 otherwise.
+ */
+ int xfb_stride;
+
+ /**
+ * For interface blocks, the interpolation mode (as in
+ * ir_variable::interpolation). 0 otherwise.
+ */
+ unsigned interpolation:3;
+
+ /**
+ * For interface blocks, 1 if this variable uses centroid interpolation (as
+ * in ir_variable::centroid). 0 otherwise.
+ */
+ unsigned centroid:1;
+
+ /**
+ * For interface blocks, 1 if this variable uses sample interpolation (as
+ * in ir_variable::sample). 0 otherwise.
+ */
+ unsigned sample:1;
+
+ /**
+ * Layout of the matrix. Uses glsl_matrix_layout values.
+ */
+ unsigned matrix_layout:2;
+
+ /**
+ * For interface blocks, 1 if this variable is a per-patch input or output
+ * (as in ir_variable::patch). 0 otherwise.
+ */
+ unsigned patch:1;
+
+ /**
+ * Precision qualifier
+ */
+ unsigned precision:2;
+
+ /**
+ * Memory qualifiers, applicable to buffer variables defined in shader
+ * storage buffer objects (SSBOs)
+ */
+ unsigned memory_read_only:1;
+ unsigned memory_write_only:1;
+ unsigned memory_coherent:1;
+ unsigned memory_volatile:1;
+ unsigned memory_restrict:1;
+
+ /**
+ * Layout format, applicable to image variables only.
+ */
+ enum pipe_format image_format;
+
+ /**
+ * Any of the xfb_* qualifiers trigger the shader to be in transform
+ * feedback mode so we need to keep track of whether the buffer was
+ * explicitly set or if it has just been assigned the default global value.
+ */
+ unsigned explicit_xfb_buffer:1;
+
+ unsigned implicit_sized_array:1;
+#ifdef __cplusplus
+#define DEFAULT_CONSTRUCTORS(_type, _precision, _name) \
+ type(_type), name(_name), location(-1), offset(-1), xfb_buffer(0), \
+ xfb_stride(0), interpolation(0), centroid(0), \
+ sample(0), matrix_layout(GLSL_MATRIX_LAYOUT_INHERITED), patch(0), \
+ precision(_precision), memory_read_only(0), \
+ memory_write_only(0), memory_coherent(0), memory_volatile(0), \
+ memory_restrict(0), image_format(PIPE_FORMAT_NONE), \
+ explicit_xfb_buffer(0), \
+ implicit_sized_array(0)
+
+ glsl_struct_field(const struct glsl_type *_type,
+ int _precision,
+ const char *_name)
+ : DEFAULT_CONSTRUCTORS(_type, _precision, _name)
+ {
+ /* empty */
+ }
+
+ glsl_struct_field(const struct glsl_type *_type, const char *_name)
+ : DEFAULT_CONSTRUCTORS(_type, GLSL_PRECISION_NONE, _name)
+ {
+ /* empty */
+ }
+
+ glsl_struct_field()
+ : DEFAULT_CONSTRUCTORS(NULL, GLSL_PRECISION_NONE, NULL)
+ {
+ /* empty */
+ }
+#undef DEFAULT_CONSTRUCTORS
+#endif
+};
+
+struct glsl_function_param {
+ const struct glsl_type *type;
+
+ bool in;
+ bool out;
+};
+
+static inline unsigned int
+glsl_align(unsigned int a, unsigned int align)
+{
+ return (a + align - 1) / align * align;
+}
+
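+/* Example (illustrative): glsl_align(12, 16) == 16 and glsl_align(32, 16) ==
+ * 32. Because the formula divides rather than masks, "align" does not have
+ * to be a power of two.
+ */
+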
+#endif /* GLSL_TYPES_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.c b/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.c
new file mode 100644
index 0000000000..a2a5eb82a6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.c
@@ -0,0 +1,302 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright © 2015 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robclark@freedesktop.org>
+ */
+
+#include "shader_enums.h"
+#include "util/macros.h"
+#include "mesa/main/config.h"
+
+#define ENUM(x) [x] = #x
+#define NAME(val) ((((val) < ARRAY_SIZE(names)) && names[(val)]) ? names[(val)] : "UNKNOWN")
+
+const char *
+gl_shader_stage_name(gl_shader_stage stage)
+{
+ static const char *names[] = {
+ ENUM(MESA_SHADER_VERTEX),
+ ENUM(MESA_SHADER_TESS_CTRL),
+ ENUM(MESA_SHADER_TESS_EVAL),
+ ENUM(MESA_SHADER_GEOMETRY),
+ ENUM(MESA_SHADER_FRAGMENT),
+ ENUM(MESA_SHADER_COMPUTE),
+ ENUM(MESA_SHADER_KERNEL),
+ };
+ STATIC_ASSERT(ARRAY_SIZE(names) == MESA_ALL_SHADER_STAGES);
+ return NAME(stage);
+}
+
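+/* Example (illustrative): the ENUM/NAME macros build a sparse string table
+ * with C99 designated initializers, so lookups degrade gracefully:
+ *
+ *    gl_shader_stage_name(MESA_SHADER_FRAGMENT)  -> "MESA_SHADER_FRAGMENT"
+ *    gl_shader_stage_name((gl_shader_stage)99)   -> "UNKNOWN"
+ */
+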
+/**
+ * Translate a gl_shader_stage to a short shader stage name for debug
+ * printouts and error messages.
+ */
+const char *
+_mesa_shader_stage_to_string(unsigned stage)
+{
+ switch (stage) {
+ case MESA_SHADER_VERTEX: return "vertex";
+ case MESA_SHADER_FRAGMENT: return "fragment";
+ case MESA_SHADER_GEOMETRY: return "geometry";
+ case MESA_SHADER_COMPUTE: return "compute";
+ case MESA_SHADER_KERNEL: return "kernel";
+ case MESA_SHADER_TESS_CTRL: return "tessellation control";
+ case MESA_SHADER_TESS_EVAL: return "tessellation evaluation";
+ }
+
+ unreachable("Unknown shader stage.");
+}
+
+/**
+ * Translate a gl_shader_stage to a shader stage abbreviation (VS, GS, FS)
+ * for debug printouts and error messages.
+ */
+const char *
+_mesa_shader_stage_to_abbrev(unsigned stage)
+{
+ switch (stage) {
+ case MESA_SHADER_VERTEX: return "VS";
+ case MESA_SHADER_FRAGMENT: return "FS";
+ case MESA_SHADER_GEOMETRY: return "GS";
+ case MESA_SHADER_COMPUTE: return "CS";
+ case MESA_SHADER_KERNEL: return "CL";
+ case MESA_SHADER_TESS_CTRL: return "TCS";
+ case MESA_SHADER_TESS_EVAL: return "TES";
+ }
+
+ unreachable("Unknown shader stage.");
+}
+
+const char *
+gl_vert_attrib_name(gl_vert_attrib attrib)
+{
+ static const char *names[] = {
+ ENUM(VERT_ATTRIB_POS),
+ ENUM(VERT_ATTRIB_NORMAL),
+ ENUM(VERT_ATTRIB_COLOR0),
+ ENUM(VERT_ATTRIB_COLOR1),
+ ENUM(VERT_ATTRIB_FOG),
+ ENUM(VERT_ATTRIB_COLOR_INDEX),
+ ENUM(VERT_ATTRIB_EDGEFLAG),
+ ENUM(VERT_ATTRIB_TEX0),
+ ENUM(VERT_ATTRIB_TEX1),
+ ENUM(VERT_ATTRIB_TEX2),
+ ENUM(VERT_ATTRIB_TEX3),
+ ENUM(VERT_ATTRIB_TEX4),
+ ENUM(VERT_ATTRIB_TEX5),
+ ENUM(VERT_ATTRIB_TEX6),
+ ENUM(VERT_ATTRIB_TEX7),
+ ENUM(VERT_ATTRIB_POINT_SIZE),
+ ENUM(VERT_ATTRIB_GENERIC0),
+ ENUM(VERT_ATTRIB_GENERIC1),
+ ENUM(VERT_ATTRIB_GENERIC2),
+ ENUM(VERT_ATTRIB_GENERIC3),
+ ENUM(VERT_ATTRIB_GENERIC4),
+ ENUM(VERT_ATTRIB_GENERIC5),
+ ENUM(VERT_ATTRIB_GENERIC6),
+ ENUM(VERT_ATTRIB_GENERIC7),
+ ENUM(VERT_ATTRIB_GENERIC8),
+ ENUM(VERT_ATTRIB_GENERIC9),
+ ENUM(VERT_ATTRIB_GENERIC10),
+ ENUM(VERT_ATTRIB_GENERIC11),
+ ENUM(VERT_ATTRIB_GENERIC12),
+ ENUM(VERT_ATTRIB_GENERIC13),
+ ENUM(VERT_ATTRIB_GENERIC14),
+ ENUM(VERT_ATTRIB_GENERIC15),
+ };
+ STATIC_ASSERT(ARRAY_SIZE(names) == VERT_ATTRIB_MAX);
+ return NAME(attrib);
+}
+
+const char *
+gl_varying_slot_name(gl_varying_slot slot)
+{
+ static const char *names[] = {
+ ENUM(VARYING_SLOT_POS),
+ ENUM(VARYING_SLOT_COL0),
+ ENUM(VARYING_SLOT_COL1),
+ ENUM(VARYING_SLOT_FOGC),
+ ENUM(VARYING_SLOT_TEX0),
+ ENUM(VARYING_SLOT_TEX1),
+ ENUM(VARYING_SLOT_TEX2),
+ ENUM(VARYING_SLOT_TEX3),
+ ENUM(VARYING_SLOT_TEX4),
+ ENUM(VARYING_SLOT_TEX5),
+ ENUM(VARYING_SLOT_TEX6),
+ ENUM(VARYING_SLOT_TEX7),
+ ENUM(VARYING_SLOT_PSIZ),
+ ENUM(VARYING_SLOT_BFC0),
+ ENUM(VARYING_SLOT_BFC1),
+ ENUM(VARYING_SLOT_EDGE),
+ ENUM(VARYING_SLOT_CLIP_VERTEX),
+ ENUM(VARYING_SLOT_CLIP_DIST0),
+ ENUM(VARYING_SLOT_CLIP_DIST1),
+ ENUM(VARYING_SLOT_CULL_DIST0),
+ ENUM(VARYING_SLOT_CULL_DIST1),
+ ENUM(VARYING_SLOT_PRIMITIVE_ID),
+ ENUM(VARYING_SLOT_LAYER),
+ ENUM(VARYING_SLOT_VIEWPORT),
+ ENUM(VARYING_SLOT_FACE),
+ ENUM(VARYING_SLOT_PNTC),
+ ENUM(VARYING_SLOT_TESS_LEVEL_OUTER),
+ ENUM(VARYING_SLOT_TESS_LEVEL_INNER),
+ ENUM(VARYING_SLOT_BOUNDING_BOX0),
+ ENUM(VARYING_SLOT_BOUNDING_BOX1),
+ ENUM(VARYING_SLOT_VIEW_INDEX),
+ ENUM(VARYING_SLOT_VIEWPORT_MASK),
+ ENUM(VARYING_SLOT_VAR0),
+ ENUM(VARYING_SLOT_VAR1),
+ ENUM(VARYING_SLOT_VAR2),
+ ENUM(VARYING_SLOT_VAR3),
+ ENUM(VARYING_SLOT_VAR4),
+ ENUM(VARYING_SLOT_VAR5),
+ ENUM(VARYING_SLOT_VAR6),
+ ENUM(VARYING_SLOT_VAR7),
+ ENUM(VARYING_SLOT_VAR8),
+ ENUM(VARYING_SLOT_VAR9),
+ ENUM(VARYING_SLOT_VAR10),
+ ENUM(VARYING_SLOT_VAR11),
+ ENUM(VARYING_SLOT_VAR12),
+ ENUM(VARYING_SLOT_VAR13),
+ ENUM(VARYING_SLOT_VAR14),
+ ENUM(VARYING_SLOT_VAR15),
+ ENUM(VARYING_SLOT_VAR16),
+ ENUM(VARYING_SLOT_VAR17),
+ ENUM(VARYING_SLOT_VAR18),
+ ENUM(VARYING_SLOT_VAR19),
+ ENUM(VARYING_SLOT_VAR20),
+ ENUM(VARYING_SLOT_VAR21),
+ ENUM(VARYING_SLOT_VAR22),
+ ENUM(VARYING_SLOT_VAR23),
+ ENUM(VARYING_SLOT_VAR24),
+ ENUM(VARYING_SLOT_VAR25),
+ ENUM(VARYING_SLOT_VAR26),
+ ENUM(VARYING_SLOT_VAR27),
+ ENUM(VARYING_SLOT_VAR28),
+ ENUM(VARYING_SLOT_VAR29),
+ ENUM(VARYING_SLOT_VAR30),
+ ENUM(VARYING_SLOT_VAR31),
+ };
+ STATIC_ASSERT(ARRAY_SIZE(names) == VARYING_SLOT_MAX);
+ return NAME(slot);
+}
+
+const char *
+gl_system_value_name(gl_system_value sysval)
+{
+ static const char *names[] = {
+ ENUM(SYSTEM_VALUE_SUBGROUP_SIZE),
+ ENUM(SYSTEM_VALUE_SUBGROUP_INVOCATION),
+ ENUM(SYSTEM_VALUE_SUBGROUP_EQ_MASK),
+ ENUM(SYSTEM_VALUE_SUBGROUP_GE_MASK),
+ ENUM(SYSTEM_VALUE_SUBGROUP_GT_MASK),
+ ENUM(SYSTEM_VALUE_SUBGROUP_LE_MASK),
+ ENUM(SYSTEM_VALUE_SUBGROUP_LT_MASK),
+ ENUM(SYSTEM_VALUE_NUM_SUBGROUPS),
+ ENUM(SYSTEM_VALUE_SUBGROUP_ID),
+ ENUM(SYSTEM_VALUE_VERTEX_ID),
+ ENUM(SYSTEM_VALUE_INSTANCE_ID),
+ ENUM(SYSTEM_VALUE_INSTANCE_INDEX),
+ ENUM(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE),
+ ENUM(SYSTEM_VALUE_BASE_VERTEX),
+ ENUM(SYSTEM_VALUE_FIRST_VERTEX),
+ ENUM(SYSTEM_VALUE_IS_INDEXED_DRAW),
+ ENUM(SYSTEM_VALUE_BASE_INSTANCE),
+ ENUM(SYSTEM_VALUE_DRAW_ID),
+ ENUM(SYSTEM_VALUE_INVOCATION_ID),
+ ENUM(SYSTEM_VALUE_FRAG_COORD),
+ ENUM(SYSTEM_VALUE_POINT_COORD),
+ ENUM(SYSTEM_VALUE_FRONT_FACE),
+ ENUM(SYSTEM_VALUE_SAMPLE_ID),
+ ENUM(SYSTEM_VALUE_SAMPLE_POS),
+ ENUM(SYSTEM_VALUE_SAMPLE_MASK_IN),
+ ENUM(SYSTEM_VALUE_HELPER_INVOCATION),
+ ENUM(SYSTEM_VALUE_COLOR0),
+ ENUM(SYSTEM_VALUE_COLOR1),
+ ENUM(SYSTEM_VALUE_TESS_COORD),
+ ENUM(SYSTEM_VALUE_VERTICES_IN),
+ ENUM(SYSTEM_VALUE_PRIMITIVE_ID),
+ ENUM(SYSTEM_VALUE_TESS_LEVEL_OUTER),
+ ENUM(SYSTEM_VALUE_TESS_LEVEL_INNER),
+ ENUM(SYSTEM_VALUE_LOCAL_INVOCATION_ID),
+ ENUM(SYSTEM_VALUE_LOCAL_INVOCATION_INDEX),
+ ENUM(SYSTEM_VALUE_GLOBAL_INVOCATION_ID),
+ ENUM(SYSTEM_VALUE_WORK_GROUP_ID),
+ ENUM(SYSTEM_VALUE_NUM_WORK_GROUPS),
+ ENUM(SYSTEM_VALUE_LOCAL_GROUP_SIZE),
+ ENUM(SYSTEM_VALUE_GLOBAL_GROUP_SIZE),
+ ENUM(SYSTEM_VALUE_WORK_DIM),
+ ENUM(SYSTEM_VALUE_DEVICE_INDEX),
+ ENUM(SYSTEM_VALUE_VIEW_INDEX),
+ ENUM(SYSTEM_VALUE_VERTEX_CNT),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE),
+ ENUM(SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL),
+ ENUM(SYSTEM_VALUE_GS_HEADER_IR3),
+ ENUM(SYSTEM_VALUE_TCS_HEADER_IR3),
+ };
+ STATIC_ASSERT(ARRAY_SIZE(names) == SYSTEM_VALUE_MAX);
+ return NAME(sysval);
+}
+
+const char *
+glsl_interp_mode_name(enum glsl_interp_mode qual)
+{
+ static const char *names[] = {
+ ENUM(INTERP_MODE_NONE),
+ ENUM(INTERP_MODE_SMOOTH),
+ ENUM(INTERP_MODE_FLAT),
+ ENUM(INTERP_MODE_NOPERSPECTIVE),
+ ENUM(INTERP_MODE_EXPLICIT),
+ };
+ STATIC_ASSERT(ARRAY_SIZE(names) == INTERP_MODE_COUNT);
+ return NAME(qual);
+}
+
+const char *
+gl_frag_result_name(gl_frag_result result)
+{
+ static const char *names[] = {
+ ENUM(FRAG_RESULT_DEPTH),
+ ENUM(FRAG_RESULT_STENCIL),
+ ENUM(FRAG_RESULT_COLOR),
+ ENUM(FRAG_RESULT_SAMPLE_MASK),
+ ENUM(FRAG_RESULT_DATA0),
+ ENUM(FRAG_RESULT_DATA1),
+ ENUM(FRAG_RESULT_DATA2),
+ ENUM(FRAG_RESULT_DATA3),
+ ENUM(FRAG_RESULT_DATA4),
+ ENUM(FRAG_RESULT_DATA5),
+ ENUM(FRAG_RESULT_DATA6),
+ ENUM(FRAG_RESULT_DATA7),
+ };
+ STATIC_ASSERT(ARRAY_SIZE(names) == FRAG_RESULT_MAX);
+ return NAME(result);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.h
new file mode 100644
index 0000000000..56062894a9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_enums.h
@@ -0,0 +1,902 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef SHADER_ENUMS_H
+#define SHADER_ENUMS_H
+
+#include <stdbool.h>
+
+/* Project-wide (GL and Vulkan) maximum. */
+#define MAX_DRAW_BUFFERS 8
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Shader stages.
+ *
+ * The order must match how shaders are ordered in the pipeline.
+ * The GLSL linker assumes that if i<j, then the j-th shader is
+ * executed later than the i-th shader.
+ */
+typedef enum
+{
+ MESA_SHADER_NONE = -1,
+ MESA_SHADER_VERTEX = 0,
+ MESA_SHADER_TESS_CTRL = 1,
+ MESA_SHADER_TESS_EVAL = 2,
+ MESA_SHADER_GEOMETRY = 3,
+ MESA_SHADER_FRAGMENT = 4,
+ MESA_SHADER_COMPUTE = 5,
+ /* must be last so it doesn't affect the GL pipeline */
+ MESA_SHADER_KERNEL = 6,
+} gl_shader_stage;
+
+static inline bool
+gl_shader_stage_is_compute(gl_shader_stage stage)
+{
+ return stage == MESA_SHADER_COMPUTE || stage == MESA_SHADER_KERNEL;
+}
+
+/**
+ * Number of STATE_* values we need to address any GL state.
+ * Used to dimension arrays.
+ */
+#define STATE_LENGTH 5
+
+typedef short gl_state_index16; /* see enum gl_state_index */
+
+const char *gl_shader_stage_name(gl_shader_stage stage);
+
+/**
+ * Translate a gl_shader_stage to a short shader stage name for debug
+ * printouts and error messages.
+ */
+const char *_mesa_shader_stage_to_string(unsigned stage);
+
+/**
+ * Translate a gl_shader_stage to a shader stage abbreviation (VS, GS, FS)
+ * for debug printouts and error messages.
+ */
+const char *_mesa_shader_stage_to_abbrev(unsigned stage);
+
+/**
+ * GL related stages (not including CL)
+ */
+#define MESA_SHADER_STAGES (MESA_SHADER_COMPUTE + 1)
+
+/**
+ * All stages
+ */
+#define MESA_ALL_SHADER_STAGES (MESA_SHADER_KERNEL + 1)
+
+
+/**
+ * Indexes for vertex program attributes.
+ * GL_NV_vertex_program aliases generic attributes over the conventional
+ * attributes. In GL_ARB_vertex_program shaders the aliasing is optional.
+ * In GL_ARB_vertex_shader / OpenGL 2.0 the aliasing is disallowed (the
+ * generic attributes are distinct/separate).
+ */
+typedef enum
+{
+ VERT_ATTRIB_POS,
+ VERT_ATTRIB_NORMAL,
+ VERT_ATTRIB_COLOR0,
+ VERT_ATTRIB_COLOR1,
+ VERT_ATTRIB_FOG,
+ VERT_ATTRIB_COLOR_INDEX,
+ VERT_ATTRIB_EDGEFLAG,
+ VERT_ATTRIB_TEX0,
+ VERT_ATTRIB_TEX1,
+ VERT_ATTRIB_TEX2,
+ VERT_ATTRIB_TEX3,
+ VERT_ATTRIB_TEX4,
+ VERT_ATTRIB_TEX5,
+ VERT_ATTRIB_TEX6,
+ VERT_ATTRIB_TEX7,
+ VERT_ATTRIB_POINT_SIZE,
+ VERT_ATTRIB_GENERIC0,
+ VERT_ATTRIB_GENERIC1,
+ VERT_ATTRIB_GENERIC2,
+ VERT_ATTRIB_GENERIC3,
+ VERT_ATTRIB_GENERIC4,
+ VERT_ATTRIB_GENERIC5,
+ VERT_ATTRIB_GENERIC6,
+ VERT_ATTRIB_GENERIC7,
+ VERT_ATTRIB_GENERIC8,
+ VERT_ATTRIB_GENERIC9,
+ VERT_ATTRIB_GENERIC10,
+ VERT_ATTRIB_GENERIC11,
+ VERT_ATTRIB_GENERIC12,
+ VERT_ATTRIB_GENERIC13,
+ VERT_ATTRIB_GENERIC14,
+ VERT_ATTRIB_GENERIC15,
+ VERT_ATTRIB_MAX
+} gl_vert_attrib;
+
+const char *gl_vert_attrib_name(gl_vert_attrib attrib);
+
+/**
+ * Symbolic constants to help iterate over
+ * specific blocks of vertex attributes.
+ *
+ * VERT_ATTRIB_FF
+ *    includes all fixed function attributes as well as
+ *    the aliased GL_NV_vertex_program shader attributes.
+ * VERT_ATTRIB_TEX
+ *    includes the classic texture coordinate attributes.
+ *    It is a subset of VERT_ATTRIB_FF.
+ * VERT_ATTRIB_GENERIC
+ *    includes the OpenGL 2.0+ GLSL generic shader attributes.
+ *    These alias the generic GL_ARB_vertex_shader attributes.
+ * VERT_ATTRIB_MAT
+ *    includes the generic shader attributes used to alias
+ *    varying material values for the TNL shader programs.
+ *    They are located at the end of the generic attribute
+ *    block so as not to overlap with the generic 0 attribute.
+ */
+#define VERT_ATTRIB_FF(i) (VERT_ATTRIB_POS + (i))
+#define VERT_ATTRIB_FF_MAX VERT_ATTRIB_GENERIC0
+
+#define VERT_ATTRIB_TEX(i) (VERT_ATTRIB_TEX0 + (i))
+#define VERT_ATTRIB_TEX_MAX MAX_TEXTURE_COORD_UNITS
+
+#define VERT_ATTRIB_GENERIC(i) (VERT_ATTRIB_GENERIC0 + (i))
+#define VERT_ATTRIB_GENERIC_MAX MAX_VERTEX_GENERIC_ATTRIBS
+
+#define VERT_ATTRIB_MAT0 \
+ (VERT_ATTRIB_GENERIC_MAX - VERT_ATTRIB_MAT_MAX)
+#define VERT_ATTRIB_MAT(i) \
+ VERT_ATTRIB_GENERIC((i) + VERT_ATTRIB_MAT0)
+#define VERT_ATTRIB_MAT_MAX MAT_ATTRIB_MAX
+
+/**
+ * Bitflags for vertex attributes.
+ * These are used in bitfields in many places.
+ */
+/*@{*/
+#define VERT_BIT_POS BITFIELD_BIT(VERT_ATTRIB_POS)
+#define VERT_BIT_NORMAL BITFIELD_BIT(VERT_ATTRIB_NORMAL)
+#define VERT_BIT_COLOR0 BITFIELD_BIT(VERT_ATTRIB_COLOR0)
+#define VERT_BIT_COLOR1 BITFIELD_BIT(VERT_ATTRIB_COLOR1)
+#define VERT_BIT_FOG BITFIELD_BIT(VERT_ATTRIB_FOG)
+#define VERT_BIT_COLOR_INDEX BITFIELD_BIT(VERT_ATTRIB_COLOR_INDEX)
+#define VERT_BIT_EDGEFLAG BITFIELD_BIT(VERT_ATTRIB_EDGEFLAG)
+#define VERT_BIT_TEX0 BITFIELD_BIT(VERT_ATTRIB_TEX0)
+#define VERT_BIT_TEX1 BITFIELD_BIT(VERT_ATTRIB_TEX1)
+#define VERT_BIT_TEX2 BITFIELD_BIT(VERT_ATTRIB_TEX2)
+#define VERT_BIT_TEX3 BITFIELD_BIT(VERT_ATTRIB_TEX3)
+#define VERT_BIT_TEX4 BITFIELD_BIT(VERT_ATTRIB_TEX4)
+#define VERT_BIT_TEX5 BITFIELD_BIT(VERT_ATTRIB_TEX5)
+#define VERT_BIT_TEX6 BITFIELD_BIT(VERT_ATTRIB_TEX6)
+#define VERT_BIT_TEX7 BITFIELD_BIT(VERT_ATTRIB_TEX7)
+#define VERT_BIT_POINT_SIZE BITFIELD_BIT(VERT_ATTRIB_POINT_SIZE)
+#define VERT_BIT_GENERIC0 BITFIELD_BIT(VERT_ATTRIB_GENERIC0)
+
+#define VERT_BIT(i) BITFIELD_BIT(i)
+#define VERT_BIT_ALL BITFIELD_RANGE(0, VERT_ATTRIB_MAX)
+
+#define VERT_BIT_FF(i) VERT_BIT(i)
+#define VERT_BIT_FF_ALL BITFIELD_RANGE(0, VERT_ATTRIB_FF_MAX)
+#define VERT_BIT_TEX(i) VERT_BIT(VERT_ATTRIB_TEX(i))
+#define VERT_BIT_TEX_ALL \
+ BITFIELD_RANGE(VERT_ATTRIB_TEX(0), VERT_ATTRIB_TEX_MAX)
+
+#define VERT_BIT_GENERIC(i) VERT_BIT(VERT_ATTRIB_GENERIC(i))
+#define VERT_BIT_GENERIC_ALL \
+ BITFIELD_RANGE(VERT_ATTRIB_GENERIC(0), VERT_ATTRIB_GENERIC_MAX)
+
+#define VERT_BIT_MAT(i) VERT_BIT(VERT_ATTRIB_MAT(i))
+#define VERT_BIT_MAT_ALL \
+ BITFIELD_RANGE(VERT_ATTRIB_MAT(0), VERT_ATTRIB_MAT_MAX)
+/*@}*/
+
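+/* Example (illustrative): the bitflags combine into attribute masks, e.g. for
+ * a program that reads the position and the first generic attribute:
+ *
+ *    uint64_t used = VERT_BIT_POS | VERT_BIT_GENERIC(0);
+ *    assert(used & VERT_BIT(VERT_ATTRIB_POS));
+ */
+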
+#define MAX_VARYING 32 /**< number of float[4] vectors */
+
+/**
+ * Indexes for vertex shader outputs, geometry shader inputs/outputs, and
+ * fragment shader inputs.
+ *
+ * Note that some of these values are not available to all pipeline stages.
+ *
+ * When this enum is updated, the following code must be updated too:
+ * - vertResults (in prog_print.c's arb_output_attrib_string())
+ * - fragAttribs (in prog_print.c's arb_input_attrib_string())
+ * - _mesa_varying_slot_in_fs()
+ */
+typedef enum
+{
+ VARYING_SLOT_POS,
+ VARYING_SLOT_COL0, /* COL0 and COL1 must be contiguous */
+ VARYING_SLOT_COL1,
+ VARYING_SLOT_FOGC,
+ VARYING_SLOT_TEX0, /* TEX0-TEX7 must be contiguous */
+ VARYING_SLOT_TEX1,
+ VARYING_SLOT_TEX2,
+ VARYING_SLOT_TEX3,
+ VARYING_SLOT_TEX4,
+ VARYING_SLOT_TEX5,
+ VARYING_SLOT_TEX6,
+ VARYING_SLOT_TEX7,
+ VARYING_SLOT_PSIZ, /* Does not appear in FS */
+ VARYING_SLOT_BFC0, /* Does not appear in FS */
+ VARYING_SLOT_BFC1, /* Does not appear in FS */
+ VARYING_SLOT_EDGE, /* Does not appear in FS */
+ VARYING_SLOT_CLIP_VERTEX, /* Does not appear in FS */
+ VARYING_SLOT_CLIP_DIST0,
+ VARYING_SLOT_CLIP_DIST1,
+ VARYING_SLOT_CULL_DIST0,
+ VARYING_SLOT_CULL_DIST1,
+ VARYING_SLOT_PRIMITIVE_ID, /* Does not appear in VS */
+ VARYING_SLOT_LAYER, /* Appears as VS or GS output */
+ VARYING_SLOT_VIEWPORT, /* Appears as VS or GS output */
+ VARYING_SLOT_FACE, /* FS only */
+ VARYING_SLOT_PNTC, /* FS only */
+ VARYING_SLOT_TESS_LEVEL_OUTER, /* Only appears as TCS output. */
+ VARYING_SLOT_TESS_LEVEL_INNER, /* Only appears as TCS output. */
+ VARYING_SLOT_BOUNDING_BOX0, /* Only appears as TCS output. */
+ VARYING_SLOT_BOUNDING_BOX1, /* Only appears as TCS output. */
+ VARYING_SLOT_VIEW_INDEX,
+ VARYING_SLOT_VIEWPORT_MASK, /* Does not appear in FS */
+ VARYING_SLOT_VAR0, /* First generic varying slot */
+ /* the remaining are simply for the benefit of gl_varying_slot_name()
+ * and not to be construed as an upper bound:
+ */
+ VARYING_SLOT_VAR1,
+ VARYING_SLOT_VAR2,
+ VARYING_SLOT_VAR3,
+ VARYING_SLOT_VAR4,
+ VARYING_SLOT_VAR5,
+ VARYING_SLOT_VAR6,
+ VARYING_SLOT_VAR7,
+ VARYING_SLOT_VAR8,
+ VARYING_SLOT_VAR9,
+ VARYING_SLOT_VAR10,
+ VARYING_SLOT_VAR11,
+ VARYING_SLOT_VAR12,
+ VARYING_SLOT_VAR13,
+ VARYING_SLOT_VAR14,
+ VARYING_SLOT_VAR15,
+ VARYING_SLOT_VAR16,
+ VARYING_SLOT_VAR17,
+ VARYING_SLOT_VAR18,
+ VARYING_SLOT_VAR19,
+ VARYING_SLOT_VAR20,
+ VARYING_SLOT_VAR21,
+ VARYING_SLOT_VAR22,
+ VARYING_SLOT_VAR23,
+ VARYING_SLOT_VAR24,
+ VARYING_SLOT_VAR25,
+ VARYING_SLOT_VAR26,
+ VARYING_SLOT_VAR27,
+ VARYING_SLOT_VAR28,
+ VARYING_SLOT_VAR29,
+ VARYING_SLOT_VAR30,
+ VARYING_SLOT_VAR31,
+} gl_varying_slot;
+
+
+#define VARYING_SLOT_MAX (VARYING_SLOT_VAR0 + MAX_VARYING)
+#define VARYING_SLOT_PATCH0 (VARYING_SLOT_MAX)
+#define VARYING_SLOT_TESS_MAX (VARYING_SLOT_PATCH0 + MAX_VARYING)
+#define MAX_VARYINGS_INCL_PATCH (VARYING_SLOT_TESS_MAX - VARYING_SLOT_VAR0)
+
+const char *gl_varying_slot_name(gl_varying_slot slot);
+
+/**
+ * Bitflags for varying slots.
+ */
+/*@{*/
+#define VARYING_BIT_POS BITFIELD64_BIT(VARYING_SLOT_POS)
+#define VARYING_BIT_COL0 BITFIELD64_BIT(VARYING_SLOT_COL0)
+#define VARYING_BIT_COL1 BITFIELD64_BIT(VARYING_SLOT_COL1)
+#define VARYING_BIT_FOGC BITFIELD64_BIT(VARYING_SLOT_FOGC)
+#define VARYING_BIT_TEX0 BITFIELD64_BIT(VARYING_SLOT_TEX0)
+#define VARYING_BIT_TEX1 BITFIELD64_BIT(VARYING_SLOT_TEX1)
+#define VARYING_BIT_TEX2 BITFIELD64_BIT(VARYING_SLOT_TEX2)
+#define VARYING_BIT_TEX3 BITFIELD64_BIT(VARYING_SLOT_TEX3)
+#define VARYING_BIT_TEX4 BITFIELD64_BIT(VARYING_SLOT_TEX4)
+#define VARYING_BIT_TEX5 BITFIELD64_BIT(VARYING_SLOT_TEX5)
+#define VARYING_BIT_TEX6 BITFIELD64_BIT(VARYING_SLOT_TEX6)
+#define VARYING_BIT_TEX7 BITFIELD64_BIT(VARYING_SLOT_TEX7)
+#define VARYING_BIT_TEX(U) BITFIELD64_BIT(VARYING_SLOT_TEX0 + (U))
+#define VARYING_BITS_TEX_ANY BITFIELD64_RANGE(VARYING_SLOT_TEX0, \
+ MAX_TEXTURE_COORD_UNITS)
+#define VARYING_BIT_PSIZ BITFIELD64_BIT(VARYING_SLOT_PSIZ)
+#define VARYING_BIT_BFC0 BITFIELD64_BIT(VARYING_SLOT_BFC0)
+#define VARYING_BIT_BFC1 BITFIELD64_BIT(VARYING_SLOT_BFC1)
+#define VARYING_BIT_EDGE BITFIELD64_BIT(VARYING_SLOT_EDGE)
+#define VARYING_BIT_CLIP_VERTEX BITFIELD64_BIT(VARYING_SLOT_CLIP_VERTEX)
+#define VARYING_BIT_CLIP_DIST0 BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST0)
+#define VARYING_BIT_CLIP_DIST1 BITFIELD64_BIT(VARYING_SLOT_CLIP_DIST1)
+#define VARYING_BIT_CULL_DIST0 BITFIELD64_BIT(VARYING_SLOT_CULL_DIST0)
+#define VARYING_BIT_CULL_DIST1 BITFIELD64_BIT(VARYING_SLOT_CULL_DIST1)
+#define VARYING_BIT_PRIMITIVE_ID BITFIELD64_BIT(VARYING_SLOT_PRIMITIVE_ID)
+#define VARYING_BIT_LAYER BITFIELD64_BIT(VARYING_SLOT_LAYER)
+#define VARYING_BIT_VIEWPORT BITFIELD64_BIT(VARYING_SLOT_VIEWPORT)
+#define VARYING_BIT_FACE BITFIELD64_BIT(VARYING_SLOT_FACE)
+#define VARYING_BIT_PNTC BITFIELD64_BIT(VARYING_SLOT_PNTC)
+#define VARYING_BIT_TESS_LEVEL_OUTER BITFIELD64_BIT(VARYING_SLOT_TESS_LEVEL_OUTER)
+#define VARYING_BIT_TESS_LEVEL_INNER BITFIELD64_BIT(VARYING_SLOT_TESS_LEVEL_INNER)
+#define VARYING_BIT_BOUNDING_BOX0 BITFIELD64_BIT(VARYING_SLOT_BOUNDING_BOX0)
+#define VARYING_BIT_BOUNDING_BOX1 BITFIELD64_BIT(VARYING_SLOT_BOUNDING_BOX1)
+#define VARYING_BIT_VIEWPORT_MASK BITFIELD64_BIT(VARYING_SLOT_VIEWPORT_MASK)
+#define VARYING_BIT_VAR(V) BITFIELD64_BIT(VARYING_SLOT_VAR0 + (V))
+/*@}*/
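+
+/* Editorial sketch, not part of the upstream header: the VARYING_BIT_*
+ * helpers form 64-bit masks over gl_varying_slot. A hedged example of how a
+ * linker-style pass could test them, assuming <stdint.h> is available
+ * (the helper name is hypothetical):
+ */
+static inline int
+example_reads_any_texcoord(uint64_t inputs_read)
+{
+   /* VARYING_BITS_TEX_ANY covers VARYING_SLOT_TEX0..TEX7. */
+   return (inputs_read & VARYING_BITS_TEX_ANY) != 0;
+}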
+
+/**
+ * Bitflags for system values.
+ */
+#define SYSTEM_BIT_SAMPLE_ID ((uint64_t)1 << SYSTEM_VALUE_SAMPLE_ID)
+#define SYSTEM_BIT_SAMPLE_POS ((uint64_t)1 << SYSTEM_VALUE_SAMPLE_POS)
+#define SYSTEM_BIT_SAMPLE_MASK_IN ((uint64_t)1 << SYSTEM_VALUE_SAMPLE_MASK_IN)
+#define SYSTEM_BIT_LOCAL_INVOCATION_ID ((uint64_t)1 << SYSTEM_VALUE_LOCAL_INVOCATION_ID)
+
+/**
+ * If the gl_register_file is PROGRAM_SYSTEM_VALUE, the register index will be
+ * one of these values. If a NIR variable's mode is nir_var_system_value, it
+ * will be one of these values.
+ */
+typedef enum
+{
+ /**
+ * \name System values applicable to all shaders
+ */
+ /*@{*/
+
+ /**
+ * Builtin variables added by GL_ARB_shader_ballot.
+ */
+ /*@{*/
+
+ /**
+ * From the GL_ARB_shader_ballot spec:
+ *
+ * "A sub-group is a collection of invocations which execute in lockstep.
+ * The variable <gl_SubGroupSizeARB> is the maximum number of
+ * invocations in a sub-group. The maximum <gl_SubGroupSizeARB>
+ * supported in this extension is 64."
+ *
+ * The spec defines this as a uniform. However, it's highly unlikely that
+ * implementations actually treat it as a uniform (which is loaded from a
+ * constant buffer). Most likely, this is an implementation-wide constant,
+ * or perhaps something that depends on the shader stage.
+ */
+ SYSTEM_VALUE_SUBGROUP_SIZE,
+
+ /**
+ * From the GL_ARB_shader_ballot spec:
+ *
+ * "The variable <gl_SubGroupInvocationARB> holds the index of the
+ * invocation within sub-group. This variable is in the range 0 to
+ * <gl_SubGroupSizeARB>-1, where <gl_SubGroupSizeARB> is the total
+ * number of invocations in a sub-group."
+ */
+ SYSTEM_VALUE_SUBGROUP_INVOCATION,
+
+ /**
+ * From the GL_ARB_shader_ballot spec:
+ *
+ * "The <gl_SubGroup??MaskARB> variables provide a bitmask for all
+ * invocations, with one bit per invocation starting with the least
+ * significant bit, according to the following table,
+ *
+ * variable equation for bit values
+ * -------------------- ------------------------------------
+ * gl_SubGroupEqMaskARB bit index == gl_SubGroupInvocationARB
+ * gl_SubGroupGeMaskARB bit index >= gl_SubGroupInvocationARB
+ * gl_SubGroupGtMaskARB bit index > gl_SubGroupInvocationARB
+ * gl_SubGroupLeMaskARB bit index <= gl_SubGroupInvocationARB
+ * gl_SubGroupLtMaskARB bit index < gl_SubGroupInvocationARB"
+ */
+ SYSTEM_VALUE_SUBGROUP_EQ_MASK,
+ SYSTEM_VALUE_SUBGROUP_GE_MASK,
+ SYSTEM_VALUE_SUBGROUP_GT_MASK,
+ SYSTEM_VALUE_SUBGROUP_LE_MASK,
+ SYSTEM_VALUE_SUBGROUP_LT_MASK,
+ /*@}*/
+
+ /**
+ * Builtin variables added by VK_KHR_subgroups
+ */
+ /*@{*/
+ SYSTEM_VALUE_NUM_SUBGROUPS,
+ SYSTEM_VALUE_SUBGROUP_ID,
+ /*@}*/
+
+ /*@}*/
+
+ /**
+ * \name Vertex shader system values
+ */
+ /*@{*/
+ /**
+ * OpenGL-style vertex ID.
+ *
+ * Section 2.11.7 (Shader Execution), subsection Shader Inputs, of the
+ * OpenGL 3.3 core profile spec says:
+ *
+ * "gl_VertexID holds the integer index i implicitly passed by
+ * DrawArrays or one of the other drawing commands defined in section
+ * 2.8.3."
+ *
+ * Section 2.8.3 (Drawing Commands) of the same spec says:
+ *
+ * "The commands....are equivalent to the commands with the same base
+ * name (without the BaseVertex suffix), except that the ith element
+ * transferred by the corresponding draw call will be taken from
+ * element indices[i] + basevertex of each enabled array."
+ *
+ * Additionally, the overview in the GL_ARB_shader_draw_parameters spec
+ * says:
+ *
+ * "In unextended GL, vertex shaders have inputs named gl_VertexID and
+ * gl_InstanceID, which contain, respectively the index of the vertex
+ * and instance. The value of gl_VertexID is the implicitly passed
+ * index of the vertex being processed, which includes the value of
+ * baseVertex, for those commands that accept it."
+ *
+ * gl_VertexID gets basevertex added in. This differs from DirectX where
+ * SV_VertexID does \b not get basevertex added in.
+ *
+ * \note
+ * If all system values are available, \c SYSTEM_VALUE_VERTEX_ID will be
+ * equal to \c SYSTEM_VALUE_VERTEX_ID_ZERO_BASE plus
+ * \c SYSTEM_VALUE_BASE_VERTEX.
+ *
+ * \sa SYSTEM_VALUE_VERTEX_ID_ZERO_BASE, SYSTEM_VALUE_BASE_VERTEX
+ */
+ SYSTEM_VALUE_VERTEX_ID,
+
+ /**
+ * Instanced ID as supplied to gl_InstanceID
+ *
+ * Values assigned to gl_InstanceID always begin with zero, regardless of
+ * the value of baseinstance.
+ *
+ * Section 11.1.3.9 (Shader Inputs) of the OpenGL 4.4 core profile spec
+ * says:
+ *
+ * "gl_InstanceID holds the integer instance number of the current
+ * primitive in an instanced draw call (see section 10.5)."
+ *
+ * Through a big chain of pseudocode, section 10.5 describes that
+ * baseinstance is not counted by gl_InstanceID. In that section, notice
+ *
+ * "If an enabled vertex attribute array is instanced (it has a
+ * non-zero divisor as specified by VertexAttribDivisor), the element
+ * index that is transferred to the GL, for all vertices, is given by
+ *
+ * floor(instance/divisor) + baseinstance
+ *
+ * If an array corresponding to an attribute required by a vertex
+ * shader is not enabled, then the corresponding element is taken from
+ * the current attribute state (see section 10.2)."
+ *
+ * Note that baseinstance is \b not included in the value of instance.
+ */
+ SYSTEM_VALUE_INSTANCE_ID,
+
+ /**
+ * Vulkan InstanceIndex.
+ *
+ * InstanceIndex = gl_InstanceID + gl_BaseInstance
+ */
+ SYSTEM_VALUE_INSTANCE_INDEX,
+
+ /**
+ * DirectX-style vertex ID.
+ *
+ * Unlike \c SYSTEM_VALUE_VERTEX_ID, this system value does \b not include
+ * the value of basevertex.
+ *
+ * \sa SYSTEM_VALUE_VERTEX_ID, SYSTEM_VALUE_BASE_VERTEX
+ */
+ SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
+
+ /**
+ * Value of \c basevertex passed to \c glDrawElementsBaseVertex and similar
+ * functions.
+ *
+ * \sa SYSTEM_VALUE_VERTEX_ID, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE
+ */
+ SYSTEM_VALUE_BASE_VERTEX,
+
+ /**
+ * Depending on the type of the draw call (indexed or non-indexed), this is
+ * the value of \c basevertex passed to \c glDrawElementsBaseVertex and
+ * similar, or the value of \c first passed to \c glDrawArrays and similar.
+ *
+ * \note
+ * It can be used to calculate the \c SYSTEM_VALUE_VERTEX_ID as
+ * \c SYSTEM_VALUE_VERTEX_ID_ZERO_BASE plus \c SYSTEM_VALUE_FIRST_VERTEX.
+ *
+ * \sa SYSTEM_VALUE_VERTEX_ID_ZERO_BASE, SYSTEM_VALUE_VERTEX_ID
+ */
+ SYSTEM_VALUE_FIRST_VERTEX,
+
+ /**
+ * If the Draw command used to start the rendering was an indexed draw
+ * or not (~0/0). Useful to calculate \c SYSTEM_VALUE_BASE_VERTEX as
+ * \c SYSTEM_VALUE_IS_INDEXED_DRAW & \c SYSTEM_VALUE_FIRST_VERTEX.
+ */
+ SYSTEM_VALUE_IS_INDEXED_DRAW,
+
+ /**
+ * Value of \c baseinstance passed to instanced draw entry points
+ *
+ * \sa SYSTEM_VALUE_INSTANCE_ID
+ */
+ SYSTEM_VALUE_BASE_INSTANCE,
+
+ /**
+ * From ARB_shader_draw_parameters:
+ *
+ * "Additionally, this extension adds a further built-in variable,
+ * gl_DrawID to the shading language. This variable contains the index
+ * of the draw currently being processed by a Multi* variant of a
+ * drawing command (such as MultiDrawElements or
+ * MultiDrawArraysIndirect)."
+ *
+ * If GL_ARB_multi_draw_indirect is not supported, this is always 0.
+ */
+ SYSTEM_VALUE_DRAW_ID,
+ /*@}*/
+
+ /**
+ * \name Geometry shader system values
+ */
+ /*@{*/
+ SYSTEM_VALUE_INVOCATION_ID, /**< (Also in Tessellation Control shader) */
+ /*@}*/
+
+ /**
+ * \name Fragment shader system values
+ */
+ /*@{*/
+ SYSTEM_VALUE_FRAG_COORD,
+ SYSTEM_VALUE_POINT_COORD,
+ SYSTEM_VALUE_FRONT_FACE,
+ SYSTEM_VALUE_SAMPLE_ID,
+ SYSTEM_VALUE_SAMPLE_POS,
+ SYSTEM_VALUE_SAMPLE_MASK_IN,
+ SYSTEM_VALUE_HELPER_INVOCATION,
+ SYSTEM_VALUE_COLOR0,
+ SYSTEM_VALUE_COLOR1,
+ /*@}*/
+
+ /**
+ * \name Tessellation Evaluation shader system values
+ */
+ /*@{*/
+ SYSTEM_VALUE_TESS_COORD,
+ SYSTEM_VALUE_VERTICES_IN, /**< Tessellation vertices in input patch */
+ SYSTEM_VALUE_PRIMITIVE_ID,
+ SYSTEM_VALUE_TESS_LEVEL_OUTER, /**< TES input */
+ SYSTEM_VALUE_TESS_LEVEL_INNER, /**< TES input */
+ SYSTEM_VALUE_TESS_LEVEL_OUTER_DEFAULT, /**< TCS input for passthru TCS */
+ SYSTEM_VALUE_TESS_LEVEL_INNER_DEFAULT, /**< TCS input for passthru TCS */
+ /*@}*/
+
+ /**
+ * \name Compute shader system values
+ */
+ /*@{*/
+ SYSTEM_VALUE_LOCAL_INVOCATION_ID,
+ SYSTEM_VALUE_LOCAL_INVOCATION_INDEX,
+ SYSTEM_VALUE_GLOBAL_INVOCATION_ID,
+ SYSTEM_VALUE_GLOBAL_INVOCATION_INDEX,
+ SYSTEM_VALUE_WORK_GROUP_ID,
+ SYSTEM_VALUE_NUM_WORK_GROUPS,
+ SYSTEM_VALUE_LOCAL_GROUP_SIZE,
+ SYSTEM_VALUE_GLOBAL_GROUP_SIZE,
+ SYSTEM_VALUE_WORK_DIM,
+ SYSTEM_VALUE_USER_DATA_AMD,
+ /*@}*/
+
+ /** Required for VK_KHR_device_group */
+ SYSTEM_VALUE_DEVICE_INDEX,
+
+ /** Required for VK_KHX_multiview */
+ SYSTEM_VALUE_VIEW_INDEX,
+
+ /**
+ * Driver internal vertex-count, used (for example) for drivers to
+ * calculate stride for stream-out outputs. Not externally visible.
+ */
+ SYSTEM_VALUE_VERTEX_CNT,
+
+ /**
+ * Required for AMD_shader_explicit_vertex_parameter and also used for
+ * varying-fetch instructions.
+ *
+ * The _SIZE value is "primitive size", used to scale i/j in primitive
+ * space to pixel space.
+ */
+ SYSTEM_VALUE_BARYCENTRIC_PERSP_PIXEL,
+ SYSTEM_VALUE_BARYCENTRIC_PERSP_SAMPLE,
+ SYSTEM_VALUE_BARYCENTRIC_PERSP_CENTROID,
+ SYSTEM_VALUE_BARYCENTRIC_PERSP_SIZE,
+ SYSTEM_VALUE_BARYCENTRIC_LINEAR_PIXEL,
+ SYSTEM_VALUE_BARYCENTRIC_LINEAR_CENTROID,
+ SYSTEM_VALUE_BARYCENTRIC_LINEAR_SAMPLE,
+ SYSTEM_VALUE_BARYCENTRIC_PULL_MODEL,
+
+ /**
+ * IR3-specific geometry shader and tessellation control shader system
+ * values that pack invocation id, thread id and vertex id. Having this
+ * as a nir level system value lets us do the unpacking in nir.
+ */
+ SYSTEM_VALUE_GS_HEADER_IR3,
+ SYSTEM_VALUE_TCS_HEADER_IR3,
+
+ SYSTEM_VALUE_MAX /**< Number of values */
+} gl_system_value;
+
+const char *gl_system_value_name(gl_system_value sysval);
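+
+/* Editorial sketch, not in the upstream header: the identities documented in
+ * gl_system_value above, written out as code. first_vertex is basevertex for
+ * indexed draws and `first` for non-indexed draws; is_indexed_draw is ~0u or
+ * 0u. Assumes <stdint.h>; the helper names are hypothetical.
+ */
+static inline uint32_t
+example_vertex_id(uint32_t vertex_id_zero_base, uint32_t first_vertex)
+{
+   /* SYSTEM_VALUE_VERTEX_ID == VERTEX_ID_ZERO_BASE + FIRST_VERTEX */
+   return vertex_id_zero_base + first_vertex;
+}
+
+static inline uint32_t
+example_base_vertex(uint32_t is_indexed_draw, uint32_t first_vertex)
+{
+   /* SYSTEM_VALUE_BASE_VERTEX == IS_INDEXED_DRAW & FIRST_VERTEX, since
+    * basevertex only exists for indexed draws. */
+   return is_indexed_draw & first_vertex;
+}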
+
+/**
+ * The possible interpolation qualifiers that can be applied to a fragment
+ * shader input in GLSL.
+ *
+ * Note: INTERP_MODE_NONE must be 0 so that memsetting the
+ * ir_variable data structure to 0 causes the default behavior.
+ */
+enum glsl_interp_mode
+{
+ INTERP_MODE_NONE = 0,
+ INTERP_MODE_SMOOTH,
+ INTERP_MODE_FLAT,
+ INTERP_MODE_NOPERSPECTIVE,
+ INTERP_MODE_EXPLICIT,
+ INTERP_MODE_COUNT /**< Number of interpolation qualifiers */
+};
+
+enum glsl_interface_packing {
+ GLSL_INTERFACE_PACKING_STD140,
+ GLSL_INTERFACE_PACKING_SHARED,
+ GLSL_INTERFACE_PACKING_PACKED,
+ GLSL_INTERFACE_PACKING_STD430
+};
+
+const char *glsl_interp_mode_name(enum glsl_interp_mode qual);
+
+/**
+ * Fragment program results
+ */
+typedef enum
+{
+ FRAG_RESULT_DEPTH = 0,
+ FRAG_RESULT_STENCIL = 1,
+ /* If a single color should be written to all render targets, this
+ * register is written. No FRAG_RESULT_DATAn will be written.
+ */
+ FRAG_RESULT_COLOR = 2,
+ FRAG_RESULT_SAMPLE_MASK = 3,
+
+ /* FRAG_RESULT_DATAn are the per-render-target (GLSL gl_FragData[n]
+ * or ARB_fragment_program fragment.color[n]) color results. If
+ * any are written, FRAG_RESULT_COLOR will not be written.
+ * FRAG_RESULT_DATA1 and up are simply for the benefit of
+ * gl_frag_result_name() and not to be construed as an upper bound
+ */
+ FRAG_RESULT_DATA0 = 4,
+ FRAG_RESULT_DATA1,
+ FRAG_RESULT_DATA2,
+ FRAG_RESULT_DATA3,
+ FRAG_RESULT_DATA4,
+ FRAG_RESULT_DATA5,
+ FRAG_RESULT_DATA6,
+ FRAG_RESULT_DATA7,
+} gl_frag_result;
+
+const char *gl_frag_result_name(gl_frag_result result);
+
+#define FRAG_RESULT_MAX (FRAG_RESULT_DATA0 + MAX_DRAW_BUFFERS)
+
+/**
+ * \brief Layout qualifiers for gl_FragDepth.
+ *
+ * Extension AMD_conservative_depth allows gl_FragDepth to be redeclared with
+ * a layout qualifier.
+ *
+ * \see enum ir_depth_layout
+ */
+enum gl_frag_depth_layout
+{
+ FRAG_DEPTH_LAYOUT_NONE, /**< No layout is specified. */
+ FRAG_DEPTH_LAYOUT_ANY,
+ FRAG_DEPTH_LAYOUT_GREATER,
+ FRAG_DEPTH_LAYOUT_LESS,
+ FRAG_DEPTH_LAYOUT_UNCHANGED
+};
+
+/**
+ * \brief Buffer access qualifiers
+ */
+enum gl_access_qualifier
+{
+ ACCESS_COHERENT = (1 << 0),
+ ACCESS_RESTRICT = (1 << 1),
+ ACCESS_VOLATILE = (1 << 2),
+ ACCESS_NON_READABLE = (1 << 3),
+ ACCESS_NON_WRITEABLE = (1 << 4),
+
+ /** The access may use a non-uniform buffer or image index */
+ ACCESS_NON_UNIFORM = (1 << 5),
+
+ /* This has the same semantics as NIR_INTRINSIC_CAN_REORDER, only to be
+ * used with loads. In other words, it means that the load can be
+ * arbitrarily reordered, or combined with other loads to the same address.
+ * It is implied by ACCESS_NON_WRITEABLE together with ACCESS_RESTRICT, and
+ * a lack of ACCESS_COHERENT and ACCESS_VOLATILE.
+ */
+ ACCESS_CAN_REORDER = (1 << 6),
+
+ /** Use as little cache space as possible. */
+ ACCESS_STREAM_CACHE_POLICY = (1 << 7),
+};
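+
+/* Editorial sketch, not in the upstream header: the implication documented
+ * for ACCESS_CAN_REORDER above, written as a query over an access mask
+ * (the helper name is hypothetical):
+ */
+static inline int
+example_access_can_reorder(unsigned access)
+{
+   const unsigned ro = ACCESS_NON_WRITEABLE | ACCESS_RESTRICT;
+   return (access & ACCESS_CAN_REORDER) ||
+          ((access & ro) == ro &&
+           !(access & (ACCESS_COHERENT | ACCESS_VOLATILE)));
+}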
+
+/**
+ * \brief Blend support qualifiers
+ */
+enum gl_advanced_blend_mode
+{
+ BLEND_NONE = 0x0000,
+
+ BLEND_MULTIPLY = 0x0001,
+ BLEND_SCREEN = 0x0002,
+ BLEND_OVERLAY = 0x0004,
+ BLEND_DARKEN = 0x0008,
+ BLEND_LIGHTEN = 0x0010,
+ BLEND_COLORDODGE = 0x0020,
+ BLEND_COLORBURN = 0x0040,
+ BLEND_HARDLIGHT = 0x0080,
+ BLEND_SOFTLIGHT = 0x0100,
+ BLEND_DIFFERENCE = 0x0200,
+ BLEND_EXCLUSION = 0x0400,
+ BLEND_HSL_HUE = 0x0800,
+ BLEND_HSL_SATURATION = 0x1000,
+ BLEND_HSL_COLOR = 0x2000,
+ BLEND_HSL_LUMINOSITY = 0x4000,
+
+ BLEND_ALL = 0x7fff,
+};
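+
+/* Editorial sketch, not in the upstream header: each advanced blend mode is
+ * a single bit, so a driver's supported set is the bitwise OR of modes and a
+ * support query is a mask test (the helper name is hypothetical):
+ */
+static inline int
+example_blend_mode_supported(unsigned supported_modes,
+                             enum gl_advanced_blend_mode mode)
+{
+   return (supported_modes & mode) != 0;
+}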
+
+enum blend_func
+{
+ BLEND_FUNC_ADD,
+ BLEND_FUNC_SUBTRACT,
+ BLEND_FUNC_REVERSE_SUBTRACT,
+ BLEND_FUNC_MIN,
+ BLEND_FUNC_MAX,
+};
+
+enum blend_factor
+{
+ BLEND_FACTOR_ZERO,
+ BLEND_FACTOR_SRC_COLOR,
+ BLEND_FACTOR_DST_COLOR,
+ BLEND_FACTOR_SRC_ALPHA,
+ BLEND_FACTOR_DST_ALPHA,
+ BLEND_FACTOR_CONSTANT_COLOR,
+ BLEND_FACTOR_CONSTANT_ALPHA,
+ BLEND_FACTOR_SRC_ALPHA_SATURATE,
+};
+
+enum gl_tess_spacing
+{
+ TESS_SPACING_UNSPECIFIED,
+ TESS_SPACING_EQUAL,
+ TESS_SPACING_FRACTIONAL_ODD,
+ TESS_SPACING_FRACTIONAL_EVEN,
+};
+
+/**
+ * A compare function enum for use in compiler lowering passes. This is in
+ * the same order as GL's compare functions (shifted down by GL_NEVER), and is
+ * exactly the same as gallium's PIPE_FUNC_*.
+ */
+enum compare_func
+{
+ COMPARE_FUNC_NEVER,
+ COMPARE_FUNC_LESS,
+ COMPARE_FUNC_EQUAL,
+ COMPARE_FUNC_LEQUAL,
+ COMPARE_FUNC_GREATER,
+ COMPARE_FUNC_NOTEQUAL,
+ COMPARE_FUNC_GEQUAL,
+ COMPARE_FUNC_ALWAYS,
+};
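+
+/* Editorial sketch, not in the upstream header: because the values above are
+ * in GL order shifted down by GL_NEVER (0x0200), converting from a GL compare
+ * function is a single subtraction (the helper name is hypothetical):
+ */
+static inline enum compare_func
+example_compare_func_from_gl(unsigned gl_func)
+{
+   return (enum compare_func)(gl_func - 0x0200 /* GL_NEVER */);
+}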
+
+/**
+ * Arrangements for grouping invocations from NV_compute_shader_derivatives.
+ *
+ * The extension provides new layout qualifiers that support two different
+ * arrangements of compute shader invocations for the purpose of derivative
+ * computation. When specifying
+ *
+ * layout(derivative_group_quadsNV) in;
+ *
+ * compute shader invocations are grouped into 2x2x1 arrays whose four local
+ * invocation ID values follow the pattern:
+ *
+ * +-----------------+------------------+
+ * | (2x+0, 2y+0, z) | (2x+1, 2y+0, z) |
+ * +-----------------+------------------+
+ * | (2x+0, 2y+1, z) | (2x+1, 2y+1, z) |
+ * +-----------------+------------------+
+ *
+ * where Y increases from bottom to top. When specifying
+ *
+ * layout(derivative_group_linearNV) in;
+ *
+ * compute shader invocations are grouped into 2x2x1 arrays whose four local
+ * invocation index values follow the pattern:
+ *
+ * +------+------+
+ * | 4n+0 | 4n+1 |
+ * +------+------+
+ * | 4n+2 | 4n+3 |
+ * +------+------+
+ *
+ * If neither layout qualifier is specified, derivatives in compute shaders
+ * return zero, which is consistent with the handling of built-in texture
+ * functions like texture() in GLSL 4.50 compute shaders.
+ */
+enum gl_derivative_group {
+ DERIVATIVE_GROUP_NONE = 0,
+ DERIVATIVE_GROUP_QUADS,
+ DERIVATIVE_GROUP_LINEAR,
+};
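+
+/* Editorial sketch, not in the upstream header: for DERIVATIVE_GROUP_LINEAR,
+ * the quad an invocation belongs to and its position within the quad follow
+ * directly from the local invocation index, per the 4n+0..4n+3 table above
+ * (the helper name is hypothetical):
+ */
+static inline void
+example_linear_quad_pos(unsigned local_index,
+                        unsigned *quad, unsigned *pos_in_quad)
+{
+   *quad = local_index / 4;        /* the n in 4n+0 .. 4n+3 */
+   *pos_in_quad = local_index % 4; /* 0,1 first row; 2,3 second row */
+}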
+
+enum float_controls
+{
+ FLOAT_CONTROLS_DEFAULT_FLOAT_CONTROL_MODE = 0x0000,
+ FLOAT_CONTROLS_DENORM_PRESERVE_FP16 = 0x0001,
+ FLOAT_CONTROLS_DENORM_PRESERVE_FP32 = 0x0002,
+ FLOAT_CONTROLS_DENORM_PRESERVE_FP64 = 0x0004,
+ FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16 = 0x0008,
+ FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32 = 0x0010,
+ FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64 = 0x0020,
+ FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16 = 0x0040,
+ FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32 = 0x0080,
+ FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64 = 0x0100,
+ FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 = 0x0200,
+ FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 = 0x0400,
+ FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64 = 0x0800,
+ FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16 = 0x1000,
+ FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32 = 0x2000,
+ FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64 = 0x4000,
+};
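+
+/* Editorial sketch, not in the upstream header: float_controls values are
+ * bit flags, so per-width execution modes compose with bitwise OR, e.g.
+ * requesting round-to-nearest-even for all three widths
+ * (the helper name is hypothetical):
+ */
+static inline unsigned
+example_float_controls_rte_all(void)
+{
+   return FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16 |
+          FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32 |
+          FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64;
+}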
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* SHADER_ENUMS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_info.h b/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_info.h
new file mode 100644
index 0000000000..2e22614b75
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/shader_info.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SHADER_INFO_H
+#define SHADER_INFO_H
+
+#include "shader_enums.h"
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct spirv_supported_capabilities {
+ bool address;
+ bool atomic_storage;
+ bool demote_to_helper_invocation;
+ bool derivative_group;
+ bool descriptor_array_dynamic_indexing;
+ bool descriptor_array_non_uniform_indexing;
+ bool descriptor_indexing;
+ bool device_group;
+ bool draw_parameters;
+ bool float64;
+ bool fragment_shader_sample_interlock;
+ bool fragment_shader_pixel_interlock;
+ bool geometry_streams;
+ bool image_ms_array;
+ bool image_read_without_format;
+ bool image_write_without_format;
+ bool int8;
+ bool int16;
+ bool int64;
+ bool int64_atomics;
+ bool integer_functions2;
+ bool kernel;
+ bool min_lod;
+ bool multiview;
+ bool physical_storage_buffer_address;
+ bool post_depth_coverage;
+ bool runtime_descriptor_array;
+ bool float_controls;
+ bool shader_clock;
+ bool shader_viewport_index_layer;
+ bool stencil_export;
+ bool storage_8bit;
+ bool storage_16bit;
+ bool storage_image_ms;
+ bool subgroup_arithmetic;
+ bool subgroup_ballot;
+ bool subgroup_basic;
+ bool subgroup_quad;
+ bool subgroup_shuffle;
+ bool subgroup_vote;
+ bool tessellation;
+ bool transform_feedback;
+ bool variable_pointers;
+ bool vk_memory_model;
+ bool vk_memory_model_device_scope;
+ bool float16;
+ bool amd_fragment_mask;
+ bool amd_gcn_shader;
+ bool amd_shader_ballot;
+ bool amd_trinary_minmax;
+ bool amd_image_read_write_lod;
+ bool amd_shader_explicit_vertex_parameter;
+};
+
+typedef struct shader_info {
+ const char *name;
+
+ /* Descriptive name provided by the client; may be NULL */
+ const char *label;
+
+ /** The shader stage, such as MESA_SHADER_VERTEX. */
+ gl_shader_stage stage:8;
+
+ /** The shader stage in a non-SSO linked program that follows this stage,
+ * such as MESA_SHADER_FRAGMENT.
+ */
+ gl_shader_stage next_stage:8;
+
+ /* Number of textures used by this shader */
+ uint8_t num_textures;
+ /* Number of uniform buffers used by this shader */
+ uint8_t num_ubos;
+ /* Number of atomic buffers used by this shader */
+ uint8_t num_abos;
+ /* Number of shader storage buffers (max .driver_location + 1) used by this
+ * shader. In the case of nir_lower_atomics_to_ssbo being used, this will
+ * be the number of actual SSBOs in gl_program->info, and the lowered SSBOs
+ * and atomic counters in nir_shader->info.
+ */
+ uint8_t num_ssbos;
+ /* Number of images used by this shader */
+ uint8_t num_images;
+ /* Index of the last MSAA image. */
+ int8_t last_msaa_image;
+
+ /* Which inputs are actually read */
+ uint64_t inputs_read;
+ /* Which outputs are actually written */
+ uint64_t outputs_written;
+ /* Which outputs are actually read */
+ uint64_t outputs_read;
+ /* Which system values are actually read */
+ uint64_t system_values_read;
+
+ /* Which patch inputs are actually read */
+ uint32_t patch_inputs_read;
+ /* Which patch outputs are actually written */
+ uint32_t patch_outputs_written;
+ /* Which patch outputs are read */
+ uint32_t patch_outputs_read;
+
+ /* Which inputs are read indirectly (subset of inputs_read) */
+ uint64_t inputs_read_indirectly;
+ /* Which outputs are read or written indirectly */
+ uint64_t outputs_accessed_indirectly;
+ /* Which patch inputs are read indirectly (subset of patch_inputs_read) */
+ uint64_t patch_inputs_read_indirectly;
+ /* Which patch outputs are read or written indirectly */
+ uint64_t patch_outputs_accessed_indirectly;
+
+ /** Bitfield of which textures are used */
+ uint32_t textures_used;
+
+ /** Bitfield of which textures are used by texelFetch() */
+ uint32_t textures_used_by_txf;
+
+ /** Bitfield of which images are used */
+ uint32_t images_used;
+
+ /* SPV_KHR_float_controls: execution mode for floating point ops */
+ uint16_t float_controls_execution_mode;
+
+ /* The size of the gl_ClipDistance[] array, if declared. */
+ uint8_t clip_distance_array_size:4;
+
+ /* The size of the gl_CullDistance[] array, if declared. */
+ uint8_t cull_distance_array_size:4;
+
+ /* Whether or not this shader ever uses textureGather() */
+ bool uses_texture_gather:1;
+
+ /**
+ * True if this shader uses the fddx/fddy opcodes.
+ *
+ * Note that this does not include the "fine" and "coarse" variants.
+ */
+ bool uses_fddx_fddy:1;
+
+ /**
+ * True if this shader uses 64-bit ALU operations
+ */
+ bool uses_64bit:1;
+
+ /* Whether the first UBO is the default uniform buffer, i.e. uniforms. */
+ bool first_ubo_is_default_ubo:1;
+
+ /* Whether or not separate shader objects were used */
+ bool separate_shader:1;
+
+ /** Was this shader linked with any transform feedback varyings? */
+ bool has_transform_feedback_varyings:1;
+
+ /* Whether flrp has been lowered. */
+ bool flrp_lowered:1;
+
+ /* Whether the shader writes memory, including transform feedback. */
+ bool writes_memory:1;
+
+ /* Whether gl_Layer is viewport-relative */
+ bool layer_viewport_relative:1;
+
+ union {
+ struct {
+ /* Which inputs are doubles */
+ uint64_t double_inputs;
+
+ /* For AMD-specific driver-internal shaders. It replaces vertex
+ * buffer loads with code generating VS inputs from scalar registers.
+ *
+ * Valid values: SI_VS_BLIT_SGPRS_POS_*
+ */
+ uint8_t blit_sgprs_amd:4;
+
+ /* True if the shader writes position in window space coordinates pre-transform */
+ bool window_space_position:1;
+ } vs;
+
+ struct {
+ /** The output primitive type (GL enum value) */
+ uint16_t output_primitive;
+
+ /** The input primitive type (GL enum value) */
+ uint16_t input_primitive;
+
+ /** The maximum number of vertices the geometry shader might write. */
+ uint16_t vertices_out;
+
+ /** 1 .. MAX_GEOMETRY_SHADER_INVOCATIONS */
+ uint8_t invocations;
+
+ /** The number of vertices received per input primitive (max. 6) */
+ uint8_t vertices_in:3;
+
+ /** Whether or not this shader uses EndPrimitive */
+ bool uses_end_primitive:1;
+
+ /** Whether or not this shader uses non-zero streams */
+ bool uses_streams:1;
+ } gs;
+
+ struct {
+ bool uses_discard:1;
+ bool uses_demote:1;
+
+ /**
+ * True if this fragment shader requires helper invocations. This
+ * can be caused by the use of ALU derivative ops, texture
+ * instructions which do implicit derivatives, and the use of quad
+ * subgroup operations.
+ */
+ bool needs_helper_invocations:1;
+
+ /**
+ * Whether any inputs are declared with the "sample" qualifier.
+ */
+ bool uses_sample_qualifier:1;
+
+ /**
+ * Whether early fragment tests are enabled as defined by
+ * ARB_shader_image_load_store.
+ */
+ bool early_fragment_tests:1;
+
+ /**
+ * Defined by INTEL_conservative_rasterization.
+ */
+ bool inner_coverage:1;
+
+ bool post_depth_coverage:1;
+
+ /**
+ * \name ARB_fragment_coord_conventions
+ * @{
+ */
+ bool pixel_center_integer:1;
+ bool origin_upper_left:1;
+ /*@}*/
+
+ bool pixel_interlock_ordered:1;
+ bool pixel_interlock_unordered:1;
+ bool sample_interlock_ordered:1;
+ bool sample_interlock_unordered:1;
+
+ /**
+ * Flags whether NIR's base types on the FS color outputs should be
+ * ignored.
+ *
+ * GLSL requires that fragment shader output base types match the
+ * render target's base types for the behavior to be defined. From
+ * the GL 4.6 spec:
+ *
+ * "If the values written by the fragment shader do not match the
+ * format(s) of the corresponding color buffer(s), the result is
+ * undefined."
+ *
+ * However, for NIR shaders translated from TGSI, we don't have the
+ * output types any more, so the driver will need to do whatever
+ * fixups are necessary to handle effectively untyped data being
+ * output from the FS.
+ */
+ bool untyped_color_outputs:1;
+
+ /** gl_FragDepth layout for ARB_conservative_depth. */
+ enum gl_frag_depth_layout depth_layout:3;
+ } fs;
+
+ struct {
+ uint16_t local_size[3];
+ uint16_t max_variable_local_size;
+
+ bool local_size_variable:1;
+ uint8_t user_data_components_amd:3;
+
+ /*
+ * Arrangement of invocations used to calculate derivatives in a compute
+ * shader. From NV_compute_shader_derivatives.
+ */
+ enum gl_derivative_group derivative_group:2;
+
+ /**
+ * Size of shared variables accessed by the compute shader.
+ */
+ unsigned shared_size;
+
+ /**
+ * pointer size is:
+ * AddressingModelLogical: 0 (default)
+ * AddressingModelPhysical32: 32
+ * AddressingModelPhysical64: 64
+ */
+ unsigned ptr_size;
+ } cs;
+
+ /* Applies to both TCS and TES. */
+ struct {
+ uint16_t primitive_mode; /* GL_TRIANGLES, GL_QUADS or GL_ISOLINES */
+
+ /** The number of vertices in the TCS output patch. */
+ uint8_t tcs_vertices_out;
+ enum gl_tess_spacing spacing:2;
+
+ /** Is the vertex order counterclockwise? */
+ bool ccw:1;
+ bool point_mode:1;
+
+ /* Bit mask of TCS per-vertex inputs (VS outputs) that are used
+ * with a vertex index that is NOT the invocation id
+ */
+ uint64_t tcs_cross_invocation_inputs_read;
+
+ /* Bit mask of TCS per-vertex outputs that are used
+ * with a vertex index that is NOT the invocation id
+ */
+ uint64_t tcs_cross_invocation_outputs_read;
+ } tess;
+ };
+} shader_info;
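+
+/* Editorial sketch, not in the upstream header: the *_read/*_written members
+ * are bitmasks indexed by the enums in shader_enums.h, so queries are plain
+ * mask tests (the helper name is hypothetical):
+ */
+static inline bool
+example_fs_reads_pntc(const shader_info *info)
+{
+   return (info->inputs_read & VARYING_BIT_PNTC) != 0;
+}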
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SHADER_INFO_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/gallium/auxiliary/util/u_half.h b/third_party/rust/glslopt/glsl-optimizer/src/gallium/auxiliary/util/u_half.h
new file mode 100644
index 0000000000..bbcc843c31
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/gallium/auxiliary/util/u_half.h
@@ -0,0 +1,143 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Luca Barbieri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_HALF_H
+#define U_HALF_H
+
+#include "pipe/p_compiler.h"
+#include "util/u_math.h"
+#include "util/half_float.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * References for float <-> half conversions
+ *
+ * http://fgiesen.wordpress.com/2012/03/28/half-to-float-done-quic/
+ * https://gist.github.com/2156668
+ * https://gist.github.com/2144712
+ */
+
+static inline uint16_t
+util_float_to_half(float f)
+{
+ return _mesa_float_to_half(f);
+}
+
+static inline uint16_t
+util_float_to_half_rtz(float f)
+{
+ uint32_t sign_mask = 0x80000000;
+ uint32_t round_mask = ~0xfff;
+ uint32_t f32inf = 0xff << 23;
+ uint32_t f16inf = 0x1f << 23;
+ uint32_t sign;
+ union fi magic;
+ union fi f32;
+ uint16_t f16;
+
+ magic.ui = 0xf << 23;
+
+ f32.f = f;
+
+ /* Sign */
+ sign = f32.ui & sign_mask;
+ f32.ui ^= sign;
+
+ if (f32.ui == f32inf) {
+ /* Inf */
+ f16 = 0x7c00;
+ } else if (f32.ui > f32inf) {
+ /* NaN */
+ f16 = 0x7e00;
+ } else {
+ /* Number */
+ f32.ui &= round_mask;
+ f32.f *= magic.f;
+ f32.ui -= round_mask;
+ /*
+ * XXX: The magic mul relies on denorms being available, otherwise
+ * all f16 denorms get flushed to zero - hence when this is used
+ * for tgsi_exec in softpipe we won't get f16 denorms.
+ */
+ /*
+ * Clamp to max finite value if overflowed.
+ * OpenGL has completely undefined rounding behavior for float to
+ * half-float conversions, and this matches what is mandated for float
+ * to fp11/fp10, which recommend round-to-nearest-finite too.
+ * (d3d10 is deeply unhappy about flushing such values to infinity, and
+ * while it also mandates round-to-zero it doesn't care nearly as much
+ * about that.)
+ */
+ if (f32.ui > f16inf)
+ f32.ui = f16inf - 1;
+
+ f16 = f32.ui >> 13;
+ }
+
+ /* Sign */
+ f16 |= sign >> 16;
+
+ return f16;
+}
+
+static inline float
+util_half_to_float(uint16_t f16)
+{
+ union fi infnan;
+ union fi magic;
+ union fi f32;
+
+ infnan.ui = 0x8f << 23;
+ infnan.f = 65536.0f;
+ magic.ui = 0xef << 23;
+
+ /* Exponent / Mantissa */
+ f32.ui = (f16 & 0x7fff) << 13;
+
+ /* Adjust */
+ f32.f *= magic.f;
+ /* XXX: The magic mul relies on denorms being available */
+
+ /* Inf / NaN */
+ if (f32.f >= infnan.f)
+ f32.ui |= 0xff << 23;
+
+ /* Sign */
+ f32.ui |= (uint32_t)(f16 & 0x8000) << 16;
+
+ return f32.f;
+}
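+
+/* Editorial sketch, not in the upstream header: a hedged round-trip helper.
+ * Values representable as half survive unchanged; everything else is
+ * rounded or clamped as described above (the helper name is hypothetical):
+ */
+static inline float
+example_quantize_to_half(float f)
+{
+   return util_half_to_float(util_float_to_half(f));
+}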
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_HALF_H */
+
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_compiler.h b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_compiler.h
new file mode 100644
index 0000000000..8c3a793e33
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_compiler.h
@@ -0,0 +1,179 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef P_COMPILER_H
+#define P_COMPILER_H
+
+
+#include "c99_compat.h" /* inline, __func__, etc. */
+
+#include "p_config.h"
+
+#include "util/macros.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <limits.h>
+
+
+#if defined(_WIN32) && !defined(__WIN32__)
+#define __WIN32__
+#endif
+
+#if defined(_MSC_VER)
+
+#include <intrin.h>
+
+/* Avoid 'expression is always true' warning */
+#pragma warning(disable: 4296)
+
+#endif /* _MSC_VER */
+
+
+/*
+ * Alternative stdint.h and stdbool.h headers are supplied in include/c99 for
+ * systems that lack it.
+ */
+#include <stdint.h>
+#include <stdbool.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if !defined(__HAIKU__) && !defined(__USE_MISC)
+#if !defined(PIPE_OS_ANDROID)
+typedef unsigned int uint;
+#endif
+typedef unsigned short ushort;
+#endif
+typedef unsigned char ubyte;
+
+typedef unsigned char boolean;
+#ifndef TRUE
+#define TRUE true
+#endif
+#ifndef FALSE
+#define FALSE false
+#endif
+
+#ifndef va_copy
+#ifdef __va_copy
+#define va_copy(dest, src) __va_copy((dest), (src))
+#else
+#define va_copy(dest, src) (dest) = (src)
+#endif
+#endif
+
+
+/* XXX: Use standard `__func__` instead */
+#ifndef __FUNCTION__
+# define __FUNCTION__ __func__
+#endif
+
+
+/* This should match linux gcc cdecl semantics everywhere, so that we
+ * just codegen one calling convention on all platforms.
+ */
+#ifdef _MSC_VER
+#define PIPE_CDECL __cdecl
+#else
+#define PIPE_CDECL
+#endif
+
+
+
+#if defined(__GNUC__)
+#define PIPE_DEPRECATED __attribute__((__deprecated__))
+#else
+#define PIPE_DEPRECATED
+#endif
+
+
+
+/* Macros for data alignment. */
+#if defined(__GNUC__)
+
+/* See http://gcc.gnu.org/onlinedocs/gcc-4.4.2/gcc/Type-Attributes.html */
+#define PIPE_ALIGN_TYPE(_alignment, _type) _type __attribute__((aligned(_alignment)))
+
+/* See http://gcc.gnu.org/onlinedocs/gcc-4.4.2/gcc/Variable-Attributes.html */
+#define PIPE_ALIGN_VAR(_alignment) __attribute__((aligned(_alignment)))
+
+#if defined(__GNUC__) && defined(PIPE_ARCH_X86)
+#define PIPE_ALIGN_STACK __attribute__((force_align_arg_pointer))
+#else
+#define PIPE_ALIGN_STACK
+#endif
+
+#elif defined(_MSC_VER)
+
+/* See http://msdn.microsoft.com/en-us/library/83ythb65.aspx */
+#define PIPE_ALIGN_TYPE(_alignment, _type) __declspec(align(_alignment)) _type
+#define PIPE_ALIGN_VAR(_alignment) __declspec(align(_alignment))
+
+#define PIPE_ALIGN_STACK
+
+#elif defined(SWIG)
+
+#define PIPE_ALIGN_TYPE(_alignment, _type) _type
+#define PIPE_ALIGN_VAR(_alignment)
+
+#define PIPE_ALIGN_STACK
+
+#else
+
+#error "Unsupported compiler"
+
+#endif
+
+
+#if defined(__GNUC__)
+
+#define PIPE_READ_WRITE_BARRIER() __asm__("":::"memory")
+
+#elif defined(_MSC_VER)
+
+#define PIPE_READ_WRITE_BARRIER() _ReadWriteBarrier()
+
+#else
+
+#warning "Unsupported compiler"
+#define PIPE_READ_WRITE_BARRIER() /* */
+
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif /* P_COMPILER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_config.h b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_config.h
new file mode 100644
index 0000000000..2c1698d0e9
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_config.h
@@ -0,0 +1,192 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Gallium configuration defines.
+ *
+ * This header file sets several defines based on the compiler, processor
+ * architecture, and operating system being used. These defines should be used
+ * throughout the code to facilitate porting to new platforms. It is likely
+ * that this file will be auto-generated by an autoconf-like tool at some
+ * point, as some things cannot be determined from the pre-defined environment
+ * alone.
+ *
+ * See also:
+ * - http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+ * - echo | gcc -dM -E - | sort
+ * - http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+ *
+ * @author José Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef P_CONFIG_H_
+#define P_CONFIG_H_
+
+#include <limits.h>
+/*
+ * Compiler
+ */
+
+#if defined(__GNUC__)
+#define PIPE_CC_GCC
+#define PIPE_CC_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+#endif
+
+/*
+ * Meaning of _MSC_VER value:
+ * - 1800: Visual Studio 2013
+ * - 1700: Visual Studio 2012
+ * - 1600: Visual Studio 2010
+ * - 1500: Visual Studio 2008
+ * - 1400: Visual C++ 2005
+ * - 1310: Visual C++ .NET 2003
+ * - 1300: Visual C++ .NET 2002
+ *
+ * __MSC__ seems to be an old macro -- it is not pre-defined on recent MSVC
+ * versions.
+ */
+#if defined(_MSC_VER) || defined(__MSC__)
+#define PIPE_CC_MSVC
+#endif
+
+#if defined(__ICL)
+#define PIPE_CC_ICL
+#endif
+
+
+/*
+ * Processor architecture
+ */
+
+#if defined(__i386__) /* gcc */ || defined(_M_IX86) /* msvc */ || defined(_X86_) || defined(__386__) || defined(i386) || defined(__i386) /* Sun cc */
+#define PIPE_ARCH_X86
+#endif
+
+#if defined(__x86_64__) /* gcc */ || defined(_M_X64) /* msvc */ || defined(_M_AMD64) /* msvc */ || defined(__x86_64) /* Sun cc */
+#define PIPE_ARCH_X86_64
+#endif
+
+#if defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)
+#if defined(PIPE_CC_GCC) && !defined(__SSE2__)
+/* #warning SSE2 support requires -msse -msse2 compiler options */
+#else
+#define PIPE_ARCH_SSE
+#endif
+#if defined(PIPE_CC_GCC) && (__GNUC__ * 100 + __GNUC_MINOR__) < 409 && !defined(__SSSE3__)
+/* #warning SSE3 support requires -msse3 compiler options before GCC 4.9 */
+#else
+#define PIPE_ARCH_SSSE3
+#endif
+#endif
+
+#if defined(__ppc__) || defined(__ppc64__) || defined(__PPC__)
+#define PIPE_ARCH_PPC
+#if defined(__ppc64__) || defined(__PPC64__)
+#define PIPE_ARCH_PPC_64
+#endif
+#endif
+
+#if defined(__s390x__)
+#define PIPE_ARCH_S390
+#endif
+
+#if defined(__arm__)
+#define PIPE_ARCH_ARM
+#endif
+
+#if defined(__aarch64__)
+#define PIPE_ARCH_AARCH64
+#endif
+
+/*
+ * Endian detection.
+ */
+
+#include "util/u_endian.h"
+
+/*
+ * Auto-detect the operating system family.
+ */
+#include "util/detect_os.h"
+
+#if DETECT_OS_LINUX
+#define PIPE_OS_LINUX
+#endif
+
+#if DETECT_OS_UNIX
+#define PIPE_OS_UNIX
+#endif
+
+#if DETECT_OS_ANDROID
+#define PIPE_OS_ANDROID
+#endif
+
+#if DETECT_OS_FREEBSD
+#define PIPE_OS_FREEBSD
+#endif
+
+#if DETECT_OS_BSD
+#define PIPE_OS_BSD
+#endif
+
+#if DETECT_OS_OPENBSD
+#define PIPE_OS_OPENBSD
+#endif
+
+#if DETECT_OS_NETBSD
+#define PIPE_OS_NETBSD
+#endif
+
+#if DETECT_OS_DRAGONFLY
+#define PIPE_OS_DRAGONFLY
+#endif
+
+#if DETECT_OS_HURD
+#define PIPE_OS_HURD
+#endif
+
+#if DETECT_OS_SOLARIS
+#define PIPE_OS_SOLARIS
+#endif
+
+#if DETECT_OS_APPLE
+#define PIPE_OS_APPLE
+#endif
+
+#if DETECT_OS_WINDOWS
+#define PIPE_OS_WINDOWS
+#endif
+
+#if DETECT_OS_HAIKU
+#define PIPE_OS_HAIKU
+#endif
+
+#if DETECT_OS_CYGWIN
+#define PIPE_OS_CYGWIN
+#endif
+
+#endif /* P_CONFIG_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_defines.h b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_defines.h
new file mode 100644
index 0000000000..dd0c7331b6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_defines.h
@@ -0,0 +1,1305 @@
+/**************************************************************************
+ *
+ * Copyright 2007 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_DEFINES_H
+#define PIPE_DEFINES_H
+
+#include "p_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Gallium error codes.
+ *
+ * - A zero value always means success.
+ * - A negative value always means failure.
+ * - The meaning of a positive value is function dependent.
+ */
+enum pipe_error
+{
+ PIPE_OK = 0,
+ PIPE_ERROR = -1, /**< Generic error */
+ PIPE_ERROR_BAD_INPUT = -2,
+ PIPE_ERROR_OUT_OF_MEMORY = -3,
+ PIPE_ERROR_RETRY = -4
+ /* TODO */
+};
+
+enum pipe_blendfactor {
+ PIPE_BLENDFACTOR_ONE = 1,
+ PIPE_BLENDFACTOR_SRC_COLOR,
+ PIPE_BLENDFACTOR_SRC_ALPHA,
+ PIPE_BLENDFACTOR_DST_ALPHA,
+ PIPE_BLENDFACTOR_DST_COLOR,
+ PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE,
+ PIPE_BLENDFACTOR_CONST_COLOR,
+ PIPE_BLENDFACTOR_CONST_ALPHA,
+ PIPE_BLENDFACTOR_SRC1_COLOR,
+ PIPE_BLENDFACTOR_SRC1_ALPHA,
+
+ PIPE_BLENDFACTOR_ZERO = 0x11,
+ PIPE_BLENDFACTOR_INV_SRC_COLOR,
+ PIPE_BLENDFACTOR_INV_SRC_ALPHA,
+ PIPE_BLENDFACTOR_INV_DST_ALPHA,
+ PIPE_BLENDFACTOR_INV_DST_COLOR,
+
+ PIPE_BLENDFACTOR_INV_CONST_COLOR = 0x17,
+ PIPE_BLENDFACTOR_INV_CONST_ALPHA,
+ PIPE_BLENDFACTOR_INV_SRC1_COLOR,
+ PIPE_BLENDFACTOR_INV_SRC1_ALPHA,
+};
+
+enum pipe_blend_func {
+ PIPE_BLEND_ADD,
+ PIPE_BLEND_SUBTRACT,
+ PIPE_BLEND_REVERSE_SUBTRACT,
+ PIPE_BLEND_MIN,
+ PIPE_BLEND_MAX,
+};
+
+enum pipe_logicop {
+ PIPE_LOGICOP_CLEAR,
+ PIPE_LOGICOP_NOR,
+ PIPE_LOGICOP_AND_INVERTED,
+ PIPE_LOGICOP_COPY_INVERTED,
+ PIPE_LOGICOP_AND_REVERSE,
+ PIPE_LOGICOP_INVERT,
+ PIPE_LOGICOP_XOR,
+ PIPE_LOGICOP_NAND,
+ PIPE_LOGICOP_AND,
+ PIPE_LOGICOP_EQUIV,
+ PIPE_LOGICOP_NOOP,
+ PIPE_LOGICOP_OR_INVERTED,
+ PIPE_LOGICOP_COPY,
+ PIPE_LOGICOP_OR_REVERSE,
+ PIPE_LOGICOP_OR,
+ PIPE_LOGICOP_SET,
+};
+
+#define PIPE_MASK_R 0x1
+#define PIPE_MASK_G 0x2
+#define PIPE_MASK_B 0x4
+#define PIPE_MASK_A 0x8
+#define PIPE_MASK_RGBA 0xf
+#define PIPE_MASK_Z 0x10
+#define PIPE_MASK_S 0x20
+#define PIPE_MASK_ZS 0x30
+#define PIPE_MASK_RGBAZS (PIPE_MASK_RGBA|PIPE_MASK_ZS)
+
+
+/**
+ * Inequality functions. Used for depth test, stencil compare, alpha
+ * test, shadow compare, etc.
+ */
+enum pipe_compare_func {
+ PIPE_FUNC_NEVER,
+ PIPE_FUNC_LESS,
+ PIPE_FUNC_EQUAL,
+ PIPE_FUNC_LEQUAL,
+ PIPE_FUNC_GREATER,
+ PIPE_FUNC_NOTEQUAL,
+ PIPE_FUNC_GEQUAL,
+ PIPE_FUNC_ALWAYS,
+};
+
+/** Polygon fill mode */
+enum {
+ PIPE_POLYGON_MODE_FILL,
+ PIPE_POLYGON_MODE_LINE,
+ PIPE_POLYGON_MODE_POINT,
+ PIPE_POLYGON_MODE_FILL_RECTANGLE,
+};
+
+/** Polygon face specification, eg for culling */
+#define PIPE_FACE_NONE 0
+#define PIPE_FACE_FRONT 1
+#define PIPE_FACE_BACK 2
+#define PIPE_FACE_FRONT_AND_BACK (PIPE_FACE_FRONT | PIPE_FACE_BACK)
+
+/** Stencil ops */
+enum pipe_stencil_op {
+ PIPE_STENCIL_OP_KEEP,
+ PIPE_STENCIL_OP_ZERO,
+ PIPE_STENCIL_OP_REPLACE,
+ PIPE_STENCIL_OP_INCR,
+ PIPE_STENCIL_OP_DECR,
+ PIPE_STENCIL_OP_INCR_WRAP,
+ PIPE_STENCIL_OP_DECR_WRAP,
+ PIPE_STENCIL_OP_INVERT,
+};
+
+/** Texture types.
+ * See the documentation for info on PIPE_TEXTURE_RECT vs PIPE_TEXTURE_2D
+ */
+enum pipe_texture_target
+{
+ PIPE_BUFFER,
+ PIPE_TEXTURE_1D,
+ PIPE_TEXTURE_2D,
+ PIPE_TEXTURE_3D,
+ PIPE_TEXTURE_CUBE,
+ PIPE_TEXTURE_RECT,
+ PIPE_TEXTURE_1D_ARRAY,
+ PIPE_TEXTURE_2D_ARRAY,
+ PIPE_TEXTURE_CUBE_ARRAY,
+ PIPE_MAX_TEXTURE_TYPES,
+};
+
+enum pipe_tex_face {
+ PIPE_TEX_FACE_POS_X,
+ PIPE_TEX_FACE_NEG_X,
+ PIPE_TEX_FACE_POS_Y,
+ PIPE_TEX_FACE_NEG_Y,
+ PIPE_TEX_FACE_POS_Z,
+ PIPE_TEX_FACE_NEG_Z,
+ PIPE_TEX_FACE_MAX,
+};
+
+enum pipe_tex_wrap {
+ PIPE_TEX_WRAP_REPEAT,
+ PIPE_TEX_WRAP_CLAMP,
+ PIPE_TEX_WRAP_CLAMP_TO_EDGE,
+ PIPE_TEX_WRAP_CLAMP_TO_BORDER,
+ PIPE_TEX_WRAP_MIRROR_REPEAT,
+ PIPE_TEX_WRAP_MIRROR_CLAMP,
+ PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE,
+ PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER,
+};
+
+/** Between mipmaps, ie mipfilter */
+enum pipe_tex_mipfilter {
+ PIPE_TEX_MIPFILTER_NEAREST,
+ PIPE_TEX_MIPFILTER_LINEAR,
+ PIPE_TEX_MIPFILTER_NONE,
+};
+
+/** Within a mipmap, ie min/mag filter */
+enum pipe_tex_filter {
+ PIPE_TEX_FILTER_NEAREST,
+ PIPE_TEX_FILTER_LINEAR,
+};
+
+enum pipe_tex_compare {
+ PIPE_TEX_COMPARE_NONE,
+ PIPE_TEX_COMPARE_R_TO_TEXTURE,
+};
+
+/**
+ * Clear buffer bits
+ */
+#define PIPE_CLEAR_DEPTH (1 << 0)
+#define PIPE_CLEAR_STENCIL (1 << 1)
+#define PIPE_CLEAR_COLOR0 (1 << 2)
+#define PIPE_CLEAR_COLOR1 (1 << 3)
+#define PIPE_CLEAR_COLOR2 (1 << 4)
+#define PIPE_CLEAR_COLOR3 (1 << 5)
+#define PIPE_CLEAR_COLOR4 (1 << 6)
+#define PIPE_CLEAR_COLOR5 (1 << 7)
+#define PIPE_CLEAR_COLOR6 (1 << 8)
+#define PIPE_CLEAR_COLOR7 (1 << 9)
+/** Combined flags */
+/** All color buffers currently bound */
+#define PIPE_CLEAR_COLOR (PIPE_CLEAR_COLOR0 | PIPE_CLEAR_COLOR1 | \
+ PIPE_CLEAR_COLOR2 | PIPE_CLEAR_COLOR3 | \
+ PIPE_CLEAR_COLOR4 | PIPE_CLEAR_COLOR5 | \
+ PIPE_CLEAR_COLOR6 | PIPE_CLEAR_COLOR7)
+#define PIPE_CLEAR_DEPTHSTENCIL (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)
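+
+/* Editorial sketch, not in the upstream header: the PIPE_CLEAR_COLORn bits
+ * are consecutive starting at bit 2, so the bit for color buffer i can be
+ * computed rather than switched on (the helper name is hypothetical):
+ */
+static inline unsigned
+example_clear_color_buffer_bit(unsigned i)
+{
+   return PIPE_CLEAR_COLOR0 << i; /* valid for i in [0, 7] */
+}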
+
+/**
+ * Transfer object usage flags
+ */
+enum pipe_transfer_usage
+{
+ /**
+ * Resource contents read back (or accessed directly) at transfer
+ * create time.
+ */
+ PIPE_TRANSFER_READ = (1 << 0),
+
+ /**
+ * Resource contents will be written back at transfer_unmap
+ * time (or modified as a result of being accessed directly).
+ */
+ PIPE_TRANSFER_WRITE = (1 << 1),
+
+ /**
+ * Read/modify/write
+ */
+ PIPE_TRANSFER_READ_WRITE = PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE,
+
+ /**
+ * The transfer should map the texture storage directly. The driver may
+ * return NULL if that isn't possible, and the state tracker needs to cope
+ * with that and use an alternative path without this flag.
+ *
+ * E.g. the state tracker could have a simpler path which maps textures and
+ * does read/modify/write cycles on them directly, and a more complicated
+ * path which uses minimal read and write transfers.
+ *
+ * This flag suppresses implicit "DISCARD" for buffer_subdata.
+ */
+ PIPE_TRANSFER_MAP_DIRECTLY = (1 << 2),
+
+ /**
+ * Discards the memory within the mapped region.
+ *
+ * It should not be used with PIPE_TRANSFER_READ.
+ *
+ * See also:
+ * - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_RANGE_BIT flag.
+ */
+ PIPE_TRANSFER_DISCARD_RANGE = (1 << 8),
+
+ /**
+ * Fail if the resource cannot be mapped immediately.
+ *
+ * See also:
+ * - Direct3D's D3DLOCK_DONOTWAIT flag.
+ * - Mesa's MESA_MAP_NOWAIT_BIT flag.
+ * - WDDM's D3DDDICB_LOCKFLAGS.DonotWait flag.
+ */
+ PIPE_TRANSFER_DONTBLOCK = (1 << 9),
+
+ /**
+ * Do not attempt to synchronize pending operations on the resource when mapping.
+ *
+ * It should not be used with PIPE_TRANSFER_READ.
+ *
+ * See also:
+ * - OpenGL's ARB_map_buffer_range extension, MAP_UNSYNCHRONIZED_BIT flag.
+ * - Direct3D's D3DLOCK_NOOVERWRITE flag.
+ * - WDDM's D3DDDICB_LOCKFLAGS.IgnoreSync flag.
+ */
+ PIPE_TRANSFER_UNSYNCHRONIZED = (1 << 10),
+
+ /**
+ * Written ranges will be notified later with
+ * pipe_context::transfer_flush_region.
+ *
+ * It should not be used with PIPE_TRANSFER_READ.
+ *
+ * See also:
+ * - pipe_context::transfer_flush_region
+ * - OpenGL's ARB_map_buffer_range extension, MAP_FLUSH_EXPLICIT_BIT flag.
+ */
+ PIPE_TRANSFER_FLUSH_EXPLICIT = (1 << 11),
+
+ /**
+ * Discards all memory backing the resource.
+ *
+ * It should not be used with PIPE_TRANSFER_READ.
+ *
+ * This is equivalent to:
+ * - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_BUFFER_BIT
+ * - BufferData(NULL) on a GL buffer
+ * - Direct3D's D3DLOCK_DISCARD flag.
+ * - WDDM's D3DDDICB_LOCKFLAGS.Discard flag.
+ * - D3D10 DDI's D3D10_DDI_MAP_WRITE_DISCARD flag
+ * - D3D10's D3D10_MAP_WRITE_DISCARD flag.
+ */
+ PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE = (1 << 12),
+
+ /**
+ * Allows the resource to be used for rendering while mapped.
+ *
+ * PIPE_RESOURCE_FLAG_MAP_PERSISTENT must be set when creating
+ * the resource.
+ *
+ * If COHERENT is not set, memory_barrier(PIPE_BARRIER_MAPPED_BUFFER)
+ * must be called to ensure the device can see what the CPU has written.
+ */
+ PIPE_TRANSFER_PERSISTENT = (1 << 13),
+
+ /**
+ * If PERSISTENT is set, this ensures any writes done by the device are
+ * immediately visible to the CPU and vice versa.
+ *
+ * PIPE_RESOURCE_FLAG_MAP_COHERENT must be set when creating
+ * the resource.
+ */
+ PIPE_TRANSFER_COHERENT = (1 << 14),
+
+ /**
+ * Map a resource in a thread-safe manner, because the calling thread can
+ * be any thread. It can only be used if both WRITE and UNSYNCHRONIZED are
+ * set.
+ */
+ PIPE_TRANSFER_THREAD_SAFE = 1 << 15,
+
+ /**
+ * This and higher bits are reserved for private use by drivers. Drivers
+ * should use this as (PIPE_TRANSFER_DRV_PRV << i).
+ */
+ PIPE_TRANSFER_DRV_PRV = (1 << 24)
+};
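+
+/* Editorial sketch, not in the upstream header: a validity check encoding
+ * the constraints documented above -- the DISCARD/UNSYNCHRONIZED/
+ * FLUSH_EXPLICIT flags exclude READ, and THREAD_SAFE requires both WRITE and
+ * UNSYNCHRONIZED (the helper name is hypothetical):
+ */
+static inline bool
+example_transfer_usage_valid(unsigned usage)
+{
+   const unsigned write_only_flags =
+      PIPE_TRANSFER_DISCARD_RANGE |
+      PIPE_TRANSFER_UNSYNCHRONIZED |
+      PIPE_TRANSFER_FLUSH_EXPLICIT |
+      PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
+   const unsigned thread_safe_req =
+      PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED;
+
+   if ((usage & PIPE_TRANSFER_READ) && (usage & write_only_flags))
+      return false;
+   if ((usage & PIPE_TRANSFER_THREAD_SAFE) &&
+       (usage & thread_safe_req) != thread_safe_req)
+      return false;
+   return true;
+}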
+
+/**
+ * Flags for the flush function.
+ */
+enum pipe_flush_flags
+{
+ PIPE_FLUSH_END_OF_FRAME = (1 << 0),
+ PIPE_FLUSH_DEFERRED = (1 << 1),
+ PIPE_FLUSH_FENCE_FD = (1 << 2),
+ PIPE_FLUSH_ASYNC = (1 << 3),
+ PIPE_FLUSH_HINT_FINISH = (1 << 4),
+ PIPE_FLUSH_TOP_OF_PIPE = (1 << 5),
+ PIPE_FLUSH_BOTTOM_OF_PIPE = (1 << 6),
+};
+
+/**
+ * Flags for pipe_context::dump_debug_state.
+ */
+#define PIPE_DUMP_DEVICE_STATUS_REGISTERS (1 << 0)
+
+/**
+ * Create a compute-only context. Use in pipe_screen::context_create.
+ * This disables draw, blit, clear*, render_condition, and other graphics
+ * functions. Interop with other graphics contexts is still allowed.
+ * This allows scheduling jobs on a compute-only hardware command queue that
+ * can run in parallel with graphics without stalling it.
+ */
+#define PIPE_CONTEXT_COMPUTE_ONLY (1 << 0)
+
+/**
+ * Gather debug information and expect that pipe_context::dump_debug_state
+ * will be called. Use in pipe_screen::context_create.
+ */
+#define PIPE_CONTEXT_DEBUG (1 << 1)
+
+/**
+ * Whether out-of-bounds shader loads must return zero and out-of-bounds
+ * shader stores must be dropped.
+ */
+#define PIPE_CONTEXT_ROBUST_BUFFER_ACCESS (1 << 2)
+
+/**
+ * Prefer a threaded pipe_context. It also implies that video codec functions
+ * will not be used (they will be either no-ops or NULL when threading is
+ * enabled).
+ */
+#define PIPE_CONTEXT_PREFER_THREADED (1 << 3)
+
+/**
+ * Create a high priority context.
+ */
+#define PIPE_CONTEXT_HIGH_PRIORITY (1 << 4)
+
+/**
+ * Create a low priority context.
+ */
+#define PIPE_CONTEXT_LOW_PRIORITY (1 << 5)
+
+/** Stop execution if the device is reset. */
+#define PIPE_CONTEXT_LOSE_CONTEXT_ON_RESET (1 << 6)
+
+/**
+ * Flags for pipe_context::memory_barrier.
+ */
+#define PIPE_BARRIER_MAPPED_BUFFER (1 << 0)
+#define PIPE_BARRIER_SHADER_BUFFER (1 << 1)
+#define PIPE_BARRIER_QUERY_BUFFER (1 << 2)
+#define PIPE_BARRIER_VERTEX_BUFFER (1 << 3)
+#define PIPE_BARRIER_INDEX_BUFFER (1 << 4)
+#define PIPE_BARRIER_CONSTANT_BUFFER (1 << 5)
+#define PIPE_BARRIER_INDIRECT_BUFFER (1 << 6)
+#define PIPE_BARRIER_TEXTURE (1 << 7)
+#define PIPE_BARRIER_IMAGE (1 << 8)
+#define PIPE_BARRIER_FRAMEBUFFER (1 << 9)
+#define PIPE_BARRIER_STREAMOUT_BUFFER (1 << 10)
+#define PIPE_BARRIER_GLOBAL_BUFFER (1 << 11)
+#define PIPE_BARRIER_UPDATE_BUFFER (1 << 12)
+#define PIPE_BARRIER_UPDATE_TEXTURE (1 << 13)
+#define PIPE_BARRIER_ALL ((1 << 14) - 1)
+
+#define PIPE_BARRIER_UPDATE \
+ (PIPE_BARRIER_UPDATE_BUFFER | PIPE_BARRIER_UPDATE_TEXTURE)
+
+/**
+ * Flags for pipe_context::texture_barrier.
+ */
+#define PIPE_TEXTURE_BARRIER_SAMPLER (1 << 0)
+#define PIPE_TEXTURE_BARRIER_FRAMEBUFFER (1 << 1)
+
+/**
+ * Resource binding flags -- state tracker must specify in advance all
+ * the ways a resource might be used.
+ */
+#define PIPE_BIND_DEPTH_STENCIL (1 << 0) /* create_surface */
+#define PIPE_BIND_RENDER_TARGET (1 << 1) /* create_surface */
+#define PIPE_BIND_BLENDABLE (1 << 2) /* create_surface */
+#define PIPE_BIND_SAMPLER_VIEW (1 << 3) /* create_sampler_view */
+#define PIPE_BIND_VERTEX_BUFFER (1 << 4) /* set_vertex_buffers */
+#define PIPE_BIND_INDEX_BUFFER (1 << 5) /* draw_elements */
+#define PIPE_BIND_CONSTANT_BUFFER (1 << 6) /* set_constant_buffer */
+#define PIPE_BIND_DISPLAY_TARGET (1 << 7) /* flush_front_buffer */
+/* gap */
+#define PIPE_BIND_STREAM_OUTPUT (1 << 10) /* set_stream_output_buffers */
+#define PIPE_BIND_CURSOR (1 << 11) /* mouse cursor */
+#define PIPE_BIND_CUSTOM (1 << 12) /* state-tracker/winsys usages */
+#define PIPE_BIND_GLOBAL (1 << 13) /* set_global_binding */
+#define PIPE_BIND_SHADER_BUFFER (1 << 14) /* set_shader_buffers */
+#define PIPE_BIND_SHADER_IMAGE (1 << 15) /* set_shader_images */
+#define PIPE_BIND_COMPUTE_RESOURCE (1 << 16) /* set_compute_resources */
+#define PIPE_BIND_COMMAND_ARGS_BUFFER (1 << 17) /* pipe_draw_info.indirect */
+#define PIPE_BIND_QUERY_BUFFER (1 << 18) /* get_query_result_resource */
+
+/**
+ * Most of the bind flags above describe the ways a particular texture can
+ * be bound to the gallium pipeline. The first two flags below were
+ * previously part of the amorphous TEXTURE_USAGE; they do not fit within
+ * that scheme and probably need to be migrated to some other place.
+ *
+ * It seems like scanout is used by the Xorg state tracker to ask for
+ * a texture suitable for actual scanout (hence the name), which
+ * implies extra layout constraints on some hardware. It may also
+ * have some special meaning regarding mouse cursor images.
+ *
+ * The shared flag is quite underspecified, but certainly isn't a
+ * binding flag - it seems more like a message to the winsys to create
+ * a shareable allocation.
+ *
+ * The third flag, PIPE_BIND_LINEAR, forces textures to be created in
+ * linear mode (no tiling).
+ */
+#define PIPE_BIND_SCANOUT (1 << 19) /* */
+#define PIPE_BIND_SHARED (1 << 20) /* get_texture_handle ??? */
+#define PIPE_BIND_LINEAR (1 << 21)
+
+
+/**
+ * Flags for the driver about resource behaviour:
+ */
+#define PIPE_RESOURCE_FLAG_MAP_PERSISTENT (1 << 0)
+#define PIPE_RESOURCE_FLAG_MAP_COHERENT (1 << 1)
+#define PIPE_RESOURCE_FLAG_TEXTURING_MORE_LIKELY (1 << 2)
+#define PIPE_RESOURCE_FLAG_SPARSE (1 << 3)
+#define PIPE_RESOURCE_FLAG_SINGLE_THREAD_USE (1 << 4)
+#define PIPE_RESOURCE_FLAG_DRV_PRIV (1 << 8) /* driver/winsys private */
+#define PIPE_RESOURCE_FLAG_ST_PRIV (1 << 24) /* state-tracker/winsys private */
+
+/**
+ * Hint about the expected lifecycle of a resource.
+ * Sorted according to GPU vs CPU access.
+ */
+enum pipe_resource_usage {
+ PIPE_USAGE_DEFAULT, /* fast GPU access */
+ PIPE_USAGE_IMMUTABLE, /* fast GPU access, immutable */
+ PIPE_USAGE_DYNAMIC, /* uploaded data is used multiple times */
+ PIPE_USAGE_STREAM, /* uploaded data is used once */
+ PIPE_USAGE_STAGING, /* fast CPU access */
+};
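+
+/*
+ * Illustrative sketch: the bind and usage fields above are combined in a
+ * pipe_resource template (defined in p_state.h) passed to
+ * pipe_screen::resource_create. Sizes and formats here are arbitrary:
+ *
+ *    struct pipe_resource templ = {0};
+ *    templ.target     = PIPE_TEXTURE_2D;
+ *    templ.format     = PIPE_FORMAT_B8G8R8A8_UNORM;
+ *    templ.width0     = 256;
+ *    templ.height0    = 256;
+ *    templ.depth0     = 1;
+ *    templ.array_size = 1;
+ *    templ.usage      = PIPE_USAGE_DEFAULT;
+ *    templ.bind       = PIPE_BIND_SAMPLER_VIEW | PIPE_BIND_RENDER_TARGET;
+ *    struct pipe_resource *res = screen->resource_create(screen, &templ);
+ */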
+
+/**
+ * Shaders
+ */
+enum pipe_shader_type {
+ PIPE_SHADER_VERTEX,
+ PIPE_SHADER_FRAGMENT,
+ PIPE_SHADER_GEOMETRY,
+ PIPE_SHADER_TESS_CTRL,
+ PIPE_SHADER_TESS_EVAL,
+ PIPE_SHADER_COMPUTE,
+ PIPE_SHADER_TYPES,
+};
+
+/**
+ * Primitive types:
+ */
+enum pipe_prim_type {
+ PIPE_PRIM_POINTS,
+ PIPE_PRIM_LINES,
+ PIPE_PRIM_LINE_LOOP,
+ PIPE_PRIM_LINE_STRIP,
+ PIPE_PRIM_TRIANGLES,
+ PIPE_PRIM_TRIANGLE_STRIP,
+ PIPE_PRIM_TRIANGLE_FAN,
+ PIPE_PRIM_QUADS,
+ PIPE_PRIM_QUAD_STRIP,
+ PIPE_PRIM_POLYGON,
+ PIPE_PRIM_LINES_ADJACENCY,
+ PIPE_PRIM_LINE_STRIP_ADJACENCY,
+ PIPE_PRIM_TRIANGLES_ADJACENCY,
+ PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY,
+ PIPE_PRIM_PATCHES,
+ PIPE_PRIM_MAX,
+};
+
+/**
+ * Tessellator spacing types
+ */
+enum pipe_tess_spacing {
+ PIPE_TESS_SPACING_FRACTIONAL_ODD,
+ PIPE_TESS_SPACING_FRACTIONAL_EVEN,
+ PIPE_TESS_SPACING_EQUAL,
+};
+
+/**
+ * Query object types
+ */
+enum pipe_query_type {
+ PIPE_QUERY_OCCLUSION_COUNTER,
+ PIPE_QUERY_OCCLUSION_PREDICATE,
+ PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE,
+ PIPE_QUERY_TIMESTAMP,
+ PIPE_QUERY_TIMESTAMP_DISJOINT,
+ PIPE_QUERY_TIME_ELAPSED,
+ PIPE_QUERY_PRIMITIVES_GENERATED,
+ PIPE_QUERY_PRIMITIVES_EMITTED,
+ PIPE_QUERY_SO_STATISTICS,
+ PIPE_QUERY_SO_OVERFLOW_PREDICATE,
+ PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE,
+ PIPE_QUERY_GPU_FINISHED,
+ PIPE_QUERY_PIPELINE_STATISTICS,
+ PIPE_QUERY_PIPELINE_STATISTICS_SINGLE,
+ PIPE_QUERY_TYPES,
+ /* start of driver queries, see pipe_screen::get_driver_query_info */
+ PIPE_QUERY_DRIVER_SPECIFIC = 256,
+};
+
+/**
+ * Index for PIPE_QUERY_PIPELINE_STATISTICS subqueries.
+ */
+enum pipe_statistics_query_index {
+ PIPE_STAT_QUERY_IA_VERTICES,
+ PIPE_STAT_QUERY_IA_PRIMITIVES,
+ PIPE_STAT_QUERY_VS_INVOCATIONS,
+ PIPE_STAT_QUERY_GS_INVOCATIONS,
+ PIPE_STAT_QUERY_GS_PRIMITIVES,
+ PIPE_STAT_QUERY_C_INVOCATIONS,
+ PIPE_STAT_QUERY_C_PRIMITIVES,
+ PIPE_STAT_QUERY_PS_INVOCATIONS,
+ PIPE_STAT_QUERY_HS_INVOCATIONS,
+ PIPE_STAT_QUERY_DS_INVOCATIONS,
+ PIPE_STAT_QUERY_CS_INVOCATIONS,
+};
+
+/**
+ * Conditional rendering modes
+ */
+enum pipe_render_cond_flag {
+ PIPE_RENDER_COND_WAIT,
+ PIPE_RENDER_COND_NO_WAIT,
+ PIPE_RENDER_COND_BY_REGION_WAIT,
+ PIPE_RENDER_COND_BY_REGION_NO_WAIT,
+};
+
+/**
+ * Point sprite coord modes
+ */
+enum pipe_sprite_coord_mode {
+ PIPE_SPRITE_COORD_UPPER_LEFT,
+ PIPE_SPRITE_COORD_LOWER_LEFT,
+};
+
+/**
+ * Texture & format swizzles
+ */
+enum pipe_swizzle {
+ PIPE_SWIZZLE_X,
+ PIPE_SWIZZLE_Y,
+ PIPE_SWIZZLE_Z,
+ PIPE_SWIZZLE_W,
+ PIPE_SWIZZLE_0,
+ PIPE_SWIZZLE_1,
+ PIPE_SWIZZLE_NONE,
+ PIPE_SWIZZLE_MAX, /**< Number of enum values (must be last) */
+};
+
+/**
+ * Viewport swizzles
+ */
+enum pipe_viewport_swizzle {
+ PIPE_VIEWPORT_SWIZZLE_POSITIVE_X,
+ PIPE_VIEWPORT_SWIZZLE_NEGATIVE_X,
+ PIPE_VIEWPORT_SWIZZLE_POSITIVE_Y,
+ PIPE_VIEWPORT_SWIZZLE_NEGATIVE_Y,
+ PIPE_VIEWPORT_SWIZZLE_POSITIVE_Z,
+ PIPE_VIEWPORT_SWIZZLE_NEGATIVE_Z,
+ PIPE_VIEWPORT_SWIZZLE_POSITIVE_W,
+ PIPE_VIEWPORT_SWIZZLE_NEGATIVE_W,
+};
+
+#define PIPE_TIMEOUT_INFINITE 0xffffffffffffffffull
+
+
+/**
+ * Device reset status.
+ */
+enum pipe_reset_status
+{
+ PIPE_NO_RESET,
+ PIPE_GUILTY_CONTEXT_RESET,
+ PIPE_INNOCENT_CONTEXT_RESET,
+ PIPE_UNKNOWN_CONTEXT_RESET,
+};
+
+
+/**
+ * Conservative rasterization modes.
+ */
+enum pipe_conservative_raster_mode
+{
+ PIPE_CONSERVATIVE_RASTER_OFF,
+
+ /**
+ * The post-snap mode means the conservative rasterization occurs after
+ * the conversion from floating-point to fixed-point coordinates
+ * on the subpixel grid.
+ */
+ PIPE_CONSERVATIVE_RASTER_POST_SNAP,
+
+ /**
+ * The pre-snap mode means the conservative rasterization occurs before
+ * the conversion from floating-point to fixed-point coordinates.
+ */
+ PIPE_CONSERVATIVE_RASTER_PRE_SNAP,
+};
+
+
+/**
+ * resource_get_handle flags.
+ */
+/* Requires pipe_context::flush_resource before external use. */
+#define PIPE_HANDLE_USAGE_EXPLICIT_FLUSH (1 << 0)
+/* Expected external use of the resource: */
+#define PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE (1 << 1)
+#define PIPE_HANDLE_USAGE_SHADER_WRITE (1 << 2)
+
+/**
+ * pipe_image_view access flags.
+ */
+#define PIPE_IMAGE_ACCESS_READ (1 << 0)
+#define PIPE_IMAGE_ACCESS_WRITE (1 << 1)
+#define PIPE_IMAGE_ACCESS_READ_WRITE (PIPE_IMAGE_ACCESS_READ | \
+ PIPE_IMAGE_ACCESS_WRITE)
+
+/**
+ * Implementation capabilities/limits which are queried through
+ * pipe_screen::get_param()
+ */
+enum pipe_cap
+{
+ PIPE_CAP_GRAPHICS,
+ PIPE_CAP_NPOT_TEXTURES,
+ PIPE_CAP_MAX_DUAL_SOURCE_RENDER_TARGETS,
+ PIPE_CAP_ANISOTROPIC_FILTER,
+ PIPE_CAP_POINT_SPRITE,
+ PIPE_CAP_MAX_RENDER_TARGETS,
+ PIPE_CAP_OCCLUSION_QUERY,
+ PIPE_CAP_QUERY_TIME_ELAPSED,
+ PIPE_CAP_TEXTURE_SHADOW_MAP,
+ PIPE_CAP_TEXTURE_SWIZZLE,
+ PIPE_CAP_MAX_TEXTURE_2D_SIZE,
+ PIPE_CAP_MAX_TEXTURE_3D_LEVELS,
+ PIPE_CAP_MAX_TEXTURE_CUBE_LEVELS,
+ PIPE_CAP_TEXTURE_MIRROR_CLAMP,
+ PIPE_CAP_BLEND_EQUATION_SEPARATE,
+ PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS,
+ PIPE_CAP_PRIMITIVE_RESTART,
+ /** blend enables and write masks per rendertarget */
+ PIPE_CAP_INDEP_BLEND_ENABLE,
+ /** different blend funcs per rendertarget */
+ PIPE_CAP_INDEP_BLEND_FUNC,
+ PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS,
+ PIPE_CAP_TGSI_FS_COORD_ORIGIN_UPPER_LEFT,
+ PIPE_CAP_TGSI_FS_COORD_ORIGIN_LOWER_LEFT,
+ PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_HALF_INTEGER,
+ PIPE_CAP_TGSI_FS_COORD_PIXEL_CENTER_INTEGER,
+ PIPE_CAP_DEPTH_CLIP_DISABLE,
+ PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE,
+ PIPE_CAP_SHADER_STENCIL_EXPORT,
+ PIPE_CAP_TGSI_INSTANCEID,
+ PIPE_CAP_VERTEX_ELEMENT_INSTANCE_DIVISOR,
+ PIPE_CAP_FRAGMENT_COLOR_CLAMPED,
+ PIPE_CAP_MIXED_COLORBUFFER_FORMATS,
+ PIPE_CAP_SEAMLESS_CUBE_MAP,
+ PIPE_CAP_SEAMLESS_CUBE_MAP_PER_TEXTURE,
+ PIPE_CAP_MIN_TEXEL_OFFSET,
+ PIPE_CAP_MAX_TEXEL_OFFSET,
+ PIPE_CAP_CONDITIONAL_RENDER,
+ PIPE_CAP_TEXTURE_BARRIER,
+ PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS,
+ PIPE_CAP_MAX_STREAM_OUTPUT_INTERLEAVED_COMPONENTS,
+ PIPE_CAP_STREAM_OUTPUT_PAUSE_RESUME,
+ PIPE_CAP_TGSI_CAN_COMPACT_CONSTANTS,
+ PIPE_CAP_VERTEX_COLOR_UNCLAMPED,
+ PIPE_CAP_VERTEX_COLOR_CLAMPED,
+ PIPE_CAP_GLSL_FEATURE_LEVEL,
+ PIPE_CAP_GLSL_FEATURE_LEVEL_COMPATIBILITY,
+ PIPE_CAP_ESSL_FEATURE_LEVEL,
+ PIPE_CAP_QUADS_FOLLOW_PROVOKING_VERTEX_CONVENTION,
+ PIPE_CAP_USER_VERTEX_BUFFERS,
+ PIPE_CAP_VERTEX_BUFFER_OFFSET_4BYTE_ALIGNED_ONLY,
+ PIPE_CAP_VERTEX_BUFFER_STRIDE_4BYTE_ALIGNED_ONLY,
+ PIPE_CAP_VERTEX_ELEMENT_SRC_OFFSET_4BYTE_ALIGNED_ONLY,
+ PIPE_CAP_COMPUTE,
+ PIPE_CAP_CONSTANT_BUFFER_OFFSET_ALIGNMENT,
+ PIPE_CAP_START_INSTANCE,
+ PIPE_CAP_QUERY_TIMESTAMP,
+ PIPE_CAP_TEXTURE_MULTISAMPLE,
+ PIPE_CAP_MIN_MAP_BUFFER_ALIGNMENT,
+ PIPE_CAP_CUBE_MAP_ARRAY,
+ PIPE_CAP_TEXTURE_BUFFER_OBJECTS,
+ PIPE_CAP_TEXTURE_BUFFER_OFFSET_ALIGNMENT,
+ PIPE_CAP_BUFFER_SAMPLER_VIEW_RGBA_ONLY,
+ PIPE_CAP_TGSI_TEXCOORD,
+ PIPE_CAP_PREFER_BLIT_BASED_TEXTURE_TRANSFER,
+ PIPE_CAP_QUERY_PIPELINE_STATISTICS,
+ PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK,
+ PIPE_CAP_MAX_TEXTURE_BUFFER_SIZE,
+ PIPE_CAP_MAX_VIEWPORTS,
+ PIPE_CAP_ENDIANNESS,
+ PIPE_CAP_MIXED_FRAMEBUFFER_SIZES,
+ PIPE_CAP_TGSI_VS_LAYER_VIEWPORT,
+ PIPE_CAP_MAX_GEOMETRY_OUTPUT_VERTICES,
+ PIPE_CAP_MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS,
+ PIPE_CAP_MAX_TEXTURE_GATHER_COMPONENTS,
+ PIPE_CAP_TEXTURE_GATHER_SM5,
+ PIPE_CAP_BUFFER_MAP_PERSISTENT_COHERENT,
+ PIPE_CAP_FAKE_SW_MSAA,
+ PIPE_CAP_TEXTURE_QUERY_LOD,
+ PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET,
+ PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET,
+ PIPE_CAP_SAMPLE_SHADING,
+ PIPE_CAP_TEXTURE_GATHER_OFFSETS,
+ PIPE_CAP_TGSI_VS_WINDOW_SPACE_POSITION,
+ PIPE_CAP_MAX_VERTEX_STREAMS,
+ PIPE_CAP_DRAW_INDIRECT,
+ PIPE_CAP_TGSI_FS_FINE_DERIVATIVE,
+ PIPE_CAP_VENDOR_ID,
+ PIPE_CAP_DEVICE_ID,
+ PIPE_CAP_ACCELERATED,
+ PIPE_CAP_VIDEO_MEMORY,
+ PIPE_CAP_UMA,
+ PIPE_CAP_CONDITIONAL_RENDER_INVERTED,
+ PIPE_CAP_MAX_VERTEX_ATTRIB_STRIDE,
+ PIPE_CAP_SAMPLER_VIEW_TARGET,
+ PIPE_CAP_CLIP_HALFZ,
+ PIPE_CAP_VERTEXID_NOBASE,
+ PIPE_CAP_POLYGON_OFFSET_CLAMP,
+ PIPE_CAP_MULTISAMPLE_Z_RESOLVE,
+ PIPE_CAP_RESOURCE_FROM_USER_MEMORY,
+ PIPE_CAP_DEVICE_RESET_STATUS_QUERY,
+ PIPE_CAP_MAX_SHADER_PATCH_VARYINGS,
+ PIPE_CAP_TEXTURE_FLOAT_LINEAR,
+ PIPE_CAP_TEXTURE_HALF_FLOAT_LINEAR,
+ PIPE_CAP_DEPTH_BOUNDS_TEST,
+ PIPE_CAP_TGSI_TXQS,
+ PIPE_CAP_FORCE_PERSAMPLE_INTERP,
+ PIPE_CAP_SHAREABLE_SHADERS,
+ PIPE_CAP_COPY_BETWEEN_COMPRESSED_AND_PLAIN_FORMATS,
+ PIPE_CAP_CLEAR_TEXTURE,
+ PIPE_CAP_CLEAR_SCISSORED,
+ PIPE_CAP_DRAW_PARAMETERS,
+ PIPE_CAP_TGSI_PACK_HALF_FLOAT,
+ PIPE_CAP_MULTI_DRAW_INDIRECT,
+ PIPE_CAP_MULTI_DRAW_INDIRECT_PARAMS,
+ PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL,
+ PIPE_CAP_TGSI_FS_POINT_IS_SYSVAL,
+ PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL,
+ PIPE_CAP_SHADER_BUFFER_OFFSET_ALIGNMENT,
+ PIPE_CAP_INVALIDATE_BUFFER,
+ PIPE_CAP_GENERATE_MIPMAP,
+ PIPE_CAP_STRING_MARKER,
+ PIPE_CAP_SURFACE_REINTERPRET_BLOCKS,
+ PIPE_CAP_QUERY_BUFFER_OBJECT,
+ PIPE_CAP_QUERY_MEMORY_INFO,
+ PIPE_CAP_PCI_GROUP,
+ PIPE_CAP_PCI_BUS,
+ PIPE_CAP_PCI_DEVICE,
+ PIPE_CAP_PCI_FUNCTION,
+ PIPE_CAP_FRAMEBUFFER_NO_ATTACHMENT,
+ PIPE_CAP_ROBUST_BUFFER_ACCESS_BEHAVIOR,
+ PIPE_CAP_CULL_DISTANCE,
+ PIPE_CAP_PRIMITIVE_RESTART_FOR_PATCHES,
+ PIPE_CAP_TGSI_VOTE,
+ PIPE_CAP_MAX_WINDOW_RECTANGLES,
+ PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED,
+ PIPE_CAP_VIEWPORT_SUBPIXEL_BITS,
+ PIPE_CAP_RASTERIZER_SUBPIXEL_BITS,
+ PIPE_CAP_MIXED_COLOR_DEPTH_BITS,
+ PIPE_CAP_TGSI_ARRAY_COMPONENTS,
+ PIPE_CAP_STREAM_OUTPUT_INTERLEAVE_BUFFERS,
+ PIPE_CAP_TGSI_CAN_READ_OUTPUTS,
+ PIPE_CAP_NATIVE_FENCE_FD,
+ PIPE_CAP_GLSL_OPTIMIZE_CONSERVATIVELY,
+ PIPE_CAP_GLSL_TESS_LEVELS_AS_INPUTS,
+ PIPE_CAP_FBFETCH,
+ PIPE_CAP_TGSI_MUL_ZERO_WINS,
+ PIPE_CAP_DOUBLES,
+ PIPE_CAP_INT64,
+ PIPE_CAP_INT64_DIVMOD,
+ PIPE_CAP_TGSI_TEX_TXF_LZ,
+ PIPE_CAP_TGSI_CLOCK,
+ PIPE_CAP_POLYGON_MODE_FILL_RECTANGLE,
+ PIPE_CAP_SPARSE_BUFFER_PAGE_SIZE,
+ PIPE_CAP_TGSI_BALLOT,
+ PIPE_CAP_TGSI_TES_LAYER_VIEWPORT,
+ PIPE_CAP_CAN_BIND_CONST_BUFFER_AS_VERTEX,
+ PIPE_CAP_ALLOW_MAPPED_BUFFERS_DURING_EXECUTION,
+ PIPE_CAP_POST_DEPTH_COVERAGE,
+ PIPE_CAP_BINDLESS_TEXTURE,
+ PIPE_CAP_NIR_SAMPLERS_AS_DEREF,
+ PIPE_CAP_QUERY_SO_OVERFLOW,
+ PIPE_CAP_MEMOBJ,
+ PIPE_CAP_LOAD_CONSTBUF,
+ PIPE_CAP_TGSI_ANY_REG_AS_ADDRESS,
+ PIPE_CAP_TILE_RASTER_ORDER,
+ PIPE_CAP_MAX_COMBINED_SHADER_OUTPUT_RESOURCES,
+ PIPE_CAP_FRAMEBUFFER_MSAA_CONSTRAINTS,
+ PIPE_CAP_SIGNED_VERTEX_BUFFER_OFFSET,
+ PIPE_CAP_CONTEXT_PRIORITY_MASK,
+ PIPE_CAP_FENCE_SIGNAL,
+ PIPE_CAP_CONSTBUF0_FLAGS,
+ PIPE_CAP_PACKED_UNIFORMS,
+ PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_TRIANGLES,
+ PIPE_CAP_CONSERVATIVE_RASTER_POST_SNAP_POINTS_LINES,
+ PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_TRIANGLES,
+ PIPE_CAP_CONSERVATIVE_RASTER_PRE_SNAP_POINTS_LINES,
+ PIPE_CAP_MAX_CONSERVATIVE_RASTER_SUBPIXEL_PRECISION_BIAS,
+ PIPE_CAP_CONSERVATIVE_RASTER_POST_DEPTH_COVERAGE,
+ PIPE_CAP_CONSERVATIVE_RASTER_INNER_COVERAGE,
+ PIPE_CAP_PROGRAMMABLE_SAMPLE_LOCATIONS,
+ PIPE_CAP_MAX_GS_INVOCATIONS,
+ PIPE_CAP_MAX_SHADER_BUFFER_SIZE,
+ PIPE_CAP_TEXTURE_MIRROR_CLAMP_TO_EDGE,
+ PIPE_CAP_MAX_COMBINED_SHADER_BUFFERS,
+ PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTERS,
+ PIPE_CAP_MAX_COMBINED_HW_ATOMIC_COUNTER_BUFFERS,
+ PIPE_CAP_MAX_TEXTURE_UPLOAD_MEMORY_BUDGET,
+ PIPE_CAP_MAX_VERTEX_ELEMENT_SRC_OFFSET,
+ PIPE_CAP_SURFACE_SAMPLE_COUNT,
+ PIPE_CAP_TGSI_ATOMFADD,
+ PIPE_CAP_QUERY_PIPELINE_STATISTICS_SINGLE,
+ PIPE_CAP_RGB_OVERRIDE_DST_ALPHA_BLEND,
+ PIPE_CAP_DEST_SURFACE_SRGB_CONTROL,
+ PIPE_CAP_NIR_COMPACT_ARRAYS,
+ PIPE_CAP_MAX_VARYINGS,
+ PIPE_CAP_COMPUTE_GRID_INFO_LAST_BLOCK,
+ PIPE_CAP_COMPUTE_SHADER_DERIVATIVES,
+ PIPE_CAP_TGSI_SKIP_SHRINK_IO_ARRAYS,
+ PIPE_CAP_IMAGE_LOAD_FORMATTED,
+ PIPE_CAP_THROTTLE,
+ PIPE_CAP_DMABUF,
+ PIPE_CAP_PREFER_COMPUTE_FOR_MULTIMEDIA,
+ PIPE_CAP_FRAGMENT_SHADER_INTERLOCK,
+ PIPE_CAP_FBFETCH_COHERENT,
+ PIPE_CAP_CS_DERIVED_SYSTEM_VALUES_SUPPORTED,
+ PIPE_CAP_ATOMIC_FLOAT_MINMAX,
+ PIPE_CAP_TGSI_DIV,
+ PIPE_CAP_FRAGMENT_SHADER_TEXTURE_LOD,
+ PIPE_CAP_FRAGMENT_SHADER_DERIVATIVES,
+ PIPE_CAP_VERTEX_SHADER_SATURATE,
+ PIPE_CAP_TEXTURE_SHADOW_LOD,
+ PIPE_CAP_SHADER_SAMPLES_IDENTICAL,
+ PIPE_CAP_TGSI_ATOMINC_WRAP,
+ PIPE_CAP_PREFER_IMM_ARRAYS_AS_CONSTBUF,
+ PIPE_CAP_GL_SPIRV,
+ PIPE_CAP_GL_SPIRV_VARIABLE_POINTERS,
+ PIPE_CAP_DEMOTE_TO_HELPER_INVOCATION,
+ PIPE_CAP_TGSI_TG4_COMPONENT_IN_SWIZZLE,
+ PIPE_CAP_FLATSHADE,
+ PIPE_CAP_ALPHA_TEST,
+ PIPE_CAP_POINT_SIZE_FIXED,
+ PIPE_CAP_TWO_SIDED_COLOR,
+ PIPE_CAP_CLIP_PLANES,
+ PIPE_CAP_MAX_VERTEX_BUFFERS,
+ PIPE_CAP_OPENCL_INTEGER_FUNCTIONS,
+ PIPE_CAP_INTEGER_MULTIPLY_32X16,
+ /* Turn draw, dispatch, blit into NOOP */
+ PIPE_CAP_FRONTEND_NOOP,
+ PIPE_CAP_NIR_IMAGES_AS_DEREF,
+ PIPE_CAP_PACKED_STREAM_OUTPUT,
+ PIPE_CAP_VIEWPORT_TRANSFORM_LOWERED,
+ PIPE_CAP_PSIZ_CLAMPED,
+ PIPE_CAP_DRAW_INFO_START_WITH_USER_INDICES,
+ PIPE_CAP_GL_BEGIN_END_BUFFER_SIZE,
+ PIPE_CAP_VIEWPORT_SWIZZLE,
+ PIPE_CAP_SYSTEM_SVM,
+ PIPE_CAP_VIEWPORT_MASK,
+ PIPE_CAP_ALPHA_TO_COVERAGE_DITHER_CONTROL,
+ PIPE_CAP_MAP_UNSYNCHRONIZED_THREAD_SAFE,
+};
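+
+/*
+ * Illustrative sketch: caps are integer queries against the screen
+ * (assuming `screen` is a valid pipe_screen):
+ *
+ *    if (screen->get_param(screen, PIPE_CAP_COMPUTE)) {
+ *       // compute support is available
+ *    }
+ *    int max_rts = screen->get_param(screen, PIPE_CAP_MAX_RENDER_TARGETS);
+ */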
+
+/**
+ * Possible bits for PIPE_CAP_CONTEXT_PRIORITY_MASK param, which should
+ * return a bitmask of the supported priorities. If the driver does not
+ * support prioritized contexts, it can return 0.
+ *
+ * Note that these match __DRI2_RENDERER_HAS_CONTEXT_PRIORITY_*
+ */
+#define PIPE_CONTEXT_PRIORITY_LOW (1 << 0)
+#define PIPE_CONTEXT_PRIORITY_MEDIUM (1 << 1)
+#define PIPE_CONTEXT_PRIORITY_HIGH (1 << 2)
+
+#define PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_NV50 (1 << 0)
+#define PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600 (1 << 1)
+
+enum pipe_endian
+{
+ PIPE_ENDIAN_LITTLE = 0,
+ PIPE_ENDIAN_BIG = 1,
+#if UTIL_ARCH_LITTLE_ENDIAN
+ PIPE_ENDIAN_NATIVE = PIPE_ENDIAN_LITTLE
+#elif UTIL_ARCH_BIG_ENDIAN
+ PIPE_ENDIAN_NATIVE = PIPE_ENDIAN_BIG
+#endif
+};
+
+/**
+ * Implementation limits which are queried through
+ * pipe_screen::get_paramf()
+ */
+enum pipe_capf
+{
+ PIPE_CAPF_MAX_LINE_WIDTH,
+ PIPE_CAPF_MAX_LINE_WIDTH_AA,
+ PIPE_CAPF_MAX_POINT_WIDTH,
+ PIPE_CAPF_MAX_POINT_WIDTH_AA,
+ PIPE_CAPF_MAX_TEXTURE_ANISOTROPY,
+ PIPE_CAPF_MAX_TEXTURE_LOD_BIAS,
+ PIPE_CAPF_MIN_CONSERVATIVE_RASTER_DILATE,
+ PIPE_CAPF_MAX_CONSERVATIVE_RASTER_DILATE,
+ PIPE_CAPF_CONSERVATIVE_RASTER_DILATE_GRANULARITY,
+};
+
+/** Shader caps not specific to any single stage */
+enum pipe_shader_cap
+{
+ PIPE_SHADER_CAP_MAX_INSTRUCTIONS, /* if 0, it means the stage is unsupported */
+ PIPE_SHADER_CAP_MAX_ALU_INSTRUCTIONS,
+ PIPE_SHADER_CAP_MAX_TEX_INSTRUCTIONS,
+ PIPE_SHADER_CAP_MAX_TEX_INDIRECTIONS,
+ PIPE_SHADER_CAP_MAX_CONTROL_FLOW_DEPTH,
+ PIPE_SHADER_CAP_MAX_INPUTS,
+ PIPE_SHADER_CAP_MAX_OUTPUTS,
+ PIPE_SHADER_CAP_MAX_CONST_BUFFER_SIZE,
+ PIPE_SHADER_CAP_MAX_CONST_BUFFERS,
+ PIPE_SHADER_CAP_MAX_TEMPS,
+ /* boolean caps */
+ PIPE_SHADER_CAP_TGSI_CONT_SUPPORTED,
+ PIPE_SHADER_CAP_INDIRECT_INPUT_ADDR,
+ PIPE_SHADER_CAP_INDIRECT_OUTPUT_ADDR,
+ PIPE_SHADER_CAP_INDIRECT_TEMP_ADDR,
+ PIPE_SHADER_CAP_INDIRECT_CONST_ADDR,
+ PIPE_SHADER_CAP_SUBROUTINES, /* BGNSUB, ENDSUB, CAL, RET */
+ PIPE_SHADER_CAP_INTEGERS,
+ PIPE_SHADER_CAP_INT64_ATOMICS,
+ PIPE_SHADER_CAP_FP16,
+ PIPE_SHADER_CAP_MAX_TEXTURE_SAMPLERS,
+ PIPE_SHADER_CAP_PREFERRED_IR,
+ PIPE_SHADER_CAP_TGSI_SQRT_SUPPORTED,
+ PIPE_SHADER_CAP_MAX_SAMPLER_VIEWS,
+ PIPE_SHADER_CAP_TGSI_DROUND_SUPPORTED, /* all rounding modes */
+ PIPE_SHADER_CAP_TGSI_DFRACEXP_DLDEXP_SUPPORTED,
+ PIPE_SHADER_CAP_TGSI_FMA_SUPPORTED,
+ PIPE_SHADER_CAP_TGSI_ANY_INOUT_DECL_RANGE,
+ PIPE_SHADER_CAP_MAX_UNROLL_ITERATIONS_HINT,
+ PIPE_SHADER_CAP_MAX_SHADER_BUFFERS,
+ PIPE_SHADER_CAP_SUPPORTED_IRS,
+ PIPE_SHADER_CAP_MAX_SHADER_IMAGES,
+ PIPE_SHADER_CAP_LOWER_IF_THRESHOLD,
+ PIPE_SHADER_CAP_TGSI_SKIP_MERGE_REGISTERS,
+ PIPE_SHADER_CAP_TGSI_LDEXP_SUPPORTED,
+ PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTERS,
+ PIPE_SHADER_CAP_MAX_HW_ATOMIC_COUNTER_BUFFERS,
+};
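+
+/*
+ * Illustrative sketch: per-stage caps take the shader stage as an extra
+ * argument; a zero PIPE_SHADER_CAP_MAX_INSTRUCTIONS result means the stage
+ * is unsupported:
+ *
+ *    bool has_gs =
+ *       screen->get_shader_param(screen, PIPE_SHADER_GEOMETRY,
+ *                                PIPE_SHADER_CAP_MAX_INSTRUCTIONS) > 0;
+ */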
+
+/**
+ * Shader intermediate representation.
+ *
+ * Note that if the driver requests something other than TGSI, it must
+ * always be prepared to receive TGSI in addition to its preferred IR.
+ * If the driver requests TGSI as its preferred IR, it will *always*
+ * get TGSI.
+ *
+ * Note that PIPE_SHADER_IR_TGSI should be zero for backwards compat with
+ * state trackers that only understand TGSI.
+ */
+enum pipe_shader_ir
+{
+ PIPE_SHADER_IR_TGSI = 0,
+ PIPE_SHADER_IR_NATIVE,
+ PIPE_SHADER_IR_NIR,
+ PIPE_SHADER_IR_NIR_SERIALIZED,
+};
+
+/**
+ * Compute-specific implementation capability. They can be queried
+ * using pipe_screen::get_compute_param.
+ */
+enum pipe_compute_cap
+{
+ PIPE_COMPUTE_CAP_ADDRESS_BITS,
+ PIPE_COMPUTE_CAP_IR_TARGET,
+ PIPE_COMPUTE_CAP_GRID_DIMENSION,
+ PIPE_COMPUTE_CAP_MAX_GRID_SIZE,
+ PIPE_COMPUTE_CAP_MAX_BLOCK_SIZE,
+ PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
+ PIPE_COMPUTE_CAP_MAX_GLOBAL_SIZE,
+ PIPE_COMPUTE_CAP_MAX_LOCAL_SIZE,
+ PIPE_COMPUTE_CAP_MAX_PRIVATE_SIZE,
+ PIPE_COMPUTE_CAP_MAX_INPUT_SIZE,
+ PIPE_COMPUTE_CAP_MAX_MEM_ALLOC_SIZE,
+ PIPE_COMPUTE_CAP_MAX_CLOCK_FREQUENCY,
+ PIPE_COMPUTE_CAP_MAX_COMPUTE_UNITS,
+ PIPE_COMPUTE_CAP_IMAGES_SUPPORTED,
+ PIPE_COMPUTE_CAP_SUBGROUP_SIZE,
+ PIPE_COMPUTE_CAP_MAX_VARIABLE_THREADS_PER_BLOCK,
+};
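+
+/*
+ * Illustrative sketch: unlike get_param, get_compute_param writes its
+ * result through a pointer, and the result type depends on the cap
+ * (a uint64_t here, by assumption, for MAX_THREADS_PER_BLOCK):
+ *
+ *    uint64_t max_threads = 0;
+ *    screen->get_compute_param(screen, PIPE_SHADER_IR_NIR,
+ *                              PIPE_COMPUTE_CAP_MAX_THREADS_PER_BLOCK,
+ *                              &max_threads);
+ */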
+
+/**
+ * Resource parameters. They can be queried using
+ * pipe_screen::get_resource_param.
+ */
+enum pipe_resource_param
+{
+ PIPE_RESOURCE_PARAM_NPLANES,
+ PIPE_RESOURCE_PARAM_STRIDE,
+ PIPE_RESOURCE_PARAM_OFFSET,
+ PIPE_RESOURCE_PARAM_MODIFIER,
+ PIPE_RESOURCE_PARAM_HANDLE_TYPE_SHARED,
+ PIPE_RESOURCE_PARAM_HANDLE_TYPE_KMS,
+ PIPE_RESOURCE_PARAM_HANDLE_TYPE_FD,
+};
+
+/**
+ * Types of parameters for pipe_context::set_context_param.
+ */
+enum pipe_context_param
+{
+ /* A hint for the driver that it should pin its execution threads to
+ * a group of cores sharing a specific L3 cache if the CPU has multiple
+ * L3 caches. This is needed for good multithreading performance on
+ * AMD Zen CPUs. "value" is the L3 cache index. Drivers that don't have
+ * any internal threads or don't run on affected CPUs can ignore this.
+ */
+ PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE,
+};
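+
+/*
+ * Illustrative sketch (assuming pipe_context::set_context_param takes the
+ * param enum plus an unsigned value): pinning driver threads to the L3
+ * cache with index `cache_index`:
+ *
+ *    ctx->set_context_param(ctx,
+ *                           PIPE_CONTEXT_PARAM_PIN_THREADS_TO_L3_CACHE,
+ *                           cache_index);
+ */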
+
+/**
+ * Composite query types
+ */
+
+/**
+ * Query result for PIPE_QUERY_SO_STATISTICS.
+ */
+struct pipe_query_data_so_statistics
+{
+ uint64_t num_primitives_written;
+ uint64_t primitives_storage_needed;
+};
+
+/**
+ * Query result for PIPE_QUERY_TIMESTAMP_DISJOINT.
+ */
+struct pipe_query_data_timestamp_disjoint
+{
+ uint64_t frequency;
+ bool disjoint;
+};
+
+/**
+ * Query result for PIPE_QUERY_PIPELINE_STATISTICS.
+ */
+struct pipe_query_data_pipeline_statistics
+{
+ uint64_t ia_vertices; /**< Num vertices read by the vertex fetcher. */
+ uint64_t ia_primitives; /**< Num primitives read by the vertex fetcher. */
+ uint64_t vs_invocations; /**< Num vertex shader invocations. */
+ uint64_t gs_invocations; /**< Num geometry shader invocations. */
+ uint64_t gs_primitives; /**< Num primitives output by a geometry shader. */
+ uint64_t c_invocations; /**< Num primitives sent to the rasterizer. */
+ uint64_t c_primitives; /**< Num primitives that were rendered. */
+ uint64_t ps_invocations; /**< Num pixel shader invocations. */
+ uint64_t hs_invocations; /**< Num hull shader invocations. */
+ uint64_t ds_invocations; /**< Num domain shader invocations. */
+ uint64_t cs_invocations; /**< Num compute shader invocations. */
+};
+
+/**
+ * For batch queries.
+ */
+union pipe_numeric_type_union
+{
+ uint64_t u64;
+ uint32_t u32;
+ float f;
+};
+
+/**
+ * Query result (returned by pipe_context::get_query_result).
+ */
+union pipe_query_result
+{
+ /* PIPE_QUERY_OCCLUSION_PREDICATE */
+ /* PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE */
+ /* PIPE_QUERY_SO_OVERFLOW_PREDICATE */
+ /* PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE */
+ /* PIPE_QUERY_GPU_FINISHED */
+ bool b;
+
+ /* PIPE_QUERY_OCCLUSION_COUNTER */
+ /* PIPE_QUERY_TIMESTAMP */
+ /* PIPE_QUERY_TIME_ELAPSED */
+ /* PIPE_QUERY_PRIMITIVES_GENERATED */
+ /* PIPE_QUERY_PRIMITIVES_EMITTED */
+ /* PIPE_DRIVER_QUERY_TYPE_UINT64 */
+ /* PIPE_DRIVER_QUERY_TYPE_BYTES */
+ /* PIPE_DRIVER_QUERY_TYPE_MICROSECONDS */
+ /* PIPE_DRIVER_QUERY_TYPE_HZ */
+ uint64_t u64;
+
+ /* PIPE_DRIVER_QUERY_TYPE_UINT */
+ uint32_t u32;
+
+ /* PIPE_DRIVER_QUERY_TYPE_FLOAT */
+ /* PIPE_DRIVER_QUERY_TYPE_PERCENTAGE */
+ float f;
+
+ /* PIPE_QUERY_SO_STATISTICS */
+ struct pipe_query_data_so_statistics so_statistics;
+
+ /* PIPE_QUERY_TIMESTAMP_DISJOINT */
+ struct pipe_query_data_timestamp_disjoint timestamp_disjoint;
+
+ /* PIPE_QUERY_PIPELINE_STATISTICS */
+ struct pipe_query_data_pipeline_statistics pipeline_statistics;
+
+ /* batch queries (variable length) */
+ union pipe_numeric_type_union batch[1];
+};
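+
+/*
+ * Illustrative sketch: the caller picks the union member matching the
+ * query type (assuming `query` is an active PIPE_QUERY_PIPELINE_STATISTICS
+ * object and blocking on the result is acceptable):
+ *
+ *    union pipe_query_result result;
+ *    if (ctx->get_query_result(ctx, query, true, &result)) {
+ *       uint64_t clipper_invocations =
+ *          result.pipeline_statistics.c_invocations;
+ *    }
+ */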
+
+enum pipe_query_value_type
+{
+ PIPE_QUERY_TYPE_I32,
+ PIPE_QUERY_TYPE_U32,
+ PIPE_QUERY_TYPE_I64,
+ PIPE_QUERY_TYPE_U64,
+};
+
+union pipe_color_union
+{
+ float f[4];
+ int i[4];
+ unsigned int ui[4];
+};
+
+enum pipe_driver_query_type
+{
+ PIPE_DRIVER_QUERY_TYPE_UINT64,
+ PIPE_DRIVER_QUERY_TYPE_UINT,
+ PIPE_DRIVER_QUERY_TYPE_FLOAT,
+ PIPE_DRIVER_QUERY_TYPE_PERCENTAGE,
+ PIPE_DRIVER_QUERY_TYPE_BYTES,
+ PIPE_DRIVER_QUERY_TYPE_MICROSECONDS,
+ PIPE_DRIVER_QUERY_TYPE_HZ,
+ PIPE_DRIVER_QUERY_TYPE_DBM,
+ PIPE_DRIVER_QUERY_TYPE_TEMPERATURE,
+ PIPE_DRIVER_QUERY_TYPE_VOLTS,
+ PIPE_DRIVER_QUERY_TYPE_AMPS,
+ PIPE_DRIVER_QUERY_TYPE_WATTS,
+};
+
+/* Whether an average value per frame or a cumulative value should be
+ * displayed.
+ */
+enum pipe_driver_query_result_type
+{
+ PIPE_DRIVER_QUERY_RESULT_TYPE_AVERAGE,
+ PIPE_DRIVER_QUERY_RESULT_TYPE_CUMULATIVE,
+};
+
+/**
+ * Some hardware requires its hardware-specific queries to be submitted
+ * as batched queries. The corresponding query objects are created using
+ * create_batch_query, and at most one such query may be active at
+ * any time.
+ */
+#define PIPE_DRIVER_QUERY_FLAG_BATCH (1 << 0)
+
+/* Do not list this query in the HUD. */
+#define PIPE_DRIVER_QUERY_FLAG_DONT_LIST (1 << 1)
+
+struct pipe_driver_query_info
+{
+ const char *name;
+ unsigned query_type; /* PIPE_QUERY_DRIVER_SPECIFIC + i */
+ union pipe_numeric_type_union max_value; /* max value that can be returned */
+ enum pipe_driver_query_type type;
+ enum pipe_driver_query_result_type result_type;
+ unsigned group_id;
+ unsigned flags;
+};
+
+struct pipe_driver_query_group_info
+{
+ const char *name;
+ unsigned max_active_queries;
+ unsigned num_queries;
+};
+
+enum pipe_fd_type
+{
+ PIPE_FD_TYPE_NATIVE_SYNC,
+ PIPE_FD_TYPE_SYNCOBJ,
+};
+
+/**
+ * Counter type and counter data type enums used by the
+ * INTEL_performance_query API in gallium drivers.
+ */
+enum pipe_perf_counter_type
+{
+ PIPE_PERF_COUNTER_TYPE_EVENT,
+ PIPE_PERF_COUNTER_TYPE_DURATION_NORM,
+ PIPE_PERF_COUNTER_TYPE_DURATION_RAW,
+ PIPE_PERF_COUNTER_TYPE_THROUGHPUT,
+ PIPE_PERF_COUNTER_TYPE_RAW,
+ PIPE_PERF_COUNTER_TYPE_TIMESTAMP,
+};
+
+enum pipe_perf_counter_data_type
+{
+ PIPE_PERF_COUNTER_DATA_TYPE_BOOL32,
+ PIPE_PERF_COUNTER_DATA_TYPE_UINT32,
+ PIPE_PERF_COUNTER_DATA_TYPE_UINT64,
+ PIPE_PERF_COUNTER_DATA_TYPE_FLOAT,
+ PIPE_PERF_COUNTER_DATA_TYPE_DOUBLE,
+};
+
+#define PIPE_UUID_SIZE 16
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_format.h b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_format.h
new file mode 100644
index 0000000000..a51843caa2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_format.h
@@ -0,0 +1,587 @@
+/**************************************************************************
+ *
+ * Copyright 2007 VMware, Inc.
+ * Copyright (c) 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef PIPE_FORMAT_H
+#define PIPE_FORMAT_H
+
+#include "p_config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Formats for textures, surfaces and vertex data
+ */
+enum pipe_format {
+ PIPE_FORMAT_NONE,
+ PIPE_FORMAT_B8G8R8A8_UNORM,
+ PIPE_FORMAT_B8G8R8X8_UNORM,
+ PIPE_FORMAT_A8R8G8B8_UNORM,
+ PIPE_FORMAT_X8R8G8B8_UNORM,
+ PIPE_FORMAT_B5G5R5A1_UNORM,
+ PIPE_FORMAT_R4G4B4A4_UNORM,
+ PIPE_FORMAT_B4G4R4A4_UNORM,
+ PIPE_FORMAT_R5G6B5_UNORM,
+ PIPE_FORMAT_B5G6R5_UNORM,
+ PIPE_FORMAT_R10G10B10A2_UNORM,
+ PIPE_FORMAT_L8_UNORM, /**< ubyte luminance */
+ PIPE_FORMAT_A8_UNORM, /**< ubyte alpha */
+ PIPE_FORMAT_I8_UNORM, /**< ubyte intensity */
+ PIPE_FORMAT_L8A8_UNORM, /**< ubyte alpha, luminance */
+ PIPE_FORMAT_L16_UNORM, /**< ushort luminance */
+ PIPE_FORMAT_UYVY,
+ PIPE_FORMAT_YUYV,
+ PIPE_FORMAT_Z16_UNORM,
+ PIPE_FORMAT_Z32_UNORM,
+ PIPE_FORMAT_Z32_FLOAT,
+ PIPE_FORMAT_Z24_UNORM_S8_UINT,
+ PIPE_FORMAT_S8_UINT_Z24_UNORM,
+ PIPE_FORMAT_Z24X8_UNORM,
+ PIPE_FORMAT_X8Z24_UNORM,
+ PIPE_FORMAT_S8_UINT, /**< ubyte stencil */
+ PIPE_FORMAT_R64_FLOAT,
+ PIPE_FORMAT_R64G64_FLOAT,
+ PIPE_FORMAT_R64G64B64_FLOAT,
+ PIPE_FORMAT_R64G64B64A64_FLOAT,
+ PIPE_FORMAT_R32_FLOAT,
+ PIPE_FORMAT_R32G32_FLOAT,
+ PIPE_FORMAT_R32G32B32_FLOAT,
+ PIPE_FORMAT_R32G32B32A32_FLOAT,
+ PIPE_FORMAT_R32_UNORM,
+ PIPE_FORMAT_R32G32_UNORM,
+ PIPE_FORMAT_R32G32B32_UNORM,
+ PIPE_FORMAT_R32G32B32A32_UNORM,
+ PIPE_FORMAT_R32_USCALED,
+ PIPE_FORMAT_R32G32_USCALED,
+ PIPE_FORMAT_R32G32B32_USCALED,
+ PIPE_FORMAT_R32G32B32A32_USCALED,
+ PIPE_FORMAT_R32_SNORM,
+ PIPE_FORMAT_R32G32_SNORM,
+ PIPE_FORMAT_R32G32B32_SNORM,
+ PIPE_FORMAT_R32G32B32A32_SNORM,
+ PIPE_FORMAT_R32_SSCALED,
+ PIPE_FORMAT_R32G32_SSCALED,
+ PIPE_FORMAT_R32G32B32_SSCALED,
+ PIPE_FORMAT_R32G32B32A32_SSCALED,
+ PIPE_FORMAT_R16_UNORM,
+ PIPE_FORMAT_R16G16_UNORM,
+ PIPE_FORMAT_R16G16B16_UNORM,
+ PIPE_FORMAT_R16G16B16A16_UNORM,
+ PIPE_FORMAT_R16_USCALED,
+ PIPE_FORMAT_R16G16_USCALED,
+ PIPE_FORMAT_R16G16B16_USCALED,
+ PIPE_FORMAT_R16G16B16A16_USCALED,
+ PIPE_FORMAT_R16_SNORM,
+ PIPE_FORMAT_R16G16_SNORM,
+ PIPE_FORMAT_R16G16B16_SNORM,
+ PIPE_FORMAT_R16G16B16A16_SNORM,
+ PIPE_FORMAT_R16_SSCALED,
+ PIPE_FORMAT_R16G16_SSCALED,
+ PIPE_FORMAT_R16G16B16_SSCALED,
+ PIPE_FORMAT_R16G16B16A16_SSCALED,
+ PIPE_FORMAT_R8_UNORM,
+ PIPE_FORMAT_R8G8_UNORM,
+ PIPE_FORMAT_R8G8B8_UNORM,
+ PIPE_FORMAT_B8G8R8_UNORM,
+ PIPE_FORMAT_R8G8B8A8_UNORM,
+ PIPE_FORMAT_X8B8G8R8_UNORM,
+ PIPE_FORMAT_R8_USCALED,
+ PIPE_FORMAT_R8G8_USCALED,
+ PIPE_FORMAT_R8G8B8_USCALED,
+ PIPE_FORMAT_B8G8R8_USCALED,
+ PIPE_FORMAT_R8G8B8A8_USCALED,
+ PIPE_FORMAT_B8G8R8A8_USCALED,
+ PIPE_FORMAT_A8B8G8R8_USCALED,
+ PIPE_FORMAT_R8_SNORM,
+ PIPE_FORMAT_R8G8_SNORM,
+ PIPE_FORMAT_R8G8B8_SNORM,
+ PIPE_FORMAT_B8G8R8_SNORM,
+ PIPE_FORMAT_R8G8B8A8_SNORM,
+ PIPE_FORMAT_B8G8R8A8_SNORM,
+ PIPE_FORMAT_R8_SSCALED,
+ PIPE_FORMAT_R8G8_SSCALED,
+ PIPE_FORMAT_R8G8B8_SSCALED,
+ PIPE_FORMAT_B8G8R8_SSCALED,
+ PIPE_FORMAT_R8G8B8A8_SSCALED,
+ PIPE_FORMAT_B8G8R8A8_SSCALED,
+ PIPE_FORMAT_A8B8G8R8_SSCALED,
+ PIPE_FORMAT_R32_FIXED,
+ PIPE_FORMAT_R32G32_FIXED,
+ PIPE_FORMAT_R32G32B32_FIXED,
+ PIPE_FORMAT_R32G32B32A32_FIXED,
+ PIPE_FORMAT_R16_FLOAT,
+ PIPE_FORMAT_R16G16_FLOAT,
+ PIPE_FORMAT_R16G16B16_FLOAT,
+ PIPE_FORMAT_R16G16B16A16_FLOAT,
+
+ /* sRGB formats */
+ PIPE_FORMAT_L8_SRGB,
+ PIPE_FORMAT_R8_SRGB,
+ PIPE_FORMAT_L8A8_SRGB,
+ PIPE_FORMAT_R8G8_SRGB,
+ PIPE_FORMAT_R8G8B8_SRGB,
+ PIPE_FORMAT_B8G8R8_SRGB,
+ PIPE_FORMAT_A8B8G8R8_SRGB,
+ PIPE_FORMAT_X8B8G8R8_SRGB,
+ PIPE_FORMAT_B8G8R8A8_SRGB,
+ PIPE_FORMAT_B8G8R8X8_SRGB,
+ PIPE_FORMAT_A8R8G8B8_SRGB,
+ PIPE_FORMAT_X8R8G8B8_SRGB,
+ PIPE_FORMAT_R8G8B8A8_SRGB,
+
+ /* compressed formats */
+ PIPE_FORMAT_DXT1_RGB,
+ PIPE_FORMAT_DXT1_RGBA,
+ PIPE_FORMAT_DXT3_RGBA,
+ PIPE_FORMAT_DXT5_RGBA,
+
+ /* sRGB, compressed */
+ PIPE_FORMAT_DXT1_SRGB,
+ PIPE_FORMAT_DXT1_SRGBA,
+ PIPE_FORMAT_DXT3_SRGBA,
+ PIPE_FORMAT_DXT5_SRGBA,
+
+ /* rgtc compressed */
+ PIPE_FORMAT_RGTC1_UNORM,
+ PIPE_FORMAT_RGTC1_SNORM,
+ PIPE_FORMAT_RGTC2_UNORM,
+ PIPE_FORMAT_RGTC2_SNORM,
+
+ PIPE_FORMAT_R8G8_B8G8_UNORM,
+ PIPE_FORMAT_G8R8_G8B8_UNORM,
+
+ /* mixed formats */
+ PIPE_FORMAT_R8SG8SB8UX8U_NORM,
+ PIPE_FORMAT_R5SG5SB6U_NORM,
+
+ /* TODO: re-order these */
+ PIPE_FORMAT_A8B8G8R8_UNORM,
+ PIPE_FORMAT_B5G5R5X1_UNORM,
+ PIPE_FORMAT_R10G10B10A2_USCALED,
+ PIPE_FORMAT_R11G11B10_FLOAT,
+ PIPE_FORMAT_R9G9B9E5_FLOAT,
+ PIPE_FORMAT_Z32_FLOAT_S8X24_UINT,
+ PIPE_FORMAT_R1_UNORM,
+ PIPE_FORMAT_R10G10B10X2_USCALED,
+ PIPE_FORMAT_R10G10B10X2_SNORM,
+ PIPE_FORMAT_L4A4_UNORM,
+ PIPE_FORMAT_A2R10G10B10_UNORM,
+ PIPE_FORMAT_A2B10G10R10_UNORM,
+ PIPE_FORMAT_B10G10R10A2_UNORM,
+ PIPE_FORMAT_R10SG10SB10SA2U_NORM,
+ PIPE_FORMAT_R8G8Bx_SNORM,
+ PIPE_FORMAT_R8G8B8X8_UNORM,
+ PIPE_FORMAT_B4G4R4X4_UNORM,
+
+ /* some stencil samplers formats */
+ PIPE_FORMAT_X24S8_UINT,
+ PIPE_FORMAT_S8X24_UINT,
+ PIPE_FORMAT_X32_S8X24_UINT,
+
+ PIPE_FORMAT_R3G3B2_UNORM,
+ PIPE_FORMAT_B2G3R3_UNORM,
+ PIPE_FORMAT_L16A16_UNORM,
+ PIPE_FORMAT_A16_UNORM,
+ PIPE_FORMAT_I16_UNORM,
+
+ PIPE_FORMAT_LATC1_UNORM,
+ PIPE_FORMAT_LATC1_SNORM,
+ PIPE_FORMAT_LATC2_UNORM,
+ PIPE_FORMAT_LATC2_SNORM,
+
+ PIPE_FORMAT_A8_SNORM,
+ PIPE_FORMAT_L8_SNORM,
+ PIPE_FORMAT_L8A8_SNORM,
+ PIPE_FORMAT_I8_SNORM,
+ PIPE_FORMAT_A16_SNORM,
+ PIPE_FORMAT_L16_SNORM,
+ PIPE_FORMAT_L16A16_SNORM,
+ PIPE_FORMAT_I16_SNORM,
+
+ PIPE_FORMAT_A16_FLOAT,
+ PIPE_FORMAT_L16_FLOAT,
+ PIPE_FORMAT_L16A16_FLOAT,
+ PIPE_FORMAT_I16_FLOAT,
+ PIPE_FORMAT_A32_FLOAT,
+ PIPE_FORMAT_L32_FLOAT,
+ PIPE_FORMAT_L32A32_FLOAT,
+ PIPE_FORMAT_I32_FLOAT,
+
+ PIPE_FORMAT_YV12,
+ PIPE_FORMAT_YV16,
+ PIPE_FORMAT_IYUV, /**< aka I420 */
+ PIPE_FORMAT_NV12,
+ PIPE_FORMAT_NV21,
+
+ PIPE_FORMAT_A4R4_UNORM,
+ PIPE_FORMAT_R4A4_UNORM,
+ PIPE_FORMAT_R8A8_UNORM,
+ PIPE_FORMAT_A8R8_UNORM,
+
+ PIPE_FORMAT_R10G10B10A2_SSCALED,
+ PIPE_FORMAT_R10G10B10A2_SNORM,
+
+ PIPE_FORMAT_B10G10R10A2_USCALED,
+ PIPE_FORMAT_B10G10R10A2_SSCALED,
+ PIPE_FORMAT_B10G10R10A2_SNORM,
+
+ PIPE_FORMAT_R8_UINT,
+ PIPE_FORMAT_R8G8_UINT,
+ PIPE_FORMAT_R8G8B8_UINT,
+ PIPE_FORMAT_R8G8B8A8_UINT,
+
+ PIPE_FORMAT_R8_SINT,
+ PIPE_FORMAT_R8G8_SINT,
+ PIPE_FORMAT_R8G8B8_SINT,
+ PIPE_FORMAT_R8G8B8A8_SINT,
+
+ PIPE_FORMAT_R16_UINT,
+ PIPE_FORMAT_R16G16_UINT,
+ PIPE_FORMAT_R16G16B16_UINT,
+ PIPE_FORMAT_R16G16B16A16_UINT,
+
+ PIPE_FORMAT_R16_SINT,
+ PIPE_FORMAT_R16G16_SINT,
+ PIPE_FORMAT_R16G16B16_SINT,
+ PIPE_FORMAT_R16G16B16A16_SINT,
+
+ PIPE_FORMAT_R32_UINT,
+ PIPE_FORMAT_R32G32_UINT,
+ PIPE_FORMAT_R32G32B32_UINT,
+ PIPE_FORMAT_R32G32B32A32_UINT,
+
+ PIPE_FORMAT_R32_SINT,
+ PIPE_FORMAT_R32G32_SINT,
+ PIPE_FORMAT_R32G32B32_SINT,
+ PIPE_FORMAT_R32G32B32A32_SINT,
+
+ PIPE_FORMAT_A8_UINT,
+ PIPE_FORMAT_I8_UINT,
+ PIPE_FORMAT_L8_UINT,
+ PIPE_FORMAT_L8A8_UINT,
+
+ PIPE_FORMAT_A8_SINT,
+ PIPE_FORMAT_I8_SINT,
+ PIPE_FORMAT_L8_SINT,
+ PIPE_FORMAT_L8A8_SINT,
+
+ PIPE_FORMAT_A16_UINT,
+ PIPE_FORMAT_I16_UINT,
+ PIPE_FORMAT_L16_UINT,
+ PIPE_FORMAT_L16A16_UINT,
+
+ PIPE_FORMAT_A16_SINT,
+ PIPE_FORMAT_I16_SINT,
+ PIPE_FORMAT_L16_SINT,
+ PIPE_FORMAT_L16A16_SINT,
+
+ PIPE_FORMAT_A32_UINT,
+ PIPE_FORMAT_I32_UINT,
+ PIPE_FORMAT_L32_UINT,
+ PIPE_FORMAT_L32A32_UINT,
+
+ PIPE_FORMAT_A32_SINT,
+ PIPE_FORMAT_I32_SINT,
+ PIPE_FORMAT_L32_SINT,
+ PIPE_FORMAT_L32A32_SINT,
+
+ PIPE_FORMAT_B8G8R8_UINT,
+ PIPE_FORMAT_B8G8R8A8_UINT,
+
+ PIPE_FORMAT_B8G8R8_SINT,
+ PIPE_FORMAT_B8G8R8A8_SINT,
+
+ PIPE_FORMAT_A8R8G8B8_UINT,
+ PIPE_FORMAT_A8B8G8R8_UINT,
+ PIPE_FORMAT_A2R10G10B10_UINT,
+ PIPE_FORMAT_A2B10G10R10_UINT,
+ PIPE_FORMAT_B10G10R10A2_UINT,
+ PIPE_FORMAT_B10G10R10A2_SINT,
+ PIPE_FORMAT_R5G6B5_UINT,
+ PIPE_FORMAT_B5G6R5_UINT,
+ PIPE_FORMAT_R5G5B5A1_UINT,
+ PIPE_FORMAT_B5G5R5A1_UINT,
+ PIPE_FORMAT_A1R5G5B5_UINT,
+ PIPE_FORMAT_A1B5G5R5_UINT,
+ PIPE_FORMAT_R4G4B4A4_UINT,
+ PIPE_FORMAT_B4G4R4A4_UINT,
+ PIPE_FORMAT_A4R4G4B4_UINT,
+ PIPE_FORMAT_A4B4G4R4_UINT,
+ PIPE_FORMAT_R3G3B2_UINT,
+ PIPE_FORMAT_B2G3R3_UINT,
+
+ PIPE_FORMAT_ETC1_RGB8,
+
+ PIPE_FORMAT_R8G8_R8B8_UNORM,
+ PIPE_FORMAT_G8R8_B8R8_UNORM,
+
+ PIPE_FORMAT_R8G8B8X8_SNORM,
+ PIPE_FORMAT_R8G8B8X8_SRGB,
+ PIPE_FORMAT_R8G8B8X8_UINT,
+ PIPE_FORMAT_R8G8B8X8_SINT,
+ PIPE_FORMAT_B10G10R10X2_UNORM,
+ PIPE_FORMAT_R16G16B16X16_UNORM,
+ PIPE_FORMAT_R16G16B16X16_SNORM,
+ PIPE_FORMAT_R16G16B16X16_FLOAT,
+ PIPE_FORMAT_R16G16B16X16_UINT,
+ PIPE_FORMAT_R16G16B16X16_SINT,
+ PIPE_FORMAT_R32G32B32X32_FLOAT,
+ PIPE_FORMAT_R32G32B32X32_UINT,
+ PIPE_FORMAT_R32G32B32X32_SINT,
+
+ PIPE_FORMAT_R8A8_SNORM,
+ PIPE_FORMAT_R16A16_UNORM,
+ PIPE_FORMAT_R16A16_SNORM,
+ PIPE_FORMAT_R16A16_FLOAT,
+ PIPE_FORMAT_R32A32_FLOAT,
+ PIPE_FORMAT_R8A8_UINT,
+ PIPE_FORMAT_R8A8_SINT,
+ PIPE_FORMAT_R16A16_UINT,
+ PIPE_FORMAT_R16A16_SINT,
+ PIPE_FORMAT_R32A32_UINT,
+ PIPE_FORMAT_R32A32_SINT,
+ PIPE_FORMAT_R10G10B10A2_UINT,
+ PIPE_FORMAT_R10G10B10A2_SINT,
+
+ PIPE_FORMAT_B5G6R5_SRGB,
+
+ PIPE_FORMAT_BPTC_RGBA_UNORM,
+ PIPE_FORMAT_BPTC_SRGBA,
+ PIPE_FORMAT_BPTC_RGB_FLOAT,
+ PIPE_FORMAT_BPTC_RGB_UFLOAT,
+
+ PIPE_FORMAT_G8R8_UNORM,
+ PIPE_FORMAT_G8R8_SNORM,
+ PIPE_FORMAT_G16R16_UNORM,
+ PIPE_FORMAT_G16R16_SNORM,
+
+ PIPE_FORMAT_A8B8G8R8_SNORM,
+ PIPE_FORMAT_X8B8G8R8_SNORM,
+
+ PIPE_FORMAT_ETC2_RGB8,
+ PIPE_FORMAT_ETC2_SRGB8,
+ PIPE_FORMAT_ETC2_RGB8A1,
+ PIPE_FORMAT_ETC2_SRGB8A1,
+ PIPE_FORMAT_ETC2_RGBA8,
+ PIPE_FORMAT_ETC2_SRGBA8,
+ PIPE_FORMAT_ETC2_R11_UNORM,
+ PIPE_FORMAT_ETC2_R11_SNORM,
+ PIPE_FORMAT_ETC2_RG11_UNORM,
+ PIPE_FORMAT_ETC2_RG11_SNORM,
+
+ PIPE_FORMAT_ASTC_4x4,
+ PIPE_FORMAT_ASTC_5x4,
+ PIPE_FORMAT_ASTC_5x5,
+ PIPE_FORMAT_ASTC_6x5,
+ PIPE_FORMAT_ASTC_6x6,
+ PIPE_FORMAT_ASTC_8x5,
+ PIPE_FORMAT_ASTC_8x6,
+ PIPE_FORMAT_ASTC_8x8,
+ PIPE_FORMAT_ASTC_10x5,
+ PIPE_FORMAT_ASTC_10x6,
+ PIPE_FORMAT_ASTC_10x8,
+ PIPE_FORMAT_ASTC_10x10,
+ PIPE_FORMAT_ASTC_12x10,
+ PIPE_FORMAT_ASTC_12x12,
+
+ PIPE_FORMAT_ASTC_4x4_SRGB,
+ PIPE_FORMAT_ASTC_5x4_SRGB,
+ PIPE_FORMAT_ASTC_5x5_SRGB,
+ PIPE_FORMAT_ASTC_6x5_SRGB,
+ PIPE_FORMAT_ASTC_6x6_SRGB,
+ PIPE_FORMAT_ASTC_8x5_SRGB,
+ PIPE_FORMAT_ASTC_8x6_SRGB,
+ PIPE_FORMAT_ASTC_8x8_SRGB,
+ PIPE_FORMAT_ASTC_10x5_SRGB,
+ PIPE_FORMAT_ASTC_10x6_SRGB,
+ PIPE_FORMAT_ASTC_10x8_SRGB,
+ PIPE_FORMAT_ASTC_10x10_SRGB,
+ PIPE_FORMAT_ASTC_12x10_SRGB,
+ PIPE_FORMAT_ASTC_12x12_SRGB,
+
+ PIPE_FORMAT_ASTC_3x3x3,
+ PIPE_FORMAT_ASTC_4x3x3,
+ PIPE_FORMAT_ASTC_4x4x3,
+ PIPE_FORMAT_ASTC_4x4x4,
+ PIPE_FORMAT_ASTC_5x4x4,
+ PIPE_FORMAT_ASTC_5x5x4,
+ PIPE_FORMAT_ASTC_5x5x5,
+ PIPE_FORMAT_ASTC_6x5x5,
+ PIPE_FORMAT_ASTC_6x6x5,
+ PIPE_FORMAT_ASTC_6x6x6,
+
+ PIPE_FORMAT_ASTC_3x3x3_SRGB,
+ PIPE_FORMAT_ASTC_4x3x3_SRGB,
+ PIPE_FORMAT_ASTC_4x4x3_SRGB,
+ PIPE_FORMAT_ASTC_4x4x4_SRGB,
+ PIPE_FORMAT_ASTC_5x4x4_SRGB,
+ PIPE_FORMAT_ASTC_5x5x4_SRGB,
+ PIPE_FORMAT_ASTC_5x5x5_SRGB,
+ PIPE_FORMAT_ASTC_6x5x5_SRGB,
+ PIPE_FORMAT_ASTC_6x6x5_SRGB,
+ PIPE_FORMAT_ASTC_6x6x6_SRGB,
+
+ PIPE_FORMAT_FXT1_RGB,
+ PIPE_FORMAT_FXT1_RGBA,
+
+ PIPE_FORMAT_P010,
+ PIPE_FORMAT_P016,
+
+ PIPE_FORMAT_R10G10B10X2_UNORM,
+ PIPE_FORMAT_A1R5G5B5_UNORM,
+ PIPE_FORMAT_A1B5G5R5_UNORM,
+ PIPE_FORMAT_X1B5G5R5_UNORM,
+ PIPE_FORMAT_R5G5B5A1_UNORM,
+ PIPE_FORMAT_A4R4G4B4_UNORM,
+ PIPE_FORMAT_A4B4G4R4_UNORM,
+
+ PIPE_FORMAT_G8R8_SINT,
+ PIPE_FORMAT_A8B8G8R8_SINT,
+ PIPE_FORMAT_X8B8G8R8_SINT,
+
+ PIPE_FORMAT_ATC_RGB,
+ PIPE_FORMAT_ATC_RGBA_EXPLICIT,
+ PIPE_FORMAT_ATC_RGBA_INTERPOLATED,
+
+ PIPE_FORMAT_Z24_UNORM_S8_UINT_AS_R8G8B8A8,
+
+ PIPE_FORMAT_AYUV,
+ PIPE_FORMAT_XYUV,
+
+ PIPE_FORMAT_COUNT
+};
+
+#if UTIL_ARCH_LITTLE_ENDIAN
+#define PIPE_FORMAT_RGBA8888_UNORM PIPE_FORMAT_R8G8B8A8_UNORM
+#define PIPE_FORMAT_RGBX8888_UNORM PIPE_FORMAT_R8G8B8X8_UNORM
+#define PIPE_FORMAT_BGRA8888_UNORM PIPE_FORMAT_B8G8R8A8_UNORM
+#define PIPE_FORMAT_BGRX8888_UNORM PIPE_FORMAT_B8G8R8X8_UNORM
+#define PIPE_FORMAT_ARGB8888_UNORM PIPE_FORMAT_A8R8G8B8_UNORM
+#define PIPE_FORMAT_XRGB8888_UNORM PIPE_FORMAT_X8R8G8B8_UNORM
+#define PIPE_FORMAT_ABGR8888_UNORM PIPE_FORMAT_A8B8G8R8_UNORM
+#define PIPE_FORMAT_XBGR8888_UNORM PIPE_FORMAT_X8B8G8R8_UNORM
+#define PIPE_FORMAT_RGBA8888_SNORM PIPE_FORMAT_R8G8B8A8_SNORM
+#define PIPE_FORMAT_RGBX8888_SNORM PIPE_FORMAT_R8G8B8X8_SNORM
+#define PIPE_FORMAT_ABGR8888_SNORM PIPE_FORMAT_A8B8G8R8_SNORM
+#define PIPE_FORMAT_XBGR8888_SNORM PIPE_FORMAT_X8B8G8R8_SNORM
+#define PIPE_FORMAT_RGBA8888_SRGB PIPE_FORMAT_R8G8B8A8_SRGB
+#define PIPE_FORMAT_RGBX8888_SRGB PIPE_FORMAT_R8G8B8X8_SRGB
+#define PIPE_FORMAT_BGRA8888_SRGB PIPE_FORMAT_B8G8R8A8_SRGB
+#define PIPE_FORMAT_BGRX8888_SRGB PIPE_FORMAT_B8G8R8X8_SRGB
+#define PIPE_FORMAT_ARGB8888_SRGB PIPE_FORMAT_A8R8G8B8_SRGB
+#define PIPE_FORMAT_XRGB8888_SRGB PIPE_FORMAT_X8R8G8B8_SRGB
+#define PIPE_FORMAT_ABGR8888_SRGB PIPE_FORMAT_A8B8G8R8_SRGB
+#define PIPE_FORMAT_XBGR8888_SRGB PIPE_FORMAT_X8B8G8R8_SRGB
+#define PIPE_FORMAT_RGBA8888_USCALED PIPE_FORMAT_R8G8B8A8_USCALED
+#define PIPE_FORMAT_RGBA8888_SSCALED PIPE_FORMAT_R8G8B8A8_SSCALED
+#define PIPE_FORMAT_RGBA8888_UINT PIPE_FORMAT_R8G8B8A8_UINT
+#define PIPE_FORMAT_BGRA8888_UINT PIPE_FORMAT_B8G8R8A8_UINT
+#define PIPE_FORMAT_ARGB8888_UINT PIPE_FORMAT_A8R8G8B8_UINT
+#define PIPE_FORMAT_ABGR8888_UINT PIPE_FORMAT_A8B8G8R8_UINT
+#define PIPE_FORMAT_RGBA8888_SINT PIPE_FORMAT_R8G8B8A8_SINT
+#define PIPE_FORMAT_RG88_UNORM PIPE_FORMAT_R8G8_UNORM
+#define PIPE_FORMAT_GR88_UNORM PIPE_FORMAT_G8R8_UNORM
+#define PIPE_FORMAT_RG88_SNORM PIPE_FORMAT_R8G8_SNORM
+#define PIPE_FORMAT_GR88_SNORM PIPE_FORMAT_G8R8_SNORM
+#define PIPE_FORMAT_RG1616_UNORM PIPE_FORMAT_R16G16_UNORM
+#define PIPE_FORMAT_GR1616_UNORM PIPE_FORMAT_G16R16_UNORM
+#define PIPE_FORMAT_RG1616_SNORM PIPE_FORMAT_R16G16_SNORM
+#define PIPE_FORMAT_GR1616_SNORM PIPE_FORMAT_G16R16_SNORM
+#elif UTIL_ARCH_BIG_ENDIAN
+#define PIPE_FORMAT_ABGR8888_UNORM PIPE_FORMAT_R8G8B8A8_UNORM
+#define PIPE_FORMAT_XBGR8888_UNORM PIPE_FORMAT_R8G8B8X8_UNORM
+#define PIPE_FORMAT_ARGB8888_UNORM PIPE_FORMAT_B8G8R8A8_UNORM
+#define PIPE_FORMAT_XRGB8888_UNORM PIPE_FORMAT_B8G8R8X8_UNORM
+#define PIPE_FORMAT_BGRA8888_UNORM PIPE_FORMAT_A8R8G8B8_UNORM
+#define PIPE_FORMAT_BGRX8888_UNORM PIPE_FORMAT_X8R8G8B8_UNORM
+#define PIPE_FORMAT_RGBA8888_UNORM PIPE_FORMAT_A8B8G8R8_UNORM
+#define PIPE_FORMAT_RGBX8888_UNORM PIPE_FORMAT_X8B8G8R8_UNORM
+#define PIPE_FORMAT_ABGR8888_SNORM PIPE_FORMAT_R8G8B8A8_SNORM
+#define PIPE_FORMAT_XBGR8888_SNORM PIPE_FORMAT_R8G8B8X8_SNORM
+#define PIPE_FORMAT_RGBA8888_SNORM PIPE_FORMAT_A8B8G8R8_SNORM
+#define PIPE_FORMAT_RGBX8888_SNORM PIPE_FORMAT_X8B8G8R8_SNORM
+#define PIPE_FORMAT_ABGR8888_SRGB PIPE_FORMAT_R8G8B8A8_SRGB
+#define PIPE_FORMAT_XBGR8888_SRGB PIPE_FORMAT_R8G8B8X8_SRGB
+#define PIPE_FORMAT_ARGB8888_SRGB PIPE_FORMAT_B8G8R8A8_SRGB
+#define PIPE_FORMAT_XRGB8888_SRGB PIPE_FORMAT_B8G8R8X8_SRGB
+#define PIPE_FORMAT_BGRA8888_SRGB PIPE_FORMAT_A8R8G8B8_SRGB
+#define PIPE_FORMAT_BGRX8888_SRGB PIPE_FORMAT_X8R8G8B8_SRGB
+#define PIPE_FORMAT_RGBA8888_SRGB PIPE_FORMAT_A8B8G8R8_SRGB
+#define PIPE_FORMAT_RGBX8888_SRGB PIPE_FORMAT_X8B8G8R8_SRGB
+#define PIPE_FORMAT_RGBA8888_USCALED PIPE_FORMAT_A8B8G8R8_USCALED
+#define PIPE_FORMAT_RGBA8888_SSCALED PIPE_FORMAT_A8B8G8R8_SSCALED
+#define PIPE_FORMAT_RGBA8888_UINT PIPE_FORMAT_A8B8G8R8_UINT
+#define PIPE_FORMAT_BGRA8888_UINT PIPE_FORMAT_A8R8G8B8_UINT
+#define PIPE_FORMAT_ARGB8888_UINT PIPE_FORMAT_B8G8R8A8_UINT
+#define PIPE_FORMAT_ABGR8888_UINT PIPE_FORMAT_R8G8B8A8_UINT
+#define PIPE_FORMAT_RGBA8888_SINT PIPE_FORMAT_A8B8G8R8_SINT
+#define PIPE_FORMAT_RG88_UNORM PIPE_FORMAT_G8R8_UNORM
+#define PIPE_FORMAT_GR88_UNORM PIPE_FORMAT_R8G8_UNORM
+#define PIPE_FORMAT_RG88_SNORM PIPE_FORMAT_G8R8_SNORM
+#define PIPE_FORMAT_GR88_SNORM PIPE_FORMAT_R8G8_SNORM
+#define PIPE_FORMAT_RG1616_UNORM PIPE_FORMAT_G16R16_UNORM
+#define PIPE_FORMAT_GR1616_UNORM PIPE_FORMAT_R16G16_UNORM
+#define PIPE_FORMAT_RG1616_SNORM PIPE_FORMAT_G16R16_SNORM
+#define PIPE_FORMAT_GR1616_SNORM PIPE_FORMAT_R16G16_SNORM
+#endif
+
+enum pipe_video_chroma_format
+{
+ PIPE_VIDEO_CHROMA_FORMAT_400,
+ PIPE_VIDEO_CHROMA_FORMAT_420,
+ PIPE_VIDEO_CHROMA_FORMAT_422,
+ PIPE_VIDEO_CHROMA_FORMAT_444,
+ PIPE_VIDEO_CHROMA_FORMAT_NONE
+};
+
+static inline enum pipe_video_chroma_format
+pipe_format_to_chroma_format(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_NV12:
+ case PIPE_FORMAT_NV21:
+ case PIPE_FORMAT_YV12:
+ case PIPE_FORMAT_IYUV:
+ case PIPE_FORMAT_P010:
+ case PIPE_FORMAT_P016:
+ return PIPE_VIDEO_CHROMA_FORMAT_420;
+ case PIPE_FORMAT_UYVY:
+ case PIPE_FORMAT_YUYV:
+ case PIPE_FORMAT_YV16:
+ return PIPE_VIDEO_CHROMA_FORMAT_422;
+ default:
+ return PIPE_VIDEO_CHROMA_FORMAT_NONE;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_state.h b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_state.h
new file mode 100644
index 0000000000..2a14b9a435
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/gallium/include/pipe/p_state.h
@@ -0,0 +1,980 @@
+/**************************************************************************
+ *
+ * Copyright 2007 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * @file
+ *
+ * Abstract graphics pipe state objects.
+ *
+ * Basic notes:
+ * 1. Want compact representations, so we use bitfields.
+ * 2. Put bitfields before other (GLfloat) fields.
+ * 3. enum bitfields need to be at least one bit extra in size so the most
+ * significant bit is zero. MSVC treats enums as signed so if the high
+ * bit is set, the value will be interpreted as a negative number.
+ * That causes trouble in various places.
+ */
+
+
+#ifndef PIPE_STATE_H
+#define PIPE_STATE_H
+
+#include "p_compiler.h"
+#include "p_defines.h"
+#include "p_format.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Implementation limits
+ */
+#define PIPE_MAX_ATTRIBS 32
+#define PIPE_MAX_CLIP_PLANES 8
+#define PIPE_MAX_COLOR_BUFS 8
+#define PIPE_MAX_CONSTANT_BUFFERS 32
+#define PIPE_MAX_SAMPLERS 32
+#define PIPE_MAX_SHADER_INPUTS 80 /* 32 GENERIC + 32 PATCH + 16 others */
+#define PIPE_MAX_SHADER_OUTPUTS 80 /* 32 GENERIC + 32 PATCH + 16 others */
+#define PIPE_MAX_SHADER_SAMPLER_VIEWS 128
+#define PIPE_MAX_SHADER_BUFFERS 32
+#define PIPE_MAX_SHADER_IMAGES 32
+#define PIPE_MAX_TEXTURE_LEVELS 16
+#define PIPE_MAX_SO_BUFFERS 4
+#define PIPE_MAX_SO_OUTPUTS 64
+#define PIPE_MAX_VIEWPORTS 16
+#define PIPE_MAX_CLIP_OR_CULL_DISTANCE_COUNT 8
+#define PIPE_MAX_CLIP_OR_CULL_DISTANCE_ELEMENT_COUNT 2
+#define PIPE_MAX_WINDOW_RECTANGLES 8
+#define PIPE_MAX_SAMPLE_LOCATION_GRID_SIZE 4
+
+#define PIPE_MAX_HW_ATOMIC_BUFFERS 32
+#define PIPE_MAX_VERTEX_STREAMS 4
+
+struct pipe_reference
+{
+ int32_t count; /* atomic */
+};
+
+
+
+/**
+ * Primitive (point/line/tri) rasterization info
+ */
+struct pipe_rasterizer_state
+{
+ unsigned flatshade:1;
+ unsigned light_twoside:1;
+ unsigned clamp_vertex_color:1;
+ unsigned clamp_fragment_color:1;
+ unsigned front_ccw:1;
+ unsigned cull_face:2; /**< PIPE_FACE_x */
+ unsigned fill_front:2; /**< PIPE_POLYGON_MODE_x */
+ unsigned fill_back:2; /**< PIPE_POLYGON_MODE_x */
+ unsigned offset_point:1;
+ unsigned offset_line:1;
+ unsigned offset_tri:1;
+ unsigned scissor:1;
+ unsigned poly_smooth:1;
+ unsigned poly_stipple_enable:1;
+ unsigned point_smooth:1;
+ unsigned sprite_coord_mode:1; /**< PIPE_SPRITE_COORD_ */
+ unsigned point_quad_rasterization:1; /**< points rasterized as quads or points */
+ unsigned point_tri_clip:1; /**< large points clipped as tris or points */
+ unsigned point_size_per_vertex:1; /**< size computed in vertex shader */
+ unsigned multisample:1; /* XXX maybe more ms state in future */
+ unsigned force_persample_interp:1;
+ unsigned line_smooth:1;
+ unsigned line_stipple_enable:1;
+ unsigned line_last_pixel:1;
+ unsigned conservative_raster_mode:2; /**< PIPE_CONSERVATIVE_RASTER_x */
+
+ /**
+ * Use the first vertex of a primitive as the provoking vertex for
+ * flat shading.
+ */
+ unsigned flatshade_first:1;
+
+ unsigned half_pixel_center:1;
+ unsigned bottom_edge_rule:1;
+
+ /*
+ * Conservative rasterization subpixel precision bias in bits
+ */
+ unsigned subpixel_precision_x:4;
+ unsigned subpixel_precision_y:4;
+
+ /**
+ * When true, rasterization is disabled and no pixels are written.
+ * This only makes sense with the Stream Out functionality.
+ */
+ unsigned rasterizer_discard:1;
+
+ /**
+ * Exposed by PIPE_CAP_TILE_RASTER_ORDER. When true,
+ * tile_raster_order_increasing_* indicate the order that the rasterizer
+ * should render tiles, to meet the requirements of
+ * GL_MESA_tile_raster_order.
+ */
+ unsigned tile_raster_order_fixed:1;
+ unsigned tile_raster_order_increasing_x:1;
+ unsigned tile_raster_order_increasing_y:1;
+
+ /**
+ * When false, depth clipping is disabled and the depth value will be
+ * clamped later at the per-pixel level before depth testing.
+ * This depends on PIPE_CAP_DEPTH_CLIP_DISABLE.
+ *
+ * If PIPE_CAP_DEPTH_CLIP_DISABLE_SEPARATE is unsupported, depth_clip_near
+ * is equal to depth_clip_far.
+ */
+ unsigned depth_clip_near:1;
+ unsigned depth_clip_far:1;
+
+ /**
+ * When true, clip space in the z axis goes from [0..1] (D3D). When false,
+ * it goes from [-1..1] (GL).
+ *
+ * NOTE: D3D will always use depth clamping.
+ */
+ unsigned clip_halfz:1;
+
+ /**
+ * When true, do not scale offset_units and use the same rules for unorm
+ * and float depth buffers (D3D9). When false, use GL/D3D1X behaviour.
+ * This depends on PIPE_CAP_POLYGON_OFFSET_UNITS_UNSCALED.
+ */
+ unsigned offset_units_unscaled:1;
+
+ /**
+ * Enable bits for clipping half-spaces.
+ * This applies to both user clip planes and shader clip distances.
+ * Note that if the bound shader exports any clip distances, these
+ * replace all user clip planes, and clip half-spaces enabled here
+ * but not written by the shader count as disabled.
+ */
+ unsigned clip_plane_enable:PIPE_MAX_CLIP_PLANES;
+
+ unsigned line_stipple_factor:8; /**< [1..256] actually */
+ unsigned line_stipple_pattern:16;
+
+ /**
+ * Replace the given TEXCOORD inputs with point coordinates, max. 8 inputs.
+ * If TEXCOORD (including PCOORD) are unsupported, replace GENERIC inputs
+ * instead. Max. 9 inputs: 8x GENERIC to emulate TEXCOORD, and 1x GENERIC
+ * to emulate PCOORD.
+ */
+ uint16_t sprite_coord_enable; /* 0-7: TEXCOORD/GENERIC, 8: PCOORD */
+
+ float line_width;
+ float point_size; /**< used when no per-vertex size */
+ float offset_units;
+ float offset_scale;
+ float offset_clamp;
+ float conservative_raster_dilate;
+};
+
+
+struct pipe_poly_stipple
+{
+ unsigned stipple[32];
+};
+
+
+struct pipe_viewport_state
+{
+ float scale[3];
+ float translate[3];
+ enum pipe_viewport_swizzle swizzle_x:3;
+ enum pipe_viewport_swizzle swizzle_y:3;
+ enum pipe_viewport_swizzle swizzle_z:3;
+ enum pipe_viewport_swizzle swizzle_w:3;
+};
+
+
+struct pipe_scissor_state
+{
+ unsigned minx:16;
+ unsigned miny:16;
+ unsigned maxx:16;
+ unsigned maxy:16;
+};
+
+
+struct pipe_clip_state
+{
+ float ucp[PIPE_MAX_CLIP_PLANES][4];
+};
+
+/**
+ * A single output for vertex transform feedback.
+ */
+struct pipe_stream_output
+{
+ unsigned register_index:6; /**< 0 to 63 (OUT index) */
+ unsigned start_component:2; /**< 0 to 3 */
+ unsigned num_components:3; /**< 1 to 4 */
+ unsigned output_buffer:3; /**< 0 to PIPE_MAX_SO_BUFFERS */
+ unsigned dst_offset:16; /**< offset into the buffer in dwords */
+ unsigned stream:2; /**< 0 to 3 */
+};
+
+/**
+ * Stream output for vertex transform feedback.
+ */
+struct pipe_stream_output_info
+{
+ unsigned num_outputs;
+ /** stride for an entire vertex for each buffer in dwords */
+ uint16_t stride[PIPE_MAX_SO_BUFFERS];
+
+ /**
+ * Array of stream outputs, in the order they are to be written in.
+ * Selected components are tightly packed into the output buffer.
+ */
+ struct pipe_stream_output output[PIPE_MAX_SO_OUTPUTS];
+};
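+
+/*
+ * Illustrative sketch: capturing one vec4 output (OUT[0]) to transform
+ * feedback buffer 0. Strides and offsets are in dwords:
+ *
+ *    struct pipe_stream_output_info so = {0};
+ *    so.num_outputs               = 1;
+ *    so.stride[0]                 = 4;  // one vec4 per vertex
+ *    so.output[0].register_index  = 0;  // OUT[0]
+ *    so.output[0].start_component = 0;
+ *    so.output[0].num_components  = 4;
+ *    so.output[0].output_buffer   = 0;
+ *    so.output[0].dst_offset      = 0;
+ */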
+
+/**
+ * The 'type' parameter identifies whether the shader state contains TGSI
+ * tokens, etc. If the driver returns 'PIPE_SHADER_IR_TGSI' for the
+ * 'PIPE_SHADER_CAP_PREFERRED_IR' shader param, the ir will *always* be
+ * 'PIPE_SHADER_IR_TGSI' and the tokens ptr will be valid. If the driver
+ * requests a different 'pipe_shader_ir' type, then it must check the 'type'
+ * enum to see if it is getting TGSI tokens or its preferred IR.
+ *
+ * TODO: pipe_compute_state should probably get similar treatment to handle
+ * multiple IRs in a cleaner way.
+ *
+ * NOTE: since it is expected that the consumer will want to perform
+ * additional passes on the nir_shader, the driver takes ownership of
+ * the nir_shader. If state trackers need to hang on to the IR (for
+ * example, for variant management), they should use nir_shader_clone().
+ */
+struct pipe_shader_state
+{
+ enum pipe_shader_ir type;
+ /* TODO move tokens into union. */
+ const struct tgsi_token *tokens;
+ union {
+ void *native;
+ void *nir;
+ } ir;
+ struct pipe_stream_output_info stream_output;
+};
+
+static inline void
+pipe_shader_state_from_tgsi(struct pipe_shader_state *state,
+ const struct tgsi_token *tokens)
+{
+ state->type = PIPE_SHADER_IR_TGSI;
+ state->tokens = tokens;
+ memset(&state->stream_output, 0, sizeof(state->stream_output));
+}
+
+struct pipe_depth_state
+{
+ unsigned enabled:1; /**< depth test enabled? */
+ unsigned writemask:1; /**< allow depth buffer writes? */
+ unsigned func:3; /**< depth test func (PIPE_FUNC_x) */
+ unsigned bounds_test:1; /**< depth bounds test enabled? */
+ float bounds_min; /**< minimum depth bound */
+ float bounds_max; /**< maximum depth bound */
+};
+
+
+struct pipe_stencil_state
+{
+ unsigned enabled:1; /**< stencil[0]: stencil enabled, stencil[1]: two-side enabled */
+ unsigned func:3; /**< PIPE_FUNC_x */
+ unsigned fail_op:3; /**< PIPE_STENCIL_OP_x */
+ unsigned zpass_op:3; /**< PIPE_STENCIL_OP_x */
+ unsigned zfail_op:3; /**< PIPE_STENCIL_OP_x */
+ unsigned valuemask:8;
+ unsigned writemask:8;
+};
+
+
+struct pipe_alpha_state
+{
+ unsigned enabled:1;
+ unsigned func:3; /**< PIPE_FUNC_x */
+ float ref_value; /**< reference value */
+};
+
+
+struct pipe_depth_stencil_alpha_state
+{
+ struct pipe_depth_state depth;
+ struct pipe_stencil_state stencil[2]; /**< [0] = front, [1] = back */
+ struct pipe_alpha_state alpha;
+};
+
+
+struct pipe_rt_blend_state
+{
+ unsigned blend_enable:1;
+
+ unsigned rgb_func:3; /**< PIPE_BLEND_x */
+ unsigned rgb_src_factor:5; /**< PIPE_BLENDFACTOR_x */
+ unsigned rgb_dst_factor:5; /**< PIPE_BLENDFACTOR_x */
+
+ unsigned alpha_func:3; /**< PIPE_BLEND_x */
+ unsigned alpha_src_factor:5; /**< PIPE_BLENDFACTOR_x */
+ unsigned alpha_dst_factor:5; /**< PIPE_BLENDFACTOR_x */
+
+ unsigned colormask:4; /**< bitmask of PIPE_MASK_R/G/B/A */
+};
+
+
+struct pipe_blend_state
+{
+ unsigned independent_blend_enable:1;
+ unsigned logicop_enable:1;
+ unsigned logicop_func:4; /**< PIPE_LOGICOP_x */
+ unsigned dither:1;
+ unsigned alpha_to_coverage:1;
+ unsigned alpha_to_coverage_dither:1;
+ unsigned alpha_to_one:1;
+ unsigned max_rt:3; /**< index of max rt, i.e. # of cbufs minus 1 */
+ struct pipe_rt_blend_state rt[PIPE_MAX_COLOR_BUFS];
+};
+
+
+struct pipe_blend_color
+{
+ float color[4];
+};
+
+
+struct pipe_stencil_ref
+{
+ ubyte ref_value[2];
+};
+
+
+/**
+ * Note that pipe_surfaces are "texture views for rendering", so in the
+ * case of ARB_framebuffer_no_attachment there is no pipe_surface state
+ * from which to extract the number of samples and layers.
+ */
+struct pipe_framebuffer_state
+{
+ uint16_t width, height;
+ uint16_t layers; /**< Number of layers in a no-attachment framebuffer */
+ ubyte samples; /**< Number of samples in a no-attachment framebuffer */
+
+ /** multiple color buffers for multiple render targets */
+ ubyte nr_cbufs;
+ struct pipe_surface *cbufs[PIPE_MAX_COLOR_BUFS];
+
+ struct pipe_surface *zsbuf; /**< Z/stencil buffer */
+};
+
+
+/**
+ * Texture sampler state.
+ */
+struct pipe_sampler_state
+{
+ unsigned wrap_s:3; /**< PIPE_TEX_WRAP_x */
+ unsigned wrap_t:3; /**< PIPE_TEX_WRAP_x */
+ unsigned wrap_r:3; /**< PIPE_TEX_WRAP_x */
+ unsigned min_img_filter:1; /**< PIPE_TEX_FILTER_x */
+ unsigned min_mip_filter:2; /**< PIPE_TEX_MIPFILTER_x */
+ unsigned mag_img_filter:1; /**< PIPE_TEX_FILTER_x */
+ unsigned compare_mode:1; /**< PIPE_TEX_COMPARE_x */
+ unsigned compare_func:3; /**< PIPE_FUNC_x */
+ unsigned normalized_coords:1; /**< Are coords normalized to [0,1]? */
+ unsigned max_anisotropy:5;
+ unsigned seamless_cube_map:1;
+ float lod_bias; /**< LOD/lambda bias */
+ float min_lod, max_lod; /**< LOD clamp range, after bias */
+ union pipe_color_union border_color;
+};
+
+union pipe_surface_desc {
+ struct {
+ unsigned level;
+ unsigned first_layer:16;
+ unsigned last_layer:16;
+ } tex;
+ struct {
+ unsigned first_element;
+ unsigned last_element;
+ } buf;
+};
+
+/**
+ * A view into a texture that can be bound to a color render target /
+ * depth stencil attachment point.
+ */
+struct pipe_surface
+{
+ struct pipe_reference reference;
+ enum pipe_format format:16;
+ unsigned writable:1; /**< writable shader resource */
+ struct pipe_resource *texture; /**< resource into which this is a view */
+ struct pipe_context *context; /**< context this surface belongs to */
+
+ /* XXX width/height should be removed */
+ uint16_t width; /**< logical width in pixels */
+ uint16_t height; /**< logical height in pixels */
+
+ /**
+ * Number of samples for the surface. This will be 0 if rendering
+ * should use the resource's nr_samples, or another value if the resource
+ * is bound using FramebufferTexture2DMultisampleEXT.
+ */
+ unsigned nr_samples:8;
+
+ union pipe_surface_desc u;
+};
+
+
+/**
+ * A view into a texture that can be bound to a shader stage.
+ */
+struct pipe_sampler_view
+{
+ struct pipe_reference reference;
+ enum pipe_format format:15; /**< typed PIPE_FORMAT_x */
+ enum pipe_texture_target target:5; /**< PIPE_TEXTURE_x */
+ unsigned swizzle_r:3; /**< PIPE_SWIZZLE_x for red component */
+ unsigned swizzle_g:3; /**< PIPE_SWIZZLE_x for green component */
+ unsigned swizzle_b:3; /**< PIPE_SWIZZLE_x for blue component */
+ unsigned swizzle_a:3; /**< PIPE_SWIZZLE_x for alpha component */
+ struct pipe_resource *texture; /**< texture into which this is a view */
+ struct pipe_context *context; /**< context this view belongs to */
+ union {
+ struct {
+ unsigned first_layer:16; /**< first layer to use for array textures */
+ unsigned last_layer:16; /**< last layer to use for array textures */
+ unsigned first_level:8; /**< first mipmap level to use */
+ unsigned last_level:8; /**< last mipmap level to use */
+ } tex;
+ struct {
+ unsigned offset; /**< offset in bytes */
+ unsigned size; /**< size of the readable sub-range in bytes */
+ } buf;
+ } u;
+};
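+
+/*
+ * Example (illustrative sketch): an identity-swizzled 2D view of every
+ * mipmap level of a resource, assuming the PIPE_SWIZZLE_* and
+ * PIPE_TEXTURE_2D tokens from p_defines.h ('texture' is a hypothetical
+ * pipe_resource pointer):
+ *
+ *    struct pipe_sampler_view sv = {0};
+ *    sv.format = texture->format;
+ *    sv.target = PIPE_TEXTURE_2D;
+ *    sv.swizzle_r = PIPE_SWIZZLE_X;
+ *    sv.swizzle_g = PIPE_SWIZZLE_Y;
+ *    sv.swizzle_b = PIPE_SWIZZLE_Z;
+ *    sv.swizzle_a = PIPE_SWIZZLE_W;
+ *    sv.texture = texture;
+ *    sv.u.tex.first_level = 0;
+ *    sv.u.tex.last_level = texture->last_level;
+ */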
+
+
+/**
+ * A description of a buffer or texture image that can be bound to a shader
+ * stage.
+ */
+struct pipe_image_view
+{
+ struct pipe_resource *resource; /**< resource into which this is a view */
+ enum pipe_format format; /**< typed PIPE_FORMAT_x */
+ uint16_t access; /**< PIPE_IMAGE_ACCESS_x */
+ uint16_t shader_access; /**< PIPE_IMAGE_ACCESS_x */
+
+ union {
+ struct {
+ unsigned first_layer:16; /**< first layer to use for array textures */
+ unsigned last_layer:16; /**< last layer to use for array textures */
+ unsigned level:8; /**< mipmap level to use */
+ } tex;
+ struct {
+ unsigned offset; /**< offset in bytes */
+ unsigned size; /**< size of the accessible sub-range in bytes */
+ } buf;
+ } u;
+};
+
+
+/**
+ * Subregion of 1D/2D/3D image resource.
+ */
+struct pipe_box
+{
+ /* Fields only used by textures use int16_t instead of int.
+ * x and width are used by buffers, so they need the full 32-bit range.
+ */
+ int x;
+ int16_t y;
+ int16_t z;
+ int width;
+ int16_t height;
+ int16_t depth;
+};
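+
+/*
+ * Example (illustrative sketch): a 64x64 texel region at (16, 16) of a 2D
+ * image, and the first 256 bytes of a buffer (only x/width are meaningful
+ * for buffers). Fields are x, y, z, width, height, depth:
+ *
+ *    struct pipe_box tex_box = { 16, 16, 0,  64, 64, 1 };
+ *    struct pipe_box buf_box = {  0,  0, 0, 256,  1, 1 };
+ */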
+
+
+/**
+ * A memory object/resource such as a vertex buffer or texture.
+ */
+struct pipe_resource
+{
+ struct pipe_reference reference;
+
+ unsigned width0; /**< Used by both buffers and textures. */
+ uint16_t height0; /* Textures: The maximum height/depth/array_size is 16k. */
+ uint16_t depth0;
+ uint16_t array_size;
+
+ enum pipe_format format:16; /**< PIPE_FORMAT_x */
+ enum pipe_texture_target target:8; /**< PIPE_TEXTURE_x */
+ unsigned last_level:8; /**< Index of last mipmap level present/defined */
+
+ /** Number of samples determining quality, driving rasterizer, shading,
+ * and framebuffer.
+ */
+ unsigned nr_samples:8;
+
+ /** Multiple samples within a pixel can have the same value.
+ * nr_storage_samples determines how many slots for different values
+ * there are per pixel. Only color buffers can set this lower than
+ * nr_samples.
+ */
+ unsigned nr_storage_samples:8;
+
+ unsigned usage:8; /**< PIPE_USAGE_x (not a bitmask) */
+ unsigned bind; /**< bitmask of PIPE_BIND_x */
+ unsigned flags; /**< bitmask of PIPE_RESOURCE_FLAG_x */
+
+ /**
+ * For planar images, e.g. YUV EGLImage external, etc., pointer to the
+ * next plane.
+ */
+ struct pipe_resource *next;
+ /* The screen pointer should be last for optimal structure packing. */
+ struct pipe_screen *screen; /**< screen that this texture belongs to */
+};
+
+
+/**
+ * Transfer object. For data transfer to/from a resource.
+ */
+struct pipe_transfer
+{
+ struct pipe_resource *resource; /**< resource to transfer to/from */
+ unsigned level; /**< texture mipmap level */
+ enum pipe_transfer_usage usage;
+ struct pipe_box box; /**< region of the resource to access */
+ unsigned stride; /**< row stride in bytes */
+ unsigned layer_stride; /**< image/layer stride in bytes */
+};
+
+
+/**
+ * A vertex buffer. Typically, all the vertex data/attributes for
+ * drawing something will be in one buffer. But it's also possible, for
+ * example, to put colors in one buffer and texcoords in another.
+ */
+struct pipe_vertex_buffer
+{
+ uint16_t stride; /**< stride to same attrib in next vertex, in bytes */
+ bool is_user_buffer;
+ unsigned buffer_offset; /**< offset to start of data in buffer, in bytes */
+
+ union {
+ struct pipe_resource *resource; /**< the actual buffer */
+ const void *user; /**< pointer to a user buffer */
+ } buffer;
+};
+
+
+/**
+ * A constant buffer. A subrange of an existing buffer can be set
+ * as a constant buffer.
+ */
+struct pipe_constant_buffer
+{
+ struct pipe_resource *buffer; /**< the actual buffer */
+ unsigned buffer_offset; /**< offset to start of data in buffer, in bytes */
+ unsigned buffer_size; /**< how much data can be read in shader */
+ const void *user_buffer; /**< pointer to a user buffer if buffer == NULL */
+};
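+
+/*
+ * Example (illustrative sketch): wrapping a CPU-side uniform block as a
+ * constant buffer ('uniforms' is a hypothetical application array):
+ *
+ *    float uniforms[16];
+ *    struct pipe_constant_buffer cb = {0};
+ *    cb.user_buffer = uniforms;          // 'buffer' stays NULL
+ *    cb.buffer_size = sizeof(uniforms);
+ */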
+
+
+/**
+ * An untyped shader buffer supporting loads, stores, and atomics.
+ */
+struct pipe_shader_buffer {
+ struct pipe_resource *buffer; /**< the actual buffer */
+ unsigned buffer_offset; /**< offset to start of data in buffer, in bytes */
+ unsigned buffer_size; /**< how much data can be read in shader */
+};
+
+
+/**
+ * A stream output target. The structure specifies the range vertices can
+ * be written to.
+ *
+ * In addition to that, the structure should internally maintain the offset
+ * into the buffer, which should be incremented every time something is written
+ * (appended) to it. The internal offset is buffer_offset + how many bytes
+ * have been written. The internal offset can be stored on the device
+ * and the CPU actually doesn't have to query it.
+ *
+ * Note that the buffer_size variable actually specifies the available
+ * space in the buffer, not the size of the attached buffer.
+ * In other words, in the majority of cases buffer_size is simply
+ * 'buffer->width0 - buffer_offset', i.e. the space left in the buffer,
+ * after accounting for the buffer offset, for stream output to write to.
+ *
+ * Use PIPE_QUERY_SO_STATISTICS to know how many primitives have
+ * actually been written.
+ */
+struct pipe_stream_output_target
+{
+ struct pipe_reference reference;
+ struct pipe_resource *buffer; /**< the output buffer */
+ struct pipe_context *context; /**< context this SO target belongs to */
+
+ unsigned buffer_offset; /**< offset where data should be written, in bytes */
+ unsigned buffer_size; /**< how much data is allowed to be written */
+};
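+
+/*
+ * Example (illustrative sketch): binding the tail of a buffer as a stream
+ * output target, following the buffer_size convention described above
+ * ('target' and 'buffer' are hypothetical):
+ *
+ *    target.buffer_offset = 4096;
+ *    target.buffer_size = buffer->width0 - target.buffer_offset;
+ */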
+
+
+/**
+ * Information to describe a vertex attribute (position, color, etc)
+ */
+struct pipe_vertex_element
+{
+ /** Offset of this attribute, in bytes, from the start of the vertex */
+ unsigned src_offset:16;
+
+ /** Which vertex_buffer (as given to pipe->set_vertex_buffer()) does
+ * this attribute live in?
+ */
+ unsigned vertex_buffer_index:5;
+
+ enum pipe_format src_format:11;
+
+ /** Instance data rate divisor. 0 means this is per-vertex data,
+ * n means per-instance data used for n consecutive instances (n > 0).
+ */
+ unsigned instance_divisor;
+};
+
+
+struct pipe_draw_indirect_info
+{
+ unsigned offset; /**< must be 4 byte aligned */
+ unsigned stride; /**< must be 4 byte aligned */
+ unsigned draw_count; /**< number of indirect draws */
+ unsigned indirect_draw_count_offset; /**< must be 4 byte aligned */
+
+ /* Indirect draw parameters resource is laid out as follows:
+ *
+ * if using indexed drawing:
+ * struct {
+ * uint32_t count;
+ * uint32_t instance_count;
+ * uint32_t start;
+ * int32_t index_bias;
+ * uint32_t start_instance;
+ * };
+ * otherwise:
+ * struct {
+ * uint32_t count;
+ * uint32_t instance_count;
+ * uint32_t start;
+ * uint32_t start_instance;
+ * };
+ */
+ struct pipe_resource *buffer;
+
+ /* Indirect draw count resource: If not NULL, contains a 32-bit value which
+ * is to be used as the real draw_count.
+ */
+ struct pipe_resource *indirect_draw_count;
+};
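+
+/*
+ * Example (illustrative sketch): the 16 bytes a state tracker would write
+ * into the indirect buffer for one non-indexed draw, following the layout
+ * documented above ('indirect' and 'map' are hypothetical: the info struct
+ * and the mapped contents of its buffer):
+ *
+ *    uint32_t cmd[4] = {
+ *       36,   // count
+ *       1,    // instance_count
+ *       0,    // start
+ *       0,    // start_instance
+ *    };
+ *    memcpy((char *)map + indirect->offset, cmd, sizeof(cmd));
+ */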
+
+
+/**
+ * Information to describe a draw_vbo call.
+ */
+struct pipe_draw_info
+{
+ ubyte index_size; /**< if 0, the draw is not indexed. */
+ enum pipe_prim_type mode:8; /**< the mode of the primitive */
+ unsigned primitive_restart:1;
+ unsigned has_user_indices:1; /**< if true, use index.user_buffer */
+ ubyte vertices_per_patch; /**< the number of vertices per patch */
+
+ /**
+ * Direct draws: start is the index of the first vertex
+ * Non-indexed indirect draws: not used
+ * Indexed indirect draws: start is added to the indirect start.
+ */
+ unsigned start;
+ unsigned count; /**< number of vertices */
+
+ unsigned start_instance; /**< first instance id */
+ unsigned instance_count; /**< number of instances */
+
+ unsigned drawid; /**< id of this draw in a multidraw */
+
+ /**
+ * For indexed drawing, these fields apply after index lookup.
+ */
+ int index_bias; /**< a bias to be added to each index */
+ unsigned min_index; /**< the min index */
+ unsigned max_index; /**< the max index */
+
+ /**
+ * Primitive restart enable/index (only applies to indexed drawing)
+ */
+ unsigned restart_index;
+
+ /* Pointers must be at the end for an optimal structure layout on 64-bit. */
+
+ /**
+ * An index buffer. When an index buffer is bound, all indices to vertices
+ * will be looked up from the buffer.
+ *
+ * If has_user_indices, use index.user, else use index.resource.
+ */
+ union {
+ struct pipe_resource *resource; /**< real buffer */
+ const void *user; /**< pointer to a user buffer */
+ } index;
+
+ struct pipe_draw_indirect_info *indirect; /**< Indirect draw. */
+
+ /**
+ * Stream output target. If not NULL, it's used to provide the 'count'
+ * parameter based on the number vertices captured by the stream output
+ * stage. (or generally, based on the number of bytes captured)
+ *
+ * Only 'mode', 'start_instance', and 'instance_count' are taken into
+ * account; all the other variables from pipe_draw_info are ignored.
+ *
+ * 'start' is implicitly 0 and 'count' is set as discussed above.
+ * The draw command is non-indexed.
+ *
+ * Note that this only provides the count. The vertex buffers must
+ * be set via set_vertex_buffers manually.
+ */
+ struct pipe_stream_output_target *count_from_stream_output;
+};
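+
+/*
+ * Example (illustrative sketch): a direct, non-indexed, single-instance
+ * draw of one triangle, assuming PIPE_PRIM_TRIANGLES from p_defines.h:
+ *
+ *    struct pipe_draw_info info = {0};
+ *    info.mode = PIPE_PRIM_TRIANGLES;
+ *    info.start = 0;
+ *    info.count = 3;
+ *    info.instance_count = 1;
+ */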
+
+
+/**
+ * Information to describe a blit call.
+ */
+struct pipe_blit_info
+{
+ struct {
+ struct pipe_resource *resource;
+ unsigned level;
+ struct pipe_box box; /**< negative width, height only legal for src */
+ /* For pipe_surface-like format casting: */
+ enum pipe_format format; /**< must be supported for sampling (src)
+ or rendering (dst), ZS is always supported */
+ } dst, src;
+
+ unsigned mask; /**< bitmask of PIPE_MASK_R/G/B/A/Z/S */
+ unsigned filter; /**< PIPE_TEX_FILTER_* */
+
+ bool scissor_enable;
+ struct pipe_scissor_state scissor;
+
+ /* Window rectangles can either be inclusive or exclusive. */
+ bool window_rectangle_include;
+ unsigned num_window_rectangles;
+ struct pipe_scissor_state window_rectangles[PIPE_MAX_WINDOW_RECTANGLES];
+
+ bool render_condition_enable; /**< whether the blit should honor the
+ current render condition */
+ bool alpha_blend; /* dst.rgb = src.rgb * src.a + dst.rgb * (1 - src.a) */
+};
+
+/**
+ * Information to describe a launch_grid call.
+ */
+struct pipe_grid_info
+{
+ /**
+ * For drivers that use PIPE_SHADER_IR_NATIVE as their preferred IR, this
+ * value will be the index of the kernel in the opencl.kernels metadata
+ * list.
+ */
+ uint32_t pc;
+
+ /**
+ * Will be used to initialize the INPUT resource, and it should point to a
+ * buffer of at least pipe_compute_state::req_input_mem bytes.
+ */
+ void *input;
+
+ /**
+ * Grid number of dimensions, 1-3, e.g. the work_dim parameter passed to
+ * clEnqueueNDRangeKernel. Note block[] and grid[] must be padded with
+ * 1 for non-used dimensions.
+ */
+ uint work_dim;
+
+ /**
+ * Determine the layout of the working block (in thread units) to be used.
+ */
+ uint block[3];
+
+ /**
+ * last_block allows disabling threads at the farthermost grid boundary.
+ * Full blocks as specified by "block" are launched, but the threads
+ * outside of "last_block" dimensions are disabled.
+ *
+ * If a block touches the grid boundary in the i-th axis, threads with
+ * THREAD_ID[i] >= last_block[i] are disabled.
+ *
+ * If last_block[i] is 0, it has the same behavior as last_block[i] = block[i],
+ * meaning no effect.
+ *
+ * It's equivalent to doing this at the beginning of the compute shader:
+ *
+ * for (i = 0; i < 3; i++) {
+ * if (block_id[i] == grid[i] - 1 &&
+ * last_block[i] && thread_id[i] >= last_block[i])
+ * return;
+ * }
+ */
+ uint last_block[3];
+
+ /**
+ * Determine the layout of the grid (in block units) to be used.
+ */
+ uint grid[3];
+
+ /* Indirect compute parameters resource: If not NULL, block sizes are taken
+ * from this buffer instead, which is laid out as follows:
+ *
+ * struct {
+ * uint32_t num_blocks_x;
+ * uint32_t num_blocks_y;
+ * uint32_t num_blocks_z;
+ * };
+ */
+ struct pipe_resource *indirect;
+ unsigned indirect_offset; /**< must be 4 byte aligned */
+};
+
+/**
+ * Structure used as a header for serialized compute programs.
+ */
+struct pipe_binary_program_header
+{
+ uint32_t num_bytes; /**< Number of bytes in the LLVM bytecode program. */
+ char blob[];
+};
+
+struct pipe_compute_state
+{
+ enum pipe_shader_ir ir_type; /**< IR type contained in prog. */
+ const void *prog; /**< Compute program to be executed. */
+ unsigned req_local_mem; /**< Required size of the LOCAL resource. */
+ unsigned req_private_mem; /**< Required size of the PRIVATE resource. */
+ unsigned req_input_mem; /**< Required size of the INPUT resource. */
+};
+
+/**
+ * Structure that contains a callback for debug messages from the driver back
+ * to the state tracker.
+ */
+struct pipe_debug_callback
+{
+ /**
+ * When set to \c true, the callback may be called asynchronously from a
+ * driver-created thread.
+ */
+ bool async;
+
+ /**
+ * Callback for the driver to report debug/performance/etc information back
+ * to the state tracker.
+ *
+ * \param data user-supplied data pointer
+ * \param id message type identifier; if the pointed-to value is 0, a
+ * new id is assigned
+ * \param type PIPE_DEBUG_TYPE_*
+ * \param format printf-style format string
+ * \param args args for format string
+ */
+ void (*debug_message)(void *data,
+ unsigned *id,
+ enum pipe_debug_type type,
+ const char *fmt,
+ va_list args);
+ void *data;
+};
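+
+/*
+ * Example (illustrative sketch): a minimal debug_message implementation
+ * that forwards driver messages to stderr (vfprintf from <stdio.h>):
+ *
+ *    static void
+ *    print_debug(void *data, unsigned *id, enum pipe_debug_type type,
+ *                const char *fmt, va_list args)
+ *    {
+ *       vfprintf(stderr, fmt, args);
+ *       fputc('\n', stderr);
+ *    }
+ */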
+
+/**
+ * Structure that contains a callback for device reset messages from the driver
+ * back to the state tracker.
+ *
+ * The callback must not be called from driver-created threads.
+ */
+struct pipe_device_reset_callback
+{
+ /**
+ * Callback for the driver to report when a device reset is detected.
+ *
+ * \param data user-supplied data pointer
+ * \param status PIPE_*_RESET
+ */
+ void (*reset)(void *data, enum pipe_reset_status status);
+
+ void *data;
+};
+
+/**
+ * Information about memory usage. All sizes are in kilobytes.
+ */
+struct pipe_memory_info
+{
+ unsigned total_device_memory; /**< size of device memory, e.g. VRAM */
+ unsigned avail_device_memory; /**< free device memory at the moment */
+ unsigned total_staging_memory; /**< size of staging memory, e.g. GART */
+ unsigned avail_staging_memory; /**< free staging memory at the moment */
+ unsigned device_memory_evicted; /**< size of memory evicted (monotonic counter) */
+ unsigned nr_device_memory_evictions; /**< # of evictions (monotonic counter) */
+};
+
+/**
+ * Structure that contains information about external memory
+ */
+struct pipe_memory_object
+{
+ bool dedicated;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mapi/glapi/glapi.h b/third_party/rust/glslopt/glsl-optimizer/src/mapi/glapi/glapi.h
new file mode 100644
index 0000000000..b11fe46107
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mapi/glapi/glapi.h
@@ -0,0 +1,188 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \mainpage Mesa GL API Module
+ *
+ * \section GLAPIIntroduction Introduction
+ *
+ * The Mesa GL API module is responsible for dispatching all the
+ * gl*() functions. All GL functions are dispatched by jumping through
+ * the current dispatch table (basically a struct full of function
+ * pointers.)
+ *
+ * A per-thread current dispatch table and per-thread current context
+ * pointer are managed by this module too.
+ *
+ * This module is intended to be non-Mesa-specific so it can be used
+ * with the X/DRI libGL as well.
+ */
+
+
+#ifndef _GLAPI_H
+#define _GLAPI_H
+
+#include "util/macros.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifdef _GLAPI_NO_EXPORTS
+# define _GLAPI_EXPORT
+#else /* _GLAPI_NO_EXPORTS */
+# ifdef _WIN32
+# ifdef _GLAPI_DLL_EXPORTS
+# define _GLAPI_EXPORT __declspec(dllexport)
+# else
+# define _GLAPI_EXPORT __declspec(dllimport)
+# endif
+# elif defined(__GNUC__)
+# define _GLAPI_EXPORT __attribute__((visibility("default")))
+# else
+# define _GLAPI_EXPORT
+# endif
+#endif /* _GLAPI_NO_EXPORTS */
+
+
+typedef void (*_glapi_proc)(void);
+
+typedef void (*_glapi_nop_handler_proc)(const char *name);
+
+struct _glapi_table;
+
+
+#if defined (USE_ELF_TLS)
+
+_GLAPI_EXPORT extern __thread struct _glapi_table * _glapi_tls_Dispatch
+ __attribute__((tls_model("initial-exec")));
+
+_GLAPI_EXPORT extern __thread void * _glapi_tls_Context
+ __attribute__((tls_model("initial-exec")));
+
+_GLAPI_EXPORT extern const struct _glapi_table *_glapi_Dispatch;
+_GLAPI_EXPORT extern const void *_glapi_Context;
+
+# define GET_DISPATCH() _glapi_tls_Dispatch
+# define GET_CURRENT_CONTEXT(C) struct gl_context *C = (struct gl_context *) _glapi_tls_Context
+
+#else
+
+_GLAPI_EXPORT extern struct _glapi_table *_glapi_Dispatch;
+_GLAPI_EXPORT extern void *_glapi_Context;
+
+#define GET_DISPATCH() \
+ (likely(_glapi_Dispatch) ? _glapi_Dispatch : _glapi_get_dispatch())
+
+#define GET_CURRENT_CONTEXT(C) struct gl_context *C = (struct gl_context *) \
+ (likely(_glapi_Context) ? _glapi_Context : _glapi_get_context())
+
+#endif /* defined (USE_ELF_TLS) */
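+
+/*
+ * Example (illustrative sketch): typical use of the macros above inside an
+ * API entry point ('_mesa_SomeFunction' is a placeholder name; GLAPIENTRY
+ * comes from the GL headers). GET_CURRENT_CONTEXT declares and initializes
+ * a local context pointer from the per-thread slot:
+ *
+ *    void GLAPIENTRY
+ *    _mesa_SomeFunction(GLenum pname)
+ *    {
+ *       GET_CURRENT_CONTEXT(ctx);
+ *       if (!ctx)
+ *          return;
+ *       ...
+ *    }
+ */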
+
+
+_GLAPI_EXPORT void
+_glapi_destroy_multithread(void);
+
+
+_GLAPI_EXPORT void
+_glapi_check_multithread(void);
+
+
+_GLAPI_EXPORT void
+_glapi_set_context(void *context);
+
+
+_GLAPI_EXPORT void *
+_glapi_get_context(void);
+
+
+_GLAPI_EXPORT void
+_glapi_set_dispatch(struct _glapi_table *dispatch);
+
+
+_GLAPI_EXPORT struct _glapi_table *
+_glapi_get_dispatch(void);
+
+
+_GLAPI_EXPORT unsigned int
+_glapi_get_dispatch_table_size(void);
+
+
+_GLAPI_EXPORT int
+_glapi_add_dispatch( const char * const * function_names,
+ const char * parameter_signature );
+
+_GLAPI_EXPORT int
+_glapi_get_proc_offset(const char *funcName);
+
+
+_GLAPI_EXPORT _glapi_proc
+_glapi_get_proc_address(const char *funcName);
+
+
+_GLAPI_EXPORT const char *
+_glapi_get_proc_name(unsigned int offset);
+
+
+#if defined(GLX_USE_APPLEGL) || defined(GLX_USE_WINDOWSGL)
+_GLAPI_EXPORT struct _glapi_table *
+_glapi_create_table_from_handle(void *handle, const char *symbol_prefix);
+
+_GLAPI_EXPORT void
+_glapi_table_patch(struct _glapi_table *, const char *name, void *wrapper);
+#endif
+
+
+_GLAPI_EXPORT void
+_glapi_set_nop_handler(_glapi_nop_handler_proc func);
+
+/** Return pointer to new dispatch table filled with no-op functions */
+_GLAPI_EXPORT struct _glapi_table *
+_glapi_new_nop_table(unsigned num_entries);
+
+
+/** Deprecated function */
+_GLAPI_EXPORT unsigned long
+_glthread_GetID(void);
+
+
+/*
+ * These stubs are kept so that the old DRI drivers still load.
+ */
+_GLAPI_EXPORT void
+_glapi_noop_enable_warnings(unsigned char enable);
+
+
+_GLAPI_EXPORT void
+_glapi_set_warning_func(_glapi_proc func);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _GLAPI_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/config.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/config.h
new file mode 100644
index 0000000000..4ea2e645b6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/config.h
@@ -0,0 +1,322 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file config.h
+ * Tunable configuration parameters.
+ */
+
+#ifndef MESA_CONFIG_H_INCLUDED
+#define MESA_CONFIG_H_INCLUDED
+
+#include "compiler/shader_enums.h"
+
+/**
+ * \name OpenGL implementation limits
+ */
+/*@{*/
+
+/** Maximum modelview matrix stack depth */
+#define MAX_MODELVIEW_STACK_DEPTH 32
+
+/** Maximum projection matrix stack depth */
+#define MAX_PROJECTION_STACK_DEPTH 32
+
+/** Maximum texture matrix stack depth */
+#define MAX_TEXTURE_STACK_DEPTH 10
+
+/** Maximum attribute stack depth */
+#define MAX_ATTRIB_STACK_DEPTH 16
+
+/** Maximum client attribute stack depth */
+#define MAX_CLIENT_ATTRIB_STACK_DEPTH 16
+
+/** Maximum recursion depth of display list calls */
+#define MAX_LIST_NESTING 64
+
+/** Maximum number of lights */
+#define MAX_LIGHTS 8
+
+/**
+ * Maximum number of user-defined clipping planes supported by any driver in
+ * Mesa. This is used to size arrays.
+ */
+#define MAX_CLIP_PLANES 8
+
+/** Maximum pixel map lookup table size */
+#define MAX_PIXEL_MAP_TABLE 256
+
+/** Maximum number of auxiliary color buffers */
+#define MAX_AUX_BUFFERS 1
+
+/** Maximum order (degree) of curves */
+#define MAX_EVAL_ORDER 30
+
+/** Maximum Name stack depth */
+#define MAX_NAME_STACK_DEPTH 64
+
+/** Minimum point size */
+#define MIN_POINT_SIZE 1.0
+/** Maximum point size */
+#define MAX_POINT_SIZE 60.0
+/** Point size granularity */
+#define POINT_SIZE_GRANULARITY 0.1
+
+/** Minimum line width */
+#define MIN_LINE_WIDTH 1.0
+/** Maximum line width */
+#define MAX_LINE_WIDTH 10.0
+/** Line width granularity */
+#define LINE_WIDTH_GRANULARITY 0.1
+
+/** Max memory to allow for a single texture image (in megabytes) */
+#define MAX_TEXTURE_MBYTES 1024
+
+/** Number of 1D/2D texture mipmap levels */
+#define MAX_TEXTURE_LEVELS 15
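+/* 15 levels allows a maximum 1D/2D texture size of 1 << 14 == 16384 texels. */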
+
+/** Number of 3D texture mipmap levels */
+#define MAX_3D_TEXTURE_LEVELS 15
+
+/** Number of cube texture mipmap levels - GL_ARB_texture_cube_map */
+#define MAX_CUBE_TEXTURE_LEVELS 15
+
+/** Maximum rectangular texture size - GL_NV_texture_rectangle */
+#define MAX_TEXTURE_RECT_SIZE 16384
+
+/**
+ * Maximum number of layers in a 1D or 2D array texture - GL_MESA_texture_array
+ */
+#define MAX_ARRAY_TEXTURE_LAYERS 64
+
+/**
+ * Max number of texture coordinate units. This mainly just applies to
+ * the fixed-function vertex code. This will be difficult to raise above
+ * eight because of various vertex attribute bitvectors.
+ */
+#define MAX_TEXTURE_COORD_UNITS 8
+
+/**
+ * Max number of texture image units. Also determines number of texture
+ * samplers in shaders.
+ */
+#define MAX_TEXTURE_IMAGE_UNITS 32
+
+/**
+ * Larger of MAX_TEXTURE_COORD_UNITS and MAX_TEXTURE_IMAGE_UNITS.
+ * This value is only used for dimensioning arrays.
+ * Either MAX_TEXTURE_COORD_UNITS or MAX_TEXTURE_IMAGE_UNITS (or the
+ * corresponding ctx->Const.MaxTextureCoord/ImageUnits fields) should be
+ * used almost everywhere else.
+ */
+#define MAX_TEXTURE_UNITS ((MAX_TEXTURE_COORD_UNITS > MAX_TEXTURE_IMAGE_UNITS) ? MAX_TEXTURE_COORD_UNITS : MAX_TEXTURE_IMAGE_UNITS)
+
+/** Maximum number of viewports supported with ARB_viewport_array */
+#define MAX_VIEWPORTS 16
+
+/** Maximum number of window rectangles supported with EXT_window_rectangles */
+#define MAX_WINDOW_RECTANGLES 8
+
+/** Maximum size for CVA. May be overridden by the drivers. */
+#define MAX_ARRAY_LOCK_SIZE 3000
+
+/** Subpixel precision for antialiasing, window coordinate snapping */
+#define SUB_PIXEL_BITS 4
+
+/** For GL_ARB_texture_compression */
+#define MAX_COMPRESSED_TEXTURE_FORMATS 25
+
+/** For GL_EXT_texture_filter_anisotropic */
+#define MAX_TEXTURE_MAX_ANISOTROPY 16.0
+
+/** For GL_EXT_texture_lod_bias (typically MAX_TEXTURE_LEVELS - 1) */
+#define MAX_TEXTURE_LOD_BIAS 14.0
+
+/** For any program target/extension */
+/*@{*/
+#define MAX_PROGRAM_INSTRUCTIONS (16 * 1024)
+
+/**
+ * Per-program constants (power of two)
+ *
+ * \c MAX_PROGRAM_LOCAL_PARAMS and \c MAX_UNIFORMS are just the assembly shader
+ * and GLSL shader names for the same thing. They should \b always have the
+ * same value. Each refers to the number of vec4 values supplied as
+ * per-program parameters.
+ */
+/*@{*/
+#define MAX_PROGRAM_LOCAL_PARAMS 4096
+#define MAX_UNIFORMS 4096
+#define MAX_UNIFORM_BUFFERS 15 /* + 1 default uniform buffer */
+#define MAX_SHADER_STORAGE_BUFFERS 16
+/* 6 is for vertex, hull, domain, geometry, fragment, and compute shader. */
+#define MAX_COMBINED_UNIFORM_BUFFERS (MAX_UNIFORM_BUFFERS * 6)
+#define MAX_COMBINED_SHADER_STORAGE_BUFFERS (MAX_SHADER_STORAGE_BUFFERS * 6)
+#define MAX_ATOMIC_COUNTERS 4096
+/* 6 is for vertex, hull, domain, geometry, fragment, and compute shader. */
+#define MAX_COMBINED_ATOMIC_BUFFERS (MAX_UNIFORM_BUFFERS * 6)
+/* Size of an atomic counter in bytes according to ARB_shader_atomic_counters */
+#define ATOMIC_COUNTER_SIZE 4
+#define MAX_IMAGE_UNIFORMS 32
+/* 6 is for vertex, hull, domain, geometry, fragment, and compute shader. */
+#define MAX_IMAGE_UNITS (MAX_IMAGE_UNIFORMS * 6)
+/*@}*/
+
+/**
+ * Per-context constants (power of two)
+ *
+ * \note
+ * This value should always be less than or equal to \c MAX_PROGRAM_LOCAL_PARAMS
+ * and \c MAX_VERTEX_PROGRAM_PARAMS. Otherwise some applications will make
+ * incorrect assumptions.
+ */
+#define MAX_PROGRAM_ENV_PARAMS 256
+
+#define MAX_PROGRAM_MATRICES 8
+#define MAX_PROGRAM_MATRIX_STACK_DEPTH 4
+#define MAX_PROGRAM_CALL_DEPTH 8
+#define MAX_PROGRAM_TEMPS 256
+#define MAX_PROGRAM_ADDRESS_REGS 1
+#define MAX_SAMPLERS MAX_TEXTURE_IMAGE_UNITS
+#define MAX_PROGRAM_INPUTS 32
+#define MAX_PROGRAM_OUTPUTS 64
+/*@}*/
+
+/** For GL_ARB_vertex_program */
+/*@{*/
+#define MAX_VERTEX_PROGRAM_ADDRESS_REGS 1
+#define MAX_VERTEX_PROGRAM_PARAMS MAX_UNIFORMS
+/*@}*/
+
+/** For GL_ARB_fragment_program */
+/*@{*/
+#define MAX_FRAGMENT_PROGRAM_ADDRESS_REGS 0
+#define MAX_FRAGMENT_PROGRAM_PARAMS 64
+#define MAX_FRAGMENT_PROGRAM_INPUTS 12
+/*@}*/
+
+/** For GL_ARB_vertex_shader */
+/*@{*/
+#define MAX_VERTEX_GENERIC_ATTRIBS 16
+/* 6 is for vertex, hull, domain, geometry, fragment, and compute shader. */
+#define MAX_COMBINED_TEXTURE_IMAGE_UNITS (MAX_TEXTURE_IMAGE_UNITS * 6)
+/*@}*/
+
+
+/** For GL_EXT_framebuffer_object */
+/*@{*/
+#define MAX_COLOR_ATTACHMENTS 8
+#define MAX_RENDERBUFFER_SIZE 16384
+/*@}*/
+
+/** For GL_ATI_envmap_bump - support bump mapping on first 8 units */
+#define SUPPORTED_ATI_BUMP_UNITS 0xff
+
+/** For GL_EXT_transform_feedback */
+#define MAX_FEEDBACK_BUFFERS 4
+#define MAX_FEEDBACK_ATTRIBS 32
+
+/** For geometry shader */
+/*@{*/
+#define MAX_GEOMETRY_UNIFORM_COMPONENTS 512
+#define MAX_GEOMETRY_OUTPUT_VERTICES 256
+#define MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS 1024
+/*@}*/
+
+/** For GL_ARB_debug_output and GL_KHR_debug */
+/*@{*/
+#define MAX_DEBUG_LOGGED_MESSAGES 10
+#define MAX_DEBUG_MESSAGE_LENGTH 4096
+/*@}*/
+
+/** For GL_KHR_debug */
+/*@{*/
+#define MAX_LABEL_LENGTH 256
+#define MAX_DEBUG_GROUP_STACK_DEPTH 64
+/*@}*/
+
+/** For GL_ARB_gpu_shader5 */
+/*@{*/
+#define MAX_GEOMETRY_SHADER_INVOCATIONS 32
+#define MIN_FRAGMENT_INTERPOLATION_OFFSET -0.5
+#define MAX_FRAGMENT_INTERPOLATION_OFFSET 0.5
+#define FRAGMENT_INTERPOLATION_OFFSET_BITS 4
+#define MAX_VERTEX_STREAMS 4
+/*@}*/
+
+/** For GL_ARB_shader_subroutine */
+/*@{*/
+#define MAX_SUBROUTINES 256
+#define MAX_SUBROUTINE_UNIFORM_LOCATIONS 1024
+/*@}*/
+
+/** For GL_INTEL_performance_query */
+/*@{*/
+#define MAX_PERFQUERY_QUERY_NAME_LENGTH 256
+#define MAX_PERFQUERY_COUNTER_NAME_LENGTH 256
+#define MAX_PERFQUERY_COUNTER_DESC_LENGTH 1024
+#define PERFQUERY_HAVE_GPA_EXTENDED_COUNTERS 0
+/*@}*/
+
+/** For GL_ARB_pipeline_statistics_query */
+#define MAX_PIPELINE_STATISTICS 11
+
+/** For GL_ARB_tessellation_shader */
+/*@{*/
+#define MAX_TESS_GEN_LEVEL 64
+#define MAX_PATCH_VERTICES 32
+#define MAX_TESS_PATCH_COMPONENTS 120
+#define MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS 4096
+/*@}*/
+
+/*
+ * Color channel component order
+ *
+ * \note Changes will almost certainly cause problems at this time.
+ */
+#define RCOMP 0
+#define GCOMP 1
+#define BCOMP 2
+#define ACOMP 3
+
+
+/**
+ * Maximum number of temporary vertices required for clipping.
+ *
+ * Used in array_cache and tnl modules.
+ */
+#define MAX_CLIPPED_VERTICES ((2 * (6 + MAX_CLIP_PLANES))+1)
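+/* With MAX_CLIP_PLANES == 8 above, this evaluates to (2 * (6 + 8)) + 1 == 29. */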
+
+
+/** For GL_ARB_sample_locations - maximum of SAMPLE_LOCATION_PIXEL_GRID_*_ARB */
+#define MAX_SAMPLE_LOCATION_GRID_SIZE 4
+
+/* It is theoretically possible for Consts.MaxSamples to be >32 but
+ * other code seems to assume that is not the case.
+ */
+#define MAX_SAMPLE_LOCATION_TABLE_SIZE \
+ (MAX_SAMPLE_LOCATION_GRID_SIZE * MAX_SAMPLE_LOCATION_GRID_SIZE * 32)
+
+#endif /* MESA_CONFIG_H_INCLUDED */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.c b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.c
new file mode 100644
index 0000000000..2b8becb806
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.c
@@ -0,0 +1,1920 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ * Copyright (C) 2008 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file context.c
+ * Mesa context/visual/framebuffer management functions.
+ * \author Brian Paul
+ */
+
+/**
+ * \mainpage Mesa Main Module
+ *
+ * \section MainIntroduction Introduction
+ *
+ * The Mesa Main module consists of all the files in the main/ directory.
+ * Among the features of this module are:
+ * <UL>
+ * <LI> Structures to represent most GL state </LI>
+ * <LI> State set/get functions </LI>
+ * <LI> Display lists </LI>
+ * <LI> Texture unit, object and image handling </LI>
+ * <LI> Matrix and attribute stacks </LI>
+ * </UL>
+ *
+ * Other modules are responsible for API dispatch, vertex transformation,
+ * point/line/triangle setup, rasterization, vertex array caching,
+ * vertex/fragment programs/shaders, etc.
+ *
+ *
+ * \section AboutDoxygen About Doxygen
+ *
+ * If you're viewing this information as Doxygen-generated HTML you'll
+ * see the documentation index at the top of this page.
+ *
+ * The first line lists the Mesa source code modules.
+ * The second line lists the indexes available for viewing the documentation
+ * for each module.
+ *
+ * Selecting the <b>Main page</b> link will display a summary of the module
+ * (this page).
+ *
+ * Selecting <b>Data Structures</b> will list all C structures.
+ *
+ * Selecting the <b>File List</b> link will list all the source files in
+ * the module.
+ * Selecting a filename will show a list of all functions defined in that file.
+ *
+ * Selecting the <b>Data Fields</b> link will display a list of all
+ * documented structure members.
+ *
+ * Selecting the <b>Globals</b> link will display a list
+ * of all functions, structures, global variables and macros in the module.
+ *
+ */
+
+
+#include "glheader.h"
+
+#include "accum.h"
+#include "api_exec.h"
+#include "api_loopback.h"
+#include "arrayobj.h"
+#include "attrib.h"
+#include "bbox.h"
+#include "blend.h"
+#include "buffers.h"
+#include "bufferobj.h"
+#include "conservativeraster.h"
+#include "context.h"
+#include "cpuinfo.h"
+#include "debug.h"
+#include "debug_output.h"
+#include "depth.h"
+#include "dlist.h"
+#include "eval.h"
+#include "extensions.h"
+#include "fbobject.h"
+#include "feedback.h"
+#include "fog.h"
+#include "formats.h"
+#include "framebuffer.h"
+#include "glthread.h"
+#include "hint.h"
+#include "hash.h"
+#include "light.h"
+#include "lines.h"
+#include "macros.h"
+#include "matrix.h"
+#include "multisample.h"
+#include "performance_monitor.h"
+#include "performance_query.h"
+#include "pipelineobj.h"
+#include "pixel.h"
+#include "pixelstore.h"
+#include "points.h"
+#include "polygon.h"
+#include "queryobj.h"
+#include "syncobj.h"
+#include "rastpos.h"
+#include "remap.h"
+#include "scissor.h"
+#include "shared.h"
+#include "shaderobj.h"
+#include "shaderimage.h"
+#include "state.h"
+#include "util/debug.h"
+#include "util/disk_cache.h"
+#include "util/strtod.h"
+#include "stencil.h"
+#include "shaderimage.h"
+#include "texcompress_s3tc.h"
+#include "texstate.h"
+#include "transformfeedback.h"
+#include "mtypes.h"
+#include "varray.h"
+#include "version.h"
+#include "viewport.h"
+#include "texturebindless.h"
+#include "program/program.h"
+#include "math/m_matrix.h"
+#include "main/dispatch.h" /* for _gloffset_COUNT */
+#include "macros.h"
+#include "git_sha1.h"
+
+#ifdef USE_SPARC_ASM
+#include "sparc/sparc.h"
+#endif
+
+#include "compiler/glsl_types.h"
+#include "compiler/glsl/builtin_functions.h"
+#include "compiler/glsl/glsl_parser_extras.h"
+#include <stdbool.h>
+#include "util/u_memory.h"
+
+
+#ifndef MESA_VERBOSE
+int MESA_VERBOSE = 0;
+#endif
+
+#ifndef MESA_DEBUG_FLAGS
+int MESA_DEBUG_FLAGS = 0;
+#endif
+
+
+/* ubyte -> float conversion */
+GLfloat _mesa_ubyte_to_float_color_tab[256];
+
+
+
+/**
+ * Swap buffers notification callback.
+ *
+ * \param ctx GL context.
+ *
+ * Called by window system just before swapping buffers.
+ * We have to finish any pending rendering.
+ */
+void
+_mesa_notifySwapBuffers(struct gl_context *ctx)
+{
+ if (MESA_VERBOSE & VERBOSE_SWAPBUFFERS)
+ _mesa_debug(ctx, "SwapBuffers\n");
+ FLUSH_VERTICES(ctx, 0);
+ if (ctx->Driver.Flush) {
+ ctx->Driver.Flush(ctx);
+ }
+}
+
+
+/**********************************************************************/
+/** \name GL Visual allocation/destruction */
+/**********************************************************************/
+/*@{*/
+
+/**
+ * Allocates a struct gl_config structure and initializes it via
+ * _mesa_initialize_visual().
+ *
+ * \param dbFlag double buffering
+ * \param stereoFlag stereo buffer
+ * \param depthBits requested bits per depth buffer value. Any value in [0, 32]
+ * is acceptable but the actual depth type will be GLushort or GLuint as
+ * needed.
+ * \param stencilBits requested minimum bits per stencil buffer value
+ * \param accumRedBits, accumGreenBits, accumBlueBits, accumAlphaBits number
+ * of bits per color component in accum buffer.
+ * \param redBits number of bits per color component in frame buffer for RGB(A)
+ * mode. We always use 8 in core Mesa though.
+ * \param greenBits same as above.
+ * \param blueBits same as above.
+ * \param alphaBits same as above.
+ * \param numSamples not really used.
+ *
+ * \return pointer to new struct gl_config or NULL if requested parameters
+ * can't be met.
+ *
+ * \note Need to add params for level and numAuxBuffers (at least)
+ */
+struct gl_config *
+_mesa_create_visual( GLboolean dbFlag,
+ GLboolean stereoFlag,
+ GLint redBits,
+ GLint greenBits,
+ GLint blueBits,
+ GLint alphaBits,
+ GLint depthBits,
+ GLint stencilBits,
+ GLint accumRedBits,
+ GLint accumGreenBits,
+ GLint accumBlueBits,
+ GLint accumAlphaBits,
+ GLuint numSamples )
+{
+ struct gl_config *vis = CALLOC_STRUCT(gl_config);
+ if (vis) {
+ if (!_mesa_initialize_visual(vis, dbFlag, stereoFlag,
+ redBits, greenBits, blueBits, alphaBits,
+ depthBits, stencilBits,
+ accumRedBits, accumGreenBits,
+ accumBlueBits, accumAlphaBits,
+ numSamples)) {
+ free(vis);
+ return NULL;
+ }
+ }
+ return vis;
+}
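+
+/*
+ * Example (illustrative sketch): requesting a double-buffered RGBA8 visual
+ * with a 24-bit depth buffer and an 8-bit stencil buffer, no accumulation
+ * buffer and no multisampling:
+ *
+ *    struct gl_config *vis =
+ *       _mesa_create_visual(GL_TRUE, GL_FALSE,
+ *                           8, 8, 8, 8,   // red/green/blue/alpha bits
+ *                           24, 8,        // depth/stencil bits
+ *                           0, 0, 0, 0,   // accum bits
+ *                           0);           // samples
+ */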
+
+
+/**
+ * Performs some sanity checks and fills in the fields of the struct
+ * gl_config object with the given parameters. If the caller needs to
+ * set additional fields, it should probably just initialize the whole
+ * gl_config object itself.
+ *
+ * \return GL_TRUE on success, or GL_FALSE on failure.
+ *
+ * \sa _mesa_create_visual() above for the parameter description.
+ */
+GLboolean
+_mesa_initialize_visual( struct gl_config *vis,
+ GLboolean dbFlag,
+ GLboolean stereoFlag,
+ GLint redBits,
+ GLint greenBits,
+ GLint blueBits,
+ GLint alphaBits,
+ GLint depthBits,
+ GLint stencilBits,
+ GLint accumRedBits,
+ GLint accumGreenBits,
+ GLint accumBlueBits,
+ GLint accumAlphaBits,
+ GLuint numSamples )
+{
+ assert(vis);
+
+ if (depthBits < 0 || depthBits > 32) {
+ return GL_FALSE;
+ }
+ if (stencilBits < 0 || stencilBits > 8) {
+ return GL_FALSE;
+ }
+ assert(accumRedBits >= 0);
+ assert(accumGreenBits >= 0);
+ assert(accumBlueBits >= 0);
+ assert(accumAlphaBits >= 0);
+
+ vis->doubleBufferMode = dbFlag;
+ vis->stereoMode = stereoFlag;
+
+ vis->redBits = redBits;
+ vis->greenBits = greenBits;
+ vis->blueBits = blueBits;
+ vis->alphaBits = alphaBits;
+ vis->rgbBits = redBits + greenBits + blueBits;
+
+ vis->depthBits = depthBits;
+ vis->stencilBits = stencilBits;
+
+ vis->accumRedBits = accumRedBits;
+ vis->accumGreenBits = accumGreenBits;
+ vis->accumBlueBits = accumBlueBits;
+ vis->accumAlphaBits = accumAlphaBits;
+
+ vis->numAuxBuffers = 0;
+ vis->level = 0;
+ vis->sampleBuffers = numSamples > 0 ? 1 : 0;
+ vis->samples = numSamples;
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Destroy a visual and free its memory.
+ *
+ * \param vis visual.
+ *
+ * Frees the visual structure.
+ */
+void
+_mesa_destroy_visual( struct gl_config *vis )
+{
+ free(vis);
+}
+
+/*@}*/
+
+
+/**********************************************************************/
+/** \name Context allocation, initialization, destroying
+ *
+ * The purpose of most of the initialization functions here is to provide the
+ * default state values according to the OpenGL specification.
+ */
+/**********************************************************************/
+/*@{*/
+
+
+/**
+ * One-time initialization mutex lock.
+ *
+ * \sa Used by one_time_init().
+ */
+mtx_t OneTimeLock = _MTX_INITIALIZER_NP;
+
+
+/**
+ * Calls all the various one-time-fini functions in Mesa
+ */
+
+static void
+one_time_fini(void)
+{
+ glsl_type_singleton_decref();
+ _mesa_locale_fini();
+}
+
+/**
+ * Calls all the various one-time-init functions in Mesa.
+ *
+ * While holding a global mutex lock, calls several initialization functions,
+ * and sets the glapi callbacks if the \c MESA_DEBUG environment variable is
+ * defined.
+ *
+ * \sa _math_init().
+ */
+void
+_mesa_initialize(void)
+{
+ static bool initialized;
+
+ mtx_lock(&OneTimeLock);
+
+ /* truly one-time init */
+ if (!initialized) {
+ GLuint i;
+
+ STATIC_ASSERT(sizeof(GLbyte) == 1);
+ STATIC_ASSERT(sizeof(GLubyte) == 1);
+ STATIC_ASSERT(sizeof(GLshort) == 2);
+ STATIC_ASSERT(sizeof(GLushort) == 2);
+ STATIC_ASSERT(sizeof(GLint) == 4);
+ STATIC_ASSERT(sizeof(GLuint) == 4);
+
+ _mesa_locale_init();
+
+ _mesa_one_time_init_extension_overrides();
+
+ _mesa_get_cpu_features();
+
+ for (i = 0; i < 256; i++) {
+ _mesa_ubyte_to_float_color_tab[i] = (float) i / 255.0F;
+ }
+
+ atexit(one_time_fini);
+
+#if defined(DEBUG)
+ if (MESA_VERBOSE != 0) {
+ _mesa_debug(NULL, "Mesa " PACKAGE_VERSION " DEBUG build" MESA_GIT_SHA1 "\n");
+ }
+#endif
+
+ /* Take a glsl type reference for the duration of libGL's life to avoid
+ * unnecessary creation/destruction of glsl types.
+ */
+ glsl_type_singleton_init_or_ref();
+
+ _mesa_init_remap_table();
+ }
+
+ initialized = true;
+
+ mtx_unlock(&OneTimeLock);
+}
+
+
+/**
+ * Initialize fields of gl_current_attrib (aka ctx->Current.*)
+ */
+static void
+_mesa_init_current(struct gl_context *ctx)
+{
+ GLuint i;
+
+ /* Init all to (0,0,0,1) */
+ for (i = 0; i < ARRAY_SIZE(ctx->Current.Attrib); i++) {
+ ASSIGN_4V( ctx->Current.Attrib[i], 0.0, 0.0, 0.0, 1.0 );
+ }
+
+ /* redo special cases: */
+ ASSIGN_4V( ctx->Current.Attrib[VERT_ATTRIB_NORMAL], 0.0, 0.0, 1.0, 1.0 );
+ ASSIGN_4V( ctx->Current.Attrib[VERT_ATTRIB_COLOR0], 1.0, 1.0, 1.0, 1.0 );
+ ASSIGN_4V( ctx->Current.Attrib[VERT_ATTRIB_COLOR1], 0.0, 0.0, 0.0, 1.0 );
+ ASSIGN_4V( ctx->Current.Attrib[VERT_ATTRIB_COLOR_INDEX], 1.0, 0.0, 0.0, 1.0 );
+ ASSIGN_4V( ctx->Current.Attrib[VERT_ATTRIB_EDGEFLAG], 1.0, 0.0, 0.0, 1.0 );
+}
+
+
+/**
+ * Init vertex/fragment/geometry program limits.
+ * Important: drivers should override these with actual limits.
+ */
+static void
+init_program_limits(struct gl_constants *consts, gl_shader_stage stage,
+ struct gl_program_constants *prog)
+{
+ prog->MaxInstructions = MAX_PROGRAM_INSTRUCTIONS;
+ prog->MaxAluInstructions = MAX_PROGRAM_INSTRUCTIONS;
+ prog->MaxTexInstructions = MAX_PROGRAM_INSTRUCTIONS;
+ prog->MaxTexIndirections = MAX_PROGRAM_INSTRUCTIONS;
+ prog->MaxTemps = MAX_PROGRAM_TEMPS;
+ prog->MaxEnvParams = MAX_PROGRAM_ENV_PARAMS;
+ prog->MaxLocalParams = MAX_PROGRAM_LOCAL_PARAMS;
+ prog->MaxAddressOffset = MAX_PROGRAM_LOCAL_PARAMS;
+
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ prog->MaxParameters = MAX_VERTEX_PROGRAM_PARAMS;
+ prog->MaxAttribs = MAX_VERTEX_GENERIC_ATTRIBS;
+ prog->MaxAddressRegs = MAX_VERTEX_PROGRAM_ADDRESS_REGS;
+ prog->MaxUniformComponents = 4 * MAX_UNIFORMS;
+ prog->MaxInputComponents = 0; /* value not used */
+ prog->MaxOutputComponents = 16 * 4; /* old limit not to break tnl and swrast */
+ break;
+ case MESA_SHADER_FRAGMENT:
+ prog->MaxParameters = MAX_FRAGMENT_PROGRAM_PARAMS;
+ prog->MaxAttribs = MAX_FRAGMENT_PROGRAM_INPUTS;
+ prog->MaxAddressRegs = MAX_FRAGMENT_PROGRAM_ADDRESS_REGS;
+ prog->MaxUniformComponents = 4 * MAX_UNIFORMS;
+ prog->MaxInputComponents = 16 * 4; /* old limit not to break tnl and swrast */
+ prog->MaxOutputComponents = 0; /* value not used */
+ break;
+ case MESA_SHADER_TESS_CTRL:
+ case MESA_SHADER_TESS_EVAL:
+ case MESA_SHADER_GEOMETRY:
+ prog->MaxParameters = MAX_VERTEX_PROGRAM_PARAMS;
+ prog->MaxAttribs = MAX_VERTEX_GENERIC_ATTRIBS;
+ prog->MaxAddressRegs = MAX_VERTEX_PROGRAM_ADDRESS_REGS;
+ prog->MaxUniformComponents = 4 * MAX_UNIFORMS;
+ prog->MaxInputComponents = 16 * 4; /* old limit not to break tnl and swrast */
+ prog->MaxOutputComponents = 16 * 4; /* old limit not to break tnl and swrast */
+ break;
+ case MESA_SHADER_COMPUTE:
+ prog->MaxParameters = 0; /* not meaningful for compute shaders */
+ prog->MaxAttribs = 0; /* not meaningful for compute shaders */
+ prog->MaxAddressRegs = 0; /* not meaningful for compute shaders */
+ prog->MaxUniformComponents = 4 * MAX_UNIFORMS;
+ prog->MaxInputComponents = 0; /* not meaningful for compute shaders */
+ prog->MaxOutputComponents = 0; /* not meaningful for compute shaders */
+ break;
+ default:
+ assert(0 && "Bad shader stage in init_program_limits()");
+ }
+
+ /* Set the native limits to zero. This implies that there is no native
+ * support for shaders. Let the drivers fill in the actual values.
+ */
+ prog->MaxNativeInstructions = 0;
+ prog->MaxNativeAluInstructions = 0;
+ prog->MaxNativeTexInstructions = 0;
+ prog->MaxNativeTexIndirections = 0;
+ prog->MaxNativeAttribs = 0;
+ prog->MaxNativeTemps = 0;
+ prog->MaxNativeAddressRegs = 0;
+ prog->MaxNativeParameters = 0;
+
+ /* Set GLSL datatype range/precision info assuming IEEE float values.
+ * Drivers should override these defaults as needed.
+ */
+ prog->MediumFloat.RangeMin = 127;
+ prog->MediumFloat.RangeMax = 127;
+ prog->MediumFloat.Precision = 23;
+ prog->LowFloat = prog->HighFloat = prog->MediumFloat;
+
+ /* Assume ints are stored as floats for now, since this is the least-common
+ * denominator. The OpenGL ES spec implies (page 132) that the precision
+ * of integer types should be 0. Practically speaking, IEEE
+ * single-precision floating point values can only store integers in the
+ * range [-0x01000000, 0x01000000] without loss of precision.
+ */
+ prog->MediumInt.RangeMin = 24;
+ prog->MediumInt.RangeMax = 24;
+ prog->MediumInt.Precision = 0;
+ prog->LowInt = prog->HighInt = prog->MediumInt;
+
+ prog->MaxUniformBlocks = 12;
+ prog->MaxCombinedUniformComponents = (prog->MaxUniformComponents +
+ consts->MaxUniformBlockSize / 4 *
+ prog->MaxUniformBlocks);
+
+ prog->MaxAtomicBuffers = 0;
+ prog->MaxAtomicCounters = 0;
+
+ prog->MaxShaderStorageBlocks = 8;
+}
+
+
+/**
+ * Initialize fields of gl_constants (aka ctx->Const.*).
+ * Use defaults from config.h. The device drivers will often override
+ * some of these values (such as number of texture units).
+ */
+void
+_mesa_init_constants(struct gl_constants *consts, gl_api api)
+{
+ int i;
+ assert(consts);
+
+ /* Constants, may be overridden (usually only reduced) by device drivers */
+ consts->MaxTextureMbytes = MAX_TEXTURE_MBYTES;
+ consts->MaxTextureSize = 1 << (MAX_TEXTURE_LEVELS - 1);
+ consts->Max3DTextureLevels = MAX_3D_TEXTURE_LEVELS;
+ consts->MaxCubeTextureLevels = MAX_CUBE_TEXTURE_LEVELS;
+ consts->MaxTextureRectSize = MAX_TEXTURE_RECT_SIZE;
+ consts->MaxArrayTextureLayers = MAX_ARRAY_TEXTURE_LAYERS;
+ consts->MaxTextureCoordUnits = MAX_TEXTURE_COORD_UNITS;
+ consts->Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits = MAX_TEXTURE_IMAGE_UNITS;
+ consts->MaxTextureUnits = MIN2(consts->MaxTextureCoordUnits,
+ consts->Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
+ consts->MaxTextureMaxAnisotropy = MAX_TEXTURE_MAX_ANISOTROPY;
+ consts->MaxTextureLodBias = MAX_TEXTURE_LOD_BIAS;
+ consts->MaxTextureBufferSize = 65536;
+ consts->TextureBufferOffsetAlignment = 1;
+ consts->MaxArrayLockSize = MAX_ARRAY_LOCK_SIZE;
+ consts->SubPixelBits = SUB_PIXEL_BITS;
+ consts->MinPointSize = MIN_POINT_SIZE;
+ consts->MaxPointSize = MAX_POINT_SIZE;
+ consts->MinPointSizeAA = MIN_POINT_SIZE;
+ consts->MaxPointSizeAA = MAX_POINT_SIZE;
+ consts->PointSizeGranularity = (GLfloat) POINT_SIZE_GRANULARITY;
+ consts->MinLineWidth = MIN_LINE_WIDTH;
+ consts->MaxLineWidth = MAX_LINE_WIDTH;
+ consts->MinLineWidthAA = MIN_LINE_WIDTH;
+ consts->MaxLineWidthAA = MAX_LINE_WIDTH;
+ consts->LineWidthGranularity = (GLfloat) LINE_WIDTH_GRANULARITY;
+ consts->MaxClipPlanes = 6;
+ consts->MaxLights = MAX_LIGHTS;
+ consts->MaxShininess = 128.0;
+ consts->MaxSpotExponent = 128.0;
+ consts->MaxViewportWidth = 16384;
+ consts->MaxViewportHeight = 16384;
+ consts->MinMapBufferAlignment = 64;
+
+ /* Driver must override these values if ARB_viewport_array is supported. */
+ consts->MaxViewports = 1;
+ consts->ViewportSubpixelBits = 0;
+ consts->ViewportBounds.Min = 0;
+ consts->ViewportBounds.Max = 0;
+
+ /** GL_ARB_uniform_buffer_object */
+ consts->MaxCombinedUniformBlocks = 36;
+ consts->MaxUniformBufferBindings = 36;
+ consts->MaxUniformBlockSize = 16384;
+ consts->UniformBufferOffsetAlignment = 1;
+
+ /** GL_ARB_shader_storage_buffer_object */
+ consts->MaxCombinedShaderStorageBlocks = 8;
+ consts->MaxShaderStorageBufferBindings = 8;
+ consts->MaxShaderStorageBlockSize = 128 * 1024 * 1024; /* 2^27 */
+ consts->ShaderStorageBufferOffsetAlignment = 256;
+
+ /* GL_ARB_explicit_uniform_location, GL_MAX_UNIFORM_LOCATIONS */
+ consts->MaxUserAssignableUniformLocations =
+ 4 * MESA_SHADER_STAGES * MAX_UNIFORMS;
+
+ for (i = 0; i < MESA_SHADER_STAGES; i++)
+ init_program_limits(consts, i, &consts->Program[i]);
+
+ consts->MaxProgramMatrices = MAX_PROGRAM_MATRICES;
+ consts->MaxProgramMatrixStackDepth = MAX_PROGRAM_MATRIX_STACK_DEPTH;
+
+ /* Set the absolute minimum possible GLSL version. API_OPENGL_CORE can
+ * mean an OpenGL 3.0 forward-compatible context, so that implies a minimum
+ * possible version of 1.30. Otherwise, the minimum possible version is 1.20.
+ * Since Mesa unconditionally advertises GL_ARB_shading_language_100 and
+ * GL_ARB_shader_objects, every driver has GLSL 1.20... even if they don't
+ * advertise any extensions to enable any shader stages (e.g.,
+ * GL_ARB_vertex_shader).
+ */
+ consts->GLSLVersion = api == API_OPENGL_CORE ? 130 : 120;
+ consts->GLSLVersionCompat = consts->GLSLVersion;
+
+ consts->GLSLLowerConstArrays = true;
+
+ /* Assume that if GLSL 1.30+ (or GLSL ES 3.00+) is supported that
+ * gl_VertexID is implemented using a native hardware register with OpenGL
+ * semantics.
+ */
+ consts->VertexID_is_zero_based = false;
+
+ /* GL_ARB_draw_buffers */
+ consts->MaxDrawBuffers = MAX_DRAW_BUFFERS;
+
+ consts->MaxColorAttachments = MAX_COLOR_ATTACHMENTS;
+ consts->MaxRenderbufferSize = MAX_RENDERBUFFER_SIZE;
+
+ consts->Program[MESA_SHADER_VERTEX].MaxTextureImageUnits = MAX_TEXTURE_IMAGE_UNITS;
+ consts->MaxCombinedTextureImageUnits = MAX_COMBINED_TEXTURE_IMAGE_UNITS;
+ consts->MaxVarying = 16; /* old limit not to break tnl and swrast */
+ consts->Program[MESA_SHADER_GEOMETRY].MaxTextureImageUnits = MAX_TEXTURE_IMAGE_UNITS;
+ consts->MaxGeometryOutputVertices = MAX_GEOMETRY_OUTPUT_VERTICES;
+ consts->MaxGeometryTotalOutputComponents = MAX_GEOMETRY_TOTAL_OUTPUT_COMPONENTS;
+ consts->MaxGeometryShaderInvocations = MAX_GEOMETRY_SHADER_INVOCATIONS;
+
+#ifdef DEBUG
+ consts->GenerateTemporaryNames = true;
+#else
+ consts->GenerateTemporaryNames = false;
+#endif
+
+ /* GL_ARB_framebuffer_object */
+ consts->MaxSamples = 0;
+
+ /* GLSL default if NativeIntegers == FALSE */
+ consts->UniformBooleanTrue = FLOAT_AS_UNION(1.0f).u;
+
+ /* GL_ARB_sync */
+ consts->MaxServerWaitTimeout = 0x7fffffff7fffffffULL;
+
+ /* GL_EXT_provoking_vertex */
+ consts->QuadsFollowProvokingVertexConvention = GL_TRUE;
+
+ /** GL_ARB_viewport_array */
+ consts->LayerAndVPIndexProvokingVertex = GL_UNDEFINED_VERTEX;
+
+ /* GL_EXT_transform_feedback */
+ consts->MaxTransformFeedbackBuffers = MAX_FEEDBACK_BUFFERS;
+ consts->MaxTransformFeedbackSeparateComponents = 4 * MAX_FEEDBACK_ATTRIBS;
+ consts->MaxTransformFeedbackInterleavedComponents = 4 * MAX_FEEDBACK_ATTRIBS;
+ consts->MaxVertexStreams = 1;
+
+ /* GL 3.2 */
+ consts->ProfileMask = api == API_OPENGL_CORE
+ ? GL_CONTEXT_CORE_PROFILE_BIT
+ : GL_CONTEXT_COMPATIBILITY_PROFILE_BIT;
+
+ /* GL 4.4 */
+ consts->MaxVertexAttribStride = 2048;
+
+ /** GL_EXT_gpu_shader4 */
+ consts->MinProgramTexelOffset = -8;
+ consts->MaxProgramTexelOffset = 7;
+
+ /* GL_ARB_texture_gather */
+ consts->MinProgramTextureGatherOffset = -8;
+ consts->MaxProgramTextureGatherOffset = 7;
+
+ /* GL_ARB_robustness */
+ consts->ResetStrategy = GL_NO_RESET_NOTIFICATION_ARB;
+
+ /* GL_KHR_robustness */
+ consts->RobustAccess = GL_FALSE;
+
+ /* ES 3.0 or ARB_ES3_compatibility */
+ consts->MaxElementIndex = 0xffffffffu;
+
+ /* GL_ARB_texture_multisample */
+ consts->MaxColorTextureSamples = 1;
+ consts->MaxDepthTextureSamples = 1;
+ consts->MaxIntegerSamples = 1;
+
+ /* GL_ARB_shader_atomic_counters */
+ consts->MaxAtomicBufferBindings = MAX_COMBINED_ATOMIC_BUFFERS;
+ consts->MaxAtomicBufferSize = MAX_ATOMIC_COUNTERS * ATOMIC_COUNTER_SIZE;
+ consts->MaxCombinedAtomicBuffers = MAX_COMBINED_ATOMIC_BUFFERS;
+ consts->MaxCombinedAtomicCounters = MAX_ATOMIC_COUNTERS;
+
+ /* GL_ARB_vertex_attrib_binding */
+ consts->MaxVertexAttribRelativeOffset = 2047;
+ consts->MaxVertexAttribBindings = MAX_VERTEX_GENERIC_ATTRIBS;
+
+ /* GL_ARB_compute_shader */
+ consts->MaxComputeWorkGroupCount[0] = 65535;
+ consts->MaxComputeWorkGroupCount[1] = 65535;
+ consts->MaxComputeWorkGroupCount[2] = 65535;
+ consts->MaxComputeWorkGroupSize[0] = 1024;
+ consts->MaxComputeWorkGroupSize[1] = 1024;
+ consts->MaxComputeWorkGroupSize[2] = 64;
+ /* Enables compute support for GLES 3.1 if >= 128 */
+ consts->MaxComputeWorkGroupInvocations = 0;
+
+ /** GL_ARB_gpu_shader5 */
+ consts->MinFragmentInterpolationOffset = MIN_FRAGMENT_INTERPOLATION_OFFSET;
+ consts->MaxFragmentInterpolationOffset = MAX_FRAGMENT_INTERPOLATION_OFFSET;
+
+ /** GL_KHR_context_flush_control */
+ consts->ContextReleaseBehavior = GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH;
+
+ /** GL_ARB_tessellation_shader */
+ consts->MaxTessGenLevel = MAX_TESS_GEN_LEVEL;
+ consts->MaxPatchVertices = MAX_PATCH_VERTICES;
+ consts->Program[MESA_SHADER_TESS_CTRL].MaxTextureImageUnits = MAX_TEXTURE_IMAGE_UNITS;
+ consts->Program[MESA_SHADER_TESS_EVAL].MaxTextureImageUnits = MAX_TEXTURE_IMAGE_UNITS;
+ consts->MaxTessPatchComponents = MAX_TESS_PATCH_COMPONENTS;
+ consts->MaxTessControlTotalOutputComponents = MAX_TESS_CONTROL_TOTAL_OUTPUT_COMPONENTS;
+ consts->PrimitiveRestartForPatches = false;
+
+ /** GL_ARB_compute_variable_group_size */
+ consts->MaxComputeVariableGroupSize[0] = 512;
+ consts->MaxComputeVariableGroupSize[1] = 512;
+ consts->MaxComputeVariableGroupSize[2] = 64;
+ consts->MaxComputeVariableGroupInvocations = 512;
+
+ /** GL_NV_conservative_raster */
+ consts->MaxSubpixelPrecisionBiasBits = 0;
+
+ /** GL_NV_conservative_raster_dilate */
+ consts->ConservativeRasterDilateRange[0] = 0.0;
+ consts->ConservativeRasterDilateRange[1] = 0.0;
+ consts->ConservativeRasterDilateGranularity = 0.0;
+
+ consts->glBeginEndBufferSize = 512 * 1024;
+}
+
+
+/**
+ * Do some sanity checks on the limits/constants for the given context.
+ * Only called the first time a context is bound.
+ */
+static void
+check_context_limits(struct gl_context *ctx)
+{
+ (void) ctx;
+
+ /* check that we don't exceed the size of various bitfields */
+ assert(VARYING_SLOT_MAX <=
+ (8 * sizeof(ctx->VertexProgram._Current->info.outputs_written)));
+ assert(VARYING_SLOT_MAX <=
+ (8 * sizeof(ctx->FragmentProgram._Current->info.inputs_read)));
+
+ /* shader-related checks */
+ assert(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxLocalParams <= MAX_PROGRAM_LOCAL_PARAMS);
+ assert(ctx->Const.Program[MESA_SHADER_VERTEX].MaxLocalParams <= MAX_PROGRAM_LOCAL_PARAMS);
+
+ /* Texture unit checks */
+ assert(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits > 0);
+ assert(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits <= MAX_TEXTURE_IMAGE_UNITS);
+ assert(ctx->Const.MaxTextureCoordUnits > 0);
+ assert(ctx->Const.MaxTextureCoordUnits <= MAX_TEXTURE_COORD_UNITS);
+ assert(ctx->Const.MaxTextureUnits > 0);
+ assert(ctx->Const.MaxTextureUnits <= MAX_TEXTURE_IMAGE_UNITS);
+ assert(ctx->Const.MaxTextureUnits <= MAX_TEXTURE_COORD_UNITS);
+ assert(ctx->Const.MaxTextureUnits == MIN2(ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits,
+ ctx->Const.MaxTextureCoordUnits));
+ assert(ctx->Const.MaxCombinedTextureImageUnits > 0);
+ assert(ctx->Const.MaxCombinedTextureImageUnits <= MAX_COMBINED_TEXTURE_IMAGE_UNITS);
+ assert(ctx->Const.MaxTextureCoordUnits <= MAX_COMBINED_TEXTURE_IMAGE_UNITS);
+ /* number of coord units cannot be greater than number of image units */
+ assert(ctx->Const.MaxTextureCoordUnits <= ctx->Const.Program[MESA_SHADER_FRAGMENT].MaxTextureImageUnits);
+
+
+ /* Texture size checks */
+ assert(ctx->Const.MaxTextureSize <= (1 << (MAX_TEXTURE_LEVELS - 1)));
+ assert(ctx->Const.Max3DTextureLevels <= MAX_3D_TEXTURE_LEVELS);
+ assert(ctx->Const.MaxCubeTextureLevels <= MAX_CUBE_TEXTURE_LEVELS);
+ assert(ctx->Const.MaxTextureRectSize <= MAX_TEXTURE_RECT_SIZE);
+
+ /* Texture level checks */
+ assert(MAX_TEXTURE_LEVELS >= MAX_3D_TEXTURE_LEVELS);
+ assert(MAX_TEXTURE_LEVELS >= MAX_CUBE_TEXTURE_LEVELS);
+
+ /* Max texture size should be <= max viewport size (render to texture) */
+ assert(ctx->Const.MaxTextureSize <= ctx->Const.MaxViewportWidth);
+ assert(ctx->Const.MaxTextureSize <= ctx->Const.MaxViewportHeight);
+
+ assert(ctx->Const.MaxDrawBuffers <= MAX_DRAW_BUFFERS);
+
+ /* if this fails, add more enum values to gl_buffer_index */
+ assert(BUFFER_COLOR0 + MAX_DRAW_BUFFERS <= BUFFER_COUNT);
+
+ /* XXX probably add more tests */
+}
+
+
+/**
+ * Initialize the attribute groups in a GL context.
+ *
+ * \param ctx GL context.
+ *
+ * Initializes all the attributes, calling the respective <tt>init*</tt>
+ * functions for the more complex data structures.
+ */
+static GLboolean
+init_attrib_groups(struct gl_context *ctx)
+{
+ assert(ctx);
+
+ /* Constants */
+ _mesa_init_constants(&ctx->Const, ctx->API);
+
+ /* Extensions */
+ _mesa_init_extensions(&ctx->Extensions);
+
+ /* Attribute Groups */
+ _mesa_init_accum( ctx );
+ _mesa_init_attrib( ctx );
+ _mesa_init_bbox( ctx );
+ _mesa_init_buffer_objects( ctx );
+ _mesa_init_color( ctx );
+ _mesa_init_conservative_raster( ctx );
+ _mesa_init_current( ctx );
+ _mesa_init_depth( ctx );
+ _mesa_init_debug( ctx );
+ _mesa_init_debug_output( ctx );
+ _mesa_init_display_list( ctx );
+ _mesa_init_eval( ctx );
+ _mesa_init_fbobjects( ctx );
+ _mesa_init_feedback( ctx );
+ _mesa_init_fog( ctx );
+ _mesa_init_hint( ctx );
+ _mesa_init_image_units( ctx );
+ _mesa_init_line( ctx );
+ _mesa_init_lighting( ctx );
+ _mesa_init_matrix( ctx );
+ _mesa_init_multisample( ctx );
+ _mesa_init_performance_monitors( ctx );
+ _mesa_init_performance_queries( ctx );
+ _mesa_init_pipeline( ctx );
+ _mesa_init_pixel( ctx );
+ _mesa_init_pixelstore( ctx );
+ _mesa_init_point( ctx );
+ _mesa_init_polygon( ctx );
+ _mesa_init_program( ctx );
+ _mesa_init_queryobj( ctx );
+ _mesa_init_sync( ctx );
+ _mesa_init_rastpos( ctx );
+ _mesa_init_scissor( ctx );
+ _mesa_init_shader_state( ctx );
+ _mesa_init_stencil( ctx );
+ _mesa_init_transform( ctx );
+ _mesa_init_transform_feedback( ctx );
+ _mesa_init_varray( ctx );
+ _mesa_init_viewport( ctx );
+ _mesa_init_resident_handles( ctx );
+
+ if (!_mesa_init_texture( ctx ))
+ return GL_FALSE;
+
+ /* Miscellaneous */
+ ctx->TileRasterOrderIncreasingX = GL_TRUE;
+ ctx->TileRasterOrderIncreasingY = GL_TRUE;
+ ctx->NewState = _NEW_ALL;
+ ctx->NewDriverState = ~0;
+ ctx->ErrorValue = GL_NO_ERROR;
+ ctx->ShareGroupReset = false;
+ ctx->varying_vp_inputs = VERT_BIT_ALL;
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Update default objects in a GL context with respect to shared state.
+ *
+ * \param ctx GL context.
+ *
+ * Removes references to old default objects, (texture objects, program
+ * objects, etc.) and changes to reference those from the current shared
+ * state.
+ */
+static GLboolean
+update_default_objects(struct gl_context *ctx)
+{
+ assert(ctx);
+
+ _mesa_update_default_objects_program(ctx);
+ _mesa_update_default_objects_texture(ctx);
+ _mesa_update_default_objects_buffer_objects(ctx);
+
+ return GL_TRUE;
+}
+
+
+/* XXX this is temporary and should be removed at some point in the
+ * future when there's a reasonable expectation that the libGL library
+ * contains the _glapi_new_nop_table() and _glapi_set_nop_handler()
+ * functions which were added in Mesa 10.6.
+ */
+#if !defined(_WIN32)
+/* Avoid libGL / driver ABI break */
+#define USE_GLAPI_NOP_FEATURES 0
+#else
+#define USE_GLAPI_NOP_FEATURES 1
+#endif
+
+
+/**
+ * This function is called by the glapi no-op functions. For each OpenGL
+ * entrypoint there's a corresponding simple no-op function, and each of
+ * those no-ops calls this handler.
+ *
+ * If there's a current OpenGL context for the calling thread, we record a
+ * GL_INVALID_OPERATION error. This can happen either because the app's
+ * calling an unsupported extension function, or calling an illegal function
+ * (such as glClear between glBegin/glEnd).
+ *
+ * If there's no current OpenGL context for the calling thread, we can
+ * print a message to stderr.
+ *
+ * \param name the name of the OpenGL function
+ */
+#if USE_GLAPI_NOP_FEATURES
+static void
+nop_handler(const char *name)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ if (ctx) {
+ _mesa_error(ctx, GL_INVALID_OPERATION, "%s(invalid call)", name);
+ }
+#ifndef NDEBUG
+ else if (getenv("MESA_DEBUG") || getenv("LIBGL_DEBUG")) {
+ fprintf(stderr,
+ "GL User Error: gl%s called without a rendering context\n",
+ name);
+ fflush(stderr);
+ }
+#endif
+}
+#endif
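+
+/* Illustrative sketch (not part of the upstream file): from the
+ * application's point of view, calling an entrypoint that resolved to a
+ * glapi no-op lands in nop_handler() above and records an error:
+ *
+ *    glSomeUnsupportedExtensionMESA();   // hypothetical entrypoint
+ *    assert(glGetError() == GL_INVALID_OPERATION);
+ */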
+
+
+/**
+ * Special no-op glFlush, see below.
+ */
+#if defined(_WIN32)
+static void GLAPIENTRY
+nop_glFlush(void)
+{
+ /* don't record an error like we do in nop_handler() */
+}
+#endif
+
+
+#if !USE_GLAPI_NOP_FEATURES
+static int
+generic_nop(void)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "unsupported function called "
+ "(unsupported extension or deprecated function?)");
+ return 0;
+}
+#endif
+
+
+/**
+ * Create a new API dispatch table in which all entries point to the
+ * generic_nop() function. This will not work on Windows because of
+ * the __stdcall convention which requires the callee to clean up the
+ * call stack. That's impossible with one generic no-op function.
+ */
+struct _glapi_table *
+_mesa_new_nop_table(unsigned numEntries)
+{
+ struct _glapi_table *table;
+
+#if !USE_GLAPI_NOP_FEATURES
+ table = malloc(numEntries * sizeof(_glapi_proc));
+ if (table) {
+ _glapi_proc *entry = (_glapi_proc *) table;
+ unsigned i;
+ for (i = 0; i < numEntries; i++) {
+ entry[i] = (_glapi_proc) generic_nop;
+ }
+ }
+#else
+ table = _glapi_new_nop_table(numEntries);
+#endif
+ return table;
+}
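+
+/* Illustrative sketch (not part of the upstream file): allocating a table
+ * in which every slot is a no-op, sized to match what libGL expects, then
+ * installing it so that nothing dispatched does real work:
+ *
+ *    struct _glapi_table *tbl =
+ *       _mesa_new_nop_table(_glapi_get_dispatch_table_size());
+ *    if (tbl)
+ *       _glapi_set_dispatch(tbl);
+ */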
+
+
+/**
+ * Allocate and initialize a new dispatch table. The table will be
+ * populated with pointers to "no-op" functions. In turn, the no-op
+ * functions will call nop_handler() above.
+ */
+struct _glapi_table *
+_mesa_alloc_dispatch_table(void)
+{
+ /* Find the larger of Mesa's dispatch table and libGL's dispatch table.
+ * In practice, this'll be the same for stand-alone Mesa. But for DRI
+ * Mesa we do this to accommodate different versions of libGL and various
+ * DRI drivers.
+ */
+ int numEntries = MAX2(_glapi_get_dispatch_table_size(), _gloffset_COUNT);
+
+ struct _glapi_table *table = _mesa_new_nop_table(numEntries);
+
+#if defined(_WIN32)
+ if (table) {
+ /* This is a special case for Windows in the event that
+ * wglGetProcAddress is called between glBegin/End().
+ *
+ * The MS opengl32.dll library apparently calls glFlush from
+ * wglGetProcAddress(). If we're inside glBegin/End(), glFlush
+ * will dispatch to a no-op table entry and we'll generate a
+ * GL_INVALID_OPERATION error.
+ *
+ * The specific case which hits this is piglit's primitive-restart
+ * test which calls glPrimitiveRestartNV() inside glBegin/End. The
+ * first time we call glPrimitiveRestartNV() Piglit's API dispatch
+ * code will try to resolve the function by calling wglGetProcAddress.
+ * This raises GL_INVALID_OPERATION and an assert(glGetError()==0)
+ * will fail causing the test to fail. By suppressing the error, the
+ * assertion passes and the test continues.
+ */
+ SET_Flush(table, nop_glFlush);
+ }
+#endif
+
+#if USE_GLAPI_NOP_FEATURES
+ _glapi_set_nop_handler(nop_handler);
+#endif
+
+ return table;
+}
+
+/**
+ * Creates a minimal dispatch table for use within glBegin()/glEnd().
+ *
+ * This ensures that we generate GL_INVALID_OPERATION errors from most
+ * functions, since the set of functions that are valid within Begin/End is
+ * very small.
+ *
+ * From the GL 1.0 specification section 2.6.3, "GL Commands within
+ * Begin/End"
+ *
+ * "The only GL commands that are allowed within any Begin/End pairs are
+ * the commands for specifying vertex coordinates, vertex color, normal
+ * coordinates, and texture coordinates (Vertex, Color, Index, Normal,
+ * TexCoord), EvalCoord and EvalPoint commands (see section 5.1),
+ * commands for specifying lighting material parameters (Material
+ * commands see section 2.12.2), display list invocation commands
+ * (CallList and CallLists see section 5.4), and the EdgeFlag
+ * command. Executing Begin after Begin has already been executed but
+ * before an End is issued generates the INVALID OPERATION error, as does
+ * executing End without a previous corresponding Begin. Executing any
+ * other GL command within Begin/End results in the error INVALID
+ * OPERATION."
+ *
+ * The table entries for specifying vertex attributes are set up by
+ * install_vtxfmt() and _mesa_loopback_init_api_table(); the End() and
+ * display-list entries are set by install_vtxfmt() as well.
+ */
+static struct _glapi_table *
+create_beginend_table(const struct gl_context *ctx)
+{
+ struct _glapi_table *table;
+
+ table = _mesa_alloc_dispatch_table();
+ if (!table)
+ return NULL;
+
+ /* Fill in functions which return a value, since they should return some
+ * specific value even when they emit a GL_INVALID_OPERATION error because
+ * they were called within glBegin()/glEnd().
+ */
+#define COPY_DISPATCH(func) SET_##func(table, GET_##func(ctx->Exec))
+
+ COPY_DISPATCH(GenLists);
+ COPY_DISPATCH(IsProgram);
+ COPY_DISPATCH(IsVertexArray);
+ COPY_DISPATCH(IsBuffer);
+ COPY_DISPATCH(IsEnabled);
+ COPY_DISPATCH(IsEnabledi);
+ COPY_DISPATCH(IsRenderbuffer);
+ COPY_DISPATCH(IsFramebuffer);
+ COPY_DISPATCH(CheckFramebufferStatus);
+ COPY_DISPATCH(RenderMode);
+ COPY_DISPATCH(GetString);
+ COPY_DISPATCH(GetStringi);
+ COPY_DISPATCH(GetPointerv);
+ COPY_DISPATCH(IsQuery);
+ COPY_DISPATCH(IsSampler);
+ COPY_DISPATCH(IsSync);
+ COPY_DISPATCH(IsTexture);
+ COPY_DISPATCH(IsTransformFeedback);
+ COPY_DISPATCH(DeleteQueries);
+ COPY_DISPATCH(AreTexturesResident);
+ COPY_DISPATCH(FenceSync);
+ COPY_DISPATCH(ClientWaitSync);
+ COPY_DISPATCH(MapBuffer);
+ COPY_DISPATCH(UnmapBuffer);
+ COPY_DISPATCH(MapBufferRange);
+ COPY_DISPATCH(ObjectPurgeableAPPLE);
+ COPY_DISPATCH(ObjectUnpurgeableAPPLE);
+
+ _mesa_loopback_init_api_table(ctx, table);
+
+ return table;
+}
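+
+/* Illustrative sketch (not part of the upstream file): the vbo module is
+ * expected to swap this table in on glBegin() and restore the regular one
+ * on glEnd(), roughly:
+ *
+ *    ctx->CurrentClientDispatch = ctx->BeginEnd;
+ *    _glapi_set_dispatch(ctx->CurrentClientDispatch);
+ *    ... vertex calls ...
+ *    ctx->CurrentClientDispatch = ctx->OutsideBeginEnd;
+ *    _glapi_set_dispatch(ctx->CurrentClientDispatch);
+ */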
+
+void
+_mesa_initialize_dispatch_tables(struct gl_context *ctx)
+{
+ /* Do the code-generated setup of the exec table in api_exec.c. */
+ _mesa_initialize_exec_table(ctx);
+
+ if (ctx->Save)
+ _mesa_initialize_save_table(ctx);
+}
+
+/**
+ * Initialize a struct gl_context struct (rendering context).
+ *
+ * This includes allocating all the other structs and arrays which hang off of
+ * the context by pointers.
+ * Note that the driver needs to pass in its dd_function_table here since
+ * we need to at least call driverFunctions->NewTextureObject to create the
+ * default texture objects.
+ *
+ * Called by _mesa_create_context().
+ *
+ * Performs one-time initialization of the import/export callback tables and
+ * other miscellaneous state. If no shared context is supplied, one is
+ * allocated and its reference count is increased. Sets up the GL API
+ * dispatch tables, initializes the TNL module, and sets the maximum Z
+ * buffer depth.
+ * Finally queries the \c MESA_DEBUG and \c MESA_VERBOSE environment variables
+ * for debug flags.
+ *
+ * \param ctx the context to initialize
+ * \param api the GL API type to create the context for
+ * \param visual describes the visual attributes for this context or NULL to
+ * create a configless context
+ * \param share_list points to context to share textures, display lists,
+ * etc with, or NULL
+ * \param driverFunctions table of device driver functions for this context
+ * to use
+ */
+GLboolean
+_mesa_initialize_context(struct gl_context *ctx,
+ gl_api api,
+ const struct gl_config *visual,
+ struct gl_context *share_list,
+ const struct dd_function_table *driverFunctions)
+{
+ struct gl_shared_state *shared;
+ int i;
+
+ assert(driverFunctions->NewTextureObject);
+ assert(driverFunctions->FreeTextureImageBuffer);
+
+ ctx->API = api;
+ ctx->DrawBuffer = NULL;
+ ctx->ReadBuffer = NULL;
+ ctx->WinSysDrawBuffer = NULL;
+ ctx->WinSysReadBuffer = NULL;
+
+ if (visual) {
+ ctx->Visual = *visual;
+ ctx->HasConfig = GL_TRUE;
+ }
+ else {
+ memset(&ctx->Visual, 0, sizeof ctx->Visual);
+ ctx->HasConfig = GL_FALSE;
+ }
+
+ _mesa_override_gl_version(ctx);
+
+ /* misc one-time initializations */
+ _mesa_initialize();
+
+ /* Plug in driver functions and context pointer here.
+ * This is important because when we call alloc_shared_state() below
+ * we'll call ctx->Driver.NewTextureObject() to create the default
+ * textures.
+ */
+ ctx->Driver = *driverFunctions;
+
+ if (share_list) {
+ /* share state with another context */
+ shared = share_list->Shared;
+ }
+ else {
+ /* allocate new, unshared state */
+ shared = _mesa_alloc_shared_state(ctx);
+ if (!shared)
+ return GL_FALSE;
+ }
+
+ _mesa_reference_shared_state(ctx, &ctx->Shared, shared);
+
+ if (!init_attrib_groups( ctx ))
+ goto fail;
+
+ /* KHR_no_error is likely to crash, overflow memory, etc. if an application
+ * has errors, so don't enable it for setuid processes.
+ */
+ if (env_var_as_boolean("MESA_NO_ERROR", false)) {
+#if !defined(_WIN32)
+ if (geteuid() == getuid())
+#endif
+ ctx->Const.ContextFlags |= GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;
+ }
+
+ /* setup the API dispatch tables with all nop functions */
+ ctx->OutsideBeginEnd = _mesa_alloc_dispatch_table();
+ if (!ctx->OutsideBeginEnd)
+ goto fail;
+ ctx->Exec = ctx->OutsideBeginEnd;
+ ctx->CurrentClientDispatch = ctx->CurrentServerDispatch = ctx->OutsideBeginEnd;
+
+ ctx->FragmentProgram._MaintainTexEnvProgram
+ = (getenv("MESA_TEX_PROG") != NULL);
+
+ ctx->VertexProgram._MaintainTnlProgram
+ = (getenv("MESA_TNL_PROG") != NULL);
+ if (ctx->VertexProgram._MaintainTnlProgram) {
+ /* this is required... */
+ ctx->FragmentProgram._MaintainTexEnvProgram = GL_TRUE;
+ }
+
+ /* Mesa core handles all the formats that it knows about.
+ * Drivers will want to override this list with just the formats
+ * they can handle, and confirm that appropriate fallbacks exist in
+ * _mesa_choose_tex_format().
+ */
+ memset(&ctx->TextureFormatSupported, GL_TRUE,
+ sizeof(ctx->TextureFormatSupported));
+
+ switch (ctx->API) {
+ case API_OPENGL_COMPAT:
+ ctx->BeginEnd = create_beginend_table(ctx);
+ ctx->Save = _mesa_alloc_dispatch_table();
+ if (!ctx->BeginEnd || !ctx->Save)
+ goto fail;
+
+ /* fall-through */
+ case API_OPENGL_CORE:
+ break;
+ case API_OPENGLES:
+ /**
+ * GL_OES_texture_cube_map says
+ * "Initially all texture generation modes are set to REFLECTION_MAP_OES"
+ */
+ for (i = 0; i < ARRAY_SIZE(ctx->Texture.FixedFuncUnit); i++) {
+ struct gl_fixedfunc_texture_unit *texUnit =
+ &ctx->Texture.FixedFuncUnit[i];
+
+ texUnit->GenS.Mode = GL_REFLECTION_MAP_NV;
+ texUnit->GenT.Mode = GL_REFLECTION_MAP_NV;
+ texUnit->GenR.Mode = GL_REFLECTION_MAP_NV;
+ texUnit->GenS._ModeBit = TEXGEN_REFLECTION_MAP_NV;
+ texUnit->GenT._ModeBit = TEXGEN_REFLECTION_MAP_NV;
+ texUnit->GenR._ModeBit = TEXGEN_REFLECTION_MAP_NV;
+ }
+ break;
+ case API_OPENGLES2:
+ ctx->FragmentProgram._MaintainTexEnvProgram = GL_TRUE;
+ ctx->VertexProgram._MaintainTnlProgram = GL_TRUE;
+ break;
+ }
+
+ ctx->FirstTimeCurrent = GL_TRUE;
+
+ return GL_TRUE;
+
+fail:
+ _mesa_reference_shared_state(ctx, &ctx->Shared, NULL);
+ free(ctx->BeginEnd);
+ free(ctx->OutsideBeginEnd);
+ free(ctx->Save);
+ return GL_FALSE;
+}
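+
+/* Illustrative sketch (not part of the upstream file): the minimal
+ * driver-side bring-up sequence for this function. my_new_texture_object
+ * and my_free_texture_image_buffer are hypothetical driver callbacks; the
+ * two shown are the ones asserted above.
+ *
+ *    struct dd_function_table funcs;
+ *    memset(&funcs, 0, sizeof(funcs));
+ *    funcs.NewTextureObject = my_new_texture_object;
+ *    funcs.FreeTextureImageBuffer = my_free_texture_image_buffer;
+ *
+ *    struct gl_context *ctx = calloc(1, sizeof(*ctx));
+ *    if (ctx && !_mesa_initialize_context(ctx, API_OPENGL_COMPAT, visual,
+ *                                         NULL, &funcs)) {
+ *       free(ctx);
+ *       ctx = NULL;
+ *    }
+ */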
+
+
+/**
+ * Free the data associated with the given context.
+ *
+ * Does not, however, free the struct gl_context object itself.
+ *
+ * \sa _mesa_initialize_context() and init_attrib_groups().
+ */
+void
+_mesa_free_context_data(struct gl_context *ctx)
+{
+ if (!_mesa_get_current_context()) {
+ /* No current context, but we may need one in order to delete
+ * texture objs, etc. So temporarily bind the context now.
+ */
+ _mesa_make_current(ctx, NULL, NULL);
+ }
+
+ /* unreference WinSysDraw/Read buffers */
+ _mesa_reference_framebuffer(&ctx->WinSysDrawBuffer, NULL);
+ _mesa_reference_framebuffer(&ctx->WinSysReadBuffer, NULL);
+ _mesa_reference_framebuffer(&ctx->DrawBuffer, NULL);
+ _mesa_reference_framebuffer(&ctx->ReadBuffer, NULL);
+
+ _mesa_reference_program(ctx, &ctx->VertexProgram.Current, NULL);
+ _mesa_reference_program(ctx, &ctx->VertexProgram._Current, NULL);
+ _mesa_reference_program(ctx, &ctx->VertexProgram._TnlProgram, NULL);
+
+ _mesa_reference_program(ctx, &ctx->TessCtrlProgram._Current, NULL);
+ _mesa_reference_program(ctx, &ctx->TessEvalProgram._Current, NULL);
+ _mesa_reference_program(ctx, &ctx->GeometryProgram._Current, NULL);
+
+ _mesa_reference_program(ctx, &ctx->FragmentProgram.Current, NULL);
+ _mesa_reference_program(ctx, &ctx->FragmentProgram._Current, NULL);
+ _mesa_reference_program(ctx, &ctx->FragmentProgram._TexEnvProgram, NULL);
+
+ _mesa_reference_program(ctx, &ctx->ComputeProgram._Current, NULL);
+
+ _mesa_reference_vao(ctx, &ctx->Array.VAO, NULL);
+ _mesa_reference_vao(ctx, &ctx->Array.DefaultVAO, NULL);
+ _mesa_reference_vao(ctx, &ctx->Array._EmptyVAO, NULL);
+ _mesa_reference_vao(ctx, &ctx->Array._DrawVAO, NULL);
+
+ _mesa_free_attrib_data(ctx);
+ _mesa_free_buffer_objects(ctx);
+ _mesa_free_eval_data( ctx );
+ _mesa_free_texture_data( ctx );
+ _mesa_free_image_textures(ctx);
+ _mesa_free_matrix_data( ctx );
+ _mesa_free_pipeline_data(ctx);
+ _mesa_free_program_data(ctx);
+ _mesa_free_shader_state(ctx);
+ _mesa_free_queryobj_data(ctx);
+ _mesa_free_sync_data(ctx);
+ _mesa_free_varray_data(ctx);
+ _mesa_free_transform_feedback(ctx);
+ _mesa_free_performance_monitors(ctx);
+ _mesa_free_performance_queries(ctx);
+ _mesa_free_resident_handles(ctx);
+
+ _mesa_reference_buffer_object(ctx, &ctx->Pack.BufferObj, NULL);
+ _mesa_reference_buffer_object(ctx, &ctx->Unpack.BufferObj, NULL);
+ _mesa_reference_buffer_object(ctx, &ctx->DefaultPacking.BufferObj, NULL);
+ _mesa_reference_buffer_object(ctx, &ctx->Array.ArrayBufferObj, NULL);
+
+ /* free dispatch tables */
+ free(ctx->BeginEnd);
+ free(ctx->OutsideBeginEnd);
+ free(ctx->Save);
+ free(ctx->ContextLost);
+ free(ctx->MarshalExec);
+
+ /* Shared context state (display lists, textures, etc) */
+ _mesa_reference_shared_state(ctx, &ctx->Shared, NULL);
+
+ /* needs to be after freeing shared state */
+ _mesa_free_display_list_data(ctx);
+
+ _mesa_free_errors_data(ctx);
+
+ free((void *)ctx->Extensions.String);
+
+ free(ctx->VersionString);
+
+ ralloc_free(ctx->SoftFP64);
+
+ /* unbind the context if it's currently bound */
+ if (ctx == _mesa_get_current_context()) {
+ _mesa_make_current(NULL, NULL, NULL);
+ }
+
+ /* Do this after unbinding context to ensure any thread is finished. */
+ if (ctx->shader_builtin_ref) {
+ _mesa_glsl_builtin_functions_decref();
+ ctx->shader_builtin_ref = false;
+ }
+
+ free(ctx->Const.SpirVExtensions);
+}
+
+
+/**
+ * Destroy a struct gl_context structure.
+ *
+ * \param ctx GL context.
+ *
+ * Calls _mesa_free_context_data() and frees the gl_context object itself.
+ */
+void
+_mesa_destroy_context( struct gl_context *ctx )
+{
+ if (ctx) {
+ _mesa_free_context_data(ctx);
+ free( (void *) ctx );
+ }
+}
+
+
+/**
+ * Copy attribute groups from one context to another.
+ *
+ * \param src source context
+ * \param dst destination context
+ * \param mask bitwise OR of GL_*_BIT flags
+ *
+ * According to the bits specified in \p mask, copies the corresponding
+ * attributes from \p src into \p dst. For many of the attributes a simple \c
+ * memcpy is not enough due to the existence of internal pointers in their data
+ * structures.
+ */
+void
+_mesa_copy_context( const struct gl_context *src, struct gl_context *dst,
+ GLuint mask )
+{
+ if (mask & GL_ACCUM_BUFFER_BIT) {
+ /* OK to memcpy */
+ dst->Accum = src->Accum;
+ }
+ if (mask & GL_COLOR_BUFFER_BIT) {
+ /* OK to memcpy */
+ dst->Color = src->Color;
+ }
+ if (mask & GL_CURRENT_BIT) {
+ /* OK to memcpy */
+ dst->Current = src->Current;
+ }
+ if (mask & GL_DEPTH_BUFFER_BIT) {
+ /* OK to memcpy */
+ dst->Depth = src->Depth;
+ }
+ if (mask & GL_ENABLE_BIT) {
+ /* no op */
+ }
+ if (mask & GL_EVAL_BIT) {
+ /* OK to memcpy */
+ dst->Eval = src->Eval;
+ }
+ if (mask & GL_FOG_BIT) {
+ /* OK to memcpy */
+ dst->Fog = src->Fog;
+ }
+ if (mask & GL_HINT_BIT) {
+ /* OK to memcpy */
+ dst->Hint = src->Hint;
+ }
+ if (mask & GL_LIGHTING_BIT) {
+ /* OK to memcpy */
+ dst->Light = src->Light;
+ }
+ if (mask & GL_LINE_BIT) {
+ /* OK to memcpy */
+ dst->Line = src->Line;
+ }
+ if (mask & GL_LIST_BIT) {
+ /* OK to memcpy */
+ dst->List = src->List;
+ }
+ if (mask & GL_PIXEL_MODE_BIT) {
+ /* OK to memcpy */
+ dst->Pixel = src->Pixel;
+ }
+ if (mask & GL_POINT_BIT) {
+ /* OK to memcpy */
+ dst->Point = src->Point;
+ }
+ if (mask & GL_POLYGON_BIT) {
+ /* OK to memcpy */
+ dst->Polygon = src->Polygon;
+ }
+ if (mask & GL_POLYGON_STIPPLE_BIT) {
+ /* Use loop instead of memcpy due to problem with Portland Group's
+ * C compiler. Reported by John Stone.
+ */
+ GLuint i;
+ for (i = 0; i < 32; i++) {
+ dst->PolygonStipple[i] = src->PolygonStipple[i];
+ }
+ }
+ if (mask & GL_SCISSOR_BIT) {
+ /* OK to memcpy */
+ dst->Scissor = src->Scissor;
+ }
+ if (mask & GL_STENCIL_BUFFER_BIT) {
+ /* OK to memcpy */
+ dst->Stencil = src->Stencil;
+ }
+ if (mask & GL_TEXTURE_BIT) {
+ /* Cannot memcpy because of pointers */
+ _mesa_copy_texture_state(src, dst);
+ }
+ if (mask & GL_TRANSFORM_BIT) {
+ /* OK to memcpy */
+ dst->Transform = src->Transform;
+ }
+ if (mask & GL_VIEWPORT_BIT) {
+ unsigned i;
+ for (i = 0; i < src->Const.MaxViewports; i++) {
+ /* OK to memcpy */
+ dst->ViewportArray[i] = src->ViewportArray[i];
+ }
+ }
+
+ /* XXX FIXME: Call callbacks?
+ */
+ dst->NewState = _NEW_ALL;
+ dst->NewDriverState = ~0;
+}
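+
+/* Illustrative sketch (not part of the upstream file): copying a few
+ * attribute groups between two already-initialized contexts:
+ *
+ *    _mesa_copy_context(srcCtx, dstCtx,
+ *                       GL_LIGHTING_BIT | GL_TEXTURE_BIT | GL_VIEWPORT_BIT);
+ */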
+
+
+/**
+ * Check if the given context can render into the given framebuffer
+ * by checking visual attributes.
+ *
+ * \return GL_TRUE if compatible, GL_FALSE otherwise.
+ */
+static GLboolean
+check_compatible(const struct gl_context *ctx,
+ const struct gl_framebuffer *buffer)
+{
+ const struct gl_config *ctxvis = &ctx->Visual;
+ const struct gl_config *bufvis = &buffer->Visual;
+
+ if (buffer == _mesa_get_incomplete_framebuffer())
+ return GL_TRUE;
+
+#define check_component(foo) \
+ if (ctxvis->foo && bufvis->foo && \
+ ctxvis->foo != bufvis->foo) \
+ return GL_FALSE
+
+ check_component(redShift);
+ check_component(greenShift);
+ check_component(blueShift);
+ check_component(redBits);
+ check_component(greenBits);
+ check_component(blueBits);
+ check_component(depthBits);
+ check_component(stencilBits);
+
+#undef check_component
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Check if the viewport/scissor size has not yet been initialized.
+ * Initialize the size if the given width and height are non-zero.
+ */
+static void
+check_init_viewport(struct gl_context *ctx, GLuint width, GLuint height)
+{
+ if (!ctx->ViewportInitialized && width > 0 && height > 0) {
+ unsigned i;
+
+ /* Note: set flag here, before calling _mesa_set_viewport(), to prevent
+ * potential infinite recursion.
+ */
+ ctx->ViewportInitialized = GL_TRUE;
+
+ /* Note: ctx->Const.MaxViewports may not have been set by the driver
+ * yet, so just initialize all of them.
+ */
+ for (i = 0; i < MAX_VIEWPORTS; i++) {
+ _mesa_set_viewport(ctx, i, 0, 0, width, height);
+ _mesa_set_scissor(ctx, i, 0, 0, width, height);
+ }
+ }
+}
+
+
+static void
+handle_first_current(struct gl_context *ctx)
+{
+ if (ctx->Version == 0 || !ctx->DrawBuffer) {
+ /* probably in the process of tearing down the context */
+ return;
+ }
+
+ check_context_limits(ctx);
+
+ _mesa_update_vertex_processing_mode(ctx);
+
+ /* According to GL_MESA_configless_context the default value of
+ * glDrawBuffers depends on the config of the first surface it is bound to.
+ * For GLES it is always GL_BACK which has a magic interpretation.
+ */
+ if (!ctx->HasConfig && _mesa_is_desktop_gl(ctx)) {
+ if (ctx->DrawBuffer != _mesa_get_incomplete_framebuffer()) {
+ GLenum16 buffer;
+
+ if (ctx->DrawBuffer->Visual.doubleBufferMode)
+ buffer = GL_BACK;
+ else
+ buffer = GL_FRONT;
+
+ _mesa_drawbuffers(ctx, ctx->DrawBuffer, 1, &buffer,
+ NULL /* destMask */);
+ }
+
+ if (ctx->ReadBuffer != _mesa_get_incomplete_framebuffer()) {
+ gl_buffer_index bufferIndex;
+ GLenum buffer;
+
+ if (ctx->ReadBuffer->Visual.doubleBufferMode) {
+ buffer = GL_BACK;
+ bufferIndex = BUFFER_BACK_LEFT;
+ }
+ else {
+ buffer = GL_FRONT;
+ bufferIndex = BUFFER_FRONT_LEFT;
+ }
+
+ _mesa_readbuffer(ctx, ctx->ReadBuffer, buffer, bufferIndex);
+ }
+ }
+
+ /* Determine if generic vertex attribute 0 aliases the conventional
+ * glVertex position.
+ */
+ {
+ const bool is_forward_compatible_context =
+ ctx->Const.ContextFlags & GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT;
+
+ /* In OpenGL 3.1 attribute 0 becomes non-magic, just like in OpenGL ES
+ * 2.0. Note that we cannot just check for API_OPENGL_COMPAT here because
+ * that will erroneously allow this usage in a 3.0 forward-compatible
+ * context too.
+ */
+ ctx->_AttribZeroAliasesVertex = (ctx->API == API_OPENGLES
+ || (ctx->API == API_OPENGL_COMPAT
+ && !is_forward_compatible_context));
+ }
+
+ /* We can use this to help debug users' problems. Tell them to set
+ * the MESA_INFO env variable before running their app. Then the
+ * first time each context is made current we'll print some useful
+ * information.
+ */
+ if (getenv("MESA_INFO")) {
+ _mesa_print_info(ctx);
+ }
+}
+
+/**
+ * Bind the given context to the given drawBuffer and readBuffer and
+ * make it the current context for the calling thread.
+ * We'll render into the drawBuffer and read pixels from the
+ * readBuffer (i.e. glRead/CopyPixels, glCopyTexImage, etc).
+ *
+ * We check that the context's and framebuffer's visuals are compatible
+ * and return immediately if they're not.
+ *
+ * \param newCtx the new GL context. If NULL then there will be no current GL
+ * context.
+ * \param drawBuffer the drawing framebuffer
+ * \param readBuffer the reading framebuffer
+ */
+GLboolean
+_mesa_make_current( struct gl_context *newCtx,
+ struct gl_framebuffer *drawBuffer,
+ struct gl_framebuffer *readBuffer )
+{
+ GET_CURRENT_CONTEXT(curCtx);
+
+ if (MESA_VERBOSE & VERBOSE_API)
+ _mesa_debug(newCtx, "_mesa_make_current()\n");
+
+ /* Check that the context's and framebuffer's visuals are compatible.
+ */
+ if (newCtx && drawBuffer && newCtx->WinSysDrawBuffer != drawBuffer) {
+ if (!check_compatible(newCtx, drawBuffer)) {
+ _mesa_warning(newCtx,
+ "MakeCurrent: incompatible visuals for context and drawbuffer");
+ return GL_FALSE;
+ }
+ }
+ if (newCtx && readBuffer && newCtx->WinSysReadBuffer != readBuffer) {
+ if (!check_compatible(newCtx, readBuffer)) {
+ _mesa_warning(newCtx,
+ "MakeCurrent: incompatible visuals for context and readbuffer");
+ return GL_FALSE;
+ }
+ }
+
+ if (curCtx &&
+ (curCtx->WinSysDrawBuffer || curCtx->WinSysReadBuffer) &&
+ /* make sure this context is valid for flushing */
+ curCtx != newCtx &&
+ curCtx->Const.ContextReleaseBehavior ==
+ GL_CONTEXT_RELEASE_BEHAVIOR_FLUSH) {
+ _mesa_flush(curCtx);
+ }
+
+ /* Call this periodically to detect when the user has begun using
+ * GL rendering from multiple threads.
+ */
+ _glapi_check_multithread();
+
+ if (!newCtx) {
+ _glapi_set_dispatch(NULL); /* none current */
+ /* We need old ctx to correctly release Draw/ReadBuffer
+ * and avoid a surface leak in st_renderbuffer_delete.
+ * Therefore, first drop buffers then set new ctx to NULL.
+ */
+ if (curCtx) {
+ _mesa_reference_framebuffer(&curCtx->WinSysDrawBuffer, NULL);
+ _mesa_reference_framebuffer(&curCtx->WinSysReadBuffer, NULL);
+ }
+ _glapi_set_context(NULL);
+ assert(_mesa_get_current_context() == NULL);
+ }
+ else {
+ _glapi_set_context((void *) newCtx);
+ assert(_mesa_get_current_context() == newCtx);
+ _glapi_set_dispatch(newCtx->CurrentClientDispatch);
+
+ if (drawBuffer && readBuffer) {
+ assert(_mesa_is_winsys_fbo(drawBuffer));
+ assert(_mesa_is_winsys_fbo(readBuffer));
+ _mesa_reference_framebuffer(&newCtx->WinSysDrawBuffer, drawBuffer);
+ _mesa_reference_framebuffer(&newCtx->WinSysReadBuffer, readBuffer);
+
+ /*
+ * Only set the context's Draw/ReadBuffer fields if they're NULL
+ * or not bound to a user-created FBO.
+ */
+ if (!newCtx->DrawBuffer || _mesa_is_winsys_fbo(newCtx->DrawBuffer)) {
+ _mesa_reference_framebuffer(&newCtx->DrawBuffer, drawBuffer);
+ /* Update the FBO's list of drawbuffers/renderbuffers.
+ * For winsys FBOs this comes from the GL state (which may have
+ * changed since the last time this FBO was bound).
+ */
+ _mesa_update_draw_buffers(newCtx);
+ _mesa_update_allow_draw_out_of_order(newCtx);
+ }
+ if (!newCtx->ReadBuffer || _mesa_is_winsys_fbo(newCtx->ReadBuffer)) {
+ _mesa_reference_framebuffer(&newCtx->ReadBuffer, readBuffer);
+ /* In _mesa_initialize_window_framebuffer, for single-buffered
+ * visuals, the ColorReadBuffer is set to be GL_FRONT, even with
+ * GLES contexts. When calling read_buffer, we verify we are reading
+ * from GL_BACK in is_legal_es3_readbuffer_enum. But the default is
+ * incorrect, and certain dEQP tests check this. So fix it here.
+ */
+ if (_mesa_is_gles(newCtx) &&
+ !newCtx->ReadBuffer->Visual.doubleBufferMode)
+ if (newCtx->ReadBuffer->ColorReadBuffer == GL_FRONT)
+ newCtx->ReadBuffer->ColorReadBuffer = GL_BACK;
+ }
+
+ /* XXX only set this flag if we're really changing the draw/read
+ * framebuffer bindings.
+ */
+ newCtx->NewState |= _NEW_BUFFERS;
+
+ check_init_viewport(newCtx, drawBuffer->Width, drawBuffer->Height);
+ }
+
+ if (newCtx->FirstTimeCurrent) {
+ handle_first_current(newCtx);
+ newCtx->FirstTimeCurrent = GL_FALSE;
+ }
+ }
+
+ return GL_TRUE;
+}
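+
+/* Illustrative sketch (not part of the upstream file): a window-system
+ * binding layer makes a context current with its winsys framebuffers and
+ * unbinds with NULLs on release:
+ *
+ *    if (!_mesa_make_current(ctx, winsysDrawFb, winsysReadFb))
+ *       return GL_FALSE;   // incompatible visuals
+ *    ...
+ *    _mesa_make_current(NULL, NULL, NULL);
+ */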
+
+
+/**
+ * Make context 'ctx' share the display lists, textures and programs
+ * that are associated with 'ctxToShare'.
+ * Any display lists, textures or programs associated with 'ctx' will
+ * be deleted if nobody else is sharing them.
+ */
+GLboolean
+_mesa_share_state(struct gl_context *ctx, struct gl_context *ctxToShare)
+{
+ if (ctx && ctxToShare && ctx->Shared && ctxToShare->Shared) {
+ struct gl_shared_state *oldShared = NULL;
+
+ /* save ref to old state to prevent it from being deleted immediately */
+ _mesa_reference_shared_state(ctx, &oldShared, ctx->Shared);
+
+ /* update ctx's Shared pointer */
+ _mesa_reference_shared_state(ctx, &ctx->Shared, ctxToShare->Shared);
+
+ update_default_objects(ctx);
+
+ /* release the old shared state */
+ _mesa_reference_shared_state(ctx, &oldShared, NULL);
+
+ return GL_TRUE;
+ }
+ else {
+ return GL_FALSE;
+ }
+}
+
+
+
+/**
+ * \return pointer to the current GL context for this thread.
+ *
+ * Calls _glapi_get_context(). This isn't the fastest way to get the current
+ * context. If you need speed, see the #GET_CURRENT_CONTEXT macro in
+ * context.h.
+ */
+struct gl_context *
+_mesa_get_current_context( void )
+{
+ return (struct gl_context *) _glapi_get_context();
+}
+
+
+/**
+ * Get context's current API dispatch table.
+ *
+ * It'll either be the immediate-mode execute dispatcher, the display list
+ * compile dispatcher, or the thread marshalling dispatcher.
+ *
+ * \param ctx GL context.
+ *
+ * \return pointer to dispatch_table.
+ *
+ * Simply returns __struct gl_contextRec::CurrentClientDispatch.
+ */
+struct _glapi_table *
+_mesa_get_dispatch(struct gl_context *ctx)
+{
+ return ctx->CurrentClientDispatch;
+}
+
+/*@}*/
+
+
+/**********************************************************************/
+/** \name Miscellaneous functions */
+/**********************************************************************/
+/*@{*/
+/**
+ * Flush commands.
+ */
+void
+_mesa_flush(struct gl_context *ctx)
+{
+ FLUSH_VERTICES( ctx, 0 );
+ if (ctx->Driver.Flush) {
+ ctx->Driver.Flush(ctx);
+ }
+}
+
+
+
+/**
+ * Flush commands and wait for completion.
+ *
+ * Calls the #ASSERT_OUTSIDE_BEGIN_END macro and the
+ * dd_function_table::Finish driver callback, if not NULL.
+ */
+void GLAPIENTRY
+_mesa_Finish(void)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ ASSERT_OUTSIDE_BEGIN_END(ctx);
+
+ FLUSH_VERTICES(ctx, 0);
+
+ if (ctx->Driver.Finish) {
+ ctx->Driver.Finish(ctx);
+ }
+}
+
+
+/**
+ * Execute glFlush().
+ *
+ * Calls the #ASSERT_OUTSIDE_BEGIN_END macro and the
+ * dd_function_table::Flush driver callback, if not NULL.
+ */
+void GLAPIENTRY
+_mesa_Flush(void)
+{
+ GET_CURRENT_CONTEXT(ctx);
+ ASSERT_OUTSIDE_BEGIN_END(ctx);
+ _mesa_flush(ctx);
+}
+
+
+/*@}*/
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.h
new file mode 100644
index 0000000000..d5b97ac4fc
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/context.h
@@ -0,0 +1,460 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file context.h
+ * Mesa context and visual-related functions.
+ *
+ * There are three large Mesa data types/classes which are meant to be
+ * used by device drivers:
+ * - struct gl_context: this contains the Mesa rendering state
+ * - struct gl_config: this describes the color buffer (RGB vs. ci), whether
+ * or not there's a depth buffer, stencil buffer, etc.
+ * - struct gl_framebuffer: contains pointers to the depth buffer, stencil
+ * buffer, accum buffer and alpha buffers.
+ *
+ * These types should be encapsulated by corresponding device driver
+ * data types. See xmesa.h and xmesaP.h for an example.
+ *
+ * In OOP terms, struct gl_context, struct gl_config, and struct gl_framebuffer
+ * are base classes which the device driver must derive from.
+ *
+ * The following functions create and destroy these data types.
+ */
+
+
+#ifndef CONTEXT_H
+#define CONTEXT_H
+
+
+#include "errors.h"
+
+#include "extensions.h"
+#include "mtypes.h"
+#include "vbo/vbo.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct _glapi_table;
+
+
+/** \name Visual-related functions */
+/*@{*/
+
+extern struct gl_config *
+_mesa_create_visual( GLboolean dbFlag,
+ GLboolean stereoFlag,
+ GLint redBits,
+ GLint greenBits,
+ GLint blueBits,
+ GLint alphaBits,
+ GLint depthBits,
+ GLint stencilBits,
+ GLint accumRedBits,
+ GLint accumGreenBits,
+ GLint accumBlueBits,
+ GLint accumAlphaBits,
+ GLuint numSamples );
+
+extern GLboolean
+_mesa_initialize_visual( struct gl_config *v,
+ GLboolean dbFlag,
+ GLboolean stereoFlag,
+ GLint redBits,
+ GLint greenBits,
+ GLint blueBits,
+ GLint alphaBits,
+ GLint depthBits,
+ GLint stencilBits,
+ GLint accumRedBits,
+ GLint accumGreenBits,
+ GLint accumBlueBits,
+ GLint accumAlphaBits,
+ GLuint numSamples );
+
+extern void
+_mesa_destroy_visual( struct gl_config *vis );
+
+/*@}*/
+
+
+/** \name Context-related functions */
+/*@{*/
+
+extern void
+_mesa_initialize(void);
+
+extern GLboolean
+_mesa_initialize_context( struct gl_context *ctx,
+ gl_api api,
+ const struct gl_config *visual,
+ struct gl_context *share_list,
+ const struct dd_function_table *driverFunctions);
+
+extern void
+_mesa_free_context_data(struct gl_context *ctx);
+
+extern void
+_mesa_destroy_context( struct gl_context *ctx );
+
+
+extern void
+_mesa_copy_context(const struct gl_context *src, struct gl_context *dst, GLuint mask);
+
+extern GLboolean
+_mesa_make_current( struct gl_context *ctx, struct gl_framebuffer *drawBuffer,
+ struct gl_framebuffer *readBuffer );
+
+extern GLboolean
+_mesa_share_state(struct gl_context *ctx, struct gl_context *ctxToShare);
+
+extern struct gl_context *
+_mesa_get_current_context(void);
+
+/*@}*/
+
+extern void
+_mesa_init_constants(struct gl_constants *consts, gl_api api);
+
+extern void
+_mesa_notifySwapBuffers(struct gl_context *gc);
+
+
+extern struct _glapi_table *
+_mesa_get_dispatch(struct gl_context *ctx);
+
+extern void
+_mesa_set_context_lost_dispatch(struct gl_context *ctx);
+
+
+
+/** \name Miscellaneous */
+/*@{*/
+
+extern void
+_mesa_flush(struct gl_context *ctx);
+
+extern void GLAPIENTRY
+_mesa_Finish( void );
+
+extern void GLAPIENTRY
+_mesa_Flush( void );
+
+/*@}*/
+
+
+/**
+ * Are we currently between glBegin and glEnd?
+ * During execution, not display list compilation.
+ */
+static inline GLboolean
+_mesa_inside_begin_end(const struct gl_context *ctx)
+{
+ return ctx->Driver.CurrentExecPrimitive != PRIM_OUTSIDE_BEGIN_END;
+}
+
+
+/**
+ * Are we currently between glBegin and glEnd in a display list?
+ */
+static inline GLboolean
+_mesa_inside_dlist_begin_end(const struct gl_context *ctx)
+{
+ return ctx->Driver.CurrentSavePrimitive <= PRIM_MAX;
+}
+
+
+
+/**
+ * \name Macros for flushing buffered rendering commands before state changes,
+ * checking if inside glBegin/glEnd, etc.
+ */
+/*@{*/
+
+/**
+ * Flush vertices.
+ *
+ * \param ctx GL context.
+ * \param newstate new state.
+ *
+ * Checks if dd_function_table::NeedFlush is marked to flush stored vertices,
+ * and calls vbo_exec_FlushVertices() if so. Marks
+ * __struct gl_contextRec::NewState with \p newstate.
+ */
+#define FLUSH_VERTICES(ctx, newstate) \
+do { \
+ if (MESA_VERBOSE & VERBOSE_STATE) \
+ _mesa_debug(ctx, "FLUSH_VERTICES in %s\n", __func__); \
+ if (ctx->Driver.NeedFlush & FLUSH_STORED_VERTICES) \
+ vbo_exec_FlushVertices(ctx, FLUSH_STORED_VERTICES); \
+ ctx->NewState |= newstate; \
+} while (0)
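+
+/* Illustrative sketch (not part of the upstream file): typical use in a
+ * state-changing entrypoint -- flush buffered vertices first, then record
+ * the change:
+ *
+ *    void GLAPIENTRY
+ *    _mesa_LineWidth(GLfloat width)
+ *    {
+ *       GET_CURRENT_CONTEXT(ctx);
+ *       ...error checks...
+ *       FLUSH_VERTICES(ctx, _NEW_LINE);
+ *       ctx->Line.Width = width;
+ *    }
+ */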
+
+/**
+ * Flush current state.
+ *
+ * \param ctx GL context.
+ * \param newstate new state.
+ *
+ * Checks if dd_function_table::NeedFlush is marked to flush current state,
+ * and calls vbo_exec_FlushVertices() if so. Marks
+ * __struct gl_contextRec::NewState with \p newstate.
+ */
+#define FLUSH_CURRENT(ctx, newstate) \
+do { \
+ if (MESA_VERBOSE & VERBOSE_STATE) \
+ _mesa_debug(ctx, "FLUSH_CURRENT in %s\n", __func__); \
+ if (ctx->Driver.NeedFlush & FLUSH_UPDATE_CURRENT) \
+ vbo_exec_FlushVertices(ctx, FLUSH_UPDATE_CURRENT); \
+ ctx->NewState |= newstate; \
+} while (0)
+
+/**
+ * Flush vertices.
+ *
+ * \param ctx GL context.
+ *
+ * Checks if dd_function_table::NeedFlush is marked to flush stored vertices
+ * or current state and calls vbo_exec_FlushVertices() if so.
+ */
+#define FLUSH_FOR_DRAW(ctx) \
+do { \
+ if (MESA_VERBOSE & VERBOSE_STATE) \
+ _mesa_debug(ctx, "FLUSH_FOR_DRAW in %s\n", __func__); \
+ if (ctx->Driver.NeedFlush) { \
+ if (ctx->_AllowDrawOutOfOrder) { \
+ if (ctx->Driver.NeedFlush & FLUSH_UPDATE_CURRENT) \
+ vbo_exec_FlushVertices(ctx, FLUSH_UPDATE_CURRENT); \
+ } else { \
+ vbo_exec_FlushVertices(ctx, ctx->Driver.NeedFlush); \
+ } \
+ } \
+} while (0)
+
+/**
+ * Macro to assert that the API call was made outside the
+ * glBegin()/glEnd() pair, with return value.
+ *
+ * \param ctx GL context.
+ * \param retval value to return in case the assertion fails.
+ */
+#define ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, retval) \
+do { \
+ if (_mesa_inside_begin_end(ctx)) { \
+ _mesa_error(ctx, GL_INVALID_OPERATION, "Inside glBegin/glEnd"); \
+ return retval; \
+ } \
+} while (0)
+
+/**
+ * Macro to assert that the API call was made outside the
+ * glBegin()/glEnd() pair.
+ *
+ * \param ctx GL context.
+ */
+#define ASSERT_OUTSIDE_BEGIN_END(ctx) \
+do { \
+ if (_mesa_inside_begin_end(ctx)) { \
+ _mesa_error(ctx, GL_INVALID_OPERATION, "Inside glBegin/glEnd"); \
+ return; \
+ } \
+} while (0)
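+
+/* Illustrative sketch (not part of the upstream file): entrypoints that are
+ * illegal between glBegin/glEnd open with one of these asserts, as
+ * _mesa_Finish() in context.c does:
+ *
+ *    GET_CURRENT_CONTEXT(ctx);
+ *    ASSERT_OUTSIDE_BEGIN_END(ctx);                  // void entrypoints
+ *    ASSERT_OUTSIDE_BEGIN_END_WITH_RETVAL(ctx, 0);   // value-returning ones
+ */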
+
+/*@}*/
+
+
+/**
+ * Checks if the context is for Desktop GL (Compatibility or Core)
+ */
+static inline bool
+_mesa_is_desktop_gl(const struct gl_context *ctx)
+{
+ return ctx->API == API_OPENGL_COMPAT || ctx->API == API_OPENGL_CORE;
+}
+
+
+/**
+ * Checks if the context is for any GLES version
+ */
+static inline bool
+_mesa_is_gles(const struct gl_context *ctx)
+{
+ return ctx->API == API_OPENGLES || ctx->API == API_OPENGLES2;
+}
+
+
+/**
+ * Checks if the context is for GLES 3.0 or later
+ */
+static inline bool
+_mesa_is_gles3(const struct gl_context *ctx)
+{
+ return ctx->API == API_OPENGLES2 && ctx->Version >= 30;
+}
+
+
+/**
+ * Checks if the context is for GLES 3.1 or later
+ */
+static inline bool
+_mesa_is_gles31(const struct gl_context *ctx)
+{
+ return ctx->API == API_OPENGLES2 && ctx->Version >= 31;
+}
+
+
+/**
+ * Checks if the context is for GLES 3.2 or later
+ */
+static inline bool
+_mesa_is_gles32(const struct gl_context *ctx)
+{
+ return ctx->API == API_OPENGLES2 && ctx->Version >= 32;
+}
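+
+/* Illustrative sketch (not part of the upstream file): these predicates are
+ * the usual way to gate version-dependent behavior, e.g.:
+ *
+ *    if (_mesa_is_gles31(ctx) || _mesa_has_ARB_compute_shader(ctx))
+ *       expose_compute_queries(ctx);   // hypothetical helper
+ */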
+
+
+static inline bool
+_mesa_is_no_error_enabled(const struct gl_context *ctx)
+{
+ return ctx->Const.ContextFlags & GL_CONTEXT_FLAG_NO_ERROR_BIT_KHR;
+}
+
+
+static inline bool
+_mesa_has_integer_textures(const struct gl_context *ctx)
+{
+ return _mesa_has_EXT_texture_integer(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_half_float_textures(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_texture_float(ctx) ||
+ _mesa_has_OES_texture_half_float(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_float_textures(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_texture_float(ctx) ||
+ _mesa_has_OES_texture_float(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_texture_rgb10_a2ui(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_texture_rgb10_a2ui(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_float_depth_buffer(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_depth_buffer_float(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_packed_float(const struct gl_context *ctx)
+{
+ return _mesa_has_EXT_packed_float(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_rg_textures(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_texture_rg(ctx) || _mesa_has_EXT_texture_rg(ctx) ||
+ _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_texture_shared_exponent(const struct gl_context *ctx)
+{
+ return _mesa_has_EXT_texture_shared_exponent(ctx) || _mesa_is_gles3(ctx);
+}
+
+static inline bool
+_mesa_has_texture_type_2_10_10_10_REV(const struct gl_context *ctx)
+{
+ return _mesa_is_desktop_gl(ctx) ||
+ _mesa_has_EXT_texture_type_2_10_10_10_REV(ctx);
+}
+
+/**
+ * Checks if the context supports geometry shaders.
+ */
+static inline bool
+_mesa_has_geometry_shaders(const struct gl_context *ctx)
+{
+ return _mesa_has_OES_geometry_shader(ctx) ||
+ (_mesa_is_desktop_gl(ctx) && ctx->Version >= 32);
+}
+
+
+/**
+ * Checks if the context supports compute shaders.
+ */
+static inline bool
+_mesa_has_compute_shaders(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_compute_shader(ctx) ||
+ (ctx->API == API_OPENGLES2 && ctx->Version >= 31);
+}
+
+/**
+ * Checks if the context supports tessellation.
+ */
+static inline bool
+_mesa_has_tessellation(const struct gl_context *ctx)
+{
+ /* _mesa_has_EXT_tessellation_shader(ctx) is redundant with the OES
+ * check, so don't bother calling it.
+ */
+ return _mesa_has_OES_tessellation_shader(ctx) ||
+ _mesa_has_ARB_tessellation_shader(ctx);
+}
+
+static inline bool
+_mesa_has_texture_cube_map_array(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_texture_cube_map_array(ctx) ||
+ _mesa_has_OES_texture_cube_map_array(ctx);
+}
+
+static inline bool
+_mesa_has_texture_view(const struct gl_context *ctx)
+{
+ return _mesa_has_ARB_texture_view(ctx) ||
+ _mesa_has_OES_texture_view(ctx);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* CONTEXT_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/dd.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/dd.h
new file mode 100644
index 0000000000..4a7d775db4
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/dd.h
@@ -0,0 +1,1503 @@
+/**
+ * \file dd.h
+ * Device driver interfaces.
+ */
+
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef DD_INCLUDED
+#define DD_INCLUDED
+
+#include "glheader.h"
+#include "formats.h"
+#include "menums.h"
+#include "compiler/shader_enums.h"
+
+struct gl_bitmap_atlas;
+struct gl_buffer_object;
+struct gl_context;
+struct gl_display_list;
+struct gl_framebuffer;
+struct gl_image_unit;
+struct gl_pixelstore_attrib;
+struct gl_program;
+struct gl_renderbuffer;
+struct gl_renderbuffer_attachment;
+struct gl_shader;
+struct gl_shader_program;
+struct gl_texture_image;
+struct gl_texture_object;
+struct gl_memory_info;
+struct gl_transform_feedback_object;
+struct ati_fragment_shader;
+struct util_queue_monitoring;
+struct _mesa_prim;
+struct _mesa_index_buffer;
+
+/* GL_ARB_vertex_buffer_object */
+/* Modifies GL_MAP_UNSYNCHRONIZED_BIT to allow driver to fail (return
+ * NULL) if buffer is unavailable for immediate mapping.
+ *
+ * Does GL_MAP_INVALIDATE_RANGE_BIT do this? It seems so, but it
+ * would require more book-keeping in the driver than seems necessary
+ * at this point.
+ *
+ * Does GL_MAP_INVALIDATE_BUFFER_BIT do this? Not really -- we don't
+ * want to provoke the driver to throw away the old storage; we will
+ * respect the contents of already referenced data.
+ */
+#define MESA_MAP_NOWAIT_BIT 0x4000
+
+/* Mapping a buffer is allowed from any thread. */
+#define MESA_MAP_THREAD_SAFE_BIT 0x8000
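+
+/* Illustrative sketch (not part of the upstream file): a caller that would
+ * rather not stall can OR MESA_MAP_NOWAIT_BIT into the usual access flags
+ * and fall back when the driver returns NULL. This assumes the
+ * MapBufferRange hook declared further down in this header:
+ *
+ *    void *ptr = ctx->Driver.MapBufferRange(ctx, offset, size,
+ *                                           GL_MAP_WRITE_BIT |
+ *                                           MESA_MAP_NOWAIT_BIT,
+ *                                           bufObj, MAP_INTERNAL);
+ *    if (!ptr)
+ *       ... wait, or write to a staging copy instead ...
+ */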
+
+
+/**
+ * Device driver function table.
+ * Core Mesa uses these function pointers to call into device drivers.
+ * Most of these functions directly correspond to OpenGL state commands.
+ * Core Mesa will call these functions after error checking has been done
+ * so that the drivers don't have to worry about error testing.
+ *
+ * Vertex transformation/clipping/lighting is patched into the T&L module.
+ * Rasterization functions are patched into the swrast module.
+ *
+ * Note: when new functions are added here, the drivers/common/driverfuncs.c
+ * file should be updated too!!!
+ */
+struct dd_function_table {
+ /**
+ * Return a string as needed by glGetString().
+ * Only the GL_RENDERER query must be implemented; NULL can be returned
+ * for all other queries.
+ */
+ const GLubyte * (*GetString)( struct gl_context *ctx, GLenum name );
+
+ /**
+ * Notify the driver after Mesa has made some internal state changes.
+ *
+ * This is in addition to any state change callbacks Mesa may already have
+ * made.
+ */
+ void (*UpdateState)(struct gl_context *ctx);
+
+ /**
+ * This is called whenever glFinish() is called.
+ */
+ void (*Finish)( struct gl_context *ctx );
+
+ /**
+ * This is called whenever glFlush() is called.
+ */
+ void (*Flush)( struct gl_context *ctx );
+
+ /**
+ * Clear the color/depth/stencil/accum buffer(s).
+ * \param buffers a bitmask of BUFFER_BIT_* flags indicating which
+ * renderbuffers need to be cleared.
+ */
+ void (*Clear)( struct gl_context *ctx, GLbitfield buffers );
+
+ /**
+ * Execute glRasterPos, updating the ctx->Current.Raster fields
+ */
+ void (*RasterPos)( struct gl_context *ctx, const GLfloat v[4] );
+
+ /**
+ * \name Image-related functions
+ */
+ /*@{*/
+
+ /**
+ * Called by glDrawPixels().
+ * \p unpack describes how to unpack the source image data.
+ */
+ void (*DrawPixels)( struct gl_context *ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLvoid *pixels );
+
+ /**
+ * Called by glReadPixels().
+ */
+ void (*ReadPixels)( struct gl_context *ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const struct gl_pixelstore_attrib *unpack,
+ GLvoid *dest );
+
+ /**
+ * Called by glCopyPixels().
+ */
+ void (*CopyPixels)( struct gl_context *ctx, GLint srcx, GLint srcy,
+ GLsizei width, GLsizei height,
+ GLint dstx, GLint dsty, GLenum type );
+
+ /**
+ * Called by glBitmap().
+ */
+ void (*Bitmap)( struct gl_context *ctx,
+ GLint x, GLint y, GLsizei width, GLsizei height,
+ const struct gl_pixelstore_attrib *unpack,
+ const GLubyte *bitmap );
+
+ /**
+ * Called by display list code for optimized glCallLists/glBitmap rendering
+ * The driver must support texture rectangles of width 1024 or more.
+ */
+ void (*DrawAtlasBitmaps)(struct gl_context *ctx,
+ const struct gl_bitmap_atlas *atlas,
+ GLuint count, const GLubyte *ids);
+ /*@}*/
+
+
+ /**
+ * \name Texture image functions
+ */
+ /*@{*/
+
+ /**
+ * Choose actual hardware texture format given the texture target, the
+ * user-provided source image format and type and the desired internal
+ * format. In some cases, srcFormat and srcType can be GL_NONE.
+ * Note: target may be GL_TEXTURE_CUBE_MAP, but never
+ * GL_TEXTURE_CUBE_MAP_[POSITIVE/NEGATIVE]_[XYZ].
+ * Called by glTexImage(), etc.
+ */
+ mesa_format (*ChooseTextureFormat)(struct gl_context *ctx,
+ GLenum target, GLint internalFormat,
+ GLenum srcFormat, GLenum srcType );
+
+ /**
+ * Queries different driver parameters for a particular target and format.
+ * Since ARB_internalformat_query2 introduced several new query parameters
+ * over ARB_internalformat_query, having one driver hook for each parameter
+ * is no longer feasible. So this is the generic entry-point for calls
+ * to glGetInternalFormativ and glGetInternalFormati64v, after Mesa has
+ * checked errors and default values.
+ *
+ * \param ctx GL context
+ * \param target GL target enum
+ * \param internalFormat GL format enum
+ * \param pname GL enum that specifies the info to query.
+ * \param params Buffer to hold the result of the query.
+ */
+ void (*QueryInternalFormat)(struct gl_context *ctx,
+ GLenum target,
+ GLenum internalFormat,
+ GLenum pname,
+ GLint *params);
+
+ /**
+ * Called by glTexImage[123]D() and glCopyTexImage[12]D()
+ * Allocate texture memory and copy the user's image to the buffer.
+ * The gl_texture_image fields, etc. will be fully initialized.
+ * The parameters are the same as glTexImage3D(), plus:
+ * \param dims 1, 2, or 3 indicating glTexImage1/2/3D()
+ * \param packing describes how to unpack the source data.
+ * \param texImage is the destination texture image.
+ */
+ void (*TexImage)(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLenum format, GLenum type, const GLvoid *pixels,
+ const struct gl_pixelstore_attrib *packing);
+
+ /**
+ * Called by glTexSubImage[123]D().
+ * Replace a subset of the target texture with new texel data.
+ */
+ void (*TexSubImage)(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLint depth,
+ GLenum format, GLenum type,
+ const GLvoid *pixels,
+ const struct gl_pixelstore_attrib *packing);
+
+
+ /**
+ * Called by glGetTexImage(), glGetTextureSubImage().
+ */
+ void (*GetTexSubImage)(struct gl_context *ctx,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type, GLvoid *pixels,
+ struct gl_texture_image *texImage);
+
+ /**
+ * Called by glClearTex[Sub]Image
+ *
+ * Clears a rectangular region of the image to a given value. The
+ * clearValue argument is either NULL or points to a single texel to use as
+ * the clear value in the same internal format as the texture image. If it
+ * is NULL then the texture should be cleared to zeroes.
+ */
+ void (*ClearTexSubImage)(struct gl_context *ctx,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ const GLvoid *clearValue);
+
+ /**
+ * Called by glCopyTex[Sub]Image[123]D().
+ *
+ * This function should copy a rectangular region in the rb to a single
+ * destination slice, specified by @slice. In the case of 1D array
+ * textures (where one GL call can potentially affect multiple destination
+ * slices), core mesa takes care of calling this function multiple times,
+ * once for each scanline to be copied.
+ */
+ void (*CopyTexSubImage)(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint slice,
+ struct gl_renderbuffer *rb,
+ GLint x, GLint y,
+ GLsizei width, GLsizei height);
+ /**
+ * Called by glCopyImageSubData().
+ *
+ * This function should copy one 2-D slice from src_teximage or
+ * src_renderbuffer to dst_teximage or dst_renderbuffer. Either the
+ * teximage or renderbuffer pointer will be non-null to indicate which
+ * is the real src/dst.
+ *
+ * If one of the textures is 3-D or is a 1-D or 2-D array
+ * texture, this function will be called multiple times: once for each
+ * slice. If one of the textures is a cube map, this function will be
+ * called once for each face to be copied.
+ */
+ void (*CopyImageSubData)(struct gl_context *ctx,
+ struct gl_texture_image *src_teximage,
+ struct gl_renderbuffer *src_renderbuffer,
+ int src_x, int src_y, int src_z,
+ struct gl_texture_image *dst_teximage,
+ struct gl_renderbuffer *dst_renderbuffer,
+ int dst_x, int dst_y, int dst_z,
+ int src_width, int src_height);
+
+ /**
+ * Called by glGenerateMipmap() or when GL_GENERATE_MIPMAP_SGIS is enabled.
+ * Note that if the texture is a cube map, the <target> parameter will
+ * indicate which cube face to generate (GL_POSITIVE/NEGATIVE_X/Y/Z).
+ * texObj->BaseLevel is the level from which to generate the remaining
+ * mipmap levels.
+ */
+ void (*GenerateMipmap)(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj);
+
+ /**
+ * Called by glTexImage, glCompressedTexImage, glCopyTexImage
+ * and glTexStorage to check if the dimensions of the texture image
+ * are too large.
+ * \param target any GL_PROXY_TEXTURE_x target
+ * \return GL_TRUE if the image is OK, GL_FALSE if too large
+ */
+ GLboolean (*TestProxyTexImage)(struct gl_context *ctx, GLenum target,
+ GLuint numLevels, GLint level,
+ mesa_format format, GLuint numSamples,
+ GLint width, GLint height,
+ GLint depth);
+ /*@}*/
+
+
+ /**
+ * \name Compressed texture functions
+ */
+ /*@{*/
+
+ /**
+ * Called by glCompressedTexImage[123]D().
+ */
+ void (*CompressedTexImage)(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLsizei imageSize, const GLvoid *data);
+
+ /**
+ * Called by glCompressedTexSubImage[123]D().
+ */
+ void (*CompressedTexSubImage)(struct gl_context *ctx, GLuint dims,
+ struct gl_texture_image *texImage,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format,
+ GLsizei imageSize, const GLvoid *data);
+ /*@}*/
+
+ /**
+ * \name Texture object / image functions
+ */
+ /*@{*/
+
+ /**
+ * Called by glBindTexture() and glBindTextures().
+ */
+ void (*BindTexture)( struct gl_context *ctx, GLuint texUnit,
+ GLenum target, struct gl_texture_object *tObj );
+
+ /**
+ * Called to allocate a new texture object. Drivers will usually
+ * allocate/return a subclass of gl_texture_object.
+ */
+ struct gl_texture_object * (*NewTextureObject)(struct gl_context *ctx,
+ GLuint name, GLenum target);
+ /**
+ * Called to delete/free a texture object. Drivers should free the
+ * object and any image data it contains.
+ */
+ void (*DeleteTexture)(struct gl_context *ctx,
+ struct gl_texture_object *texObj);
+
+ /** Called to allocate a new texture image object. */
+ struct gl_texture_image * (*NewTextureImage)(struct gl_context *ctx);
+
+ /** Called to free a texture image object returned by NewTextureImage() */
+ void (*DeleteTextureImage)(struct gl_context *ctx,
+ struct gl_texture_image *);
+
+ /** Called to allocate memory for a single texture image */
+ GLboolean (*AllocTextureImageBuffer)(struct gl_context *ctx,
+ struct gl_texture_image *texImage);
+
+ /** Free the memory for a single texture image */
+ void (*FreeTextureImageBuffer)(struct gl_context *ctx,
+ struct gl_texture_image *texImage);
+
+ /** Map a slice of a texture image into user space.
+ * Note: for GL_TEXTURE_1D_ARRAY, height must be 1, y must be 0 and slice
+ * indicates the 1D array index.
+ * \param texImage the texture image
+ * \param slice the 3D image slice or array texture slice
+ * \param x, y, w, h region of interest
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT and
+ * GL_MAP_INVALIDATE_RANGE_BIT (if writing)
+ * \param mapOut returns start of mapping of region of interest
+ * \param rowStrideOut returns row stride (in bytes). In the case of a
+ * compressed texture, this is the byte stride between one row of blocks
+ * and another.
+ */
+ void (*MapTextureImage)(struct gl_context *ctx,
+ struct gl_texture_image *texImage,
+ GLuint slice,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **mapOut, GLint *rowStrideOut);
+
+ void (*UnmapTextureImage)(struct gl_context *ctx,
+ struct gl_texture_image *texImage,
+ GLuint slice);
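+
+ /* Illustrative sketch, not part of the interface: core Mesa brackets
+ * CPU access with these two hooks, e.g. to read back one slice
+ * (dest, destStride and bpp are caller-provided and shown only for
+ * illustration):
+ *
+ *    GLubyte *map;
+ *    GLint rowStride;
+ *    ctx->Driver.MapTextureImage(ctx, texImage, slice, x, y, w, h,
+ *                                GL_MAP_READ_BIT, &map, &rowStride);
+ *    for (GLuint i = 0; i < h; i++)
+ *       memcpy(dest + i * destStride, map + i * rowStride, w * bpp);
+ *    ctx->Driver.UnmapTextureImage(ctx, texImage, slice);
+ */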
+
+ /** For GL_ARB_texture_storage. Allocate memory for whole mipmap stack.
+ * All the gl_texture_images in the texture object will have their
+ * dimensions, format, etc. initialized already.
+ */
+ GLboolean (*AllocTextureStorage)(struct gl_context *ctx,
+ struct gl_texture_object *texObj,
+ GLsizei levels, GLsizei width,
+ GLsizei height, GLsizei depth);
+
+ /** Called as part of glTextureView to add views to origTexObj */
+ GLboolean (*TextureView)(struct gl_context *ctx,
+ struct gl_texture_object *texObj,
+ struct gl_texture_object *origTexObj);
+
+ /**
+ * Map a renderbuffer into user space.
+ * \param mode bitmask of GL_MAP_READ_BIT, GL_MAP_WRITE_BIT and
+ * GL_MAP_INVALIDATE_RANGE_BIT (if writing)
+ */
+ void (*MapRenderbuffer)(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLuint x, GLuint y, GLuint w, GLuint h,
+ GLbitfield mode,
+ GLubyte **mapOut, GLint *rowStrideOut,
+ bool flip_y);
+
+ void (*UnmapRenderbuffer)(struct gl_context *ctx,
+ struct gl_renderbuffer *rb);
+
+ /**
+ * Optional driver entrypoint that binds a non-texture renderbuffer's
+ * contents to a texture image.
+ */
+ GLboolean (*BindRenderbufferTexImage)(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ struct gl_texture_image *texImage);
+ /*@}*/
+
+
+ /**
+ * \name Vertex/fragment program functions
+ */
+ /*@{*/
+ /** Allocate a new program */
+ struct gl_program * (*NewProgram)(struct gl_context *ctx,
+ gl_shader_stage stage,
+ GLuint id, bool is_arb_asm);
+ /** Delete a program */
+ void (*DeleteProgram)(struct gl_context *ctx, struct gl_program *prog);
+ /**
+ * Allocate a program to associate with the new ATI fragment shader (optional)
+ */
+ struct gl_program * (*NewATIfs)(struct gl_context *ctx,
+ struct ati_fragment_shader *curProg);
+ /**
+ * Notify driver that a program string (and GPU code) has been specified
+ * or modified. Return GL_TRUE or GL_FALSE to indicate if the program is
+ * supported by the driver.
+ */
+ GLboolean (*ProgramStringNotify)(struct gl_context *ctx, GLenum target,
+ struct gl_program *prog);
+
+ /**
+ * Notify driver that the sampler uniforms for the current program have
+ * changed. On some drivers, this may require shader recompiles.
+ */
+ void (*SamplerUniformChange)(struct gl_context *ctx, GLenum target,
+ struct gl_program *prog);
+
+ /** Query if program can be loaded onto hardware */
+ GLboolean (*IsProgramNative)(struct gl_context *ctx, GLenum target,
+ struct gl_program *prog);
+
+ /*@}*/
+
+ /**
+ * \name GLSL shader/program functions.
+ */
+ /*@{*/
+ /**
+ * Called when a shader program is linked.
+ *
+ * This gives drivers an opportunity to clone the IR and make their
+ * own transformations on it for the purposes of code generation.
+ */
+ GLboolean (*LinkShader)(struct gl_context *ctx,
+ struct gl_shader_program *shader);
+ /*@}*/
+
+
+ /**
+ * \name Draw functions.
+ */
+ /*@{*/
+ /**
+ * For indirect array drawing:
+ *
+ * typedef struct {
+ * GLuint count;
+ * GLuint primCount;
+ * GLuint first;
+ * GLuint baseInstance; // in GL 4.2 and later, must be zero otherwise
+ * } DrawArraysIndirectCommand;
+ *
+ * For indirect indexed drawing:
+ *
+ * typedef struct {
+ * GLuint count;
+ * GLuint primCount;
+ * GLuint firstIndex;
+ * GLint baseVertex;
+ * GLuint baseInstance; // in GL 4.2 and later, must be zero otherwise
+ * } DrawElementsIndirectCommand;
+ */
+
+ /**
+ * Draw a number of primitives.
+ * \param prims array [nr_prims] describing what to draw (prim type,
+ * vertex count, first index, instance count, etc).
+ * \param ib index buffer for indexed drawing, NULL for array drawing
+ * \param index_bounds_valid are min_index and max_index valid?
+ * \param min_index lowest vertex index used
+ * \param max_index highest vertex index used
+ * \param num_instances instance count from ARB_draw_instanced
+ * \param base_instance base instance from ARB_base_instance
+ * \param tfb_vertcount if non-null, indicates which transform feedback
+ * object has the vertex count.
+ * \param tfb_stream If called via DrawTransformFeedbackStream, specifies
+ * the vertex stream buffer from which to get the vertex
+ * count.
+ * \param indirect If any prims are indirect, this specifies the buffer
+ * to find the "DrawArrays/ElementsIndirectCommand" data.
+ * This may be deprecated in the future.
+ */
+ void (*Draw)(struct gl_context *ctx,
+ const struct _mesa_prim *prims, GLuint nr_prims,
+ const struct _mesa_index_buffer *ib,
+ GLboolean index_bounds_valid,
+ GLuint min_index, GLuint max_index,
+ GLuint num_instances, GLuint base_instance,
+ struct gl_transform_feedback_object *tfb_vertcount,
+ unsigned tfb_stream);
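+
+ /* Illustrative sketch, not part of the interface: a call such as
+ * glDrawElementsInstanced(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, NULL, 4)
+ * typically reaches the driver as a single prim, roughly:
+ *
+ *    struct _mesa_prim prim = { 0 };
+ *    prim.mode = GL_TRIANGLES;
+ *    prim.begin = prim.end = true;
+ *    prim.count = 6;
+ *    ctx->Driver.Draw(ctx, &prim, 1, &ib, GL_FALSE, 0, 0, 4, 0, NULL, 0);
+ *
+ * where ib is a struct _mesa_index_buffer describing the bound element
+ * array buffer; the exact plumbing lives in the vbo module.
+ */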
+
+
+ /**
+ * Draw a primitive, getting the vertex count, instance count, start
+ * vertex, etc. from a buffer object.
+ * \param mode GL_POINTS, GL_LINES, GL_TRIANGLE_STRIP, etc.
+ * \param indirect_data buffer to get "DrawArrays/ElementsIndirectCommand"
+ * data
+ * \param indirect_offset offset of first primitive in indirect_data buffer
+ * \param draw_count number of primitives to draw
+ * \param stride stride, in bytes, between
+ * "DrawArrays/ElementsIndirectCommand" objects
+ * \param indirect_draw_count_buffer if non-NULL specifies a buffer to get
+ * the real draw_count value. Used for
+ * GL_ARB_indirect_parameters.
+ * \param indirect_draw_count_offset offset to the draw_count value in
+ * indirect_draw_count_buffer
+ * \param ib index buffer for indexed drawing, NULL otherwise.
+ */
+ void (*DrawIndirect)(struct gl_context *ctx, GLuint mode,
+ struct gl_buffer_object *indirect_data,
+ GLsizeiptr indirect_offset, unsigned draw_count,
+ unsigned stride,
+ struct gl_buffer_object *indirect_draw_count_buffer,
+ GLsizeiptr indirect_draw_count_offset,
+ const struct _mesa_index_buffer *ib);
+ /*@}*/
+
+
+ /**
+ * \name State-changing functions.
+ *
+ * \note drawing functions are above.
+ *
+ * These functions are called by their corresponding OpenGL API functions.
+ * They are \e also called by the glPopAttrib() function!!!
+ * More functions like these may be added to the device driver in the future.
+ */
+ /*@{*/
+ /** Specify the alpha test function */
+ void (*AlphaFunc)(struct gl_context *ctx, GLenum func, GLfloat ref);
+ /** Set the blend color */
+ void (*BlendColor)(struct gl_context *ctx, const GLfloat color[4]);
+ /** Set the blend equation */
+ void (*BlendEquationSeparate)(struct gl_context *ctx,
+ GLenum modeRGB, GLenum modeA);
+ /** Specify pixel arithmetic */
+ void (*BlendFuncSeparate)(struct gl_context *ctx,
+ GLenum sfactorRGB, GLenum dfactorRGB,
+ GLenum sfactorA, GLenum dfactorA);
+ /** Specify a plane against which all geometry is clipped */
+ void (*ClipPlane)(struct gl_context *ctx, GLenum plane, const GLfloat *eq);
+ /** Enable and disable writing of frame buffer color components */
+ void (*ColorMask)(struct gl_context *ctx, GLboolean rmask, GLboolean gmask,
+ GLboolean bmask, GLboolean amask );
+ /** Cause a material color to track the current color */
+ void (*ColorMaterial)(struct gl_context *ctx, GLenum face, GLenum mode);
+ /** Specify whether front- or back-facing facets can be culled */
+ void (*CullFace)(struct gl_context *ctx, GLenum mode);
+ /** Define front- and back-facing polygons */
+ void (*FrontFace)(struct gl_context *ctx, GLenum mode);
+ /** Specify the value used for depth buffer comparisons */
+ void (*DepthFunc)(struct gl_context *ctx, GLenum func);
+ /** Enable or disable writing into the depth buffer */
+ void (*DepthMask)(struct gl_context *ctx, GLboolean flag);
+ /** Specify mapping of depth values from NDC to window coordinates */
+ void (*DepthRange)(struct gl_context *ctx);
+ /** Specify the current buffer for writing */
+ void (*DrawBuffer)(struct gl_context *ctx);
+ /** Used to allocate any buffers with on-demand creation */
+ void (*DrawBufferAllocate)(struct gl_context *ctx);
+ /** Enable or disable server-side gl capabilities */
+ void (*Enable)(struct gl_context *ctx, GLenum cap, GLboolean state);
+ /** Specify fog parameters */
+ void (*Fogfv)(struct gl_context *ctx, GLenum pname, const GLfloat *params);
+ /** Set light source parameters.
+ * Note: for GL_POSITION and GL_SPOT_DIRECTION, params will have already
+ * been transformed to eye-space.
+ */
+ void (*Lightfv)(struct gl_context *ctx, GLenum light,
+ GLenum pname, const GLfloat *params );
+ /** Set the lighting model parameters */
+ void (*LightModelfv)(struct gl_context *ctx, GLenum pname,
+ const GLfloat *params);
+ /** Specify the line stipple pattern */
+ void (*LineStipple)(struct gl_context *ctx, GLint factor, GLushort pattern );
+ /** Specify the width of rasterized lines */
+ void (*LineWidth)(struct gl_context *ctx, GLfloat width);
+ /** Specify a logical pixel operation for color index rendering */
+ void (*LogicOpcode)(struct gl_context *ctx, enum gl_logicop_mode opcode);
+ void (*PointParameterfv)(struct gl_context *ctx, GLenum pname,
+ const GLfloat *params);
+ /** Specify the diameter of rasterized points */
+ void (*PointSize)(struct gl_context *ctx, GLfloat size);
+ /** Select a polygon rasterization mode */
+ void (*PolygonMode)(struct gl_context *ctx, GLenum face, GLenum mode);
+ /** Set the scale and units used to calculate depth values */
+ void (*PolygonOffset)(struct gl_context *ctx, GLfloat factor, GLfloat units, GLfloat clamp);
+ /** Set the polygon stippling pattern */
+ void (*PolygonStipple)(struct gl_context *ctx, const GLubyte *mask );
+ /** Specify the current buffer for reading */
+ void (*ReadBuffer)( struct gl_context *ctx, GLenum buffer );
+ /** Set rasterization mode */
+ void (*RenderMode)(struct gl_context *ctx, GLenum mode );
+ /** Define the scissor box */
+ void (*Scissor)(struct gl_context *ctx);
+ /** Select flat or smooth shading */
+ void (*ShadeModel)(struct gl_context *ctx, GLenum mode);
+ /** OpenGL 2.0 two-sided StencilFunc */
+ void (*StencilFuncSeparate)(struct gl_context *ctx, GLenum face, GLenum func,
+ GLint ref, GLuint mask);
+ /** OpenGL 2.0 two-sided StencilMask */
+ void (*StencilMaskSeparate)(struct gl_context *ctx, GLenum face, GLuint mask);
+ /** OpenGL 2.0 two-sided StencilOp */
+ void (*StencilOpSeparate)(struct gl_context *ctx, GLenum face, GLenum fail,
+ GLenum zfail, GLenum zpass);
+ /** Control the generation of texture coordinates */
+ void (*TexGen)(struct gl_context *ctx, GLenum coord, GLenum pname,
+ const GLfloat *params);
+ /** Set texture environment parameters */
+ void (*TexEnv)(struct gl_context *ctx, GLenum target, GLenum pname,
+ const GLfloat *param);
+ /** Set texture parameter (callee gets param value from the texObj) */
+ void (*TexParameter)(struct gl_context *ctx,
+ struct gl_texture_object *texObj, GLenum pname);
+ /** Set the viewport */
+ void (*Viewport)(struct gl_context *ctx);
+ /*@}*/
+
+
+ /**
+ * \name Vertex/pixel buffer object functions
+ */
+ /*@{*/
+ struct gl_buffer_object * (*NewBufferObject)(struct gl_context *ctx,
+ GLuint buffer);
+
+ void (*DeleteBuffer)( struct gl_context *ctx, struct gl_buffer_object *obj );
+
+ GLboolean (*BufferData)(struct gl_context *ctx, GLenum target,
+ GLsizeiptrARB size, const GLvoid *data, GLenum usage,
+ GLenum storageFlags, struct gl_buffer_object *obj);
+
+ void (*BufferSubData)( struct gl_context *ctx, GLintptrARB offset,
+ GLsizeiptrARB size, const GLvoid *data,
+ struct gl_buffer_object *obj );
+
+ void (*GetBufferSubData)( struct gl_context *ctx,
+ GLintptrARB offset, GLsizeiptrARB size,
+ GLvoid *data, struct gl_buffer_object *obj );
+
+ void (*ClearBufferSubData)( struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr size,
+ const GLvoid *clearValue,
+ GLsizeiptr clearValueSize,
+ struct gl_buffer_object *obj );
+
+ void (*CopyBufferSubData)( struct gl_context *ctx,
+ struct gl_buffer_object *src,
+ struct gl_buffer_object *dst,
+ GLintptr readOffset, GLintptr writeOffset,
+ GLsizeiptr size );
+
+ void (*InvalidateBufferSubData)( struct gl_context *ctx,
+ struct gl_buffer_object *obj,
+ GLintptr offset,
+ GLsizeiptr length );
+
+ /** Returns a pointer to the start of the mapped range.
+ * May return NULL if MESA_MAP_NOWAIT_BIT is set in access.
+ */
+ void * (*MapBufferRange)( struct gl_context *ctx, GLintptr offset,
+ GLsizeiptr length, GLbitfield access,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+
+ void (*FlushMappedBufferRange)(struct gl_context *ctx,
+ GLintptr offset, GLsizeiptr length,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+
+ GLboolean (*UnmapBuffer)( struct gl_context *ctx,
+ struct gl_buffer_object *obj,
+ gl_map_buffer_index index);
+ /*@}*/
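+
+ /* Illustrative sketch, not part of the interface: glMapBufferRange()
+ * and friends map onto the hooks above roughly as:
+ *
+ *    void *ptr = ctx->Driver.MapBufferRange(ctx, offset, length,
+ *                                           GL_MAP_WRITE_BIT |
+ *                                           GL_MAP_FLUSH_EXPLICIT_BIT,
+ *                                           bufObj, MAP_USER);
+ *    memcpy(ptr, data, length);
+ *    ctx->Driver.FlushMappedBufferRange(ctx, 0, length, bufObj, MAP_USER);
+ *    ctx->Driver.UnmapBuffer(ctx, bufObj, MAP_USER);
+ *
+ * MAP_USER is the gl_map_buffer_index used for application-initiated
+ * mappings; MAP_INTERNAL is used for Mesa's own temporary mappings.
+ */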
+
+ /**
+ * \name Functions for GL_APPLE_object_purgeable
+ */
+ /*@{*/
+ /* variations on ObjectPurgeable */
+ GLenum (*BufferObjectPurgeable)(struct gl_context *ctx,
+ struct gl_buffer_object *obj, GLenum option);
+ GLenum (*RenderObjectPurgeable)(struct gl_context *ctx,
+ struct gl_renderbuffer *obj, GLenum option);
+ GLenum (*TextureObjectPurgeable)(struct gl_context *ctx,
+ struct gl_texture_object *obj,
+ GLenum option);
+
+ /* variations on ObjectUnpurgeable */
+ GLenum (*BufferObjectUnpurgeable)(struct gl_context *ctx,
+ struct gl_buffer_object *obj,
+ GLenum option);
+ GLenum (*RenderObjectUnpurgeable)(struct gl_context *ctx,
+ struct gl_renderbuffer *obj,
+ GLenum option);
+ GLenum (*TextureObjectUnpurgeable)(struct gl_context *ctx,
+ struct gl_texture_object *obj,
+ GLenum option);
+ /*@}*/
+
+ /**
+ * \name Functions for GL_EXT_framebuffer_{object,blit,discard}.
+ */
+ /*@{*/
+ struct gl_framebuffer * (*NewFramebuffer)(struct gl_context *ctx,
+ GLuint name);
+ struct gl_renderbuffer * (*NewRenderbuffer)(struct gl_context *ctx,
+ GLuint name);
+ void (*BindFramebuffer)(struct gl_context *ctx, GLenum target,
+ struct gl_framebuffer *drawFb,
+ struct gl_framebuffer *readFb);
+ void (*FramebufferRenderbuffer)(struct gl_context *ctx,
+ struct gl_framebuffer *fb,
+ GLenum attachment,
+ struct gl_renderbuffer *rb);
+ void (*RenderTexture)(struct gl_context *ctx,
+ struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att);
+ void (*FinishRenderTexture)(struct gl_context *ctx,
+ struct gl_renderbuffer *rb);
+ void (*ValidateFramebuffer)(struct gl_context *ctx,
+ struct gl_framebuffer *fb);
+ /*@}*/
+ void (*BlitFramebuffer)(struct gl_context *ctx,
+ struct gl_framebuffer *readFb,
+ struct gl_framebuffer *drawFb,
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter);
+ void (*DiscardFramebuffer)(struct gl_context *ctx, struct gl_framebuffer *fb,
+ struct gl_renderbuffer_attachment *att);
+
+ /**
+ * \name Functions for GL_ARB_sample_locations
+ */
+ void (*GetProgrammableSampleCaps)(struct gl_context *ctx,
+ const struct gl_framebuffer *fb,
+ GLuint *bits, GLuint *width, GLuint *height);
+ void (*EvaluateDepthValues)(struct gl_context *ctx);
+
+ /**
+ * \name Query objects
+ */
+ /*@{*/
+ struct gl_query_object * (*NewQueryObject)(struct gl_context *ctx, GLuint id);
+ void (*DeleteQuery)(struct gl_context *ctx, struct gl_query_object *q);
+ void (*BeginQuery)(struct gl_context *ctx, struct gl_query_object *q);
+ void (*QueryCounter)(struct gl_context *ctx, struct gl_query_object *q);
+ void (*EndQuery)(struct gl_context *ctx, struct gl_query_object *q);
+ void (*CheckQuery)(struct gl_context *ctx, struct gl_query_object *q);
+ void (*WaitQuery)(struct gl_context *ctx, struct gl_query_object *q);
+ /**
+ * \param pname the value requested to be written (GL_QUERY_RESULT, etc)
+ * \param ptype the type of the value requested to be written:
+ * GL_UNSIGNED_INT, GL_UNSIGNED_INT64_ARB,
+ * GL_INT, GL_INT64_ARB
+ */
+ void (*StoreQueryResult)(struct gl_context *ctx, struct gl_query_object *q,
+ struct gl_buffer_object *buf, intptr_t offset,
+ GLenum pname, GLenum ptype);
+ /*@}*/
+
+ /**
+ * \name Performance monitors
+ */
+ /*@{*/
+ void (*InitPerfMonitorGroups)(struct gl_context *ctx);
+ struct gl_perf_monitor_object * (*NewPerfMonitor)(struct gl_context *ctx);
+ void (*DeletePerfMonitor)(struct gl_context *ctx,
+ struct gl_perf_monitor_object *m);
+ GLboolean (*BeginPerfMonitor)(struct gl_context *ctx,
+ struct gl_perf_monitor_object *m);
+
+ /** Stop an active performance monitor, discarding results. */
+ void (*ResetPerfMonitor)(struct gl_context *ctx,
+ struct gl_perf_monitor_object *m);
+ void (*EndPerfMonitor)(struct gl_context *ctx,
+ struct gl_perf_monitor_object *m);
+ GLboolean (*IsPerfMonitorResultAvailable)(struct gl_context *ctx,
+ struct gl_perf_monitor_object *m);
+ void (*GetPerfMonitorResult)(struct gl_context *ctx,
+ struct gl_perf_monitor_object *m,
+ GLsizei dataSize,
+ GLuint *data,
+ GLint *bytesWritten);
+ /*@}*/
+
+ /**
+ * \name Performance Query objects
+ */
+ /*@{*/
+ unsigned (*InitPerfQueryInfo)(struct gl_context *ctx);
+ void (*GetPerfQueryInfo)(struct gl_context *ctx,
+ unsigned queryIndex,
+ const char **name,
+ GLuint *dataSize,
+ GLuint *numCounters,
+ GLuint *numActive);
+ void (*GetPerfCounterInfo)(struct gl_context *ctx,
+ unsigned queryIndex,
+ unsigned counterIndex,
+ const char **name,
+ const char **desc,
+ GLuint *offset,
+ GLuint *data_size,
+ GLuint *type_enum,
+ GLuint *data_type_enum,
+ GLuint64 *raw_max);
+ struct gl_perf_query_object * (*NewPerfQueryObject)(struct gl_context *ctx,
+ unsigned queryIndex);
+ void (*DeletePerfQuery)(struct gl_context *ctx,
+ struct gl_perf_query_object *obj);
+ bool (*BeginPerfQuery)(struct gl_context *ctx,
+ struct gl_perf_query_object *obj);
+ void (*EndPerfQuery)(struct gl_context *ctx,
+ struct gl_perf_query_object *obj);
+ void (*WaitPerfQuery)(struct gl_context *ctx,
+ struct gl_perf_query_object *obj);
+ bool (*IsPerfQueryReady)(struct gl_context *ctx,
+ struct gl_perf_query_object *obj);
+ void (*GetPerfQueryData)(struct gl_context *ctx,
+ struct gl_perf_query_object *obj,
+ GLsizei dataSize,
+ GLuint *data,
+ GLuint *bytesWritten);
+ /*@}*/
+
+
+ /**
+ * \name GREMEDY debug/marker functions
+ */
+ /*@{*/
+ void (*EmitStringMarker)(struct gl_context *ctx, const GLchar *string, GLsizei len);
+ /*@}*/
+
+ /**
+ * \name Support for multiple T&L engines
+ */
+ /*@{*/
+
+ /**
+ * Set by the driver-supplied T&L engine.
+ *
+ * Set to PRIM_OUTSIDE_BEGIN_END when outside glBegin()/glEnd().
+ */
+ GLuint CurrentExecPrimitive;
+
+ /**
+ * Current glBegin state of an in-progress compilation. May be
+ * GL_POINTS, GL_TRIANGLE_STRIP, etc. or PRIM_OUTSIDE_BEGIN_END
+ * or PRIM_UNKNOWN.
+ */
+ GLuint CurrentSavePrimitive;
+
+
+#define FLUSH_STORED_VERTICES 0x1
+#define FLUSH_UPDATE_CURRENT 0x2
+ /**
+ * Set by the driver-supplied T&L engine whenever vertices are buffered
+ * between glBegin()/glEnd() calls or when __struct gl_contextRec::Current
+ * is not updated. A bitmask of the FLUSH_x values above.
+ *
+ * The dd_function_table::FlushVertices call below may be used to resolve
+ * these conditions.
+ */
+ GLbitfield NeedFlush;
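+
+ /* Illustrative sketch, not part of the interface: state-change paths
+ * test this bitmask before touching state, roughly:
+ *
+ *    if (ctx->Driver.NeedFlush & FLUSH_STORED_VERTICES)
+ *       vbo_exec_FlushVertices(ctx, FLUSH_STORED_VERTICES);
+ *
+ * In Mesa this test is wrapped by the FLUSH_VERTICES() macro.
+ */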
+
+ /** Need to call vbo_save_SaveFlushVertices() upon state change? */
+ GLboolean SaveNeedFlush;
+
+ /**
+ * Notify driver that the special derived value _NeedEyeCoords has
+ * changed.
+ */
+ void (*LightingSpaceChange)( struct gl_context *ctx );
+
+ /*@}*/
+
+ /**
+ * \name GL_ARB_sync interfaces
+ */
+ /*@{*/
+ struct gl_sync_object * (*NewSyncObject)(struct gl_context *);
+ void (*FenceSync)(struct gl_context *, struct gl_sync_object *,
+ GLenum, GLbitfield);
+ void (*DeleteSyncObject)(struct gl_context *, struct gl_sync_object *);
+ void (*CheckSync)(struct gl_context *, struct gl_sync_object *);
+ void (*ClientWaitSync)(struct gl_context *, struct gl_sync_object *,
+ GLbitfield, GLuint64);
+ void (*ServerWaitSync)(struct gl_context *, struct gl_sync_object *,
+ GLbitfield, GLuint64);
+ /*@}*/
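+
+ /* Illustrative sketch, not part of the interface: glFenceSync()
+ * followed by glClientWaitSync() reaches the driver roughly as:
+ *
+ *    struct gl_sync_object *so = ctx->Driver.NewSyncObject(ctx);
+ *    ctx->Driver.FenceSync(ctx, so, GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
+ *    ctx->Driver.ClientWaitSync(ctx, so, GL_SYNC_FLUSH_COMMANDS_BIT,
+ *                               timeout);
+ *
+ * CheckSync() updates the sync object's status without blocking.
+ */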
+
+ /** GL_NV_conditional_render */
+ void (*BeginConditionalRender)(struct gl_context *ctx,
+ struct gl_query_object *q,
+ GLenum mode);
+ void (*EndConditionalRender)(struct gl_context *ctx,
+ struct gl_query_object *q);
+
+ /**
+ * \name GL_OES_draw_texture interface
+ */
+ /*@{*/
+ void (*DrawTex)(struct gl_context *ctx, GLfloat x, GLfloat y, GLfloat z,
+ GLfloat width, GLfloat height);
+ /*@}*/
+
+ /**
+ * \name GL_OES_EGL_image interface
+ */
+ void (*EGLImageTargetTexture2D)(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle);
+ void (*EGLImageTargetRenderbufferStorage)(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ void *image_handle);
+
+ /**
+ * \name GL_EXT_EGL_image_storage interface
+ */
+ void (*EGLImageTargetTexStorage)(struct gl_context *ctx, GLenum target,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ GLeglImageOES image_handle);
+ /**
+ * \name GL_EXT_transform_feedback interface
+ */
+ struct gl_transform_feedback_object *
+ (*NewTransformFeedback)(struct gl_context *ctx, GLuint name);
+ void (*DeleteTransformFeedback)(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj);
+ void (*BeginTransformFeedback)(struct gl_context *ctx, GLenum mode,
+ struct gl_transform_feedback_object *obj);
+ void (*EndTransformFeedback)(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj);
+ void (*PauseTransformFeedback)(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj);
+ void (*ResumeTransformFeedback)(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj);
+
+ /**
+ * Return the number of vertices written to a stream during the last
+ * Begin/EndTransformFeedback block.
+ */
+ GLsizei (*GetTransformFeedbackVertexCount)(struct gl_context *ctx,
+ struct gl_transform_feedback_object *obj,
+ GLuint stream);
+
+ /**
+ * \name GL_NV_texture_barrier interface
+ */
+ void (*TextureBarrier)(struct gl_context *ctx);
+
+ /**
+ * \name GL_ARB_sampler_objects
+ */
+ struct gl_sampler_object * (*NewSamplerObject)(struct gl_context *ctx,
+ GLuint name);
+
+ /**
+ * \name GL_ARB_timer_query
+ * Return a timestamp in nanoseconds. This should be equivalent to
+ * glGetInteger64v(GL_TIMESTAMP).
+ */
+ uint64_t (*GetTimestamp)(struct gl_context *ctx);
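+
+ /* Illustrative sketch, not part of the interface: the two reads below
+ * should agree, up to the latency of making the calls:
+ *
+ *    GLint64 t1;
+ *    glGetInteger64v(GL_TIMESTAMP, &t1);
+ *    uint64_t t2 = ctx->Driver.GetTimestamp(ctx);   // same clock, in ns
+ */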
+
+ /**
+ * \name GL_ARB_texture_multisample
+ */
+ void (*GetSamplePosition)(struct gl_context *ctx,
+ struct gl_framebuffer *fb,
+ GLuint index,
+ GLfloat *outValue);
+
+ /**
+ * \name NV_vdpau_interop interface
+ */
+ void (*VDPAUMapSurface)(struct gl_context *ctx, GLenum target,
+ GLenum access, GLboolean output,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ const GLvoid *vdpSurface, GLuint index);
+ void (*VDPAUUnmapSurface)(struct gl_context *ctx, GLenum target,
+ GLenum access, GLboolean output,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ const GLvoid *vdpSurface, GLuint index);
+
+ /**
+ * Query reset status for GL_ARB_robustness
+ *
+ * Per \c glGetGraphicsResetStatusARB, this function should return a
+ * non-zero value once after a reset. If a reset is non-atomic, the
+ * non-zero status should be returned for the duration of the reset.
+ */
+ GLenum (*GetGraphicsResetStatus)(struct gl_context *ctx);
+
+ /**
+ * \name GL_ARB_shader_image_load_store interface.
+ */
+ /** @{ */
+ void (*MemoryBarrier)(struct gl_context *ctx, GLbitfield barriers);
+ /** @} */
+
+ /**
+ * GL_EXT_shader_framebuffer_fetch_non_coherent rendering barrier.
+ *
+ * On return from this function any framebuffer contents written by
+ * previous draw commands are guaranteed to be visible from subsequent
+ * fragment shader invocations using the
+ * EXT_shader_framebuffer_fetch_non_coherent interface.
+ */
+ /** @{ */
+ void (*FramebufferFetchBarrier)(struct gl_context *ctx);
+ /** @} */
+
+ /**
+ * \name GL_ARB_compute_shader interface
+ */
+ /*@{*/
+ void (*DispatchCompute)(struct gl_context *ctx, const GLuint *num_groups);
+ void (*DispatchComputeIndirect)(struct gl_context *ctx, GLintptr indirect);
+ /*@}*/
+
+ /**
+ * \name GL_ARB_compute_variable_group_size interface
+ */
+ /*@{*/
+ void (*DispatchComputeGroupSize)(struct gl_context *ctx,
+ const GLuint *num_groups,
+ const GLuint *group_size);
+ /*@}*/
+
+ /**
+ * Query information about memory. Device memory is e.g. VRAM. Staging
+ * memory is e.g. GART. All sizes are in kilobytes.
+ */
+ void (*QueryMemoryInfo)(struct gl_context *ctx,
+ struct gl_memory_info *info);
+
+ /**
+ * Indicate that this thread is being used by Mesa as a background drawing
+ * thread for the given GL context.
+ *
+ * If this function is called more than once from any given thread, each
+ * subsequent call overrides the context that was passed in the previous
+ * call. Mesa takes advantage of this to re-use a background thread to
+ * perform drawing on behalf of multiple contexts.
+ *
+ * Mesa may sometimes call this function from a non-background thread
+ * (i.e. a thread that has already been bound to a context using
+ * __DriverAPIRec::MakeCurrent()); when this happens, ctx will be equal to
+ * the context that is bound to this thread.
+ *
+ * Mesa will only call this function if GL multithreading is enabled.
+ */
+ void (*SetBackgroundContext)(struct gl_context *ctx,
+ struct util_queue_monitoring *queue_info);
+
+ /**
+ * \name GL_ARB_sparse_buffer interface
+ */
+ /*@{*/
+ void (*BufferPageCommitment)(struct gl_context *ctx,
+ struct gl_buffer_object *bufferObj,
+ GLintptr offset, GLsizeiptr size,
+ GLboolean commit);
+ /*@}*/
+
+ /**
+ * \name GL_ARB_bindless_texture interface
+ */
+ /*@{*/
+ GLuint64 (*NewTextureHandle)(struct gl_context *ctx,
+ struct gl_texture_object *texObj,
+ struct gl_sampler_object *sampObj);
+ void (*DeleteTextureHandle)(struct gl_context *ctx, GLuint64 handle);
+ void (*MakeTextureHandleResident)(struct gl_context *ctx, GLuint64 handle,
+ bool resident);
+ GLuint64 (*NewImageHandle)(struct gl_context *ctx,
+ struct gl_image_unit *imgObj);
+ void (*DeleteImageHandle)(struct gl_context *ctx, GLuint64 handle);
+ void (*MakeImageHandleResident)(struct gl_context *ctx, GLuint64 handle,
+ GLenum access, bool resident);
+ /*@}*/
+
+
+ /**
+ * \name GL_EXT_external_objects interface
+ */
+ /*@{*/
+ /**
+ * Called to allocate a new memory object. Drivers will usually
+ * allocate/return a subclass of gl_memory_object.
+ */
+ struct gl_memory_object * (*NewMemoryObject)(struct gl_context *ctx,
+ GLuint name);
+ /**
+ * Called to delete/free a memory object. Drivers should free the
+ * object and any image data it contains.
+ */
+ void (*DeleteMemoryObject)(struct gl_context *ctx,
+ struct gl_memory_object *memObj);
+
+ /**
+ * Set the given memory object as the texture's storage.
+ */
+ GLboolean (*SetTextureStorageForMemoryObject)(struct gl_context *ctx,
+ struct gl_texture_object *tex_obj,
+ struct gl_memory_object *mem_obj,
+ GLsizei levels, GLsizei width,
+ GLsizei height, GLsizei depth,
+ GLuint64 offset);
+
+ /**
+ * Use a memory object as the backing data for a buffer object
+ */
+ GLboolean (*BufferDataMem)(struct gl_context *ctx,
+ GLenum target,
+ GLsizeiptrARB size,
+ struct gl_memory_object *memObj,
+ GLuint64 offset,
+ GLenum usage,
+ struct gl_buffer_object *bufObj);
+
+ /**
+ * Fill uuid with a unique identifier for this driver.
+ *
+ * uuid must point to GL_UUID_SIZE_EXT bytes of available memory.
+ */
+ void (*GetDriverUuid)(struct gl_context *ctx, char *uuid);
+
+ /**
+ * Fill uuid with a unique identifier for the device associated
+ * with this driver.
+ *
+ * uuid must point to GL_UUID_SIZE_EXT bytes of available memory.
+ */
+ void (*GetDeviceUuid)(struct gl_context *ctx, char *uuid);
+
+ /*@}*/
+
+ /**
+ * \name GL_EXT_external_objects_fd interface
+ */
+ /*@{*/
+ /**
+ * Called to import a memory object. The caller relinquishes ownership
+ * of fd after the call returns.
+ *
+ * Accessing fd after ImportMemoryObjectFd returns results in undefined
+ * behaviour. This is consistent with EXT_external_objects_fd.
+ */
+ void (*ImportMemoryObjectFd)(struct gl_context *ctx,
+ struct gl_memory_object *memObj,
+ GLuint64 size,
+ int fd);
+ /*@}*/
+
+ /**
+ * \name GL_ARB_get_program_binary
+ */
+ /*@{*/
+ /**
+ * Calls to retrieve/store a binary serialized copy of the current program.
+ */
+ void (*GetProgramBinaryDriverSHA1)(struct gl_context *ctx, uint8_t *sha1);
+
+ void (*ProgramBinarySerializeDriverBlob)(struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct gl_program *prog);
+
+ void (*ProgramBinaryDeserializeDriverBlob)(struct gl_context *ctx,
+ struct gl_shader_program *shProg,
+ struct gl_program *prog);
+ /*@}*/
+
+ /**
+ * \name GL_EXT_semaphore interface
+ */
+ /*@{*/
+ /**
+ * Called to allocate a new semaphore object. Drivers will usually
+ * allocate/return a subclass of gl_semaphore_object.
+ */
+ struct gl_semaphore_object * (*NewSemaphoreObject)(struct gl_context *ctx,
+ GLuint name);
+ /**
+ * Called to delete/free a semaphore object. Drivers should free the
+ * object and any associated resources.
+ */
+ void (*DeleteSemaphoreObject)(struct gl_context *ctx,
+ struct gl_semaphore_object *semObj);
+
+ /**
+ * Introduce an operation to wait for the semaphore object in the GL
+ * server's command stream
+ */
+ void (*ServerWaitSemaphoreObject)(struct gl_context *ctx,
+ struct gl_semaphore_object *semObj,
+ GLuint numBufferBarriers,
+ struct gl_buffer_object **bufObjs,
+ GLuint numTextureBarriers,
+ struct gl_texture_object **texObjs,
+ const GLenum *srcLayouts);
+
+ /**
+ * Introduce an operation to signal the semaphore object in the GL
+ * server's command stream
+ */
+ void (*ServerSignalSemaphoreObject)(struct gl_context *ctx,
+ struct gl_semaphore_object *semObj,
+ GLuint numBufferBarriers,
+ struct gl_buffer_object **bufObjs,
+ GLuint numTextureBarriers,
+ struct gl_texture_object **texObjs,
+ const GLenum *dstLayouts);
+ /*@}*/
+
+ /**
+ * \name GL_EXT_semaphore_fd interface
+ */
+ /*@{*/
+ /**
+ * Called to import a semaphore object. The caller relinquishes ownership
+ * of fd after the call returns.
+ *
+ * Accessing fd after ImportSemaphoreFd returns results in undefined
+ * behaviour. This is consistent with EXT_semaphore_fd.
+ */
+ void (*ImportSemaphoreFd)(struct gl_context *ctx,
+ struct gl_semaphore_object *semObj,
+ int fd);
+ /*@}*/
+
+ /**
+ * \name Disk shader cache functions
+ */
+ /*@{*/
+ /**
+ * Called to initialize gl_program::driver_cache_blob (and size) with a
+ * ralloc allocated buffer.
+ *
+ * This buffer will be saved and restored as part of the gl_program
+ * serialization and deserialization.
+ */
+ void (*ShaderCacheSerializeDriverBlob)(struct gl_context *ctx,
+ struct gl_program *prog);
+ /*@}*/
+
+ /**
+ * \name Set the number of compiler threads for ARB_parallel_shader_compile
+ */
+ void (*SetMaxShaderCompilerThreads)(struct gl_context *ctx, unsigned count);
+ bool (*GetShaderProgramCompletionStatus)(struct gl_context *ctx,
+ struct gl_shader_program *shprog);
+};
+
+
+/**
+ * Per-vertex functions.
+ *
+ * These are the functions which can appear between glBegin and glEnd.
+ * Depending on whether we're inside or outside a glBegin/End pair
+ * and whether we're in immediate mode or building a display list, these
+ * functions behave differently. This structure allows us to switch
+ * between those modes more easily.
+ *
+ * Generally, these pointers point to functions in the VBO module.
+ */
+typedef struct {
+ void (GLAPIENTRYP ArrayElement)( GLint );
+ void (GLAPIENTRYP Color3f)( GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP Color3fv)( const GLfloat * );
+ void (GLAPIENTRYP Color4f)( GLfloat, GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP Color4fv)( const GLfloat * );
+ void (GLAPIENTRYP EdgeFlag)( GLboolean );
+ void (GLAPIENTRYP EvalCoord1f)( GLfloat );
+ void (GLAPIENTRYP EvalCoord1fv)( const GLfloat * );
+ void (GLAPIENTRYP EvalCoord2f)( GLfloat, GLfloat );
+ void (GLAPIENTRYP EvalCoord2fv)( const GLfloat * );
+ void (GLAPIENTRYP EvalPoint1)( GLint );
+ void (GLAPIENTRYP EvalPoint2)( GLint, GLint );
+ void (GLAPIENTRYP FogCoordfEXT)( GLfloat );
+ void (GLAPIENTRYP FogCoordfvEXT)( const GLfloat * );
+ void (GLAPIENTRYP Indexf)( GLfloat );
+ void (GLAPIENTRYP Indexfv)( const GLfloat * );
+ void (GLAPIENTRYP Materialfv)( GLenum face, GLenum pname, const GLfloat * );
+ void (GLAPIENTRYP MultiTexCoord1fARB)( GLenum, GLfloat );
+ void (GLAPIENTRYP MultiTexCoord1fvARB)( GLenum, const GLfloat * );
+ void (GLAPIENTRYP MultiTexCoord2fARB)( GLenum, GLfloat, GLfloat );
+ void (GLAPIENTRYP MultiTexCoord2fvARB)( GLenum, const GLfloat * );
+ void (GLAPIENTRYP MultiTexCoord3fARB)( GLenum, GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP MultiTexCoord3fvARB)( GLenum, const GLfloat * );
+ void (GLAPIENTRYP MultiTexCoord4fARB)( GLenum, GLfloat, GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP MultiTexCoord4fvARB)( GLenum, const GLfloat * );
+ void (GLAPIENTRYP Normal3f)( GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP Normal3fv)( const GLfloat * );
+ void (GLAPIENTRYP SecondaryColor3fEXT)( GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP SecondaryColor3fvEXT)( const GLfloat * );
+ void (GLAPIENTRYP TexCoord1f)( GLfloat );
+ void (GLAPIENTRYP TexCoord1fv)( const GLfloat * );
+ void (GLAPIENTRYP TexCoord2f)( GLfloat, GLfloat );
+ void (GLAPIENTRYP TexCoord2fv)( const GLfloat * );
+ void (GLAPIENTRYP TexCoord3f)( GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP TexCoord3fv)( const GLfloat * );
+ void (GLAPIENTRYP TexCoord4f)( GLfloat, GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP TexCoord4fv)( const GLfloat * );
+ void (GLAPIENTRYP Vertex2f)( GLfloat, GLfloat );
+ void (GLAPIENTRYP Vertex2fv)( const GLfloat * );
+ void (GLAPIENTRYP Vertex3f)( GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP Vertex3fv)( const GLfloat * );
+ void (GLAPIENTRYP Vertex4f)( GLfloat, GLfloat, GLfloat, GLfloat );
+ void (GLAPIENTRYP Vertex4fv)( const GLfloat * );
+ void (GLAPIENTRYP CallList)( GLuint );
+ void (GLAPIENTRYP CallLists)( GLsizei, GLenum, const GLvoid * );
+ void (GLAPIENTRYP Begin)( GLenum );
+ void (GLAPIENTRYP End)( void );
+ void (GLAPIENTRYP PrimitiveRestartNV)( void );
+ /* Originally for GL_NV_vertex_program, now used only by dlist.c and friends */
+ void (GLAPIENTRYP VertexAttrib1fNV)( GLuint index, GLfloat x );
+ void (GLAPIENTRYP VertexAttrib1fvNV)( GLuint index, const GLfloat *v );
+ void (GLAPIENTRYP VertexAttrib2fNV)( GLuint index, GLfloat x, GLfloat y );
+ void (GLAPIENTRYP VertexAttrib2fvNV)( GLuint index, const GLfloat *v );
+ void (GLAPIENTRYP VertexAttrib3fNV)( GLuint index, GLfloat x, GLfloat y, GLfloat z );
+ void (GLAPIENTRYP VertexAttrib3fvNV)( GLuint index, const GLfloat *v );
+ void (GLAPIENTRYP VertexAttrib4fNV)( GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w );
+ void (GLAPIENTRYP VertexAttrib4fvNV)( GLuint index, const GLfloat *v );
+ /* GL_ARB_vertex_program */
+ void (GLAPIENTRYP VertexAttrib1fARB)( GLuint index, GLfloat x );
+ void (GLAPIENTRYP VertexAttrib1fvARB)( GLuint index, const GLfloat *v );
+ void (GLAPIENTRYP VertexAttrib2fARB)( GLuint index, GLfloat x, GLfloat y );
+ void (GLAPIENTRYP VertexAttrib2fvARB)( GLuint index, const GLfloat *v );
+ void (GLAPIENTRYP VertexAttrib3fARB)( GLuint index, GLfloat x, GLfloat y, GLfloat z );
+ void (GLAPIENTRYP VertexAttrib3fvARB)( GLuint index, const GLfloat *v );
+ void (GLAPIENTRYP VertexAttrib4fARB)( GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w );
+ void (GLAPIENTRYP VertexAttrib4fvARB)( GLuint index, const GLfloat *v );
+
+ /* GL_EXT_gpu_shader4 / GL 3.0 */
+ void (GLAPIENTRYP VertexAttribI1i)( GLuint index, GLint x);
+ void (GLAPIENTRYP VertexAttribI2i)( GLuint index, GLint x, GLint y);
+ void (GLAPIENTRYP VertexAttribI3i)( GLuint index, GLint x, GLint y, GLint z);
+ void (GLAPIENTRYP VertexAttribI4i)( GLuint index, GLint x, GLint y, GLint z, GLint w);
+ void (GLAPIENTRYP VertexAttribI2iv)( GLuint index, const GLint *v);
+ void (GLAPIENTRYP VertexAttribI3iv)( GLuint index, const GLint *v);
+ void (GLAPIENTRYP VertexAttribI4iv)( GLuint index, const GLint *v);
+
+ void (GLAPIENTRYP VertexAttribI1ui)( GLuint index, GLuint x);
+ void (GLAPIENTRYP VertexAttribI2ui)( GLuint index, GLuint x, GLuint y);
+ void (GLAPIENTRYP VertexAttribI3ui)( GLuint index, GLuint x, GLuint y, GLuint z);
+ void (GLAPIENTRYP VertexAttribI4ui)( GLuint index, GLuint x, GLuint y, GLuint z, GLuint w);
+ void (GLAPIENTRYP VertexAttribI2uiv)( GLuint index, const GLuint *v);
+ void (GLAPIENTRYP VertexAttribI3uiv)( GLuint index, const GLuint *v);
+ void (GLAPIENTRYP VertexAttribI4uiv)( GLuint index, const GLuint *v);
+
+ /* GL_ARB_vertex_type_10_10_10_2_rev / GL3.3 */
+ void (GLAPIENTRYP VertexP2ui)( GLenum type, GLuint value );
+ void (GLAPIENTRYP VertexP2uiv)( GLenum type, const GLuint *value);
+
+ void (GLAPIENTRYP VertexP3ui)( GLenum type, GLuint value );
+ void (GLAPIENTRYP VertexP3uiv)( GLenum type, const GLuint *value);
+
+ void (GLAPIENTRYP VertexP4ui)( GLenum type, GLuint value );
+ void (GLAPIENTRYP VertexP4uiv)( GLenum type, const GLuint *value);
+
+ void (GLAPIENTRYP TexCoordP1ui)( GLenum type, GLuint coords );
+ void (GLAPIENTRYP TexCoordP1uiv)( GLenum type, const GLuint *coords );
+
+ void (GLAPIENTRYP TexCoordP2ui)( GLenum type, GLuint coords );
+ void (GLAPIENTRYP TexCoordP2uiv)( GLenum type, const GLuint *coords );
+
+ void (GLAPIENTRYP TexCoordP3ui)( GLenum type, GLuint coords );
+ void (GLAPIENTRYP TexCoordP3uiv)( GLenum type, const GLuint *coords );
+
+ void (GLAPIENTRYP TexCoordP4ui)( GLenum type, GLuint coords );
+ void (GLAPIENTRYP TexCoordP4uiv)( GLenum type, const GLuint *coords );
+
+ void (GLAPIENTRYP MultiTexCoordP1ui)( GLenum texture, GLenum type, GLuint coords );
+ void (GLAPIENTRYP MultiTexCoordP1uiv)( GLenum texture, GLenum type, const GLuint *coords );
+ void (GLAPIENTRYP MultiTexCoordP2ui)( GLenum texture, GLenum type, GLuint coords );
+ void (GLAPIENTRYP MultiTexCoordP2uiv)( GLenum texture, GLenum type, const GLuint *coords );
+ void (GLAPIENTRYP MultiTexCoordP3ui)( GLenum texture, GLenum type, GLuint coords );
+ void (GLAPIENTRYP MultiTexCoordP3uiv)( GLenum texture, GLenum type, const GLuint *coords );
+ void (GLAPIENTRYP MultiTexCoordP4ui)( GLenum texture, GLenum type, GLuint coords );
+ void (GLAPIENTRYP MultiTexCoordP4uiv)( GLenum texture, GLenum type, const GLuint *coords );
+
+ void (GLAPIENTRYP NormalP3ui)( GLenum type, GLuint coords );
+ void (GLAPIENTRYP NormalP3uiv)( GLenum type, const GLuint *coords );
+
+ void (GLAPIENTRYP ColorP3ui)( GLenum type, GLuint color );
+ void (GLAPIENTRYP ColorP3uiv)( GLenum type, const GLuint *color );
+
+ void (GLAPIENTRYP ColorP4ui)( GLenum type, GLuint color );
+ void (GLAPIENTRYP ColorP4uiv)( GLenum type, const GLuint *color );
+
+ void (GLAPIENTRYP SecondaryColorP3ui)( GLenum type, GLuint color );
+ void (GLAPIENTRYP SecondaryColorP3uiv)( GLenum type, const GLuint *color );
+
+ void (GLAPIENTRYP VertexAttribP1ui)( GLuint index, GLenum type,
+ GLboolean normalized, GLuint value);
+ void (GLAPIENTRYP VertexAttribP2ui)( GLuint index, GLenum type,
+ GLboolean normalized, GLuint value);
+ void (GLAPIENTRYP VertexAttribP3ui)( GLuint index, GLenum type,
+ GLboolean normalized, GLuint value);
+ void (GLAPIENTRYP VertexAttribP4ui)( GLuint index, GLenum type,
+ GLboolean normalized, GLuint value);
+ void (GLAPIENTRYP VertexAttribP1uiv)( GLuint index, GLenum type,
+ GLboolean normalized,
+ const GLuint *value);
+ void (GLAPIENTRYP VertexAttribP2uiv)( GLuint index, GLenum type,
+ GLboolean normalized,
+ const GLuint *value);
+ void (GLAPIENTRYP VertexAttribP3uiv)( GLuint index, GLenum type,
+ GLboolean normalized,
+ const GLuint *value);
+ void (GLAPIENTRYP VertexAttribP4uiv)( GLuint index, GLenum type,
+ GLboolean normalized,
+ const GLuint *value);
+
+ /* GL_ARB_vertex_attrib_64bit / GL 4.1 */
+ void (GLAPIENTRYP VertexAttribL1d)( GLuint index, GLdouble x);
+ void (GLAPIENTRYP VertexAttribL2d)( GLuint index, GLdouble x, GLdouble y);
+ void (GLAPIENTRYP VertexAttribL3d)( GLuint index, GLdouble x, GLdouble y, GLdouble z);
+ void (GLAPIENTRYP VertexAttribL4d)( GLuint index, GLdouble x, GLdouble y, GLdouble z, GLdouble w);
+
+
+ void (GLAPIENTRYP VertexAttribL1dv)( GLuint index, const GLdouble *v);
+ void (GLAPIENTRYP VertexAttribL2dv)( GLuint index, const GLdouble *v);
+ void (GLAPIENTRYP VertexAttribL3dv)( GLuint index, const GLdouble *v);
+ void (GLAPIENTRYP VertexAttribL4dv)( GLuint index, const GLdouble *v);
+
+ void (GLAPIENTRYP VertexAttribL1ui64ARB)( GLuint index, GLuint64EXT x);
+ void (GLAPIENTRYP VertexAttribL1ui64vARB)( GLuint index, const GLuint64EXT *v);
+} GLvertexformat;
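+
+/*
+ * Illustrative sketch, not part of this header: the vbo module installs
+ * a GLvertexformat and immediate-mode commands are dispatched through
+ * it, so a classic begin/end sequence is roughly:
+ *
+ *    vfmt->Begin(GL_TRIANGLES);
+ *    vfmt->Color3f(1.0f, 0.0f, 0.0f);
+ *    vfmt->Vertex3f(0.0f, 0.0f, 0.0f);
+ *    vfmt->Vertex3f(1.0f, 0.0f, 0.0f);
+ *    vfmt->Vertex3f(0.0f, 1.0f, 0.0f);
+ *    vfmt->End();
+ *
+ * where vfmt points at the table for the current mode (immediate
+ * execution vs. display-list compilation).
+ */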
+
+
+#endif /* DD_INCLUDED */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/debug_output.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/debug_output.h
new file mode 100644
index 0000000000..8a5eedc21f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/debug_output.h
@@ -0,0 +1,107 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2016 Brian Paul, et al All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef DEBUG_OUTPUT_H
+#define DEBUG_OUTPUT_H
+
+
+#include <stdio.h>
+#include <stdarg.h>
+#include "glheader.h"
+#include "menums.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+
+void
+_mesa_init_debug_output(struct gl_context *ctx);
+
+void
+_mesa_free_errors_data(struct gl_context *ctx);
+
+void
+_mesa_debug_get_id(GLuint *id);
+
+bool
+_mesa_set_debug_state_int(struct gl_context *ctx, GLenum pname, GLint val);
+
+GLint
+_mesa_get_debug_state_int(struct gl_context *ctx, GLenum pname);
+
+void *
+_mesa_get_debug_state_ptr(struct gl_context *ctx, GLenum pname);
+
+void
+_mesa_log_msg(struct gl_context *ctx, enum mesa_debug_source source,
+ enum mesa_debug_type type, GLuint id,
+ enum mesa_debug_severity severity, GLint len, const char *buf);
+
+bool
+_mesa_debug_is_message_enabled(const struct gl_debug_state *debug,
+ enum mesa_debug_source source,
+ enum mesa_debug_type type,
+ GLuint id,
+ enum mesa_debug_severity severity);
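+
+/* Illustrative sketch, not part of this header: driver-side messages
+ * normally go through _mesa_log_msg(), which applies the filtering
+ * implemented by _mesa_debug_is_message_enabled(), roughly:
+ *
+ *    GLuint id = 0;
+ *    _mesa_debug_get_id(&id);
+ *    _mesa_log_msg(ctx, MESA_DEBUG_SOURCE_OTHER, MESA_DEBUG_TYPE_OTHER,
+ *                  id, MESA_DEBUG_SEVERITY_NOTIFICATION,
+ *                  strlen(msg), msg);
+ */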
+
+void GLAPIENTRY
+_mesa_DebugMessageInsert(GLenum source, GLenum type, GLuint id,
+ GLenum severity, GLint length,
+ const GLchar* buf);
+
+GLuint GLAPIENTRY
+_mesa_GetDebugMessageLog(GLuint count, GLsizei logSize, GLenum* sources,
+ GLenum* types, GLenum* ids, GLenum* severities,
+ GLsizei* lengths, GLchar* messageLog);
+
+void GLAPIENTRY
+_mesa_DebugMessageControl(GLenum source, GLenum type, GLenum severity,
+ GLsizei count, const GLuint *ids,
+ GLboolean enabled);
+
+void GLAPIENTRY
+_mesa_DebugMessageCallback(GLDEBUGPROC callback,
+ const void *userParam);
+
+void GLAPIENTRY
+_mesa_PushDebugGroup(GLenum source, GLuint id, GLsizei length,
+ const GLchar *message);
+
+void GLAPIENTRY
+_mesa_PopDebugGroup(void);
+
+void GLAPIENTRY
+_mesa_StringMarkerGREMEDY(GLsizei len, const GLvoid *string);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* DEBUG_OUTPUT_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/draw.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/draw.h
new file mode 100644
index 0000000000..212e70a3fd
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/draw.h
@@ -0,0 +1,227 @@
+/*
+ * mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \brief Array type draw functions, the main workhorse of any OpenGL API
+ * \author Keith Whitwell
+ */
+
+
+#ifndef DRAW_H
+#define DRAW_H
+
+#include <stdbool.h>
+#include "main/glheader.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+
+struct _mesa_prim
+{
+ GLubyte mode; /**< GL_POINTS, GL_LINES, GL_QUAD_STRIP, etc */
+
+ /**
+ * tnl: If true, line stipple emulation will reset the pattern walker.
+ * vbo: If false and the primitive is a line loop, the first vertex is
+ * the beginning of the line loop and it won't be drawn.
+ * Instead, it will be moved to the end.
+ */
+ bool begin;
+
+ /**
+ * tnl: If true and the primitive is a line loop, it will be closed.
+ * vbo: Same as tnl.
+ */
+ bool end;
+
+ GLuint start;
+ GLuint count;
+ GLint basevertex;
+ GLuint draw_id;
+};
+
+/* Would like to call this a "vbo_index_buffer", but this would be
+ * confusing as the indices are not necessarily yet in a non-null
+ * buffer object.
+ */
+struct _mesa_index_buffer
+{
+ GLuint count;
+ uint8_t index_size_shift; /* logbase2(index_size) */
+ struct gl_buffer_object *obj;
+ const void *ptr;
+};
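+
+/* Illustrative note, not part of this header: index_size_shift encodes
+ * the index type, so the byte size is recovered as:
+ *
+ *    unsigned index_size = 1u << ib->index_size_shift;
+ *    // GL_UNSIGNED_BYTE  -> shift 0 (1 byte)
+ *    // GL_UNSIGNED_SHORT -> shift 1 (2 bytes)
+ *    // GL_UNSIGNED_INT   -> shift 2 (4 bytes)
+ */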
+
+
+void
+_mesa_initialize_exec_dispatch(const struct gl_context *ctx,
+ struct _glapi_table *exec);
+
+void GLAPIENTRY
+_mesa_EvalMesh1(GLenum mode, GLint i1, GLint i2);
+
+void GLAPIENTRY
+_mesa_EvalMesh2(GLenum mode, GLint i1, GLint i2, GLint j1, GLint j2);
+
+void GLAPIENTRY
+_mesa_DrawElementsInstancedARB(GLenum mode, GLsizei count, GLenum type,
+ const GLvoid * indices, GLsizei numInstances);
+
+void GLAPIENTRY
+_mesa_DrawArraysInstancedBaseInstance(GLenum mode, GLint first,
+ GLsizei count, GLsizei numInstances,
+ GLuint baseInstance);
+
+void GLAPIENTRY
+_mesa_DrawElementsInstancedBaseVertex(GLenum mode, GLsizei count,
+ GLenum type, const GLvoid * indices,
+ GLsizei numInstances,
+ GLint basevertex);
+
+void GLAPIENTRY
+_mesa_DrawElementsInstancedBaseInstance(GLenum mode, GLsizei count,
+ GLenum type,
+ const GLvoid *indices,
+ GLsizei numInstances,
+ GLuint baseInstance);
+
+void GLAPIENTRY
+_mesa_DrawTransformFeedbackStream(GLenum mode, GLuint name, GLuint stream);
+
+void GLAPIENTRY
+_mesa_DrawTransformFeedbackInstanced(GLenum mode, GLuint name,
+ GLsizei primcount);
+
+void GLAPIENTRY
+_mesa_DrawTransformFeedbackStreamInstanced(GLenum mode, GLuint name,
+ GLuint stream,
+ GLsizei primcount);
+
+void GLAPIENTRY
+_mesa_DrawArraysIndirect(GLenum mode, const GLvoid *indirect);
+
+void GLAPIENTRY
+_mesa_DrawElementsIndirect(GLenum mode, GLenum type, const GLvoid *indirect);
+
+void GLAPIENTRY
+_mesa_MultiDrawArraysIndirect(GLenum mode, const GLvoid *indirect,
+ GLsizei primcount, GLsizei stride);
+
+void GLAPIENTRY
+_mesa_MultiDrawElementsIndirect(GLenum mode, GLenum type,
+ const GLvoid *indirect,
+ GLsizei primcount, GLsizei stride);
+
+void GLAPIENTRY
+_mesa_MultiDrawArraysIndirectCountARB(GLenum mode, GLintptr indirect,
+ GLintptr drawcount_offset,
+ GLsizei maxdrawcount, GLsizei stride);
+
+void GLAPIENTRY
+_mesa_MultiDrawElementsIndirectCountARB(GLenum mode, GLenum type,
+ GLintptr indirect,
+ GLintptr drawcount_offset,
+ GLsizei maxdrawcount, GLsizei stride);
+
+void GLAPIENTRY
+_mesa_DrawArrays(GLenum mode, GLint first, GLsizei count);
+
+
+void GLAPIENTRY
+_mesa_DrawArraysInstancedARB(GLenum mode, GLint first, GLsizei count,
+ GLsizei primcount);
+
+void GLAPIENTRY
+_mesa_DrawElementsInstancedBaseVertexBaseInstance(GLenum mode,
+ GLsizei count,
+ GLenum type,
+ const GLvoid *indices,
+ GLsizei numInstances,
+ GLint basevertex,
+ GLuint baseInstance);
+
+void GLAPIENTRY
+_mesa_DrawElements(GLenum mode, GLsizei count, GLenum type,
+ const GLvoid *indices);
+
+
+void GLAPIENTRY
+_mesa_DrawRangeElements(GLenum mode, GLuint start, GLuint end, GLsizei count,
+ GLenum type, const GLvoid *indices);
+
+
+void GLAPIENTRY
+_mesa_DrawElementsBaseVertex(GLenum mode, GLsizei count, GLenum type,
+ const GLvoid *indices, GLint basevertex);
+
+
+void GLAPIENTRY
+_mesa_DrawRangeElementsBaseVertex(GLenum mode, GLuint start, GLuint end,
+ GLsizei count, GLenum type,
+ const GLvoid *indices,
+ GLint basevertex);
+
+
+void GLAPIENTRY
+_mesa_DrawTransformFeedback(GLenum mode, GLuint name);
+
+
+
+void GLAPIENTRY
+_mesa_MultiDrawArrays(GLenum mode, const GLint *first,
+ const GLsizei *count, GLsizei primcount);
+
+
+void GLAPIENTRY
+_mesa_MultiDrawElements(GLenum mode, const GLsizei *count, GLenum type,
+ const GLvoid *const *indices, GLsizei primcount);
+
+
+void GLAPIENTRY
+_mesa_MultiDrawElementsBaseVertex(GLenum mode,
+ const GLsizei *count, GLenum type,
+ const GLvoid * const * indices, GLsizei primcount,
+ const GLint *basevertex);
+
+
+void GLAPIENTRY
+_mesa_MultiModeDrawArraysIBM(const GLenum * mode, const GLint * first,
+ const GLsizei * count,
+ GLsizei primcount, GLint modestride);
+
+
+void GLAPIENTRY
+_mesa_MultiModeDrawElementsIBM(const GLenum * mode, const GLsizei * count,
+ GLenum type, const GLvoid * const * indices,
+ GLsizei primcount, GLint modestride);
+
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/enums.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/enums.h
new file mode 100644
index 0000000000..0e18cd407e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/enums.h
@@ -0,0 +1,58 @@
+/**
+ * \file enums.h
+ * Enumeration name/number lookup functions.
+ *
+ * \if subset
+ * (No-op)
+ *
+ * \endif
+ */
+
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _ENUMS_H_
+#define _ENUMS_H_
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+extern const char *_mesa_enum_to_string( int nr );
+
+/* Get the name of an enum given that it is a primitive type. Avoids
+ * GL_FALSE/GL_POINTS ambiguity and others.
+ */
+const char *_mesa_lookup_prim_by_nr( unsigned nr );
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/errors.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/errors.h
new file mode 100644
index 0000000000..17fe380f26
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/errors.h
@@ -0,0 +1,118 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file errors.h
+ * Mesa debugging and error handling functions.
+ *
+ * This file provides functions to record errors, warnings, and miscellaneous
+ * debug information.
+ */
+
+
+#ifndef ERRORS_H
+#define ERRORS_H
+
+
+#include <stdio.h>
+#include <stdarg.h>
+#include "glheader.h"
+#include "menums.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+
+extern void
+_mesa_warning( struct gl_context *gc, const char *fmtString, ... ) PRINTFLIKE(2, 3);
+
+extern void
+_mesa_problem( const struct gl_context *ctx, const char *fmtString, ... ) PRINTFLIKE(2, 3);
+
+extern void
+_mesa_error( struct gl_context *ctx, GLenum error, const char *fmtString, ... ) PRINTFLIKE(3, 4);
+
+extern void
+_mesa_error_no_memory(const char *caller);
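+
+/* Illustrative sketch, not part of this header: API entry points record
+ * GL errors with a printf-style message naming the offending call:
+ *
+ *    if (width < 0) {
+ *       _mesa_error(ctx, GL_INVALID_VALUE, "glTexImage2D(width=%d)", width);
+ *       return;
+ *    }
+ */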
+
+extern void
+_mesa_debug( const struct gl_context *ctx, const char *fmtString, ... ) PRINTFLIKE(2, 3);
+
+extern void
+_mesa_log(const char *fmtString, ...) PRINTFLIKE(1, 2);
+
+extern FILE *
+_mesa_get_log_file(void);
+
+void
+_mesa_shader_debug(struct gl_context *ctx, GLenum type, GLuint *id,
+ const char *msg);
+
+extern void
+_mesa_gl_vdebugf(struct gl_context *ctx,
+ GLuint *id,
+ enum mesa_debug_source source,
+ enum mesa_debug_type type,
+ enum mesa_debug_severity severity,
+ const char *fmtString,
+ va_list args);
+
+extern void
+_mesa_gl_debugf(struct gl_context *ctx,
+ GLuint *id,
+ enum mesa_debug_source source,
+ enum mesa_debug_type type,
+ enum mesa_debug_severity severity,
+ const char *fmtString, ...) PRINTFLIKE(6, 7);
+
+extern size_t
+_mesa_gl_debug(struct gl_context *ctx,
+ GLuint *id,
+ enum mesa_debug_source source,
+ enum mesa_debug_type type,
+ enum mesa_debug_severity severity,
+ const char *msg);
+
+#define _mesa_perf_debug(ctx, sev, ...) do { \
+ static GLuint msg_id = 0; \
+ if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
+ _mesa_gl_debugf(ctx, &msg_id, \
+ MESA_DEBUG_SOURCE_API, \
+ MESA_DEBUG_TYPE_PERFORMANCE, \
+ sev, \
+ __VA_ARGS__); \
+ } \
+} while (0)
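+/* Illustrative use (assumes the MESA_DEBUG_SEVERITY_* values from menums.h;
+ * "reason" is a hypothetical local):
+ *
+ *    _mesa_perf_debug(ctx, MESA_DEBUG_SEVERITY_MEDIUM,
+ *                     "falling back to software for %s", reason);
+ *
+ * The message is only emitted for debug contexts, since the macro checks
+ * GL_CONTEXT_FLAG_DEBUG_BIT before calling _mesa_gl_debugf().
+ */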
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* ERRORS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions.h
new file mode 100644
index 0000000000..303929baab
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions.h
@@ -0,0 +1,125 @@
+/**
+ * \file extensions.h
+ * Extension handling.
+ *
+ * \if subset
+ * (No-op)
+ *
+ * \endif
+ */
+
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _EXTENSIONS_H_
+#define _EXTENSIONS_H_
+
+#include "mtypes.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+struct gl_extensions;
+
+extern void _mesa_enable_sw_extensions(struct gl_context *ctx);
+
+extern void _mesa_one_time_init_extension_overrides(void);
+
+extern void _mesa_init_extensions(struct gl_extensions *extensions);
+
+extern GLubyte *_mesa_make_extension_string(struct gl_context *ctx);
+
+extern void _mesa_override_extensions(struct gl_context *ctx);
+
+extern GLuint
+_mesa_get_extension_count(struct gl_context *ctx);
+
+extern const GLubyte *
+_mesa_get_enabled_extension(struct gl_context *ctx, GLuint index);
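+/* The two queries above presumably back GL_NUM_EXTENSIONS and
+ * glGetStringi(GL_EXTENSIONS, i): the number of enabled extensions,
+ * and the name of the index-th one. */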
+
+
+/**
+ * \brief An element of the \c extension_table.
+ */
+struct mesa_extension {
+ /** Name of extension, such as "GL_ARB_depth_clamp". */
+ const char *name;
+
+ /** Offset (in bytes) of the corresponding member in struct gl_extensions. */
+ size_t offset;
+
+ /** Minimum version the extension requires for the given API
+ * (see gl_api defined in mtypes.h). The value is equal to:
+ * 10 * major_version + minor_version, e.g. 30 for version 3.0.
+ */
+ uint8_t version[API_OPENGL_LAST + 1];
+
+ /** Year the extension was proposed or approved. Used to sort the
+ * extension string chronologically. */
+ uint16_t year;
+};
+
+extern const struct mesa_extension _mesa_extension_table[];
+
+
+/* Generate enums for the functions below */
+enum {
+#define EXT(name_str, ...) MESA_EXTENSION_##name_str,
+#include "extensions_table.h"
+#undef EXT
+MESA_EXTENSION_COUNT
+};
+
+
+/** Checks if the context supports a user-facing extension */
+#define EXT(name_str, driver_cap, ...) \
+static inline bool \
+_mesa_has_##name_str(const struct gl_context *ctx) \
+{ \
+ return ctx->Extensions.driver_cap && (ctx->Extensions.Version >= \
+ _mesa_extension_table[MESA_EXTENSION_##name_str].version[ctx->API]); \
+}
+#include "extensions_table.h"
+#undef EXT
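+/* For illustration, the ARB_depth_clamp row of extensions_table.h makes the
+ * EXT() macro above generate (modulo formatting):
+ *
+ *    static inline bool
+ *    _mesa_has_ARB_depth_clamp(const struct gl_context *ctx)
+ *    {
+ *       return ctx->Extensions.ARB_depth_clamp &&
+ *              (ctx->Extensions.Version >=
+ *               _mesa_extension_table[MESA_EXTENSION_ARB_depth_clamp].version[ctx->API]);
+ *    }
+ */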
+
+/* Sometimes the driver wants to query the extension override status before
+ * a context is created. These variables are filled with extension override
+ * information before context creation.
+ *
+ * This can be useful during extension bring-up when an extension is
+ * partially implemented, but cannot yet be advertised as supported.
+ *
+ * Use it with care and keep access read-only.
+ */
+extern struct gl_extensions _mesa_extension_override_enables;
+extern struct gl_extensions _mesa_extension_override_disables;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.c b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.c
new file mode 100644
index 0000000000..6241705de1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.c
@@ -0,0 +1,50 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "main/extensions.h"
+
+/**
+ * Given a member \c x of struct gl_extensions, return offset of
+ * \c x in bytes.
+ */
+#define o(x) offsetof(struct gl_extensions, x)
+
+/**
+ * \brief Table of supported OpenGL extensions for all APIs.
+ */
+const struct mesa_extension _mesa_extension_table[] = {
+#define EXT(name_str, driver_cap, gll_ver, glc_ver, gles_ver, gles2_ver, yyyy) \
+ { .name = "GL_" #name_str, .offset = o(driver_cap), \
+ .version = { \
+ [API_OPENGL_COMPAT] = gll_ver, \
+ [API_OPENGL_CORE] = glc_ver, \
+ [API_OPENGLES] = gles_ver, \
+ [API_OPENGLES2] = gles2_ver, \
+ }, \
+ .year = yyyy \
+ },
+#include "extensions_table.h"
+#undef EXT
+};
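+/* As an example, the ARB_depth_clamp row of extensions_table.h expands to an
+ * entry equivalent to the following (x is ~0, which is 0xff in a uint8_t):
+ *
+ *    { .name = "GL_ARB_depth_clamp", .offset = o(ARB_depth_clamp),
+ *      .version = {
+ *         [API_OPENGL_COMPAT] = 0,      (GLL: no minimum version)
+ *         [API_OPENGL_CORE]   = 0,      (GLC: no minimum version)
+ *         [API_OPENGLES]      = 0xff,   (x: never exposed)
+ *         [API_OPENGLES2]     = 0xff,   (x: never exposed)
+ *      },
+ *      .year = 2003 },
+ */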
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.h
new file mode 100644
index 0000000000..18d5f8073b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/extensions_table.h
@@ -0,0 +1,490 @@
+/* The extension table is alphabetically sorted by the extension name string column. */
+
+#define GLL 0
+#define GLC 0
+#define ES1 0
+#define ES2 0
+#define x ~0
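+/* In the columns below, GLL/GLC/ES1/ES2 (0) mean "exposed in any version of
+ * that API once the driver capability is set", a number such as 30 means
+ * "requires version 3.0 of that API" (10 * major + minor, per extensions.h),
+ * and x (~0) means "never exposed in that API". */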
+
+EXT(3DFX_texture_compression_FXT1 , TDFX_texture_compression_FXT1 , GLL, GLC, x , x , 1999)
+
+EXT(AMD_compressed_ATC_texture , AMD_compressed_ATC_texture , x , x , ES1, ES2, 2008)
+EXT(AMD_conservative_depth , ARB_conservative_depth , GLL, GLC, x , x , 2009)
+EXT(AMD_depth_clamp_separate , AMD_depth_clamp_separate , GLL, GLC, x , x , 2009)
+EXT(AMD_draw_buffers_blend , ARB_draw_buffers_blend , GLL, GLC, x , x , 2009)
+EXT(AMD_framebuffer_multisample_advanced , AMD_framebuffer_multisample_advanced , GLL, GLC, x , ES2, 2018)
+EXT(AMD_gpu_shader_int64 , ARB_gpu_shader_int64 , x , GLC, x , x , 2015)
+EXT(AMD_multi_draw_indirect , ARB_draw_indirect , GLL, GLC, x , x , 2011)
+EXT(AMD_performance_monitor , AMD_performance_monitor , GLL, GLC, x , ES2, 2007)
+EXT(AMD_pinned_memory , AMD_pinned_memory , GLL, GLC, x , x , 2013)
+EXT(AMD_query_buffer_object , ARB_query_buffer_object , GLL, GLC, x , x , 2012)
+EXT(AMD_seamless_cubemap_per_texture , AMD_seamless_cubemap_per_texture , GLL, GLC, x , x , 2009)
+EXT(AMD_shader_stencil_export , ARB_shader_stencil_export , GLL, GLC, x , x , 2009)
+EXT(AMD_shader_trinary_minmax , dummy_true , GLL, GLC, x , x , 2012)
+EXT(AMD_texture_texture4 , ARB_texture_gather , GLL, GLC, x , x , 2008)
+EXT(AMD_vertex_shader_layer , AMD_vertex_shader_layer , GLL, GLC, x , x , 2012)
+EXT(AMD_vertex_shader_viewport_index , AMD_vertex_shader_viewport_index , GLL, GLC, x , x , 2012)
+
+EXT(ANDROID_extension_pack_es31a , ANDROID_extension_pack_es31a , x , x , x , 31, 2014)
+
+EXT(ANGLE_texture_compression_dxt3 , ANGLE_texture_compression_dxt , GLL, GLC, ES1, ES2, 2011)
+EXT(ANGLE_texture_compression_dxt5 , ANGLE_texture_compression_dxt , GLL, GLC, ES1, ES2, 2011)
+
+EXT(APPLE_object_purgeable , APPLE_object_purgeable , GLL, GLC, x , x , 2006)
+EXT(APPLE_packed_pixels , dummy_true , GLL, x , x , x , 2002)
+EXT(APPLE_texture_max_level , dummy_true , x , x , ES1, ES2, 2009)
+
+EXT(ARB_ES2_compatibility , ARB_ES2_compatibility , GLL, GLC, x , x , 2009)
+EXT(ARB_ES3_1_compatibility , ARB_ES3_1_compatibility , GLL, GLC, x , x , 2014)
+EXT(ARB_ES3_2_compatibility , ARB_ES3_2_compatibility , x , GLC, x , x , 2015)
+EXT(ARB_ES3_compatibility , ARB_ES3_compatibility , GLL, GLC, x , x , 2012)
+EXT(ARB_arrays_of_arrays , ARB_arrays_of_arrays , GLL, GLC, x , x , 2012)
+EXT(ARB_base_instance , ARB_base_instance , GLL, GLC, x , x , 2011)
+EXT(ARB_bindless_texture , ARB_bindless_texture , GLL, GLC, x , x , 2013)
+EXT(ARB_blend_func_extended , ARB_blend_func_extended , GLL, GLC, x , x , 2009)
+EXT(ARB_buffer_storage , ARB_buffer_storage , GLL, GLC, x , x , 2013)
+EXT(ARB_clear_buffer_object , dummy_true , GLL, GLC, x , x , 2012)
+EXT(ARB_clear_texture , ARB_clear_texture , GLL, GLC, x , x , 2013)
+EXT(ARB_clip_control , ARB_clip_control , GLL, GLC, x , x , 2014)
+EXT(ARB_color_buffer_float , ARB_color_buffer_float , GLL, GLC, x , x , 2004)
+EXT(ARB_compatibility , ARB_compatibility , GLL, x , x , x , 2009)
+EXT(ARB_compressed_texture_pixel_storage , dummy_true , GLL, GLC, x , x , 2011)
+EXT(ARB_compute_shader , ARB_compute_shader , GLL, GLC, x , x , 2012)
+EXT(ARB_compute_variable_group_size , ARB_compute_variable_group_size , GLL, GLC, x , x , 2013)
+EXT(ARB_conditional_render_inverted , ARB_conditional_render_inverted , GLL, GLC, x , x , 2014)
+EXT(ARB_conservative_depth , ARB_conservative_depth , GLL, GLC, x , x , 2011)
+EXT(ARB_copy_buffer , dummy_true , GLL, GLC, x , x , 2008)
+EXT(ARB_copy_image , ARB_copy_image , GLL, GLC, x , x , 2012)
+EXT(ARB_cull_distance , ARB_cull_distance , GLL, GLC, x , x , 2014)
+EXT(ARB_debug_output , dummy_true , GLL, GLC, x , x , 2009)
+EXT(ARB_depth_buffer_float , ARB_depth_buffer_float , GLL, GLC, x , x , 2008)
+EXT(ARB_depth_clamp , ARB_depth_clamp , GLL, GLC, x , x , 2003)
+EXT(ARB_depth_texture , ARB_depth_texture , GLL, x , x , x , 2001)
+EXT(ARB_derivative_control , ARB_derivative_control , GLL, GLC, x , x , 2014)
+EXT(ARB_direct_state_access , dummy_true , 31, GLC, x , x , 2014)
+EXT(ARB_draw_buffers , dummy_true , GLL, GLC, x , x , 2002)
+EXT(ARB_draw_buffers_blend , ARB_draw_buffers_blend , GLL, GLC, x , x , 2009)
+EXT(ARB_draw_elements_base_vertex , ARB_draw_elements_base_vertex , GLL, GLC, x , x , 2009)
+EXT(ARB_draw_indirect , ARB_draw_indirect , GLL, GLC, x , x , 2010)
+EXT(ARB_draw_instanced , ARB_draw_instanced , GLL, GLC, x , x , 2008)
+EXT(ARB_enhanced_layouts , ARB_enhanced_layouts , GLL, GLC, x , x , 2013)
+EXT(ARB_explicit_attrib_location , ARB_explicit_attrib_location , GLL, GLC, x , x , 2009)
+EXT(ARB_explicit_uniform_location , ARB_explicit_uniform_location , GLL, GLC, x , x , 2012)
+EXT(ARB_fragment_coord_conventions , ARB_fragment_coord_conventions , GLL, GLC, x , x , 2009)
+EXT(ARB_fragment_layer_viewport , ARB_fragment_layer_viewport , GLL, GLC, x , x , 2012)
+EXT(ARB_fragment_program , ARB_fragment_program , GLL, x , x , x , 2002)
+EXT(ARB_fragment_program_shadow , ARB_fragment_program_shadow , GLL, x , x , x , 2003)
+EXT(ARB_fragment_shader , ARB_fragment_shader , GLL, GLC, x , x , 2002)
+EXT(ARB_fragment_shader_interlock , ARB_fragment_shader_interlock , GLL, GLC, x , x , 2015)
+EXT(ARB_framebuffer_no_attachments , ARB_framebuffer_no_attachments , GLL, GLC, x , x , 2012)
+EXT(ARB_framebuffer_object , ARB_framebuffer_object , GLL, GLC, x , x , 2005)
+EXT(ARB_framebuffer_sRGB , EXT_framebuffer_sRGB , GLL, GLC, x , x , 1998)
+EXT(ARB_get_program_binary , dummy_true , GLL, GLC, x , x , 2010)
+EXT(ARB_get_texture_sub_image , dummy_true , GLL, GLC, x , x , 2014)
+EXT(ARB_gl_spirv , ARB_gl_spirv , GLL, GLC, x , x , 2016)
+EXT(ARB_gpu_shader5 , ARB_gpu_shader5 , GLL, GLC, x , x , 2010)
+EXT(ARB_gpu_shader_fp64 , ARB_gpu_shader_fp64 , 32, GLC, x , x , 2010)
+EXT(ARB_gpu_shader_int64 , ARB_gpu_shader_int64 , 40, GLC, x , x , 2015)
+EXT(ARB_half_float_pixel , dummy_true , GLL, GLC, x , x , 2003)
+EXT(ARB_half_float_vertex , ARB_half_float_vertex , GLL, GLC, x , x , 2008)
+EXT(ARB_indirect_parameters , ARB_indirect_parameters , GLL, GLC, x , x , 2013)
+EXT(ARB_instanced_arrays , ARB_instanced_arrays , GLL, GLC, x , x , 2008)
+EXT(ARB_internalformat_query , ARB_internalformat_query , GLL, GLC, x , x , 2011)
+EXT(ARB_internalformat_query2 , ARB_internalformat_query2 , GLL, GLC, x , x , 2013)
+EXT(ARB_invalidate_subdata , dummy_true , GLL, GLC, x , x , 2012)
+EXT(ARB_map_buffer_alignment , dummy_true , GLL, GLC, x , x , 2011)
+EXT(ARB_map_buffer_range , ARB_map_buffer_range , GLL, GLC, x , x , 2008)
+EXT(ARB_multi_bind , dummy_true , GLL, GLC, x , x , 2013)
+EXT(ARB_multi_draw_indirect , ARB_draw_indirect , GLL, GLC, x , x , 2012)
+EXT(ARB_multisample , dummy_true , GLL, x , x , x , 1994)
+EXT(ARB_multitexture , dummy_true , GLL, x , x , x , 1998)
+EXT(ARB_occlusion_query , ARB_occlusion_query , GLL, x , x , x , 2001)
+EXT(ARB_occlusion_query2 , ARB_occlusion_query2 , GLL, GLC, x , x , 2003)
+EXT(ARB_parallel_shader_compile , dummy_true , GLL, GLC, x , x , 2015)
+EXT(ARB_pipeline_statistics_query , ARB_pipeline_statistics_query , GLL, GLC, x , x , 2014)
+EXT(ARB_pixel_buffer_object , EXT_pixel_buffer_object , GLL, GLC, x , x , 2004)
+EXT(ARB_point_parameters , EXT_point_parameters , GLL, x , x , x , 1997)
+EXT(ARB_point_sprite , ARB_point_sprite , GLL, GLC, x , x , 2003)
+EXT(ARB_polygon_offset_clamp , ARB_polygon_offset_clamp , GLL, GLC, x , x , 2017)
+EXT(ARB_post_depth_coverage , ARB_post_depth_coverage , GLL, GLC, x , x, 2015)
+EXT(ARB_program_interface_query , dummy_true , GLL, GLC, x , x , 2012)
+EXT(ARB_provoking_vertex , EXT_provoking_vertex , GLL, GLC, x , x , 2009)
+EXT(ARB_query_buffer_object , ARB_query_buffer_object , GLL, GLC, x , x , 2013)
+EXT(ARB_robust_buffer_access_behavior , ARB_robust_buffer_access_behavior , GLL, GLC, x , x , 2012)
+EXT(ARB_robustness , dummy_true , GLL, GLC, x , x , 2010)
+EXT(ARB_sample_locations , ARB_sample_locations , GLL, GLC, x , x , 2015)
+EXT(ARB_sample_shading , ARB_sample_shading , GLL, GLC, x , x , 2009)
+EXT(ARB_sampler_objects , dummy_true , GLL, GLC, x , x , 2009)
+EXT(ARB_seamless_cube_map , ARB_seamless_cube_map , GLL, GLC, x , x , 2009)
+EXT(ARB_seamless_cubemap_per_texture , AMD_seamless_cubemap_per_texture , GLL, GLC, x , x , 2013)
+EXT(ARB_separate_shader_objects , dummy_true , GLL, GLC, x , x , 2010)
+EXT(ARB_shader_atomic_counter_ops , ARB_shader_atomic_counter_ops , GLL, GLC, x , x , 2015)
+EXT(ARB_shader_atomic_counters , ARB_shader_atomic_counters , GLL, GLC, x , x , 2011)
+EXT(ARB_shader_ballot , ARB_shader_ballot , GLL, GLC, x , x , 2015)
+EXT(ARB_shader_bit_encoding , ARB_shader_bit_encoding , GLL, GLC, x , x , 2010)
+EXT(ARB_shader_clock , ARB_shader_clock , GLL, GLC, x , x , 2015)
+EXT(ARB_shader_draw_parameters , ARB_shader_draw_parameters , GLL, GLC, x , x , 2013)
+EXT(ARB_shader_group_vote , ARB_shader_group_vote , GLL, GLC, x , x , 2013)
+EXT(ARB_shader_image_load_store , ARB_shader_image_load_store , GLL, GLC, x , x , 2011)
+EXT(ARB_shader_image_size , ARB_shader_image_size , GLL, GLC, x , x , 2012)
+EXT(ARB_shader_objects , dummy_true , GLL, GLC, x , x , 2002)
+EXT(ARB_shader_precision , ARB_shader_precision , GLL, GLC, x , x , 2010)
+EXT(ARB_shader_stencil_export , ARB_shader_stencil_export , GLL, GLC, x , x , 2009)
+EXT(ARB_shader_storage_buffer_object , ARB_shader_storage_buffer_object , GLL, GLC, x , x , 2012)
+EXT(ARB_shader_subroutine , dummy_true , 31, GLC, x , x , 2010)
+EXT(ARB_shader_texture_image_samples , ARB_shader_texture_image_samples , GLL, GLC, x , x , 2014)
+EXT(ARB_shader_texture_lod , ARB_shader_texture_lod , GLL, GLC, x , x , 2009)
+EXT(ARB_shader_viewport_layer_array , ARB_shader_viewport_layer_array , GLL, GLC, x , x , 2015)
+EXT(ARB_shading_language_100 , dummy_true , GLL, x , x , x , 2003)
+EXT(ARB_shading_language_420pack , ARB_shading_language_420pack , GLL, GLC, x , x , 2011)
+EXT(ARB_shading_language_include , dummy_true , GLL, GLC, x , x , 2013)
+EXT(ARB_shading_language_packing , ARB_shading_language_packing , GLL, GLC, x , x , 2011)
+EXT(ARB_shadow , ARB_shadow , GLL, x , x , x , 2001)
+EXT(ARB_sparse_buffer , ARB_sparse_buffer , GLL, GLC, x , x , 2014)
+EXT(ARB_spirv_extensions , ARB_spirv_extensions , GLL, GLC, x , x , 2016)
+EXT(ARB_stencil_texturing , ARB_stencil_texturing , GLL, GLC, x , x , 2012)
+EXT(ARB_sync , ARB_sync , GLL, GLC, x , x , 2003)
+EXT(ARB_tessellation_shader , ARB_tessellation_shader , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_barrier , NV_texture_barrier , GLL, GLC, x , x , 2014)
+EXT(ARB_texture_border_clamp , ARB_texture_border_clamp , GLL, x , x , x , 2000)
+EXT(ARB_texture_buffer_object , ARB_texture_buffer_object , GLL, GLC, x , x , 2008)
+EXT(ARB_texture_buffer_object_rgb32 , ARB_texture_buffer_object_rgb32 , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_buffer_range , ARB_texture_buffer_range , GLL, GLC, x , x , 2012)
+EXT(ARB_texture_compression , dummy_true , GLL, x , x , x , 2000)
+EXT(ARB_texture_compression_bptc , ARB_texture_compression_bptc , GLL, GLC, x , x , 2010)
+EXT(ARB_texture_compression_rgtc , ARB_texture_compression_rgtc , GLL, GLC, x , x , 2004)
+EXT(ARB_texture_cube_map , ARB_texture_cube_map , GLL, x , x , x , 1999)
+EXT(ARB_texture_cube_map_array , ARB_texture_cube_map_array , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_env_add , dummy_true , GLL, x , x , x , 1999)
+EXT(ARB_texture_env_combine , ARB_texture_env_combine , GLL, x , x , x , 2001)
+EXT(ARB_texture_env_crossbar , ARB_texture_env_crossbar , GLL, x , x , x , 2001)
+EXT(ARB_texture_env_dot3 , ARB_texture_env_dot3 , GLL, x , x , x , 2001)
+EXT(ARB_texture_filter_anisotropic , ARB_texture_filter_anisotropic , GLL, GLC, x , x , 2017)
+EXT(ARB_texture_float , ARB_texture_float , GLL, GLC, x , x , 2004)
+EXT(ARB_texture_gather , ARB_texture_gather , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_mirror_clamp_to_edge , ARB_texture_mirror_clamp_to_edge , GLL, GLC, x , x , 2013)
+EXT(ARB_texture_mirrored_repeat , dummy_true , GLL, x , x , x , 2001)
+EXT(ARB_texture_multisample , ARB_texture_multisample , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_non_power_of_two , ARB_texture_non_power_of_two , GLL, GLC, x , x , 2003)
+EXT(ARB_texture_query_levels , ARB_texture_query_levels , GLL, GLC, x , x , 2012)
+EXT(ARB_texture_query_lod , ARB_texture_query_lod , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_rectangle , NV_texture_rectangle , GLL, GLC, x , x , 2004)
+EXT(ARB_texture_rg , ARB_texture_rg , GLL, GLC, x , x , 2008)
+EXT(ARB_texture_rgb10_a2ui , ARB_texture_rgb10_a2ui , GLL, GLC, x , x , 2009)
+EXT(ARB_texture_stencil8 , ARB_texture_stencil8 , GLL, GLC, x , x , 2013)
+EXT(ARB_texture_storage , dummy_true , GLL, GLC, x , x , 2011)
+EXT(ARB_texture_storage_multisample , ARB_texture_multisample , GLL, GLC, x , x , 2012)
+EXT(ARB_texture_swizzle , EXT_texture_swizzle , GLL, GLC, x , x , 2008)
+EXT(ARB_texture_view , ARB_texture_view , GLL, GLC, x , x , 2012)
+EXT(ARB_timer_query , ARB_timer_query , GLL, GLC, x , x , 2010)
+EXT(ARB_transform_feedback2 , ARB_transform_feedback2 , GLL, GLC, x , x , 2010)
+EXT(ARB_transform_feedback3 , ARB_transform_feedback3 , GLL, GLC, x , x , 2010)
+EXT(ARB_transform_feedback_instanced , ARB_transform_feedback_instanced , GLL, GLC, x , x , 2011)
+EXT(ARB_transform_feedback_overflow_query , ARB_transform_feedback_overflow_query , GLL, GLC, x , x , 2014)
+EXT(ARB_transpose_matrix , dummy_true , GLL, x , x , x , 1999)
+EXT(ARB_uniform_buffer_object , ARB_uniform_buffer_object , GLL, GLC, x , x , 2009)
+EXT(ARB_vertex_array_bgra , EXT_vertex_array_bgra , GLL, GLC, x , x , 2008)
+EXT(ARB_vertex_array_object , dummy_true , GLL, GLC, x , x , 2006)
+EXT(ARB_vertex_attrib_64bit , ARB_vertex_attrib_64bit , 32, GLC, x , x , 2010)
+EXT(ARB_vertex_attrib_binding , dummy_true , GLL, GLC, x , x , 2012)
+EXT(ARB_vertex_buffer_object , dummy_true , GLL, GLC, x , x , 2003)
+EXT(ARB_vertex_program , ARB_vertex_program , GLL, x , x , x , 2002)
+EXT(ARB_vertex_shader , ARB_vertex_shader , GLL, GLC, x , x , 2002)
+EXT(ARB_vertex_type_10f_11f_11f_rev , ARB_vertex_type_10f_11f_11f_rev , GLL, GLC, x , x , 2013)
+EXT(ARB_vertex_type_2_10_10_10_rev , ARB_vertex_type_2_10_10_10_rev , GLL, GLC, x , x , 2009)
+EXT(ARB_viewport_array , ARB_viewport_array , GLL, GLC, x , x , 2010)
+EXT(ARB_window_pos , dummy_true , GLL, x , x , x , 2001)
+
+EXT(ATI_blend_equation_separate , EXT_blend_equation_separate , GLL, GLC, x , x , 2003)
+EXT(ATI_draw_buffers , dummy_true , GLL, x , x , x , 2002)
+EXT(ATI_fragment_shader , ATI_fragment_shader , GLL, x , x , x , 2001)
+EXT(ATI_meminfo , ATI_meminfo , GLL, GLC, x , x , 2009)
+EXT(ATI_separate_stencil , EXT_stencil_two_side , GLL, x , x , x , 2006)
+EXT(ATI_texture_compression_3dc , ATI_texture_compression_3dc , GLL, x , x , x , 2004)
+EXT(ATI_texture_env_combine3 , ATI_texture_env_combine3 , GLL, x , x , x , 2002)
+EXT(ATI_texture_float , ARB_texture_float , GLL, GLC, x , x , 2002)
+EXT(ATI_texture_mirror_once , ATI_texture_mirror_once , GLL, GLC, x , x , 2006)
+
+EXT(EXT_EGL_image_storage , EXT_EGL_image_storage , GLL, GLC , x , 30, 2018)
+EXT(EXT_EGL_sync , dummy_true , GLL, GLC, x , x , 2019)
+EXT(EXT_abgr , dummy_true , GLL, GLC, x , x , 1995)
+EXT(EXT_base_instance , ARB_base_instance , x , x , x , 30, 2014)
+EXT(EXT_bgra , dummy_true , GLL, x , x , x , 1995)
+EXT(EXT_blend_color , EXT_blend_color , GLL, x , x , x , 1995)
+EXT(EXT_blend_equation_separate , EXT_blend_equation_separate , GLL, GLC, x , x , 2003)
+EXT(EXT_blend_func_extended , ARB_blend_func_extended , x , x , x , ES2, 2015)
+EXT(EXT_blend_func_separate , EXT_blend_func_separate , GLL, x , x , x , 1999)
+EXT(EXT_blend_minmax , EXT_blend_minmax , GLL, x , ES1, ES2, 1995)
+EXT(EXT_blend_subtract , dummy_true , GLL, x , x , x , 1995)
+EXT(EXT_buffer_storage , ARB_buffer_storage , x , x , x , 31, 2015)
+EXT(EXT_clip_control , ARB_clip_control , x , x , x , ES2, 2017)
+EXT(EXT_clip_cull_distance , ARB_cull_distance , x , x , x , 30, 2016)
+EXT(EXT_color_buffer_float , dummy_true , x , x , x , 30, 2013)
+EXT(EXT_compiled_vertex_array , dummy_true , GLL, x , x , x , 1996)
+EXT(EXT_compressed_ETC1_RGB8_sub_texture , OES_compressed_ETC1_RGB8_texture , x , x , ES1, ES2, 2014)
+EXT(EXT_copy_image , OES_copy_image , x , x , x , 30, 2014)
+EXT(EXT_copy_texture , dummy_true , GLL, x , x , x , 1995)
+EXT(EXT_demote_to_helper_invocation , EXT_demote_to_helper_invocation , GLL, GLC, ES1, ES2, 2019)
+EXT(EXT_depth_bounds_test , EXT_depth_bounds_test , GLL, GLC, x , x , 2002)
+EXT(EXT_depth_clamp , ARB_depth_clamp , x , x , x , ES2, 2019)
+EXT(EXT_direct_state_access , dummy_true , GLL, x , x , x , 2010)
+EXT(EXT_discard_framebuffer , dummy_true , x , x , ES1, ES2, 2009)
+EXT(EXT_disjoint_timer_query , EXT_disjoint_timer_query , x , x , x , ES2, 2016)
+EXT(EXT_draw_buffers , dummy_true , x , x , x , ES2, 2012)
+EXT(EXT_draw_buffers2 , EXT_draw_buffers2 , GLL, GLC, x , x , 2006)
+EXT(EXT_draw_buffers_indexed , ARB_draw_buffers_blend , x , x , x , 30, 2014)
+EXT(EXT_draw_elements_base_vertex , ARB_draw_elements_base_vertex , x , x , x , ES2, 2014)
+EXT(EXT_draw_instanced , ARB_draw_instanced , GLL, GLC, x , ES2 , 2006)
+EXT(EXT_draw_range_elements , dummy_true , GLL, x , x , x , 1997)
+EXT(EXT_float_blend , EXT_float_blend , x , x , x , 30, 2015)
+EXT(EXT_fog_coord , dummy_true , GLL, x , x , x , 1999)
+EXT(EXT_frag_depth , dummy_true , x , x , x , ES2, 2010)
+EXT(EXT_framebuffer_blit , dummy_true , GLL, GLC, x , x , 2005)
+EXT(EXT_framebuffer_multisample , EXT_framebuffer_multisample , GLL, GLC, x , x , 2005)
+EXT(EXT_framebuffer_multisample_blit_scaled , EXT_framebuffer_multisample_blit_scaled, GLL, GLC, x , x , 2011)
+EXT(EXT_framebuffer_object , dummy_true , GLL, GLC, x , x , 2000)
+EXT(EXT_framebuffer_sRGB , EXT_framebuffer_sRGB , GLL, GLC, x , x , 1998)
+EXT(EXT_geometry_point_size , OES_geometry_shader , x , x , x , 31, 2015)
+EXT(EXT_geometry_shader , OES_geometry_shader , x , x , x , 31, 2015)
+EXT(EXT_gpu_program_parameters , EXT_gpu_program_parameters , GLL, x , x , x , 2006)
+/* Since all of EXT_gpu_shader4's features were rolled into GLSL 1.40, it shouldn't be exposed in a core context.
+ * Additionally, EXT_gpu_shader4 would reintroduce functions that were removed in GLSL 1.40. */
+EXT(EXT_gpu_shader4 , EXT_gpu_shader4 , GLL, x , x , x , 2006)
+EXT(EXT_gpu_shader5 , ARB_gpu_shader5 , x , x , x , 31, 2014)
+EXT(EXT_map_buffer_range , ARB_map_buffer_range , x , x , ES1, ES2, 2012)
+EXT(EXT_memory_object , EXT_memory_object , GLL, GLC, x , ES2, 2017)
+EXT(EXT_memory_object_fd , EXT_memory_object_fd , GLL, GLC, x , ES2, 2017)
+EXT(EXT_multi_draw_arrays , dummy_true , GLL, x , ES1, ES2, 1999)
+EXT(EXT_multisampled_render_to_texture , EXT_multisampled_render_to_texture , x , x , x , ES2, 2016)
+EXT(EXT_multisampled_render_to_texture2 , EXT_multisampled_render_to_texture , x , x , x , ES2, 2016)
+EXT(EXT_occlusion_query_boolean , ARB_occlusion_query2 , x , x , x , ES2, 2011)
+EXT(EXT_packed_depth_stencil , dummy_true , GLL, GLC, x , x , 2005)
+EXT(EXT_packed_float , EXT_packed_float , GLL, GLC, x , x , 2004)
+EXT(EXT_packed_pixels , dummy_true , GLL, x , x , x , 1997)
+EXT(EXT_pixel_buffer_object , EXT_pixel_buffer_object , GLL, GLC, x , x , 2004)
+EXT(EXT_point_parameters , EXT_point_parameters , GLL, x , x , x , 1997)
+EXT(EXT_polygon_offset_clamp , ARB_polygon_offset_clamp , GLL, GLC, ES1, ES2, 2014)
+EXT(EXT_primitive_bounding_box , OES_primitive_bounding_box , x , x , x , 31, 2014)
+EXT(EXT_provoking_vertex , EXT_provoking_vertex , GLL, GLC, x , x , 2009)
+EXT(EXT_read_format_bgra , dummy_true , x , x , ES1, ES2, 2009)
+EXT(EXT_render_snorm , EXT_render_snorm , x , x , x, 31, 2014)
+EXT(EXT_rescale_normal , dummy_true , GLL, x , x , x , 1997)
+EXT(EXT_robustness , KHR_robustness , x, x, x , ES2, 2011)
+EXT(EXT_sRGB_write_control , EXT_framebuffer_sRGB , x, x , x , 30, 2013)
+EXT(EXT_secondary_color , dummy_true , GLL, x , x , x , 1999)
+EXT(EXT_semaphore , EXT_semaphore , GLL, GLC, x , ES2, 2017)
+EXT(EXT_semaphore_fd , EXT_semaphore_fd , GLL, GLC, x , ES2, 2017)
+EXT(EXT_separate_shader_objects , dummy_true , x , x , x , ES2, 2013)
+EXT(EXT_separate_specular_color , dummy_true , GLL, x , x , x , 1997)
+EXT(EXT_shader_framebuffer_fetch , EXT_shader_framebuffer_fetch , GLL, GLC, x , ES2, 2013)
+EXT(EXT_shader_framebuffer_fetch_non_coherent, EXT_shader_framebuffer_fetch_non_coherent, GLL, GLC, x, ES2, 2018)
+EXT(EXT_shader_image_load_formatted , EXT_shader_image_load_formatted , GLL, GLC, x , x , 2014)
+EXT(EXT_shader_image_load_store , EXT_shader_image_load_store , GLL, GLC, x , x , 2010)
+EXT(EXT_shader_implicit_conversions , dummy_true , x , x , x , 31, 2013)
+EXT(EXT_shader_integer_mix , EXT_shader_integer_mix , GLL, GLC, x , 30, 2013)
+EXT(EXT_shader_io_blocks , dummy_true , x , x , x , 31, 2014)
+EXT(EXT_shader_samples_identical , EXT_shader_samples_identical , GLL, GLC, x , 31, 2015)
+EXT(EXT_shadow_funcs , ARB_shadow , GLL, x , x , x , 2002)
+EXT(EXT_stencil_two_side , EXT_stencil_two_side , GLL, x , x , x , 2001)
+EXT(EXT_stencil_wrap , dummy_true , GLL, x , x , x , 2002)
+EXT(EXT_subtexture , dummy_true , GLL, x , x , x , 1995)
+EXT(EXT_tessellation_point_size , ARB_tessellation_shader , x , x , x , 31, 2013)
+EXT(EXT_tessellation_shader , ARB_tessellation_shader , x , x , x , 31, 2013)
+EXT(EXT_texture , dummy_true , GLL, x , x , x , 1996)
+EXT(EXT_texture3D , dummy_true , GLL, x , x , x , 1996)
+EXT(EXT_texture_array , EXT_texture_array , GLL, GLC, x , x , 2006)
+EXT(EXT_texture_border_clamp , ARB_texture_border_clamp , x , x , x , ES2, 2014)
+EXT(EXT_texture_buffer , OES_texture_buffer , x , x , x , 31, 2014)
+EXT(EXT_texture_buffer_object , EXT_texture_buffer_object , GLL, x , x , x , 2007)
+EXT(EXT_texture_compression_bptc , ARB_texture_compression_bptc , x , x , x , 30, 2017)
+EXT(EXT_texture_compression_dxt1 , ANGLE_texture_compression_dxt , GLL, GLC, ES1, ES2, 2004)
+EXT(EXT_texture_compression_latc , EXT_texture_compression_latc , GLL, x , x , x , 2006)
+EXT(EXT_texture_compression_rgtc , ARB_texture_compression_rgtc , GLL, GLC, x , 30, 2004)
+EXT(EXT_texture_compression_s3tc , EXT_texture_compression_s3tc , GLL, GLC, x , ES2, 2000)
+EXT(EXT_texture_compression_s3tc_srgb , EXT_texture_compression_s3tc_srgb , x , x, x , ES2, 2016)
+EXT(EXT_texture_cube_map , ARB_texture_cube_map , GLL, x , x , x , 2001)
+EXT(EXT_texture_cube_map_array , OES_texture_cube_map_array , x , x , x , 31, 2014)
+EXT(EXT_texture_edge_clamp , dummy_true , GLL, x , x , x , 1997)
+EXT(EXT_texture_env_add , dummy_true , GLL, x , x , x , 1999)
+EXT(EXT_texture_env_combine , dummy_true , GLL, x , x , x , 2000)
+EXT(EXT_texture_env_dot3 , EXT_texture_env_dot3 , GLL, x , x , x , 2000)
+EXT(EXT_texture_filter_anisotropic , EXT_texture_filter_anisotropic , GLL, GLC, ES1, ES2, 1999)
+EXT(EXT_texture_format_BGRA8888 , dummy_true , x , x , ES1, ES2, 2005)
+EXT(EXT_texture_integer , EXT_texture_integer , GLL, GLC, x , x , 2006)
+EXT(EXT_texture_lod_bias , dummy_true , GLL, x , ES1, x , 1999)
+EXT(EXT_texture_mirror_clamp , EXT_texture_mirror_clamp , GLL, GLC, x , x , 2004)
+EXT(EXT_texture_norm16 , EXT_texture_norm16 , x , x , x , 31, 2014)
+EXT(EXT_texture_object , dummy_true , GLL, x , x , x , 1995)
+EXT(EXT_texture_query_lod , ARB_texture_query_lod , x , x , x , 30, 2019)
+EXT(EXT_texture_rectangle , NV_texture_rectangle , GLL, x , x , x , 2004)
+EXT(EXT_texture_rg , ARB_texture_rg , x , x , x , ES2, 2011)
+EXT(EXT_texture_sRGB , EXT_texture_sRGB , GLL, GLC, x , x , 2004)
+EXT(EXT_texture_sRGB_R8 , EXT_texture_sRGB_R8 , GLL ,GLC, x , 30, 2018)
+EXT(EXT_texture_sRGB_decode , EXT_texture_sRGB_decode , GLL, GLC, x , 30, 2006)
+EXT(EXT_texture_shadow_lod , EXT_texture_shadow_lod , GLL, GLC, x , 30, 2018)
+EXT(EXT_texture_shared_exponent , EXT_texture_shared_exponent , GLL, GLC, x , x , 2004)
+EXT(EXT_texture_snorm , EXT_texture_snorm , GLL, GLC, x , x , 2009)
+EXT(EXT_texture_swizzle , EXT_texture_swizzle , GLL, GLC, x , x , 2008)
+EXT(EXT_texture_type_2_10_10_10_REV , EXT_texture_type_2_10_10_10_REV , x , x , x , ES2, 2008)
+EXT(EXT_texture_view , OES_texture_view , x , x , x , 31, 2014)
+EXT(EXT_timer_query , EXT_timer_query , GLL, GLC, x , x , 2006)
+EXT(EXT_transform_feedback , EXT_transform_feedback , GLL, GLC, x , x , 2011)
+EXT(EXT_unpack_subimage , dummy_true , x , x , x , ES2, 2011)
+EXT(EXT_vertex_array , dummy_true , GLL, x , x , x , 1995)
+EXT(EXT_vertex_array_bgra , EXT_vertex_array_bgra , GLL, GLC, x , x , 2008)
+EXT(EXT_vertex_attrib_64bit , ARB_vertex_attrib_64bit , 32, GLC, x , x , 2010)
+EXT(EXT_window_rectangles , EXT_window_rectangles , GLL, GLC, x , 30, 2016)
+
+EXT(GREMEDY_string_marker , GREMEDY_string_marker , GLL, GLC, x , x , 2007)
+
+EXT(IBM_multimode_draw_arrays , dummy_true , GLL, GLC, x , x , 1998)
+EXT(IBM_rasterpos_clip , dummy_true , GLL, x , x , x , 1996)
+EXT(IBM_texture_mirrored_repeat , dummy_true , GLL, x , x , x , 1998)
+
+EXT(INGR_blend_func_separate , EXT_blend_func_separate , GLL, x , x , x , 1999)
+
+EXT(INTEL_blackhole_render , INTEL_blackhole_render , 30, 30, x , ES2, 2018)
+EXT(INTEL_conservative_rasterization , INTEL_conservative_rasterization , x , GLC, x , 31, 2013)
+EXT(INTEL_performance_query , INTEL_performance_query , GLL, GLC, x , ES2, 2013)
+EXT(INTEL_shader_atomic_float_minmax , INTEL_shader_atomic_float_minmax , GLL, GLC, x , x , 2018)
+EXT(INTEL_shader_integer_functions2 , INTEL_shader_integer_functions2 , GLL, GLC, x , x , 2018)
+
+EXT(KHR_blend_equation_advanced , KHR_blend_equation_advanced , GLL, GLC, x , ES2, 2014)
+EXT(KHR_blend_equation_advanced_coherent , KHR_blend_equation_advanced_coherent , GLL, GLC, x , ES2, 2014)
+EXT(KHR_context_flush_control , dummy_true , GLL, GLC, x , ES2, 2014)
+EXT(KHR_debug , dummy_true , GLL, GLC, 11, ES2, 2012)
+EXT(KHR_no_error , dummy_true , GLL, GLC, ES1, ES2, 2015)
+EXT(KHR_parallel_shader_compile , dummy_true , GLL, GLC, x , ES2, 2017)
+EXT(KHR_robust_buffer_access_behavior , ARB_robust_buffer_access_behavior , GLL, GLC, x , ES2, 2014)
+EXT(KHR_robustness , KHR_robustness , GLL, GLC, x , ES2, 2012)
+EXT(KHR_texture_compression_astc_hdr , KHR_texture_compression_astc_hdr , GLL, GLC, x , ES2, 2012)
+EXT(KHR_texture_compression_astc_ldr , KHR_texture_compression_astc_ldr , GLL, GLC, x , ES2, 2012)
+EXT(KHR_texture_compression_astc_sliced_3d , KHR_texture_compression_astc_sliced_3d , GLL, GLC, x , ES2, 2015)
+
+EXT(MESA_framebuffer_flip_y , MESA_framebuffer_flip_y , 43, 43, x , 30, 2018)
+EXT(MESA_pack_invert , MESA_pack_invert , GLL, GLC, x , x , 2002)
+EXT(MESA_shader_integer_functions , MESA_shader_integer_functions , GLL, GLC, x , 30, 2016)
+EXT(MESA_texture_signed_rgba , EXT_texture_snorm , GLL, GLC, x , x , 2009)
+EXT(MESA_tile_raster_order , MESA_tile_raster_order , GLL, GLC, x , ES2, 2017)
+EXT(MESA_window_pos , dummy_true , GLL, x , x , x , 2000)
+EXT(MESA_ycbcr_texture , MESA_ycbcr_texture , GLL, GLC, x , x , 2002)
+
+EXT(NVX_gpu_memory_info , NVX_gpu_memory_info , GLL, GLC, x , x , 2013)
+
+EXT(NV_alpha_to_coverage_dither_control , NV_alpha_to_coverage_dither_control , GLL, GLC, x , ES2, 2017)
+EXT(NV_blend_square , dummy_true , GLL, x , x , x , 1999)
+EXT(NV_compute_shader_derivatives , NV_compute_shader_derivatives , GLL, GLC, x , 32, 2018)
+EXT(NV_conditional_render , NV_conditional_render , GLL, GLC, x , ES2, 2008)
+EXT(NV_conservative_raster , NV_conservative_raster , GLL, GLC, ES1, ES2, 2015)
+EXT(NV_conservative_raster_dilate , NV_conservative_raster_dilate , GLL, GLC, ES1, ES2, 2015)
+EXT(NV_conservative_raster_pre_snap , NV_conservative_raster_pre_snap , GLL, GLC, ES1, ES2, 2017)
+EXT(NV_conservative_raster_pre_snap_triangles, NV_conservative_raster_pre_snap_triangles, GLL, GLC, ES1, ES2, 2015)
+EXT(NV_copy_image , NV_copy_image , GLL, GLC, x , x, 2009)
+EXT(NV_depth_clamp , ARB_depth_clamp , GLL, GLC, x , x , 2001)
+EXT(NV_draw_buffers , dummy_true , x , x , x , ES2, 2011)
+EXT(NV_fbo_color_attachments , dummy_true , x , x , x , ES2, 2010)
+EXT(NV_fill_rectangle , NV_fill_rectangle , GLL, GLC, x , x , 2015)
+EXT(NV_fog_distance , NV_fog_distance , GLL, x , x , x , 2001)
+EXT(NV_fragment_shader_interlock , ARB_fragment_shader_interlock , GLL, GLC, x , 31, 2015)
+EXT(NV_image_formats , ARB_shader_image_load_store , x , x , x , 31, 2014)
+EXT(NV_light_max_exponent , dummy_true , GLL, x , x , x , 1999)
+EXT(NV_packed_depth_stencil , dummy_true , GLL, GLC, x , x , 2000)
+EXT(NV_pixel_buffer_object , EXT_pixel_buffer_object , x , x , x , ES2, 2012)
+EXT(NV_point_sprite , NV_point_sprite , GLL, GLC, x , x , 2001)
+EXT(NV_primitive_restart , NV_primitive_restart , GLL, x , x , x , 2002)
+EXT(NV_read_buffer , dummy_true , x , x , x , ES2, 2011)
+EXT(NV_read_depth , dummy_true , x , x , x , ES2, 2011)
+EXT(NV_read_depth_stencil , dummy_true , x , x , x , ES2, 2011)
+EXT(NV_read_stencil , dummy_true , x , x , x , ES2, 2011)
+EXT(NV_sample_locations , ARB_sample_locations , GLL, GLC, x , ES2, 2015)
+EXT(NV_shader_atomic_float , NV_shader_atomic_float , GLL, GLC, x , x , 2012)
+EXT(NV_texgen_reflection , dummy_true , GLL, x , x , x , 1999)
+EXT(NV_texture_barrier , NV_texture_barrier , GLL, GLC, x , x , 2009)
+EXT(NV_texture_env_combine4 , NV_texture_env_combine4 , GLL, x , x , x , 1999)
+EXT(NV_texture_rectangle , NV_texture_rectangle , GLL, x , x , x , 2000)
+EXT(NV_vdpau_interop , NV_vdpau_interop , GLL, GLC, x , x , 2010)
+EXT(NV_viewport_array2 , NV_viewport_array2 , GLL, GLC, x , 31, 2015)
+EXT(NV_viewport_swizzle , NV_viewport_swizzle , GLL, GLC, x , 31, 2015)
+
+EXT(OES_EGL_image , OES_EGL_image , GLL, GLC, ES1, ES2, 2006) /* FIXME: Mesa expects GL_OES_EGL_image to be available in OpenGL contexts. */
+EXT(OES_EGL_image_external , OES_EGL_image_external , x , x , ES1, ES2, 2010)
+EXT(OES_EGL_image_external_essl3 , OES_EGL_image_external , x , x , x , 30, 2015)
+EXT(OES_EGL_sync , dummy_true , x , x , ES1, ES2, 2010)
+EXT(OES_blend_equation_separate , EXT_blend_equation_separate , x , x , ES1, x , 2009)
+EXT(OES_blend_func_separate , EXT_blend_func_separate , x , x , ES1, x , 2009)
+EXT(OES_blend_subtract , dummy_true , x , x , ES1, x , 2009)
+EXT(OES_byte_coordinates , dummy_true , x , x , ES1, x , 2002)
+EXT(OES_compressed_ETC1_RGB8_texture , OES_compressed_ETC1_RGB8_texture , x , x , ES1, ES2, 2005)
+EXT(OES_compressed_paletted_texture , dummy_true , x , x , ES1, x , 2003)
+EXT(OES_copy_image , OES_copy_image , x , x , x , 30, 2014)
+EXT(OES_depth24 , dummy_true , x , x , ES1, ES2, 2005)
+EXT(OES_depth32 , dummy_false , x , x , x , x , 2005)
+EXT(OES_depth_texture , ARB_depth_texture , x , x , x , ES2, 2006)
+EXT(OES_depth_texture_cube_map , OES_depth_texture_cube_map , x , x , x , ES2, 2012)
+EXT(OES_draw_buffers_indexed , ARB_draw_buffers_blend , x , x , x , 30, 2014)
+EXT(OES_draw_elements_base_vertex , ARB_draw_elements_base_vertex , x , x , x , ES2, 2014)
+EXT(OES_draw_texture , OES_draw_texture , x , x , ES1, x , 2004)
+EXT(OES_element_index_uint , dummy_true , x , x , ES1, ES2, 2005)
+EXT(OES_fbo_render_mipmap , dummy_true , x , x , ES1, ES2, 2005)
+EXT(OES_fixed_point , dummy_true , x , x , ES1, x , 2002)
+EXT(OES_framebuffer_object , dummy_true , x , x , ES1, x , 2005)
+EXT(OES_geometry_point_size , OES_geometry_shader , x , x , x , 31, 2015)
+EXT(OES_geometry_shader , OES_geometry_shader , x , x , x , 31, 2015)
+EXT(OES_get_program_binary , dummy_true , x , x , x , ES2, 2008)
+EXT(OES_gpu_shader5 , ARB_gpu_shader5 , x , x , x , 31, 2014)
+EXT(OES_mapbuffer , dummy_true , x , x , ES1, ES2, 2005)
+EXT(OES_packed_depth_stencil , dummy_true , x , x , ES1, ES2, 2007)
+EXT(OES_point_size_array , dummy_true , x , x , ES1, x , 2004)
+EXT(OES_point_sprite , ARB_point_sprite , x , x , ES1, x , 2004)
+EXT(OES_primitive_bounding_box , OES_primitive_bounding_box , x , x , x , 31, 2014)
+EXT(OES_query_matrix , dummy_true , x , x , ES1, x , 2003)
+EXT(OES_read_format , dummy_true , GLL, x , ES1, x , 2003)
+EXT(OES_required_internalformat , dummy_true , x , x , ES1, ES2, 2012)
+EXT(OES_rgb8_rgba8 , dummy_true , x , x , ES1, ES2, 2005)
+EXT(OES_sample_shading , OES_sample_variables , x , x , x , 30, 2014)
+EXT(OES_sample_variables , OES_sample_variables , x , x , x , 30, 2014)
+EXT(OES_shader_image_atomic , ARB_shader_image_load_store , x , x , x , 31, 2015)
+EXT(OES_shader_io_blocks , dummy_true , x , x , x , 31, 2014)
+EXT(OES_shader_multisample_interpolation , OES_sample_variables , x , x , x , 30, 2014)
+EXT(OES_single_precision , dummy_true , x , x , ES1, x , 2003)
+EXT(OES_standard_derivatives , OES_standard_derivatives , x , x , x , ES2, 2005)
+EXT(OES_stencil1 , dummy_false , x , x , x , x , 2005)
+EXT(OES_stencil4 , dummy_false , x , x , x , x , 2005)
+EXT(OES_stencil8 , dummy_true , x , x , ES1, ES2, 2005)
+EXT(OES_stencil_wrap , dummy_true , x , x , ES1, x , 2002)
+EXT(OES_surfaceless_context , dummy_true , x , x , ES1, ES2, 2012)
+EXT(OES_tessellation_point_size , ARB_tessellation_shader , x , x , x , 31, 2014)
+EXT(OES_tessellation_shader , ARB_tessellation_shader , x , x , x , 31, 2014)
+EXT(OES_texture_3D , dummy_true , x , x , x , ES2, 2005)
+EXT(OES_texture_border_clamp , ARB_texture_border_clamp , x , x , x , ES2, 2014)
+EXT(OES_texture_buffer , OES_texture_buffer , x , x , x , 31, 2014)
+EXT(OES_texture_compression_astc , OES_texture_compression_astc , x , x , ES1, ES2, 2015)
+EXT(OES_texture_cube_map , ARB_texture_cube_map , x , x , ES1, x , 2007)
+EXT(OES_texture_cube_map_array , OES_texture_cube_map_array , x , x , x , 31, 2014)
+EXT(OES_texture_env_crossbar , ARB_texture_env_crossbar , x , x , ES1, x , 2005)
+EXT(OES_texture_float , OES_texture_float , x , x , x , ES2, 2005)
+EXT(OES_texture_float_linear , OES_texture_float_linear , x , x , x , ES2, 2005)
+EXT(OES_texture_half_float , OES_texture_half_float , x , x , x , ES2, 2005)
+EXT(OES_texture_half_float_linear , OES_texture_half_float_linear , x , x , x , ES2, 2005)
+EXT(OES_texture_mirrored_repeat , dummy_true , x , x , ES1, x , 2005)
+EXT(OES_texture_npot , ARB_texture_non_power_of_two , x , x , ES1, ES2, 2005)
+EXT(OES_texture_stencil8 , ARB_texture_stencil8 , x , x , x , 30, 2014)
+EXT(OES_texture_storage_multisample_2d_array, ARB_texture_multisample , x , x , x , 31, 2014)
+EXT(OES_texture_view , OES_texture_view , x , x , x , 31, 2014)
+EXT(OES_vertex_array_object , dummy_true , x , x , ES1, ES2, 2010)
+EXT(OES_vertex_half_float , ARB_half_float_vertex , x , x , x , ES2, 2005)
+EXT(OES_viewport_array , OES_viewport_array , x , x , x , 31, 2010)
+
+EXT(S3_s3tc , ANGLE_texture_compression_dxt , GLL, GLC, x , x , 1999)
+
+EXT(SGIS_generate_mipmap , dummy_true , GLL, x , x , x , 1997)
+EXT(SGIS_texture_border_clamp , ARB_texture_border_clamp , GLL, x , x , x , 1997)
+EXT(SGIS_texture_edge_clamp , dummy_true , GLL, x , x , x , 1997)
+EXT(SGIS_texture_lod , dummy_true , GLL, x , x , x , 1997)
+
+EXT(SUN_multi_draw_arrays , dummy_true , GLL, x , x , x , 1999)
+#undef GLL
+#undef GLC
+#undef ES1
+#undef ES2
+#undef x
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/formats.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/formats.h
new file mode 100644
index 0000000000..f0e58b327e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/formats.h
@@ -0,0 +1,749 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (c) 2008-2009 VMware, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/*
+ * Authors:
+ * Brian Paul
+ */
+
+
+#ifndef FORMATS_H
+#define FORMATS_H
+
+
+#include <GL/gl.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include "gallium/include/pipe/p_format.h"
+#include "util/u_endian.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * OpenGL doesn't have GL_UNSIGNED_BYTE_4_4, so we must define our own type
+ * for GL_LUMINANCE4_ALPHA4.
+ */
+#define MESA_UNSIGNED_BYTE_4_4 (GL_UNSIGNED_BYTE<<1)
+
+
+/**
+ * Max number of bytes for any non-compressed pixel format below, or for
+ * intermediate pixel storage in Mesa. This should never be less than
+ * 16. Maybe 32 someday?
+ */
+#define MAX_PIXEL_BYTES 16
+
+/**
+ * Specifies the layout of a pixel format. See the MESA_FORMAT
+ * documentation below.
+ */
+enum mesa_format_layout {
+ MESA_FORMAT_LAYOUT_ARRAY,
+ MESA_FORMAT_LAYOUT_PACKED,
+ MESA_FORMAT_LAYOUT_S3TC,
+ MESA_FORMAT_LAYOUT_RGTC,
+ MESA_FORMAT_LAYOUT_LATC,
+ MESA_FORMAT_LAYOUT_FXT1,
+ MESA_FORMAT_LAYOUT_ETC1,
+ MESA_FORMAT_LAYOUT_ETC2,
+ MESA_FORMAT_LAYOUT_BPTC,
+ MESA_FORMAT_LAYOUT_ASTC,
+ MESA_FORMAT_LAYOUT_ATC,
+ MESA_FORMAT_LAYOUT_OTHER,
+};
+
+/**
+ * An enum representing different possible swizzling values. This is used
+ * to interpret the output of _mesa_get_format_swizzle
+ */
+enum {
+ MESA_FORMAT_SWIZZLE_X = 0,
+ MESA_FORMAT_SWIZZLE_Y = 1,
+ MESA_FORMAT_SWIZZLE_Z = 2,
+ MESA_FORMAT_SWIZZLE_W = 3,
+ MESA_FORMAT_SWIZZLE_ZERO = 4,
+ MESA_FORMAT_SWIZZLE_ONE = 5,
+ MESA_FORMAT_SWIZZLE_NONE = 6,
+};
+
+/**
+ * A uint32_t that encodes the information necessary to represent an
+ * array format
+ */
+typedef uint32_t mesa_array_format;
+
+/**
+ * Encoding for valid array format data types
+ */
+enum mesa_array_format_datatype {
+ MESA_ARRAY_FORMAT_TYPE_UBYTE = 0x0,
+ MESA_ARRAY_FORMAT_TYPE_USHORT = 0x1,
+ MESA_ARRAY_FORMAT_TYPE_UINT = 0x2,
+ MESA_ARRAY_FORMAT_TYPE_BYTE = 0x4,
+ MESA_ARRAY_FORMAT_TYPE_SHORT = 0x5,
+ MESA_ARRAY_FORMAT_TYPE_INT = 0x6,
+ MESA_ARRAY_FORMAT_TYPE_HALF = 0xd,
+ MESA_ARRAY_FORMAT_TYPE_FLOAT = 0xe,
+};
+
+enum mesa_array_format_base_format {
+ MESA_ARRAY_FORMAT_BASE_FORMAT_RGBA_VARIANTS = 0x0,
+ MESA_ARRAY_FORMAT_BASE_FORMAT_DEPTH = 0x1,
+ MESA_ARRAY_FORMAT_BASE_FORMAT_STENCIL = 0x2,
+};
+
+/**
+ * An enum of masks and flags used to encode/decode the information stored in a mesa_array_format
+ */
+enum {
+ MESA_ARRAY_FORMAT_TYPE_IS_SIGNED = 0x4,
+ MESA_ARRAY_FORMAT_TYPE_IS_FLOAT = 0x8,
+ MESA_ARRAY_FORMAT_TYPE_NORMALIZED = 0x10,
+ MESA_ARRAY_FORMAT_DATATYPE_MASK = 0xf,
+ MESA_ARRAY_FORMAT_TYPE_MASK = 0x1f,
+ MESA_ARRAY_FORMAT_TYPE_SIZE_MASK = 0x3,
+ MESA_ARRAY_FORMAT_NUM_CHANS_MASK = 0xe0,
+ MESA_ARRAY_FORMAT_SWIZZLE_X_MASK = 0x00700,
+ MESA_ARRAY_FORMAT_SWIZZLE_Y_MASK = 0x03800,
+ MESA_ARRAY_FORMAT_SWIZZLE_Z_MASK = 0x1c000,
+ MESA_ARRAY_FORMAT_SWIZZLE_W_MASK = 0xe0000,
+ MESA_ARRAY_FORMAT_BASE_FORMAT_MASK = 0x300000,
+ MESA_ARRAY_FORMAT_BIT = 0x80000000
+};
+
+#define MESA_ARRAY_FORMAT(BASE_FORMAT, SIZE, SIGNED, IS_FLOAT, NORM, NUM_CHANS, \
+ SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W) ( \
+ (((SIZE >> 1) ) & MESA_ARRAY_FORMAT_TYPE_SIZE_MASK) | \
+ (((SIGNED) << 2 ) & MESA_ARRAY_FORMAT_TYPE_IS_SIGNED) | \
+ (((IS_FLOAT) << 3 ) & MESA_ARRAY_FORMAT_TYPE_IS_FLOAT) | \
+ (((NORM) << 4 ) & MESA_ARRAY_FORMAT_TYPE_NORMALIZED) | \
+ (((NUM_CHANS) << 5 ) & MESA_ARRAY_FORMAT_NUM_CHANS_MASK) | \
+ (((SWIZZLE_X) << 8 ) & MESA_ARRAY_FORMAT_SWIZZLE_X_MASK) | \
+ (((SWIZZLE_Y) << 11) & MESA_ARRAY_FORMAT_SWIZZLE_Y_MASK) | \
+ (((SWIZZLE_Z) << 14) & MESA_ARRAY_FORMAT_SWIZZLE_Z_MASK) | \
+ (((SWIZZLE_W) << 17) & MESA_ARRAY_FORMAT_SWIZZLE_W_MASK) | \
+ (((BASE_FORMAT) << 20) & MESA_ARRAY_FORMAT_BASE_FORMAT_MASK) | \
+ MESA_ARRAY_FORMAT_BIT)
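+/* A worked example (illustrative only, not a definition used elsewhere): an
+ * RGBA format made of four normalized unsigned bytes, with the identity
+ * swizzle, would be encoded as
+ *
+ *    MESA_ARRAY_FORMAT(MESA_ARRAY_FORMAT_BASE_FORMAT_RGBA_VARIANTS,
+ *                      1,           SIZE: one byte per element
+ *                      0, 0,        not SIGNED, not IS_FLOAT
+ *                      1,           NORMalized
+ *                      4,           NUM_CHANS
+ *                      0, 1, 2, 3)  swizzle X,Y,Z,W = R,G,B,A
+ *
+ * for which _mesa_array_format_get_type_size() yields 1 and
+ * _mesa_array_format_get_num_channels() yields 4.
+ */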
+
+/**
+ * Various helpers to access the data encoded in a mesa_array_format
+ */
+static inline bool
+_mesa_array_format_is_signed(mesa_array_format f)
+{
+ return (f & MESA_ARRAY_FORMAT_TYPE_IS_SIGNED) != 0;
+}
+
+static inline bool
+_mesa_array_format_is_float(mesa_array_format f)
+{
+ return (f & MESA_ARRAY_FORMAT_TYPE_IS_FLOAT) != 0;
+}
+
+static inline bool
+_mesa_array_format_is_normalized(mesa_array_format f)
+{
+ return (f & MESA_ARRAY_FORMAT_TYPE_NORMALIZED) != 0;
+}
+
+static inline enum mesa_array_format_base_format
+_mesa_array_format_get_base_format(mesa_array_format f)
+{
+ return (enum mesa_array_format_base_format)
+ ((f & MESA_ARRAY_FORMAT_BASE_FORMAT_MASK) >> 20);
+}
+
+static inline enum mesa_array_format_datatype
+_mesa_array_format_get_datatype(mesa_array_format f)
+{
+ return (enum mesa_array_format_datatype)
+ (f & MESA_ARRAY_FORMAT_DATATYPE_MASK);
+}
+
+static inline int
+_mesa_array_format_datatype_get_size(enum mesa_array_format_datatype type)
+{
+ return 1 << (type & MESA_ARRAY_FORMAT_TYPE_SIZE_MASK);
+}
+
+static inline int
+_mesa_array_format_get_type_size(mesa_array_format f)
+{
+ return 1 << (f & MESA_ARRAY_FORMAT_TYPE_SIZE_MASK);
+}
+
+static inline int
+_mesa_array_format_get_num_channels(mesa_array_format f)
+{
+ return (f & MESA_ARRAY_FORMAT_NUM_CHANS_MASK) >> 5;
+}
+
+static inline void
+_mesa_array_format_get_swizzle(mesa_array_format f, uint8_t *swizzle)
+{
+ swizzle[0] = (f & MESA_ARRAY_FORMAT_SWIZZLE_X_MASK) >> 8;
+ swizzle[1] = (f & MESA_ARRAY_FORMAT_SWIZZLE_Y_MASK) >> 11;
+ swizzle[2] = (f & MESA_ARRAY_FORMAT_SWIZZLE_Z_MASK) >> 14;
+ swizzle[3] = (f & MESA_ARRAY_FORMAT_SWIZZLE_W_MASK) >> 17;
+}
+
+static inline void
+_mesa_array_format_set_swizzle(mesa_array_format *f,
+ int32_t x, int32_t y, int32_t z, int32_t w)
+{
+ *f &= ~(MESA_ARRAY_FORMAT_SWIZZLE_X_MASK |
+ MESA_ARRAY_FORMAT_SWIZZLE_Y_MASK |
+ MESA_ARRAY_FORMAT_SWIZZLE_Z_MASK |
+ MESA_ARRAY_FORMAT_SWIZZLE_W_MASK);
+
+ *f |= ((x << 8 ) & MESA_ARRAY_FORMAT_SWIZZLE_X_MASK) |
+ ((y << 11) & MESA_ARRAY_FORMAT_SWIZZLE_Y_MASK) |
+ ((z << 14) & MESA_ARRAY_FORMAT_SWIZZLE_Z_MASK) |
+ ((w << 17) & MESA_ARRAY_FORMAT_SWIZZLE_W_MASK);
+}
+
+/**
+ * A helper to know if the format stored in a uint32_t is a mesa_format
+ * or a mesa_array_format
+ */
+static inline bool
+_mesa_format_is_mesa_array_format(uint32_t f)
+{
+ return (f & MESA_ARRAY_FORMAT_BIT) != 0;
+}
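+/* Typical decoding pattern (a sketch): test the tag bit first, then use the
+ * accessors above:
+ *
+ *    if (_mesa_format_is_mesa_array_format(f)) {
+ *       uint8_t swz[4];
+ *       int chans = _mesa_array_format_get_num_channels(f);
+ *       int size = _mesa_array_format_get_type_size(f);
+ *       _mesa_array_format_get_swizzle(f, swz);
+ *       // bytes per pixel for an array format is chans * size
+ *    } else {
+ *       // f is a plain mesa_format (enum pipe_format), see below
+ *    }
+ */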
+
+/**
+ * Mesa texture/renderbuffer image formats. These are just other names of the
+ * gallium p_format.h formats.
+ */
+typedef enum pipe_format mesa_format;
+
+ /**
+ * \name Basic hardware formats
+ *
+ * The mesa format name specification is as follows:
+ *
+ * There shall be 3 naming format base types: those for component array
+ * formats (type A); those for compressed formats (type C); and those for
+ * packed component formats (type P). With type A formats, color component
+ * order does not change with endianness. Each format name shall begin with
+ * MESA_FORMAT_, followed by a component label (from the Component Label
+ * list below) for each component in the order that the component(s) occur
+ * in the format, except for non-linear color formats where the first
+ * letter shall be 'S'. For type P formats, each component label is
+ * followed by the number of bits that represent it in the fundamental
+ * data type used by the format.
+ *
+ * Following the listing of the component labels shall be an underscore; a
+ * compression type followed by an underscore for Type C formats only; a
+ * storage type from the list below; and a bit width for type A formats,
+ * which is the bit width for each array element.
+ *
+ *
+ * ---------- Format Base Type A: Array ----------
+ * MESA_FORMAT_[component list]_[storage type][array element bit width]
+ *
+ * examples:
+ * MESA_FORMAT_A_SNORM8 - uchar[i] = A
+ * MESA_FORMAT_RGBA_16 - ushort[i * 4 + 0] = R, ushort[i * 4 + 1] = G,
+ * ushort[i * 4 + 2] = B, ushort[i * 4 + 3] = A
+ * MESA_FORMAT_Z_UNORM32 - float[i] = Z
+ *
+ *
+ *
+ * ---------- Format Base Type C: Compressed ----------
+ * MESA_FORMAT_[component list*][_*][compression type][storage type*]
+ * * where required
+ *
+ * examples:
+ * MESA_FORMAT_RGB_ETC1
+ * MESA_FORMAT_RGBA_ETC2
+ * MESA_FORMAT_LATC1_UNORM
+ * MESA_FORMAT_RGBA_FXT1
+ *
+ *
+ *
+ * ---------- Format Base Type P: Packed ----------
+ * MESA_FORMAT_[[component list,bit width][storage type*][_]][_][storage type**]
+ * * when type differs between component
+ * ** when type applies to all components
+ *
+ * examples: msb <------ TEXEL BITS -----------> lsb
+ * MESA_FORMAT_A8B8G8R8_UNORM, RRRR RRRR GGGG GGGG BBBB BBBB AAAA AAAA
+ * MESA_FORMAT_R5G6B5_UNORM BBBB BGGG GGGR RRRR
+ * MESA_FORMAT_B4G4R4X4_UNORM XXXX RRRR GGGG BBBB
+ * MESA_FORMAT_Z32_FLOAT_S8X24_UINT
+ * MESA_FORMAT_R10G10B10A2_UINT
+ * MESA_FORMAT_R9G9B9E5_FLOAT
+ *
+ *
+ *
+ * ---------- Component Labels: ----------
+ * A - Alpha
+ * B - Blue
+ * DU - Delta U
+ * DV - Delta V
+ * E - Shared Exponent
+ * G - Green
+ * I - Intensity
+ * L - Luminance
+ * R - Red
+ * S - Stencil (when not followed by RGB or RGBA)
+ * U - Chrominance
+ * V - Chrominance
+ * Y - Luma
+ * X - Packing bits
+ * Z - Depth
+ *
+ *
+ *
+ * ---------- Type C Compression Types: ----------
+ * DXT1 - Color component labels shall be given
+ * DXT3 - Color component labels shall be given
+ * DXT5 - Color component labels shall be given
+ * ETC1 - No other information required
+ * ETC2 - No other information required
+ * FXT1 - Color component labels shall be given
+ * FXT3 - Color component labels shall be given
+ * LATC1 - Fundamental data type shall be given
+ * LATC2 - Fundamental data type shall be given
+ * RGTC1 - Color component labels and data type shall be given
+ * RGTC2 - Color component labels and data type shall be given
+ *
+ *
+ *
+ * ---------- Storage Types: ----------
+ * FLOAT
+ * SINT
+ * UINT
+ * SNORM
+ * UNORM
+ * SRGB - RGB components, or L, are UNORMs in sRGB color space.
+ * Alpha, if present, is linear.
+ *
+ */
+
+#define MESA_FORMAT_NONE PIPE_FORMAT_NONE
+#define MESA_FORMAT_A8B8G8R8_UNORM PIPE_FORMAT_ABGR8888_UNORM
+#define MESA_FORMAT_X8B8G8R8_UNORM PIPE_FORMAT_XBGR8888_UNORM
+#define MESA_FORMAT_R8G8B8A8_UNORM PIPE_FORMAT_RGBA8888_UNORM
+#define MESA_FORMAT_R8G8B8X8_UNORM PIPE_FORMAT_RGBX8888_UNORM
+#define MESA_FORMAT_B8G8R8A8_UNORM PIPE_FORMAT_BGRA8888_UNORM
+#define MESA_FORMAT_B8G8R8X8_UNORM PIPE_FORMAT_BGRX8888_UNORM
+#define MESA_FORMAT_A8R8G8B8_UNORM PIPE_FORMAT_ARGB8888_UNORM
+#define MESA_FORMAT_X8R8G8B8_UNORM PIPE_FORMAT_XRGB8888_UNORM
+#define MESA_FORMAT_B5G6R5_UNORM PIPE_FORMAT_B5G6R5_UNORM
+#define MESA_FORMAT_R5G6B5_UNORM PIPE_FORMAT_R5G6B5_UNORM
+#define MESA_FORMAT_B4G4R4A4_UNORM PIPE_FORMAT_B4G4R4A4_UNORM
+#define MESA_FORMAT_B4G4R4X4_UNORM PIPE_FORMAT_B4G4R4X4_UNORM
+#define MESA_FORMAT_A4R4G4B4_UNORM PIPE_FORMAT_A4R4G4B4_UNORM
+#define MESA_FORMAT_A1B5G5R5_UNORM PIPE_FORMAT_A1B5G5R5_UNORM
+#define MESA_FORMAT_X1B5G5R5_UNORM PIPE_FORMAT_X1B5G5R5_UNORM
+#define MESA_FORMAT_B5G5R5A1_UNORM PIPE_FORMAT_B5G5R5A1_UNORM
+#define MESA_FORMAT_B5G5R5X1_UNORM PIPE_FORMAT_B5G5R5X1_UNORM
+#define MESA_FORMAT_A1R5G5B5_UNORM PIPE_FORMAT_A1R5G5B5_UNORM
+#define MESA_FORMAT_L4A4_UNORM PIPE_FORMAT_L4A4_UNORM
+#define MESA_FORMAT_B2G3R3_UNORM PIPE_FORMAT_B2G3R3_UNORM
+#define MESA_FORMAT_B10G10R10A2_UNORM PIPE_FORMAT_B10G10R10A2_UNORM
+#define MESA_FORMAT_B10G10R10X2_UNORM PIPE_FORMAT_B10G10R10X2_UNORM
+#define MESA_FORMAT_R10G10B10A2_UNORM PIPE_FORMAT_R10G10B10A2_UNORM
+#define MESA_FORMAT_R10G10B10X2_UNORM PIPE_FORMAT_R10G10B10X2_UNORM
+#define MESA_FORMAT_S8_UINT_Z24_UNORM PIPE_FORMAT_S8_UINT_Z24_UNORM
+#define MESA_FORMAT_X8_UINT_Z24_UNORM PIPE_FORMAT_X8Z24_UNORM
+#define MESA_FORMAT_Z24_UNORM_S8_UINT PIPE_FORMAT_Z24_UNORM_S8_UINT
+#define MESA_FORMAT_Z24_UNORM_X8_UINT PIPE_FORMAT_Z24X8_UNORM
+#define MESA_FORMAT_R3G3B2_UNORM PIPE_FORMAT_R3G3B2_UNORM
+#define MESA_FORMAT_A4B4G4R4_UNORM PIPE_FORMAT_A4B4G4R4_UNORM
+#define MESA_FORMAT_R4G4B4A4_UNORM PIPE_FORMAT_R4G4B4A4_UNORM
+#define MESA_FORMAT_R5G5B5A1_UNORM PIPE_FORMAT_R5G5B5A1_UNORM
+#define MESA_FORMAT_A2B10G10R10_UNORM PIPE_FORMAT_A2B10G10R10_UNORM
+#define MESA_FORMAT_A2R10G10B10_UNORM PIPE_FORMAT_A2R10G10B10_UNORM
+#define MESA_FORMAT_YCBCR PIPE_FORMAT_UYVY
+#define MESA_FORMAT_YCBCR_REV PIPE_FORMAT_YUYV
+#define MESA_FORMAT_A_UNORM8 PIPE_FORMAT_A8_UNORM
+#define MESA_FORMAT_A_UNORM16 PIPE_FORMAT_A16_UNORM
+#define MESA_FORMAT_L_UNORM8 PIPE_FORMAT_L8_UNORM
+#define MESA_FORMAT_L_UNORM16 PIPE_FORMAT_L16_UNORM
+#define MESA_FORMAT_LA_UNORM8 PIPE_FORMAT_L8A8_UNORM
+#define MESA_FORMAT_LA_UNORM16 PIPE_FORMAT_L16A16_UNORM
+#define MESA_FORMAT_I_UNORM8 PIPE_FORMAT_I8_UNORM
+#define MESA_FORMAT_I_UNORM16 PIPE_FORMAT_I16_UNORM
+#define MESA_FORMAT_R_UNORM8 PIPE_FORMAT_R8_UNORM
+#define MESA_FORMAT_R_UNORM16 PIPE_FORMAT_R16_UNORM
+#define MESA_FORMAT_RG_UNORM8 PIPE_FORMAT_R8G8_UNORM
+#define MESA_FORMAT_RG_UNORM16 PIPE_FORMAT_R16G16_UNORM
+#define MESA_FORMAT_BGR_UNORM8 PIPE_FORMAT_B8G8R8_UNORM
+#define MESA_FORMAT_RGB_UNORM8 PIPE_FORMAT_R8G8B8_UNORM
+#define MESA_FORMAT_RGBA_UNORM16 PIPE_FORMAT_R16G16B16A16_UNORM
+#define MESA_FORMAT_RGBX_UNORM16 PIPE_FORMAT_R16G16B16X16_UNORM
+#define MESA_FORMAT_Z_UNORM16 PIPE_FORMAT_Z16_UNORM
+#define MESA_FORMAT_Z_UNORM32 PIPE_FORMAT_Z32_UNORM
+#define MESA_FORMAT_S_UINT8 PIPE_FORMAT_S8_UINT
+#define MESA_FORMAT_A8B8G8R8_SNORM PIPE_FORMAT_ABGR8888_SNORM
+#define MESA_FORMAT_X8B8G8R8_SNORM PIPE_FORMAT_XBGR8888_SNORM
+#define MESA_FORMAT_R8G8B8A8_SNORM PIPE_FORMAT_RGBA8888_SNORM
+#define MESA_FORMAT_R8G8B8X8_SNORM PIPE_FORMAT_RGBX8888_SNORM
+#define MESA_FORMAT_A_SNORM8 PIPE_FORMAT_A8_SNORM
+#define MESA_FORMAT_A_SNORM16 PIPE_FORMAT_A16_SNORM
+#define MESA_FORMAT_L_SNORM8 PIPE_FORMAT_L8_SNORM
+#define MESA_FORMAT_L_SNORM16 PIPE_FORMAT_L16_SNORM
+#define MESA_FORMAT_I_SNORM8 PIPE_FORMAT_I8_SNORM
+#define MESA_FORMAT_I_SNORM16 PIPE_FORMAT_I16_SNORM
+#define MESA_FORMAT_R_SNORM8 PIPE_FORMAT_R8_SNORM
+#define MESA_FORMAT_R_SNORM16 PIPE_FORMAT_R16_SNORM
+#define MESA_FORMAT_LA_SNORM8 PIPE_FORMAT_L8A8_SNORM
+#define MESA_FORMAT_LA_SNORM16 PIPE_FORMAT_L16A16_SNORM
+#define MESA_FORMAT_RG_SNORM8 PIPE_FORMAT_R8G8_SNORM
+#define MESA_FORMAT_RG_SNORM16 PIPE_FORMAT_R16G16_SNORM
+#define MESA_FORMAT_RGB_SNORM16 PIPE_FORMAT_R16G16B16_SNORM
+#define MESA_FORMAT_RGBA_SNORM16 PIPE_FORMAT_R16G16B16A16_SNORM
+#define MESA_FORMAT_RGBX_SNORM16 PIPE_FORMAT_R16G16B16X16_SNORM
+#define MESA_FORMAT_A8B8G8R8_SRGB PIPE_FORMAT_ABGR8888_SRGB
+#define MESA_FORMAT_B8G8R8A8_SRGB PIPE_FORMAT_BGRA8888_SRGB
+#define MESA_FORMAT_A8R8G8B8_SRGB PIPE_FORMAT_ARGB8888_SRGB
+#define MESA_FORMAT_B8G8R8X8_SRGB PIPE_FORMAT_BGRX8888_SRGB
+#define MESA_FORMAT_X8R8G8B8_SRGB PIPE_FORMAT_XRGB8888_SRGB
+#define MESA_FORMAT_R8G8B8A8_SRGB PIPE_FORMAT_RGBA8888_SRGB
+#define MESA_FORMAT_R8G8B8X8_SRGB PIPE_FORMAT_RGBX8888_SRGB
+#define MESA_FORMAT_X8B8G8R8_SRGB PIPE_FORMAT_XBGR8888_SRGB
+#define MESA_FORMAT_R_SRGB8 PIPE_FORMAT_R8_SRGB
+#define MESA_FORMAT_L_SRGB8 PIPE_FORMAT_L8_SRGB
+#define MESA_FORMAT_LA_SRGB8 PIPE_FORMAT_L8A8_SRGB
+#define MESA_FORMAT_BGR_SRGB8 PIPE_FORMAT_R8G8B8_SRGB
+#define MESA_FORMAT_R9G9B9E5_FLOAT PIPE_FORMAT_R9G9B9E5_FLOAT
+#define MESA_FORMAT_R11G11B10_FLOAT PIPE_FORMAT_R11G11B10_FLOAT
+#define MESA_FORMAT_Z32_FLOAT_S8X24_UINT PIPE_FORMAT_Z32_FLOAT_S8X24_UINT
+#define MESA_FORMAT_A_FLOAT16 PIPE_FORMAT_A16_FLOAT
+#define MESA_FORMAT_A_FLOAT32 PIPE_FORMAT_A32_FLOAT
+#define MESA_FORMAT_L_FLOAT16 PIPE_FORMAT_L16_FLOAT
+#define MESA_FORMAT_L_FLOAT32 PIPE_FORMAT_L32_FLOAT
+#define MESA_FORMAT_LA_FLOAT16 PIPE_FORMAT_L16A16_FLOAT
+#define MESA_FORMAT_LA_FLOAT32 PIPE_FORMAT_L32A32_FLOAT
+#define MESA_FORMAT_I_FLOAT16 PIPE_FORMAT_I16_FLOAT
+#define MESA_FORMAT_I_FLOAT32 PIPE_FORMAT_I32_FLOAT
+#define MESA_FORMAT_R_FLOAT16 PIPE_FORMAT_R16_FLOAT
+#define MESA_FORMAT_R_FLOAT32 PIPE_FORMAT_R32_FLOAT
+#define MESA_FORMAT_RG_FLOAT16 PIPE_FORMAT_R16G16_FLOAT
+#define MESA_FORMAT_RG_FLOAT32 PIPE_FORMAT_R32G32_FLOAT
+#define MESA_FORMAT_RGB_FLOAT16 PIPE_FORMAT_R16G16B16_FLOAT
+#define MESA_FORMAT_RGB_FLOAT32 PIPE_FORMAT_R32G32B32_FLOAT
+#define MESA_FORMAT_RGBA_FLOAT16 PIPE_FORMAT_R16G16B16A16_FLOAT
+#define MESA_FORMAT_RGBA_FLOAT32 PIPE_FORMAT_R32G32B32A32_FLOAT
+#define MESA_FORMAT_RGBX_FLOAT16 PIPE_FORMAT_R16G16B16X16_FLOAT
+#define MESA_FORMAT_RGBX_FLOAT32 PIPE_FORMAT_R32G32B32X32_FLOAT
+#define MESA_FORMAT_Z_FLOAT32 PIPE_FORMAT_Z32_FLOAT
+#define MESA_FORMAT_A8B8G8R8_UINT PIPE_FORMAT_ABGR8888_UINT
+#define MESA_FORMAT_A8R8G8B8_UINT PIPE_FORMAT_ARGB8888_UINT
+#define MESA_FORMAT_R8G8B8A8_UINT PIPE_FORMAT_RGBA8888_UINT
+#define MESA_FORMAT_B8G8R8A8_UINT PIPE_FORMAT_BGRA8888_UINT
+#define MESA_FORMAT_B10G10R10A2_UINT PIPE_FORMAT_B10G10R10A2_UINT
+#define MESA_FORMAT_R10G10B10A2_UINT PIPE_FORMAT_R10G10B10A2_UINT
+#define MESA_FORMAT_A2B10G10R10_UINT PIPE_FORMAT_A2B10G10R10_UINT
+#define MESA_FORMAT_A2R10G10B10_UINT PIPE_FORMAT_A2R10G10B10_UINT
+#define MESA_FORMAT_B5G6R5_UINT PIPE_FORMAT_B5G6R5_UINT
+#define MESA_FORMAT_R5G6B5_UINT PIPE_FORMAT_R5G6B5_UINT
+#define MESA_FORMAT_B2G3R3_UINT PIPE_FORMAT_B2G3R3_UINT
+#define MESA_FORMAT_R3G3B2_UINT PIPE_FORMAT_R3G3B2_UINT
+#define MESA_FORMAT_A4B4G4R4_UINT PIPE_FORMAT_A4B4G4R4_UINT
+#define MESA_FORMAT_R4G4B4A4_UINT PIPE_FORMAT_R4G4B4A4_UINT
+#define MESA_FORMAT_B4G4R4A4_UINT PIPE_FORMAT_B4G4R4A4_UINT
+#define MESA_FORMAT_A4R4G4B4_UINT PIPE_FORMAT_A4R4G4B4_UINT
+#define MESA_FORMAT_A1B5G5R5_UINT PIPE_FORMAT_A1B5G5R5_UINT
+#define MESA_FORMAT_B5G5R5A1_UINT PIPE_FORMAT_B5G5R5A1_UINT
+#define MESA_FORMAT_A1R5G5B5_UINT PIPE_FORMAT_A1R5G5B5_UINT
+#define MESA_FORMAT_R5G5B5A1_UINT PIPE_FORMAT_R5G5B5A1_UINT
+#define MESA_FORMAT_A_UINT8 PIPE_FORMAT_A8_UINT
+#define MESA_FORMAT_A_UINT16 PIPE_FORMAT_A16_UINT
+#define MESA_FORMAT_A_UINT32 PIPE_FORMAT_A32_UINT
+#define MESA_FORMAT_A_SINT8 PIPE_FORMAT_A8_SINT
+#define MESA_FORMAT_A_SINT16 PIPE_FORMAT_A16_SINT
+#define MESA_FORMAT_A_SINT32 PIPE_FORMAT_A32_SINT
+#define MESA_FORMAT_I_UINT8 PIPE_FORMAT_I8_UINT
+#define MESA_FORMAT_I_UINT16 PIPE_FORMAT_I16_UINT
+#define MESA_FORMAT_I_UINT32 PIPE_FORMAT_I32_UINT
+#define MESA_FORMAT_I_SINT8 PIPE_FORMAT_I8_SINT
+#define MESA_FORMAT_I_SINT16 PIPE_FORMAT_I16_SINT
+#define MESA_FORMAT_I_SINT32 PIPE_FORMAT_I32_SINT
+#define MESA_FORMAT_L_UINT8 PIPE_FORMAT_L8_UINT
+#define MESA_FORMAT_L_UINT16 PIPE_FORMAT_L16_UINT
+#define MESA_FORMAT_L_UINT32 PIPE_FORMAT_L32_UINT
+#define MESA_FORMAT_L_SINT8 PIPE_FORMAT_L8_SINT
+#define MESA_FORMAT_L_SINT16 PIPE_FORMAT_L16_SINT
+#define MESA_FORMAT_L_SINT32 PIPE_FORMAT_L32_SINT
+#define MESA_FORMAT_LA_UINT8 PIPE_FORMAT_L8A8_UINT
+#define MESA_FORMAT_LA_UINT16 PIPE_FORMAT_L16A16_UINT
+#define MESA_FORMAT_LA_UINT32 PIPE_FORMAT_L32A32_UINT
+#define MESA_FORMAT_LA_SINT8 PIPE_FORMAT_L8A8_SINT
+#define MESA_FORMAT_LA_SINT16 PIPE_FORMAT_L16A16_SINT
+#define MESA_FORMAT_LA_SINT32 PIPE_FORMAT_L32A32_SINT
+#define MESA_FORMAT_R_UINT8 PIPE_FORMAT_R8_UINT
+#define MESA_FORMAT_R_UINT16 PIPE_FORMAT_R16_UINT
+#define MESA_FORMAT_R_UINT32 PIPE_FORMAT_R32_UINT
+#define MESA_FORMAT_R_SINT8 PIPE_FORMAT_R8_SINT
+#define MESA_FORMAT_R_SINT16 PIPE_FORMAT_R16_SINT
+#define MESA_FORMAT_R_SINT32 PIPE_FORMAT_R32_SINT
+#define MESA_FORMAT_RG_UINT8 PIPE_FORMAT_R8G8_UINT
+#define MESA_FORMAT_RG_UINT16 PIPE_FORMAT_R16G16_UINT
+#define MESA_FORMAT_RG_UINT32 PIPE_FORMAT_R32G32_UINT
+#define MESA_FORMAT_RG_SINT8 PIPE_FORMAT_R8G8_SINT
+#define MESA_FORMAT_RG_SINT16 PIPE_FORMAT_R16G16_SINT
+#define MESA_FORMAT_RG_SINT32 PIPE_FORMAT_R32G32_SINT
+#define MESA_FORMAT_RGB_UINT8 PIPE_FORMAT_R8G8B8_UINT
+#define MESA_FORMAT_RGB_UINT16 PIPE_FORMAT_R16G16B16_UINT
+#define MESA_FORMAT_RGB_UINT32 PIPE_FORMAT_R32G32B32_UINT
+#define MESA_FORMAT_RGB_SINT8 PIPE_FORMAT_R8G8B8_SINT
+#define MESA_FORMAT_RGB_SINT16 PIPE_FORMAT_R16G16B16_SINT
+#define MESA_FORMAT_RGB_SINT32 PIPE_FORMAT_R32G32B32_SINT
+#define MESA_FORMAT_RGBA_UINT16 PIPE_FORMAT_R16G16B16A16_UINT
+#define MESA_FORMAT_RGBA_UINT32 PIPE_FORMAT_R32G32B32A32_UINT
+#define MESA_FORMAT_RGBA_SINT8 PIPE_FORMAT_R8G8B8A8_SINT
+#define MESA_FORMAT_RGBA_SINT16 PIPE_FORMAT_R16G16B16A16_SINT
+#define MESA_FORMAT_RGBA_SINT32 PIPE_FORMAT_R32G32B32A32_SINT
+#define MESA_FORMAT_RGBX_UINT8 PIPE_FORMAT_R8G8B8X8_UINT
+#define MESA_FORMAT_RGBX_UINT16 PIPE_FORMAT_R16G16B16X16_UINT
+#define MESA_FORMAT_RGBX_UINT32 PIPE_FORMAT_R32G32B32X32_UINT
+#define MESA_FORMAT_RGBX_SINT8 PIPE_FORMAT_R8G8B8X8_SINT
+#define MESA_FORMAT_RGBX_SINT16 PIPE_FORMAT_R16G16B16X16_SINT
+#define MESA_FORMAT_RGBX_SINT32 PIPE_FORMAT_R32G32B32X32_SINT
+#define MESA_FORMAT_RGB_DXT1 PIPE_FORMAT_DXT1_RGB
+#define MESA_FORMAT_RGBA_DXT1 PIPE_FORMAT_DXT1_RGBA
+#define MESA_FORMAT_RGBA_DXT3 PIPE_FORMAT_DXT3_RGBA
+#define MESA_FORMAT_RGBA_DXT5 PIPE_FORMAT_DXT5_RGBA
+#define MESA_FORMAT_SRGB_DXT1 PIPE_FORMAT_DXT1_SRGB
+#define MESA_FORMAT_SRGBA_DXT1 PIPE_FORMAT_DXT1_SRGBA
+#define MESA_FORMAT_SRGBA_DXT3 PIPE_FORMAT_DXT3_SRGBA
+#define MESA_FORMAT_SRGBA_DXT5 PIPE_FORMAT_DXT5_SRGBA
+#define MESA_FORMAT_RGB_FXT1 PIPE_FORMAT_FXT1_RGB
+#define MESA_FORMAT_RGBA_FXT1 PIPE_FORMAT_FXT1_RGBA
+#define MESA_FORMAT_R_RGTC1_UNORM PIPE_FORMAT_RGTC1_UNORM
+#define MESA_FORMAT_R_RGTC1_SNORM PIPE_FORMAT_RGTC1_SNORM
+#define MESA_FORMAT_RG_RGTC2_UNORM PIPE_FORMAT_RGTC2_UNORM
+#define MESA_FORMAT_RG_RGTC2_SNORM PIPE_FORMAT_RGTC2_SNORM
+#define MESA_FORMAT_L_LATC1_UNORM PIPE_FORMAT_LATC1_UNORM
+#define MESA_FORMAT_L_LATC1_SNORM PIPE_FORMAT_LATC1_SNORM
+#define MESA_FORMAT_LA_LATC2_UNORM PIPE_FORMAT_LATC2_UNORM
+#define MESA_FORMAT_LA_LATC2_SNORM PIPE_FORMAT_LATC2_SNORM
+#define MESA_FORMAT_ETC1_RGB8 PIPE_FORMAT_ETC1_RGB8
+#define MESA_FORMAT_ETC2_RGB8 PIPE_FORMAT_ETC2_RGB8
+#define MESA_FORMAT_ETC2_SRGB8 PIPE_FORMAT_ETC2_SRGB8
+#define MESA_FORMAT_ETC2_RGBA8_EAC PIPE_FORMAT_ETC2_RGBA8
+#define MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC PIPE_FORMAT_ETC2_SRGBA8
+#define MESA_FORMAT_ETC2_R11_EAC PIPE_FORMAT_ETC2_R11_UNORM
+#define MESA_FORMAT_ETC2_RG11_EAC PIPE_FORMAT_ETC2_RG11_UNORM
+#define MESA_FORMAT_ETC2_SIGNED_R11_EAC PIPE_FORMAT_ETC2_R11_SNORM
+#define MESA_FORMAT_ETC2_SIGNED_RG11_EAC PIPE_FORMAT_ETC2_RG11_SNORM
+#define MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1 PIPE_FORMAT_ETC2_RGB8A1
+#define MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1 PIPE_FORMAT_ETC2_SRGB8A1
+#define MESA_FORMAT_BPTC_RGBA_UNORM PIPE_FORMAT_BPTC_RGBA_UNORM
+#define MESA_FORMAT_BPTC_SRGB_ALPHA_UNORM PIPE_FORMAT_BPTC_SRGBA
+#define MESA_FORMAT_BPTC_RGB_SIGNED_FLOAT PIPE_FORMAT_BPTC_RGB_FLOAT
+#define MESA_FORMAT_BPTC_RGB_UNSIGNED_FLOAT PIPE_FORMAT_BPTC_RGB_UFLOAT
+#define MESA_FORMAT_RGBA_ASTC_4x4 PIPE_FORMAT_ASTC_4x4
+#define MESA_FORMAT_RGBA_ASTC_5x4 PIPE_FORMAT_ASTC_5x4
+#define MESA_FORMAT_RGBA_ASTC_5x5 PIPE_FORMAT_ASTC_5x5
+#define MESA_FORMAT_RGBA_ASTC_6x5 PIPE_FORMAT_ASTC_6x5
+#define MESA_FORMAT_RGBA_ASTC_6x6 PIPE_FORMAT_ASTC_6x6
+#define MESA_FORMAT_RGBA_ASTC_8x5 PIPE_FORMAT_ASTC_8x5
+#define MESA_FORMAT_RGBA_ASTC_8x6 PIPE_FORMAT_ASTC_8x6
+#define MESA_FORMAT_RGBA_ASTC_8x8 PIPE_FORMAT_ASTC_8x8
+#define MESA_FORMAT_RGBA_ASTC_10x5 PIPE_FORMAT_ASTC_10x5
+#define MESA_FORMAT_RGBA_ASTC_10x6 PIPE_FORMAT_ASTC_10x6
+#define MESA_FORMAT_RGBA_ASTC_10x8 PIPE_FORMAT_ASTC_10x8
+#define MESA_FORMAT_RGBA_ASTC_10x10 PIPE_FORMAT_ASTC_10x10
+#define MESA_FORMAT_RGBA_ASTC_12x10 PIPE_FORMAT_ASTC_12x10
+#define MESA_FORMAT_RGBA_ASTC_12x12 PIPE_FORMAT_ASTC_12x12
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4 PIPE_FORMAT_ASTC_4x4_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4 PIPE_FORMAT_ASTC_5x4_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5 PIPE_FORMAT_ASTC_5x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5 PIPE_FORMAT_ASTC_6x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6 PIPE_FORMAT_ASTC_6x6_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x5 PIPE_FORMAT_ASTC_8x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x6 PIPE_FORMAT_ASTC_8x6_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_8x8 PIPE_FORMAT_ASTC_8x8_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x5 PIPE_FORMAT_ASTC_10x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x6 PIPE_FORMAT_ASTC_10x6_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x8 PIPE_FORMAT_ASTC_10x8_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_10x10 PIPE_FORMAT_ASTC_10x10_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x10 PIPE_FORMAT_ASTC_12x10_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_12x12 PIPE_FORMAT_ASTC_12x12_SRGB
+#define MESA_FORMAT_RGBA_ASTC_3x3x3 PIPE_FORMAT_ASTC_3x3x3
+#define MESA_FORMAT_RGBA_ASTC_4x3x3 PIPE_FORMAT_ASTC_4x3x3
+#define MESA_FORMAT_RGBA_ASTC_4x4x3 PIPE_FORMAT_ASTC_4x4x3
+#define MESA_FORMAT_RGBA_ASTC_4x4x4 PIPE_FORMAT_ASTC_4x4x4
+#define MESA_FORMAT_RGBA_ASTC_5x4x4 PIPE_FORMAT_ASTC_5x4x4
+#define MESA_FORMAT_RGBA_ASTC_5x5x4 PIPE_FORMAT_ASTC_5x5x4
+#define MESA_FORMAT_RGBA_ASTC_5x5x5 PIPE_FORMAT_ASTC_5x5x5
+#define MESA_FORMAT_RGBA_ASTC_6x5x5 PIPE_FORMAT_ASTC_6x5x5
+#define MESA_FORMAT_RGBA_ASTC_6x6x5 PIPE_FORMAT_ASTC_6x6x5
+#define MESA_FORMAT_RGBA_ASTC_6x6x6 PIPE_FORMAT_ASTC_6x6x6
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_3x3x3 PIPE_FORMAT_ASTC_3x3x3_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x3x3 PIPE_FORMAT_ASTC_4x3x3_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x3 PIPE_FORMAT_ASTC_4x4x3_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_4x4x4 PIPE_FORMAT_ASTC_4x4x4_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x4x4 PIPE_FORMAT_ASTC_5x4x4_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x4 PIPE_FORMAT_ASTC_5x5x4_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_5x5x5 PIPE_FORMAT_ASTC_5x5x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x5x5 PIPE_FORMAT_ASTC_6x5x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x5 PIPE_FORMAT_ASTC_6x6x5_SRGB
+#define MESA_FORMAT_SRGB8_ALPHA8_ASTC_6x6x6 PIPE_FORMAT_ASTC_6x6x6_SRGB
+#define MESA_FORMAT_ATC_RGB PIPE_FORMAT_ATC_RGB
+#define MESA_FORMAT_ATC_RGBA_EXPLICIT PIPE_FORMAT_ATC_RGBA_EXPLICIT
+#define MESA_FORMAT_ATC_RGBA_INTERPOLATED PIPE_FORMAT_ATC_RGBA_INTERPOLATED
+#define MESA_FORMAT_COUNT PIPE_FORMAT_COUNT
+
+/* Packed to array format adapters */
+#if UTIL_ARCH_LITTLE_ENDIAN
+#define MESA_FORMAT_RGBA_UINT8 MESA_FORMAT_R8G8B8A8_UINT
+#define MESA_FORMAT_RGBA_UNORM8 MESA_FORMAT_R8G8B8A8_UNORM
+#define MESA_FORMAT_RGBA_SNORM8 MESA_FORMAT_R8G8B8A8_SNORM
+#else
+#define MESA_FORMAT_RGBA_UINT8 MESA_FORMAT_A8B8G8R8_UINT
+#define MESA_FORMAT_RGBA_UNORM8 MESA_FORMAT_A8B8G8R8_UNORM
+#define MESA_FORMAT_RGBA_SNORM8 MESA_FORMAT_A8B8G8R8_SNORM
+#endif
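+
+/* Illustrative sketch, not upstream code: the adapters above pick whichever
+ * packed 32-bit layout stores the bytes in R, G, B, A order at increasing
+ * memory addresses on the host, so an array-style store like the hypothetical
+ * helper below (assuming <stdint.h> is available here) yields the same pixel
+ * on little- and big-endian machines.
+ */
+static inline void
+_example_store_rgba_unorm8(uint8_t dst[4],
+                           uint8_t r, uint8_t g, uint8_t b, uint8_t a)
+{
+   dst[0] = r;   /* lowest address: red */
+   dst[1] = g;
+   dst[2] = b;
+   dst[3] = a;   /* highest address: alpha */
+}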
+
+extern const char *
+_mesa_get_format_name(mesa_format format);
+
+extern int
+_mesa_get_format_bytes(mesa_format format);
+
+extern GLint
+_mesa_get_format_bits(mesa_format format, GLenum pname);
+
+extern unsigned int
+_mesa_get_format_max_bits(mesa_format format);
+
+extern enum mesa_format_layout
+_mesa_get_format_layout(mesa_format format);
+
+extern GLenum
+_mesa_get_format_datatype(mesa_format format);
+
+extern GLenum
+_mesa_get_format_base_format(uint32_t format);
+
+extern void
+_mesa_get_format_block_size(mesa_format format,
+ unsigned int *bw, unsigned int *bh);
+
+extern void
+_mesa_get_format_block_size_3d(mesa_format format, unsigned int *bw,
+ unsigned int *bh, unsigned int *bd);
+
+extern mesa_array_format
+_mesa_array_format_flip_channels(mesa_array_format format);
+
+extern void
+_mesa_get_format_swizzle(mesa_format format, uint8_t swizzle_out[4]);
+
+extern uint32_t
+_mesa_format_to_array_format(mesa_format format);
+
+extern mesa_format
+_mesa_format_from_array_format(uint32_t array_format);
+
+extern bool
+_mesa_is_format_compressed(mesa_format format);
+
+extern bool
+_mesa_is_format_packed_depth_stencil(mesa_format format);
+
+extern bool
+_mesa_is_format_integer_color(mesa_format format);
+
+extern bool
+_mesa_is_format_unsigned(mesa_format format);
+
+extern bool
+_mesa_is_format_signed(mesa_format format);
+
+extern bool
+_mesa_is_format_integer(mesa_format format);
+
+extern bool
+_mesa_is_format_etc2(mesa_format format);
+
+bool
+_mesa_is_format_astc_2d(mesa_format format);
+
+bool
+_mesa_is_format_color_format(mesa_format format);
+
+bool
+_mesa_is_format_srgb(mesa_format format);
+
+extern uint32_t
+_mesa_format_image_size(mesa_format format, int width,
+ int height, int depth);
+
+extern uint64_t
+_mesa_format_image_size64(mesa_format format, int width,
+ int height, int depth);
+
+extern int32_t
+_mesa_format_row_stride(mesa_format format, int width);
+
+extern void
+_mesa_uncompressed_format_to_type_and_comps(mesa_format format,
+ GLenum *datatype, GLuint *comps);
+
+extern void
+_mesa_test_formats(void);
+
+extern mesa_format
+_mesa_get_srgb_format_linear(mesa_format format);
+
+extern mesa_format
+_mesa_get_linear_format_srgb(mesa_format format);
+
+extern mesa_format
+_mesa_get_intensity_format_red(mesa_format format);
+
+extern mesa_format
+_mesa_get_uncompressed_format(mesa_format format);
+
+extern unsigned int
+_mesa_format_num_components(mesa_format format);
+
+extern bool
+_mesa_format_has_color_component(mesa_format format, int component);
+
+bool
+_mesa_format_matches_format_and_type(mesa_format mesa_format,
+ GLenum format, GLenum type,
+ bool swapBytes, GLenum *error);
+
+mesa_format
+_mesa_format_fallback_rgbx_to_rgba(mesa_format format);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* FORMATS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glheader.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glheader.h
new file mode 100644
index 0000000000..59ca1cbadf
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glheader.h
@@ -0,0 +1,164 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file glheader.h
+ * Wrapper for GL/gl.h and GL/glext.h
+ */
+
+
+#ifndef GLHEADER_H
+#define GLHEADER_H
+
+
+#define GL_GLEXT_PROTOTYPES
+#include "GL/gl.h"
+#include "GL/glext.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/* Custom Mesa types to save space. */
+typedef unsigned short GLenum16;
+typedef unsigned char GLbitfield8;
+typedef unsigned short GLbitfield16;
+typedef GLuint64 GLbitfield64;
+
+/* Common GLES 1.0 and 2.0 tokens */
+
+#ifndef GL_OES_EGL_image_external
+#define GL_TEXTURE_EXTERNAL_OES 0x8D65
+#define GL_SAMPLER_EXTERNAL_OES 0x8D66
+#define GL_TEXTURE_BINDING_EXTERNAL_OES 0x8D67
+#define GL_REQUIRED_TEXTURE_IMAGE_UNITS_OES 0x8D68
+#endif
+
+#ifndef GL_OES_compressed_ETC1_RGB8_texture
+#define GL_ETC1_RGB8_OES 0x8D64
+#endif
+
+
+/* GLES 1.0 only tokens */
+
+typedef int GLclampx;
+
+#ifndef GL_OES_point_size_array
+#define GL_POINT_SIZE_ARRAY_OES 0x8B9C
+#define GL_POINT_SIZE_ARRAY_TYPE_OES 0x898A
+#define GL_POINT_SIZE_ARRAY_STRIDE_OES 0x898B
+#define GL_POINT_SIZE_ARRAY_POINTER_OES 0x898C
+#define GL_POINT_SIZE_ARRAY_BUFFER_BINDING_OES 0x8B9F
+#endif
+
+
+#ifndef GL_OES_draw_texture
+#define GL_TEXTURE_CROP_RECT_OES 0x8B9D
+#endif
+
+#ifndef GL_TEXTURE_GEN_STR_OES
+#define GL_TEXTURE_GEN_STR_OES 0x8D60
+#endif
+
+
+/* GLES 2.0 only tokens */
+
+#ifndef GL_PROGRAM_BINARY_LENGTH_OES
+#define GL_PROGRAM_BINARY_LENGTH_OES 0x8741
+#endif
+
+#ifndef GL_OES_texture_compression_astc
+#define GL_COMPRESSED_RGBA_ASTC_3x3x3_OES 0x93C0
+#define GL_COMPRESSED_RGBA_ASTC_4x3x3_OES 0x93C1
+#define GL_COMPRESSED_RGBA_ASTC_4x4x3_OES 0x93C2
+#define GL_COMPRESSED_RGBA_ASTC_4x4x4_OES 0x93C3
+#define GL_COMPRESSED_RGBA_ASTC_5x4x4_OES 0x93C4
+#define GL_COMPRESSED_RGBA_ASTC_5x5x4_OES 0x93C5
+#define GL_COMPRESSED_RGBA_ASTC_5x5x5_OES 0x93C6
+#define GL_COMPRESSED_RGBA_ASTC_6x5x5_OES 0x93C7
+#define GL_COMPRESSED_RGBA_ASTC_6x6x5_OES 0x93C8
+#define GL_COMPRESSED_RGBA_ASTC_6x6x6_OES 0x93C9
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_3x3x3_OES 0x93E0
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x3x3_OES 0x93E1
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x3_OES 0x93E2
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_4x4x4_OES 0x93E3
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x4x4_OES 0x93E4
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x4_OES 0x93E5
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_5x5x5_OES 0x93E6
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x5x5_OES 0x93E7
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x5_OES 0x93E8
+#define GL_COMPRESSED_SRGB8_ALPHA8_ASTC_6x6x6_OES 0x93E9
+#endif
+
+#ifndef GL_EXT_shader_framebuffer_fetch
+#define GL_FRAGMENT_SHADER_DISCARDS_SAMPLES_EXT 0x8A52
+#endif
+
+#ifndef GL_EXT_disjoint_timer_query
+#define GL_GPU_DISJOINT_EXT 0x8FBB
+#endif
+
+/* Inexplicably, GL_HALF_FLOAT_OES has a different value than GL_HALF_FLOAT.
+ */
+#ifndef GL_HALF_FLOAT_OES
+#define GL_HALF_FLOAT_OES 0x8D61
+#endif
+
+/* There is no formal spec for the following extension. */
+#ifndef GL_ATI_texture_compression_3dc
+#define GL_ATI_texture_compression_3dc 1
+#define GL_COMPRESSED_LUMINANCE_ALPHA_3DC_ATI 0x8837
+#endif
+
+#ifndef GL_EXT_texture_sRGB_R8
+#define GL_SR8_EXT 0x8FBD
+#endif
+
+#ifndef GL_AMD_compressed_ATC_texture
+#define GL_ATC_RGB_AMD 0x8C92
+#define GL_ATC_RGBA_EXPLICIT_ALPHA_AMD 0x8C93
+#define GL_ATC_RGBA_INTERPOLATED_ALPHA_AMD 0x87EE
+#endif
+
+/**
+ * Internal token to represent a GLSL shader program (a collection of
+ * one or more shaders that get linked together). Note that GLSL
+ * shaders and shader programs share one name space (one hash table)
+ * so we need a value that's different from any of the
+ * GL_VERTEX/FRAGMENT/GEOMETRY_PROGRAM tokens.
+ */
+#define GL_SHADER_PROGRAM_MESA 0x9999
+
+#ifndef GL_EXT_multisampled_render_to_texture
+#define GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_SAMPLES_EXT 0x8D6C
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GLHEADER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glthread.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glthread.h
new file mode 100644
index 0000000000..a50b2f87ce
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/glthread.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _GLTHREAD_H
+#define _GLTHREAD_H
+
+/* The size of one batch and the maximum size of one call.
+ *
+ * This should be as low as possible, so that:
+ * - multiple synchronizations within a frame don't slow us down much
+ * - a smaller number of calls per frame can still get decent parallelism
+ * - the memory footprint of the queue is low, and with that comes a lower
+ * chance of experiencing CPU cache thrashing
+ * but it should be high enough so that u_queue overhead remains negligible.
+ */
+#define MARSHAL_MAX_CMD_SIZE (8 * 1024)
+
+/* The number of batch slots in memory.
+ *
+ * One batch is being executed, one batch is being filled, the rest are
+ * waiting batches. There must be at least 1 slot for a waiting batch,
+ * so the minimum number of batches is 3.
+ */
+#define MARSHAL_MAX_BATCHES 8
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include "util/u_queue.h"
+#include "GL/gl.h"
+#include "compiler/shader_enums.h"
+
+struct gl_context;
+struct _mesa_HashTable;
+
+struct glthread_vao {
+ GLuint Name;
+ GLuint CurrentElementBufferName;
+ GLbitfield Enabled;
+ GLbitfield UserPointerMask;
+};
+
+/** A single batch of commands queued up for execution. */
+struct glthread_batch
+{
+ /** Batch fence for waiting for the execution to finish. */
+ struct util_queue_fence fence;
+
+ /** The worker thread will access the context with this. */
+ struct gl_context *ctx;
+
+ /** Amount of data used by batch commands, in bytes. */
+ int used;
+
+ /** Data contained in the command buffer. */
+#ifdef _MSC_VER
+ __declspec(align(8))
+#else
+ __attribute__((aligned(8)))
+#endif
+ uint8_t buffer[MARSHAL_MAX_CMD_SIZE];
+};
+
+struct glthread_state
+{
+ /** Multithreaded queue. */
+ struct util_queue queue;
+
+ /** This is sent to the driver for framebuffer overlay / HUD. */
+ struct util_queue_monitoring stats;
+
+ /** Whether GLThread is enabled. */
+ bool enabled;
+
+ /** The ring of batches in memory. */
+ struct glthread_batch batches[MARSHAL_MAX_BATCHES];
+
+ /** Pointer to the batch currently being filled. */
+ struct glthread_batch *next_batch;
+
+ /** Index of the last submitted batch. */
+ unsigned last;
+
+ /** Index of the batch being filled and about to be submitted. */
+ unsigned next;
+
+ /** Vertex Array objects tracked by glthread independently of Mesa. */
+ struct _mesa_HashTable *VAOs;
+ struct glthread_vao *CurrentVAO;
+ struct glthread_vao *LastLookedUpVAO;
+ struct glthread_vao DefaultVAO;
+ int ClientActiveTexture;
+
+ /** Currently-bound buffer object IDs. */
+ GLuint CurrentArrayBufferName;
+ GLuint CurrentDrawIndirectBufferName;
+};
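+
+/* Illustrative sketch, not upstream code: the batch slots above form a ring,
+ * so the "next" index wraps modulo MARSHAL_MAX_BATCHES.  The helper name is
+ * hypothetical and only demonstrates the indexing scheme.
+ */
+static inline unsigned
+_example_next_batch_slot(unsigned batch_index)
+{
+   return (batch_index + 1) % MARSHAL_MAX_BATCHES;
+}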
+
+void _mesa_glthread_init(struct gl_context *ctx);
+void _mesa_glthread_destroy(struct gl_context *ctx);
+
+void _mesa_glthread_restore_dispatch(struct gl_context *ctx, const char *func);
+void _mesa_glthread_disable(struct gl_context *ctx, const char *func);
+void _mesa_glthread_flush_batch(struct gl_context *ctx);
+void _mesa_glthread_finish(struct gl_context *ctx);
+void _mesa_glthread_finish_before(struct gl_context *ctx, const char *func);
+
+void _mesa_glthread_BindBuffer(struct gl_context *ctx, GLenum target,
+ GLuint buffer);
+void _mesa_glthread_DeleteBuffers(struct gl_context *ctx, GLsizei n,
+ const GLuint *buffers);
+
+void _mesa_glthread_BindVertexArray(struct gl_context *ctx, GLuint id);
+void _mesa_glthread_DeleteVertexArrays(struct gl_context *ctx,
+ GLsizei n, const GLuint *ids);
+void _mesa_glthread_GenVertexArrays(struct gl_context *ctx,
+ GLsizei n, GLuint *arrays);
+void _mesa_glthread_ClientState(struct gl_context *ctx, GLuint *vaobj,
+ gl_vert_attrib attrib, bool enable);
+void _mesa_glthread_AttribPointer(struct gl_context *ctx,
+ gl_vert_attrib attrib);
+
+#endif /* _GLTHREAD_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/hash.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/hash.h
new file mode 100644
index 0000000000..0a3996a7d0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/hash.h
@@ -0,0 +1,181 @@
+/**
+ * \file hash.h
+ * Generic hash table.
+ */
+
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef HASH_H
+#define HASH_H
+
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "glheader.h"
+
+#include "c11/threads.h"
+
+/**
+ * Magic GLuint object name that gets stored outside of the struct hash_table.
+ *
+ * The hash table needs a particular pointer to be the marker for a key that
+ * was deleted from the table, along with NULL for the "never allocated in the
+ * table" marker. Legacy GL allows any GLuint to be used as a GL object name,
+ * and we use a 1:1 mapping from GLuints to key pointers, so we need to be
+ * able to track a GLuint that happens to match the deleted key outside of
+ * struct hash_table. We tell the hash table to use "1" as the deleted key
+ * value, so that we test the deleted-key-in-the-table path as best we can.
+ */
+#define DELETED_KEY_VALUE 1
+
+/** @{
+ * Mapping from our use of GLuint as both the key and the hash value to the
+ * hash_table.h API
+ *
+ * There exist many integer hash functions, designed to avoid collisions when
+ * the integers are spread across key space with some patterns. In GL, the
+ * pattern (in the case of glGen*()ed object IDs) is that the keys are unique
+ * contiguous integers starting from 1. Because of that, we just use the key
+ * as the hash value, to minimize the cost of the hash function. If objects
+ * are never deleted, we will never see a collision in the table, because the
+ * table resizes itself when it approaches full, and thus key % table_size ==
+ * key.
+ *
+ * The case where we could have collisions for genned objects would be
+ * something like: glGenBuffers(100, a); glDeleteBuffers(50, a + 50);
+ * glGenBuffers(100, b), because objects 1-50 and 101-200 are allocated at
+ * the end of that sequence, instead of 1-150. So far it doesn't appear to be
+ * a problem.
+ */
+static inline bool
+uint_key_compare(const void *a, const void *b)
+{
+ return a == b;
+}
+
+static inline uint32_t
+uint_hash(GLuint id)
+{
+ return id;
+}
+
+static inline uint32_t
+uint_key_hash(const void *key)
+{
+ return uint_hash((uintptr_t)key);
+}
+
+static inline void *
+uint_key(GLuint id)
+{
+ return (void *)(uintptr_t) id;
+}
+/** @} */
+
+/**
+ * The hash table data structure.
+ */
+struct _mesa_HashTable {
+ struct hash_table *ht;
+ GLuint MaxKey; /**< highest key inserted so far */
+ mtx_t Mutex; /**< mutual exclusion lock */
+ GLboolean InDeleteAll; /**< Debug check */
+ /** Value that would be in the table for DELETED_KEY_VALUE. */
+ void *deleted_key_data;
+};
+
+extern struct _mesa_HashTable *_mesa_NewHashTable(void);
+
+extern void _mesa_DeleteHashTable(struct _mesa_HashTable *table);
+
+extern void *_mesa_HashLookup(struct _mesa_HashTable *table, GLuint key);
+
+extern void _mesa_HashInsert(struct _mesa_HashTable *table, GLuint key, void *data);
+
+extern void _mesa_HashRemove(struct _mesa_HashTable *table, GLuint key);
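+
+/* Illustrative usage sketch, not part of the API above: insert an object
+ * under a GLuint name and read it back.  The helper name is hypothetical and
+ * exists only to demonstrate the functions declared above.
+ */
+static inline void *
+_example_hash_roundtrip(struct _mesa_HashTable *table, GLuint name, void *obj)
+{
+   _mesa_HashInsert(table, name, obj);
+   return _mesa_HashLookup(table, name); /* returns obj */
+}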
+
+/**
+ * Lock the hash table mutex.
+ *
+ * This function should be used when multiple objects need
+ * to be looked up in the hash table, to avoid having to lock
+ * and unlock the mutex each time.
+ *
+ * \param table the hash table.
+ */
+static inline void
+_mesa_HashLockMutex(struct _mesa_HashTable *table)
+{
+ assert(table);
+ mtx_lock(&table->Mutex);
+}
+
+
+/**
+ * Unlock the hash table mutex.
+ *
+ * \param table the hash table.
+ */
+static inline void
+_mesa_HashUnlockMutex(struct _mesa_HashTable *table)
+{
+ assert(table);
+ mtx_unlock(&table->Mutex);
+}
+
+extern void *_mesa_HashLookupLocked(struct _mesa_HashTable *table, GLuint key);
+
+extern void _mesa_HashInsertLocked(struct _mesa_HashTable *table,
+ GLuint key, void *data);
+
+extern void _mesa_HashRemoveLocked(struct _mesa_HashTable *table, GLuint key);
+
+extern void
+_mesa_HashDeleteAll(struct _mesa_HashTable *table,
+ void (*callback)(GLuint key, void *data, void *userData),
+ void *userData);
+
+extern void
+_mesa_HashWalk(const struct _mesa_HashTable *table,
+ void (*callback)(GLuint key, void *data, void *userData),
+ void *userData);
+
+extern void
+_mesa_HashWalkLocked(const struct _mesa_HashTable *table,
+ void (*callback)(GLuint key, void *data, void *userData),
+ void *userData);
+
+extern void _mesa_HashPrint(const struct _mesa_HashTable *table);
+
+extern GLuint _mesa_HashFindFreeKeyBlock(struct _mesa_HashTable *table, GLuint numKeys);
+
+extern GLuint
+_mesa_HashNumEntries(const struct _mesa_HashTable *table);
+
+extern void _mesa_test_hash_functions(void);
+
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/macros.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/macros.h
new file mode 100644
index 0000000000..caa310fd02
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/macros.h
@@ -0,0 +1,800 @@
+/**
+ * \file macros.h
+ * A collection of useful macros.
+ */
+
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef MACROS_H
+#define MACROS_H
+
+#include "util/macros.h"
+#include "util/u_math.h"
+#include "util/rounding.h"
+#include "util/compiler.h"
+#include "main/glheader.h"
+#include "mesa_private.h"
+
+
+/**
+ * \name Integer / float conversion for colors, normals, etc.
+ */
+/*@{*/
+
+/** Convert GLubyte in [0,255] to GLfloat in [0.0,1.0] */
+extern GLfloat _mesa_ubyte_to_float_color_tab[256];
+#define UBYTE_TO_FLOAT(u) _mesa_ubyte_to_float_color_tab[(unsigned int)(u)]
+
+/** Convert GLfloat in [0.0,1.0] to GLubyte in [0,255] */
+#define FLOAT_TO_UBYTE(X) ((GLubyte) (GLint) ((X) * 255.0F))
+
+
+/** Convert GLbyte in [-128,127] to GLfloat in [-1.0,1.0] */
+#define BYTE_TO_FLOAT(B) ((2.0F * (B) + 1.0F) * (1.0F/255.0F))
+
+/** Convert GLfloat in [-1.0,1.0] to GLbyte in [-128,127] */
+#define FLOAT_TO_BYTE(X) ( (((GLint) (255.0F * (X))) - 1) / 2 )
+
+
+/** Convert GLbyte to GLfloat while preserving zero */
+#define BYTE_TO_FLOATZ(B) ((B) == 0 ? 0.0F : BYTE_TO_FLOAT(B))
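+
+/* Worked endpoints for the affine byte mapping above (illustrative):
+ *   BYTE_TO_FLOAT(-128) = (2*(-128) + 1) / 255 = -255/255 = -1.0
+ *   BYTE_TO_FLOAT( 127) = (2*( 127) + 1) / 255 =  255/255 =  1.0
+ * so the full signed byte range maps exactly onto [-1.0, 1.0].
+ */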
+
+
+/** Convert GLbyte in [-128,127] to GLfloat in [-1.0,1.0], texture/fb data */
+#define BYTE_TO_FLOAT_TEX(B) ((B) == -128 ? -1.0F : (B) * (1.0F/127.0F))
+
+/** Convert GLfloat in [-1.0,1.0] to GLbyte in [-128,127], texture/fb data */
+#define FLOAT_TO_BYTE_TEX(X) CLAMP( (GLint) (127.0F * (X)), -128, 127 )
+
+/** Convert GLushort in [0,65535] to GLfloat in [0.0,1.0] */
+#define USHORT_TO_FLOAT(S) ((GLfloat) (S) * (1.0F / 65535.0F))
+
+/** Convert GLfloat in [0.0,1.0] to GLushort in [0, 65535] */
+#define FLOAT_TO_USHORT(X) ((GLuint) ((X) * 65535.0F))
+
+
+/** Convert GLshort in [-32768,32767] to GLfloat in [-1.0,1.0] */
+#define SHORT_TO_FLOAT(S) ((2.0F * (S) + 1.0F) * (1.0F/65535.0F))
+
+/** Convert GLfloat in [-1.0,1.0] to GLshort in [-32768,32767] */
+#define FLOAT_TO_SHORT(X) ( (((GLint) (65535.0F * (X))) - 1) / 2 )
+
+/** Convert GLshort to GLfloat while preserving zero */
+#define SHORT_TO_FLOATZ(S) ((S) == 0 ? 0.0F : SHORT_TO_FLOAT(S))
+
+
+/** Convert GLshort in [-32768,32767] to GLfloat in [-1.0,1.0], texture/fb data */
+#define SHORT_TO_FLOAT_TEX(S) ((S) == -32768 ? -1.0F : (S) * (1.0F/32767.0F))
+
+/** Convert GLfloat in [-1.0,1.0] to GLshort in [-32768,32767], texture/fb data */
+#define FLOAT_TO_SHORT_TEX(X) ( (GLint) (32767.0F * (X)) )
+
+
+/** Convert GLuint in [0,4294967295] to GLfloat in [0.0,1.0] */
+#define UINT_TO_FLOAT(U) ((GLfloat) ((U) * (1.0F / 4294967295.0)))
+
+/** Convert GLfloat in [0.0,1.0] to GLuint in [0,4294967295] */
+#define FLOAT_TO_UINT(X) ((GLuint) ((X) * 4294967295.0))
+
+
+/** Convert GLint in [-2147483648,2147483647] to GLfloat in [-1.0,1.0] */
+#define INT_TO_FLOAT(I) ((GLfloat) ((2.0F * (I) + 1.0F) * (1.0F/4294967294.0)))
+
+/** Convert GLfloat in [-1.0,1.0] to GLint in [-2147483648,2147483647] */
+/* causes overflow:
+#define FLOAT_TO_INT(X) ( (((GLint) (4294967294.0 * (X))) - 1) / 2 )
+*/
+/* a close approximation: */
+#define FLOAT_TO_INT(X) ( (GLint) (2147483647.0 * (X)) )
+
+/** Convert GLfloat in [-1.0,1.0] to GLint64 in [-(1<<63),(1 << 63) -1] */
+#define FLOAT_TO_INT64(X) ( (GLint64) (9223372036854775807.0 * (double)(X)) )
+
+
+/** Convert GLint in [-2147483648,2147483647] to GLfloat in [-1.0,1.0], texture/fb data */
+#define INT_TO_FLOAT_TEX(I) ((I) == -2147483648 ? -1.0F : (I) * (1.0F/2147483647.0))
+
+/** Convert GLfloat in [-1.0,1.0] to GLint in [-2147483648,2147483647], texture/fb data */
+#define FLOAT_TO_INT_TEX(X) ( (GLint) (2147483647.0 * (X)) )
+
+
+#define BYTE_TO_UBYTE(b) ((GLubyte) ((b) < 0 ? 0 : (GLubyte) (b)))
+#define SHORT_TO_UBYTE(s) ((GLubyte) ((s) < 0 ? 0 : (GLubyte) ((s) >> 7)))
+#define USHORT_TO_UBYTE(s) ((GLubyte) ((s) >> 8))
+#define INT_TO_UBYTE(i) ((GLubyte) ((i) < 0 ? 0 : (GLubyte) ((i) >> 23)))
+#define UINT_TO_UBYTE(i) ((GLubyte) ((i) >> 24))
+
+
+#define BYTE_TO_USHORT(b) ((b) < 0 ? 0 : ((GLushort) (((b) * 65535) / 255)))
+#define UBYTE_TO_USHORT(b) (((GLushort) (b) << 8) | (GLushort) (b))
+#define SHORT_TO_USHORT(s) ((s) < 0 ? 0 : ((GLushort) (((s) * 65535 / 32767))))
+#define INT_TO_USHORT(i) ((i) < 0 ? 0 : ((GLushort) ((i) >> 15)))
+#define UINT_TO_USHORT(i) ((i) < 0 ? 0 : ((GLushort) ((i) >> 16)))
+#define UNCLAMPED_FLOAT_TO_USHORT(us, f) \
+ us = ( (GLushort) _mesa_lroundevenf( CLAMP((f), 0.0F, 1.0F) * 65535.0F) )
+#define CLAMPED_FLOAT_TO_USHORT(us, f) \
+ us = ( (GLushort) _mesa_lroundevenf( (f) * 65535.0F) )
+
+#define UNCLAMPED_FLOAT_TO_SHORT(s, f) \
+ s = ( (GLshort) _mesa_lroundevenf( CLAMP((f), -1.0F, 1.0F) * 32767.0F) )
+
+/***
+ *** UNCLAMPED_FLOAT_TO_UBYTE: clamp float to [0,1] and map to ubyte in [0,255]
+ *** CLAMPED_FLOAT_TO_UBYTE: map float known to be in [0,1] to ubyte in [0,255]
+ ***/
+#ifndef DEBUG
+/* This function/macro is sensitive to precision. Test very carefully
+ * if you change it!
+ */
+#define UNCLAMPED_FLOAT_TO_UBYTE(UB, FLT) \
+ do { \
+ fi_type __tmp; \
+ __tmp.f = (FLT); \
+ if (__tmp.i < 0) \
+ UB = (GLubyte) 0; \
+ else if (__tmp.i >= IEEE_ONE) \
+ UB = (GLubyte) 255; \
+ else { \
+ __tmp.f = __tmp.f * (255.0F/256.0F) + 32768.0F; \
+ UB = (GLubyte) __tmp.i; \
+ } \
+ } while (0)
+#define CLAMPED_FLOAT_TO_UBYTE(UB, FLT) \
+ do { \
+ fi_type __tmp; \
+ __tmp.f = (FLT) * (255.0F/256.0F) + 32768.0F; \
+ UB = (GLubyte) __tmp.i; \
+ } while (0)
+#else
+#define UNCLAMPED_FLOAT_TO_UBYTE(ub, f) \
+ ub = ((GLubyte) _mesa_lroundevenf(CLAMP((f), 0.0F, 1.0F) * 255.0F))
+#define CLAMPED_FLOAT_TO_UBYTE(ub, f) \
+ ub = ((GLubyte) _mesa_lroundevenf((f) * 255.0F))
+#endif
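+
+/* Why the non-DEBUG fast path works (illustrative walk-through): scaling by
+ * 255/256 and adding 32768.0F lands the result in [32768, 32769), where a
+ * float's spacing is exactly 2^-8, so the low byte of the stored mantissa
+ * (and hence of __tmp.i) is round(FLT * 255).  For example:
+ *   FLT = 0.5: 0.5 * 255/256 + 32768 = 32768.498...  ->  low byte 128
+ *   FLT = 1.0: 1.0 * 255/256 + 32768 = 32768.996...  ->  low byte 255
+ */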
+
+static inline fi_type UINT_AS_UNION(GLuint u)
+{
+ fi_type tmp;
+ tmp.u = u;
+ return tmp;
+}
+
+static inline fi_type INT_AS_UNION(GLint i)
+{
+ fi_type tmp;
+ tmp.i = i;
+ return tmp;
+}
+
+static inline fi_type FLOAT_AS_UNION(GLfloat f)
+{
+ fi_type tmp;
+ tmp.f = f;
+ return tmp;
+}
+
+static inline uint64_t DOUBLE_AS_UINT64(double d)
+{
+ union {
+ double d;
+ uint64_t u64;
+ } tmp;
+ tmp.d = d;
+ return tmp.u64;
+}
+
+static inline double UINT64_AS_DOUBLE(uint64_t u)
+{
+ union {
+ double d;
+ uint64_t u64;
+ } tmp;
+ tmp.u64 = u;
+ return tmp.d;
+}
+
+/* First sign-extend x, then return uint32_t. */
+#define INT_AS_UINT(x) ((uint32_t)((int32_t)(x)))
+#define FLOAT_AS_UINT(x) (FLOAT_AS_UNION(x).u)
+
+/**
+ * Convert a floating point value to an unsigned fixed point value.
+ *
+ * \param frac_bits The number of bits used to store the fractional part.
+ */
+static inline uint32_t
+U_FIXED(float value, uint32_t frac_bits)
+{
+ value *= (1 << frac_bits);
+ return value < 0.0f ? 0 : (uint32_t) value;
+}
+
+/**
+ * Convert a floating point value to a signed fixed point value.
+ *
+ * \param frac_bits The number of bits used to store the fractional part.
+ */
+static inline int32_t
+S_FIXED(float value, uint32_t frac_bits)
+{
+ return (int32_t) (value * (1 << frac_bits));
+}
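+
+/* Examples (illustrative): with 4 fractional bits,
+ *   U_FIXED(1.5f, 4)   == 24   (1.5  * 16)
+ *   S_FIXED(-1.25f, 4) == -20  (-1.25 * 16)
+ * and negative inputs to U_FIXED clamp to 0.
+ */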
+/*@}*/
+
+
+/** Stepping a GLfloat pointer by a byte stride */
+#define STRIDE_F(p, i) (p = (GLfloat *)((GLubyte *)p + i))
+/** Stepping a GLuint pointer by a byte stride */
+#define STRIDE_UI(p, i) (p = (GLuint *)((GLubyte *)p + i))
+/** Stepping a GLubyte[4] pointer by a byte stride */
+#define STRIDE_4UB(p, i) (p = (GLubyte (*)[4])((GLubyte *)p + i))
+/** Stepping a GLfloat[4] pointer by a byte stride */
+#define STRIDE_4F(p, i) (p = (GLfloat (*)[4])((GLubyte *)p + i))
+/** Stepping a \p t pointer by a byte stride */
+#define STRIDE_T(p, t, i) (p = (t)((GLubyte *)p + i))
+
+
+/**********************************************************************/
+/** \name 4-element vector operations */
+/*@{*/
+
+/** Zero */
+#define ZERO_4V( DST ) (DST)[0] = (DST)[1] = (DST)[2] = (DST)[3] = 0
+
+/** Test for equality */
+#define TEST_EQ_4V(a,b) ((a)[0] == (b)[0] && \
+ (a)[1] == (b)[1] && \
+ (a)[2] == (b)[2] && \
+ (a)[3] == (b)[3])
+
+/** Test for equality (unsigned bytes) */
+static inline GLboolean
+TEST_EQ_4UBV(const GLubyte a[4], const GLubyte b[4])
+{
+#if defined(__i386__)
+ return *((const GLuint *) a) == *((const GLuint *) b);
+#else
+ return TEST_EQ_4V(a, b);
+#endif
+}
+
+
+/** Copy a 4-element vector */
+#define COPY_4V( DST, SRC ) \
+do { \
+ (DST)[0] = (SRC)[0]; \
+ (DST)[1] = (SRC)[1]; \
+ (DST)[2] = (SRC)[2]; \
+ (DST)[3] = (SRC)[3]; \
+} while (0)
+
+/** Copy a 4-element unsigned byte vector */
+static inline void
+COPY_4UBV(GLubyte dst[4], const GLubyte src[4])
+{
+#if defined(__i386__)
+ *((GLuint *) dst) = *((GLuint *) src);
+#else
+   /* The GLuint cast might fail if dst or src is not dword-aligned (RISC) */
+ COPY_4V(dst, src);
+#endif
+}
+
+/** Copy \p SZ elements into a 4-element vector */
+#define COPY_SZ_4V(DST, SZ, SRC) \
+do { \
+ switch (SZ) { \
+   case 4: (DST)[3] = (SRC)[3]; /* fall through */ \
+   case 3: (DST)[2] = (SRC)[2]; /* fall through */ \
+   case 2: (DST)[1] = (SRC)[1]; /* fall through */ \
+   case 1: (DST)[0] = (SRC)[0]; \
+ } \
+} while(0)
+
+/** Copy \p SZ elements into a homogeneous (4-element) vector, giving
+ * default values to the remaining components */
+#define COPY_CLEAN_4V(DST, SZ, SRC) \
+do { \
+ ASSIGN_4V( DST, 0, 0, 0, 1 ); \
+ COPY_SZ_4V( DST, SZ, SRC ); \
+} while (0)
+
+/** Subtraction */
+#define SUB_4V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] - (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] - (SRCB)[1]; \
+ (DST)[2] = (SRCA)[2] - (SRCB)[2]; \
+ (DST)[3] = (SRCA)[3] - (SRCB)[3]; \
+} while (0)
+
+/** Addition */
+#define ADD_4V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] + (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] + (SRCB)[1]; \
+ (DST)[2] = (SRCA)[2] + (SRCB)[2]; \
+ (DST)[3] = (SRCA)[3] + (SRCB)[3]; \
+} while (0)
+
+/** Element-wise multiplication */
+#define SCALE_4V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] * (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] * (SRCB)[1]; \
+ (DST)[2] = (SRCA)[2] * (SRCB)[2]; \
+ (DST)[3] = (SRCA)[3] * (SRCB)[3]; \
+} while (0)
+
+/** In-place addition */
+#define ACC_4V( DST, SRC ) \
+do { \
+ (DST)[0] += (SRC)[0]; \
+ (DST)[1] += (SRC)[1]; \
+ (DST)[2] += (SRC)[2]; \
+ (DST)[3] += (SRC)[3]; \
+} while (0)
+
+/** Element-wise multiplication and addition */
+#define ACC_SCALE_4V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] += (SRCA)[0] * (SRCB)[0]; \
+ (DST)[1] += (SRCA)[1] * (SRCB)[1]; \
+ (DST)[2] += (SRCA)[2] * (SRCB)[2]; \
+ (DST)[3] += (SRCA)[3] * (SRCB)[3]; \
+} while (0)
+
+/** In-place scalar multiplication and addition */
+#define ACC_SCALE_SCALAR_4V( DST, S, SRCB ) \
+do { \
+ (DST)[0] += S * (SRCB)[0]; \
+ (DST)[1] += S * (SRCB)[1]; \
+ (DST)[2] += S * (SRCB)[2]; \
+ (DST)[3] += S * (SRCB)[3]; \
+} while (0)
+
+/** Scalar multiplication */
+#define SCALE_SCALAR_4V( DST, S, SRCB ) \
+do { \
+ (DST)[0] = S * (SRCB)[0]; \
+ (DST)[1] = S * (SRCB)[1]; \
+ (DST)[2] = S * (SRCB)[2]; \
+ (DST)[3] = S * (SRCB)[3]; \
+} while (0)
+
+/** In-place scalar multiplication */
+#define SELF_SCALE_SCALAR_4V( DST, S ) \
+do { \
+ (DST)[0] *= S; \
+ (DST)[1] *= S; \
+ (DST)[2] *= S; \
+ (DST)[3] *= S; \
+} while (0)
+
+/*@}*/
+
+
+/**********************************************************************/
+/** \name 3-element vector operations*/
+/*@{*/
+
+/** Zero */
+#define ZERO_3V( DST ) (DST)[0] = (DST)[1] = (DST)[2] = 0
+
+/** Test for equality */
+#define TEST_EQ_3V(a,b) \
+ ((a)[0] == (b)[0] && \
+ (a)[1] == (b)[1] && \
+ (a)[2] == (b)[2])
+
+/** Copy a 3-element vector */
+#define COPY_3V( DST, SRC ) \
+do { \
+ (DST)[0] = (SRC)[0]; \
+ (DST)[1] = (SRC)[1]; \
+ (DST)[2] = (SRC)[2]; \
+} while (0)
+
+/** Copy a 3-element vector with cast */
+#define COPY_3V_CAST( DST, SRC, CAST ) \
+do { \
+ (DST)[0] = (CAST)(SRC)[0]; \
+ (DST)[1] = (CAST)(SRC)[1]; \
+ (DST)[2] = (CAST)(SRC)[2]; \
+} while (0)
+
+/** Copy a 3-element float vector */
+#define COPY_3FV( DST, SRC ) \
+do { \
+ const GLfloat *_tmp = (SRC); \
+ (DST)[0] = _tmp[0]; \
+ (DST)[1] = _tmp[1]; \
+ (DST)[2] = _tmp[2]; \
+} while (0)
+
+/** Subtraction */
+#define SUB_3V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] - (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] - (SRCB)[1]; \
+ (DST)[2] = (SRCA)[2] - (SRCB)[2]; \
+} while (0)
+
+/** Addition */
+#define ADD_3V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] + (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] + (SRCB)[1]; \
+ (DST)[2] = (SRCA)[2] + (SRCB)[2]; \
+} while (0)
+
+/** Element-wise multiplication */
+#define SCALE_3V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] * (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] * (SRCB)[1]; \
+ (DST)[2] = (SRCA)[2] * (SRCB)[2]; \
+} while (0)
+
+/** In-place element-wise multiplication */
+#define SELF_SCALE_3V( DST, SRC ) \
+do { \
+ (DST)[0] *= (SRC)[0]; \
+ (DST)[1] *= (SRC)[1]; \
+ (DST)[2] *= (SRC)[2]; \
+} while (0)
+
+/** In-place addition */
+#define ACC_3V( DST, SRC ) \
+do { \
+ (DST)[0] += (SRC)[0]; \
+ (DST)[1] += (SRC)[1]; \
+ (DST)[2] += (SRC)[2]; \
+} while (0)
+
+/** Element-wise multiplication and addition */
+#define ACC_SCALE_3V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] += (SRCA)[0] * (SRCB)[0]; \
+ (DST)[1] += (SRCA)[1] * (SRCB)[1]; \
+ (DST)[2] += (SRCA)[2] * (SRCB)[2]; \
+} while (0)
+
+/** Scalar multiplication */
+#define SCALE_SCALAR_3V( DST, S, SRCB ) \
+do { \
+ (DST)[0] = S * (SRCB)[0]; \
+ (DST)[1] = S * (SRCB)[1]; \
+ (DST)[2] = S * (SRCB)[2]; \
+} while (0)
+
+/** In-place scalar multiplication and addition */
+#define ACC_SCALE_SCALAR_3V( DST, S, SRCB ) \
+do { \
+ (DST)[0] += S * (SRCB)[0]; \
+ (DST)[1] += S * (SRCB)[1]; \
+ (DST)[2] += S * (SRCB)[2]; \
+} while (0)
+
+/** In-place scalar multiplication */
+#define SELF_SCALE_SCALAR_3V( DST, S ) \
+do { \
+ (DST)[0] *= S; \
+ (DST)[1] *= S; \
+ (DST)[2] *= S; \
+} while (0)
+
+/** In-place scalar addition */
+#define ACC_SCALAR_3V( DST, S ) \
+do { \
+ (DST)[0] += S; \
+ (DST)[1] += S; \
+ (DST)[2] += S; \
+} while (0)
+
+/** Assignment */
+#define ASSIGN_3V( V, V0, V1, V2 ) \
+do { \
+ V[0] = V0; \
+ V[1] = V1; \
+ V[2] = V2; \
+} while(0)
+
+/*@}*/
+
+
+/**********************************************************************/
+/** \name 2-element vector operations*/
+/*@{*/
+
+/** Zero */
+#define ZERO_2V( DST ) (DST)[0] = (DST)[1] = 0
+
+/** Copy a 2-element vector */
+#define COPY_2V( DST, SRC ) \
+do { \
+ (DST)[0] = (SRC)[0]; \
+ (DST)[1] = (SRC)[1]; \
+} while (0)
+
+/** Copy a 2-element vector with cast */
+#define COPY_2V_CAST( DST, SRC, CAST ) \
+do { \
+ (DST)[0] = (CAST)(SRC)[0]; \
+ (DST)[1] = (CAST)(SRC)[1]; \
+} while (0)
+
+/** Copy a 2-element float vector */
+#define COPY_2FV( DST, SRC ) \
+do { \
+ const GLfloat *_tmp = (SRC); \
+ (DST)[0] = _tmp[0]; \
+ (DST)[1] = _tmp[1]; \
+} while (0)
+
+/** Subtraction */
+#define SUB_2V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] - (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] - (SRCB)[1]; \
+} while (0)
+
+/** Addition */
+#define ADD_2V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] + (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] + (SRCB)[1]; \
+} while (0)
+
+/** Element-wise multiplication */
+#define SCALE_2V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] = (SRCA)[0] * (SRCB)[0]; \
+ (DST)[1] = (SRCA)[1] * (SRCB)[1]; \
+} while (0)
+
+/** In-place addition */
+#define ACC_2V( DST, SRC ) \
+do { \
+ (DST)[0] += (SRC)[0]; \
+ (DST)[1] += (SRC)[1]; \
+} while (0)
+
+/** Element-wise multiplication and addition */
+#define ACC_SCALE_2V( DST, SRCA, SRCB ) \
+do { \
+ (DST)[0] += (SRCA)[0] * (SRCB)[0]; \
+ (DST)[1] += (SRCA)[1] * (SRCB)[1]; \
+} while (0)
+
+/** Scalar multiplication */
+#define SCALE_SCALAR_2V( DST, S, SRCB ) \
+do { \
+ (DST)[0] = S * (SRCB)[0]; \
+ (DST)[1] = S * (SRCB)[1]; \
+} while (0)
+
+/** In-place scalar multiplication and addition */
+#define ACC_SCALE_SCALAR_2V( DST, S, SRCB ) \
+do { \
+ (DST)[0] += S * (SRCB)[0]; \
+ (DST)[1] += S * (SRCB)[1]; \
+} while (0)
+
+/** In-place scalar multiplication */
+#define SELF_SCALE_SCALAR_2V( DST, S ) \
+do { \
+ (DST)[0] *= S; \
+ (DST)[1] *= S; \
+} while (0)
+
+/** In-place scalar addition */
+#define ACC_SCALAR_2V( DST, S ) \
+do { \
+ (DST)[0] += S; \
+ (DST)[1] += S; \
+} while (0)
+
+/** Assign scalars to a 2-element vector */
+#define ASSIGN_2V( V, V0, V1 ) \
+do { \
+ V[0] = V0; \
+ V[1] = V1; \
+} while(0)
+
+/*@}*/
+
+/** Copy \p sz elements into a homogeneous (4-element) vector, giving
+ * default values to the remaining components.
+ * The default values are chosen based on \p type.
+ */
+static inline void
+COPY_CLEAN_4V_TYPE_AS_UNION(fi_type dst[4], int sz, const fi_type src[4],
+ GLenum type)
+{
+ switch (type) {
+ case GL_FLOAT:
+ ASSIGN_4V(dst, FLOAT_AS_UNION(0), FLOAT_AS_UNION(0),
+ FLOAT_AS_UNION(0), FLOAT_AS_UNION(1));
+ break;
+ case GL_INT:
+ ASSIGN_4V(dst, INT_AS_UNION(0), INT_AS_UNION(0),
+ INT_AS_UNION(0), INT_AS_UNION(1));
+ break;
+ case GL_UNSIGNED_INT:
+ ASSIGN_4V(dst, UINT_AS_UNION(0), UINT_AS_UNION(0),
+ UINT_AS_UNION(0), UINT_AS_UNION(1));
+ break;
+ default:
+ ASSIGN_4V(dst, FLOAT_AS_UNION(0), FLOAT_AS_UNION(0),
+ FLOAT_AS_UNION(0), FLOAT_AS_UNION(1)); /* silence warnings */
+ assert(!"Unexpected type in COPY_CLEAN_4V_TYPE_AS_UNION macro");
+ }
+ COPY_SZ_4V(dst, sz, src);
+}
+
+/** \name Linear interpolation functions */
+/*@{*/
+
+static inline GLfloat
+LINTERP(GLfloat t, GLfloat out, GLfloat in)
+{
+ return out + t * (in - out);
+}
+
+static inline void
+INTERP_3F(GLfloat t, GLfloat dst[3], const GLfloat out[3], const GLfloat in[3])
+{
+ dst[0] = LINTERP( t, out[0], in[0] );
+ dst[1] = LINTERP( t, out[1], in[1] );
+ dst[2] = LINTERP( t, out[2], in[2] );
+}
+
+static inline void
+INTERP_4F(GLfloat t, GLfloat dst[4], const GLfloat out[4], const GLfloat in[4])
+{
+ dst[0] = LINTERP( t, out[0], in[0] );
+ dst[1] = LINTERP( t, out[1], in[1] );
+ dst[2] = LINTERP( t, out[2], in[2] );
+ dst[3] = LINTERP( t, out[3], in[3] );
+}
+
+/*@}*/
+
+
+
+static inline unsigned
+minify(unsigned value, unsigned levels)
+{
+ return MAX2(1, value >> levels);
+}
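+
+/* Example: minify(13, 2) == MAX2(1, 13 >> 2) == 3; a mipmap dimension never
+ * reaches 0. */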
+
+
+/** Cross product of two 3-element vectors */
+static inline void
+CROSS3(GLfloat n[3], const GLfloat u[3], const GLfloat v[3])
+{
+ n[0] = u[1] * v[2] - u[2] * v[1];
+ n[1] = u[2] * v[0] - u[0] * v[2];
+ n[2] = u[0] * v[1] - u[1] * v[0];
+}
+
+
+/** Dot product of two 2-element vectors */
+static inline GLfloat
+DOT2(const GLfloat a[2], const GLfloat b[2])
+{
+ return a[0] * b[0] + a[1] * b[1];
+}
+
+static inline GLfloat
+DOT3(const GLfloat a[3], const GLfloat b[3])
+{
+ return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
+}
+
+static inline GLfloat
+DOT4(const GLfloat a[4], const GLfloat b[4])
+{
+ return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3];
+}
+
+
+static inline GLfloat
+LEN_SQUARED_3FV(const GLfloat v[3])
+{
+ return DOT3(v, v);
+}
+
+static inline GLfloat
+LEN_SQUARED_2FV(const GLfloat v[2])
+{
+ return DOT2(v, v);
+}
+
+
+static inline GLfloat
+LEN_3FV(const GLfloat v[3])
+{
+ return sqrtf(LEN_SQUARED_3FV(v));
+}
+
+static inline GLfloat
+LEN_2FV(const GLfloat v[2])
+{
+ return sqrtf(LEN_SQUARED_2FV(v));
+}
+
+
+/* Normalize a 3-element vector to unit length. */
+static inline void
+NORMALIZE_3FV(GLfloat v[3])
+{
+ GLfloat len = (GLfloat) LEN_SQUARED_3FV(v);
+ if (len) {
+ len = 1.0f / sqrtf(len);
+ v[0] *= len;
+ v[1] *= len;
+ v[2] *= len;
+ }
+}
+
+
+/** Test whether two floats have opposite signs */
+static inline GLboolean
+DIFFERENT_SIGNS(GLfloat x, GLfloat y)
+{
+#ifdef _MSC_VER
+#pragma warning( push )
+#pragma warning( disable : 6334 ) /* sizeof operator applied to an expression with an operator may yield unexpected results */
+#endif
+ return signbit(x) != signbit(y);
+#ifdef _MSC_VER
+#pragma warning( pop )
+#endif
+}
+
+
+/** casts to silence warnings with some compilers */
+#define ENUM_TO_INT(E) ((GLint)(E))
+#define ENUM_TO_FLOAT(E) ((GLfloat)(GLint)(E))
+#define ENUM_TO_DOUBLE(E) ((GLdouble)(GLint)(E))
+#define ENUM_TO_BOOLEAN(E) ((E) ? GL_TRUE : GL_FALSE)
+
+
+/* Stringify */
+#define STRINGIFY(x) #x
+
+/*
+ * For GL_ARB_vertex_buffer_object we need to treat vertex array pointers
+ * as offsets into buffer stores. Since the vertex array pointer and
+ * buffer store pointer are both pointers and we need to add them, we use
+ * this macro.
+ * Both pointers/offsets are expressed in bytes.
+ */
+#define ADD_POINTERS(A, B) ( (GLubyte *) (A) + (uintptr_t) (B) )
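+
+/* Example (illustrative): if a buffer object is mapped at `base` and an
+ * attribute lives at byte offset `ofs`, its element address is
+ * ADD_POINTERS(base, ofs). */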
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/menums.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/menums.h
new file mode 100644
index 0000000000..79c14da5ea
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/menums.h
@@ -0,0 +1,189 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2018 Advanced Micro Devices, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file menums.h
+ * Often used definitions and enums.
+ */
+
+#ifndef MENUMS_H
+#define MENUMS_H
+
+#include "util/macros.h"
+
+/**
+ * Enum for the OpenGL APIs we know about and may support.
+ *
+ * NOTE: This must match the api_enum table in
+ * src/mesa/main/get_hash_generator.py
+ */
+typedef enum
+{
+ API_OPENGL_COMPAT, /* legacy / compatibility contexts */
+ API_OPENGLES,
+ API_OPENGLES2,
+ API_OPENGL_CORE,
+ API_OPENGL_LAST = API_OPENGL_CORE
+} gl_api;
+
+/**
+ * An index for each type of texture object. These correspond to the GL
+ * texture target enums, such as GL_TEXTURE_2D, GL_TEXTURE_CUBE_MAP, etc.
+ * Note: the order is from highest priority to lowest priority.
+ */
+typedef enum
+{
+ TEXTURE_2D_MULTISAMPLE_INDEX,
+ TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX,
+ TEXTURE_CUBE_ARRAY_INDEX,
+ TEXTURE_BUFFER_INDEX,
+ TEXTURE_2D_ARRAY_INDEX,
+ TEXTURE_1D_ARRAY_INDEX,
+ TEXTURE_EXTERNAL_INDEX,
+ TEXTURE_CUBE_INDEX,
+ TEXTURE_3D_INDEX,
+ TEXTURE_RECT_INDEX,
+ TEXTURE_2D_INDEX,
+ TEXTURE_1D_INDEX,
+ NUM_TEXTURE_TARGETS
+} gl_texture_index;
+
+/**
+ * Remapped color logical operations
+ *
+ * With the exception of NVIDIA hardware, which consumes the OpenGL enumerants
+ * directly, everything wants this mapping of color logical operations.
+ *
+ * Fun fact: These values are just the bit-reverse of the low-nibble of the GL
+ * enumerant values (i.e., `GL_NOOP & 0x0f` is `0b0101` while
+ * \c COLOR_LOGICOP_NOOP is `0b1010`).
+ *
+ * Fun fact #2: These values are just an encoding of the operation as a table
+ * of bit values. The result of the logic op is:
+ *
+ * result_bit = (logic_op >> (2 * src_bit + dst_bit)) & 1
+ *
+ * For the GL enums, the result is:
+ *
+ * result_bit = logic_op & (1 << (2 * src_bit + dst_bit))
+ */
+enum PACKED gl_logicop_mode {
+ COLOR_LOGICOP_CLEAR = 0,
+ COLOR_LOGICOP_NOR = 1,
+ COLOR_LOGICOP_AND_INVERTED = 2,
+ COLOR_LOGICOP_COPY_INVERTED = 3,
+ COLOR_LOGICOP_AND_REVERSE = 4,
+ COLOR_LOGICOP_INVERT = 5,
+ COLOR_LOGICOP_XOR = 6,
+ COLOR_LOGICOP_NAND = 7,
+ COLOR_LOGICOP_AND = 8,
+ COLOR_LOGICOP_EQUIV = 9,
+ COLOR_LOGICOP_NOOP = 10,
+ COLOR_LOGICOP_OR_INVERTED = 11,
+ COLOR_LOGICOP_COPY = 12,
+ COLOR_LOGICOP_OR_REVERSE = 13,
+ COLOR_LOGICOP_OR = 14,
+ COLOR_LOGICOP_SET = 15
+};
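+
+/*
+ * Illustrative sketch (not part of upstream Mesa) of the two "fun facts"
+ * above. _demo_logicop_from_gl and _demo_logicop_apply are hypothetical
+ * helpers, not Mesa API.
+ */
+static inline enum gl_logicop_mode
+_demo_logicop_from_gl(unsigned gl_op)
+{
+   const unsigned nib = gl_op & 0x0f;   /* low nibble of the GL enum */
+   const unsigned rev = ((nib & 0x1) << 3) | ((nib & 0x2) << 1) |
+                        ((nib & 0x4) >> 1) | ((nib & 0x8) >> 3);
+   return (enum gl_logicop_mode) rev;   /* e.g. GL_NOOP (0x1505) -> 10 */
+}
+
+static inline unsigned
+_demo_logicop_apply(enum gl_logicop_mode op, unsigned src_bit, unsigned dst_bit)
+{
+   /* Per fun fact #2: the enum value is a 4-entry truth table. */
+   return ((unsigned) op >> (2 * src_bit + dst_bit)) & 1;
+}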
+
+/**
+ * Indexes for all renderbuffers
+ */
+typedef enum
+{
+ /* the four standard color buffers */
+ BUFFER_FRONT_LEFT,
+ BUFFER_BACK_LEFT,
+ BUFFER_FRONT_RIGHT,
+ BUFFER_BACK_RIGHT,
+ BUFFER_DEPTH,
+ BUFFER_STENCIL,
+ BUFFER_ACCUM,
+ /* optional aux buffer */
+ BUFFER_AUX0,
+ /* generic renderbuffers */
+ BUFFER_COLOR0,
+ BUFFER_COLOR1,
+ BUFFER_COLOR2,
+ BUFFER_COLOR3,
+ BUFFER_COLOR4,
+ BUFFER_COLOR5,
+ BUFFER_COLOR6,
+ BUFFER_COLOR7,
+ BUFFER_COUNT,
+ BUFFER_NONE = -1,
+} gl_buffer_index;
+
+typedef enum
+{
+ MAP_USER,
+ MAP_INTERNAL,
+ MAP_COUNT
+} gl_map_buffer_index;
+
+/** @{
+ *
+ * This is a mapping of the GL_ARB_debug_output/GL_KHR_debug enums
+ * to small enums suitable for use as array indices.
+ */
+
+enum mesa_debug_source
+{
+ MESA_DEBUG_SOURCE_API,
+ MESA_DEBUG_SOURCE_WINDOW_SYSTEM,
+ MESA_DEBUG_SOURCE_SHADER_COMPILER,
+ MESA_DEBUG_SOURCE_THIRD_PARTY,
+ MESA_DEBUG_SOURCE_APPLICATION,
+ MESA_DEBUG_SOURCE_OTHER,
+ MESA_DEBUG_SOURCE_COUNT
+};
+
+enum mesa_debug_type
+{
+ MESA_DEBUG_TYPE_ERROR,
+ MESA_DEBUG_TYPE_DEPRECATED,
+ MESA_DEBUG_TYPE_UNDEFINED,
+ MESA_DEBUG_TYPE_PORTABILITY,
+ MESA_DEBUG_TYPE_PERFORMANCE,
+ MESA_DEBUG_TYPE_OTHER,
+ MESA_DEBUG_TYPE_MARKER,
+ MESA_DEBUG_TYPE_PUSH_GROUP,
+ MESA_DEBUG_TYPE_POP_GROUP,
+ MESA_DEBUG_TYPE_COUNT
+};
+
+enum mesa_debug_severity
+{
+ MESA_DEBUG_SEVERITY_LOW,
+ MESA_DEBUG_SEVERITY_MEDIUM,
+ MESA_DEBUG_SEVERITY_HIGH,
+ MESA_DEBUG_SEVERITY_NOTIFICATION,
+ MESA_DEBUG_SEVERITY_COUNT
+};
+
+/** @} */
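+
+/*
+ * Illustrative (hypothetical variable): being dense 0-based enums, these
+ * can index per-category debug state directly, e.g.
+ *    bool enabled[MESA_DEBUG_SOURCE_COUNT]
+ *                [MESA_DEBUG_TYPE_COUNT]
+ *                [MESA_DEBUG_SEVERITY_COUNT];
+ */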
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mesa_private.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mesa_private.h
new file mode 100644
index 0000000000..229a746a80
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mesa_private.h
@@ -0,0 +1,56 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file mesa_private.h
+ * Contains Mesa-internal values.
+ */
+
+#ifndef MESA_PRIVATE_H
+#define MESA_PRIVATE_H
+
+#include "glheader.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Sometimes we treat floats as ints. On x86 systems, moving a float
+ * as an int (thereby using integer registers instead of FP registers) is
+ * a performance win. Typically, this can be done with ordinary casts.
+ * But with gcc's -fstrict-aliasing flag (which defaults to on in gcc 3.0)
+ * these casts generate warnings.
+ * The following union typedef is used to solve that.
+ */
+typedef union { float f; int i; unsigned u; } fi_type;
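+
+/*
+ * Illustrative sketch (not part of upstream Mesa): moving a float's bit
+ * pattern through fi_type instead of casting pointers, which would trip
+ * -fstrict-aliasing warnings. _demo_float_bits is a hypothetical helper.
+ */
+static inline unsigned
+_demo_float_bits(float f)
+{
+   fi_type fi;
+   fi.f = f;      /* write the float member... */
+   return fi.u;   /* ...and read the same bytes back as an unsigned */
+}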
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MESA_PRIVATE_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mtypes.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mtypes.h
new file mode 100644
index 0000000000..4f1e8e52ab
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/mtypes.h
@@ -0,0 +1,5281 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file mtypes.h
+ * Main Mesa data structures.
+ *
+ * Please try to mark derived values with a leading underscore ('_').
+ */
+
+#ifndef MTYPES_H
+#define MTYPES_H
+
+
+#include <stdint.h> /* uint32_t */
+#include <stdbool.h>
+#include "c11/threads.h"
+
+#include "main/glheader.h"
+#include "main/glthread.h"
+#include "main/menums.h"
+#include "main/config.h"
+#include "glapi/glapi.h"
+#include "math/m_matrix.h" /* GLmatrix */
+#include "compiler/shader_enums.h"
+#include "compiler/shader_info.h"
+#include "main/formats.h" /* MESA_FORMAT_COUNT */
+#include "compiler/glsl/list.h"
+#include "util/simple_mtx.h"
+#include "util/u_dynarray.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define GET_COLORMASK_BIT(mask, buf, chan) (((mask) >> (4 * (buf) + (chan))) & 0x1)
+#define GET_COLORMASK(mask, buf) (((mask) >> (4 * (buf))) & 0xf)
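+
+/*
+ * Illustrative example: with 4 mask bits per draw buffer (channels 0..3
+ * being R, G, B, A), the green write-enable of draw buffer 2 sits at bit
+ * 4*2 + 1 == 9, so GET_COLORMASK_BIT(mask, 2, 1) == (mask >> 9) & 0x1,
+ * and GET_COLORMASK(mask, 2) extracts all four bits of that buffer.
+ */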
+
+
+/**
+ * \name Some forward type declarations
+ */
+/*@{*/
+struct _mesa_HashTable;
+struct gl_attrib_node;
+struct gl_list_extensions;
+struct gl_meta_state;
+struct gl_program_cache;
+struct gl_texture_object;
+struct gl_debug_state;
+struct gl_context;
+struct st_context;
+struct gl_uniform_storage;
+struct prog_instruction;
+struct gl_program_parameter_list;
+struct gl_shader_spirv_data;
+struct set;
+struct shader_includes;
+struct vbo_context;
+/*@}*/
+
+
+/** Extra draw modes beyond GL_POINTS, GL_TRIANGLE_FAN, etc */
+#define PRIM_MAX GL_PATCHES
+#define PRIM_OUTSIDE_BEGIN_END (PRIM_MAX + 1)
+#define PRIM_UNKNOWN (PRIM_MAX + 2)
+
+/**
+ * Determine if the given gl_varying_slot appears in the fragment shader.
+ */
+static inline GLboolean
+_mesa_varying_slot_in_fs(gl_varying_slot slot)
+{
+ switch (slot) {
+ case VARYING_SLOT_PSIZ:
+ case VARYING_SLOT_BFC0:
+ case VARYING_SLOT_BFC1:
+ case VARYING_SLOT_EDGE:
+ case VARYING_SLOT_CLIP_VERTEX:
+ case VARYING_SLOT_LAYER:
+ case VARYING_SLOT_TESS_LEVEL_OUTER:
+ case VARYING_SLOT_TESS_LEVEL_INNER:
+ case VARYING_SLOT_BOUNDING_BOX0:
+ case VARYING_SLOT_BOUNDING_BOX1:
+ case VARYING_SLOT_VIEWPORT_MASK:
+ return GL_FALSE;
+ default:
+ return GL_TRUE;
+ }
+}
+
+/**
+ * Bit flags for all renderbuffers
+ */
+#define BUFFER_BIT_FRONT_LEFT (1 << BUFFER_FRONT_LEFT)
+#define BUFFER_BIT_BACK_LEFT (1 << BUFFER_BACK_LEFT)
+#define BUFFER_BIT_FRONT_RIGHT (1 << BUFFER_FRONT_RIGHT)
+#define BUFFER_BIT_BACK_RIGHT (1 << BUFFER_BACK_RIGHT)
+#define BUFFER_BIT_AUX0 (1 << BUFFER_AUX0)
+#define BUFFER_BIT_AUX1 (1 << BUFFER_AUX1)
+#define BUFFER_BIT_AUX2 (1 << BUFFER_AUX2)
+#define BUFFER_BIT_AUX3 (1 << BUFFER_AUX3)
+#define BUFFER_BIT_DEPTH (1 << BUFFER_DEPTH)
+#define BUFFER_BIT_STENCIL (1 << BUFFER_STENCIL)
+#define BUFFER_BIT_ACCUM (1 << BUFFER_ACCUM)
+#define BUFFER_BIT_COLOR0 (1 << BUFFER_COLOR0)
+#define BUFFER_BIT_COLOR1 (1 << BUFFER_COLOR1)
+#define BUFFER_BIT_COLOR2 (1 << BUFFER_COLOR2)
+#define BUFFER_BIT_COLOR3 (1 << BUFFER_COLOR3)
+#define BUFFER_BIT_COLOR4 (1 << BUFFER_COLOR4)
+#define BUFFER_BIT_COLOR5 (1 << BUFFER_COLOR5)
+#define BUFFER_BIT_COLOR6 (1 << BUFFER_COLOR6)
+#define BUFFER_BIT_COLOR7 (1 << BUFFER_COLOR7)
+
+/**
+ * Mask of all the color buffer bits (but not accum).
+ */
+#define BUFFER_BITS_COLOR (BUFFER_BIT_FRONT_LEFT | \
+ BUFFER_BIT_BACK_LEFT | \
+ BUFFER_BIT_FRONT_RIGHT | \
+ BUFFER_BIT_BACK_RIGHT | \
+ BUFFER_BIT_AUX0 | \
+ BUFFER_BIT_COLOR0 | \
+ BUFFER_BIT_COLOR1 | \
+ BUFFER_BIT_COLOR2 | \
+ BUFFER_BIT_COLOR3 | \
+ BUFFER_BIT_COLOR4 | \
+ BUFFER_BIT_COLOR5 | \
+ BUFFER_BIT_COLOR6 | \
+ BUFFER_BIT_COLOR7)
+
+/* Mask of bits for depth+stencil buffers */
+#define BUFFER_BITS_DEPTH_STENCIL (BUFFER_BIT_DEPTH | BUFFER_BIT_STENCIL)
+
+/**
+ * Framebuffer configuration (aka visual / pixelformat)
+ * Note: some of these fields should be boolean, but it appears that
+ * code in drivers/dri/common/util.c requires int-sized fields.
+ */
+struct gl_config
+{
+ GLboolean floatMode;
+ GLuint doubleBufferMode;
+ GLuint stereoMode;
+
+ GLint redBits, greenBits, blueBits, alphaBits; /* bits per comp */
+ GLuint redMask, greenMask, blueMask, alphaMask;
+ GLint redShift, greenShift, blueShift, alphaShift;
+ GLint rgbBits; /* total bits for rgb */
+
+ GLint accumRedBits, accumGreenBits, accumBlueBits, accumAlphaBits;
+ GLint depthBits;
+ GLint stencilBits;
+
+ GLint numAuxBuffers;
+
+ GLint level;
+
+ /* EXT_visual_rating / GLX 1.2 */
+ GLint visualRating;
+
+ /* EXT_visual_info / GLX 1.2 */
+ GLint transparentPixel;
+ /* colors are floats scaled to ints */
+ GLint transparentRed, transparentGreen, transparentBlue, transparentAlpha;
+ GLint transparentIndex;
+
+ /* ARB_multisample / SGIS_multisample */
+ GLint sampleBuffers;
+ GLuint samples;
+
+ /* SGIX_pbuffer / GLX 1.3 */
+ GLint maxPbufferWidth;
+ GLint maxPbufferHeight;
+ GLint maxPbufferPixels;
+ GLint optimalPbufferWidth; /* Only for SGIX_pbuffer. */
+ GLint optimalPbufferHeight; /* Only for SGIX_pbuffer. */
+
+ /* OML_swap_method */
+ GLint swapMethod;
+
+ /* EXT_texture_from_pixmap */
+ GLint bindToTextureRgb;
+ GLint bindToTextureRgba;
+ GLint bindToMipmapTexture;
+ GLint bindToTextureTargets;
+ GLint yInverted;
+
+ /* EXT_framebuffer_sRGB */
+ GLint sRGBCapable;
+
+ /* EGL_KHR_mutable_render_buffer */
+ GLuint mutableRenderBuffer; /* bool */
+};
+
+
+/**
+ * \name Bit flags used for updating material values.
+ */
+/*@{*/
+#define MAT_ATTRIB_FRONT_AMBIENT 0
+#define MAT_ATTRIB_BACK_AMBIENT 1
+#define MAT_ATTRIB_FRONT_DIFFUSE 2
+#define MAT_ATTRIB_BACK_DIFFUSE 3
+#define MAT_ATTRIB_FRONT_SPECULAR 4
+#define MAT_ATTRIB_BACK_SPECULAR 5
+#define MAT_ATTRIB_FRONT_EMISSION 6
+#define MAT_ATTRIB_BACK_EMISSION 7
+#define MAT_ATTRIB_FRONT_SHININESS 8
+#define MAT_ATTRIB_BACK_SHININESS 9
+#define MAT_ATTRIB_FRONT_INDEXES 10
+#define MAT_ATTRIB_BACK_INDEXES 11
+#define MAT_ATTRIB_MAX 12
+
+#define MAT_ATTRIB_AMBIENT(f) (MAT_ATTRIB_FRONT_AMBIENT+(f))
+#define MAT_ATTRIB_DIFFUSE(f) (MAT_ATTRIB_FRONT_DIFFUSE+(f))
+#define MAT_ATTRIB_SPECULAR(f) (MAT_ATTRIB_FRONT_SPECULAR+(f))
+#define MAT_ATTRIB_EMISSION(f) (MAT_ATTRIB_FRONT_EMISSION+(f))
+#define MAT_ATTRIB_SHININESS(f) (MAT_ATTRIB_FRONT_SHININESS+(f))
+#define MAT_ATTRIB_INDEXES(f) (MAT_ATTRIB_FRONT_INDEXES+(f))
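+
+/*
+ * Illustrative: the face parameter f is 0 for front and 1 for back, so
+ * e.g. MAT_ATTRIB_DIFFUSE(1) == MAT_ATTRIB_BACK_DIFFUSE, exploiting the
+ * front/back adjacency of the indices above.
+ */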
+
+#define MAT_BIT_FRONT_AMBIENT (1<<MAT_ATTRIB_FRONT_AMBIENT)
+#define MAT_BIT_BACK_AMBIENT (1<<MAT_ATTRIB_BACK_AMBIENT)
+#define MAT_BIT_FRONT_DIFFUSE (1<<MAT_ATTRIB_FRONT_DIFFUSE)
+#define MAT_BIT_BACK_DIFFUSE (1<<MAT_ATTRIB_BACK_DIFFUSE)
+#define MAT_BIT_FRONT_SPECULAR (1<<MAT_ATTRIB_FRONT_SPECULAR)
+#define MAT_BIT_BACK_SPECULAR (1<<MAT_ATTRIB_BACK_SPECULAR)
+#define MAT_BIT_FRONT_EMISSION (1<<MAT_ATTRIB_FRONT_EMISSION)
+#define MAT_BIT_BACK_EMISSION (1<<MAT_ATTRIB_BACK_EMISSION)
+#define MAT_BIT_FRONT_SHININESS (1<<MAT_ATTRIB_FRONT_SHININESS)
+#define MAT_BIT_BACK_SHININESS (1<<MAT_ATTRIB_BACK_SHININESS)
+#define MAT_BIT_FRONT_INDEXES (1<<MAT_ATTRIB_FRONT_INDEXES)
+#define MAT_BIT_BACK_INDEXES (1<<MAT_ATTRIB_BACK_INDEXES)
+
+
+#define FRONT_MATERIAL_BITS (MAT_BIT_FRONT_EMISSION | \
+ MAT_BIT_FRONT_AMBIENT | \
+ MAT_BIT_FRONT_DIFFUSE | \
+ MAT_BIT_FRONT_SPECULAR | \
+ MAT_BIT_FRONT_SHININESS | \
+ MAT_BIT_FRONT_INDEXES)
+
+#define BACK_MATERIAL_BITS (MAT_BIT_BACK_EMISSION | \
+ MAT_BIT_BACK_AMBIENT | \
+ MAT_BIT_BACK_DIFFUSE | \
+ MAT_BIT_BACK_SPECULAR | \
+ MAT_BIT_BACK_SHININESS | \
+ MAT_BIT_BACK_INDEXES)
+
+#define ALL_MATERIAL_BITS (FRONT_MATERIAL_BITS | BACK_MATERIAL_BITS)
+/*@}*/
+
+
+/**
+ * Material state.
+ */
+struct gl_material
+{
+ GLfloat Attrib[MAT_ATTRIB_MAX][4];
+};
+
+
+/**
+ * Light state flags.
+ */
+/*@{*/
+#define LIGHT_SPOT 0x1
+#define LIGHT_LOCAL_VIEWER 0x2
+#define LIGHT_POSITIONAL 0x4
+#define LIGHT_NEED_VERTICES (LIGHT_POSITIONAL|LIGHT_LOCAL_VIEWER)
+/*@}*/
+
+
+/**
+ * Light source state.
+ */
+struct gl_light
+{
+ GLfloat Ambient[4]; /**< ambient color */
+ GLfloat Diffuse[4]; /**< diffuse color */
+ GLfloat Specular[4]; /**< specular color */
+ GLfloat EyePosition[4]; /**< position in eye coordinates */
+ GLfloat SpotDirection[4]; /**< spotlight direction in eye coordinates */
+ GLfloat SpotExponent;
+ GLfloat SpotCutoff; /**< in degrees */
+ GLfloat _CosCutoff; /**< = MAX(0, cos(SpotCutoff)) */
+ GLfloat ConstantAttenuation;
+ GLfloat LinearAttenuation;
+ GLfloat QuadraticAttenuation;
+ GLboolean Enabled; /**< On/off flag */
+
+ /**
+ * \name Derived fields
+ */
+ /*@{*/
+ GLbitfield _Flags; /**< Mask of LIGHT_x bits defined above */
+
+ GLfloat _Position[4]; /**< position in eye/obj coordinates */
+ GLfloat _VP_inf_norm[3]; /**< Norm direction to infinite light */
+ GLfloat _h_inf_norm[3]; /**< Norm( _VP_inf_norm + <0,0,1> ) */
+ GLfloat _NormSpotDirection[4]; /**< normalized spotlight direction */
+ GLfloat _VP_inf_spot_attenuation;
+
+ GLfloat _MatAmbient[2][3]; /**< material ambient * light ambient */
+ GLfloat _MatDiffuse[2][3]; /**< material diffuse * light diffuse */
+ GLfloat _MatSpecular[2][3]; /**< material spec * light specular */
+ /*@}*/
+};
+
+
+/**
+ * Light model state.
+ */
+struct gl_lightmodel
+{
+ GLfloat Ambient[4]; /**< ambient color */
+ GLboolean LocalViewer; /**< Local (or infinite) view point? */
+ GLboolean TwoSide; /**< Two (or one) sided lighting? */
+ GLenum16 ColorControl; /**< either GL_SINGLE_COLOR
+ or GL_SEPARATE_SPECULAR_COLOR */
+};
+
+
+/**
+ * Accumulation buffer attribute group (GL_ACCUM_BUFFER_BIT)
+ */
+struct gl_accum_attrib
+{
+ GLfloat ClearColor[4]; /**< Accumulation buffer clear color */
+};
+
+
+/**
+ * Used for storing clear color, texture border color, etc.
+ * The float values are typically unclamped.
+ */
+union gl_color_union
+{
+ GLfloat f[4];
+ GLint i[4];
+ GLuint ui[4];
+};
+
+
+/**
+ * Color buffer attribute group (GL_COLOR_BUFFER_BIT).
+ */
+struct gl_colorbuffer_attrib
+{
+ GLuint ClearIndex; /**< Index for glClear */
+ union gl_color_union ClearColor; /**< Color for glClear, unclamped */
+ GLuint IndexMask; /**< Color index write mask */
+
+ /** 4 colormask bits per draw buffer, max 8 draw buffers. 4*8 = 32 bits */
+ GLbitfield ColorMask;
+
+ GLenum16 DrawBuffer[MAX_DRAW_BUFFERS]; /**< Which buffer to draw into */
+
+ /**
+ * \name alpha testing
+ */
+ /*@{*/
+ GLboolean AlphaEnabled; /**< Alpha test enabled flag */
+ GLenum16 AlphaFunc; /**< Alpha test function */
+ GLfloat AlphaRefUnclamped;
+ GLclampf AlphaRef; /**< Alpha reference value */
+ /*@}*/
+
+ /**
+ * \name Blending
+ */
+ /*@{*/
+ GLbitfield BlendEnabled; /**< Per-buffer blend enable flags */
+
+ /* NOTE: this does _not_ depend on fragment clamping or any other clamping
+ * control, only on the fixed-pointness of the render target.
+ * The query does however depend on fragment color clamping.
+ */
+ GLfloat BlendColorUnclamped[4]; /**< Blending color */
+ GLfloat BlendColor[4]; /**< Blending color */
+
+ struct
+ {
+ GLenum16 SrcRGB; /**< RGB blend source term */
+ GLenum16 DstRGB; /**< RGB blend dest term */
+ GLenum16 SrcA; /**< Alpha blend source term */
+ GLenum16 DstA; /**< Alpha blend dest term */
+ GLenum16 EquationRGB; /**< GL_ADD, GL_SUBTRACT, etc. */
+ GLenum16 EquationA; /**< GL_ADD, GL_SUBTRACT, etc. */
+ /**
+ * Set if any blend factor uses SRC1. Computed at the time blend factors
+ * get set.
+ */
+ GLboolean _UsesDualSrc;
+ } Blend[MAX_DRAW_BUFFERS];
+ /** Are the blend func terms currently different for each buffer/target? */
+ GLboolean _BlendFuncPerBuffer;
+ /** Are the blend equations currently different for each buffer/target? */
+ GLboolean _BlendEquationPerBuffer;
+
+ /**
+ * Which advanced blending mode is in use (or BLEND_NONE).
+ *
+ * KHR_blend_equation_advanced only allows advanced blending with a single
+ * draw buffer, and NVX_blend_equation_advanced_multi_draw_buffer still
+ * requires all draw buffers to match, so we only need a single value.
+ */
+ enum gl_advanced_blend_mode _AdvancedBlendMode;
+
+ /** Coherency requested via glEnable(GL_BLEND_ADVANCED_COHERENT_KHR)? */
+ bool BlendCoherent;
+ /*@}*/
+
+ /**
+ * \name Logic op
+ */
+ /*@{*/
+ GLboolean IndexLogicOpEnabled; /**< Color index logic op enabled flag */
+ GLboolean ColorLogicOpEnabled; /**< RGBA logic op enabled flag */
+ GLenum16 LogicOp; /**< Logic operator */
+ enum gl_logicop_mode _LogicOp;
+ /*@}*/
+
+ GLboolean DitherFlag; /**< Dither enable flag */
+
+ GLboolean _ClampFragmentColor; /**< with GL_FIXED_ONLY_ARB resolved */
+ GLenum16 ClampFragmentColor; /**< GL_TRUE, GL_FALSE or GL_FIXED_ONLY_ARB */
+ GLenum16 ClampReadColor; /**< GL_TRUE, GL_FALSE or GL_FIXED_ONLY_ARB */
+
+ GLboolean sRGBEnabled; /**< Framebuffer sRGB blending/updating requested */
+};
+
+
+/**
+ * Vertex format to describe a vertex element.
+ */
+struct gl_vertex_format
+{
+ GLenum16 Type; /**< datatype: GL_FLOAT, GL_INT, etc */
+ GLenum16 Format; /**< default: GL_RGBA, but may be GL_BGRA */
+ enum pipe_format _PipeFormat:16; /**< pipe_format for Gallium */
+ GLubyte Size:5; /**< components per element (1,2,3,4) */
+ GLubyte Normalized:1; /**< GL_ARB_vertex_program */
+ GLubyte Integer:1; /**< Integer-valued? */
+ GLubyte Doubles:1; /**< double values are not converted to floats */
+ GLubyte _ElementSize; /**< Size of each element in bytes */
+};
+
+
+/**
+ * Current attribute group (GL_CURRENT_BIT).
+ */
+struct gl_current_attrib
+{
+ /**
+ * \name Current vertex attributes (color, texcoords, etc).
+ * \note Values are valid only after FLUSH_VERTICES has been called.
+ * \note Index and Edgeflag current values are stored as floats in the
+ * SIX and SEVEN attribute slots.
+ * \note We need double storage for 64-bit vertex attributes
+ */
+ GLfloat Attrib[VERT_ATTRIB_MAX][4*2];
+
+ /**
+ * \name Current raster position attributes (always up to date after a
+ * glRasterPos call).
+ */
+ GLfloat RasterPos[4];
+ GLfloat RasterDistance;
+ GLfloat RasterColor[4];
+ GLfloat RasterSecondaryColor[4];
+ GLfloat RasterTexCoords[MAX_TEXTURE_COORD_UNITS][4];
+ GLboolean RasterPosValid;
+};
+
+
+/**
+ * Depth buffer attribute group (GL_DEPTH_BUFFER_BIT).
+ */
+struct gl_depthbuffer_attrib
+{
+ GLenum16 Func; /**< Function for depth buffer compare */
+ GLclampd Clear; /**< Value to clear depth buffer to */
+ GLboolean Test; /**< Depth buffering enabled flag */
+ GLboolean Mask; /**< Depth buffer writable? */
+ GLboolean BoundsTest; /**< GL_EXT_depth_bounds_test */
+ GLfloat BoundsMin, BoundsMax;/**< GL_EXT_depth_bounds_test */
+};
+
+
+/**
+ * Evaluator attribute group (GL_EVAL_BIT).
+ */
+struct gl_eval_attrib
+{
+ /**
+ * \name Enable bits
+ */
+ /*@{*/
+ GLboolean Map1Color4;
+ GLboolean Map1Index;
+ GLboolean Map1Normal;
+ GLboolean Map1TextureCoord1;
+ GLboolean Map1TextureCoord2;
+ GLboolean Map1TextureCoord3;
+ GLboolean Map1TextureCoord4;
+ GLboolean Map1Vertex3;
+ GLboolean Map1Vertex4;
+ GLboolean Map2Color4;
+ GLboolean Map2Index;
+ GLboolean Map2Normal;
+ GLboolean Map2TextureCoord1;
+ GLboolean Map2TextureCoord2;
+ GLboolean Map2TextureCoord3;
+ GLboolean Map2TextureCoord4;
+ GLboolean Map2Vertex3;
+ GLboolean Map2Vertex4;
+ GLboolean AutoNormal;
+ /*@}*/
+
+ /**
+ * \name Map Grid endpoints and divisions and calculated du values
+ */
+ /*@{*/
+ GLint MapGrid1un;
+ GLfloat MapGrid1u1, MapGrid1u2, MapGrid1du;
+ GLint MapGrid2un, MapGrid2vn;
+ GLfloat MapGrid2u1, MapGrid2u2, MapGrid2du;
+ GLfloat MapGrid2v1, MapGrid2v2, MapGrid2dv;
+ /*@}*/
+};
+
+
+/**
+ * Compressed fog mode.
+ */
+enum gl_fog_mode
+{
+ FOG_NONE,
+ FOG_LINEAR,
+ FOG_EXP,
+ FOG_EXP2,
+};
+
+
+/**
+ * Fog attribute group (GL_FOG_BIT).
+ */
+struct gl_fog_attrib
+{
+ GLboolean Enabled; /**< Fog enabled flag */
+ GLboolean ColorSumEnabled;
+ uint8_t _PackedMode; /**< Fog mode as 2 bits */
+ uint8_t _PackedEnabledMode; /**< Masked _PackedMode */
+ GLfloat ColorUnclamped[4]; /**< Fog color */
+ GLfloat Color[4]; /**< Fog color */
+ GLfloat Density; /**< Density >= 0.0 */
+ GLfloat Start; /**< Start distance in eye coords */
+ GLfloat End; /**< End distance in eye coords */
+ GLfloat Index; /**< Fog index */
+ GLenum16 Mode; /**< Fog mode */
+ GLenum16 FogCoordinateSource;/**< GL_EXT_fog_coord */
+ GLenum16 FogDistanceMode; /**< GL_NV_fog_distance */
+};
+
+
+/**
+ * Hint attribute group (GL_HINT_BIT).
+ *
+ * Values are always one of GL_FASTEST, GL_NICEST, or GL_DONT_CARE.
+ */
+struct gl_hint_attrib
+{
+ GLenum16 PerspectiveCorrection;
+ GLenum16 PointSmooth;
+ GLenum16 LineSmooth;
+ GLenum16 PolygonSmooth;
+ GLenum16 Fog;
+ GLenum16 TextureCompression; /**< GL_ARB_texture_compression */
+ GLenum16 GenerateMipmap; /**< GL_SGIS_generate_mipmap */
+ GLenum16 FragmentShaderDerivative; /**< GL_ARB_fragment_shader */
+ GLuint MaxShaderCompilerThreads; /**< GL_ARB_parallel_shader_compile */
+};
+
+
+/**
+ * Lighting attribute group (GL_LIGHT_BIT).
+ */
+struct gl_light_attrib
+{
+ struct gl_light Light[MAX_LIGHTS]; /**< Array of light sources */
+ struct gl_lightmodel Model; /**< Lighting model */
+
+ /**
+ * Front and back material values.
+ * Note: must call FLUSH_VERTICES() before using.
+ */
+ struct gl_material Material;
+
+ GLboolean Enabled; /**< Lighting enabled flag */
+ GLboolean ColorMaterialEnabled;
+
+ GLenum16 ShadeModel; /**< GL_FLAT or GL_SMOOTH */
+ GLenum16 ProvokingVertex; /**< GL_EXT_provoking_vertex */
+ GLenum16 ColorMaterialFace; /**< GL_FRONT, BACK or FRONT_AND_BACK */
+ GLenum16 ColorMaterialMode; /**< GL_AMBIENT, GL_DIFFUSE, etc */
+ GLbitfield _ColorMaterialBitmask; /**< bitmask formed from Face and Mode */
+
+
+ GLboolean _ClampVertexColor;
+ GLenum16 ClampVertexColor; /**< GL_TRUE, GL_FALSE, GL_FIXED_ONLY */
+
+ /**
+ * Derived state for optimizations:
+ */
+ /*@{*/
+ GLbitfield _EnabledLights; /**< bitmask containing enabled lights */
+
+ GLboolean _NeedEyeCoords;
+ GLboolean _NeedVertices; /**< Use fast shader? */
+
+ GLfloat _BaseColor[2][3];
+ /*@}*/
+};
+
+
+/**
+ * Line attribute group (GL_LINE_BIT).
+ */
+struct gl_line_attrib
+{
+ GLboolean SmoothFlag; /**< GL_LINE_SMOOTH enabled? */
+ GLboolean StippleFlag; /**< GL_LINE_STIPPLE enabled? */
+ GLushort StipplePattern; /**< Stipple pattern */
+ GLint StippleFactor; /**< Stipple repeat factor */
+ GLfloat Width; /**< Line width */
+};
+
+
+/**
+ * Display list attribute group (GL_LIST_BIT).
+ */
+struct gl_list_attrib
+{
+ GLuint ListBase;
+};
+
+
+/**
+ * Multisample attribute group (GL_MULTISAMPLE_BIT).
+ */
+struct gl_multisample_attrib
+{
+ GLboolean Enabled;
+ GLboolean SampleAlphaToCoverage;
+ GLboolean SampleAlphaToOne;
+ GLboolean SampleCoverage;
+ GLboolean SampleCoverageInvert;
+ GLboolean SampleShading;
+
+ /* ARB_texture_multisample / GL3.2 additions */
+ GLboolean SampleMask;
+
+ GLfloat SampleCoverageValue; /**< In range [0, 1] */
+ GLfloat MinSampleShadingValue; /**< In range [0, 1] */
+
+ /** The GL spec defines this as an array but >32x MSAA is madness */
+ GLbitfield SampleMaskValue;
+
+ /* NV_alpha_to_coverage_dither_control */
+ GLenum SampleAlphaToCoverageDitherControl;
+};
+
+
+/**
+ * A pixelmap (see glPixelMap)
+ */
+struct gl_pixelmap
+{
+ GLint Size;
+ GLfloat Map[MAX_PIXEL_MAP_TABLE];
+};
+
+
+/**
+ * Collection of all pixelmaps
+ */
+struct gl_pixelmaps
+{
+ struct gl_pixelmap RtoR; /**< i.e. GL_PIXEL_MAP_R_TO_R */
+ struct gl_pixelmap GtoG;
+ struct gl_pixelmap BtoB;
+ struct gl_pixelmap AtoA;
+ struct gl_pixelmap ItoR;
+ struct gl_pixelmap ItoG;
+ struct gl_pixelmap ItoB;
+ struct gl_pixelmap ItoA;
+ struct gl_pixelmap ItoI;
+ struct gl_pixelmap StoS;
+};
+
+
+/**
+ * Pixel attribute group (GL_PIXEL_MODE_BIT).
+ */
+struct gl_pixel_attrib
+{
+ GLenum16 ReadBuffer; /**< source buffer for glRead/CopyPixels() */
+
+ /*--- Begin Pixel Transfer State ---*/
+ /* Fields are in the order in which they're applied... */
+
+ /** Scale & Bias (index shift, offset) */
+ /*@{*/
+ GLfloat RedBias, RedScale;
+ GLfloat GreenBias, GreenScale;
+ GLfloat BlueBias, BlueScale;
+ GLfloat AlphaBias, AlphaScale;
+ GLfloat DepthBias, DepthScale;
+ GLint IndexShift, IndexOffset;
+ /*@}*/
+
+ /* Pixel Maps */
+ /* Note: actual pixel maps are not part of this attrib group */
+ GLboolean MapColorFlag;
+ GLboolean MapStencilFlag;
+
+ /*--- End Pixel Transfer State ---*/
+
+ /** glPixelZoom */
+ GLfloat ZoomX, ZoomY;
+};
+
+
+/**
+ * Point attribute group (GL_POINT_BIT).
+ */
+struct gl_point_attrib
+{
+ GLfloat Size; /**< User-specified point size */
+ GLfloat Params[3]; /**< GL_EXT_point_parameters */
+ GLfloat MinSize, MaxSize; /**< GL_EXT_point_parameters */
+ GLfloat Threshold; /**< GL_EXT_point_parameters */
+ GLboolean SmoothFlag; /**< True if GL_POINT_SMOOTH is enabled */
+ GLboolean _Attenuated; /**< True if Params != [1, 0, 0] */
+ GLboolean PointSprite; /**< GL_NV/ARB_point_sprite */
+ GLbitfield CoordReplace; /**< GL_ARB_point_sprite */
+ GLenum16 SpriteRMode; /**< GL_NV_point_sprite (only!) */
+ GLenum16 SpriteOrigin; /**< GL_ARB_point_sprite */
+};
+
+
+/**
+ * Polygon attribute group (GL_POLYGON_BIT).
+ */
+struct gl_polygon_attrib
+{
+ GLenum16 FrontFace; /**< Either GL_CW or GL_CCW */
+ GLenum FrontMode; /**< Either GL_POINT, GL_LINE or GL_FILL */
+ GLenum BackMode; /**< Either GL_POINT, GL_LINE or GL_FILL */
+ GLboolean CullFlag; /**< Culling on/off flag */
+ GLboolean SmoothFlag; /**< True if GL_POLYGON_SMOOTH is enabled */
+ GLboolean StippleFlag; /**< True if GL_POLYGON_STIPPLE is enabled */
+ GLenum16 CullFaceMode; /**< Culling mode GL_FRONT or GL_BACK */
+ GLfloat OffsetFactor; /**< Polygon offset factor, from user */
+ GLfloat OffsetUnits; /**< Polygon offset units, from user */
+ GLfloat OffsetClamp; /**< Polygon offset clamp, from user */
+ GLboolean OffsetPoint; /**< Offset in GL_POINT mode */
+ GLboolean OffsetLine; /**< Offset in GL_LINE mode */
+ GLboolean OffsetFill; /**< Offset in GL_FILL mode */
+};
+
+
+/**
+ * Scissor attributes (GL_SCISSOR_BIT).
+ */
+struct gl_scissor_rect
+{
+ GLint X, Y; /**< Lower left corner of box */
+ GLsizei Width, Height; /**< Size of box */
+};
+
+
+struct gl_scissor_attrib
+{
+ GLbitfield EnableFlags; /**< Scissor test enabled? */
+ struct gl_scissor_rect ScissorArray[MAX_VIEWPORTS];
+ GLint NumWindowRects; /**< Count of enabled window rectangles */
+ GLenum16 WindowRectMode; /**< Whether to include or exclude the rects */
+ struct gl_scissor_rect WindowRects[MAX_WINDOW_RECTANGLES];
+};
+
+
+/**
+ * Stencil attribute group (GL_STENCIL_BUFFER_BIT).
+ *
+ * Three sets of stencil data are tracked so that OpenGL 2.0,
+ * GL_EXT_stencil_two_side, and GL_ATI_separate_stencil can all be supported
+ * simultaneously. In each of the stencil state arrays, element 0 corresponds
+ * to GL_FRONT. Element 1 corresponds to the OpenGL 2.0 /
+ * GL_ATI_separate_stencil GL_BACK state. Element 2 corresponds to the
+ * GL_EXT_stencil_two_side GL_BACK state.
+ *
+ * The derived value \c _BackFace is either 1 or 2 depending on whether or
+ * not GL_STENCIL_TEST_TWO_SIDE_EXT is enabled.
+ *
+ * The derived value \c _TestTwoSide is set when the front-face and back-face
+ * stencil state are different.
+ */
+struct gl_stencil_attrib
+{
+ GLboolean Enabled; /**< Enabled flag */
+ GLboolean TestTwoSide; /**< GL_EXT_stencil_two_side */
+ GLubyte ActiveFace; /**< GL_EXT_stencil_two_side (0 or 2) */
+ GLubyte _BackFace; /**< Current back stencil state (1 or 2) */
+ GLenum16 Function[3]; /**< Stencil function */
+ GLenum16 FailFunc[3]; /**< Fail function */
+ GLenum16 ZPassFunc[3]; /**< Depth buffer pass function */
+ GLenum16 ZFailFunc[3]; /**< Depth buffer fail function */
+ GLint Ref[3]; /**< Reference value */
+ GLuint ValueMask[3]; /**< Value mask */
+ GLuint WriteMask[3]; /**< Write mask */
+ GLuint Clear; /**< Clear value */
+};
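+
+/*
+ * Illustrative indexing sketch (drawn from the comment above): front-face
+ * state always lives in element 0, while the currently active back-face
+ * state is selected with the derived _BackFace index, e.g.
+ *    back_func = stencil->Function[stencil->_BackFace];
+ * where _BackFace is 2 with GL_STENCIL_TEST_TWO_SIDE_EXT and 1 otherwise.
+ */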
+
+
+/**
+ * Bit flags for each type of texture object
+ */
+/*@{*/
+#define TEXTURE_2D_MULTISAMPLE_BIT (1 << TEXTURE_2D_MULTISAMPLE_INDEX)
+#define TEXTURE_2D_MULTISAMPLE_ARRAY_BIT (1 << TEXTURE_2D_MULTISAMPLE_ARRAY_INDEX)
+#define TEXTURE_CUBE_ARRAY_BIT (1 << TEXTURE_CUBE_ARRAY_INDEX)
+#define TEXTURE_BUFFER_BIT (1 << TEXTURE_BUFFER_INDEX)
+#define TEXTURE_2D_ARRAY_BIT (1 << TEXTURE_2D_ARRAY_INDEX)
+#define TEXTURE_1D_ARRAY_BIT (1 << TEXTURE_1D_ARRAY_INDEX)
+#define TEXTURE_EXTERNAL_BIT (1 << TEXTURE_EXTERNAL_INDEX)
+#define TEXTURE_CUBE_BIT (1 << TEXTURE_CUBE_INDEX)
+#define TEXTURE_3D_BIT (1 << TEXTURE_3D_INDEX)
+#define TEXTURE_RECT_BIT (1 << TEXTURE_RECT_INDEX)
+#define TEXTURE_2D_BIT (1 << TEXTURE_2D_INDEX)
+#define TEXTURE_1D_BIT (1 << TEXTURE_1D_INDEX)
+/*@}*/
+
+
+/**
+ * Texture image state. Drivers will typically create a subclass of this
+ * with extra fields for memory buffers, etc.
+ */
+struct gl_texture_image
+{
+ GLint InternalFormat; /**< Internal format as given by the user */
+ GLenum16 _BaseFormat; /**< Either GL_RGB, GL_RGBA, GL_ALPHA,
+ * GL_LUMINANCE, GL_LUMINANCE_ALPHA,
+ * GL_INTENSITY, GL_DEPTH_COMPONENT or
+ * GL_DEPTH_STENCIL_EXT only. Used for
+ * choosing TexEnv arithmetic.
+ */
+ mesa_format TexFormat; /**< The actual texture memory format */
+
+ GLuint Border; /**< 0 or 1 */
+ GLuint Width; /**< = 2^WidthLog2 + 2*Border */
+ GLuint Height; /**< = 2^HeightLog2 + 2*Border */
+ GLuint Depth; /**< = 2^DepthLog2 + 2*Border */
+ GLuint Width2; /**< = Width - 2*Border */
+ GLuint Height2; /**< = Height - 2*Border */
+ GLuint Depth2; /**< = Depth - 2*Border */
+ GLuint WidthLog2; /**< = log2(Width2) */
+ GLuint HeightLog2; /**< = log2(Height2) */
+ GLuint DepthLog2; /**< = log2(Depth2) */
+ GLuint MaxNumLevels; /**< = maximum possible number of mipmap
+ levels, computed from the dimensions */
+
+ struct gl_texture_object *TexObject; /**< Pointer back to parent object */
+ GLuint Level; /**< Which mipmap level am I? */
+ /** Cube map face: index into gl_texture_object::Image[] array */
+ GLuint Face;
+
+ /** GL_ARB_texture_multisample */
+ GLuint NumSamples; /**< Sample count, or 0 for non-multisample */
+ GLboolean FixedSampleLocations; /**< Same sample locations for all pixels? */
+};
+
+
+/**
+ * Indexes for cube map faces.
+ */
+typedef enum
+{
+ FACE_POS_X = 0,
+ FACE_NEG_X = 1,
+ FACE_POS_Y = 2,
+ FACE_NEG_Y = 3,
+ FACE_POS_Z = 4,
+ FACE_NEG_Z = 5,
+ MAX_FACES = 6
+} gl_face_index;
+
+
+/**
+ * Sampler object state. These objects are new with GL_ARB_sampler_objects
+ * and OpenGL 3.3. Legacy texture objects also contain a sampler object.
+ */
+struct gl_sampler_object
+{
+ simple_mtx_t Mutex;
+ GLuint Name;
+ GLchar *Label; /**< GL_KHR_debug */
+ GLint RefCount;
+
+ GLenum16 WrapS; /**< S-axis texture image wrap mode */
+ GLenum16 WrapT; /**< T-axis texture image wrap mode */
+ GLenum16 WrapR; /**< R-axis texture image wrap mode */
+ GLenum16 MinFilter; /**< minification filter */
+ GLenum16 MagFilter; /**< magnification filter */
+ GLenum16 sRGBDecode; /**< GL_DECODE_EXT or GL_SKIP_DECODE_EXT */
+ union gl_color_union BorderColor; /**< Interpreted according to texture format */
+ GLfloat MinLod; /**< min lambda, OpenGL 1.2 */
+ GLfloat MaxLod; /**< max lambda, OpenGL 1.2 */
+ GLfloat LodBias; /**< OpenGL 1.4 */
+ GLfloat MaxAnisotropy; /**< GL_EXT_texture_filter_anisotropic */
+ GLenum16 CompareMode; /**< GL_ARB_shadow */
+ GLenum16 CompareFunc; /**< GL_ARB_shadow */
+ GLboolean CubeMapSeamless; /**< GL_AMD_seamless_cubemap_per_texture */
+
+ /** GL_ARB_bindless_texture */
+ bool HandleAllocated;
+ struct util_dynarray Handles;
+};
+
+
+/**
+ * Texture object state. Contains the array of mipmap images, border color,
+ * wrap modes, filter modes, and shadow/texcompare state.
+ */
+struct gl_texture_object
+{
+ simple_mtx_t Mutex; /**< for thread safety */
+ GLint RefCount; /**< reference count */
+ GLuint Name; /**< the user-visible texture object ID */
+ GLenum16 Target; /**< GL_TEXTURE_1D, GL_TEXTURE_2D, etc. */
+ GLenum16 DepthMode; /**< GL_ARB_depth_texture */
+ GLchar *Label; /**< GL_KHR_debug */
+
+ struct gl_sampler_object Sampler;
+
+ gl_texture_index TargetIndex; /**< The gl_texture_unit::CurrentTex index.
+ Only valid when Target is valid. */
+ GLfloat Priority; /**< in [0,1] */
+ GLint MaxLevel; /**< max mipmap level (max=1000), OpenGL 1.2 */
+ GLint BaseLevel; /**< min mipmap level, OpenGL 1.2 */
+ GLbyte _MaxLevel; /**< actual max mipmap level (q in the spec) */
+ GLfloat _MaxLambda; /**< = _MaxLevel - BaseLevel (q - p in spec) */
+ GLint CropRect[4]; /**< GL_OES_draw_texture */
+ GLenum Swizzle[4]; /**< GL_EXT_texture_swizzle */
+ GLushort _Swizzle; /**< same as Swizzle, but SWIZZLE_* format */
+ GLbyte ImmutableLevels; /**< ES 3.0 / ARB_texture_view */
+ GLboolean GenerateMipmap; /**< GL_SGIS_generate_mipmap */
+ GLboolean _BaseComplete; /**< Is the base texture level valid? */
+ GLboolean _MipmapComplete; /**< Is the whole mipmap valid? */
+ GLboolean _IsIntegerFormat; /**< Does the texture store integer values? */
+ GLboolean _RenderToTexture; /**< Any rendering to this texture? */
+ GLboolean Purgeable; /**< Is the buffer purgeable under memory
+ pressure? */
+ GLboolean Immutable; /**< GL_ARB_texture_storage */
+ GLboolean _IsFloat; /**< GL_OES_float_texture */
+ GLboolean _IsHalfFloat; /**< GL_OES_half_float_texture */
+ bool StencilSampling; /**< Should we sample stencil instead of depth? */
+ bool HandleAllocated; /**< GL_ARB_bindless_texture */
+
+ /** GL_OES_EGL_image_external */
+ GLubyte RequiredTextureImageUnits;
+
+ GLubyte MinLevel; /**< GL_ARB_texture_view */
+ GLubyte NumLevels; /**< GL_ARB_texture_view */
+ GLushort MinLayer; /**< GL_ARB_texture_view */
+ GLushort NumLayers; /**< GL_ARB_texture_view */
+
+ /** GL_EXT_memory_object */
+ GLenum16 TextureTiling;
+
+ /** GL_ARB_shader_image_load_store */
+ GLenum16 ImageFormatCompatibilityType;
+
+ /** GL_ARB_texture_buffer_object */
+ GLenum16 BufferObjectFormat;
+ /** Equivalent Mesa format for BufferObjectFormat. */
+ mesa_format _BufferObjectFormat;
+ struct gl_buffer_object *BufferObject;
+
+ /** GL_ARB_texture_buffer_range */
+ GLintptr BufferOffset;
+ GLsizeiptr BufferSize; /**< if this is -1, use BufferObject->Size instead */
+
+ /** Actual texture images, indexed by [cube face] and [mipmap level] */
+ struct gl_texture_image *Image[MAX_FACES][MAX_TEXTURE_LEVELS];
+
+ /** GL_ARB_bindless_texture */
+ struct util_dynarray SamplerHandles;
+ struct util_dynarray ImageHandles;
+};
+
+
+/** Up to four combiner sources are possible with GL_NV_texture_env_combine4 */
+#define MAX_COMBINER_TERMS 4
+
+
+/**
+ * Texture combine environment state.
+ */
+struct gl_tex_env_combine_state
+{
+ GLenum16 ModeRGB; /**< GL_REPLACE, GL_DECAL, GL_ADD, etc. */
+ GLenum16 ModeA; /**< GL_REPLACE, GL_DECAL, GL_ADD, etc. */
+ /** Source terms: GL_PRIMARY_COLOR, GL_TEXTURE, etc */
+ GLenum16 SourceRGB[MAX_COMBINER_TERMS];
+ GLenum16 SourceA[MAX_COMBINER_TERMS];
+ /** Source operands: GL_SRC_COLOR, GL_ONE_MINUS_SRC_COLOR, etc */
+ GLenum16 OperandRGB[MAX_COMBINER_TERMS];
+ GLenum16 OperandA[MAX_COMBINER_TERMS];
+ GLubyte ScaleShiftRGB; /**< 0, 1 or 2 */
+ GLubyte ScaleShiftA; /**< 0, 1 or 2 */
+ GLubyte _NumArgsRGB; /**< Number of inputs used for the RGB combiner */
+ GLubyte _NumArgsA; /**< Number of inputs used for the A combiner */
+};
+
+
+/** Compressed TexEnv effective Combine mode */
+enum gl_tex_env_mode
+{
+ TEXENV_MODE_REPLACE, /* r = a0 */
+ TEXENV_MODE_MODULATE, /* r = a0 * a1 */
+ TEXENV_MODE_ADD, /* r = a0 + a1 */
+ TEXENV_MODE_ADD_SIGNED, /* r = a0 + a1 - 0.5 */
+ TEXENV_MODE_INTERPOLATE, /* r = a0 * a2 + a1 * (1 - a2) */
+ TEXENV_MODE_SUBTRACT, /* r = a0 - a1 */
+ TEXENV_MODE_DOT3_RGB, /* r = a0 . a1 */
+ TEXENV_MODE_DOT3_RGB_EXT, /* r = a0 . a1 */
+ TEXENV_MODE_DOT3_RGBA, /* r = a0 . a1 */
+ TEXENV_MODE_DOT3_RGBA_EXT, /* r = a0 . a1 */
+ TEXENV_MODE_MODULATE_ADD_ATI, /* r = a0 * a2 + a1 */
+ TEXENV_MODE_MODULATE_SIGNED_ADD_ATI, /* r = a0 * a2 + a1 - 0.5 */
+ TEXENV_MODE_MODULATE_SUBTRACT_ATI, /* r = a0 * a2 - a1 */
+ TEXENV_MODE_ADD_PRODUCTS_NV, /* r = a0 * a1 + a2 * a3 */
+ TEXENV_MODE_ADD_PRODUCTS_SIGNED_NV, /* r = a0 * a1 + a2 * a3 - 0.5 */
+};
+
+
+/** Compressed TexEnv Combine source */
+enum gl_tex_env_source
+{
+ TEXENV_SRC_TEXTURE0,
+ TEXENV_SRC_TEXTURE1,
+ TEXENV_SRC_TEXTURE2,
+ TEXENV_SRC_TEXTURE3,
+ TEXENV_SRC_TEXTURE4,
+ TEXENV_SRC_TEXTURE5,
+ TEXENV_SRC_TEXTURE6,
+ TEXENV_SRC_TEXTURE7,
+ TEXENV_SRC_TEXTURE,
+ TEXENV_SRC_PREVIOUS,
+ TEXENV_SRC_PRIMARY_COLOR,
+ TEXENV_SRC_CONSTANT,
+ TEXENV_SRC_ZERO,
+ TEXENV_SRC_ONE,
+};
+
+
+/** Compressed TexEnv Combine operand */
+enum gl_tex_env_operand
+{
+ TEXENV_OPR_COLOR,
+ TEXENV_OPR_ONE_MINUS_COLOR,
+ TEXENV_OPR_ALPHA,
+ TEXENV_OPR_ONE_MINUS_ALPHA,
+};
+
+
+/** Compressed TexEnv Combine argument */
+struct gl_tex_env_argument
+{
+#ifdef __GNUC__
+ __extension__ uint8_t Source:4; /**< TEXENV_SRC_x */
+ __extension__ uint8_t Operand:2; /**< TEXENV_OPR_x */
+#else
+ uint8_t Source; /**< SRC_x */
+ uint8_t Operand; /**< OPR_x */
+#endif
+};
+
+
+/**
+ * Compressed TexEnv Combine state.
+ */
+struct gl_tex_env_combine_packed
+{
+ uint32_t ModeRGB:4; /**< Effective mode for RGB as 4 bits */
+ uint32_t ModeA:4; /**< Effective mode for A as 4 bits */
+ uint32_t ScaleShiftRGB:2; /**< 0, 1 or 2 */
+ uint32_t ScaleShiftA:2; /**< 0, 1 or 2 */
+ uint32_t NumArgsRGB:3; /**< Number of inputs used for the RGB combiner */
+ uint32_t NumArgsA:3; /**< Number of inputs used for the A combiner */
+ /** Source arguments in a packed manner */
+ struct gl_tex_env_argument ArgsRGB[MAX_COMBINER_TERMS];
+ struct gl_tex_env_argument ArgsA[MAX_COMBINER_TERMS];
+};
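+
+/*
+ * Illustrative packing example (hypothetical values): a classic
+ * GL_MODULATE setup (texture * previous) would compress to
+ *    ModeRGB = TEXENV_MODE_MODULATE, NumArgsRGB = 2,
+ *    ArgsRGB[0] = { TEXENV_SRC_TEXTURE,  TEXENV_OPR_COLOR },
+ *    ArgsRGB[1] = { TEXENV_SRC_PREVIOUS, TEXENV_OPR_COLOR },
+ * fitting each argument into a single byte.
+ */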
+
+
+/**
+ * TexGenEnabled flags.
+ */
+/*@{*/
+#define S_BIT 1
+#define T_BIT 2
+#define R_BIT 4
+#define Q_BIT 8
+#define STR_BITS (S_BIT | T_BIT | R_BIT)
+/*@}*/
+
+
+/**
+ * Bit flag versions of the corresponding GL_ constants.
+ */
+/*@{*/
+#define TEXGEN_SPHERE_MAP 0x1
+#define TEXGEN_OBJ_LINEAR 0x2
+#define TEXGEN_EYE_LINEAR 0x4
+#define TEXGEN_REFLECTION_MAP_NV 0x8
+#define TEXGEN_NORMAL_MAP_NV 0x10
+
+#define TEXGEN_NEED_NORMALS (TEXGEN_SPHERE_MAP | \
+ TEXGEN_REFLECTION_MAP_NV | \
+ TEXGEN_NORMAL_MAP_NV)
+#define TEXGEN_NEED_EYE_COORD (TEXGEN_SPHERE_MAP | \
+ TEXGEN_REFLECTION_MAP_NV | \
+ TEXGEN_NORMAL_MAP_NV | \
+ TEXGEN_EYE_LINEAR)
+/*@}*/
+
+
+
+/** Tex-gen enabled for texture unit? */
+#define ENABLE_TEXGEN(unit) (1 << (unit))
+
+/** Non-identity texture matrix for texture unit? */
+#define ENABLE_TEXMAT(unit) (1 << (unit))
+
+
+/**
+ * Texture coord generation state.
+ */
+struct gl_texgen
+{
+ GLenum16 Mode; /**< GL_EYE_LINEAR, GL_SPHERE_MAP, etc */
+ GLbitfield8 _ModeBit; /**< TEXGEN_x bit corresponding to Mode */
+ GLfloat ObjectPlane[4];
+ GLfloat EyePlane[4];
+};
+
+
+/**
+ * Sampler-related subset of a texture unit, like current texture objects.
+ */
+struct gl_texture_unit
+{
+ GLfloat LodBias; /**< for biasing mipmap levels */
+
+ /** Texture targets that have a non-default texture bound */
+ GLbitfield _BoundTextures;
+
+ /** Current sampler object (GL_ARB_sampler_objects) */
+ struct gl_sampler_object *Sampler;
+
+ /** Current texture object pointers */
+ struct gl_texture_object *CurrentTex[NUM_TEXTURE_TARGETS];
+
+ /** Points to highest priority, complete and enabled texture object */
+ struct gl_texture_object *_Current;
+};
+
+
+/**
+ * Fixed-function-related subset of a texture unit, like enable flags,
+ * texture environment/function/combiners, and texgen state.
+ */
+struct gl_fixedfunc_texture_unit
+{
+ GLbitfield16 Enabled; /**< bitmask of TEXTURE_*_BIT flags */
+
+ GLenum16 EnvMode; /**< GL_MODULATE, GL_DECAL, GL_BLEND, etc. */
+ GLclampf EnvColor[4];
+ GLfloat EnvColorUnclamped[4];
+
+ struct gl_texgen GenS;
+ struct gl_texgen GenT;
+ struct gl_texgen GenR;
+ struct gl_texgen GenQ;
+ GLbitfield8 TexGenEnabled; /**< Bitwise-OR of [STRQ]_BIT values */
+ GLbitfield8 _GenFlags; /**< Bitwise-OR of Gen[STRQ]._ModeBit */
+
+ /**
+ * \name GL_EXT_texture_env_combine
+ */
+ struct gl_tex_env_combine_state Combine;
+
+ /**
+ * Derived state based on \c EnvMode and the \c BaseFormat of the
+ * currently enabled texture.
+ */
+ struct gl_tex_env_combine_state _EnvMode;
+
+ /** Current compressed TexEnv & Combine state */
+ struct gl_tex_env_combine_packed _CurrentCombinePacked;
+
+ /**
+ * Currently enabled combiner state. This will point to either
+ * \c Combine or \c _EnvMode.
+ */
+ struct gl_tex_env_combine_state *_CurrentCombine;
+};
+
+
+/**
+ * Texture attribute group (GL_TEXTURE_BIT).
+ */
+struct gl_texture_attrib
+{
+ struct gl_texture_object *ProxyTex[NUM_TEXTURE_TARGETS];
+
+ /** GL_ARB_texture_buffer_object */
+ struct gl_buffer_object *BufferObject;
+
+ GLuint CurrentUnit; /**< GL_ACTIVE_TEXTURE */
+
+ /** Texture coord units/sets used for fragment texturing */
+ GLbitfield8 _EnabledCoordUnits;
+
+ /** Texture coord units that have texgen enabled */
+ GLbitfield8 _TexGenEnabled;
+
+ /** Texture coord units that have non-identity matrices */
+ GLbitfield8 _TexMatEnabled;
+
+ /** Bitwise-OR of all Texture.Unit[i]._GenFlags */
+ GLbitfield8 _GenFlags;
+
+ /** Largest index of a texture unit with _Current != NULL. */
+ GLshort _MaxEnabledTexImageUnit;
+
+ /** Largest index + 1 of texture units that have had any CurrentTex set. */
+ GLubyte NumCurrentTexUsed;
+
+ /** GL_ARB_seamless_cubemap */
+ GLboolean CubeMapSeamless;
+
+ struct gl_texture_unit Unit[MAX_COMBINED_TEXTURE_IMAGE_UNITS];
+ struct gl_fixedfunc_texture_unit FixedFuncUnit[MAX_TEXTURE_COORD_UNITS];
+};
+
+
+/**
+ * Data structure representing a single clip plane (e.g. one of the elements
+ * of the ctx->Transform.EyeUserPlane or ctx->Transform._ClipUserPlane array).
+ */
+typedef GLfloat gl_clip_plane[4];
+
+
+/**
+ * Transformation attribute group (GL_TRANSFORM_BIT).
+ */
+struct gl_transform_attrib
+{
+ GLenum16 MatrixMode; /**< Matrix mode */
+ gl_clip_plane EyeUserPlane[MAX_CLIP_PLANES]; /**< User clip planes */
+ gl_clip_plane _ClipUserPlane[MAX_CLIP_PLANES]; /**< derived */
+ GLbitfield ClipPlanesEnabled; /**< on/off bitmask */
+ GLboolean Normalize; /**< Normalize all normals? */
+ GLboolean RescaleNormals; /**< GL_EXT_rescale_normal */
+ GLboolean RasterPositionUnclipped; /**< GL_IBM_rasterpos_clip */
+ GLboolean DepthClampNear; /**< GL_AMD_depth_clamp_separate */
+ GLboolean DepthClampFar; /**< GL_AMD_depth_clamp_separate */
+ /** GL_ARB_clip_control */
+ GLenum16 ClipOrigin; /**< GL_LOWER_LEFT or GL_UPPER_LEFT */
+ GLenum16 ClipDepthMode;/**< GL_NEGATIVE_ONE_TO_ONE or GL_ZERO_TO_ONE */
+};
+
+
+/**
+ * Viewport attribute group (GL_VIEWPORT_BIT).
+ */
+struct gl_viewport_attrib
+{
+ GLfloat X, Y; /**< position */
+ GLfloat Width, Height; /**< size */
+ GLfloat Near, Far; /**< Depth buffer range */
+
+ /** GL_NV_viewport_swizzle */
+ GLenum16 SwizzleX, SwizzleY, SwizzleZ, SwizzleW;
+};
+
+
+/**
+ * Fields describing a mapped buffer range.
+ */
+struct gl_buffer_mapping
+{
+ GLbitfield AccessFlags; /**< Mask of GL_MAP_x_BIT flags */
+ GLvoid *Pointer; /**< User-space address of mapping */
+ GLintptr Offset; /**< Mapped offset */
+ GLsizeiptr Length; /**< Mapped length */
+};
+
+
+/**
+ * Usages we've seen for a buffer object.
+ */
+typedef enum
+{
+ USAGE_UNIFORM_BUFFER = 0x1,
+ USAGE_TEXTURE_BUFFER = 0x2,
+ USAGE_ATOMIC_COUNTER_BUFFER = 0x4,
+ USAGE_SHADER_STORAGE_BUFFER = 0x8,
+ USAGE_TRANSFORM_FEEDBACK_BUFFER = 0x10,
+ USAGE_PIXEL_PACK_BUFFER = 0x20,
+ USAGE_ARRAY_BUFFER = 0x40,
+ USAGE_ELEMENT_ARRAY_BUFFER = 0x80,
+ USAGE_DISABLE_MINMAX_CACHE = 0x100,
+} gl_buffer_usage;
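+
+/*
+ * Illustrative: these are power-of-two flags accumulated into the
+ * UsageHistory bitmask below, so a buffer first bound as GL_ARRAY_BUFFER
+ * and later as a UBO would carry
+ *    USAGE_ARRAY_BUFFER | USAGE_UNIFORM_BUFFER.
+ */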
+
+
+/**
+ * GL_ARB_vertex/pixel_buffer_object buffer object
+ */
+struct gl_buffer_object
+{
+ GLint RefCount;
+ GLuint Name;
+ GLchar *Label; /**< GL_KHR_debug */
+ GLenum16 Usage; /**< GL_STREAM_DRAW_ARB, GL_STREAM_READ_ARB, etc. */
+ GLbitfield StorageFlags; /**< GL_MAP_PERSISTENT_BIT, etc. */
+ GLsizeiptrARB Size; /**< Size of buffer storage in bytes */
+ GLubyte *Data; /**< Location of storage either in RAM or VRAM. */
+ GLboolean DeletePending; /**< true if buffer object is removed from the hash */
+ GLboolean Written; /**< Ever written to? (for debugging) */
+ GLboolean Purgeable; /**< Is the buffer purgeable under memory pressure? */
+ GLboolean Immutable; /**< GL_ARB_buffer_storage */
+ gl_buffer_usage UsageHistory; /**< How has this buffer been used so far? */
+
+ /** Counters used for buffer usage warnings */
+ GLuint NumSubDataCalls;
+ GLuint NumMapBufferWriteCalls;
+
+ struct gl_buffer_mapping Mappings[MAP_COUNT];
+
+ /** Memoization of min/max index computations for static index buffers */
+ simple_mtx_t MinMaxCacheMutex;
+ struct hash_table *MinMaxCache;
+ unsigned MinMaxCacheHitIndices;
+ unsigned MinMaxCacheMissIndices;
+ bool MinMaxCacheDirty;
+
+ bool HandleAllocated; /**< GL_ARB_bindless_texture */
+};
+
+
+/**
+ * Client pixel packing/unpacking attributes
+ */
+struct gl_pixelstore_attrib
+{
+ GLint Alignment;
+ GLint RowLength;
+ GLint SkipPixels;
+ GLint SkipRows;
+ GLint ImageHeight;
+ GLint SkipImages;
+ GLboolean SwapBytes;
+ GLboolean LsbFirst;
+ GLboolean Invert; /**< GL_MESA_pack_invert */
+ GLint CompressedBlockWidth; /**< GL_ARB_compressed_texture_pixel_storage */
+ GLint CompressedBlockHeight;
+ GLint CompressedBlockDepth;
+ GLint CompressedBlockSize;
+ struct gl_buffer_object *BufferObj; /**< GL_ARB_pixel_buffer_object */
+};
+
+
+/**
+ * Enum for defining the mapping for the position/generic0 attribute.
+ *
+ * Do not change the order of the values as these are used as
+ * array indices.
+ */
+typedef enum
+{
+ ATTRIBUTE_MAP_MODE_IDENTITY, /**< 1:1 mapping */
+ ATTRIBUTE_MAP_MODE_POSITION, /**< get position and generic0 from position */
+ ATTRIBUTE_MAP_MODE_GENERIC0, /**< get position and generic0 from generic0 */
+ ATTRIBUTE_MAP_MODE_MAX /**< for sizing arrays */
+} gl_attribute_map_mode;
+
+
+/**
+ * Attributes to describe a vertex array.
+ *
+ * Contains the size, type, format and normalization flag,
+ * along with the index of a vertex buffer binding point.
+ *
+ * Note that the Stride field corresponds to VERTEX_ATTRIB_ARRAY_STRIDE
+ * and is only present for backwards compatibility reasons.
+ * Rendering always uses VERTEX_BINDING_STRIDE.
+ * The gl*Pointer() functions will set VERTEX_ATTRIB_ARRAY_STRIDE
+ * and VERTEX_BINDING_STRIDE to the same value, while
+ * glBindVertexBuffer() will only set VERTEX_BINDING_STRIDE.
+ */
+struct gl_array_attributes
+{
+ /** Points to client array data. Not used when a VBO is bound */
+ const GLubyte *Ptr;
+ /** Offset of the first element relative to the binding offset */
+ GLuint RelativeOffset;
+ /** Vertex format */
+ struct gl_vertex_format Format;
+ /** Stride as specified with gl*Pointer() */
+ GLshort Stride;
+ /** Index into gl_vertex_array_object::BufferBinding[] array */
+ GLubyte BufferBindingIndex;
+
+ /**
+ * Derived effective buffer binding index
+ *
+ * Index into the gl_vertex_buffer_binding array of the vao.
+ * Similar to BufferBindingIndex, but with the mapping of the
+ * position/generic0 attributes applied and with identical
+ * gl_vertex_buffer_binding entries collapsed to a single
+ * entry within the vao.
+ *
+ * The value is valid past calling _mesa_update_vao_derived_arrays.
+ * Note that _mesa_update_vao_derived_arrays is called when binding
+ * the VAO to Array._DrawVAO.
+ */
+ GLubyte _EffBufferBindingIndex;
+ /**
+ * Derived effective relative offset.
+ *
+ * Relative offset to the effective buffers offset in
+ * gl_vertex_buffer_binding::_EffOffset.
+ *
+ * The value is valid past calling _mesa_update_vao_derived_arrays.
+ * Note that _mesa_update_vao_derived_arrays is called when binding
+ * the VAO to Array._DrawVAO.
+ */
+ GLushort _EffRelativeOffset;
+};
+
+
+/**
+ * This describes the buffer object used for a vertex array (or
+ * multiple vertex arrays). If BufferObj points to the default/null
+ * buffer object, then the vertex array lives in user memory and not a VBO.
+ */
+struct gl_vertex_buffer_binding
+{
+ GLintptr Offset; /**< User-specified offset */
+ GLsizei Stride; /**< User-specified stride */
+ GLuint InstanceDivisor; /**< GL_ARB_instanced_arrays */
+ struct gl_buffer_object *BufferObj; /**< GL_ARB_vertex_buffer_object */
+ GLbitfield _BoundArrays; /**< Arrays bound to this binding point */
+
+ /**
+ * Derived effective bound arrays.
+ *
+ * The effective binding handles enabled arrays past the
+ * position/generic0 attribute mapping and reduces the referred
+ * gl_vertex_buffer_binding entries to a unique subset.
+ *
+ * The value is valid past calling _mesa_update_vao_derived_arrays.
+ * Note that _mesa_update_vao_derived_arrays is called when binding
+ * the VAO to Array._DrawVAO.
+ */
+ GLbitfield _EffBoundArrays;
+ /**
+ * Derived offset.
+ *
+ * The absolute offset relative to which some attributes can be
+ * collapsed into this unique effective binding.
+ * For user space array bindings this contains the smallest pointer value
+ * in the bound and interleaved arrays.
+ * For VBO bindings this contains an offset that lets the attributes
+ * _EffRelativeOffset stay positive and in bounds with
+ * Const.MaxVertexAttribRelativeOffset
+ *
+ * The value is valid past calling _mesa_update_vao_derived_arrays.
+ * Note that _mesa_update_vao_derived_arrays is called when binding
+ * the VAO to Array._DrawVAO.
+ */
+ GLintptr _EffOffset;
+};
+
+
+/**
+ * A representation of "Vertex Array Objects" (VAOs) from OpenGL 3.1+ /
+ * the GL_ARB_vertex_array_object extension.
+ */
+struct gl_vertex_array_object
+{
+ /** Name of the VAO as received from glGenVertexArray. */
+ GLuint Name;
+
+ GLint RefCount;
+
+ GLchar *Label; /**< GL_KHR_debug */
+
+ /**
+ * Has this array object been bound?
+ */
+ GLboolean EverBound;
+
+ /**
+ * Set to true if the object is shared between contexts and immutable.
+ * Reference counting is then done using atomics and is thread-safe.
+ * Used for display list (dlist) VAOs.
+ */
+ bool SharedAndImmutable;
+
+ /** Vertex attribute arrays */
+ struct gl_array_attributes VertexAttrib[VERT_ATTRIB_MAX];
+
+ /** Vertex buffer bindings */
+ struct gl_vertex_buffer_binding BufferBinding[VERT_ATTRIB_MAX];
+
+ /** Mask indicating which vertex arrays have vertex buffer associated. */
+ GLbitfield VertexAttribBufferMask;
+
+ /** Mask indicating which vertex arrays have a non-zero instance divisor. */
+ GLbitfield NonZeroDivisorMask;
+
+ /** Mask of VERT_BIT_* values indicating which arrays are enabled */
+ GLbitfield Enabled;
+
+ /**
+ * Mask of VERT_BIT_* enabled arrays past position/generic0 mapping
+ *
+ * The value is valid past calling _mesa_update_vao_derived_arrays.
+ * Note that _mesa_update_vao_derived_arrays is called when binding
+ * the VAO to Array._DrawVAO.
+ */
+ GLbitfield _EffEnabledVBO;
+
+ /** Same as _EffEnabledVBO, but for instance divisors. */
+ GLbitfield _EffEnabledNonZeroDivisor;
+
+ /** Denotes the way the position/generic0 attribute is mapped */
+ gl_attribute_map_mode _AttributeMapMode;
+
+ /** Mask of VERT_BIT_* values indicating changed/dirty arrays */
+ GLbitfield NewArrays;
+
+ /** The index buffer (also known as the element array buffer in OpenGL). */
+ struct gl_buffer_object *IndexBufferObj;
+};
+
+
+/**
+ * Vertex array state
+ */
+struct gl_array_attrib
+{
+ /** Currently bound array object. */
+ struct gl_vertex_array_object *VAO;
+
+ /** The default vertex array object */
+ struct gl_vertex_array_object *DefaultVAO;
+
+ /** The last VAO accessed by a DSA function */
+ struct gl_vertex_array_object *LastLookedUpVAO;
+
+ /** These contents are copied to newly created VAOs. */
+ struct gl_vertex_array_object DefaultVAOState;
+
+ /** Array objects (GL_ARB_vertex_array_object) */
+ struct _mesa_HashTable *Objects;
+
+ GLint ActiveTexture; /**< Client Active Texture */
+ GLuint LockFirst; /**< GL_EXT_compiled_vertex_array */
+ GLuint LockCount; /**< GL_EXT_compiled_vertex_array */
+
+ /**
+ * \name Primitive restart controls
+ *
+ * Primitive restart is enabled if either \c PrimitiveRestart or
+ * \c PrimitiveRestartFixedIndex is set.
+ */
+ /*@{*/
+ GLboolean PrimitiveRestart;
+ GLboolean PrimitiveRestartFixedIndex;
+ GLboolean _PrimitiveRestart;
+ GLuint RestartIndex;
+ GLuint _RestartIndex[4]; /**< Effective restart indices, indexed by index_size - 1. */
+ /*@}*/
+
+ /* GL_ARB_vertex_buffer_object */
+ struct gl_buffer_object *ArrayBufferObj;
+
+ /**
+ * Vertex array object that is used with the currently active draw command.
+ * The _DrawVAO is either set to the currently bound VAO for array-type
+ * draws, or to internal VAOs set up by the vbo module to execute
+ * immediate-mode or display-list draws.
+ */
+ struct gl_vertex_array_object *_DrawVAO;
+ /**
+ * The VERT_BIT_* bits effectively enabled from the current _DrawVAO.
+ * This is always a subset of _mesa_get_vao_vp_inputs(_DrawVAO)
+ * but may omit those arrays that shall not be referenced by the current
+ * gl_vertex_program_state::_VPMode. For example, the generic attributes
+ * are masked out from the _DrawVAO's enabled arrays when a fixed-function
+ * array draw is executed.
+ */
+ GLbitfield _DrawVAOEnabledAttribs;
+ /**
+ * Initially, or if the VAO referenced by _DrawVAO is deleted, the _DrawVAO
+ * pointer is set to _EmptyVAO, which is just an empty VAO at all times.
+ */
+ struct gl_vertex_array_object *_EmptyVAO;
+
+ /** Legal array datatypes and the API for which they have been computed */
+ GLbitfield LegalTypesMask;
+ gl_api LegalTypesMaskAPI;
+};
+
+
+/**
+ * Feedback buffer state
+ */
+struct gl_feedback
+{
+ GLenum16 Type;
+ GLbitfield _Mask; /**< FB_* bits */
+ GLfloat *Buffer;
+ GLuint BufferSize;
+ GLuint Count;
+};
+
+
+/**
+ * Selection buffer state
+ */
+struct gl_selection
+{
+ GLuint *Buffer; /**< selection buffer */
+ GLuint BufferSize; /**< size of the selection buffer */
+ GLuint BufferCount; /**< number of values in the selection buffer */
+ GLuint Hits; /**< number of records in the selection buffer */
+ GLuint NameStackDepth; /**< name stack depth */
+ GLuint NameStack[MAX_NAME_STACK_DEPTH]; /**< name stack */
+ GLboolean HitFlag; /**< hit flag */
+ GLfloat HitMinZ; /**< minimum hit depth */
+ GLfloat HitMaxZ; /**< maximum hit depth */
+};
+
+
+/**
+ * 1-D Evaluator control points
+ */
+struct gl_1d_map
+{
+ GLuint Order; /**< Number of control points */
+ GLfloat u1, u2, du; /**< u1, u2, 1.0/(u2-u1) */
+ GLfloat *Points; /**< Points to contiguous control points */
+};
+
+
+/**
+ * 2-D Evaluator control points
+ */
+struct gl_2d_map
+{
+ GLuint Uorder; /**< Number of control points in U dimension */
+ GLuint Vorder; /**< Number of control points in V dimension */
+ GLfloat u1, u2, du;
+ GLfloat v1, v2, dv;
+ GLfloat *Points; /**< Points to contiguous control points */
+};
+
+
+/**
+ * All evaluator control point state
+ */
+struct gl_evaluators
+{
+ /**
+ * \name 1-D maps
+ */
+ /*@{*/
+ struct gl_1d_map Map1Vertex3;
+ struct gl_1d_map Map1Vertex4;
+ struct gl_1d_map Map1Index;
+ struct gl_1d_map Map1Color4;
+ struct gl_1d_map Map1Normal;
+ struct gl_1d_map Map1Texture1;
+ struct gl_1d_map Map1Texture2;
+ struct gl_1d_map Map1Texture3;
+ struct gl_1d_map Map1Texture4;
+ /*@}*/
+
+ /**
+ * \name 2-D maps
+ */
+ /*@{*/
+ struct gl_2d_map Map2Vertex3;
+ struct gl_2d_map Map2Vertex4;
+ struct gl_2d_map Map2Index;
+ struct gl_2d_map Map2Color4;
+ struct gl_2d_map Map2Normal;
+ struct gl_2d_map Map2Texture1;
+ struct gl_2d_map Map2Texture2;
+ struct gl_2d_map Map2Texture3;
+ struct gl_2d_map Map2Texture4;
+ /*@}*/
+};
+
+
+struct gl_transform_feedback_varying_info
+{
+ char *Name;
+ GLenum16 Type;
+ GLint BufferIndex;
+ GLint Size;
+ GLint Offset;
+};
+
+
+/**
+ * Per-output info for vertex shaders, used for transform feedback.
+ */
+struct gl_transform_feedback_output
+{
+ uint32_t OutputRegister;
+ uint32_t OutputBuffer;
+ uint32_t NumComponents;
+ uint32_t StreamId;
+
+ /** offset (in DWORDs) of this output within the interleaved structure */
+ uint32_t DstOffset;
+
+ /**
+ * Offset into the output register of the data to output. For example,
+ * if NumComponents is 2 and ComponentOffset is 1, then the data to
+ * output is in the y and z components of the output register.
+ */
+ uint32_t ComponentOffset;
+};
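+
+/*
+ * Illustrative sketch (not part of Mesa): a software capture path could
+ * interpret these fields roughly as follows, where output_reg and
+ * xfb_buffer are hypothetical names for the shader's output registers
+ * and the mapped feedback buffers:
+ *
+ *    const float *src = output_reg[o->OutputRegister];
+ *    float *dst = xfb_buffer[o->OutputBuffer] + o->DstOffset;
+ *    for (unsigned i = 0; i < o->NumComponents; i++)
+ *       dst[i] = src[o->ComponentOffset + i];
+ */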
+
+
+struct gl_transform_feedback_buffer
+{
+ uint32_t Binding;
+
+ uint32_t NumVaryings;
+
+ /**
+ * Total number of components stored in each buffer. This may be used by
+ * hardware back-ends to determine the correct stride when interleaving
+ * multiple transform feedback outputs in the same buffer.
+ */
+ uint32_t Stride;
+
+ /**
+ * Which transform feedback stream this buffer binding is associated with.
+ */
+ uint32_t Stream;
+};
+
+
+/** Post-link transform feedback info. */
+struct gl_transform_feedback_info
+{
+ unsigned NumOutputs;
+
+ /* Bitmask of active buffer indices. */
+ unsigned ActiveBuffers;
+
+ struct gl_transform_feedback_output *Outputs;
+
+ /** Transform feedback varyings used for the linking of this shader program.
+ *
+ * Use for glGetTransformFeedbackVarying().
+ */
+ struct gl_transform_feedback_varying_info *Varyings;
+ GLint NumVarying;
+
+ struct gl_transform_feedback_buffer Buffers[MAX_FEEDBACK_BUFFERS];
+};
+
+
+/**
+ * Transform feedback object state
+ */
+struct gl_transform_feedback_object
+{
+ GLuint Name; /**< AKA the object ID */
+ GLint RefCount;
+ GLchar *Label; /**< GL_KHR_debug */
+ GLboolean Active; /**< Is transform feedback enabled? */
+ GLboolean Paused; /**< Is transform feedback paused? */
+ GLboolean EndedAnytime; /**< Has EndTransformFeedback been called
+ at least once? */
+ GLboolean EverBound; /**< Has this object been bound? */
+
+ /**
+ * GLES: if Active is true, remaining number of primitives which can be
+ * rendered without overflow. This is necessary to track because GLES
+ * requires us to generate INVALID_OPERATION if a call to glDrawArrays or
+ * glDrawArraysInstanced would overflow transform feedback buffers.
+ * Undefined if Active is false.
+ *
+ * Not tracked for desktop GL since it's unnecessary.
+ */
+ unsigned GlesRemainingPrims;
+
+ /**
+ * The program active when BeginTransformFeedback() was called.
+ * When active and unpaused, this equals ctx->Shader.CurrentProgram[stage],
+ * where stage is the pipeline stage that is the source of data for
+ * transform feedback.
+ */
+ struct gl_program *program;
+
+ /** The feedback buffers */
+ GLuint BufferNames[MAX_FEEDBACK_BUFFERS];
+ struct gl_buffer_object *Buffers[MAX_FEEDBACK_BUFFERS];
+
+ /** Start of feedback data in dest buffer */
+ GLintptr Offset[MAX_FEEDBACK_BUFFERS];
+
+ /**
+ * Max data to put into dest buffer (in bytes). Computed based on
+ * RequestedSize and the actual size of the buffer.
+ */
+ GLsizeiptr Size[MAX_FEEDBACK_BUFFERS];
+
+ /**
+ * Size that was specified when the buffer was bound. If the buffer was
+ * bound with glBindBufferBase() or glBindBufferOffsetEXT(), this value is
+ * zero.
+ */
+ GLsizeiptr RequestedSize[MAX_FEEDBACK_BUFFERS];
+};
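+
+/*
+ * Illustrative sketch (not part of Mesa): per the Size comment above,
+ * the effective capture size of binding point i combines the requested
+ * range with the actual size of the bound buffer, roughly:
+ *
+ *    GLsizeiptr avail = obj->Buffers[i]->Size - obj->Offset[i];
+ *    obj->Size[i] = obj->RequestedSize[i]
+ *                   ? MIN2(obj->RequestedSize[i], avail) : avail;
+ *
+ * (MIN2 is Mesa's minimum macro; the exact clamping is driver policy.)
+ */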
+
+
+/**
+ * Context state for transform feedback.
+ */
+struct gl_transform_feedback_state
+{
+ GLenum16 Mode; /**< GL_POINTS, GL_LINES or GL_TRIANGLES */
+
+ /** The general binding point (GL_TRANSFORM_FEEDBACK_BUFFER) */
+ struct gl_buffer_object *CurrentBuffer;
+
+ /** The table of all transform feedback objects */
+ struct _mesa_HashTable *Objects;
+
+ /** The current xform-fb object (GL_TRANSFORM_FEEDBACK_BINDING) */
+ struct gl_transform_feedback_object *CurrentObject;
+
+ /** The default xform-fb object (Name==0) */
+ struct gl_transform_feedback_object *DefaultObject;
+};
+
+
+/**
+ * A "performance monitor" as described in AMD_performance_monitor.
+ */
+struct gl_perf_monitor_object
+{
+ GLuint Name;
+
+ /** True if the monitor is currently active (Begin called but not End). */
+ GLboolean Active;
+
+ /**
+ * True if the monitor has ended.
+ *
+ * This is distinct from !Active because it may never have begun.
+ */
+ GLboolean Ended;
+
+ /**
+ * A list of groups with currently active counters.
+ *
+ * ActiveGroups[g] == n if there are n counters active from group 'g'.
+ */
+ unsigned *ActiveGroups;
+
+ /**
+ * An array of bitsets, subscripted by group ID, then indexed by counter ID.
+ *
+ * Checking whether counter 'c' in group 'g' is active can be done via:
+ *
+ * BITSET_TEST(ActiveCounters[g], c)
+ */
+ GLuint **ActiveCounters;
+};
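+
+/*
+ * Illustrative sketch (not part of Mesa): enumerating a monitor's active
+ * counters, assuming groups and num_groups come from the context's
+ * gl_perf_monitor_state:
+ *
+ *    for (unsigned g = 0; g < num_groups; g++) {
+ *       if (m->ActiveGroups[g] == 0)
+ *          continue;   // no counters active in this group
+ *       for (unsigned c = 0; c < groups[g].NumCounters; c++)
+ *          if (BITSET_TEST(m->ActiveCounters[g], c))
+ *             record_counter(g, c);   // hypothetical consumer
+ *    }
+ */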
+
+
+union gl_perf_monitor_counter_value
+{
+ float f;
+ uint64_t u64;
+ uint32_t u32;
+};
+
+
+struct gl_perf_monitor_counter
+{
+ /** Human readable name for the counter. */
+ const char *Name;
+
+ /**
+ * Data type of the counter. Valid values are FLOAT, UNSIGNED_INT,
+ * UNSIGNED_INT64_AMD, and PERCENTAGE_AMD.
+ */
+ GLenum16 Type;
+
+ /** Minimum counter value. */
+ union gl_perf_monitor_counter_value Minimum;
+
+ /** Maximum counter value. */
+ union gl_perf_monitor_counter_value Maximum;
+};
+
+
+struct gl_perf_monitor_group
+{
+ /** Human readable name for the group. */
+ const char *Name;
+
+ /**
+ * Maximum number of counters in this group which can be active at the
+ * same time.
+ */
+ GLuint MaxActiveCounters;
+
+ /** Array of counters within this group. */
+ const struct gl_perf_monitor_counter *Counters;
+ GLuint NumCounters;
+};
+
+
+/**
+ * A query object instance as described in INTEL_performance_query.
+ *
+ * NB: We want to keep this and the corresponding backend structure
+ * relatively lean considering that applications may expect to
+ * allocate enough objects to be able to query around all draw calls
+ * in a frame.
+ */
+struct gl_perf_query_object
+{
+ GLuint Id; /**< hash table ID/name */
+ unsigned Used:1; /**< has been used for 1 or more queries */
+ unsigned Active:1; /**< inside Begin/EndPerfQuery */
+ unsigned Ready:1; /**< result is ready? */
+};
+
+
+/**
+ * Context state for AMD_performance_monitor.
+ */
+struct gl_perf_monitor_state
+{
+ /** Array of performance monitor groups (indexed by group ID) */
+ const struct gl_perf_monitor_group *Groups;
+ GLuint NumGroups;
+
+ /** The table of all performance monitors. */
+ struct _mesa_HashTable *Monitors;
+};
+
+
+/**
+ * Context state for INTEL_performance_query.
+ */
+struct gl_perf_query_state
+{
+ struct _mesa_HashTable *Objects; /**< The table of all performance query objects */
+};
+
+
+/**
+ * A bindless sampler object.
+ */
+struct gl_bindless_sampler
+{
+ /** Texture unit (set by glUniform1()). */
+ GLubyte unit;
+
+ /** Whether this bindless sampler is bound to a unit. */
+ GLboolean bound;
+
+ /** Texture Target (TEXTURE_1D/2D/3D/etc_INDEX). */
+ gl_texture_index target;
+
+ /** Pointer to the base of the data. */
+ GLvoid *data;
+};
+
+
+/**
+ * A bindless image object.
+ */
+struct gl_bindless_image
+{
+ /** Image unit (set by glUniform1()). */
+ GLubyte unit;
+
+ /** Whether this bindless image is bound to a unit. */
+ GLboolean bound;
+
+ /** Access qualifier (GL_READ_WRITE, GL_READ_ONLY, GL_WRITE_ONLY, or
+ * GL_NONE to indicate both read-only and write-only)
+ */
+ GLenum16 access;
+
+ /** Pointer to the base of the data. */
+ GLvoid *data;
+};
+
+
+/**
+ * Current vertex processing mode: fixed function vs. shader.
+ * In reality, fixed function is probably implemented by a shader but that's
+ * not what we care about here.
+ */
+typedef enum
+{
+ VP_MODE_FF, /**< legacy / fixed function */
+ VP_MODE_SHADER, /**< ARB vertex program or GLSL vertex shader */
+ VP_MODE_MAX /**< for sizing arrays */
+} gl_vertex_processing_mode;
+
+
+/**
+ * Base class for any kind of program object
+ */
+struct gl_program
+{
+ /** FIXME: This must be first until we split shader_info from nir_shader */
+ struct shader_info info;
+
+ GLuint Id;
+ GLint RefCount;
+ GLubyte *String; /**< Null-terminated program text */
+
+ /** GL_VERTEX/FRAGMENT_PROGRAM_ARB, GL_GEOMETRY_PROGRAM_NV */
+ GLenum16 Target;
+ GLenum16 Format; /**< String encoding format */
+
+ GLboolean _Used; /**< Ever used for drawing? Used for debugging */
+
+ struct nir_shader *nir;
+
+ /* Saved and restored with metadata. Freed with ralloc. */
+ void *driver_cache_blob;
+ size_t driver_cache_blob_size;
+
+ bool is_arb_asm; /**< Is this an ARB assembly-style program? */
+
+ /** Has this program been written to the on-disk shader cache? */
+ bool program_written_to_cache;
+
+ /** A bitfield indicating which vertex shader inputs consume two slots
+ *
+ * This is used for mapping from single-slot input locations in the GL API
+ * to dual-slot double input locations in the shader. This field is set
+ * once as part of linking and never updated again to ensure the mapping
+ * remains consistent.
+ *
+ * Note: There may be dual-slot variables in the original shader source
+ * which do not appear in this bitfield due to having been eliminated by
+ * the compiler prior to DualSlotInputs being calculated. There may also
+ * be bits set in this bitfield which are set but which the shader never
+ * reads due to compiler optimizations eliminating such variables after
+ * DualSlotInputs is calculated.
+ */
+ GLbitfield64 DualSlotInputs;
+ /** Subset of OutputsWritten outputs written with non-zero index. */
+ GLbitfield64 SecondaryOutputsWritten;
+ /** TEXTURE_x_BIT bitmask */
+ GLbitfield16 TexturesUsed[MAX_COMBINED_TEXTURE_IMAGE_UNITS];
+ /** Bitfield of which samplers are used */
+ GLbitfield SamplersUsed;
+ /** Texture units used for shadow sampling. */
+ GLbitfield ShadowSamplers;
+ /** Texture units used for samplerExternalOES */
+ GLbitfield ExternalSamplersUsed;
+
+ /** Named parameters, constants, etc. from program text */
+ struct gl_program_parameter_list *Parameters;
+
+ /** Map from sampler unit to texture unit (set by glUniform1i()) */
+ GLubyte SamplerUnits[MAX_SAMPLERS];
+
+ /* FIXME: We should be able to make this struct a union. However some
+ * drivers (i915/fragment_programs, swrast/prog_execute) mix the use of
+ * these fields, we should fix this.
+ */
+ struct {
+ /** Fields used by GLSL programs */
+ struct {
+ /** Data shared by gl_program and gl_shader_program */
+ struct gl_shader_program_data *data;
+
+ struct gl_active_atomic_buffer **AtomicBuffers;
+
+ /** Post-link transform feedback info. */
+ struct gl_transform_feedback_info *LinkedTransformFeedback;
+
+ /**
+ * Number of types for subroutine uniforms.
+ */
+ GLuint NumSubroutineUniformTypes;
+
+ /**
+ * Subroutine uniform remap table
+ * based on the program level uniform remap table.
+ */
+ GLuint NumSubroutineUniforms; /* non-sparse total */
+ GLuint NumSubroutineUniformRemapTable;
+ struct gl_uniform_storage **SubroutineUniformRemapTable;
+
+ /**
+ * Num of subroutine functions for this stage and storage for them.
+ */
+ GLuint NumSubroutineFunctions;
+ GLuint MaxSubroutineFunctionIndex;
+ struct gl_subroutine_function *SubroutineFunctions;
+
+ /**
+ * Map from image uniform index to image unit (set by glUniform1i())
+ *
+ * An image uniform index is associated with each image uniform by
+ * the linker. The image index associated with each uniform is
+ * stored in the \c gl_uniform_storage::image field.
+ */
+ GLubyte ImageUnits[MAX_IMAGE_UNIFORMS];
+
+ /**
+ * Access qualifier specified in the shader for each image uniform
+ * index. Either \c GL_READ_ONLY, \c GL_WRITE_ONLY, \c
+ * GL_READ_WRITE, or \c GL_NONE to indicate both read-only and
+ * write-only.
+ *
+ * It may differ from, though only be more strict than, the value of
+ * \c gl_image_unit::Access for the corresponding image unit.
+ */
+ GLenum16 ImageAccess[MAX_IMAGE_UNIFORMS];
+
+ struct gl_uniform_block **UniformBlocks;
+ struct gl_uniform_block **ShaderStorageBlocks;
+
+ /**
+ * Bitmask of shader storage blocks not declared as read-only.
+ */
+ unsigned ShaderStorageBlocksWriteAccess;
+
+ /** Which texture target is being sampled
+ * (TEXTURE_1D/2D/3D/etc_INDEX)
+ */
+ GLubyte SamplerTargets[MAX_SAMPLERS];
+
+ /**
+ * Number of samplers declared with the bindless_sampler layout
+ * qualifier as specified by ARB_bindless_texture.
+ */
+ GLuint NumBindlessSamplers;
+ GLboolean HasBoundBindlessSampler;
+ struct gl_bindless_sampler *BindlessSamplers;
+
+ /**
+ * Number of images declared with the bindless_image layout qualifier
+ * as specified by ARB_bindless_texture.
+ */
+ GLuint NumBindlessImages;
+ GLboolean HasBoundBindlessImage;
+ struct gl_bindless_image *BindlessImages;
+
+ union {
+ struct {
+ /**
+ * A bitmask of gl_advanced_blend_mode values
+ */
+ GLbitfield BlendSupport;
+ } fs;
+ };
+ } sh;
+
+ /** ARB assembly-style program fields */
+ struct {
+ struct prog_instruction *Instructions;
+
+ /**
+ * Local parameters used by the program.
+ *
+ * It's dynamically allocated because it is rarely used (just
+ * assembly-style programs), and it holds MAX_PROGRAM_LOCAL_PARAMS
+ * entries once it's allocated.
+ */
+ GLfloat (*LocalParams)[4];
+
+ /** Bitmask of which register files are read/written with indirect
+ * addressing. Mask of (1 << PROGRAM_x) bits.
+ */
+ GLbitfield IndirectRegisterFiles;
+
+ /** Logical counts */
+ /*@{*/
+ GLuint NumInstructions;
+ GLuint NumTemporaries;
+ GLuint NumParameters;
+ GLuint NumAttributes;
+ GLuint NumAddressRegs;
+ GLuint NumAluInstructions;
+ GLuint NumTexInstructions;
+ GLuint NumTexIndirections;
+ /*@}*/
+ /** Native, actual h/w counts */
+ /*@{*/
+ GLuint NumNativeInstructions;
+ GLuint NumNativeTemporaries;
+ GLuint NumNativeParameters;
+ GLuint NumNativeAttributes;
+ GLuint NumNativeAddressRegs;
+ GLuint NumNativeAluInstructions;
+ GLuint NumNativeTexInstructions;
+ GLuint NumNativeTexIndirections;
+ /*@}*/
+
+ /** Used by ARB assembly-style programs. Can only be true for vertex
+ * programs.
+ */
+ GLboolean IsPositionInvariant;
+ } arb;
+ };
+};
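+
+/*
+ * Illustrative sketch (not part of Mesa): SamplersUsed and SamplerUnits
+ * above together resolve which texture unit each sampler reads from:
+ *
+ *    GLbitfield mask = prog->SamplersUsed;
+ *    while (mask) {
+ *       const int s = u_bit_scan(&mask);            // sampler index
+ *       const GLubyte unit = prog->SamplerUnits[s]; // texture unit
+ *       // ... look up the texture bound to unit here ...
+ *    }
+ *
+ * (u_bit_scan is Mesa's find-and-clear-lowest-bit helper.)
+ */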
+
+
+/**
+ * State common to vertex and fragment programs.
+ */
+struct gl_program_state
+{
+ GLint ErrorPos; /* GL_PROGRAM_ERROR_POSITION_ARB/NV */
+ const char *ErrorString; /* GL_PROGRAM_ERROR_STRING_ARB/NV */
+};
+
+
+/**
+ * Context state for vertex programs.
+ */
+struct gl_vertex_program_state
+{
+ GLboolean Enabled; /**< User-set GL_VERTEX_PROGRAM_ARB/NV flag */
+ GLboolean PointSizeEnabled; /**< GL_VERTEX_PROGRAM_POINT_SIZE_ARB/NV */
+ GLboolean TwoSideEnabled; /**< GL_VERTEX_PROGRAM_TWO_SIDE_ARB/NV */
+ /** Should fixed-function T&L be implemented with a vertex prog? */
+ GLboolean _MaintainTnlProgram;
+
+ struct gl_program *Current; /**< User-bound vertex program */
+
+ /** Currently enabled and valid vertex program (including internal
+ * programs, user-defined vertex programs and GLSL vertex shaders).
+ * This is the program we must use when rendering.
+ */
+ struct gl_program *_Current;
+
+ GLfloat Parameters[MAX_PROGRAM_ENV_PARAMS][4]; /**< Env params */
+
+ /** Program to emulate fixed-function T&L (see above) */
+ struct gl_program *_TnlProgram;
+
+ /** Cache of fixed-function programs */
+ struct gl_program_cache *Cache;
+
+ GLboolean _Overriden;
+
+ /**
+ * Whether we have a vertex program, a TNL program, or no program at all.
+ * Note that this value should be kept up to date at all times;
+ * nevertheless, its correctness is asserted in _mesa_update_state.
+ * The reason is that, to avoid calling _mesa_update_state twice, we need
+ * this value on draw *before* actually calling _mesa_update_state.
+ * Also, it should only need to be recomputed on changes to the vertex
+ * program, which are heavyweight already.
+ */
+ gl_vertex_processing_mode _VPMode;
+};
+
+/**
+ * Context state for tessellation control programs.
+ */
+struct gl_tess_ctrl_program_state
+{
+ /** Currently bound and valid shader. */
+ struct gl_program *_Current;
+
+ GLint patch_vertices;
+ GLfloat patch_default_outer_level[4];
+ GLfloat patch_default_inner_level[2];
+};
+
+/**
+ * Context state for tessellation evaluation programs.
+ */
+struct gl_tess_eval_program_state
+{
+ /** Currently bound and valid shader. */
+ struct gl_program *_Current;
+};
+
+/**
+ * Context state for geometry programs.
+ */
+struct gl_geometry_program_state
+{
+ /**
+ * Currently enabled and valid program (including internal programs
+ * and compiled shader programs).
+ */
+ struct gl_program *_Current;
+};
+
+/**
+ * Context state for fragment programs.
+ */
+struct gl_fragment_program_state
+{
+ GLboolean Enabled; /**< User-set fragment program enable flag */
+ /** Should fixed-function texturing be implemented with a fragment prog? */
+ GLboolean _MaintainTexEnvProgram;
+
+ struct gl_program *Current; /**< User-bound fragment program */
+
+ /**
+ * Currently enabled and valid fragment program (including internal
+ * programs, user-defined fragment programs and GLSL fragment shaders).
+ * This is the program we must use when rendering.
+ */
+ struct gl_program *_Current;
+
+ GLfloat Parameters[MAX_PROGRAM_ENV_PARAMS][4]; /**< Env params */
+
+ /** Program to emulate fixed-function texture env/combine (see above) */
+ struct gl_program *_TexEnvProgram;
+
+ /** Cache of fixed-function programs */
+ struct gl_program_cache *Cache;
+};
+
+
+/**
+ * Context state for compute programs.
+ */
+struct gl_compute_program_state
+{
+ /** Currently enabled and valid program (including internal programs
+ * and compiled shader programs).
+ */
+ struct gl_program *_Current;
+};
+
+
+/**
+ * ATI_fragment_shader runtime state
+ */
+
+struct atifs_instruction;
+struct atifs_setupinst;
+
+/**
+ * ATI fragment shader
+ */
+struct ati_fragment_shader
+{
+ GLuint Id;
+ GLint RefCount;
+ struct atifs_instruction *Instructions[2];
+ struct atifs_setupinst *SetupInst[2];
+ GLfloat Constants[8][4];
+ GLbitfield LocalConstDef; /**< Indicates which constants have been set */
+ GLubyte numArithInstr[2];
+ GLubyte regsAssigned[2];
+ GLubyte NumPasses; /**< 1 or 2 */
+ /**
+ * Current compile stage: 0 setup pass1, 1 arith pass1,
+ * 2 setup pass2, 3 arith pass2.
+ */
+ GLubyte cur_pass;
+ GLubyte last_optype;
+ GLboolean interpinp1;
+ GLboolean isValid;
+ /**
+ * Array of 2 bit values for each tex unit to remember whether
+ * STR or STQ swizzle was used
+ */
+ GLuint swizzlerq;
+ struct gl_program *Program;
+};
+
+/**
+ * Context state for GL_ATI_fragment_shader
+ */
+struct gl_ati_fragment_shader_state
+{
+ GLboolean Enabled;
+ GLboolean Compiling;
+ GLfloat GlobalConstants[8][4];
+ struct ati_fragment_shader *Current;
+};
+
+/**
+ * Shader subroutine function definition
+ */
+struct gl_subroutine_function
+{
+ char *name;
+ int index;
+ int num_compat_types;
+ const struct glsl_type **types;
+};
+
+/**
+ * Shader information needed by both gl_shader and gl_linked_shader.
+ */
+struct gl_shader_info
+{
+ /**
+ * Tessellation Control shader state from layout qualifiers.
+ */
+ struct {
+ /**
+ * 0 - vertices not declared in shader, or
+ * 1 .. GL_MAX_PATCH_VERTICES
+ */
+ GLint VerticesOut;
+ } TessCtrl;
+
+ /**
+ * Tessellation Evaluation shader state from layout qualifiers.
+ */
+ struct {
+ /**
+ * GL_TRIANGLES, GL_QUADS, GL_ISOLINES or PRIM_UNKNOWN if it's not set
+ * in this shader.
+ */
+ GLenum16 PrimitiveMode;
+
+ enum gl_tess_spacing Spacing;
+
+ /**
+ * GL_CW, GL_CCW, or 0 if it's not set in this shader.
+ */
+ GLenum16 VertexOrder;
+ /**
+ * 1, 0, or -1 if it's not set in this shader.
+ */
+ int PointMode;
+ } TessEval;
+
+ /**
+ * Geometry shader state from GLSL 1.50 layout qualifiers.
+ */
+ struct {
+ GLint VerticesOut;
+ /**
+ * 0 - Invocations count not declared in shader, or
+ * 1 .. Const.MaxGeometryShaderInvocations
+ */
+ GLint Invocations;
+ /**
+ * GL_POINTS, GL_LINES, GL_LINES_ADJACENCY, GL_TRIANGLES, or
+ * GL_TRIANGLES_ADJACENCY, or PRIM_UNKNOWN if it's not set in this
+ * shader.
+ */
+ GLenum16 InputType;
+ /**
+ * GL_POINTS, GL_LINE_STRIP or GL_TRIANGLE_STRIP, or PRIM_UNKNOWN if
+ * it's not set in this shader.
+ */
+ GLenum16 OutputType;
+ } Geom;
+
+ /**
+ * Compute shader state from ARB_compute_shader and
+ * ARB_compute_variable_group_size layout qualifiers.
+ */
+ struct {
+ /**
+ * Size specified using local_size_{x,y,z}, or all 0's to indicate that
+ * it's not set in this shader.
+ */
+ unsigned LocalSize[3];
+
+ /**
+ * Whether a variable work group size has been specified as defined by
+ * ARB_compute_variable_group_size.
+ */
+ bool LocalSizeVariable;
+
+ /*
+ * Arrangement of invocations used to calculate derivatives in a compute
+ * shader. From NV_compute_shader_derivatives.
+ */
+ enum gl_derivative_group DerivativeGroup;
+ } Comp;
+};
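+
+/*
+ * Illustrative sketch (not part of Mesa): per the Comp.LocalSize comment
+ * above, a fixed work-group size is declared exactly when the components
+ * are non-zero (info being a hypothetical pointer to this struct):
+ *
+ *    const bool has_fixed_local_size = info->Comp.LocalSize[0] != 0 &&
+ *                                      info->Comp.LocalSize[1] != 0 &&
+ *                                      info->Comp.LocalSize[2] != 0;
+ */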
+
+/**
+ * A linked GLSL shader object.
+ */
+struct gl_linked_shader
+{
+ gl_shader_stage Stage;
+
+#ifdef DEBUG
+ unsigned SourceChecksum;
+#endif
+
+ struct gl_program *Program; /**< Post-compile assembly code */
+
+ /**
+ * \name Sampler tracking
+ *
+ * \note Each of these fields is only set post-linking.
+ */
+ /*@{*/
+ GLbitfield shadow_samplers; /**< Samplers used for shadow sampling. */
+ /*@}*/
+
+ /**
+ * Number of default uniform block components used by this shader.
+ *
+ * This field is only set post-linking.
+ */
+ unsigned num_uniform_components;
+
+ /**
+ * Number of combined uniform components used by this shader.
+ *
+ * This field is only set post-linking. It is the sum of the uniform block
+ * sizes divided by sizeof(float), plus num_uniform_components.
+ */
+ unsigned num_combined_uniform_components;
+
+ struct exec_list *ir;
+ struct exec_list *packed_varyings;
+ struct exec_list *fragdata_arrays;
+ struct glsl_symbol_table *symbols;
+
+ /**
+ * ARB_gl_spirv related data.
+ *
+ * This is actually a reference to the gl_shader::spirv_data, which
+ * stores information that is also needed during linking.
+ */
+ struct gl_shader_spirv_data *spirv_data;
+};
+
+
+/**
+ * Compile status enum. COMPILE_SKIPPED is used to indicate the compile
+ * was skipped due to the shader matching one that's been seen before by
+ * the on-disk cache.
+ */
+enum gl_compile_status
+{
+ COMPILE_FAILURE = 0,
+ COMPILE_SUCCESS,
+ COMPILE_SKIPPED
+};
+
+/**
+ * A GLSL shader object.
+ */
+struct gl_shader
+{
+ /** GL_FRAGMENT_SHADER || GL_VERTEX_SHADER || GL_GEOMETRY_SHADER_ARB ||
+ * GL_TESS_CONTROL_SHADER || GL_TESS_EVALUATION_SHADER.
+ * Must be the first field.
+ */
+ GLenum16 Type;
+ gl_shader_stage Stage;
+ GLuint Name; /**< AKA the handle */
+ GLint RefCount; /**< Reference count */
+ GLchar *Label; /**< GL_KHR_debug */
+ unsigned char sha1[20]; /**< SHA1 hash of pre-processed source */
+ GLboolean DeletePending;
+ bool IsES; /**< True if this shader uses GLSL ES */
+
+ enum gl_compile_status CompileStatus;
+
+#ifdef DEBUG
+ unsigned SourceChecksum; /**< for debug/logging purposes */
+#endif
+ const GLchar *Source; /**< Source code string */
+
+ const GLchar *FallbackSource; /**< Fallback string used by on-disk cache*/
+
+ GLchar *InfoLog;
+
+ unsigned Version; /**< GLSL version used for linking */
+
+ /**
+ * A bitmask of gl_advanced_blend_mode values
+ */
+ GLbitfield BlendSupport;
+
+ struct exec_list *ir;
+ struct glsl_symbol_table *symbols;
+
+ /**
+ * Whether early fragment tests are enabled as defined by
+ * ARB_shader_image_load_store.
+ */
+ bool EarlyFragmentTests;
+
+ bool ARB_fragment_coord_conventions_enable;
+
+ bool redeclares_gl_fragcoord;
+ bool uses_gl_fragcoord;
+
+ bool PostDepthCoverage;
+ bool PixelInterlockOrdered;
+ bool PixelInterlockUnordered;
+ bool SampleInterlockOrdered;
+ bool SampleInterlockUnordered;
+ bool InnerCoverage;
+
+ /**
+ * Fragment shader state from GLSL 1.50 layout qualifiers.
+ */
+ bool origin_upper_left;
+ bool pixel_center_integer;
+
+ /**
+ * Whether bindless_sampler/bindless_image, and respectively
+ * bound_sampler/bound_image are declared at global scope as defined by
+ * ARB_bindless_texture.
+ */
+ bool bindless_sampler;
+ bool bindless_image;
+ bool bound_sampler;
+ bool bound_image;
+
+ /**
+ * Whether layer output is viewport-relative.
+ */
+ bool redeclares_gl_layer;
+ bool layer_viewport_relative;
+
+ /** Global xfb_stride out qualifier if any */
+ GLuint TransformFeedbackBufferStride[MAX_FEEDBACK_BUFFERS];
+
+ struct gl_shader_info info;
+
+ /* ARB_gl_spirv related data */
+ struct gl_shader_spirv_data *spirv_data;
+};
+
+
+struct gl_uniform_buffer_variable
+{
+ char *Name;
+
+ /**
+ * Name of the uniform as seen by glGetUniformIndices.
+ *
+ * glGetUniformIndices requires that the block instance index \b not be
+ * present in the name of queried uniforms.
+ *
+ * \note
+ * \c gl_uniform_buffer_variable::IndexName and
+ * \c gl_uniform_buffer_variable::Name may point to identical storage.
+ */
+ char *IndexName;
+
+ const struct glsl_type *Type;
+ unsigned int Offset;
+ GLboolean RowMajor;
+};
+
+
+struct gl_uniform_block
+{
+ /** Declared name of the uniform block */
+ char *Name;
+
+ /** Array of supplemental information about UBO ir_variables. */
+ struct gl_uniform_buffer_variable *Uniforms;
+ GLuint NumUniforms;
+
+ /**
+ * Index (GL_UNIFORM_BLOCK_BINDING) into ctx->UniformBufferBindings[] to use
+ * with glBindBufferBase to bind a buffer object to this uniform block.
+ */
+ GLuint Binding;
+
+ /**
+ * Minimum size (in bytes) of a buffer object to back this uniform buffer
+ * (GL_UNIFORM_BLOCK_DATA_SIZE).
+ */
+ GLuint UniformBufferSize;
+
+ /** Stages that reference this block */
+ uint8_t stageref;
+
+ /**
+ * Linearized array index for uniform block instance arrays
+ *
+ * Given a uniform block instance array declared with size
+ * blk[s_0][s_1]..[s_m], the block referenced by blk[i_0][i_1]..[i_m] will
+ * have the linearized array index
+ *
+ *            m-1       m
+ *      i_m +  ∑  i_j *  ∏  s_k
+ *            j=0      k=j+1
+ *
+ * For a uniform block instance that is not an array, this is always 0.
+ * (A C sketch of this computation follows the struct definition below.)
+ */
+ uint8_t linearized_array_index;
+
+ /**
+ * Layout specified in the shader
+ *
+ * This isn't accessible through the API, but it is used while
+ * cross-validating uniform blocks.
+ */
+ enum glsl_interface_packing _Packing;
+ GLboolean _RowMajor;
+};
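+
+/*
+ * Illustrative sketch (not part of Mesa): the linearized_array_index
+ * formula above, in Horner form, for indices idx[0..m] and sizes
+ * size[0..m]:
+ *
+ *    unsigned linear = 0;
+ *    for (unsigned j = 0; j <= m; j++)
+ *       linear = linear * size[j] + idx[j];
+ *
+ * After the loop, linear equals i_m plus the sum of each i_j times the
+ * product of the sizes of the dimensions that follow it.
+ */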
+
+/**
+ * Structure that represents a reference to an atomic buffer from some
+ * shader program.
+ */
+struct gl_active_atomic_buffer
+{
+ /** Uniform indices of the atomic counters declared within it. */
+ GLuint *Uniforms;
+ GLuint NumUniforms;
+
+ /** Binding point index associated with it. */
+ GLuint Binding;
+
+ /** Minimum reasonable size it is expected to have. */
+ GLuint MinimumSize;
+
+ /** Shader stages making use of it. */
+ GLboolean StageReferences[MESA_SHADER_STAGES];
+};
+
+/**
+ * Data container for shader queries. This holds only the minimal
+ * amount of required information for resource queries to work.
+ */
+struct gl_shader_variable
+{
+ /**
+ * Declared type of the variable
+ */
+ const struct glsl_type *type;
+
+ /**
+ * If the variable is in an interface block, this is the type of the block.
+ */
+ const struct glsl_type *interface_type;
+
+ /**
+ * For variables inside structs (possibly recursively), this is the
+ * outermost struct type.
+ */
+ const struct glsl_type *outermost_struct_type;
+
+ /**
+ * Declared name of the variable
+ */
+ char *name;
+
+ /**
+ * Storage location of the base of this variable
+ *
+ * The precise meaning of this field depends on the nature of the variable.
+ *
+ * - Vertex shader input: one of the values from \c gl_vert_attrib.
+ * - Vertex shader output: one of the values from \c gl_varying_slot.
+ * - Geometry shader input: one of the values from \c gl_varying_slot.
+ * - Geometry shader output: one of the values from \c gl_varying_slot.
+ * - Fragment shader input: one of the values from \c gl_varying_slot.
+ * - Fragment shader output: one of the values from \c gl_frag_result.
+ * - Uniforms: Per-stage uniform slot number for default uniform block.
+ * - Uniforms: Index within the uniform block definition for UBO members.
+ * - Non-UBO Uniforms: explicit location until linking then reused to
+ * store uniform slot number.
+ * - Other: This field is not currently used.
+ *
+ * If the variable is a uniform, shader input, or shader output, and the
+ * slot has not been assigned, the value will be -1.
+ */
+ int location;
+
+ /**
+ * Specifies the first component the variable is stored in as per
+ * ARB_enhanced_layouts.
+ */
+ unsigned component:2;
+
+ /**
+ * Output index for dual source blending.
+ *
+ * \note
+ * The GLSL spec only allows the values 0 or 1 for the index in \b dual
+ * source blending.
+ */
+ unsigned index:1;
+
+ /**
+ * Specifies whether a shader input/output is per-patch in tessellation
+ * shader stages.
+ */
+ unsigned patch:1;
+
+ /**
+ * Storage class of the variable.
+ *
+ * \sa (n)ir_variable_mode
+ */
+ unsigned mode:4;
+
+ /**
+ * Interpolation mode for shader inputs / outputs
+ *
+ * \sa glsl_interp_mode
+ */
+ unsigned interpolation:2;
+
+ /**
+ * Was the location explicitly set in the shader?
+ *
+ * If the location is explicitly set in the shader, it \b cannot be changed
+ * by the linker or by the API (e.g., calls to \c glBindAttribLocation have
+ * no effect).
+ */
+ unsigned explicit_location:1;
+
+ /**
+ * Precision qualifier.
+ */
+ unsigned precision:2;
+};
+
+/**
+ * Active resource in a gl_shader_program
+ */
+struct gl_program_resource
+{
+ GLenum16 Type; /** Program interface type. */
+ const void *Data; /** Pointer to resource associated data structure. */
+ uint8_t StageReferences; /** Bitmask of shader stage references. */
+};
+
+/**
+ * Link status enum. LINKING_SKIPPED is used to indicate linking
+ * was skipped due to the shader being loaded from the on-disk cache.
+ */
+enum gl_link_status
+{
+ LINKING_FAILURE = 0,
+ LINKING_SUCCESS,
+ LINKING_SKIPPED
+};
+
+/**
+ * A data structure to be shared by gl_shader_program and gl_program.
+ */
+struct gl_shader_program_data
+{
+ GLint RefCount; /**< Reference count */
+
+ /** SHA1 hash of linked shader program */
+ unsigned char sha1[20];
+
+ unsigned NumUniformStorage;
+ unsigned NumHiddenUniforms;
+ struct gl_uniform_storage *UniformStorage;
+
+ unsigned NumUniformBlocks;
+ unsigned NumShaderStorageBlocks;
+
+ struct gl_uniform_block *UniformBlocks;
+ struct gl_uniform_block *ShaderStorageBlocks;
+
+ struct gl_active_atomic_buffer *AtomicBuffers;
+ unsigned NumAtomicBuffers;
+
+ /* Shader cache variables used during restore */
+ unsigned NumUniformDataSlots;
+ union gl_constant_value *UniformDataSlots;
+
+ /* Used to hold initial uniform values for program binary restores.
+ *
+ * From the ARB_get_program_binary spec:
+ *
+ * "A successful call to ProgramBinary will reset all uniform
+ * variables to their initial values. The initial value is either
+ * the value of the variable's initializer as specified in the
+ * original shader source, or 0 if no initializer was present."
+ */
+ union gl_constant_value *UniformDataDefaults;
+
+ /** Hash for quick search by name. */
+ struct hash_table_u64 *ProgramResourceHash;
+
+ GLboolean Validated;
+
+ /** List of all active resources after linking. */
+ struct gl_program_resource *ProgramResourceList;
+ unsigned NumProgramResourceList;
+
+ enum gl_link_status LinkStatus; /**< GL_LINK_STATUS */
+ GLchar *InfoLog;
+
+ unsigned Version; /**< GLSL version used for linking */
+
+ /* Mask of stages this program was linked against */
+ unsigned linked_stages;
+
+ /* Whether the shaders of this program are loaded from SPIR-V binaries
+ * (all have the SPIR_V_BINARY_ARB state). This was introduced by the
+ * ARB_gl_spirv extension.
+ */
+ bool spirv;
+};
+
+/**
+ * A GLSL program object.
+ * Basically a linked collection of vertex and fragment shaders.
+ */
+struct gl_shader_program
+{
+ GLenum16 Type; /**< Always GL_SHADER_PROGRAM (internal token) */
+ GLuint Name; /**< aka handle or ID */
+ GLchar *Label; /**< GL_KHR_debug */
+ GLint RefCount; /**< Reference count */
+ GLboolean DeletePending;
+
+ /**
+ * Is the application intending to glGetProgramBinary this program?
+ *
+ * BinaryRetrievableHint is the currently active hint that gets set
+ * during initialization and after linking; BinaryRetrievableHintPending
+ * is the hint set by the user, to become active the next time the
+ * program is linked.
+ */
+ GLboolean BinaryRetrievableHint;
+ GLboolean BinaryRetrievableHintPending;
+
+ /**
+ * Indicates whether program can be bound for individual pipeline stages
+ * using UseProgramStages after it is next linked.
+ */
+ GLboolean SeparateShader;
+
+ GLuint NumShaders; /**< number of attached shaders */
+ struct gl_shader **Shaders; /**< List of the attached shaders */
+
+ /**
+ * User-defined attribute bindings
+ *
+ * These are set via \c glBindAttribLocation and are used to direct the
+ * GLSL linker. These are \b not the values used in the compiled shader,
+ * and they are \b not the values returned by \c glGetAttribLocation.
+ */
+ struct string_to_uint_map *AttributeBindings;
+
+ /**
+ * User-defined fragment data bindings
+ *
+ * These are set via \c glBindFragDataLocation and are used to direct the
+ * GLSL linker. These are \b not the values used in the compiled shader,
+ * and they are \b not the values returned by \c glGetFragDataLocation.
+ */
+ struct string_to_uint_map *FragDataBindings;
+ struct string_to_uint_map *FragDataIndexBindings;
+
+ /**
+ * Transform feedback varyings last specified by
+ * glTransformFeedbackVaryings().
+ *
+ * For the current set of transform feedback varyings used for transform
+ * feedback output, see LinkedTransformFeedback.
+ */
+ struct {
+ GLenum16 BufferMode;
+ /** Global xfb_stride out qualifier if any */
+ GLuint BufferStride[MAX_FEEDBACK_BUFFERS];
+ GLuint NumVarying;
+ GLchar **VaryingNames; /**< Array [NumVarying] of char * */
+ } TransformFeedback;
+
+ struct gl_program *last_vert_prog;
+
+ /** Post-link gl_FragDepth layout for ARB_conservative_depth. */
+ enum gl_frag_depth_layout FragDepthLayout;
+
+ /**
+ * Geometry shader state - copied into gl_program by
+ * _mesa_copy_linked_program_data().
+ */
+ struct {
+ GLint VerticesIn;
+
+ bool UsesEndPrimitive;
+ bool UsesStreams;
+ } Geom;
+
+ /**
+ * Compute shader state - copied into gl_program by
+ * _mesa_copy_linked_program_data().
+ */
+ struct {
+ /**
+ * Size of shared variables accessed by the compute shader.
+ */
+ unsigned SharedSize;
+ } Comp;
+
+ /** Data shared by gl_program and gl_shader_program */
+ struct gl_shader_program_data *data;
+
+ /**
+ * Mapping from GL uniform locations returned by \c glUniformLocation to
+ * UniformStorage entries. Arrays will have multiple contiguous slots
+ * in the UniformRemapTable, all pointing to the same UniformStorage entry.
+ */
+ unsigned NumUniformRemapTable;
+ struct gl_uniform_storage **UniformRemapTable;
+
+ /**
+ * Sometimes there are empty slots left over in UniformRemapTable after we
+ * allocate slots to explicit locations. This list stores the blocks of
+ * contiguous empty slots inside UniformRemapTable.
+ */
+ struct exec_list EmptyUniformLocations;
+
+ /**
+ * Total number of explicit uniform locations, including inactive uniforms.
+ */
+ unsigned NumExplicitUniformLocations;
+
+ /**
+ * Map of active uniform names to locations
+ *
+ * Maps any active uniform that is not an array element to a location.
+ * Each active uniform, including individual structure members will appear
+ * in this map. This roughly corresponds to the set of names that would be
+ * enumerated by \c glGetActiveUniform.
+ */
+ struct string_to_uint_map *UniformHash;
+
+ GLboolean SamplersValidated; /**< Samplers validated against texture units? */
+
+ bool IsES; /**< True if this program uses GLSL ES */
+
+ /**
+ * Per-stage shaders resulting from the first stage of linking.
+ *
+ * Set of linked shaders for this program. The array is accessed using the
+ * \c MESA_SHADER_* defines. Entries for non-existent stages will be
+ * \c NULL.
+ */
+ struct gl_linked_shader *_LinkedShaders[MESA_SHADER_STAGES];
+
+ /**
+ * True if any of the fragment shaders attached to this program use:
+ * #extension ARB_fragment_coord_conventions: enable
+ */
+ GLboolean ARB_fragment_coord_conventions_enable;
+};
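+
+/*
+ * Illustrative sketch (not part of Mesa): resolving a location returned
+ * by glGetUniformLocation through the remap table described above:
+ *
+ *    if (loc >= 0 && (unsigned) loc < shProg->NumUniformRemapTable) {
+ *       struct gl_uniform_storage *u = shProg->UniformRemapTable[loc];
+ *       // consecutive locations of an array all point at the same
+ *       // gl_uniform_storage entry.
+ *    }
+ */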
+
+
+#define GLSL_DUMP 0x1 /**< Dump shaders to stdout */
+#define GLSL_LOG 0x2 /**< Write shaders to files */
+#define GLSL_UNIFORMS 0x4 /**< Print glUniform calls */
+#define GLSL_NOP_VERT 0x8 /**< Force no-op vertex shaders */
+#define GLSL_NOP_FRAG 0x10 /**< Force no-op fragment shaders */
+#define GLSL_USE_PROG 0x20 /**< Log glUseProgram calls */
+#define GLSL_REPORT_ERRORS 0x40 /**< Print compilation errors */
+#define GLSL_DUMP_ON_ERROR 0x80 /**< Dump shaders to stderr on compile error */
+#define GLSL_CACHE_INFO 0x100 /**< Print debug information about shader cache */
+#define GLSL_CACHE_FALLBACK 0x200 /**< Force shader cache fallback paths */
+
+
+/**
+ * Context state for GLSL vertex/fragment shaders.
+ * Extended to support pipeline object
+ */
+struct gl_pipeline_object
+{
+ /** Name of the pipeline object as received from glGenProgramPipelines.
+ * It would be 0 for shaders without separate shader objects.
+ */
+ GLuint Name;
+
+ GLint RefCount;
+
+ GLchar *Label; /**< GL_KHR_debug */
+
+ /**
+ * Programs used for rendering
+ *
+ * There is a separate program set for each shader stage.
+ */
+ struct gl_program *CurrentProgram[MESA_SHADER_STAGES];
+
+ struct gl_shader_program *ReferencedPrograms[MESA_SHADER_STAGES];
+
+ /**
+ * Program used by glUniform calls.
+ *
+ * Explicitly set by \c glUseProgram and \c glActiveProgramEXT.
+ */
+ struct gl_shader_program *ActiveProgram;
+
+ GLbitfield Flags; /**< Mask of GLSL_x flags */
+ GLboolean EverBound; /**< Has the pipeline object ever been bound? */
+ GLboolean Validated; /**< Pipeline Validation status */
+
+ GLchar *InfoLog;
+};
+
+/**
+ * Context state for GLSL pipeline shaders.
+ */
+struct gl_pipeline_shader_state
+{
+ /** Currently bound pipeline object. See _mesa_BindProgramPipeline() */
+ struct gl_pipeline_object *Current;
+
+ /** Default Object to ensure that _Shader is never NULL */
+ struct gl_pipeline_object *Default;
+
+ /** Pipeline objects */
+ struct _mesa_HashTable *Objects;
+};
+
+/**
+ * Compiler options for a single GLSL shaders type
+ */
+struct gl_shader_compiler_options
+{
+ /** Driver-selectable options: */
+ GLboolean EmitNoLoops;
+ GLboolean EmitNoCont; /**< Emit CONT opcode? */
+ GLboolean EmitNoMainReturn; /**< Emit CONT/RET opcodes? */
+ GLboolean EmitNoPow; /**< Emit POW opcodes? */
+ GLboolean EmitNoSat; /**< Emit SAT opcodes? */
+ GLboolean LowerCombinedClipCullDistance; /** Lower gl_ClipDistance and
+ * gl_CullDistance together from
+ * float[8] to vec4[2]
+ **/
+ GLbitfield LowerBuiltinVariablesXfb; /**< Which builtin variables should
+ * be lowered for transform feedback
+ **/
+
+ /**
+ * If we can lower the precision of variables based on precision
+ * qualifiers
+ */
+ GLboolean LowerPrecision;
+
+ /**
+ * \name Forms of indirect addressing the driver cannot do.
+ */
+ /*@{*/
+ GLboolean EmitNoIndirectInput; /**< No indirect addressing of inputs */
+ GLboolean EmitNoIndirectOutput; /**< No indirect addressing of outputs */
+ GLboolean EmitNoIndirectTemp; /**< No indirect addressing of temps */
+ GLboolean EmitNoIndirectUniform; /**< No indirect addressing of constants */
+ GLboolean EmitNoIndirectSampler; /**< No indirect addressing of samplers */
+ /*@}*/
+
+ GLuint MaxIfDepth; /**< Maximum nested IF blocks */
+ GLuint MaxUnrollIterations;
+
+ /**
+ * Optimize code for array of structures backends.
+ *
+ * This is a proxy for:
+ * - preferring DP4 instructions (rather than MUL/MAD) for
+ * matrix * vector operations, such as position transformation.
+ */
+ GLboolean OptimizeForAOS;
+
+ /** Lower UBO and SSBO access to intrinsics. */
+ GLboolean LowerBufferInterfaceBlocks;
+
+ /** Clamp UBO and SSBO block indices so they don't go out-of-bounds. */
+ GLboolean ClampBlockIndicesToArrayBounds;
+
+ /** (driconf) Force gl_Position to be considered invariant */
+ GLboolean PositionAlwaysInvariant;
+
+ const struct nir_shader_compiler_options *NirOptions;
+};
+
+
+/**
+ * Occlusion/timer query object.
+ */
+struct gl_query_object
+{
+ GLenum16 Target; /**< The query target, when active */
+ GLuint Id; /**< hash table ID/name */
+ GLchar *Label; /**< GL_KHR_debug */
+ GLuint64EXT Result; /**< the counter */
+ GLboolean Active; /**< inside Begin/EndQuery */
+ GLboolean Ready; /**< result is ready? */
+ GLboolean EverBound;/**< has query object ever been bound */
+ GLuint Stream; /**< The stream */
+};
+
+
+/**
+ * Context state for query objects.
+ */
+struct gl_query_state
+{
+ struct _mesa_HashTable *QueryObjects;
+ struct gl_query_object *CurrentOcclusionObject; /* GL_ARB_occlusion_query */
+ struct gl_query_object *CurrentTimerObject; /* GL_EXT_timer_query */
+
+ /** GL_NV_conditional_render */
+ struct gl_query_object *CondRenderQuery;
+
+ /** GL_EXT_transform_feedback */
+ struct gl_query_object *PrimitivesGenerated[MAX_VERTEX_STREAMS];
+ struct gl_query_object *PrimitivesWritten[MAX_VERTEX_STREAMS];
+
+ /** GL_ARB_transform_feedback_overflow_query */
+ struct gl_query_object *TransformFeedbackOverflow[MAX_VERTEX_STREAMS];
+ struct gl_query_object *TransformFeedbackOverflowAny;
+
+ /** GL_ARB_timer_query */
+ struct gl_query_object *TimeElapsed;
+
+ /** GL_ARB_pipeline_statistics_query */
+ struct gl_query_object *pipeline_stats[MAX_PIPELINE_STATISTICS];
+
+ GLenum16 CondRenderMode;
+};
+
+
+/** Sync object state */
+struct gl_sync_object
+{
+ GLuint Name; /**< Fence name */
+ GLint RefCount; /**< Reference count */
+ GLchar *Label; /**< GL_KHR_debug */
+ GLboolean DeletePending; /**< Object was deleted while there were still
+ * live references (e.g., sync not yet finished)
+ */
+ GLenum16 SyncCondition;
+ GLbitfield Flags; /**< Flags passed to glFenceSync */
+ GLuint StatusFlag:1; /**< Has the sync object been signaled? */
+};
+
+
+/**
+ * State which can be shared by multiple contexts:
+ */
+struct gl_shared_state
+{
+ simple_mtx_t Mutex; /**< for thread safety */
+ GLint RefCount; /**< Reference count */
+ struct _mesa_HashTable *DisplayList; /**< Display lists hash table */
+ struct _mesa_HashTable *BitmapAtlas; /**< For optimized glBitmap text */
+ struct _mesa_HashTable *TexObjects; /**< Texture objects hash table */
+
+ /** Default texture objects (shared by all texture units) */
+ struct gl_texture_object *DefaultTex[NUM_TEXTURE_TARGETS];
+
+ /** Fallback texture used when a bound texture is incomplete */
+ struct gl_texture_object *FallbackTex[NUM_TEXTURE_TARGETS];
+
+ /**
+ * \name Thread safety and statechange notification for texture
+ * objects.
+ *
+ * \todo Improve the granularity of locking.
+ */
+ /*@{*/
+ mtx_t TexMutex; /**< texobj thread safety */
+ GLuint TextureStateStamp; /**< state notification for shared tex */
+ /*@}*/
+
+ /**
+ * \name Vertex/geometry/fragment programs
+ */
+ /*@{*/
+ struct _mesa_HashTable *Programs; /**< All vertex/fragment programs */
+ struct gl_program *DefaultVertexProgram;
+ struct gl_program *DefaultFragmentProgram;
+ /*@}*/
+
+ /* GL_ATI_fragment_shader */
+ struct _mesa_HashTable *ATIShaders;
+ struct ati_fragment_shader *DefaultFragmentShader;
+
+ struct _mesa_HashTable *BufferObjects;
+
+ /** Table of both gl_shader and gl_shader_program objects */
+ struct _mesa_HashTable *ShaderObjects;
+
+ /* GL_EXT_framebuffer_object */
+ struct _mesa_HashTable *RenderBuffers;
+ struct _mesa_HashTable *FrameBuffers;
+
+ /* GL_ARB_sync */
+ struct set *SyncObjects;
+
+ /** GL_ARB_sampler_objects */
+ struct _mesa_HashTable *SamplerObjects;
+
+ /* GL_ARB_bindless_texture */
+ struct hash_table_u64 *TextureHandles;
+ struct hash_table_u64 *ImageHandles;
+ mtx_t HandlesMutex; /**< For texture/image handles safety */
+
+ /* GL_ARB_shading_language_include */
+ struct shader_includes *ShaderIncludes;
+ /* glCompileShaderInclude expects ShaderIncludes not to change while it is
+ * in progress.
+ */
+ mtx_t ShaderIncludeMutex;
+
+ /**
+ * Some context in this share group was affected by a GPU reset
+ *
+ * On the next call to \c glGetGraphicsResetStatus, contexts that have not
+ * been affected by a GPU reset must also return
+ * \c GL_INNOCENT_CONTEXT_RESET_ARB.
+ *
+ * Once this field becomes true, it is never reset to false.
+ */
+ bool ShareGroupReset;
+
+ /** EXT_external_objects */
+ struct _mesa_HashTable *MemoryObjects;
+
+ /** EXT_semaphore */
+ struct _mesa_HashTable *SemaphoreObjects;
+
+ /**
+ * Some context in this share group was affected by a disjoint
+ * operation. This operation can be anything that affects the values of
+ * timer queries in such a manner that they become invalid for
+ * performance metrics: for example, a GPU reset, a counter overflow, or
+ * GPU frequency changes.
+ */
+ bool DisjointOperation;
+};
+
+
+
+/**
+ * Renderbuffers represent drawing surfaces such as color, depth and/or
+ * stencil. A framebuffer object has a set of renderbuffers.
+ * Drivers will typically derive subclasses of this type.
+ */
+struct gl_renderbuffer
+{
+ simple_mtx_t Mutex; /**< for thread safety */
+ GLuint ClassID; /**< Useful for drivers */
+ GLuint Name;
+ GLchar *Label; /**< GL_KHR_debug */
+ GLint RefCount;
+ GLuint Width, Height;
+ GLuint Depth;
+ GLboolean Purgeable; /**< Is the buffer purgeable under memory pressure? */
+ GLboolean AttachedAnytime; /**< TRUE if it was attached to a framebuffer */
+ /**
+ * True for renderbuffers that wrap textures, giving the driver a chance to
+ * flush render caches through the FinishRenderTexture hook.
+ *
+ * Drivers may also set this on renderbuffers other than those generated by
+ * glFramebufferTexture(), though it means FinishRenderTexture() would be
+ * called without a rb->TexImage.
+ */
+ GLboolean NeedsFinishRenderTexture;
+ GLubyte NumSamples; /**< zero means not multisampled */
+ GLubyte NumStorageSamples; /**< for AMD_framebuffer_multisample_advanced */
+ GLenum16 InternalFormat; /**< The user-specified format */
+ GLenum16 _BaseFormat; /**< Either GL_RGB, GL_RGBA, GL_DEPTH_COMPONENT or
+ GL_STENCIL_INDEX. */
+ mesa_format Format; /**< The actual renderbuffer memory format */
+ /**
+ * Pointer to the texture image if this renderbuffer wraps a texture,
+ * otherwise NULL.
+ *
+ * Note that the reference on the gl_texture_object containing this
+ * TexImage is held by the gl_renderbuffer_attachment.
+ */
+ struct gl_texture_image *TexImage;
+
+ /** Delete this renderbuffer */
+ void (*Delete)(struct gl_context *ctx, struct gl_renderbuffer *rb);
+
+ /** Allocate new storage for this renderbuffer */
+ GLboolean (*AllocStorage)(struct gl_context *ctx,
+ struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height);
+};
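+
+/*
+ * Illustrative sketch (not part of Mesa): core Mesa drives renderbuffers
+ * through the hooks above; a storage allocation looks roughly like:
+ *
+ *    if (!rb->AllocStorage(ctx, rb, internalFormat, width, height))
+ *       _mesa_error(ctx, GL_OUT_OF_MEMORY, "glRenderbufferStorage");
+ *
+ * with drivers installing their own AllocStorage/Delete implementations.
+ */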
+
+
+/**
+ * A renderbuffer attachment points to either a texture object (and specifies
+ * a mipmap level, cube face or 3D texture slice) or points to a renderbuffer.
+ */
+struct gl_renderbuffer_attachment
+{
+ GLenum16 Type; /**< \c GL_NONE or \c GL_TEXTURE or \c GL_RENDERBUFFER_EXT */
+ GLboolean Complete;
+
+ /**
+ * If \c Type is \c GL_RENDERBUFFER_EXT, this stores a pointer to the
+ * application supplied renderbuffer object.
+ */
+ struct gl_renderbuffer *Renderbuffer;
+
+ /**
+ * If \c Type is \c GL_TEXTURE, this stores a pointer to the application
+ * supplied texture object.
+ */
+ struct gl_texture_object *Texture;
+ GLuint TextureLevel; /**< Attached mipmap level. */
+ GLsizei NumSamples; /**< from FramebufferTexture2DMultisampleEXT */
+ GLuint CubeMapFace; /**< 0 .. 5, for cube map textures. */
+ GLuint Zoffset; /**< Slice for 3D textures, or layer for both 1D
+ * and 2D array textures */
+ GLboolean Layered;
+};
+
+
+/**
+ * A framebuffer is a collection of renderbuffers (color, depth, stencil, etc).
+ * In C++ terms, think of this as a base class from which device drivers
+ * will make derived classes.
+ */
+struct gl_framebuffer
+{
+ simple_mtx_t Mutex; /**< for thread safety */
+ /**
+ * If zero, this is a window system framebuffer. If non-zero, this
+ * is a FBO framebuffer; note that for some devices (i.e. those with
+ * a natural pixel coordinate system for FBOs that differs from the
+ * OpenGL/Mesa coordinate system), this means that the viewport,
+ * polygon face orientation, and polygon stipple will have to be inverted.
+ */
+ GLuint Name;
+ GLint RefCount;
+
+ GLchar *Label; /**< GL_KHR_debug */
+
+ GLboolean DeletePending;
+
+ /**
+ * The framebuffer's visual. Immutable if this is a window system buffer.
+ * Computed from attachments if user-made FBO.
+ */
+ struct gl_config Visual;
+
+ /**
+ * Size of frame buffer in pixels. If there are no attachments, then both
+ * of these are 0.
+ */
+ GLuint Width, Height;
+
+ /**
+ * In the case that the framebuffer has no attachment (i.e.
+ * GL_ARB_framebuffer_no_attachments) then the geometry of
+ * the framebuffer is specified by the default values.
+ */
+ struct {
+ GLuint Width, Height, Layers, NumSamples;
+ GLboolean FixedSampleLocations;
+ /* Derived from NumSamples by the driver so that it can choose a valid
+ * value for the hardware.
+ */
+ GLuint _NumSamples;
+ } DefaultGeometry;
+
+ /** \name Drawing bounds (Intersection of buffer size and scissor box)
+ * The drawing region is given by [_Xmin, _Xmax) x [_Ymin, _Ymax),
+ * (inclusive for _Xmin and _Ymin while exclusive for _Xmax and _Ymax)
+ */
+ /*@{*/
+ GLint _Xmin, _Xmax;
+ GLint _Ymin, _Ymax;
+ /*@}*/
+
+ /** \name Derived Z buffer stuff */
+ /*@{*/
+ GLuint _DepthMax; /**< Max depth buffer value */
+ GLfloat _DepthMaxF; /**< Float max depth buffer value */
+ GLfloat _MRD; /**< minimum resolvable difference in Z values */
+ /*@}*/
+
+ /** One of the GL_FRAMEBUFFER_(IN)COMPLETE_* tokens */
+ GLenum16 _Status;
+
+ /** Whether one of the Attachments has Type != GL_NONE.
+ * NOTE: the values for Width and Height are set to 0 if there are no
+ * attachments. A backend driver supporting the extension
+ * GL_ARB_framebuffer_no_attachments must check for the flag _HasAttachments
+ * and, if it is GL_FALSE, must then use the values in DefaultGeometry to
+ * initialize its viewport, scissor and so on (in particular _Xmin, _Xmax,
+ * _Ymin and _Ymax do NOT take into account _HasAttachments being false). To get the
+ * geometry of the framebuffer, the helper functions
+ * _mesa_geometric_width(),
+ * _mesa_geometric_height(),
+ * _mesa_geometric_samples() and
+ * _mesa_geometric_layers()
+ * are available that check _HasAttachments.
+ */
+ bool _HasAttachments;
+
+ GLbitfield _IntegerBuffers; /**< Which color buffers are integer valued */
+ GLbitfield _RGBBuffers; /**< Which color buffers have baseformat == RGB */
+ GLbitfield _FP32Buffers; /**< Which color buffers are FP32 */
+
+ /* ARB_color_buffer_float */
+ GLboolean _AllColorBuffersFixedPoint; /* no integer, no float */
+ GLboolean _HasSNormOrFloatColorBuffer;
+
+ /**
+ * The maximum number of layers in the framebuffer, or 0 if the framebuffer
+ * is not layered. For cube maps and cube map arrays, each cube face
+ * counts as a layer. As is the case for Width and Height, a backend
+ * driver supporting GL_ARB_framebuffer_no_attachments must use
+ * DefaultGeometry in the case that _HasAttachments is false.
+ */
+ GLuint MaxNumLayers;
+
+ /** Array of all renderbuffer attachments, indexed by BUFFER_* tokens. */
+ struct gl_renderbuffer_attachment Attachment[BUFFER_COUNT];
+
+ /* In unextended OpenGL these vars are part of the GL_COLOR_BUFFER
+ * attribute group and GL_PIXEL attribute group, respectively.
+ */
+ GLenum16 ColorDrawBuffer[MAX_DRAW_BUFFERS];
+ GLenum16 ColorReadBuffer;
+
+ /* GL_ARB_sample_locations */
+ GLfloat *SampleLocationTable; /**< If NULL, no table has been specified */
+ GLboolean ProgrammableSampleLocations;
+ GLboolean SampleLocationPixelGrid;
+
+ /** Computed from ColorDraw/ReadBuffer above */
+ GLuint _NumColorDrawBuffers;
+ gl_buffer_index _ColorDrawBufferIndexes[MAX_DRAW_BUFFERS];
+ gl_buffer_index _ColorReadBufferIndex;
+ struct gl_renderbuffer *_ColorDrawBuffers[MAX_DRAW_BUFFERS];
+ struct gl_renderbuffer *_ColorReadBuffer;
+
+ /* GL_MESA_framebuffer_flip_y */
+ bool FlipY;
+
+ /** Delete this framebuffer */
+ void (*Delete)(struct gl_framebuffer *fb);
+};
+
+
+/**
+ * Precision info for shader datatypes. See glGetShaderPrecisionFormat().
+ */
+struct gl_precision
+{
+ GLushort RangeMin; /**< min value exponent */
+ GLushort RangeMax; /**< max value exponent */
+ GLushort Precision; /**< number of mantissa bits */
+};
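+
+/*
+ * Illustrative sketch (not part of Mesa): these fields back
+ * glGetShaderPrecisionFormat(), which hands them back roughly as:
+ *
+ *    range[0] = p->RangeMin;    // log2 of the minimum magnitude
+ *    range[1] = p->RangeMax;    // log2 of the maximum magnitude
+ *    *precision = p->Precision; // mantissa bits
+ */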
+
+
+/**
+ * Limits for vertex, geometry and fragment programs/shaders.
+ */
+struct gl_program_constants
+{
+ /* logical limits */
+ GLuint MaxInstructions;
+ GLuint MaxAluInstructions;
+ GLuint MaxTexInstructions;
+ GLuint MaxTexIndirections;
+ GLuint MaxAttribs;
+ GLuint MaxTemps;
+ GLuint MaxAddressRegs;
+ GLuint MaxAddressOffset; /**< [-MaxAddressOffset, MaxAddressOffset-1] */
+ GLuint MaxParameters;
+ GLuint MaxLocalParams;
+ GLuint MaxEnvParams;
+ /* native/hardware limits */
+ GLuint MaxNativeInstructions;
+ GLuint MaxNativeAluInstructions;
+ GLuint MaxNativeTexInstructions;
+ GLuint MaxNativeTexIndirections;
+ GLuint MaxNativeAttribs;
+ GLuint MaxNativeTemps;
+ GLuint MaxNativeAddressRegs;
+ GLuint MaxNativeParameters;
+ /* For shaders */
+ GLuint MaxUniformComponents; /**< Usually == MaxParameters * 4 */
+
+ /**
+ * \name Per-stage input / output limits
+ *
+ * Previous to OpenGL 3.2, the intrastage data limits were advertised with
+ * a single value: GL_MAX_VARYING_COMPONENTS (GL_MAX_VARYING_VECTORS in
+ * ES). This is stored as \c gl_constants::MaxVarying.
+ *
+ * Starting with OpenGL 3.2, the limits are advertised with per-stage
+ * variables. Each stage has a certain number of outputs that it can feed
+ * to the next stage and a certain number of inputs that it can consume
+ * from the previous stage.
+ *
+ * Vertex shader inputs do not participate in this accounting.
+ * These are tracked exclusively by \c gl_program_constants::MaxAttribs.
+ *
+ * Fragment shader outputs do not participate in this accounting.
+ * These are tracked exclusively by \c gl_constants::MaxDrawBuffers.
+ */
+ /*@{*/
+ GLuint MaxInputComponents;
+ GLuint MaxOutputComponents;
+ /*@}*/
+
+ /* ES 2.0 and GL_ARB_ES2_compatibility */
+ struct gl_precision LowFloat, MediumFloat, HighFloat;
+ struct gl_precision LowInt, MediumInt, HighInt;
+ /* GL_ARB_uniform_buffer_object */
+ GLuint MaxUniformBlocks;
+ uint64_t MaxCombinedUniformComponents;
+ GLuint MaxTextureImageUnits;
+
+ /* GL_ARB_shader_atomic_counters */
+ GLuint MaxAtomicBuffers;
+ GLuint MaxAtomicCounters;
+
+ /* GL_ARB_shader_image_load_store */
+ GLuint MaxImageUniforms;
+
+ /* GL_ARB_shader_storage_buffer_object */
+ GLuint MaxShaderStorageBlocks;
+};
+
+/**
+ * Constants which may be overridden by device driver during context creation
+ * but are never changed after that.
+ */
+struct gl_constants
+{
+ GLuint MaxTextureMbytes; /**< Max memory per image, in MB */
+ GLuint MaxTextureSize; /**< Max 1D/2D texture size, in pixels*/
+ GLuint Max3DTextureLevels; /**< Max mipmap levels for 3D textures */
+ GLuint MaxCubeTextureLevels; /**< Max mipmap levels for cube textures */
+ GLuint MaxArrayTextureLayers; /**< Max layers in array textures */
+ GLuint MaxTextureRectSize; /**< Max rectangle texture size, in pixels */
+ GLuint MaxTextureCoordUnits;
+ GLuint MaxCombinedTextureImageUnits;
+ GLuint MaxTextureUnits; /**< = MIN(CoordUnits, FragmentProgram.ImageUnits) */
+ GLfloat MaxTextureMaxAnisotropy; /**< GL_EXT_texture_filter_anisotropic */
+ GLfloat MaxTextureLodBias; /**< GL_EXT_texture_lod_bias */
+ GLuint MaxTextureBufferSize; /**< GL_ARB_texture_buffer_object */
+
+ GLuint TextureBufferOffsetAlignment; /**< GL_ARB_texture_buffer_range */
+
+ GLuint MaxArrayLockSize;
+
+ GLint SubPixelBits;
+
+ GLfloat MinPointSize, MaxPointSize; /**< aliased */
+ GLfloat MinPointSizeAA, MaxPointSizeAA; /**< antialiased */
+ GLfloat PointSizeGranularity;
+ GLfloat MinLineWidth, MaxLineWidth; /**< aliased */
+ GLfloat MinLineWidthAA, MaxLineWidthAA; /**< antialiased */
+ GLfloat LineWidthGranularity;
+
+ GLuint MaxClipPlanes;
+ GLuint MaxLights;
+ GLfloat MaxShininess; /**< GL_NV_light_max_exponent */
+ GLfloat MaxSpotExponent; /**< GL_NV_light_max_exponent */
+
+ GLuint MaxViewportWidth, MaxViewportHeight;
+ GLuint MaxViewports; /**< GL_ARB_viewport_array */
+ GLuint ViewportSubpixelBits; /**< GL_ARB_viewport_array */
+ struct {
+ GLfloat Min;
+ GLfloat Max;
+ } ViewportBounds; /**< GL_ARB_viewport_array */
+ GLuint MaxWindowRectangles; /**< GL_EXT_window_rectangles */
+
+ struct gl_program_constants Program[MESA_SHADER_STAGES];
+ GLuint MaxProgramMatrices;
+ GLuint MaxProgramMatrixStackDepth;
+
+ struct {
+ GLuint SamplesPassed;
+ GLuint TimeElapsed;
+ GLuint Timestamp;
+ GLuint PrimitivesGenerated;
+ GLuint PrimitivesWritten;
+ GLuint VerticesSubmitted;
+ GLuint PrimitivesSubmitted;
+ GLuint VsInvocations;
+ GLuint TessPatches;
+ GLuint TessInvocations;
+ GLuint GsInvocations;
+ GLuint GsPrimitives;
+ GLuint FsInvocations;
+ GLuint ComputeInvocations;
+ GLuint ClInPrimitives;
+ GLuint ClOutPrimitives;
+ } QueryCounterBits;
+
+ GLuint MaxDrawBuffers; /**< GL_ARB_draw_buffers */
+
+ GLuint MaxColorAttachments; /**< GL_EXT_framebuffer_object */
+ GLuint MaxRenderbufferSize; /**< GL_EXT_framebuffer_object */
+ GLuint MaxSamples; /**< GL_ARB_framebuffer_object */
+
+ /**
+ * GL_ARB_framebuffer_no_attachments
+ */
+ GLuint MaxFramebufferWidth;
+ GLuint MaxFramebufferHeight;
+ GLuint MaxFramebufferLayers;
+ GLuint MaxFramebufferSamples;
+
+ /** Number of varying vectors between any two shader stages. */
+ GLuint MaxVarying;
+
+ /** @{
+ * GL_ARB_uniform_buffer_object
+ */
+ GLuint MaxCombinedUniformBlocks;
+ GLuint MaxUniformBufferBindings;
+ GLuint MaxUniformBlockSize;
+ GLuint UniformBufferOffsetAlignment;
+ /** @} */
+
+ /** @{
+ * GL_ARB_shader_storage_buffer_object
+ */
+ GLuint MaxCombinedShaderStorageBlocks;
+ GLuint MaxShaderStorageBufferBindings;
+ GLuint MaxShaderStorageBlockSize;
+ GLuint ShaderStorageBufferOffsetAlignment;
+ /** @} */
+
+ /**
+ * GL_ARB_explicit_uniform_location
+ */
+ GLuint MaxUserAssignableUniformLocations;
+
+ /** geometry shader */
+ GLuint MaxGeometryOutputVertices;
+ GLuint MaxGeometryTotalOutputComponents;
+ GLuint MaxGeometryShaderInvocations;
+
+ GLuint GLSLVersion; /**< Desktop GLSL version supported (ex: 120 = 1.20) */
+ GLuint GLSLVersionCompat; /**< Desktop compat GLSL version supported */
+
+ /**
+ * Changes default GLSL extension behavior from "error" to "warn". It's out
+ * of spec, but it can make some apps work that otherwise wouldn't.
+ */
+ GLboolean ForceGLSLExtensionsWarn;
+
+ /**
+ * If non-zero, forces GLSL shaders to behave as if they began
+ * with "#version ForceGLSLVersion".
+ */
+ GLuint ForceGLSLVersion;
+
+ /**
+ * Allow GLSL #extension directives in the middle of shaders.
+ */
+ GLboolean AllowGLSLExtensionDirectiveMidShader;
+
+ /**
+    * Allow builtins as part of constant expressions. This was not allowed
+    * until GLSL 1.20; this allows it everywhere.
+ */
+ GLboolean AllowGLSLBuiltinConstantExpression;
+
+ /**
+ * Allow some relaxation of GLSL ES shader restrictions. This encompasses
+ * a number of relaxations to the ES shader rules.
+ */
+ GLboolean AllowGLSLRelaxedES;
+
+ /**
+ * Allow GLSL built-in variables to be redeclared verbatim
+ */
+ GLboolean AllowGLSLBuiltinVariableRedeclaration;
+
+ /**
+ * Allow GLSL interpolation qualifier mismatch across shader stages.
+ */
+ GLboolean AllowGLSLCrossStageInterpolationMismatch;
+
+ /**
+ * Allow creating a higher compat profile (version 3.1+) for apps that
+ * request it. Be careful when adding that driconf option because some
+ * features are unimplemented and might not work correctly.
+ */
+ GLboolean AllowHigherCompatVersion;
+
+ /**
+ * Allow layout qualifiers on function parameters.
+ */
+ GLboolean AllowLayoutQualifiersOnFunctionParameters;
+
+ /**
+ * Force computing the absolute value for sqrt() and inversesqrt() to follow
+ * D3D9 when apps rely on this behaviour.
+ */
+ GLboolean ForceGLSLAbsSqrt;
+
+ /**
+ * Force uninitialized variables to default to zero.
+ */
+ GLboolean GLSLZeroInit;
+
+ /**
+ * Treat integer textures using GL_LINEAR filters as GL_NEAREST.
+ */
+ GLboolean ForceIntegerTexNearest;
+
+ /**
+ * Does the driver support real 32-bit integers? (Otherwise, integers are
+ * simulated via floats.)
+ */
+ GLboolean NativeIntegers;
+
+ /**
+ * Does VertexID count from zero or from base vertex?
+ *
+ * \note
+ * If desktop GLSL 1.30 or GLSL ES 3.00 are not supported, this field is
+ * ignored and need not be set.
+ */
+ bool VertexID_is_zero_based;
+
+ /**
+ * If the driver supports real 32-bit integers, what integer value should be
+ * used for boolean true in uniform uploads? (Usually 1 or ~0.)
+ */
+ GLuint UniformBooleanTrue;
+
+ /**
+    * Maximum amount of time, measured in nanoseconds, that the server can wait.
+ */
+ GLuint64 MaxServerWaitTimeout;
+
+ /** GL_EXT_provoking_vertex */
+ GLboolean QuadsFollowProvokingVertexConvention;
+
+ /** GL_ARB_viewport_array */
+ GLenum16 LayerAndVPIndexProvokingVertex;
+
+ /** OpenGL version 3.0 */
+ GLbitfield ContextFlags; /**< Ex: GL_CONTEXT_FLAG_FORWARD_COMPATIBLE_BIT */
+
+ /** OpenGL version 3.2 */
+ GLbitfield ProfileMask; /**< Mask of CONTEXT_x_PROFILE_BIT */
+
+ /** OpenGL version 4.4 */
+ GLuint MaxVertexAttribStride;
+
+ /** GL_EXT_transform_feedback */
+ GLuint MaxTransformFeedbackBuffers;
+ GLuint MaxTransformFeedbackSeparateComponents;
+ GLuint MaxTransformFeedbackInterleavedComponents;
+ GLuint MaxVertexStreams;
+
+ /** GL_EXT_gpu_shader4 */
+ GLint MinProgramTexelOffset, MaxProgramTexelOffset;
+
+ /** GL_ARB_texture_gather */
+ GLuint MinProgramTextureGatherOffset;
+ GLuint MaxProgramTextureGatherOffset;
+ GLuint MaxProgramTextureGatherComponents;
+
+ /* GL_ARB_robustness */
+ GLenum16 ResetStrategy;
+
+ /* GL_KHR_robustness */
+ GLboolean RobustAccess;
+
+ /* GL_ARB_blend_func_extended */
+ GLuint MaxDualSourceDrawBuffers;
+
+ /**
+ * Whether the implementation strips out and ignores texture borders.
+ *
+ * Many GPU hardware implementations don't support rendering with texture
+ * borders and mipmapped textures. (Note: not static border color, but the
+ * old 1-pixel border around each edge). Implementations then have to do
+ * slow fallbacks to be correct, or just ignore the border and be fast but
+ * wrong. Setting the flag strips the border off of TexImage calls,
+ * providing "fast but wrong" at significantly reduced driver complexity.
+ *
+ * Texture borders are deprecated in GL 3.0.
+ **/
+ GLboolean StripTextureBorder;
+
+ /**
+ * For drivers which can do a better job at eliminating unused uniforms
+ * than the GLSL compiler.
+ *
+ * XXX Remove these as soon as a better solution is available.
+ */
+ GLboolean GLSLSkipStrictMaxUniformLimitCheck;
+
+ /**
+ * Whether gl_FragCoord, gl_PointCoord and gl_FrontFacing
+ * are system values.
+ **/
+ bool GLSLFragCoordIsSysVal;
+ bool GLSLPointCoordIsSysVal;
+ bool GLSLFrontFacingIsSysVal;
+
+ /**
+ * Run the minimum amount of GLSL optimizations to be able to link
+ * shaders optimally (eliminate dead varyings and uniforms) and just do
+ * all the necessary lowering.
+ */
+ bool GLSLOptimizeConservatively;
+
+ /**
+ * Whether to call lower_const_arrays_to_uniforms() during linking.
+ */
+ bool GLSLLowerConstArrays;
+
+ /**
+ * True if gl_TessLevelInner/Outer[] in the TES should be inputs
+ * (otherwise, they're system values).
+ */
+ bool GLSLTessLevelsAsInputs;
+
+ /**
+ * Always use the GetTransformFeedbackVertexCount() driver hook, rather
+ * than passing the transform feedback object to the drawing function.
+ */
+ GLboolean AlwaysUseGetTransformFeedbackVertexCount;
+
+ /** GL_ARB_map_buffer_alignment */
+ GLuint MinMapBufferAlignment;
+
+ /**
+ * Disable varying packing. This is out of spec, but potentially useful
+    * for older platforms that support a limited number of texture
+ * indirections--on these platforms, unpacking the varyings in the fragment
+ * shader increases the number of texture indirections by 1, which might
+ * make some shaders not executable at all.
+ *
+ * Drivers that support transform feedback must set this value to GL_FALSE.
+ */
+ GLboolean DisableVaryingPacking;
+
+ /**
+ * Disable varying packing if used for transform feedback. This is needed
+ * for some drivers (e.g. Panfrost) where transform feedback requires
+ * unpacked varyings.
+ *
+    * This variable is mutually exclusive with DisableVaryingPacking.
+ */
+ GLboolean DisableTransformFeedbackPacking;
+
+ /**
+ * UBOs and SSBOs can be packed tightly by the OpenGL implementation when
+    * layout is set as shared (the default) or packed. However, most Mesa drivers
+ * just use STD140 for these layouts. This flag allows drivers to use STD430
+ * for packed and shared layouts which allows arrays to be packed more
+ * tightly.
+ */
+ bool UseSTD430AsDefaultPacking;
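+   /* Example: under std140 rules a "float a[4]" member of a block occupies
+    * 64 bytes (each element padded to a vec4 slot), while std430 packs it
+    * into 16 bytes. */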
+
+ /**
+ * Should meaningful names be generated for compiler temporary variables?
+ *
+ * Generally, it is not useful to have the compiler generate "meaningful"
+ * names for temporary variables that it creates. This can, however, be a
+ * useful debugging aid. In Mesa debug builds or release builds when
+ * MESA_GLSL is set at run-time, meaningful names will be generated.
+ * Drivers can also force names to be generated by setting this field.
+ * For example, the i965 driver may set it when INTEL_DEBUG=vs (to dump
+ * vertex shader assembly) is set at run-time.
+ */
+ bool GenerateTemporaryNames;
+
+ /*
+ * Maximum value supported for an index in DrawElements and friends.
+ *
+ * This must be at least (1ull<<24)-1. The default value is
+ * (1ull<<32)-1.
+ *
+ * \since ES 3.0 or GL_ARB_ES3_compatibility
+ * \sa _mesa_init_constants
+ */
+ GLuint64 MaxElementIndex;
+
+ /**
+    * Disable interpretation of line continuations (lines ending with a
+    * backslash character ('\')) in GLSL source.
+ */
+ GLboolean DisableGLSLLineContinuations;
+
+ /** GL_ARB_texture_multisample */
+ GLint MaxColorTextureSamples;
+ GLint MaxDepthTextureSamples;
+ GLint MaxIntegerSamples;
+
+ /** GL_AMD_framebuffer_multisample_advanced */
+ GLint MaxColorFramebufferSamples;
+ GLint MaxColorFramebufferStorageSamples;
+ GLint MaxDepthStencilFramebufferSamples;
+
+ /* An array of supported MSAA modes allowing different sample
+ * counts per attachment type.
+ */
+ struct {
+ GLint NumColorSamples;
+ GLint NumColorStorageSamples;
+ GLint NumDepthStencilSamples;
+ } SupportedMultisampleModes[40];
+ GLint NumSupportedMultisampleModes;
+
+ /** GL_ARB_shader_atomic_counters */
+ GLuint MaxAtomicBufferBindings;
+ GLuint MaxAtomicBufferSize;
+ GLuint MaxCombinedAtomicBuffers;
+ GLuint MaxCombinedAtomicCounters;
+
+ /** GL_ARB_vertex_attrib_binding */
+ GLint MaxVertexAttribRelativeOffset;
+ GLint MaxVertexAttribBindings;
+
+ /* GL_ARB_shader_image_load_store */
+ GLuint MaxImageUnits;
+ GLuint MaxCombinedShaderOutputResources;
+ GLuint MaxImageSamples;
+ GLuint MaxCombinedImageUniforms;
+
+ /** GL_ARB_compute_shader */
+ GLuint MaxComputeWorkGroupCount[3]; /* Array of x, y, z dimensions */
+ GLuint MaxComputeWorkGroupSize[3]; /* Array of x, y, z dimensions */
+ GLuint MaxComputeWorkGroupInvocations;
+ GLuint MaxComputeSharedMemorySize;
+
+ /** GL_ARB_compute_variable_group_size */
+ GLuint MaxComputeVariableGroupSize[3]; /* Array of x, y, z dimensions */
+ GLuint MaxComputeVariableGroupInvocations;
+
+ /** GL_ARB_gpu_shader5 */
+ GLfloat MinFragmentInterpolationOffset;
+ GLfloat MaxFragmentInterpolationOffset;
+
+ GLboolean FakeSWMSAA;
+
+ /** GL_KHR_context_flush_control */
+ GLenum16 ContextReleaseBehavior;
+
+ struct gl_shader_compiler_options ShaderCompilerOptions[MESA_SHADER_STAGES];
+
+ /** GL_ARB_tessellation_shader */
+ GLuint MaxPatchVertices;
+ GLuint MaxTessGenLevel;
+ GLuint MaxTessPatchComponents;
+ GLuint MaxTessControlTotalOutputComponents;
+ bool LowerTessLevel; /**< Lower gl_TessLevel* from float[n] to vecn? */
+ bool PrimitiveRestartForPatches;
+ bool LowerCsDerivedVariables; /**< Lower gl_GlobalInvocationID and
+ * gl_LocalInvocationIndex based on
+ * other builtin variables. */
+
+ /** GL_OES_primitive_bounding_box */
+ bool NoPrimitiveBoundingBoxOutput;
+
+ /** GL_ARB_sparse_buffer */
+ GLuint SparseBufferPageSize;
+
+ /** Used as an input for sha1 generation in the on-disk shader cache */
+ unsigned char *dri_config_options_sha1;
+
+   /** Whether drivers are OK with mapped buffers during draw and other calls. */
+ bool AllowMappedBuffersDuringExecution;
+
+ /**
+ * Whether buffer creation, unsynchronized mapping, unmapping, and
+ * deletion is thread-safe.
+ */
+ bool BufferCreateMapUnsynchronizedThreadSafe;
+
+ /** GL_ARB_get_program_binary */
+ GLuint NumProgramBinaryFormats;
+
+ /** GL_NV_conservative_raster */
+ GLuint MaxSubpixelPrecisionBiasBits;
+
+ /** GL_NV_conservative_raster_dilate */
+ GLfloat ConservativeRasterDilateRange[2];
+ GLfloat ConservativeRasterDilateGranularity;
+
+   /** Is the driver's uniform storage packed or padded to 16 bytes? */
+ bool PackedDriverUniformStorage;
+
+   /** Does the driver make use of the NIR-based GLSL linker? */
+ bool UseNIRGLSLLinker;
+
+   /** Whether glBitmap uses red textures rather than alpha */
+ bool BitmapUsesRed;
+
+ /** Whether the vertex buffer offset is a signed 32-bit integer. */
+ bool VertexBufferOffsetIsInt32;
+
+ /** Whether the driver can handle MultiDrawElements with non-VBO indices. */
+ bool MultiDrawWithUserIndices;
+
+ /** Whether out-of-order draw (Begin/End) optimizations are allowed. */
+ bool AllowDrawOutOfOrder;
+
+ /** GL_ARB_gl_spirv */
+ struct spirv_supported_capabilities SpirVCapabilities;
+
+ /** GL_ARB_spirv_extensions */
+ struct spirv_supported_extensions *SpirVExtensions;
+
+ char *VendorOverride;
+
+ /** Buffer size used to upload vertices from glBegin/glEnd. */
+ unsigned glBeginEndBufferSize;
+};
+
+
+/**
+ * Enable flag for each OpenGL extension. Different device drivers will
+ * enable different extensions at runtime.
+ */
+struct gl_extensions
+{
+ GLboolean dummy; /* don't remove this! */
+ GLboolean dummy_true; /* Set true by _mesa_init_extensions(). */
+ GLboolean dummy_false; /* Set false by _mesa_init_extensions(). */
+ GLboolean ANGLE_texture_compression_dxt;
+ GLboolean ARB_ES2_compatibility;
+ GLboolean ARB_ES3_compatibility;
+ GLboolean ARB_ES3_1_compatibility;
+ GLboolean ARB_ES3_2_compatibility;
+ GLboolean ARB_arrays_of_arrays;
+ GLboolean ARB_base_instance;
+ GLboolean ARB_bindless_texture;
+ GLboolean ARB_blend_func_extended;
+ GLboolean ARB_buffer_storage;
+ GLboolean ARB_clear_texture;
+ GLboolean ARB_clip_control;
+ GLboolean ARB_color_buffer_float;
+ GLboolean ARB_compatibility;
+ GLboolean ARB_compute_shader;
+ GLboolean ARB_compute_variable_group_size;
+ GLboolean ARB_conditional_render_inverted;
+ GLboolean ARB_conservative_depth;
+ GLboolean ARB_copy_image;
+ GLboolean ARB_cull_distance;
+ GLboolean ARB_depth_buffer_float;
+ GLboolean ARB_depth_clamp;
+ GLboolean ARB_depth_texture;
+ GLboolean ARB_derivative_control;
+ GLboolean ARB_draw_buffers_blend;
+ GLboolean ARB_draw_elements_base_vertex;
+ GLboolean ARB_draw_indirect;
+ GLboolean ARB_draw_instanced;
+ GLboolean ARB_fragment_coord_conventions;
+ GLboolean ARB_fragment_layer_viewport;
+ GLboolean ARB_fragment_program;
+ GLboolean ARB_fragment_program_shadow;
+ GLboolean ARB_fragment_shader;
+ GLboolean ARB_framebuffer_no_attachments;
+ GLboolean ARB_framebuffer_object;
+ GLboolean ARB_fragment_shader_interlock;
+ GLboolean ARB_enhanced_layouts;
+ GLboolean ARB_explicit_attrib_location;
+ GLboolean ARB_explicit_uniform_location;
+ GLboolean ARB_gl_spirv;
+ GLboolean ARB_gpu_shader5;
+ GLboolean ARB_gpu_shader_fp64;
+ GLboolean ARB_gpu_shader_int64;
+ GLboolean ARB_half_float_vertex;
+ GLboolean ARB_indirect_parameters;
+ GLboolean ARB_instanced_arrays;
+ GLboolean ARB_internalformat_query;
+ GLboolean ARB_internalformat_query2;
+ GLboolean ARB_map_buffer_range;
+ GLboolean ARB_occlusion_query;
+ GLboolean ARB_occlusion_query2;
+ GLboolean ARB_pipeline_statistics_query;
+ GLboolean ARB_point_sprite;
+ GLboolean ARB_polygon_offset_clamp;
+ GLboolean ARB_post_depth_coverage;
+ GLboolean ARB_query_buffer_object;
+ GLboolean ARB_robust_buffer_access_behavior;
+ GLboolean ARB_sample_locations;
+ GLboolean ARB_sample_shading;
+ GLboolean ARB_seamless_cube_map;
+ GLboolean ARB_shader_atomic_counter_ops;
+ GLboolean ARB_shader_atomic_counters;
+ GLboolean ARB_shader_ballot;
+ GLboolean ARB_shader_bit_encoding;
+ GLboolean ARB_shader_clock;
+ GLboolean ARB_shader_draw_parameters;
+ GLboolean ARB_shader_group_vote;
+ GLboolean ARB_shader_image_load_store;
+ GLboolean ARB_shader_image_size;
+ GLboolean ARB_shader_precision;
+ GLboolean ARB_shader_stencil_export;
+ GLboolean ARB_shader_storage_buffer_object;
+ GLboolean ARB_shader_texture_image_samples;
+ GLboolean ARB_shader_texture_lod;
+ GLboolean ARB_shader_viewport_layer_array;
+ GLboolean ARB_shading_language_packing;
+ GLboolean ARB_shading_language_420pack;
+ GLboolean ARB_shadow;
+ GLboolean ARB_sparse_buffer;
+ GLboolean ARB_stencil_texturing;
+ GLboolean ARB_spirv_extensions;
+ GLboolean ARB_sync;
+ GLboolean ARB_tessellation_shader;
+ GLboolean ARB_texture_border_clamp;
+ GLboolean ARB_texture_buffer_object;
+ GLboolean ARB_texture_buffer_object_rgb32;
+ GLboolean ARB_texture_buffer_range;
+ GLboolean ARB_texture_compression_bptc;
+ GLboolean ARB_texture_compression_rgtc;
+ GLboolean ARB_texture_cube_map;
+ GLboolean ARB_texture_cube_map_array;
+ GLboolean ARB_texture_env_combine;
+ GLboolean ARB_texture_env_crossbar;
+ GLboolean ARB_texture_env_dot3;
+ GLboolean ARB_texture_filter_anisotropic;
+ GLboolean ARB_texture_float;
+ GLboolean ARB_texture_gather;
+ GLboolean ARB_texture_mirror_clamp_to_edge;
+ GLboolean ARB_texture_multisample;
+ GLboolean ARB_texture_non_power_of_two;
+ GLboolean ARB_texture_stencil8;
+ GLboolean ARB_texture_query_levels;
+ GLboolean ARB_texture_query_lod;
+ GLboolean ARB_texture_rg;
+ GLboolean ARB_texture_rgb10_a2ui;
+ GLboolean ARB_texture_view;
+ GLboolean ARB_timer_query;
+ GLboolean ARB_transform_feedback2;
+ GLboolean ARB_transform_feedback3;
+ GLboolean ARB_transform_feedback_instanced;
+ GLboolean ARB_transform_feedback_overflow_query;
+ GLboolean ARB_uniform_buffer_object;
+ GLboolean ARB_vertex_attrib_64bit;
+ GLboolean ARB_vertex_program;
+ GLboolean ARB_vertex_shader;
+ GLboolean ARB_vertex_type_10f_11f_11f_rev;
+ GLboolean ARB_vertex_type_2_10_10_10_rev;
+ GLboolean ARB_viewport_array;
+ GLboolean EXT_blend_color;
+ GLboolean EXT_blend_equation_separate;
+ GLboolean EXT_blend_func_separate;
+ GLboolean EXT_blend_minmax;
+ GLboolean EXT_demote_to_helper_invocation;
+ GLboolean EXT_depth_bounds_test;
+ GLboolean EXT_disjoint_timer_query;
+ GLboolean EXT_draw_buffers2;
+ GLboolean EXT_EGL_image_storage;
+ GLboolean EXT_float_blend;
+ GLboolean EXT_framebuffer_multisample;
+ GLboolean EXT_framebuffer_multisample_blit_scaled;
+ GLboolean EXT_framebuffer_sRGB;
+ GLboolean EXT_gpu_program_parameters;
+ GLboolean EXT_gpu_shader4;
+ GLboolean EXT_memory_object;
+ GLboolean EXT_memory_object_fd;
+ GLboolean EXT_multisampled_render_to_texture;
+ GLboolean EXT_packed_float;
+ GLboolean EXT_pixel_buffer_object;
+ GLboolean EXT_point_parameters;
+ GLboolean EXT_provoking_vertex;
+ GLboolean EXT_render_snorm;
+ GLboolean EXT_semaphore;
+ GLboolean EXT_semaphore_fd;
+ GLboolean EXT_shader_image_load_formatted;
+ GLboolean EXT_shader_image_load_store;
+ GLboolean EXT_shader_integer_mix;
+ GLboolean EXT_shader_samples_identical;
+ GLboolean EXT_sRGB;
+ GLboolean EXT_stencil_two_side;
+ GLboolean EXT_texture_array;
+ GLboolean EXT_texture_buffer_object;
+ GLboolean EXT_texture_compression_latc;
+ GLboolean EXT_texture_compression_s3tc;
+ GLboolean EXT_texture_compression_s3tc_srgb;
+ GLboolean EXT_texture_env_dot3;
+ GLboolean EXT_texture_filter_anisotropic;
+ GLboolean EXT_texture_integer;
+ GLboolean EXT_texture_mirror_clamp;
+ GLboolean EXT_texture_norm16;
+ GLboolean EXT_texture_shadow_lod;
+ GLboolean EXT_texture_shared_exponent;
+ GLboolean EXT_texture_snorm;
+ GLboolean EXT_texture_sRGB;
+ GLboolean EXT_texture_sRGB_R8;
+ GLboolean EXT_texture_sRGB_decode;
+ GLboolean EXT_texture_swizzle;
+ GLboolean EXT_texture_type_2_10_10_10_REV;
+ GLboolean EXT_transform_feedback;
+ GLboolean EXT_timer_query;
+ GLboolean EXT_vertex_array_bgra;
+ GLboolean EXT_window_rectangles;
+ GLboolean OES_copy_image;
+ GLboolean OES_primitive_bounding_box;
+ GLboolean OES_sample_variables;
+ GLboolean OES_standard_derivatives;
+ GLboolean OES_texture_buffer;
+ GLboolean OES_texture_cube_map_array;
+ GLboolean OES_texture_view;
+ GLboolean OES_viewport_array;
+ /* vendor extensions */
+ GLboolean AMD_compressed_ATC_texture;
+ GLboolean AMD_framebuffer_multisample_advanced;
+ GLboolean AMD_depth_clamp_separate;
+ GLboolean AMD_performance_monitor;
+ GLboolean AMD_pinned_memory;
+ GLboolean AMD_seamless_cubemap_per_texture;
+ GLboolean AMD_vertex_shader_layer;
+ GLboolean AMD_vertex_shader_viewport_index;
+ GLboolean ANDROID_extension_pack_es31a;
+ GLboolean APPLE_object_purgeable;
+ GLboolean ATI_meminfo;
+ GLboolean ATI_texture_compression_3dc;
+ GLboolean ATI_texture_mirror_once;
+ GLboolean ATI_texture_env_combine3;
+ GLboolean ATI_fragment_shader;
+ GLboolean GREMEDY_string_marker;
+ GLboolean INTEL_blackhole_render;
+ GLboolean INTEL_conservative_rasterization;
+ GLboolean INTEL_performance_query;
+ GLboolean INTEL_shader_atomic_float_minmax;
+ GLboolean INTEL_shader_integer_functions2;
+ GLboolean KHR_blend_equation_advanced;
+ GLboolean KHR_blend_equation_advanced_coherent;
+ GLboolean KHR_robustness;
+ GLboolean KHR_texture_compression_astc_hdr;
+ GLboolean KHR_texture_compression_astc_ldr;
+ GLboolean KHR_texture_compression_astc_sliced_3d;
+ GLboolean MESA_framebuffer_flip_y;
+ GLboolean MESA_tile_raster_order;
+ GLboolean MESA_pack_invert;
+ GLboolean EXT_shader_framebuffer_fetch;
+ GLboolean EXT_shader_framebuffer_fetch_non_coherent;
+ GLboolean MESA_shader_integer_functions;
+ GLboolean MESA_ycbcr_texture;
+ GLboolean NV_alpha_to_coverage_dither_control;
+ GLboolean NV_compute_shader_derivatives;
+ GLboolean NV_conditional_render;
+ GLboolean NV_copy_image;
+ GLboolean NV_fill_rectangle;
+ GLboolean NV_fog_distance;
+ GLboolean NV_point_sprite;
+ GLboolean NV_primitive_restart;
+ GLboolean NV_shader_atomic_float;
+ GLboolean NV_texture_barrier;
+ GLboolean NV_texture_env_combine4;
+ GLboolean NV_texture_rectangle;
+ GLboolean NV_vdpau_interop;
+ GLboolean NV_conservative_raster;
+ GLboolean NV_conservative_raster_dilate;
+ GLboolean NV_conservative_raster_pre_snap_triangles;
+ GLboolean NV_conservative_raster_pre_snap;
+ GLboolean NV_viewport_array2;
+ GLboolean NV_viewport_swizzle;
+ GLboolean NVX_gpu_memory_info;
+ GLboolean TDFX_texture_compression_FXT1;
+ GLboolean OES_EGL_image;
+ GLboolean OES_draw_texture;
+ GLboolean OES_depth_texture_cube_map;
+ GLboolean OES_EGL_image_external;
+ GLboolean OES_texture_float;
+ GLboolean OES_texture_float_linear;
+ GLboolean OES_texture_half_float;
+ GLboolean OES_texture_half_float_linear;
+ GLboolean OES_compressed_ETC1_RGB8_texture;
+ GLboolean OES_geometry_shader;
+ GLboolean OES_texture_compression_astc;
+ GLboolean extension_sentinel;
+ /** The extension string */
+ const GLubyte *String;
+ /** Number of supported extensions */
+ GLuint Count;
+ /**
+ * The context version which extension helper functions compare against.
+ * By default, the value is equal to ctx->Version. This changes to ~0
+ * while meta is in progress.
+ */
+ GLubyte Version;
+};
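+
+/*
+ * A minimal sketch of how a driver populates this struct at context
+ * creation (the helper name is hypothetical; the fields are the real
+ * ones declared above):
+ *
+ *    static void enable_common_extensions(struct gl_context *ctx)
+ *    {
+ *       ctx->Extensions.ARB_framebuffer_object = GL_TRUE;
+ *       ctx->Extensions.ARB_uniform_buffer_object = GL_TRUE;
+ *       ctx->Extensions.EXT_texture_filter_anisotropic = GL_TRUE;
+ *    }
+ */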
+
+
+/**
+ * A stack of matrices (projection, modelview, color, texture, etc).
+ */
+struct gl_matrix_stack
+{
+ GLmatrix *Top; /**< points into Stack */
+ GLmatrix *Stack; /**< array [MaxDepth] of GLmatrix */
+ unsigned StackSize; /**< Number of elements in Stack */
+ GLuint Depth; /**< 0 <= Depth < MaxDepth */
+ GLuint MaxDepth; /**< size of Stack[] array */
+ GLuint DirtyFlag; /**< _NEW_MODELVIEW or _NEW_PROJECTION, for example */
+};
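+
+/*
+ * Conceptual sketch of a matrix-stack push against these fields (error
+ * handling elided; assumes a matrix-copy helper along the lines of
+ * Mesa's _math_matrix_copy()):
+ *
+ *    if (stack->Depth + 1 >= stack->MaxDepth)
+ *       return;                               // GL_STACK_OVERFLOW
+ *    _math_matrix_copy(&stack->Stack[stack->Depth + 1],
+ *                      &stack->Stack[stack->Depth]);
+ *    stack->Depth++;
+ *    stack->Top = &stack->Stack[stack->Depth];
+ *    ctx->NewState |= stack->DirtyFlag;
+ */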
+
+
+/**
+ * \name Bits for image transfer operations
+ * \sa __struct gl_contextRec::ImageTransferState.
+ */
+/*@{*/
+#define IMAGE_SCALE_BIAS_BIT 0x1
+#define IMAGE_SHIFT_OFFSET_BIT 0x2
+#define IMAGE_MAP_COLOR_BIT 0x4
+#define IMAGE_CLAMP_BIT                          0x800
+/*@}*/
+
+
+/** Pixel Transfer ops */
+#define IMAGE_BITS (IMAGE_SCALE_BIAS_BIT | \
+ IMAGE_SHIFT_OFFSET_BIT | \
+ IMAGE_MAP_COLOR_BIT)
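+
+/*
+ * Usage sketch: pixel paths test the derived gl_context::_ImageTransferState
+ * mask before applying transfer operations (the helper name is hypothetical):
+ *
+ *    if (ctx->_ImageTransferState & IMAGE_SCALE_BIAS_BIT)
+ *       apply_scale_and_bias(ctx, pixels);
+ */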
+
+
+/**
+ * \name Bits to indicate what state has changed.
+ */
+/*@{*/
+#define _NEW_MODELVIEW (1u << 0) /**< gl_context::ModelView */
+#define _NEW_PROJECTION (1u << 1) /**< gl_context::Projection */
+#define _NEW_TEXTURE_MATRIX (1u << 2) /**< gl_context::TextureMatrix */
+#define _NEW_COLOR (1u << 3) /**< gl_context::Color */
+#define _NEW_DEPTH (1u << 4) /**< gl_context::Depth */
+/* gap */
+#define _NEW_FOG (1u << 6) /**< gl_context::Fog */
+#define _NEW_HINT (1u << 7) /**< gl_context::Hint */
+#define _NEW_LIGHT (1u << 8) /**< gl_context::Light */
+#define _NEW_LINE (1u << 9) /**< gl_context::Line */
+#define _NEW_PIXEL (1u << 10) /**< gl_context::Pixel */
+#define _NEW_POINT (1u << 11) /**< gl_context::Point */
+#define _NEW_POLYGON (1u << 12) /**< gl_context::Polygon */
+#define _NEW_POLYGONSTIPPLE (1u << 13) /**< gl_context::PolygonStipple */
+#define _NEW_SCISSOR (1u << 14) /**< gl_context::Scissor */
+#define _NEW_STENCIL (1u << 15) /**< gl_context::Stencil */
+#define _NEW_TEXTURE_OBJECT (1u << 16) /**< gl_context::Texture (bindings only) */
+#define _NEW_TRANSFORM (1u << 17) /**< gl_context::Transform */
+#define _NEW_VIEWPORT (1u << 18) /**< gl_context::Viewport */
+#define _NEW_TEXTURE_STATE (1u << 19) /**< gl_context::Texture (states only) */
+/* gap */
+#define _NEW_RENDERMODE (1u << 21) /**< gl_context::RenderMode, etc */
+#define _NEW_BUFFERS (1u << 22) /**< gl_context::Visual, DrawBuffer, */
+#define _NEW_CURRENT_ATTRIB (1u << 23) /**< gl_context::Current */
+#define _NEW_MULTISAMPLE (1u << 24) /**< gl_context::Multisample */
+#define _NEW_TRACK_MATRIX (1u << 25) /**< gl_context::VertexProgram */
+#define _NEW_PROGRAM (1u << 26) /**< New program/shader state */
+#define _NEW_PROGRAM_CONSTANTS (1u << 27)
+/* gap */
+#define _NEW_FRAG_CLAMP (1u << 29)
+/* gap, re-use for core Mesa state only; use ctx->DriverFlags otherwise */
+#define _NEW_VARYING_VP_INPUTS (1u << 31) /**< gl_context::varying_vp_inputs */
+#define _NEW_ALL ~0
+/*@}*/
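+
+/*
+ * Usage sketch: state-mutating entry points OR bits into
+ * gl_context::NewState, and validation code tests them before drawing
+ * (the update helper is hypothetical):
+ *
+ *    // in glViewport() and friends
+ *    ctx->NewState |= _NEW_VIEWPORT;
+ *
+ *    // before drawing
+ *    if (ctx->NewState & (_NEW_VIEWPORT | _NEW_SCISSOR))
+ *       update_viewport_scissor(ctx);
+ */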
+
+
+/**
+ * Composite state flags
+ */
+/*@{*/
+#define _NEW_TEXTURE (_NEW_TEXTURE_OBJECT | _NEW_TEXTURE_STATE)
+
+#define _MESA_NEW_NEED_EYE_COORDS (_NEW_LIGHT | \
+ _NEW_TEXTURE_STATE | \
+ _NEW_POINT | \
+ _NEW_PROGRAM | \
+ _NEW_MODELVIEW)
+
+#define _MESA_NEW_SEPARATE_SPECULAR (_NEW_LIGHT | \
+ _NEW_FOG | \
+ _NEW_PROGRAM)
+
+
+/*@}*/
+
+
+
+
+/* This has to be included here. */
+#include "dd.h"
+
+
+/** Opaque declaration of display list payload data type */
+union gl_dlist_node;
+
+
+/**
+ * Per-display list information.
+ */
+struct gl_display_list
+{
+ GLuint Name;
+ GLbitfield Flags; /**< DLIST_x flags */
+ GLchar *Label; /**< GL_KHR_debug */
+ /** The dlist commands are in a linked list of nodes */
+ union gl_dlist_node *Head;
+};
+
+
+/**
+ * State used during display list compilation and execution.
+ */
+struct gl_dlist_state
+{
+ struct gl_display_list *CurrentList; /**< List currently being compiled */
+ union gl_dlist_node *CurrentBlock; /**< Pointer to current block of nodes */
+ GLuint CurrentPos; /**< Index into current block of nodes */
+ GLuint CallDepth; /**< Current recursion calling depth */
+
+ GLvertexformat ListVtxfmt;
+
+ GLubyte ActiveAttribSize[VERT_ATTRIB_MAX];
+ uint32_t CurrentAttrib[VERT_ATTRIB_MAX][8];
+
+ GLubyte ActiveMaterialSize[MAT_ATTRIB_MAX];
+ GLfloat CurrentMaterial[MAT_ATTRIB_MAX][4];
+
+ struct {
+ /* State known to have been set by the currently-compiling display
+ * list. Used to eliminate some redundant state changes.
+ */
+ GLenum16 ShadeModel;
+ } Current;
+};
+
+/**
+ * Driver-specific state flags.
+ *
+ * These are or'd with gl_context::NewDriverState to notify a driver about
+ * a state change. The driver sets the flags at context creation and
+ * the meaning of the bits set is opaque to core Mesa.
+ */
+struct gl_driver_flags
+{
+ /** gl_context::Array::_DrawArrays (vertex array state) */
+ uint64_t NewArray;
+
+ /** gl_context::TransformFeedback::CurrentObject */
+ uint64_t NewTransformFeedback;
+
+ /** gl_context::TransformFeedback::CurrentObject::shader_program */
+ uint64_t NewTransformFeedbackProg;
+
+ /** gl_context::RasterDiscard */
+ uint64_t NewRasterizerDiscard;
+
+ /** gl_context::TileRasterOrder* */
+ uint64_t NewTileRasterOrder;
+
+ /**
+ * gl_context::UniformBufferBindings
+ * gl_shader_program::UniformBlocks
+ */
+ uint64_t NewUniformBuffer;
+
+ /**
+ * gl_context::ShaderStorageBufferBindings
+ * gl_shader_program::ShaderStorageBlocks
+ */
+ uint64_t NewShaderStorageBuffer;
+
+ uint64_t NewTextureBuffer;
+
+ /**
+ * gl_context::AtomicBufferBindings
+ */
+ uint64_t NewAtomicBuffer;
+
+ /**
+ * gl_context::ImageUnits
+ */
+ uint64_t NewImageUnits;
+
+ /**
+ * gl_context::TessCtrlProgram::patch_default_*
+ */
+ uint64_t NewDefaultTessLevels;
+
+ /**
+ * gl_context::IntelConservativeRasterization
+ */
+ uint64_t NewIntelConservativeRasterization;
+
+ /**
+ * gl_context::NvConservativeRasterization
+ */
+ uint64_t NewNvConservativeRasterization;
+
+ /**
+ * gl_context::ConservativeRasterMode/ConservativeRasterDilate
+ * gl_context::SubpixelPrecisionBias
+ */
+ uint64_t NewNvConservativeRasterizationParams;
+
+ /**
+ * gl_context::Scissor::WindowRects
+ */
+ uint64_t NewWindowRectangles;
+
+ /** gl_context::Color::sRGBEnabled */
+ uint64_t NewFramebufferSRGB;
+
+ /** gl_context::Scissor::EnableFlags */
+ uint64_t NewScissorTest;
+
+ /** gl_context::Scissor::ScissorArray */
+ uint64_t NewScissorRect;
+
+ /** gl_context::Color::Alpha* */
+ uint64_t NewAlphaTest;
+
+ /** gl_context::Color::Blend/Dither */
+ uint64_t NewBlend;
+
+ /** gl_context::Color::BlendColor */
+ uint64_t NewBlendColor;
+
+ /** gl_context::Color::Color/Index */
+ uint64_t NewColorMask;
+
+ /** gl_context::Depth */
+ uint64_t NewDepth;
+
+ /** gl_context::Color::LogicOp/ColorLogicOp/IndexLogicOp */
+ uint64_t NewLogicOp;
+
+ /** gl_context::Multisample::Enabled */
+ uint64_t NewMultisampleEnable;
+
+ /** gl_context::Multisample::SampleAlphaTo* */
+ uint64_t NewSampleAlphaToXEnable;
+
+ /** gl_context::Multisample::SampleCoverage/SampleMaskValue */
+ uint64_t NewSampleMask;
+
+ /** gl_context::Multisample::(Min)SampleShading */
+ uint64_t NewSampleShading;
+
+ /** gl_context::Stencil */
+ uint64_t NewStencil;
+
+ /** gl_context::Transform::ClipOrigin/ClipDepthMode */
+ uint64_t NewClipControl;
+
+ /** gl_context::Transform::EyeUserPlane */
+ uint64_t NewClipPlane;
+
+ /** gl_context::Transform::ClipPlanesEnabled */
+ uint64_t NewClipPlaneEnable;
+
+ /** gl_context::Transform::DepthClamp */
+ uint64_t NewDepthClamp;
+
+ /** gl_context::Line */
+ uint64_t NewLineState;
+
+ /** gl_context::Polygon */
+ uint64_t NewPolygonState;
+
+ /** gl_context::PolygonStipple */
+ uint64_t NewPolygonStipple;
+
+ /** gl_context::ViewportArray */
+ uint64_t NewViewport;
+
+ /** Shader constants (uniforms, program parameters, state constants) */
+ uint64_t NewShaderConstants[MESA_SHADER_STAGES];
+
+ /** Programmable sample location state for gl_context::DrawBuffer */
+ uint64_t NewSampleLocations;
+};
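+
+/*
+ * Sketch of the intended protocol (driver-side names are hypothetical):
+ * the driver chooses its own bit values at context creation, core Mesa
+ * ORs them into gl_context::NewDriverState on state changes, and the
+ * driver consumes and clears them at the next draw:
+ *
+ *    // at context creation
+ *    ctx->DriverFlags.NewScissorTest = MY_DIRTY_SCISSOR;
+ *
+ *    // in the driver's draw-time validation
+ *    uint64_t dirty = ctx->NewDriverState;
+ *    ctx->NewDriverState = 0;
+ *    if (dirty & MY_DIRTY_SCISSOR)
+ *       my_emit_scissor_state(ctx);
+ */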
+
+struct gl_buffer_binding
+{
+ struct gl_buffer_object *BufferObject;
+ /** Start of uniform block data in the buffer */
+ GLintptr Offset;
+ /** Size of data allowed to be referenced from the buffer (in bytes) */
+ GLsizeiptr Size;
+ /**
+ * glBindBufferBase() indicates that the Size should be ignored and only
+ * limited by the current size of the BufferObject.
+ */
+ GLboolean AutomaticSize;
+};
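+
+/*
+ * Illustrative mapping from the GL API to these fields:
+ *
+ *    glBindBufferRange(GL_UNIFORM_BUFFER, 0, buf, 256, 1024);
+ *       -> Offset = 256, Size = 1024, AutomaticSize = GL_FALSE
+ *    glBindBufferBase(GL_UNIFORM_BUFFER, 0, buf);
+ *       -> Offset = 0, Size tracks the buffer, AutomaticSize = GL_TRUE
+ */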
+
+/**
+ * ARB_shader_image_load_store image unit.
+ */
+struct gl_image_unit
+{
+ /**
+ * Texture object bound to this unit.
+ */
+ struct gl_texture_object *TexObj;
+
+ /**
+ * Level of the texture object bound to this unit.
+ */
+ GLubyte Level;
+
+ /**
+ * \c GL_TRUE if the whole level is bound as an array of layers, \c
+ * GL_FALSE if only some specific layer of the texture is bound.
+ * \sa Layer
+ */
+ GLboolean Layered;
+
+ /**
+ * Layer of the texture object bound to this unit as specified by the
+ * application.
+ */
+ GLushort Layer;
+
+ /**
+ * Layer of the texture object bound to this unit, or zero if
+ * Layered == false.
+ */
+ GLushort _Layer;
+
+ /**
+ * Access allowed to this texture image. Either \c GL_READ_ONLY,
+ * \c GL_WRITE_ONLY or \c GL_READ_WRITE.
+ */
+ GLenum16 Access;
+
+ /**
+ * GL internal format that determines the interpretation of the
+ * image memory when shader image operations are performed through
+ * this unit.
+ */
+ GLenum16 Format;
+
+ /**
+ * Mesa format corresponding to \c Format.
+ */
+ mesa_format _ActualFormat:16;
+};
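+
+/*
+ * Illustrative mapping from glBindImageTexture() to the fields above:
+ *
+ *    glBindImageTexture(0, tex, 1, GL_TRUE, 0, GL_READ_WRITE, GL_RGBA8);
+ *       -> TexObj = the texture named "tex", Level = 1, Layered = GL_TRUE,
+ *          Layer = 0, Access = GL_READ_WRITE, Format = GL_RGBA8
+ */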
+
+/**
+ * Shader subroutines storage
+ */
+struct gl_subroutine_index_binding
+{
+ GLuint NumIndex;
+ GLuint *IndexPtr;
+};
+
+struct gl_texture_handle_object
+{
+ struct gl_texture_object *texObj;
+ struct gl_sampler_object *sampObj;
+ GLuint64 handle;
+};
+
+struct gl_image_handle_object
+{
+ struct gl_image_unit imgObj;
+ GLuint64 handle;
+};
+
+struct gl_memory_object
+{
+ GLuint Name; /**< hash table ID/name */
+ GLboolean Immutable; /**< denotes mutability state of parameters */
+ GLboolean Dedicated; /**< import memory from a dedicated allocation */
+};
+
+struct gl_semaphore_object
+{
+ GLuint Name; /**< hash table ID/name */
+};
+
+/**
+ * Mesa rendering context.
+ *
+ * This is the central context data structure for Mesa. Almost all
+ * OpenGL state is contained in this structure.
+ * Think of this as a base class from which device drivers will derive
+ * sub classes.
+ */
+struct gl_context
+{
+ /** State possibly shared with other contexts in the address space */
+ struct gl_shared_state *Shared;
+
+ /** \name API function pointer tables */
+ /*@{*/
+ gl_api API;
+
+ /**
+ * The current dispatch table for non-displaylist-saving execution, either
+ * BeginEnd or OutsideBeginEnd
+ */
+ struct _glapi_table *Exec;
+ /**
+ * The normal dispatch table for non-displaylist-saving, non-begin/end
+ */
+ struct _glapi_table *OutsideBeginEnd;
+ /** The dispatch table used between glNewList() and glEndList() */
+ struct _glapi_table *Save;
+ /**
+ * The dispatch table used between glBegin() and glEnd() (outside of a
+    * display list). Only the functions valid between those two calls are set,
+    * which is mostly just the set in a GLvertexformat struct.
+ */
+ struct _glapi_table *BeginEnd;
+ /**
+ * Dispatch table for when a graphics reset has happened.
+ */
+ struct _glapi_table *ContextLost;
+ /**
+ * Dispatch table used to marshal API calls from the client program to a
+ * separate server thread. NULL if API calls are not being marshalled to
+ * another thread.
+ */
+ struct _glapi_table *MarshalExec;
+ /**
+ * Dispatch table currently in use for fielding API calls from the client
+ * program. If API calls are being marshalled to another thread, this ==
+ * MarshalExec. Otherwise it == CurrentServerDispatch.
+ */
+ struct _glapi_table *CurrentClientDispatch;
+
+ /**
+ * Dispatch table currently in use for performing API calls. == Save or
+ * Exec.
+ */
+ struct _glapi_table *CurrentServerDispatch;
+
+ /*@}*/
+
+ struct glthread_state GLThread;
+
+ struct gl_config Visual;
+ struct gl_framebuffer *DrawBuffer; /**< buffer for writing */
+ struct gl_framebuffer *ReadBuffer; /**< buffer for reading */
+ struct gl_framebuffer *WinSysDrawBuffer; /**< set with MakeCurrent */
+ struct gl_framebuffer *WinSysReadBuffer; /**< set with MakeCurrent */
+
+ /**
+ * Device driver function pointer table
+ */
+ struct dd_function_table Driver;
+
+ /** Core/Driver constants */
+ struct gl_constants Const;
+
+ /** \name The various 4x4 matrix stacks */
+ /*@{*/
+ struct gl_matrix_stack ModelviewMatrixStack;
+ struct gl_matrix_stack ProjectionMatrixStack;
+ struct gl_matrix_stack TextureMatrixStack[MAX_TEXTURE_UNITS];
+ struct gl_matrix_stack ProgramMatrixStack[MAX_PROGRAM_MATRICES];
+ struct gl_matrix_stack *CurrentStack; /**< Points to one of the above stacks */
+ /*@}*/
+
+ /** Combined modelview and projection matrix */
+ GLmatrix _ModelProjectMatrix;
+
+ /** \name Display lists */
+ struct gl_dlist_state ListState;
+
+ GLboolean ExecuteFlag; /**< Execute GL commands? */
+ GLboolean CompileFlag; /**< Compile GL commands into display list? */
+
+ /** Extension information */
+ struct gl_extensions Extensions;
+
+ /** GL version integer, for example 31 for GL 3.1, or 20 for GLES 2.0. */
+ GLuint Version;
+ char *VersionString;
+
+ /** \name State attribute stack (for glPush/PopAttrib) */
+ /*@{*/
+ GLuint AttribStackDepth;
+ struct gl_attrib_node *AttribStack[MAX_ATTRIB_STACK_DEPTH];
+ /*@}*/
+
+ /** \name Renderer attribute groups
+ *
+ * We define a struct for each attribute group to make pushing and popping
+    * attributes easy. It also keeps the state nicely organized.
+ */
+ /*@{*/
+ struct gl_accum_attrib Accum; /**< Accum buffer attributes */
+ struct gl_colorbuffer_attrib Color; /**< Color buffer attributes */
+ struct gl_current_attrib Current; /**< Current attributes */
+ struct gl_depthbuffer_attrib Depth; /**< Depth buffer attributes */
+ struct gl_eval_attrib Eval; /**< Eval attributes */
+ struct gl_fog_attrib Fog; /**< Fog attributes */
+ struct gl_hint_attrib Hint; /**< Hint attributes */
+ struct gl_light_attrib Light; /**< Light attributes */
+ struct gl_line_attrib Line; /**< Line attributes */
+ struct gl_list_attrib List; /**< List attributes */
+ struct gl_multisample_attrib Multisample;
+ struct gl_pixel_attrib Pixel; /**< Pixel attributes */
+ struct gl_point_attrib Point; /**< Point attributes */
+ struct gl_polygon_attrib Polygon; /**< Polygon attributes */
+ GLuint PolygonStipple[32]; /**< Polygon stipple */
+ struct gl_scissor_attrib Scissor; /**< Scissor attributes */
+ struct gl_stencil_attrib Stencil; /**< Stencil buffer attributes */
+ struct gl_texture_attrib Texture; /**< Texture attributes */
+ struct gl_transform_attrib Transform; /**< Transformation attributes */
+ struct gl_viewport_attrib ViewportArray[MAX_VIEWPORTS]; /**< Viewport attributes */
+ GLuint SubpixelPrecisionBias[2]; /**< Viewport attributes */
+ /*@}*/
+
+ /** \name Client attribute stack */
+ /*@{*/
+ GLuint ClientAttribStackDepth;
+ struct gl_attrib_node *ClientAttribStack[MAX_CLIENT_ATTRIB_STACK_DEPTH];
+ /*@}*/
+
+ /** \name Client attribute groups */
+ /*@{*/
+ struct gl_array_attrib Array; /**< Vertex arrays */
+ struct gl_pixelstore_attrib Pack; /**< Pixel packing */
+ struct gl_pixelstore_attrib Unpack; /**< Pixel unpacking */
+ struct gl_pixelstore_attrib DefaultPacking; /**< Default params */
+ /*@}*/
+
+ /** \name Other assorted state (not pushed/popped on attribute stack) */
+ /*@{*/
+ struct gl_pixelmaps PixelMaps;
+
+ struct gl_evaluators EvalMap; /**< All evaluators */
+ struct gl_feedback Feedback; /**< Feedback */
+ struct gl_selection Select; /**< Selection */
+
+ struct gl_program_state Program; /**< general program state */
+ struct gl_vertex_program_state VertexProgram;
+ struct gl_fragment_program_state FragmentProgram;
+ struct gl_geometry_program_state GeometryProgram;
+ struct gl_compute_program_state ComputeProgram;
+ struct gl_tess_ctrl_program_state TessCtrlProgram;
+ struct gl_tess_eval_program_state TessEvalProgram;
+ struct gl_ati_fragment_shader_state ATIFragmentShader;
+
+ struct gl_pipeline_shader_state Pipeline; /**< GLSL pipeline shader object state */
+ struct gl_pipeline_object Shader; /**< GLSL shader object state */
+
+ /**
+ * Current active shader pipeline state
+ *
+ * Almost all internal users want ::_Shader instead of ::Shader. The
+ * exceptions are bits of legacy GLSL API that do not know about separate
+ * shader objects.
+ *
+ * If a program is active via \c glUseProgram, this will point to
+ * \c ::Shader.
+ *
+ * If a program pipeline is active via \c glBindProgramPipeline, this will
+ * point to \c ::Pipeline.Current.
+ *
+ * If neither a program nor a program pipeline is active, this will point to
+ * \c ::Pipeline.Default. This ensures that \c ::_Shader will never be
+ * \c NULL.
+ */
+ struct gl_pipeline_object *_Shader;
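+
+   /*
+    * Equivalent selection logic, as a sketch (the conditions paraphrase
+    * the comment above; this is not literal upstream code):
+    *
+    *    if (a program is current via glUseProgram)
+    *       ctx->_Shader = &ctx->Shader;
+    *    else if (a pipeline is bound via glBindProgramPipeline)
+    *       ctx->_Shader = ctx->Pipeline.Current;
+    *    else
+    *       ctx->_Shader = ctx->Pipeline.Default;
+    */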
+
+ /**
+ * NIR containing the functions that implement software fp64 support.
+ */
+ struct nir_shader *SoftFP64;
+
+ struct gl_query_state Query; /**< occlusion, timer queries */
+
+ struct gl_transform_feedback_state TransformFeedback;
+
+ struct gl_perf_monitor_state PerfMonitor;
+ struct gl_perf_query_state PerfQuery;
+
+   struct gl_buffer_object *DrawIndirectBuffer; /**< GL_ARB_draw_indirect */
+   struct gl_buffer_object *ParameterBuffer; /**< GL_ARB_indirect_parameters */
+   struct gl_buffer_object *DispatchIndirectBuffer; /**< GL_ARB_compute_shader */
+
+ struct gl_buffer_object *CopyReadBuffer; /**< GL_ARB_copy_buffer */
+ struct gl_buffer_object *CopyWriteBuffer; /**< GL_ARB_copy_buffer */
+
+ struct gl_buffer_object *QueryBuffer; /**< GL_ARB_query_buffer_object */
+
+ /**
+ * Current GL_ARB_uniform_buffer_object binding referenced by
+ * GL_UNIFORM_BUFFER target for glBufferData, glMapBuffer, etc.
+ */
+ struct gl_buffer_object *UniformBuffer;
+
+ /**
+ * Current GL_ARB_shader_storage_buffer_object binding referenced by
+ * GL_SHADER_STORAGE_BUFFER target for glBufferData, glMapBuffer, etc.
+ */
+ struct gl_buffer_object *ShaderStorageBuffer;
+
+ /**
+ * Array of uniform buffers for GL_ARB_uniform_buffer_object and GL 3.1.
+ * This is set up using glBindBufferRange() or glBindBufferBase(). They are
+ * associated with uniform blocks by glUniformBlockBinding()'s state in the
+ * shader program.
+ */
+ struct gl_buffer_binding
+ UniformBufferBindings[MAX_COMBINED_UNIFORM_BUFFERS];
+
+ /**
+ * Array of shader storage buffers for ARB_shader_storage_buffer_object
+ * and GL 4.3. This is set up using glBindBufferRange() or
+ * glBindBufferBase(). They are associated with shader storage blocks by
+ * glShaderStorageBlockBinding()'s state in the shader program.
+ */
+ struct gl_buffer_binding
+ ShaderStorageBufferBindings[MAX_COMBINED_SHADER_STORAGE_BUFFERS];
+
+ /**
+ * Object currently associated with the GL_ATOMIC_COUNTER_BUFFER
+ * target.
+ */
+ struct gl_buffer_object *AtomicBuffer;
+
+ /**
+    * Object currently associated with the GL_EXTERNAL_VIRTUAL_MEMORY_BUFFER_AMD
+ * target.
+ */
+ struct gl_buffer_object *ExternalVirtualMemoryBuffer;
+
+ /**
+ * Array of atomic counter buffer binding points.
+ */
+ struct gl_buffer_binding
+ AtomicBufferBindings[MAX_COMBINED_ATOMIC_BUFFERS];
+
+ /**
+ * Array of image units for ARB_shader_image_load_store.
+ */
+ struct gl_image_unit ImageUnits[MAX_IMAGE_UNITS];
+
+ struct gl_subroutine_index_binding SubroutineIndex[MESA_SHADER_STAGES];
+ /*@}*/
+
+ struct gl_meta_state *Meta; /**< for "meta" operations */
+
+ /* GL_EXT_framebuffer_object */
+ struct gl_renderbuffer *CurrentRenderbuffer;
+
+ GLenum16 ErrorValue; /**< Last error code */
+
+ /**
+ * Recognize and silence repeated error debug messages in buggy apps.
+ */
+ const char *ErrorDebugFmtString;
+ GLuint ErrorDebugCount;
+
+ /* GL_ARB_debug_output/GL_KHR_debug */
+ simple_mtx_t DebugMutex;
+ struct gl_debug_state *Debug;
+
+ GLenum16 RenderMode; /**< either GL_RENDER, GL_SELECT, GL_FEEDBACK */
+ GLbitfield NewState; /**< bitwise-or of _NEW_* flags */
+ uint64_t NewDriverState; /**< bitwise-or of flags from DriverFlags */
+
+ struct gl_driver_flags DriverFlags;
+
+ GLboolean ViewportInitialized; /**< has viewport size been initialized? */
+ GLboolean _AllowDrawOutOfOrder;
+
+ GLbitfield varying_vp_inputs; /**< mask of VERT_BIT_* flags */
+
+ /** \name Derived state */
+ GLbitfield _ImageTransferState;/**< bitwise-or of IMAGE_*_BIT flags */
+ GLfloat _EyeZDir[3];
+ GLfloat _ModelViewInvScale; /* may be for model- or eyespace lighting */
+ GLfloat _ModelViewInvScaleEyespace; /* always factor defined in spec */
+ GLboolean _NeedEyeCoords;
+ GLboolean _ForceEyeCoords;
+
+ GLuint TextureStateTimestamp; /**< detect changes to shared state */
+
+ struct gl_list_extensions *ListExt; /**< driver dlist extensions */
+
+ /** \name For debugging/development only */
+ /*@{*/
+ GLboolean FirstTimeCurrent;
+ /*@}*/
+
+ /**
+ * False if this context was created without a config. This is needed
+    * because the initial state of glDrawBuffers depends on this.
+ */
+ GLboolean HasConfig;
+
+ GLboolean TextureFormatSupported[MESA_FORMAT_COUNT];
+
+ GLboolean RasterDiscard; /**< GL_RASTERIZER_DISCARD */
+ GLboolean IntelConservativeRasterization; /**< GL_CONSERVATIVE_RASTERIZATION_INTEL */
+ GLboolean ConservativeRasterization; /**< GL_CONSERVATIVE_RASTERIZATION_NV */
+ GLfloat ConservativeRasterDilate;
+ GLenum16 ConservativeRasterMode;
+
+ GLboolean IntelBlackholeRender; /**< GL_INTEL_blackhole_render */
+
+ /** Does glVertexAttrib(0) alias glVertex()? */
+ bool _AttribZeroAliasesVertex;
+
+ /**
+    * When set, TileRasterOrderIncreasingX/Y control the order in which a
+    * tiled renderer's tiles should be executed, to meet the requirements
+    * of GL_MESA_tile_raster_order.
+ */
+ GLboolean TileRasterOrderFixed;
+ GLboolean TileRasterOrderIncreasingX;
+ GLboolean TileRasterOrderIncreasingY;
+
+ /**
+ * \name Hooks for module contexts.
+ *
+ * These will eventually live in the driver or elsewhere.
+ */
+ /*@{*/
+ void *swrast_context;
+ void *swsetup_context;
+ void *swtnl_context;
+ struct vbo_context *vbo_context;
+ struct st_context *st;
+ /*@}*/
+
+ /**
+ * \name NV_vdpau_interop
+ */
+ /*@{*/
+ const void *vdpDevice;
+ const void *vdpGetProcAddress;
+ struct set *vdpSurfaces;
+ /*@}*/
+
+ /**
+ * Has this context observed a GPU reset in any context in the share group?
+ *
+ * Once this field becomes true, it is never reset to false.
+ */
+ GLboolean ShareGroupReset;
+
+ /**
+ * \name OES_primitive_bounding_box
+ *
+ * Stores the arguments to glPrimitiveBoundingBox
+ */
+ GLfloat PrimitiveBoundingBox[8];
+
+ struct disk_cache *Cache;
+
+ /**
+ * \name GL_ARB_bindless_texture
+ */
+ /*@{*/
+ struct hash_table_u64 *ResidentTextureHandles;
+ struct hash_table_u64 *ResidentImageHandles;
+ /*@}*/
+
+ bool shader_builtin_ref;
+};
+
+/**
+ * Information about memory usage. All sizes are in kilobytes.
+ */
+struct gl_memory_info
+{
+ unsigned total_device_memory; /**< size of device memory, e.g. VRAM */
+ unsigned avail_device_memory; /**< free device memory at the moment */
+ unsigned total_staging_memory; /**< size of staging memory, e.g. GART */
+ unsigned avail_staging_memory; /**< free staging memory at the moment */
+ unsigned device_memory_evicted; /**< size of memory evicted (monotonic counter) */
+ unsigned nr_device_memory_evictions; /**< # of evictions (monotonic counter) */
+};
+
+#ifndef NDEBUG
+extern int MESA_VERBOSE;
+extern int MESA_DEBUG_FLAGS;
+#else
+# define MESA_VERBOSE 0
+# define MESA_DEBUG_FLAGS 0
+#endif
+
+
+/** The MESA_VERBOSE var is a bitmask of these flags */
+enum _verbose
+{
+ VERBOSE_VARRAY = 0x0001,
+ VERBOSE_TEXTURE = 0x0002,
+ VERBOSE_MATERIAL = 0x0004,
+ VERBOSE_PIPELINE = 0x0008,
+ VERBOSE_DRIVER = 0x0010,
+ VERBOSE_STATE = 0x0020,
+ VERBOSE_API = 0x0040,
+ VERBOSE_DISPLAY_LIST = 0x0100,
+ VERBOSE_LIGHTING = 0x0200,
+ VERBOSE_PRIMS = 0x0400,
+ VERBOSE_VERTS = 0x0800,
+ VERBOSE_DISASSEM = 0x1000,
+ VERBOSE_DRAW = 0x2000,
+ VERBOSE_SWAPBUFFERS = 0x4000
+};
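+
+/*
+ * Usage sketch: debug code tests these bits before printing, e.g.:
+ *
+ *    if (MESA_VERBOSE & VERBOSE_API)
+ *       _mesa_debug(ctx, "glBegin(GL_TRIANGLES)\n");
+ */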
+
+
+/** The MESA_DEBUG_FLAGS var is a bitmask of these flags */
+enum _debug
+{
+ DEBUG_SILENT = (1 << 0),
+ DEBUG_ALWAYS_FLUSH = (1 << 1),
+ DEBUG_INCOMPLETE_TEXTURE = (1 << 2),
+ DEBUG_INCOMPLETE_FBO = (1 << 3),
+ DEBUG_CONTEXT = (1 << 4)
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MTYPES_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/shaderobj.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/shaderobj.h
new file mode 100644
index 0000000000..0d512550de
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/shaderobj.h
@@ -0,0 +1,265 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2004-2007 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef SHADEROBJ_H
+#define SHADEROBJ_H
+
+
+#include "main/glheader.h"
+#include "compiler/shader_enums.h"
+#include "program/ir_to_mesa.h"
+#include "util/macros.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_shader_program_data;
+struct gl_linked_shader;
+struct dd_function_table;
+struct gl_pipeline_object;
+
+/**
+ * Internal functions
+ */
+
+extern void
+_mesa_init_shader_state(struct gl_context * ctx);
+
+extern void
+_mesa_free_shader_state(struct gl_context *ctx);
+
+
+extern void
+_mesa_reference_shader(struct gl_context *ctx, struct gl_shader **ptr,
+ struct gl_shader *sh);
+
+extern struct gl_shader *
+_mesa_lookup_shader(struct gl_context *ctx, GLuint name);
+
+extern struct gl_shader *
+_mesa_lookup_shader_err(struct gl_context *ctx, GLuint name, const char *caller);
+
+
+
+extern void
+_mesa_reference_shader_program_(struct gl_context *ctx,
+ struct gl_shader_program **ptr,
+ struct gl_shader_program *shProg);
+
+void
+_mesa_reference_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program_data **ptr,
+ struct gl_shader_program_data *data);
+
+static inline void
+_mesa_reference_shader_program(struct gl_context *ctx,
+ struct gl_shader_program **ptr,
+ struct gl_shader_program *shProg)
+{
+ if (*ptr != shProg)
+ _mesa_reference_shader_program_(ctx, ptr, shProg);
+}
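+
+/*
+ * Usage sketch: the same call both acquires and releases references, so
+ * a reference is dropped by passing NULL (the pointer shown is
+ * illustrative):
+ *
+ *    _mesa_reference_shader_program(ctx, &some_ptr, shProg);  // acquire
+ *    ...
+ *    _mesa_reference_shader_program(ctx, &some_ptr, NULL);    // release
+ */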
+
+extern struct gl_shader *
+_mesa_new_shader(GLuint name, gl_shader_stage type);
+
+extern void
+_mesa_delete_shader(struct gl_context *ctx, struct gl_shader *sh);
+
+extern void
+_mesa_delete_linked_shader(struct gl_context *ctx,
+ struct gl_linked_shader *sh);
+
+extern struct gl_shader_program *
+_mesa_lookup_shader_program(struct gl_context *ctx, GLuint name);
+
+extern struct gl_shader_program *
+_mesa_lookup_shader_program_err(struct gl_context *ctx, GLuint name,
+ const char *caller);
+
+extern struct gl_shader_program *
+_mesa_new_shader_program(GLuint name);
+
+extern struct gl_shader_program_data *
+_mesa_create_shader_program_data(void);
+
+extern void
+_mesa_clear_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program *shProg);
+
+extern void
+_mesa_free_shader_program_data(struct gl_context *ctx,
+ struct gl_shader_program *shProg);
+
+extern void
+_mesa_delete_shader_program(struct gl_context *ctx,
+ struct gl_shader_program *shProg);
+
+
+extern void
+_mesa_init_shader_object_functions(struct dd_function_table *driver);
+
+static inline gl_shader_stage
+_mesa_shader_enum_to_shader_stage(GLenum v)
+{
+ switch (v) {
+ case GL_VERTEX_SHADER:
+ return MESA_SHADER_VERTEX;
+ case GL_FRAGMENT_SHADER:
+ return MESA_SHADER_FRAGMENT;
+ case GL_GEOMETRY_SHADER:
+ return MESA_SHADER_GEOMETRY;
+ case GL_TESS_CONTROL_SHADER:
+ return MESA_SHADER_TESS_CTRL;
+ case GL_TESS_EVALUATION_SHADER:
+ return MESA_SHADER_TESS_EVAL;
+ case GL_COMPUTE_SHADER:
+ return MESA_SHADER_COMPUTE;
+ default:
+ unreachable("bad value in _mesa_shader_enum_to_shader_stage()");
+ }
+}
+
+/* 8-byte prefix (e.g. "__subu_v") plus one more underscore */
+#define MESA_SUBROUTINE_PREFIX_LEN 9
+static inline const char *
+_mesa_shader_stage_to_subroutine_prefix(gl_shader_stage stage)
+{
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ return "__subu_v";
+ case MESA_SHADER_GEOMETRY:
+ return "__subu_g";
+ case MESA_SHADER_FRAGMENT:
+ return "__subu_f";
+ case MESA_SHADER_COMPUTE:
+ return "__subu_c";
+ case MESA_SHADER_TESS_CTRL:
+ return "__subu_t";
+ case MESA_SHADER_TESS_EVAL:
+ return "__subu_e";
+ default:
+ return NULL;
+ }
+}
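+
+/*
+ * Usage sketch: generated subroutine uniform names take the form
+ * "<prefix>_<name>", so MESA_SUBROUTINE_PREFIX_LEN skips the 8-byte
+ * prefix plus the joining underscore (the name here is hypothetical):
+ *
+ *    const char *full = "__subu_v_myIndex";
+ *    const char *name = full + MESA_SUBROUTINE_PREFIX_LEN;   // "myIndex"
+ */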
+
+static inline gl_shader_stage
+_mesa_shader_stage_from_subroutine_uniform(GLenum subuniform)
+{
+ switch (subuniform) {
+ case GL_VERTEX_SUBROUTINE_UNIFORM:
+ return MESA_SHADER_VERTEX;
+ case GL_GEOMETRY_SUBROUTINE_UNIFORM:
+ return MESA_SHADER_GEOMETRY;
+ case GL_FRAGMENT_SUBROUTINE_UNIFORM:
+ return MESA_SHADER_FRAGMENT;
+ case GL_COMPUTE_SUBROUTINE_UNIFORM:
+ return MESA_SHADER_COMPUTE;
+ case GL_TESS_CONTROL_SUBROUTINE_UNIFORM:
+ return MESA_SHADER_TESS_CTRL;
+ case GL_TESS_EVALUATION_SUBROUTINE_UNIFORM:
+ return MESA_SHADER_TESS_EVAL;
+ }
+ unreachable("not reached");
+}
+
+static inline gl_shader_stage
+_mesa_shader_stage_from_subroutine(GLenum subroutine)
+{
+ switch (subroutine) {
+ case GL_VERTEX_SUBROUTINE:
+ return MESA_SHADER_VERTEX;
+ case GL_GEOMETRY_SUBROUTINE:
+ return MESA_SHADER_GEOMETRY;
+ case GL_FRAGMENT_SUBROUTINE:
+ return MESA_SHADER_FRAGMENT;
+ case GL_COMPUTE_SUBROUTINE:
+ return MESA_SHADER_COMPUTE;
+ case GL_TESS_CONTROL_SUBROUTINE:
+ return MESA_SHADER_TESS_CTRL;
+ case GL_TESS_EVALUATION_SUBROUTINE:
+ return MESA_SHADER_TESS_EVAL;
+ }
+ unreachable("not reached");
+}
+
+static inline GLenum
+_mesa_shader_stage_to_subroutine(gl_shader_stage stage)
+{
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ return GL_VERTEX_SUBROUTINE;
+ case MESA_SHADER_GEOMETRY:
+ return GL_GEOMETRY_SUBROUTINE;
+ case MESA_SHADER_FRAGMENT:
+ return GL_FRAGMENT_SUBROUTINE;
+ case MESA_SHADER_COMPUTE:
+ return GL_COMPUTE_SUBROUTINE;
+ case MESA_SHADER_TESS_CTRL:
+ return GL_TESS_CONTROL_SUBROUTINE;
+ case MESA_SHADER_TESS_EVAL:
+ return GL_TESS_EVALUATION_SUBROUTINE;
+ case MESA_SHADER_NONE:
+ break;
+ case MESA_SHADER_KERNEL:
+ unreachable("not reached");
+ break;
+ }
+ unreachable("not reached");
+}
+
+static inline GLenum
+_mesa_shader_stage_to_subroutine_uniform(gl_shader_stage stage)
+{
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ return GL_VERTEX_SUBROUTINE_UNIFORM;
+ case MESA_SHADER_GEOMETRY:
+ return GL_GEOMETRY_SUBROUTINE_UNIFORM;
+ case MESA_SHADER_FRAGMENT:
+ return GL_FRAGMENT_SUBROUTINE_UNIFORM;
+ case MESA_SHADER_COMPUTE:
+ return GL_COMPUTE_SUBROUTINE_UNIFORM;
+ case MESA_SHADER_TESS_CTRL:
+ return GL_TESS_CONTROL_SUBROUTINE_UNIFORM;
+ case MESA_SHADER_TESS_EVAL:
+ return GL_TESS_EVALUATION_SUBROUTINE_UNIFORM;
+ case MESA_SHADER_NONE:
+ case MESA_SHADER_KERNEL:
+ break;
+ }
+ unreachable("not reached");
+}
+
+extern bool
+_mesa_validate_pipeline_io(struct gl_pipeline_object *);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* SHADEROBJ_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/uniforms.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/uniforms.h
new file mode 100644
index 0000000000..819cf90ba4
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/uniforms.h
@@ -0,0 +1,523 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2010 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef UNIFORMS_H
+#define UNIFORMS_H
+
+#include "main/glheader.h"
+#include "compiler/glsl_types.h"
+#include "compiler/glsl/ir_uniform.h"
+#include "program/prog_parameter.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct gl_program;
+struct _glapi_table;
+
+void GLAPIENTRY
+_mesa_Uniform1f(GLint, GLfloat);
+void GLAPIENTRY
+_mesa_Uniform2f(GLint, GLfloat, GLfloat);
+void GLAPIENTRY
+_mesa_Uniform3f(GLint, GLfloat, GLfloat, GLfloat);
+void GLAPIENTRY
+_mesa_Uniform4f(GLint, GLfloat, GLfloat, GLfloat, GLfloat);
+void GLAPIENTRY
+_mesa_Uniform1i(GLint, GLint);
+void GLAPIENTRY
+_mesa_Uniform2i(GLint, GLint, GLint);
+void GLAPIENTRY
+_mesa_Uniform3i(GLint, GLint, GLint, GLint);
+void GLAPIENTRY
+_mesa_Uniform4i(GLint, GLint, GLint, GLint, GLint);
+void GLAPIENTRY
+_mesa_Uniform1fv(GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_Uniform2fv(GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_Uniform3fv(GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_Uniform4fv(GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_Uniform1iv(GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_Uniform2iv(GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_Uniform3iv(GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_Uniform4iv(GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_Uniform1ui(GLint location, GLuint v0);
+void GLAPIENTRY
+_mesa_Uniform2ui(GLint location, GLuint v0, GLuint v1);
+void GLAPIENTRY
+_mesa_Uniform3ui(GLint location, GLuint v0, GLuint v1, GLuint v2);
+void GLAPIENTRY
+_mesa_Uniform4ui(GLint location, GLuint v0, GLuint v1, GLuint v2, GLuint v3);
+void GLAPIENTRY
+_mesa_Uniform1uiv(GLint location, GLsizei count, const GLuint *value);
+void GLAPIENTRY
+_mesa_Uniform2uiv(GLint location, GLsizei count, const GLuint *value);
+void GLAPIENTRY
+_mesa_Uniform3uiv(GLint location, GLsizei count, const GLuint *value);
+void GLAPIENTRY
+_mesa_Uniform4uiv(GLint location, GLsizei count, const GLuint *value);
+void GLAPIENTRY
+_mesa_UniformMatrix2fv(GLint, GLsizei, GLboolean, const GLfloat *);
+void GLAPIENTRY
+_mesa_UniformMatrix3fv(GLint, GLsizei, GLboolean, const GLfloat *);
+void GLAPIENTRY
+_mesa_UniformMatrix4fv(GLint, GLsizei, GLboolean, const GLfloat *);
+void GLAPIENTRY
+_mesa_UniformMatrix2x3fv(GLint location, GLsizei count, GLboolean transpose,
+ const GLfloat *value);
+void GLAPIENTRY
+_mesa_UniformMatrix3x2fv(GLint location, GLsizei count, GLboolean transpose,
+ const GLfloat *value);
+void GLAPIENTRY
+_mesa_UniformMatrix2x4fv(GLint location, GLsizei count, GLboolean transpose,
+ const GLfloat *value);
+void GLAPIENTRY
+_mesa_UniformMatrix4x2fv(GLint location, GLsizei count, GLboolean transpose,
+ const GLfloat *value);
+void GLAPIENTRY
+_mesa_UniformMatrix3x4fv(GLint location, GLsizei count, GLboolean transpose,
+ const GLfloat *value);
+void GLAPIENTRY
+_mesa_UniformMatrix4x3fv(GLint location, GLsizei count, GLboolean transpose,
+ const GLfloat *value);
+
+void GLAPIENTRY
+_mesa_UniformHandleui64ARB(GLint location, GLuint64 value);
+void GLAPIENTRY
+_mesa_UniformHandleui64vARB(GLint location, GLsizei count,
+ const GLuint64 *value);
+void GLAPIENTRY
+_mesa_ProgramUniformHandleui64ARB(GLuint program, GLint location,
+ GLuint64 value);
+void GLAPIENTRY
+_mesa_ProgramUniformHandleui64vARB(GLuint program, GLint location,
+ GLsizei count, const GLuint64 *values);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1f(GLuint program, GLint, GLfloat);
+void GLAPIENTRY
+_mesa_ProgramUniform2f(GLuint program, GLint, GLfloat, GLfloat);
+void GLAPIENTRY
+_mesa_ProgramUniform3f(GLuint program, GLint, GLfloat, GLfloat, GLfloat);
+void GLAPIENTRY
+_mesa_ProgramUniform4f(GLuint program, GLint, GLfloat, GLfloat, GLfloat, GLfloat);
+void GLAPIENTRY
+_mesa_ProgramUniform1i(GLuint program, GLint, GLint);
+void GLAPIENTRY
+_mesa_ProgramUniform2i(GLuint program, GLint, GLint, GLint);
+void GLAPIENTRY
+_mesa_ProgramUniform3i(GLuint program, GLint, GLint, GLint, GLint);
+void GLAPIENTRY
+_mesa_ProgramUniform4i(GLuint program, GLint, GLint, GLint, GLint, GLint);
+void GLAPIENTRY
+_mesa_ProgramUniform1fv(GLuint program, GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniform2fv(GLuint program, GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniform3fv(GLuint program, GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniform4fv(GLuint program, GLint, GLsizei, const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniform1iv(GLuint program, GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_ProgramUniform2iv(GLuint program, GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_ProgramUniform3iv(GLuint program, GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_ProgramUniform4iv(GLuint program, GLint, GLsizei, const GLint *);
+void GLAPIENTRY
+_mesa_ProgramUniform1ui(GLuint program, GLint location, GLuint v0);
+void GLAPIENTRY
+_mesa_ProgramUniform2ui(GLuint program, GLint location, GLuint v0, GLuint v1);
+void GLAPIENTRY
+_mesa_ProgramUniform3ui(GLuint program, GLint location, GLuint v0, GLuint v1,
+ GLuint v2);
+void GLAPIENTRY
+_mesa_ProgramUniform4ui(GLuint program, GLint location, GLuint v0, GLuint v1,
+ GLuint v2, GLuint v3);
+void GLAPIENTRY
+_mesa_ProgramUniform1uiv(GLuint program, GLint location, GLsizei count,
+ const GLuint *value);
+void GLAPIENTRY
+_mesa_ProgramUniform2uiv(GLuint program, GLint location, GLsizei count,
+ const GLuint *value);
+void GLAPIENTRY
+_mesa_ProgramUniform3uiv(GLuint program, GLint location, GLsizei count,
+ const GLuint *value);
+void GLAPIENTRY
+_mesa_ProgramUniform4uiv(GLuint program, GLint location, GLsizei count,
+ const GLuint *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix2fv(GLuint program, GLint, GLsizei, GLboolean,
+ const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix3fv(GLuint program, GLint, GLsizei, GLboolean,
+ const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix4fv(GLuint program, GLint, GLsizei, GLboolean,
+ const GLfloat *);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix2x3fv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLfloat *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix3x2fv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLfloat *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix2x4fv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLfloat *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix4x2fv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLfloat *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix3x4fv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLfloat *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix4x3fv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLfloat *value);
+
+void GLAPIENTRY
+_mesa_GetnUniformfvARB(GLuint, GLint, GLsizei, GLfloat *);
+void GLAPIENTRY
+_mesa_GetUniformfv(GLuint, GLint, GLfloat *);
+void GLAPIENTRY
+_mesa_GetnUniformivARB(GLuint, GLint, GLsizei, GLint *);
+void GLAPIENTRY
+_mesa_GetUniformuiv(GLuint, GLint, GLuint *);
+void GLAPIENTRY
+_mesa_GetnUniformuivARB(GLuint, GLint, GLsizei, GLuint *);
+void GLAPIENTRY
+_mesa_GetnUniformdvARB(GLuint, GLint, GLsizei, GLdouble *);
+void GLAPIENTRY
+_mesa_GetUniformdv(GLuint, GLint, GLdouble *);
+GLint GLAPIENTRY
+_mesa_GetUniformLocation(GLuint, const GLcharARB *);
+GLint GLAPIENTRY
+_mesa_GetUniformLocation_no_error(GLuint, const GLcharARB *);
+GLuint GLAPIENTRY
+_mesa_GetUniformBlockIndex(GLuint program,
+ const GLchar *uniformBlockName);
+void GLAPIENTRY
+_mesa_GetUniformIndices(GLuint program,
+ GLsizei uniformCount,
+ const GLchar * const *uniformNames,
+ GLuint *uniformIndices);
+
+void GLAPIENTRY
+_mesa_UniformBlockBinding_no_error(GLuint program, GLuint uniformBlockIndex,
+ GLuint uniformBlockBinding);
+
+void GLAPIENTRY
+_mesa_UniformBlockBinding(GLuint program,
+ GLuint uniformBlockIndex,
+ GLuint uniformBlockBinding);
+
+void GLAPIENTRY
+_mesa_ShaderStorageBlockBinding_no_error(GLuint program,
+ GLuint shaderStorageBlockIndex,
+ GLuint shaderStorageBlockBinding);
+
+void GLAPIENTRY
+_mesa_ShaderStorageBlockBinding(GLuint program,
+ GLuint shaderStorageBlockIndex,
+ GLuint shaderStorageBlockBinding);
+void GLAPIENTRY
+_mesa_GetActiveAtomicCounterBufferiv(GLuint program, GLuint bufferIndex,
+ GLenum pname, GLint *params);
+void GLAPIENTRY
+_mesa_GetActiveUniformBlockiv(GLuint program,
+ GLuint uniformBlockIndex,
+ GLenum pname,
+ GLint *params);
+void GLAPIENTRY
+_mesa_GetActiveUniformBlockName(GLuint program,
+ GLuint uniformBlockIndex,
+ GLsizei bufSize,
+ GLsizei *length,
+ GLchar *uniformBlockName);
+void GLAPIENTRY
+_mesa_GetActiveUniformName(GLuint program, GLuint uniformIndex,
+ GLsizei bufSize, GLsizei *length,
+ GLchar *uniformName);
+void GLAPIENTRY
+_mesa_GetActiveUniform(GLuint, GLuint, GLsizei, GLsizei *,
+ GLint *, GLenum *, GLcharARB *);
+void GLAPIENTRY
+_mesa_GetActiveUniformsiv(GLuint program,
+ GLsizei uniformCount,
+ const GLuint *uniformIndices,
+ GLenum pname,
+ GLint *params);
+void GLAPIENTRY
+_mesa_GetUniformiv(GLuint, GLint, GLint *);
+
+void GLAPIENTRY
+_mesa_Uniform1d(GLint, GLdouble);
+void GLAPIENTRY
+_mesa_Uniform2d(GLint, GLdouble, GLdouble);
+void GLAPIENTRY
+_mesa_Uniform3d(GLint, GLdouble, GLdouble, GLdouble);
+void GLAPIENTRY
+_mesa_Uniform4d(GLint, GLdouble, GLdouble, GLdouble, GLdouble);
+
+void GLAPIENTRY
+_mesa_Uniform1dv(GLint, GLsizei, const GLdouble *);
+void GLAPIENTRY
+_mesa_Uniform2dv(GLint, GLsizei, const GLdouble *);
+void GLAPIENTRY
+_mesa_Uniform3dv(GLint, GLsizei, const GLdouble *);
+void GLAPIENTRY
+_mesa_Uniform4dv(GLint, GLsizei, const GLdouble *);
+
+void GLAPIENTRY
+_mesa_GetUniformi64vARB(GLuint, GLint, GLint64 *);
+void GLAPIENTRY
+_mesa_GetUniformui64vARB(GLuint, GLint, GLuint64 *);
+
+void GLAPIENTRY
+_mesa_GetnUniformi64vARB(GLuint, GLint, GLsizei, GLint64 *);
+void GLAPIENTRY
+_mesa_GetnUniformui64vARB(GLuint, GLint, GLsizei, GLuint64 *);
+
+void GLAPIENTRY
+_mesa_UniformMatrix2dv(GLint, GLsizei, GLboolean, const GLdouble *);
+void GLAPIENTRY
+_mesa_UniformMatrix3dv(GLint, GLsizei, GLboolean, const GLdouble *);
+void GLAPIENTRY
+_mesa_UniformMatrix4dv(GLint, GLsizei, GLboolean, const GLdouble *);
+void GLAPIENTRY
+_mesa_UniformMatrix2x3dv(GLint location, GLsizei count, GLboolean transpose,
+ const GLdouble *value);
+void GLAPIENTRY
+_mesa_UniformMatrix3x2dv(GLint location, GLsizei count, GLboolean transpose,
+ const GLdouble *value);
+void GLAPIENTRY
+_mesa_UniformMatrix2x4dv(GLint location, GLsizei count, GLboolean transpose,
+ const GLdouble *value);
+void GLAPIENTRY
+_mesa_UniformMatrix4x2dv(GLint location, GLsizei count, GLboolean transpose,
+ const GLdouble *value);
+void GLAPIENTRY
+_mesa_UniformMatrix3x4dv(GLint location, GLsizei count, GLboolean transpose,
+ const GLdouble *value);
+void GLAPIENTRY
+_mesa_UniformMatrix4x3dv(GLint location, GLsizei count, GLboolean transpose,
+ const GLdouble *value);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1d(GLuint program, GLint, GLdouble);
+void GLAPIENTRY
+_mesa_ProgramUniform2d(GLuint program, GLint, GLdouble, GLdouble);
+void GLAPIENTRY
+_mesa_ProgramUniform3d(GLuint program, GLint, GLdouble, GLdouble, GLdouble);
+void GLAPIENTRY
+_mesa_ProgramUniform4d(GLuint program, GLint, GLdouble, GLdouble, GLdouble, GLdouble);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1dv(GLuint program, GLint, GLsizei, const GLdouble *);
+void GLAPIENTRY
+_mesa_ProgramUniform2dv(GLuint program, GLint, GLsizei, const GLdouble *);
+void GLAPIENTRY
+_mesa_ProgramUniform3dv(GLuint program, GLint, GLsizei, const GLdouble *);
+void GLAPIENTRY
+_mesa_ProgramUniform4dv(GLuint program, GLint, GLsizei, const GLdouble *);
+
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix2dv(GLuint program, GLint, GLsizei, GLboolean,
+ const GLdouble *);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix3dv(GLuint program, GLint, GLsizei, GLboolean,
+ const GLdouble *);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix4dv(GLuint program, GLint, GLsizei, GLboolean,
+ const GLdouble *);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix2x3dv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLdouble *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix3x2dv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLdouble *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix2x4dv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLdouble *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix4x2dv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLdouble *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix3x4dv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLdouble *value);
+void GLAPIENTRY
+_mesa_ProgramUniformMatrix4x3dv(GLuint program, GLint location, GLsizei count,
+ GLboolean transpose, const GLdouble *value);
+
+void GLAPIENTRY
+_mesa_Uniform1i64ARB(GLint, GLint64);
+void GLAPIENTRY
+_mesa_Uniform2i64ARB(GLint, GLint64, GLint64);
+void GLAPIENTRY
+_mesa_Uniform3i64ARB(GLint, GLint64, GLint64, GLint64);
+void GLAPIENTRY
+_mesa_Uniform4i64ARB(GLint, GLint64, GLint64, GLint64, GLint64);
+
+void GLAPIENTRY
+_mesa_Uniform1i64vARB(GLint, GLsizei, const GLint64 *);
+void GLAPIENTRY
+_mesa_Uniform2i64vARB(GLint, GLsizei, const GLint64 *);
+void GLAPIENTRY
+_mesa_Uniform3i64vARB(GLint, GLsizei, const GLint64 *);
+void GLAPIENTRY
+_mesa_Uniform4i64vARB(GLint, GLsizei, const GLint64 *);
+
+void GLAPIENTRY
+_mesa_Uniform1ui64ARB(GLint, GLuint64);
+void GLAPIENTRY
+_mesa_Uniform2ui64ARB(GLint, GLuint64, GLuint64);
+void GLAPIENTRY
+_mesa_Uniform3ui64ARB(GLint, GLuint64, GLuint64, GLuint64);
+void GLAPIENTRY
+_mesa_Uniform4ui64ARB(GLint, GLuint64, GLuint64, GLuint64, GLuint64);
+
+void GLAPIENTRY
+_mesa_Uniform1ui64vARB(GLint, GLsizei, const GLuint64 *);
+void GLAPIENTRY
+_mesa_Uniform2ui64vARB(GLint, GLsizei, const GLuint64 *);
+void GLAPIENTRY
+_mesa_Uniform3ui64vARB(GLint, GLsizei, const GLuint64 *);
+void GLAPIENTRY
+_mesa_Uniform4ui64vARB(GLint, GLsizei, const GLuint64 *);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1i64ARB(GLuint, GLint, GLint64);
+void GLAPIENTRY
+_mesa_ProgramUniform2i64ARB(GLuint, GLint, GLint64, GLint64);
+void GLAPIENTRY
+_mesa_ProgramUniform3i64ARB(GLuint, GLint, GLint64, GLint64, GLint64);
+void GLAPIENTRY
+_mesa_ProgramUniform4i64ARB(GLuint, GLint, GLint64, GLint64, GLint64, GLint64);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1i64vARB(GLuint, GLint, GLsizei, const GLint64 *);
+void GLAPIENTRY
+_mesa_ProgramUniform2i64vARB(GLuint, GLint, GLsizei, const GLint64 *);
+void GLAPIENTRY
+_mesa_ProgramUniform3i64vARB(GLuint, GLint, GLsizei, const GLint64 *);
+void GLAPIENTRY
+_mesa_ProgramUniform4i64vARB(GLuint, GLint, GLsizei, const GLint64 *);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1ui64ARB(GLuint, GLint, GLuint64);
+void GLAPIENTRY
+_mesa_ProgramUniform2ui64ARB(GLuint, GLint, GLuint64, GLuint64);
+void GLAPIENTRY
+_mesa_ProgramUniform3ui64ARB(GLuint, GLint, GLuint64, GLuint64, GLuint64);
+void GLAPIENTRY
+_mesa_ProgramUniform4ui64ARB(GLuint, GLint, GLuint64, GLuint64, GLuint64, GLuint64);
+
+void GLAPIENTRY
+_mesa_ProgramUniform1ui64vARB(GLuint, GLint, GLsizei, const GLuint64 *);
+void GLAPIENTRY
+_mesa_ProgramUniform2ui64vARB(GLuint, GLint, GLsizei, const GLuint64 *);
+void GLAPIENTRY
+_mesa_ProgramUniform3ui64vARB(GLuint, GLint, GLsizei, const GLuint64 *);
+void GLAPIENTRY
+_mesa_ProgramUniform4ui64vARB(GLuint, GLint, GLsizei, const GLuint64 *);
+
+void
+_mesa_uniform(GLint location, GLsizei count, const GLvoid *values,
+ struct gl_context *, struct gl_shader_program *,
+ enum glsl_base_type basicType, unsigned src_components);
+
+void
+_mesa_uniform_matrix(GLint location, GLsizei count,
+ GLboolean transpose, const void *values,
+ struct gl_context *, struct gl_shader_program *,
+ GLuint cols, GLuint rows, enum glsl_base_type basicType);
+
+void
+_mesa_uniform_handle(GLint location, GLsizei count, const GLvoid *values,
+ struct gl_context *, struct gl_shader_program *);
+
+void
+_mesa_get_uniform(struct gl_context *ctx, GLuint program, GLint location,
+ GLsizei bufSize, enum glsl_base_type returnType,
+ GLvoid *paramsOut);
+
+extern void
+_mesa_uniform_attach_driver_storage(struct gl_uniform_storage *,
+ unsigned element_stride,
+ unsigned vector_stride,
+ enum gl_uniform_driver_format format,
+ void *data);
+
+extern void
+_mesa_uniform_detach_all_driver_storage(struct gl_uniform_storage *uni);
+
+extern void
+_mesa_propagate_uniforms_to_driver_storage(struct gl_uniform_storage *uni,
+ unsigned array_index,
+ unsigned count);
+
+extern void
+_mesa_update_shader_textures_used(struct gl_shader_program *shProg,
+ struct gl_program *prog);
+
+extern bool
+_mesa_sampler_uniforms_are_valid(const struct gl_shader_program *shProg,
+ char *errMsg, size_t errMsgLength);
+extern bool
+_mesa_sampler_uniforms_pipeline_are_valid(struct gl_pipeline_object *);
+
+extern void
+_mesa_flush_vertices_for_uniforms(struct gl_context *ctx,
+ const struct gl_uniform_storage *uni);
+
+struct gl_builtin_uniform_element {
+ const char *field;
+ gl_state_index16 tokens[STATE_LENGTH];
+ int swizzle;
+};
+
+struct gl_builtin_uniform_desc {
+ const char *name;
+ const struct gl_builtin_uniform_element *elements;
+ unsigned int num_elements;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* UNIFORMS_H */
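The glUniform* entry points declared above all funnel into the shared _mesa_uniform() helper. A minimal sketch of that plumbing, assuming Mesa's usual GET_CURRENT_CONTEXT idiom and the ctx->_Shader->ActiveProgram lookup (neither is defined in this header, and example_Uniform3fv is a hypothetical name):

void GLAPIENTRY
example_Uniform3fv(GLint location, GLsizei count, const GLfloat *value)
{
   GET_CURRENT_CONTEXT(ctx);   /* assumed Mesa idiom for the current context */

   /* Three float components per array element; error checking and the
    * _glapi dispatch hookup are omitted from this sketch. */
   _mesa_uniform(location, count, value, ctx, ctx->_Shader->ActiveProgram,
                 GLSL_TYPE_FLOAT, 3);
}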
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/version.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/version.h
new file mode 100644
index 0000000000..4469509c08
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/main/version.h
@@ -0,0 +1,66 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef VERSION_H
+#define VERSION_H
+
+#include <stdbool.h>
+#include "glheader.h"
+#include "menums.h"
+
+struct gl_context;
+struct gl_constants;
+struct gl_extensions;
+
+extern GLuint
+_mesa_get_version(const struct gl_extensions *extensions,
+ struct gl_constants *consts, gl_api api);
+
+extern void
+_mesa_compute_version(struct gl_context *ctx);
+
+extern bool
+_mesa_override_gl_version_contextless(struct gl_constants *consts,
+ gl_api *apiOut, GLuint *versionOut);
+
+extern void
+_mesa_override_gl_version(struct gl_context *ctx);
+
+extern void
+_mesa_override_glsl_version(struct gl_constants *consts);
+
+extern void
+_mesa_get_driver_uuid(struct gl_context *ctx, GLint *uuid);
+
+extern void
+_mesa_get_device_uuid(struct gl_context *ctx, GLint *uuid);
+
+extern int
+_mesa_get_shading_language_version(const struct gl_context *ctx,
+ int index,
+ char **versionOut);
+
+#endif /* VERSION_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/math/m_matrix.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/math/m_matrix.h
new file mode 100644
index 0000000000..c34d9e3022
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/math/m_matrix.h
@@ -0,0 +1,218 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2005 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file math/m_matrix.h
+ * Defines basic structures for matrix-handling.
+ */
+
+#ifndef _M_MATRIX_H
+#define _M_MATRIX_H
+
+
+#include "main/glheader.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * \name Symbolic names to some of the entries in the matrix
+ *
+ * These are handy for the viewport mapping, which is expressed as a matrix.
+ */
+/*@{*/
+#define MAT_SX 0
+#define MAT_SY 5
+#define MAT_SZ 10
+#define MAT_TX 12
+#define MAT_TY 13
+#define MAT_TZ 14
+/*@}*/
+
+
+/**
+ * Different kinds of 4x4 transformation matrices.
+ * We use these to select specific optimized vertex transformation routines.
+ */
+enum GLmatrixtype {
+ MATRIX_GENERAL, /**< general 4x4 matrix */
+ MATRIX_IDENTITY, /**< identity matrix */
+ MATRIX_3D_NO_ROT, /**< orthogonal projection and others... */
+ MATRIX_PERSPECTIVE, /**< perspective projection matrix */
+ MATRIX_2D, /**< 2-D transformation */
+ MATRIX_2D_NO_ROT, /**< 2-D scale & translate only */
+ MATRIX_3D /**< 3-D transformation */
+};
+
+/**
+ * Matrix type to represent 4x4 transformation matrices.
+ */
+typedef struct {
+ GLfloat *m; /**< 16 matrix elements (16-byte aligned) */
+ GLfloat *inv; /**< 16-element inverse (16-byte aligned) */
+   GLuint flags;        /**< bitmask of \link MatFlags MAT_FLAG_* flags\endlink */
+ enum GLmatrixtype type;
+} GLmatrix;
+
+
+
+
+extern void
+_math_matrix_ctr( GLmatrix *m );
+
+extern void
+_math_matrix_dtr( GLmatrix *m );
+
+extern void
+_math_matrix_mul_matrix( GLmatrix *dest, const GLmatrix *a, const GLmatrix *b );
+
+extern void
+_math_matrix_mul_floats( GLmatrix *dest, const GLfloat *b );
+
+extern void
+_math_matrix_loadf( GLmatrix *mat, const GLfloat *m );
+
+extern void
+_math_matrix_translate( GLmatrix *mat, GLfloat x, GLfloat y, GLfloat z );
+
+extern void
+_math_matrix_rotate( GLmatrix *m, GLfloat angle,
+ GLfloat x, GLfloat y, GLfloat z );
+
+extern void
+_math_matrix_scale( GLmatrix *mat, GLfloat x, GLfloat y, GLfloat z );
+
+extern void
+_math_matrix_ortho( GLmatrix *mat,
+ GLfloat left, GLfloat right,
+ GLfloat bottom, GLfloat top,
+ GLfloat nearval, GLfloat farval );
+
+extern void
+_math_matrix_frustum( GLmatrix *mat,
+ GLfloat left, GLfloat right,
+ GLfloat bottom, GLfloat top,
+ GLfloat nearval, GLfloat farval );
+
+extern void
+_math_matrix_viewport( GLmatrix *m, const float scale[3],
+ const float translate[3], double depthMax );
+
+extern void
+_math_matrix_set_identity( GLmatrix *dest );
+
+extern void
+_math_matrix_copy( GLmatrix *to, const GLmatrix *from );
+
+extern void
+_math_matrix_analyse( GLmatrix *mat );
+
+extern void
+_math_matrix_print( const GLmatrix *m );
+
+extern GLboolean
+_math_matrix_is_length_preserving( const GLmatrix *m );
+
+extern GLboolean
+_math_matrix_has_rotation( const GLmatrix *m );
+
+extern GLboolean
+_math_matrix_is_general_scale( const GLmatrix *m );
+
+extern GLboolean
+_math_matrix_is_dirty( const GLmatrix *m );
+
+
+/**
+ * \name Related functions that don't actually operate on GLmatrix structs
+ */
+/*@{*/
+
+extern void
+_math_transposef( GLfloat to[16], const GLfloat from[16] );
+
+extern void
+_math_transposed( GLdouble to[16], const GLdouble from[16] );
+
+extern void
+_math_transposefd( GLfloat to[16], const GLdouble from[16] );
+
+
+/*
+ * Transform a point (column vector) by a matrix: Q = M * P
+ */
+#define TRANSFORM_POINT( Q, M, P ) \
+ Q[0] = M[0] * P[0] + M[4] * P[1] + M[8] * P[2] + M[12] * P[3]; \
+ Q[1] = M[1] * P[0] + M[5] * P[1] + M[9] * P[2] + M[13] * P[3]; \
+ Q[2] = M[2] * P[0] + M[6] * P[1] + M[10] * P[2] + M[14] * P[3]; \
+ Q[3] = M[3] * P[0] + M[7] * P[1] + M[11] * P[2] + M[15] * P[3];
+
+
+#define TRANSFORM_POINT3( Q, M, P ) \
+ Q[0] = M[0] * P[0] + M[4] * P[1] + M[8] * P[2] + M[12]; \
+ Q[1] = M[1] * P[0] + M[5] * P[1] + M[9] * P[2] + M[13]; \
+ Q[2] = M[2] * P[0] + M[6] * P[1] + M[10] * P[2] + M[14]; \
+ Q[3] = M[3] * P[0] + M[7] * P[1] + M[11] * P[2] + M[15];
+
+
+/*
+ * Transform a normal (row vector) by a matrix: [NX NY NZ] = N * MAT
+ */
+#define TRANSFORM_NORMAL( TO, N, MAT ) \
+do { \
+ TO[0] = N[0] * MAT[0] + N[1] * MAT[1] + N[2] * MAT[2]; \
+ TO[1] = N[0] * MAT[4] + N[1] * MAT[5] + N[2] * MAT[6]; \
+ TO[2] = N[0] * MAT[8] + N[1] * MAT[9] + N[2] * MAT[10]; \
+} while (0)
+
+
+/**
+ * Transform a direction by a matrix.
+ */
+#define TRANSFORM_DIRECTION( TO, DIR, MAT ) \
+do { \
+ TO[0] = DIR[0] * MAT[0] + DIR[1] * MAT[4] + DIR[2] * MAT[8]; \
+ TO[1] = DIR[0] * MAT[1] + DIR[1] * MAT[5] + DIR[2] * MAT[9]; \
+ TO[2] = DIR[0] * MAT[2] + DIR[1] * MAT[6] + DIR[2] * MAT[10];\
+} while (0)
+
+
+extern void
+_mesa_transform_vector(GLfloat u[4], const GLfloat v[4], const GLfloat m[16]);
+
+
+/*@}*/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
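A small usage sketch for the transform macros above (illustrative only; the matrix layout is the column-major order the TRANSFORM_POINT expansion implies):

#include <stdio.h>

static void transform_example(void)
{
   /* Column-major 4x4 matrix translating by (1, 2, 3). */
   const GLfloat m[16] = { 1, 0, 0, 0,
                           0, 1, 0, 0,
                           0, 0, 1, 0,
                           1, 2, 3, 1 };
   const GLfloat p[4] = { 5, 6, 7, 1 };
   GLfloat q[4];

   TRANSFORM_POINT(q, m, p);   /* q = m * p = (6, 8, 10, 1) */
   printf("%g %g %g %g\n", q[0], q[1], q[2], q[3]);
}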
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/dummy_errors.c b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/dummy_errors.c
new file mode 100644
index 0000000000..d69f54d1d0
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/dummy_errors.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include "main/errors.h"
+
+void
+_mesa_error_no_memory(const char *caller)
+{
+ fprintf(stderr, "Mesa error: out of memory in %s", caller);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/ir_to_mesa.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/ir_to_mesa.h
new file mode 100644
index 0000000000..33eb801bae
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/ir_to_mesa.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef IR_TO_MESA_H
+#define IR_TO_MESA_H
+
+#include "main/glheader.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+struct gl_program;
+struct gl_shader;
+struct gl_shader_program;
+struct gl_linked_shader;
+struct gl_program_parameter_list;
+
+void _mesa_glsl_link_shader(struct gl_context *ctx, struct gl_shader_program *prog);
+GLboolean _mesa_ir_link_shader(struct gl_context *ctx, struct gl_shader_program *prog);
+
+void
+_mesa_generate_parameters_list_for_uniforms(struct gl_context *ctx,
+ struct gl_shader_program
+ *shader_program,
+ struct gl_linked_shader *sh,
+ struct gl_program_parameter_list
+ *params);
+void
+_mesa_associate_uniform_storage(struct gl_context *ctx,
+ struct gl_shader_program *shader_program,
+ struct gl_program *prog);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* IR_TO_MESA_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_instruction.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_instruction.h
new file mode 100644
index 0000000000..328566a10f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_instruction.h
@@ -0,0 +1,293 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file prog_instruction.h
+ *
+ * Vertex/fragment program instruction datatypes and constants.
+ *
+ * \author Brian Paul
+ * \author Keith Whitwell
+ * \author Ian Romanick <idr@us.ibm.com>
+ */
+
+
+#ifndef PROG_INSTRUCTION_H
+#define PROG_INSTRUCTION_H
+
+
+#include "main/glheader.h"
+
+
+/**
+ * Swizzle indexes.
+ * Do not change!
+ */
+/*@{*/
+#define SWIZZLE_X 0
+#define SWIZZLE_Y 1
+#define SWIZZLE_Z 2
+#define SWIZZLE_W 3
+#define SWIZZLE_ZERO 4 /**< For SWZ instruction only */
+#define SWIZZLE_ONE 5 /**< For SWZ instruction only */
+#define SWIZZLE_NIL 7 /**< used during shader code gen (undefined value) */
+/*@}*/
+
+#define MAKE_SWIZZLE4(a,b,c,d) (((a)<<0) | ((b)<<3) | ((c)<<6) | ((d)<<9))
+#define SWIZZLE_NOOP MAKE_SWIZZLE4(0,1,2,3)
+#define GET_SWZ(swz, idx) (((swz) >> ((idx)*3)) & 0x7)
+#define GET_BIT(msk, idx) (((msk) >> (idx)) & 0x1)
+/** Determine if swz contains SWIZZLE_ZERO/ONE/NIL for any components. */
+#define HAS_EXTENDED_SWIZZLE(swz) ((swz) & 0x924)
+
+#define SWIZZLE_XYZW MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_Y, SWIZZLE_Z, SWIZZLE_W)
+#define SWIZZLE_XXXX MAKE_SWIZZLE4(SWIZZLE_X, SWIZZLE_X, SWIZZLE_X, SWIZZLE_X)
+#define SWIZZLE_YYYY MAKE_SWIZZLE4(SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y, SWIZZLE_Y)
+#define SWIZZLE_ZZZZ MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_Z, SWIZZLE_Z)
+#define SWIZZLE_WWWW MAKE_SWIZZLE4(SWIZZLE_W, SWIZZLE_W, SWIZZLE_W, SWIZZLE_W)
+
+
+/**
+ * Writemask values, 1 bit per component.
+ */
+/*@{*/
+#define WRITEMASK_X 0x1
+#define WRITEMASK_Y 0x2
+#define WRITEMASK_XY 0x3
+#define WRITEMASK_Z 0x4
+#define WRITEMASK_XZ 0x5
+#define WRITEMASK_YZ 0x6
+#define WRITEMASK_XYZ 0x7
+#define WRITEMASK_W 0x8
+#define WRITEMASK_XW 0x9
+#define WRITEMASK_YW 0xa
+#define WRITEMASK_XYW 0xb
+#define WRITEMASK_ZW 0xc
+#define WRITEMASK_XZW 0xd
+#define WRITEMASK_YZW 0xe
+#define WRITEMASK_XYZW 0xf
+/*@}*/
+
+
+/**
+ * Per-component negation masks
+ */
+/*@{*/
+#define NEGATE_X 0x1
+#define NEGATE_Y 0x2
+#define NEGATE_Z 0x4
+#define NEGATE_W 0x8
+#define NEGATE_XYZ 0x7
+#define NEGATE_XYZW 0xf
+#define NEGATE_NONE 0x0
+/*@}*/
+
+
+/**
+ * Program instruction opcodes for vertex, fragment and geometry programs.
+ */
+enum prog_opcode {
+ /* ARB_vp ARB_fp NV_vp NV_fp GLSL */
+ /*------------------------------------------*/
+ OPCODE_NOP = 0, /* X */
+ OPCODE_ABS, /* X X 1.1 X */
+ OPCODE_ADD, /* X X X X X */
+ OPCODE_ARL, /* X X X */
+ OPCODE_BGNLOOP, /* opt */
+ OPCODE_BGNSUB, /* opt */
+ OPCODE_BRK, /* 2 opt */
+ OPCODE_CAL, /* 2 2 opt */
+ OPCODE_CMP, /* X X */
+ OPCODE_CONT, /* opt */
+ OPCODE_COS, /* X 2 X X */
+ OPCODE_DDX, /* X X */
+ OPCODE_DDY, /* X X */
+ OPCODE_DP2, /* 2 X */
+ OPCODE_DP3, /* X X X X X */
+ OPCODE_DP4, /* X X X X X */
+ OPCODE_DPH, /* X X 1.1 */
+ OPCODE_DST, /* X X X X */
+ OPCODE_ELSE, /* opt */
+ OPCODE_END, /* X X X X opt */
+ OPCODE_ENDIF, /* opt */
+ OPCODE_ENDLOOP, /* opt */
+ OPCODE_ENDSUB, /* opt */
+ OPCODE_EX2, /* X X 2 X X */
+ OPCODE_EXP, /* X X */
+ OPCODE_FLR, /* X X 2 X X */
+ OPCODE_FRC, /* X X 2 X X */
+ OPCODE_IF, /* opt */
+ OPCODE_KIL, /* X X */
+ OPCODE_LG2, /* X X 2 X X */
+ OPCODE_LIT, /* X X X X */
+ OPCODE_LOG, /* X X */
+ OPCODE_LRP, /* X X */
+ OPCODE_MAD, /* X X X X X */
+ OPCODE_MAX, /* X X X X X */
+ OPCODE_MIN, /* X X X X X */
+ OPCODE_MOV, /* X X X X X */
+ OPCODE_MUL, /* X X X X X */
+ OPCODE_NOISE1, /* X */
+ OPCODE_NOISE2, /* X */
+ OPCODE_NOISE3, /* X */
+ OPCODE_NOISE4, /* X */
+ OPCODE_POW, /* X X X X */
+ OPCODE_RCP, /* X X X X X */
+ OPCODE_RET, /* 2 2 opt */
+ OPCODE_RSQ, /* X X X X X */
+ OPCODE_SCS, /* X X */
+ OPCODE_SGE, /* X X X X X */
+ OPCODE_SIN, /* X 2 X X */
+ OPCODE_SLT, /* X X X X X */
+ OPCODE_SSG, /* 2 X */
+ OPCODE_SUB, /* X X 1.1 X X */
+ OPCODE_SWZ, /* X X X */
+ OPCODE_TEX, /* X 3 X X */
+ OPCODE_TXB, /* X 3 X */
+ OPCODE_TXD, /* X X */
+ OPCODE_TXL, /* 3 2 X */
+ OPCODE_TXP, /* X X */
+ OPCODE_TRUNC, /* X */
+ OPCODE_XPD, /* X X */
+ MAX_OPCODE
+};
+
+
+/**
+ * Number of bits for the src/dst register Index field.
+ * This limits the size of temp/uniform register files.
+ */
+#define INST_INDEX_BITS 12
+
+
+/**
+ * Instruction source register.
+ */
+struct prog_src_register
+{
+ GLuint File:4; /**< One of the PROGRAM_* register file values. */
+ GLint Index:(INST_INDEX_BITS+1); /**< Extra bit here for sign bit.
+ * May be negative for relative addressing.
+ */
+ GLuint Swizzle:12;
+ GLuint RelAddr:1;
+
+ /**
+ * Negation.
+ * This will either be NEGATE_NONE or NEGATE_XYZW, except for the SWZ
+ * instruction which allows per-component negation.
+ */
+ GLuint Negate:4;
+};
+
+
+/**
+ * Instruction destination register.
+ */
+struct prog_dst_register
+{
+ GLuint File:4; /**< One of the PROGRAM_* register file values */
+ GLuint Index:INST_INDEX_BITS; /**< Unsigned, never negative */
+ GLuint WriteMask:4;
+ GLuint RelAddr:1;
+};
+
+
+/**
+ * Vertex/fragment program instruction.
+ */
+struct prog_instruction
+{
+ enum prog_opcode Opcode;
+ struct prog_src_register SrcReg[3];
+ struct prog_dst_register DstReg;
+
+ /**
+ * Saturate each value of the vectored result to the range [0,1].
+ *
+ * \since
+ * ARB_fragment_program
+ */
+ GLuint Saturate:1;
+
+ /**
+ * \name Extra fields for TEX, TXB, TXD, TXL, TXP instructions.
+ */
+ /*@{*/
+ /** Source texture unit. */
+ GLuint TexSrcUnit:5;
+
+ /** Source texture target, one of TEXTURE_{1D,2D,3D,CUBE,RECT}_INDEX */
+ GLuint TexSrcTarget:4;
+
+ /** True if tex instruction should do shadow comparison */
+ GLuint TexShadow:1;
+ /*@}*/
+
+ /**
+ * For BRA and CAL instructions, the location to jump to.
+ * For BGNLOOP, points to ENDLOOP (and vice-versa).
+    * For BRK, points to ENDLOOP.
+ * For IF, points to ELSE or ENDIF.
+ * For ELSE, points to ENDIF.
+ */
+ GLint BranchTarget;
+};
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_program;
+
+extern void
+_mesa_init_instructions(struct prog_instruction *inst, GLuint count);
+
+extern struct prog_instruction *
+_mesa_copy_instructions(struct prog_instruction *dest,
+ const struct prog_instruction *src, GLuint n);
+
+extern GLuint
+_mesa_num_inst_src_regs(enum prog_opcode opcode);
+
+extern GLuint
+_mesa_num_inst_dst_regs(enum prog_opcode opcode);
+
+extern GLboolean
+_mesa_is_tex_instruction(enum prog_opcode opcode);
+
+extern GLboolean
+_mesa_check_soa_dependencies(const struct prog_instruction *inst);
+
+extern const char *
+_mesa_opcode_string(enum prog_opcode opcode);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* PROG_INSTRUCTION_H */
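To make the bit layout above concrete, here is a small sketch of packing and unpacking swizzles with these macros (illustrative only; each of the four components occupies 3 bits):

#include <assert.h>

static void swizzle_example(void)
{
   /* Select .wzyx: component 0 reads W, component 1 reads Z, etc. */
   GLuint swz = MAKE_SWIZZLE4(SWIZZLE_W, SWIZZLE_Z, SWIZZLE_Y, SWIZZLE_X);

   assert(GET_SWZ(swz, 0) == SWIZZLE_W);
   assert(GET_SWZ(swz, 3) == SWIZZLE_X);

   /* SWIZZLE_NOOP selects .xyzw and has no ZERO/ONE/NIL components. */
   assert(!HAS_EXTENDED_SWIZZLE(SWIZZLE_NOOP));
}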
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_parameter.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_parameter.h
new file mode 100644
index 0000000000..1fb0c5b7d1
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_parameter.h
@@ -0,0 +1,244 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file prog_parameter.h
+ * Program parameter lists and functions.
+ * \author Brian Paul
+ */
+
+#ifndef PROG_PARAMETER_H
+#define PROG_PARAMETER_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include "prog_statevars.h"
+
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Names of the various vertex/fragment program register files, etc.
+ *
+ * NOTE: first four tokens must fit into 2 bits (see t_vb_arbprogram.c)
+ * All values should fit in a 4-bit field.
+ *
+ * NOTE: PROGRAM_STATE_VAR, PROGRAM_CONSTANT, and PROGRAM_UNIFORM can all be
+ * considered to be "uniform" variables since they can only be set outside
+ * glBegin/End. They're also all stored in the same Parameters array.
+ */
+typedef enum
+{
+ PROGRAM_TEMPORARY, /**< machine->Temporary[] */
+   PROGRAM_ARRAY,       /**< Arrays & Matrices */
+ PROGRAM_INPUT, /**< machine->Inputs[] */
+ PROGRAM_OUTPUT, /**< machine->Outputs[] */
+ PROGRAM_STATE_VAR, /**< gl_program->Parameters[] */
+ PROGRAM_CONSTANT, /**< gl_program->Parameters[] */
+ PROGRAM_UNIFORM, /**< gl_program->Parameters[] */
+ PROGRAM_WRITE_ONLY, /**< A dummy, write-only register */
+ PROGRAM_ADDRESS, /**< machine->AddressReg */
+ PROGRAM_SAMPLER, /**< for shader samplers, compile-time only */
+   PROGRAM_SYSTEM_VALUE, /**< InstanceId, PrimitiveID, etc. */
+ PROGRAM_UNDEFINED, /**< Invalid/TBD value */
+ PROGRAM_IMMEDIATE, /**< Immediate value, used by TGSI */
+ PROGRAM_BUFFER, /**< for shader buffers, compile-time only */
+ PROGRAM_MEMORY, /**< for shared, global and local memory */
+ PROGRAM_IMAGE, /**< for shader images, compile-time only */
+ PROGRAM_HW_ATOMIC, /**< for hw atomic counters, compile-time only */
+ PROGRAM_FILE_MAX
+} gl_register_file;
+
+
+/**
+ * Actual data for constant values of parameters.
+ */
+typedef union gl_constant_value
+{
+ GLfloat f;
+ GLint b;
+ GLint i;
+ GLuint u;
+} gl_constant_value;
+
+
+/**
+ * Program parameter.
+ * Used by shaders/programs for uniforms, constants, varying vars, etc.
+ */
+struct gl_program_parameter
+{
+ const char *Name; /**< Null-terminated string */
+ gl_register_file Type:5; /**< PROGRAM_CONSTANT or STATE_VAR */
+
+ /**
+ * We need to keep track of whether the param is padded for use in the
+ * shader cache.
+ */
+ bool Padded:1;
+
+ GLenum16 DataType; /**< GL_FLOAT, GL_FLOAT_VEC2, etc */
+
+ /**
+    * Number of components (1..4), or more for larger uniforms.
+    * If the number of components is greater than 4, this parameter is
+    * part of a larger uniform such as a GLSL matrix or array; the next
+    * parameter's Size will then be this parameter's Size minus 4.
+ */
+ GLushort Size;
+ /**
+ * A sequence of STATE_* tokens and integers to identify GL state.
+ */
+ gl_state_index16 StateIndexes[STATE_LENGTH];
+
+ /**
+ * Index of this parameter's uniform storage.
+ */
+ uint32_t UniformStorageIndex;
+
+ /**
+ * Index of the first uniform storage that is associated with the same
+ * variable as this parameter.
+ */
+ uint32_t MainUniformStorageIndex;
+};
+
+
+/**
+ * List of gl_program_parameter instances.
+ */
+struct gl_program_parameter_list
+{
+ GLuint Size; /**< allocated size of Parameters, ParameterValues */
+ GLuint NumParameters; /**< number of used parameters in array */
+   unsigned NumParameterValues;  /**< number of used entries in ParameterValues */
+ struct gl_program_parameter *Parameters; /**< Array [Size] */
+ unsigned *ParameterValueOffset;
+ gl_constant_value *ParameterValues; /**< Array [Size] of gl_constant_value */
+ GLbitfield StateFlags; /**< _NEW_* flags indicating which state changes
+ might invalidate ParameterValues[] */
+};
+
+
+extern struct gl_program_parameter_list *
+_mesa_new_parameter_list(void);
+
+extern struct gl_program_parameter_list *
+_mesa_new_parameter_list_sized(unsigned size);
+
+extern void
+_mesa_free_parameter_list(struct gl_program_parameter_list *paramList);
+
+extern void
+_mesa_reserve_parameter_storage(struct gl_program_parameter_list *paramList,
+ unsigned reserve_slots);
+
+extern GLint
+_mesa_add_parameter(struct gl_program_parameter_list *paramList,
+ gl_register_file type, const char *name,
+ GLuint size, GLenum datatype,
+ const gl_constant_value *values,
+ const gl_state_index16 state[STATE_LENGTH],
+ bool pad_and_align);
+
+extern GLint
+_mesa_add_typed_unnamed_constant(struct gl_program_parameter_list *paramList,
+ const gl_constant_value values[4], GLuint size,
+ GLenum datatype, GLuint *swizzleOut);
+
+static inline GLint
+_mesa_add_unnamed_constant(struct gl_program_parameter_list *paramList,
+ const gl_constant_value values[4], GLuint size,
+ GLuint *swizzleOut)
+{
+ return _mesa_add_typed_unnamed_constant(paramList, values, size, GL_NONE,
+ swizzleOut);
+}
+
+extern GLint
+_mesa_add_sized_state_reference(struct gl_program_parameter_list *paramList,
+ const gl_state_index16 stateTokens[STATE_LENGTH],
+ const unsigned size, bool pad_and_align);
+
+extern GLint
+_mesa_add_state_reference(struct gl_program_parameter_list *paramList,
+ const gl_state_index16 stateTokens[]);
+
+
+static inline GLint
+_mesa_lookup_parameter_index(const struct gl_program_parameter_list *paramList,
+ const char *name)
+{
+ if (!paramList)
+ return -1;
+
+ /* name must be null-terminated */
+ for (GLint i = 0; i < (GLint) paramList->NumParameters; i++) {
+ if (paramList->Parameters[i].Name &&
+ strcmp(paramList->Parameters[i].Name, name) == 0)
+ return i;
+ }
+
+ return -1;
+}
+
+static inline bool
+_mesa_gl_datatype_is_64bit(GLenum datatype)
+{
+ switch (datatype) {
+ case GL_DOUBLE:
+ case GL_DOUBLE_VEC2:
+ case GL_DOUBLE_VEC3:
+ case GL_DOUBLE_VEC4:
+ case GL_DOUBLE_MAT2:
+ case GL_DOUBLE_MAT2x3:
+ case GL_DOUBLE_MAT2x4:
+ case GL_DOUBLE_MAT3:
+ case GL_DOUBLE_MAT3x2:
+ case GL_DOUBLE_MAT3x4:
+ case GL_DOUBLE_MAT4:
+ case GL_DOUBLE_MAT4x2:
+ case GL_DOUBLE_MAT4x3:
+ case GL_INT64_ARB:
+ case GL_INT64_VEC2_ARB:
+ case GL_INT64_VEC3_ARB:
+ case GL_INT64_VEC4_ARB:
+ case GL_UNSIGNED_INT64_ARB:
+ case GL_UNSIGNED_INT64_VEC2_ARB:
+ case GL_UNSIGNED_INT64_VEC3_ARB:
+ case GL_UNSIGNED_INT64_VEC4_ARB:
+ return true;
+ default:
+ return false;
+ }
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PROG_PARAMETER_H */
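A short sketch combining the inline helpers above: find a parameter by name and report whether its GL datatype is 64-bit (illustrative only; find_double_param is a hypothetical helper):

static int find_double_param(const struct gl_program_parameter_list *list,
                             const char *name)
{
   GLint idx = _mesa_lookup_parameter_index(list, name);

   if (idx < 0)
      return -1;   /* no parameter with that name */

   return _mesa_gl_datatype_is_64bit(list->Parameters[idx].DataType);
}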
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_statevars.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_statevars.h
new file mode 100644
index 0000000000..de457d1a7f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/prog_statevars.h
@@ -0,0 +1,156 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PROG_STATEVARS_H
+#define PROG_STATEVARS_H
+
+
+#include "main/glheader.h"
+#include "compiler/shader_enums.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+struct gl_context;
+struct gl_program_parameter_list;
+
+
+/**
+ * Used for describing GL state referenced from inside ARB vertex and
+ * fragment programs.
+ * A string such as "state.light[0].ambient" gets translated into a
+ * sequence of tokens such as [ STATE_LIGHT, 0, STATE_AMBIENT ].
+ *
+ * For state that's an array, like STATE_CLIPPLANE, the 2nd token [1] should
+ * always be the array index.
+ */
+typedef enum gl_state_index_ {
+ STATE_MATERIAL = 100, /* start at 100 so small ints are seen as ints */
+
+ STATE_LIGHT,
+ STATE_LIGHTMODEL_AMBIENT,
+ STATE_LIGHTMODEL_SCENECOLOR,
+ STATE_LIGHTPROD,
+
+ STATE_TEXGEN,
+
+ STATE_FOG_COLOR,
+ STATE_FOG_PARAMS,
+
+ STATE_CLIPPLANE,
+
+ STATE_POINT_SIZE,
+ STATE_POINT_ATTENUATION,
+
+ STATE_MODELVIEW_MATRIX,
+ STATE_PROJECTION_MATRIX,
+ STATE_MVP_MATRIX,
+ STATE_TEXTURE_MATRIX,
+ STATE_PROGRAM_MATRIX,
+ STATE_MATRIX_INVERSE,
+ STATE_MATRIX_TRANSPOSE,
+ STATE_MATRIX_INVTRANS,
+
+ STATE_AMBIENT,
+ STATE_DIFFUSE,
+ STATE_SPECULAR,
+ STATE_EMISSION,
+ STATE_SHININESS,
+ STATE_HALF_VECTOR,
+
+ STATE_POSITION, /**< xyzw = position */
+ STATE_ATTENUATION, /**< xyz = attenuation, w = spot exponent */
+ STATE_SPOT_DIRECTION, /**< xyz = direction, w = cos(cutoff) */
+ STATE_SPOT_CUTOFF, /**< x = cutoff, yzw = undefined */
+
+ STATE_TEXGEN_EYE_S,
+ STATE_TEXGEN_EYE_T,
+ STATE_TEXGEN_EYE_R,
+ STATE_TEXGEN_EYE_Q,
+ STATE_TEXGEN_OBJECT_S,
+ STATE_TEXGEN_OBJECT_T,
+ STATE_TEXGEN_OBJECT_R,
+ STATE_TEXGEN_OBJECT_Q,
+
+ STATE_TEXENV_COLOR,
+
+ STATE_NUM_SAMPLES, /* An integer, not a float like the other state vars */
+
+ STATE_DEPTH_RANGE,
+
+ STATE_VERTEX_PROGRAM,
+ STATE_FRAGMENT_PROGRAM,
+
+ STATE_ENV,
+ STATE_LOCAL,
+
+ STATE_INTERNAL, /* Mesa additions */
+ STATE_CURRENT_ATTRIB, /* ctx->Current vertex attrib value */
+ STATE_CURRENT_ATTRIB_MAYBE_VP_CLAMPED, /* ctx->Current vertex attrib value after passthrough vertex processing */
+ STATE_NORMAL_SCALE,
+ STATE_FOG_PARAMS_OPTIMIZED, /* for faster fog calc */
+ STATE_POINT_SIZE_CLAMPED, /* includes implementation dependent size clamp */
+ STATE_LIGHT_SPOT_DIR_NORMALIZED, /* pre-normalized spot dir */
+ STATE_LIGHT_POSITION, /* object vs eye space */
+ STATE_LIGHT_POSITION_NORMALIZED, /* object vs eye space */
+ STATE_LIGHT_HALF_VECTOR, /* object vs eye space */
+ STATE_PT_SCALE, /**< Pixel transfer RGBA scale */
+ STATE_PT_BIAS, /**< Pixel transfer RGBA bias */
+ STATE_FB_SIZE, /**< (width-1, height-1, 0, 0) */
+   STATE_FB_WPOS_Y_TRANSFORM, /**< (1, 0, -1, height) if an FBO is bound, (-1, height, 1, 0) otherwise */
+ STATE_TCS_PATCH_VERTICES_IN, /**< gl_PatchVerticesIn for TCS (integer) */
+ STATE_TES_PATCH_VERTICES_IN, /**< gl_PatchVerticesIn for TES (integer) */
+ /**
+ * A single enum gl_blend_support_qualifier value representing the
+ * currently active advanced blending equation, or zero if disabled.
+ */
+ STATE_ADVANCED_BLENDING_MODE,
+ STATE_ALPHA_REF, /* alpha-test reference value */
+ STATE_CLIP_INTERNAL, /* similar to STATE_CLIPPLANE, but in clip-space */
+ STATE_INTERNAL_DRIVER /* first available state index for drivers (must be last) */
+} gl_state_index;
+
+
+extern void
+_mesa_load_state_parameters(struct gl_context *ctx,
+ struct gl_program_parameter_list *paramList);
+
+
+extern GLbitfield
+_mesa_program_state_flags(const gl_state_index16 state[STATE_LENGTH]);
+
+
+extern char *
+_mesa_program_state_string(const gl_state_index16 state[STATE_LENGTH]);
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* PROG_STATEVARS_H */
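A sketch of the token encoding described in the comment above: the string "state.light[0].ambient" corresponds to the token sequence [ STATE_LIGHT, 0, STATE_AMBIENT ]. STATE_LENGTH and gl_state_index16 come from shader_enums.h; this is illustrative only:

static void state_token_example(void)
{
   gl_state_index16 tokens[STATE_LENGTH] = { 0 };

   tokens[0] = STATE_LIGHT;     /* state group */
   tokens[1] = 0;               /* array index: light #0 */
   tokens[2] = STATE_AMBIENT;   /* field within the light */

   /* A parameter list entry built from these tokens is refreshed at
    * draw time by _mesa_load_state_parameters(). */
   (void) tokens;
}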
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/program.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/program.h
new file mode 100644
index 0000000000..7de6804047
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/program.h
@@ -0,0 +1,169 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file program.h
+ * Vertex and fragment program support functions.
+ * \author Brian Paul
+ */
+
+
+/**
+ * \mainpage Mesa vertex and fragment program module
+ *
+ * This module contains most of the code for vertex and fragment
+ * programs and shaders, including state management, parsers, and
+ * (some) software routines for executing programs.
+ */
+
+#ifndef PROGRAM_H
+#define PROGRAM_H
+
+#include "prog_parameter.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern struct gl_program _mesa_DummyProgram;
+
+
+extern void
+_mesa_init_program(struct gl_context *ctx);
+
+extern void
+_mesa_free_program_data(struct gl_context *ctx);
+
+extern void
+_mesa_update_default_objects_program(struct gl_context *ctx);
+
+extern void
+_mesa_set_program_error(struct gl_context *ctx, GLint pos, const char *string);
+
+extern struct gl_program *
+_mesa_init_gl_program(struct gl_program *prog, gl_shader_stage stage,
+ GLuint id, bool is_arb_asm);
+
+extern struct gl_program *
+_mesa_new_program(struct gl_context *ctx, gl_shader_stage stage, GLuint id,
+ bool is_arb_asm);
+
+extern void
+_mesa_delete_program(struct gl_context *ctx, struct gl_program *prog);
+
+extern struct gl_program *
+_mesa_lookup_program(struct gl_context *ctx, GLuint id);
+
+extern void
+_mesa_reference_program_(struct gl_context *ctx,
+ struct gl_program **ptr,
+ struct gl_program *prog);
+
+static inline void
+_mesa_reference_program(struct gl_context *ctx,
+ struct gl_program **ptr,
+ struct gl_program *prog)
+{
+ if (*ptr != prog)
+ _mesa_reference_program_(ctx, ptr, prog);
+}
+
+extern GLboolean
+_mesa_insert_instructions(struct gl_program *prog, GLuint start, GLuint count);
+
+extern GLboolean
+_mesa_delete_instructions(struct gl_program *prog, GLuint start, GLuint count,
+ void *mem_ctx);
+
+extern void
+_mesa_find_used_registers(const struct gl_program *prog,
+ gl_register_file file,
+ GLboolean used[], GLuint usedSize);
+
+extern GLint
+_mesa_find_free_register(const GLboolean used[],
+ GLuint maxRegs, GLuint firstReg);
+
+extern GLint
+_mesa_get_min_invocations_per_fragment(struct gl_context *ctx,
+ const struct gl_program *prog);
+
+static inline GLuint
+_mesa_program_enum_to_shader_stage(GLenum v)
+{
+ switch (v) {
+ case GL_VERTEX_PROGRAM_ARB:
+ return MESA_SHADER_VERTEX;
+ case GL_FRAGMENT_PROGRAM_ARB:
+ return MESA_SHADER_FRAGMENT;
+ case GL_FRAGMENT_SHADER_ATI:
+ return MESA_SHADER_FRAGMENT;
+ case GL_GEOMETRY_PROGRAM_NV:
+ return MESA_SHADER_GEOMETRY;
+ case GL_TESS_CONTROL_PROGRAM_NV:
+ return MESA_SHADER_TESS_CTRL;
+ case GL_TESS_EVALUATION_PROGRAM_NV:
+ return MESA_SHADER_TESS_EVAL;
+ case GL_COMPUTE_PROGRAM_NV:
+ return MESA_SHADER_COMPUTE;
+ default:
+ assert(0);
+ return ~0;
+ }
+}
+
+
+static inline GLenum
+_mesa_shader_stage_to_program(unsigned stage)
+{
+ switch (stage) {
+ case MESA_SHADER_VERTEX:
+ return GL_VERTEX_PROGRAM_ARB;
+ case MESA_SHADER_FRAGMENT:
+ return GL_FRAGMENT_PROGRAM_ARB;
+ case MESA_SHADER_GEOMETRY:
+ return GL_GEOMETRY_PROGRAM_NV;
+ case MESA_SHADER_TESS_CTRL:
+ return GL_TESS_CONTROL_PROGRAM_NV;
+ case MESA_SHADER_TESS_EVAL:
+ return GL_TESS_EVALUATION_PROGRAM_NV;
+ case MESA_SHADER_COMPUTE:
+ return GL_COMPUTE_PROGRAM_NV;
+ }
+
+ assert(!"Unexpected shader stage in _mesa_shader_stage_to_program");
+ return GL_VERTEX_PROGRAM_ARB;
+}
+
+
+GLbitfield
+gl_external_samplers(const struct gl_program *prog);
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* PROGRAM_H */
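A tiny sanity sketch: for the stages both inline helpers above cover, they are inverses of each other (illustrative only):

#include <assert.h>

static void stage_enum_example(void)
{
   const GLenum target = _mesa_shader_stage_to_program(MESA_SHADER_FRAGMENT);

   assert(target == GL_FRAGMENT_PROGRAM_ARB);
   assert(_mesa_program_enum_to_shader_stage(target) == MESA_SHADER_FRAGMENT);
}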
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.c b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.c
new file mode 100644
index 0000000000..f86588b76b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#include "main/errors.h"
+#include "symbol_table.h"
+#include "util/hash_table.h"
+#include "util/u_string.h"
+
+struct symbol {
+ /** Symbol name. */
+ char *name;
+
+ /**
+ * Link to the next symbol in the table with the same name
+ *
+ * The linked list of symbols with the same name is ordered by scope
+ * from inner-most to outer-most.
+ */
+ struct symbol *next_with_same_name;
+
+ /**
+ * Link to the next symbol in the table with the same scope
+ *
+    * The linked list of symbols with the same scope is unordered.  Names
+    * within a single scope are unique.
+ */
+ struct symbol *next_with_same_scope;
+
+ /** Scope depth where this symbol was defined. */
+ unsigned depth;
+
+ /**
+ * Arbitrary user supplied data.
+ */
+ void *data;
+};
+
+
+/**
+ * Element of the scope stack.
+ */
+struct scope_level {
+ /** Link to next (inner) scope level. */
+ struct scope_level *next;
+
+ /** Linked list of symbols with the same scope. */
+ struct symbol *symbols;
+};
+
+
+/**
+ * Symbol table: a hash table over all symbol names plus a stack of scopes.
+ */
+struct _mesa_symbol_table {
+ /** Hash table containing all symbols in the symbol table. */
+ struct hash_table *ht;
+
+ /** Top of scope stack. */
+ struct scope_level *current_scope;
+
+ /** Current scope depth. */
+ unsigned depth;
+};
+
+void
+_mesa_symbol_table_pop_scope(struct _mesa_symbol_table *table)
+{
+ struct scope_level *const scope = table->current_scope;
+ struct symbol *sym = scope->symbols;
+
+ table->current_scope = scope->next;
+ table->depth--;
+
+ free(scope);
+
+ while (sym != NULL) {
+ struct symbol *const next = sym->next_with_same_scope;
+ struct hash_entry *hte = _mesa_hash_table_search(table->ht,
+ sym->name);
+ if (sym->next_with_same_name) {
+ /* If there is a symbol with this name in an outer scope update
+ * the hash table to point to it.
+ */
+ hte->key = sym->next_with_same_name->name;
+ hte->data = sym->next_with_same_name;
+ } else {
+ _mesa_hash_table_remove(table->ht, hte);
+ free(sym->name);
+ }
+
+ free(sym);
+ sym = next;
+ }
+}
+
+
+void
+_mesa_symbol_table_push_scope(struct _mesa_symbol_table *table)
+{
+ struct scope_level *const scope = calloc(1, sizeof(*scope));
+ if (scope == NULL) {
+ _mesa_error_no_memory(__func__);
+ return;
+ }
+
+ scope->next = table->current_scope;
+ table->current_scope = scope;
+ table->depth++;
+}
+
+
+static struct symbol *
+find_symbol(struct _mesa_symbol_table *table, const char *name)
+{
+ struct hash_entry *entry = _mesa_hash_table_search(table->ht, name);
+ return entry ? (struct symbol *) entry->data : NULL;
+}
+
+
+/**
+ * Determine the scope "distance" of a symbol from the current scope
+ *
+ * \return
+ * Zero if the symbol was defined in the current scope, otherwise a negative
+ * number whose magnitude is the number of scopes between the current scope
+ * and the scope where the symbol was defined. A value of -1 is also returned
+ * if the symbol does not exist.
+ */
+int
+_mesa_symbol_table_symbol_scope(struct _mesa_symbol_table *table,
+ const char *name)
+{
+ struct symbol *const sym = find_symbol(table, name);
+
+ if (sym) {
+ assert(sym->depth <= table->depth);
+ return sym->depth - table->depth;
+ }
+
+ return -1;
+}
+
+
+void *
+_mesa_symbol_table_find_symbol(struct _mesa_symbol_table *table,
+ const char *name)
+{
+ struct symbol *const sym = find_symbol(table, name);
+ if (sym)
+ return sym->data;
+
+ return NULL;
+}
+
+
+int
+_mesa_symbol_table_add_symbol(struct _mesa_symbol_table *table,
+ const char *name, void *declaration)
+{
+ struct symbol *new_sym;
+ struct symbol *sym = find_symbol(table, name);
+
+ if (sym && sym->depth == table->depth)
+ return -1;
+
+ new_sym = calloc(1, sizeof(*sym));
+ if (new_sym == NULL) {
+ _mesa_error_no_memory(__func__);
+ return -1;
+ }
+
+ if (sym) {
+ /* Store link to symbol in outer scope with the same name */
+ new_sym->next_with_same_name = sym;
+ new_sym->name = sym->name;
+ } else {
+ new_sym->name = strdup(name);
+ if (new_sym->name == NULL) {
+ free(new_sym);
+ _mesa_error_no_memory(__func__);
+ return -1;
+ }
+ }
+
+ new_sym->next_with_same_scope = table->current_scope->symbols;
+ new_sym->data = declaration;
+ new_sym->depth = table->depth;
+
+ table->current_scope->symbols = new_sym;
+
+ _mesa_hash_table_insert(table->ht, new_sym->name, new_sym);
+
+ return 0;
+}
+
+int
+_mesa_symbol_table_replace_symbol(struct _mesa_symbol_table *table,
+ const char *name,
+ void *declaration)
+{
+ struct symbol *sym = find_symbol(table, name);
+
+ /* If the symbol doesn't exist, it cannot be replaced. */
+ if (sym == NULL)
+ return -1;
+
+ sym->data = declaration;
+ return 0;
+}
+
+int
+_mesa_symbol_table_add_global_symbol(struct _mesa_symbol_table *table,
+ const char *name, void *declaration)
+{
+ struct scope_level *top_scope;
+ struct symbol *inner_sym = NULL;
+ struct symbol *sym = find_symbol(table, name);
+
+ while (sym) {
+ if (sym->depth == 0)
+ return -1;
+
+ inner_sym = sym;
+
+ /* Get symbol from the outer scope with the same name */
+ sym = sym->next_with_same_name;
+ }
+
+ /* Find the top-level scope */
+ for (top_scope = table->current_scope; top_scope->next != NULL;
+ top_scope = top_scope->next) {
+ /* empty */
+ }
+
+ sym = calloc(1, sizeof(*sym));
+ if (sym == NULL) {
+ _mesa_error_no_memory(__func__);
+ return -1;
+ }
+
+ if (inner_sym) {
+      /* The global is being added out of order (a symbol with this name
+       * already exists in an inner scope), so store a link from the
+       * inner-most such symbol to the new global symbol.
+       */
+ inner_sym->next_with_same_name = sym;
+
+ sym->name = inner_sym->name;
+ } else {
+ sym->name = strdup(name);
+ if (sym->name == NULL) {
+ free(sym);
+ _mesa_error_no_memory(__func__);
+ return -1;
+ }
+ }
+
+ sym->next_with_same_scope = top_scope->symbols;
+ sym->data = declaration;
+
+ top_scope->symbols = sym;
+
+ _mesa_hash_table_insert(table->ht, sym->name, sym);
+
+ return 0;
+}
+
+
+
+struct _mesa_symbol_table *
+_mesa_symbol_table_ctor(void)
+{
+ struct _mesa_symbol_table *table = calloc(1, sizeof(*table));
+
+ if (table != NULL) {
+ table->ht = _mesa_hash_table_create(NULL, _mesa_hash_string,
+ _mesa_key_string_equal);
+
+ _mesa_symbol_table_push_scope(table);
+ }
+
+ return table;
+}
+
+
+void
+_mesa_symbol_table_dtor(struct _mesa_symbol_table *table)
+{
+ while (table->current_scope != NULL) {
+ _mesa_symbol_table_pop_scope(table);
+ }
+
+ _mesa_hash_table_destroy(table->ht, NULL);
+ free(table);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.h
new file mode 100644
index 0000000000..6db2164fc2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/program/symbol_table.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#ifndef MESA_SYMBOL_TABLE_H
+#define MESA_SYMBOL_TABLE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _mesa_symbol_table;
+
+extern void _mesa_symbol_table_push_scope(struct _mesa_symbol_table *table);
+
+extern void _mesa_symbol_table_pop_scope(struct _mesa_symbol_table *table);
+
+extern int _mesa_symbol_table_add_symbol(struct _mesa_symbol_table *symtab,
+ const char *name, void *declaration);
+
+extern int _mesa_symbol_table_replace_symbol(struct _mesa_symbol_table *table,
+ const char *name,
+ void *declaration);
+
+extern int
+_mesa_symbol_table_add_global_symbol(struct _mesa_symbol_table *symtab,
+ const char *name,
+ void *declaration);
+
+extern int _mesa_symbol_table_symbol_scope(struct _mesa_symbol_table *table,
+ const char *name);
+
+extern void *_mesa_symbol_table_find_symbol(struct _mesa_symbol_table *symtab,
+ const char *name);
+
+extern struct _mesa_symbol_table *_mesa_symbol_table_ctor(void);
+
+extern void _mesa_symbol_table_dtor(struct _mesa_symbol_table *);
+
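+/* Example usage (illustrative sketch, not upstream documentation; "decl"
+ * stands for any user-owned data pointer). Note that the constructor
+ * already pushes an initial (global) scope:
+ *
+ *    struct _mesa_symbol_table *st = _mesa_symbol_table_ctor();
+ *
+ *    _mesa_symbol_table_push_scope(st);
+ *    _mesa_symbol_table_add_symbol(st, "x", decl);
+ *    void *found = _mesa_symbol_table_find_symbol(st, "x");
+ *    _mesa_symbol_table_pop_scope(st);
+ *
+ *    _mesa_symbol_table_dtor(st);
+ */
+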
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* MESA_SYMBOL_TABLE_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/mesa/vbo/vbo.h b/third_party/rust/glslopt/glsl-optimizer/src/mesa/vbo/vbo.h
new file mode 100644
index 0000000000..bd99e24651
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/mesa/vbo/vbo.h
@@ -0,0 +1,162 @@
+/*
+ * mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \brief Public interface to the VBO module
+ * \author Keith Whitwell
+ */
+
+
+#ifndef _VBO_H
+#define _VBO_H
+
+#include <stdbool.h>
+#include "main/glheader.h"
+#include "main/draw.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct gl_context;
+
+GLboolean
+_vbo_CreateContext(struct gl_context *ctx, bool use_buffer_objects);
+
+void
+_vbo_DestroyContext(struct gl_context *ctx);
+
+void
+vbo_exec_update_eval_maps(struct gl_context *ctx);
+
+void
+_vbo_install_exec_vtxfmt(struct gl_context *ctx);
+
+void
+vbo_initialize_exec_dispatch(const struct gl_context *ctx,
+ struct _glapi_table *exec);
+
+void
+vbo_initialize_save_dispatch(const struct gl_context *ctx,
+ struct _glapi_table *exec);
+
+void
+vbo_exec_FlushVertices(struct gl_context *ctx, GLuint flags);
+
+void
+vbo_save_SaveFlushVertices(struct gl_context *ctx);
+
+void
+vbo_save_NotifyBegin(struct gl_context *ctx, GLenum mode,
+ bool no_current_update);
+
+void
+vbo_save_NewList(struct gl_context *ctx, GLuint list, GLenum mode);
+
+void
+vbo_save_EndList(struct gl_context *ctx);
+
+void
+vbo_save_BeginCallList(struct gl_context *ctx, struct gl_display_list *list);
+
+void
+vbo_save_EndCallList(struct gl_context *ctx);
+
+
+void
+vbo_delete_minmax_cache(struct gl_buffer_object *bufferObj);
+
+void
+vbo_get_minmax_index_mapped(unsigned count, unsigned index_size,
+ unsigned restartIndex, bool restart,
+ const void *indices,
+ unsigned *min_index, unsigned *max_index);
+
+void
+vbo_get_minmax_indices(struct gl_context *ctx, const struct _mesa_prim *prim,
+ const struct _mesa_index_buffer *ib,
+ GLuint *min_index, GLuint *max_index, GLuint nr_prims);
+
+void
+vbo_sw_primitive_restart(struct gl_context *ctx,
+ const struct _mesa_prim *prim,
+ GLuint nr_prims,
+ const struct _mesa_index_buffer *ib,
+ GLuint num_instances, GLuint base_instance,
+ struct gl_buffer_object *indirect,
+ GLsizeiptr indirect_offset);
+
+
+const struct gl_array_attributes*
+_vbo_current_attrib(const struct gl_context *ctx, gl_vert_attrib attr);
+
+
+const struct gl_vertex_buffer_binding*
+_vbo_current_binding(const struct gl_context *ctx);
+
+
+void GLAPIENTRY
+_es_Color4f(GLfloat r, GLfloat g, GLfloat b, GLfloat a);
+
+void GLAPIENTRY
+_es_Normal3f(GLfloat x, GLfloat y, GLfloat z);
+
+void GLAPIENTRY
+_es_MultiTexCoord4f(GLenum target, GLfloat s, GLfloat t, GLfloat r, GLfloat q);
+
+void GLAPIENTRY
+_es_Materialfv(GLenum face, GLenum pname, const GLfloat *params);
+
+void GLAPIENTRY
+_es_Materialf(GLenum face, GLenum pname, GLfloat param);
+
+void GLAPIENTRY
+_es_VertexAttrib4f(GLuint index, GLfloat x, GLfloat y, GLfloat z, GLfloat w);
+
+void GLAPIENTRY
+_es_VertexAttrib1f(GLuint indx, GLfloat x);
+
+void GLAPIENTRY
+_es_VertexAttrib1fv(GLuint indx, const GLfloat* values);
+
+void GLAPIENTRY
+_es_VertexAttrib2f(GLuint indx, GLfloat x, GLfloat y);
+
+void GLAPIENTRY
+_es_VertexAttrib2fv(GLuint indx, const GLfloat* values);
+
+void GLAPIENTRY
+_es_VertexAttrib3f(GLuint indx, GLfloat x, GLfloat y, GLfloat z);
+
+void GLAPIENTRY
+_es_VertexAttrib3fv(GLuint indx, const GLfloat* values);
+
+void GLAPIENTRY
+_es_VertexAttrib4fv(GLuint indx, const GLfloat* values);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/bitscan.h b/third_party/rust/glslopt/glsl-optimizer/src/util/bitscan.h
new file mode 100644
index 0000000000..895a1e7a37
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/bitscan.h
@@ -0,0 +1,326 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef BITSCAN_H
+#define BITSCAN_H
+
+#include <assert.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <string.h>
+
+#if defined(_MSC_VER)
+#include <intrin.h>
+#endif
+
+#if defined(__POPCNT__)
+#include <popcntintrin.h>
+#endif
+
+#include "c99_compat.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Find first bit set in word. Least significant bit is 1.
+ * Return 0 if no bits set.
+ */
+#ifdef HAVE___BUILTIN_FFS
+#define ffs __builtin_ffs
+#elif defined(_MSC_VER) && (_M_IX86 || _M_ARM || _M_AMD64 || _M_IA64)
+static inline
+int ffs(int i)
+{
+ unsigned long index;
+ if (_BitScanForward(&index, i))
+ return index + 1;
+ else
+ return 0;
+}
+#else
+extern
+int ffs(int i);
+#endif
+
+#ifdef HAVE___BUILTIN_FFSLL
+#define ffsll __builtin_ffsll
+#elif defined(_MSC_VER) && (_M_AMD64 || _M_ARM64 || _M_IA64)
+static inline int
+ffsll(long long int i)
+{
+ unsigned long index;
+ if (_BitScanForward64(&index, i))
+ return index + 1;
+ else
+ return 0;
+}
+#else
+extern int
+ffsll(long long int val);
+#endif
+
+
+/* Destructively loop over all of the bits in a mask as in:
+ *
+ * while (mymask) {
+ * int i = u_bit_scan(&mymask);
+ * ... process element i
+ * }
+ *
+ */
+static inline int
+u_bit_scan(unsigned *mask)
+{
+ const int i = ffs(*mask) - 1;
+ *mask ^= (1u << i);
+ return i;
+}
+
+static inline int
+u_bit_scan64(uint64_t *mask)
+{
+ const int i = ffsll(*mask) - 1;
+ *mask ^= (((uint64_t)1) << i);
+ return i;
+}
+
+/* Determine if an unsigned value is a power of two.
+ *
+ * \note
+ * Zero is treated as a power of two.
+ */
+static inline bool
+util_is_power_of_two_or_zero(unsigned v)
+{
+ return (v & (v - 1)) == 0;
+}
+
+/* Determine if an uint64_t value is a power of two.
+ *
+ * \note
+ * Zero is treated as a power of two.
+ */
+static inline bool
+util_is_power_of_two_or_zero64(uint64_t v)
+{
+ return (v & (v - 1)) == 0;
+}
+
+/* Determine if an unsigned value is a power of two.
+ *
+ * \note
+ * Zero is \b not treated as a power of two.
+ */
+static inline bool
+util_is_power_of_two_nonzero(unsigned v)
+{
+ /* __POPCNT__ is different from HAVE___BUILTIN_POPCOUNT. The latter
+ * indicates the existence of the __builtin_popcount function. The former
+ * indicates that _mm_popcnt_u32 exists and is a native instruction.
+ *
+ * The other alternative is to use SSE 4.2 compile-time flags. This has
+ * two drawbacks. First, there is currently no build infrastructure for
+ * SSE 4.2 (only 4.1), so that would have to be added. Second, some AMD
+ * CPUs support POPCNT but not SSE 4.2 (e.g., Barcelona).
+ */
+#ifdef __POPCNT__
+ return _mm_popcnt_u32(v) == 1;
+#else
+ return v != 0 && (v & (v - 1)) == 0;
+#endif
+}
+
+/* For looping over a bitmask when you want to loop over consecutive bits
+ * manually, for example:
+ *
+ * while (mask) {
+ * int start, count, i;
+ *
+ * u_bit_scan_consecutive_range(&mask, &start, &count);
+ *
+ * for (i = 0; i < count; i++)
+ * ... process element (start+i)
+ * }
+ */
+static inline void
+u_bit_scan_consecutive_range(unsigned *mask, int *start, int *count)
+{
+ if (*mask == 0xffffffff) {
+ *start = 0;
+ *count = 32;
+ *mask = 0;
+ return;
+ }
+ *start = ffs(*mask) - 1;
+ *count = ffs(~(*mask >> *start)) - 1;
+ *mask &= ~(((1u << *count) - 1) << *start);
+}
+
+static inline void
+u_bit_scan_consecutive_range64(uint64_t *mask, int *start, int *count)
+{
+ if (*mask == ~0ull) {
+ *start = 0;
+ *count = 64;
+ *mask = 0;
+ return;
+ }
+ *start = ffsll(*mask) - 1;
+ *count = ffsll(~(*mask >> *start)) - 1;
+ *mask &= ~(((((uint64_t)1) << *count) - 1) << *start);
+}
+
+
+/**
+ * Find last bit set in a word. The least significant bit is 1.
+ * Return 0 if no bits are set.
+ * Essentially ffs() in the reverse direction.
+ */
+static inline unsigned
+util_last_bit(unsigned u)
+{
+#if defined(HAVE___BUILTIN_CLZ)
+ return u == 0 ? 0 : 32 - __builtin_clz(u);
+#elif defined(_MSC_VER) && (_M_IX86 || _M_ARM || _M_AMD64 || _M_IA64)
+ unsigned long index;
+ if (_BitScanReverse(&index, u))
+ return index + 1;
+ else
+ return 0;
+#else
+ unsigned r = 0;
+ while (u) {
+ r++;
+ u >>= 1;
+ }
+ return r;
+#endif
+}
+
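+/* Worked examples (illustrative): util_last_bit(0x10) == 5,
+ * util_last_bit(1) == 1, util_last_bit(0) == 0.
+ */
+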
+/**
+ * Find last bit set in a word. The least significant bit is 1.
+ * Return 0 if no bits are set.
+ * Essentially ffsll() in the reverse direction.
+ */
+static inline unsigned
+util_last_bit64(uint64_t u)
+{
+#if defined(HAVE___BUILTIN_CLZLL)
+ return u == 0 ? 0 : 64 - __builtin_clzll(u);
+#elif defined(_MSC_VER) && (_M_AMD64 || _M_ARM64 || _M_IA64)
+ unsigned long index;
+ if (_BitScanReverse64(&index, u))
+ return index + 1;
+ else
+ return 0;
+#else
+ unsigned r = 0;
+ while (u) {
+ r++;
+ u >>= 1;
+ }
+ return r;
+#endif
+}
+
+/**
+ * Find last bit in a word that does not match the sign bit. The least
+ * significant bit is 1.
+ * Return 0 if no bits are set.
+ */
+static inline unsigned
+util_last_bit_signed(int i)
+{
+ if (i >= 0)
+ return util_last_bit(i);
+ else
+ return util_last_bit(~(unsigned)i);
+}
+
+/* Returns a bitfield in which the first count bits starting at start are
+ * set.
+ */
+static inline unsigned
+u_bit_consecutive(unsigned start, unsigned count)
+{
+ assert(start + count <= 32);
+ if (count == 32)
+ return ~0;
+ return ((1u << count) - 1) << start;
+}
+
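+/* Worked example (illustrative): u_bit_consecutive(4, 3) == 0x70,
+ * i.e. bits 4..6 set.
+ */
+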
+static inline uint64_t
+u_bit_consecutive64(unsigned start, unsigned count)
+{
+ assert(start + count <= 64);
+ if (count == 64)
+ return ~(uint64_t)0;
+ return (((uint64_t)1 << count) - 1) << start;
+}
+
+/**
+ * Return number of bits set in n.
+ */
+static inline unsigned
+util_bitcount(unsigned n)
+{
+#if defined(HAVE___BUILTIN_POPCOUNT)
+ return __builtin_popcount(n);
+#else
+ /* K&R classic bitcount.
+ *
+ * For each iteration, clear the LSB from the bitfield.
+ * Requires only one iteration per set bit, instead of
+ * one iteration per bit less than highest set bit.
+ */
+ unsigned bits;
+ for (bits = 0; n; bits++) {
+ n &= n - 1;
+ }
+ return bits;
+#endif
+}
+
+static inline unsigned
+util_bitcount64(uint64_t n)
+{
+#ifdef HAVE___BUILTIN_POPCOUNTLL
+ return __builtin_popcountll(n);
+#else
+ return util_bitcount(n) + util_bitcount(n >> 32);
+#endif
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* BITSCAN_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/bitset.h b/third_party/rust/glslopt/glsl-optimizer/src/util/bitset.h
new file mode 100644
index 0000000000..0fdfe205f3
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/bitset.h
@@ -0,0 +1,261 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 2006 Brian Paul All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file bitset.h
+ * \brief Bitset of arbitrary size definitions.
+ * \author Michal Krol
+ */
+
+#ifndef BITSET_H
+#define BITSET_H
+
+#include "util/bitscan.h"
+#include "util/macros.h"
+
+/****************************************************************************
+ * generic bitset implementation
+ */
+
+#define BITSET_WORD unsigned int
+#define BITSET_WORDBITS (sizeof (BITSET_WORD) * 8)
+
+/* bitset declarations
+ */
+#define BITSET_WORDS(bits) (((bits) + BITSET_WORDBITS - 1) / BITSET_WORDBITS)
+#define BITSET_DECLARE(name, bits) BITSET_WORD name[BITSET_WORDS(bits)]
+
+/* bitset operations
+ */
+#define BITSET_COPY(x, y) memcpy( (x), (y), sizeof (x) )
+#define BITSET_EQUAL(x, y) (memcmp( (x), (y), sizeof (x) ) == 0)
+#define BITSET_ZERO(x) memset( (x), 0, sizeof (x) )
+#define BITSET_ONES(x) memset( (x), 0xff, sizeof (x) )
+
+#define BITSET_BITWORD(b) ((b) / BITSET_WORDBITS)
+#define BITSET_BIT(b) (1u << ((b) % BITSET_WORDBITS))
+
+/* single bit operations
+ */
+#define BITSET_TEST(x, b) (((x)[BITSET_BITWORD(b)] & BITSET_BIT(b)) != 0)
+#define BITSET_SET(x, b) ((x)[BITSET_BITWORD(b)] |= BITSET_BIT(b))
+#define BITSET_CLEAR(x, b) ((x)[BITSET_BITWORD(b)] &= ~BITSET_BIT(b))
+
+#define BITSET_MASK(b) (((b) % BITSET_WORDBITS == 0) ? ~0 : BITSET_BIT(b) - 1)
+#define BITSET_RANGE(b, e) ((BITSET_MASK((e) + 1)) & ~(BITSET_BIT(b) - 1))
+
+/* bit range operations
+ */
+#define BITSET_TEST_RANGE(x, b, e) \
+ (BITSET_BITWORD(b) == BITSET_BITWORD(e) ? \
+ (((x)[BITSET_BITWORD(b)] & BITSET_RANGE(b, e)) != 0) : \
+ (assert (!"BITSET_TEST_RANGE: bit range crosses word boundary"), 0))
+#define BITSET_SET_RANGE(x, b, e) \
+ (BITSET_BITWORD(b) == BITSET_BITWORD(e) ? \
+ ((x)[BITSET_BITWORD(b)] |= BITSET_RANGE(b, e)) : \
+ (assert (!"BITSET_SET_RANGE: bit range crosses word boundary"), 0))
+#define BITSET_CLEAR_RANGE(x, b, e) \
+ (BITSET_BITWORD(b) == BITSET_BITWORD(e) ? \
+ ((x)[BITSET_BITWORD(b)] &= ~BITSET_RANGE(b, e)) : \
+ (assert (!"BITSET_CLEAR_RANGE: bit range crosses word boundary"), 0))
+
+/* Get first bit set in a bitset.
+ */
+static inline int
+__bitset_ffs(const BITSET_WORD *x, int n)
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (x[i])
+ return ffs(x[i]) + BITSET_WORDBITS * i;
+ }
+
+ return 0;
+}
+
+#define BITSET_FFS(x) __bitset_ffs(x, ARRAY_SIZE(x))
+
+static inline unsigned
+__bitset_next_set(unsigned i, BITSET_WORD *tmp,
+ const BITSET_WORD *set, unsigned size)
+{
+ unsigned bit, word;
+
+ /* NOTE: The initial conditions for this function are very specific. At
+ * the start of the loop, the tmp variable must be set to *set and the
+ * initial i value set to 0. This way, if there is a bit set in the first
+ * word, we ignore the i-value and just grab that bit (so 0 is ok, even
+ * though 0 may be returned). If the first word is 0, then the value of
+ * `word` will be 0 and we will go on to look at the second word.
+ */
+ word = BITSET_BITWORD(i);
+ while (*tmp == 0) {
+ word++;
+
+ if (word >= BITSET_WORDS(size))
+ return size;
+
+ *tmp = set[word];
+ }
+
+ /* Find the next set bit in the non-zero word */
+ bit = ffs(*tmp) - 1;
+
+ /* Unset the bit */
+ *tmp &= ~(1ull << bit);
+
+ return word * BITSET_WORDBITS + bit;
+}
+
+/**
+ * Iterates over each set bit in a set
+ *
+ * @param __i iteration variable, bit number
+ * @param __set the bitset to iterate (will not be modified)
+ * @param __size number of bits in the set to consider
+ */
+#define BITSET_FOREACH_SET(__i, __set, __size) \
+ for (BITSET_WORD __tmp = *(__set), *__foo = &__tmp; __foo != NULL; __foo = NULL) \
+ for (__i = 0; \
+ (__i = __bitset_next_set(__i, &__tmp, __set, __size)) < __size;)
+
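+/* Example usage (illustrative sketch):
+ *
+ *    BITSET_DECLARE(mask, 128);
+ *    unsigned i;
+ *
+ *    BITSET_ZERO(mask);
+ *    BITSET_SET(mask, 5);
+ *    BITSET_SET(mask, 100);
+ *
+ *    BITSET_FOREACH_SET(i, mask, 128) {
+ *       ... visits i == 5, then i == 100 ...
+ *    }
+ */
+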
+#ifdef __cplusplus
+
+/**
+ * Simple C++ wrapper of a bitset type of static size, with value semantics
+ * and basic bitwise arithmetic operators. The operators defined below are
+ * expected to have the same semantics as the same operator applied to other
+ * fundamental integer types. T is the name of the struct to instantiate
+ * it as, and N is the number of bits in the bitset.
+ */
+#define DECLARE_BITSET_T(T, N) struct T { \
+ EXPLICIT_CONVERSION \
+ operator bool() const \
+ { \
+ for (unsigned i = 0; i < BITSET_WORDS(N); i++) \
+ if (words[i]) \
+ return true; \
+ return false; \
+ } \
+ \
+ T & \
+ operator=(int x) \
+ { \
+ const T c = {{ (BITSET_WORD)x }}; \
+ return *this = c; \
+ } \
+ \
+ friend bool \
+ operator==(const T &b, const T &c) \
+ { \
+ return BITSET_EQUAL(b.words, c.words); \
+ } \
+ \
+ friend bool \
+ operator!=(const T &b, const T &c) \
+ { \
+ return !(b == c); \
+ } \
+ \
+ friend bool \
+ operator==(const T &b, int x) \
+ { \
+ const T c = {{ (BITSET_WORD)x }}; \
+ return b == c; \
+ } \
+ \
+ friend bool \
+ operator!=(const T &b, int x) \
+ { \
+ return !(b == x); \
+ } \
+ \
+ friend T \
+ operator~(const T &b) \
+ { \
+ T c; \
+ for (unsigned i = 0; i < BITSET_WORDS(N); i++) \
+ c.words[i] = ~b.words[i]; \
+ return c; \
+ } \
+ \
+ T & \
+ operator|=(const T &b) \
+ { \
+ for (unsigned i = 0; i < BITSET_WORDS(N); i++) \
+ words[i] |= b.words[i]; \
+ return *this; \
+ } \
+ \
+ friend T \
+ operator|(const T &b, const T &c) \
+ { \
+ T d = b; \
+ d |= c; \
+ return d; \
+ } \
+ \
+ T & \
+ operator&=(const T &b) \
+ { \
+ for (unsigned i = 0; i < BITSET_WORDS(N); i++) \
+ words[i] &= b.words[i]; \
+ return *this; \
+ } \
+ \
+ friend T \
+ operator&(const T &b, const T &c) \
+ { \
+ T d = b; \
+ d &= c; \
+ return d; \
+ } \
+ \
+ bool \
+ test(unsigned i) const \
+ { \
+ return BITSET_TEST(words, i); \
+ } \
+ \
+ T & \
+ set(unsigned i) \
+ { \
+ BITSET_SET(words, i); \
+ return *this; \
+ } \
+ \
+ T & \
+ clear(unsigned i) \
+ { \
+ BITSET_CLEAR(words, i); \
+ return *this; \
+ } \
+ \
+ BITSET_WORD words[BITSET_WORDS(N)]; \
+ }
+
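+/* Instantiation sketch (illustrative; "my_bitset_t" is an arbitrary name):
+ *
+ *    DECLARE_BITSET_T(my_bitset_t, 128);
+ *
+ *    my_bitset_t m;
+ *    m = 0;                 (clears all bits via operator=(int))
+ *    m.set(70);
+ *    bool b = m.test(70);   (true)
+ */
+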
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/blob.c b/third_party/rust/glslopt/glsl-optimizer/src/util/blob.c
new file mode 100644
index 0000000000..db192146ac
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/blob.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <string.h>
+
+#include "blob.h"
+#include "u_math.h"
+
+#ifdef HAVE_VALGRIND
+#include <valgrind.h>
+#include <memcheck.h>
+#define VG(x) x
+#else
+#define VG(x)
+#endif
+
+#define BLOB_INITIAL_SIZE 4096
+
+/* Ensure that \blob will be able to fit an additional object of size
+ * \additional. The growing (if any) will occur by doubling the existing
+ * allocation.
+ */
+static bool
+grow_to_fit(struct blob *blob, size_t additional)
+{
+ size_t to_allocate;
+ uint8_t *new_data;
+
+ if (blob->out_of_memory)
+ return false;
+
+ if (blob->size + additional <= blob->allocated)
+ return true;
+
+ if (blob->fixed_allocation) {
+ blob->out_of_memory = true;
+ return false;
+ }
+
+ if (blob->allocated == 0)
+ to_allocate = BLOB_INITIAL_SIZE;
+ else
+ to_allocate = blob->allocated * 2;
+
+ to_allocate = MAX2(to_allocate, blob->allocated + additional);
+
+ new_data = realloc(blob->data, to_allocate);
+ if (new_data == NULL) {
+ blob->out_of_memory = true;
+ return false;
+ }
+
+ blob->data = new_data;
+ blob->allocated = to_allocate;
+
+ return true;
+}
+
+/* Align the blob->size so that reading or writing a value at (blob->data +
+ * blob->size) will result in an access aligned to a granularity of \alignment
+ * bytes.
+ *
+ * \return True unless allocation fails
+ */
+static bool
+align_blob(struct blob *blob, size_t alignment)
+{
+ const size_t new_size = align64(blob->size, alignment);
+
+ if (blob->size < new_size) {
+ if (!grow_to_fit(blob, new_size - blob->size))
+ return false;
+
+ if (blob->data)
+ memset(blob->data + blob->size, 0, new_size - blob->size);
+ blob->size = new_size;
+ }
+
+ return true;
+}
+
+static void
+align_blob_reader(struct blob_reader *blob, size_t alignment)
+{
+ blob->current = blob->data + align64(blob->current - blob->data, alignment);
+}
+
+void
+blob_init(struct blob *blob)
+{
+ blob->data = NULL;
+ blob->allocated = 0;
+ blob->size = 0;
+ blob->fixed_allocation = false;
+ blob->out_of_memory = false;
+}
+
+void
+blob_init_fixed(struct blob *blob, void *data, size_t size)
+{
+ blob->data = data;
+ blob->allocated = size;
+ blob->size = 0;
+ blob->fixed_allocation = true;
+ blob->out_of_memory = false;
+}
+
+void
+blob_finish_get_buffer(struct blob *blob, void **buffer, size_t *size)
+{
+ *buffer = blob->data;
+ *size = blob->size;
+ blob->data = NULL;
+
+ /* Trim the buffer. */
+ *buffer = realloc(*buffer, *size);
+}
+
+bool
+blob_overwrite_bytes(struct blob *blob,
+ size_t offset,
+ const void *bytes,
+ size_t to_write)
+{
+ /* Detect an attempt to overwrite data out of bounds. */
+ if (offset + to_write < offset || blob->size < offset + to_write)
+ return false;
+
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(bytes, to_write));
+
+ if (blob->data)
+ memcpy(blob->data + offset, bytes, to_write);
+
+ return true;
+}
+
+bool
+blob_write_bytes(struct blob *blob, const void *bytes, size_t to_write)
+{
+ if (! grow_to_fit(blob, to_write))
+ return false;
+
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(bytes, to_write));
+
+ if (blob->data && to_write > 0)
+ memcpy(blob->data + blob->size, bytes, to_write);
+ blob->size += to_write;
+
+ return true;
+}
+
+intptr_t
+blob_reserve_bytes(struct blob *blob, size_t to_write)
+{
+ intptr_t ret;
+
+ if (! grow_to_fit (blob, to_write))
+ return -1;
+
+ ret = blob->size;
+ blob->size += to_write;
+
+ return ret;
+}
+
+intptr_t
+blob_reserve_uint32(struct blob *blob)
+{
+ align_blob(blob, sizeof(uint32_t));
+ return blob_reserve_bytes(blob, sizeof(uint32_t));
+}
+
+intptr_t
+blob_reserve_intptr(struct blob *blob)
+{
+ align_blob(blob, sizeof(intptr_t));
+ return blob_reserve_bytes(blob, sizeof(intptr_t));
+}
+
+#define BLOB_WRITE_TYPE(name, type) \
+bool \
+name(struct blob *blob, type value) \
+{ \
+ align_blob(blob, sizeof(value)); \
+ return blob_write_bytes(blob, &value, sizeof(value)); \
+}
+
+BLOB_WRITE_TYPE(blob_write_uint8, uint8_t)
+BLOB_WRITE_TYPE(blob_write_uint16, uint16_t)
+BLOB_WRITE_TYPE(blob_write_uint32, uint32_t)
+BLOB_WRITE_TYPE(blob_write_uint64, uint64_t)
+BLOB_WRITE_TYPE(blob_write_intptr, intptr_t)
+
+#define ASSERT_ALIGNED(_offset, _align) \
+ assert(align64((_offset), (_align)) == (_offset))
+
+bool
+blob_overwrite_uint8 (struct blob *blob,
+ size_t offset,
+ uint8_t value)
+{
+ ASSERT_ALIGNED(offset, sizeof(value));
+ return blob_overwrite_bytes(blob, offset, &value, sizeof(value));
+}
+
+bool
+blob_overwrite_uint32 (struct blob *blob,
+ size_t offset,
+ uint32_t value)
+{
+ ASSERT_ALIGNED(offset, sizeof(value));
+ return blob_overwrite_bytes(blob, offset, &value, sizeof(value));
+}
+
+bool
+blob_overwrite_intptr (struct blob *blob,
+ size_t offset,
+ intptr_t value)
+{
+ ASSERT_ALIGNED(offset, sizeof(value));
+ return blob_overwrite_bytes(blob, offset, &value, sizeof(value));
+}
+
+bool
+blob_write_string(struct blob *blob, const char *str)
+{
+ return blob_write_bytes(blob, str, strlen(str) + 1);
+}
+
+void
+blob_reader_init(struct blob_reader *blob, const void *data, size_t size)
+{
+ blob->data = data;
+ blob->end = blob->data + size;
+ blob->current = data;
+ blob->overrun = false;
+}
+
+/* Check that an object of size \size can be read from this blob.
+ *
+ * If not, set blob->overrun to indicate that we attempted to read too far.
+ */
+static bool
+ensure_can_read(struct blob_reader *blob, size_t size)
+{
+ if (blob->overrun)
+ return false;
+
+ if (blob->current <= blob->end && blob->end - blob->current >= size)
+ return true;
+
+ blob->overrun = true;
+
+ return false;
+}
+
+const void *
+blob_read_bytes(struct blob_reader *blob, size_t size)
+{
+ const void *ret;
+
+ if (! ensure_can_read (blob, size))
+ return NULL;
+
+ ret = blob->current;
+
+ blob->current += size;
+
+ return ret;
+}
+
+void
+blob_copy_bytes(struct blob_reader *blob, void *dest, size_t size)
+{
+ const void *bytes;
+
+ bytes = blob_read_bytes(blob, size);
+ if (bytes == NULL || size == 0)
+ return;
+
+ memcpy(dest, bytes, size);
+}
+
+void
+blob_skip_bytes(struct blob_reader *blob, size_t size)
+{
+ if (ensure_can_read (blob, size))
+ blob->current += size;
+}
+
+/* The fixed-size read functions all have identical form, so they are
+ * generated with a preprocessor macro.
+ */
+
+#define BLOB_READ_TYPE(name, type) \
+type \
+name(struct blob_reader *blob) \
+{ \
+ type ret; \
+ int size = sizeof(ret); \
+ align_blob_reader(blob, size); \
+ if (! ensure_can_read(blob, size)) \
+ return 0; \
+ ret = *((type*) blob->current); \
+ blob->current += size; \
+ return ret; \
+}
+
+BLOB_READ_TYPE(blob_read_uint8, uint8_t)
+BLOB_READ_TYPE(blob_read_uint16, uint16_t)
+BLOB_READ_TYPE(blob_read_uint32, uint32_t)
+BLOB_READ_TYPE(blob_read_uint64, uint64_t)
+BLOB_READ_TYPE(blob_read_intptr, intptr_t)
+
+char *
+blob_read_string(struct blob_reader *blob)
+{
+ int size;
+ char *ret;
+ uint8_t *nul;
+
+ /* If we're already at the end, then this is an overrun. */
+ if (blob->current >= blob->end) {
+ blob->overrun = true;
+ return NULL;
+ }
+
+ /* Similarly, if there is no zero byte in the data remaining in this blob,
+ * we also consider that an overrun.
+ */
+ nul = memchr(blob->current, 0, blob->end - blob->current);
+
+ if (nul == NULL) {
+ blob->overrun = true;
+ return NULL;
+ }
+
+ size = nul - blob->current + 1;
+
+ assert(ensure_can_read(blob, size));
+
+ ret = (char *) blob->current;
+
+ blob->current += size;
+
+ return ret;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/blob.h b/third_party/rust/glslopt/glsl-optimizer/src/util/blob.h
new file mode 100644
index 0000000000..e1e156eb43
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/blob.h
@@ -0,0 +1,418 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef BLOB_H
+#define BLOB_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* The blob functions implement a simple, low-level API for serializing and
+ * deserializing.
+ *
+ * All objects written to a blob will be serialized directly, (without any
+ * additional meta-data to describe the data written). Therefore, it is the
+ * caller's responsibility to ensure that any data can be read later, (either
+ * by knowing exactly what data is expected, or by writing to the blob
+ * sufficient meta-data to describe what has been written).
+ *
+ * A blob is efficient in that it dynamically grows by doubling in size, so
+ * allocation costs are logarithmic.
+ */
+
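+/* Round-trip sketch (illustrative, not upstream documentation):
+ *
+ *    struct blob b;
+ *    struct blob_reader r;
+ *
+ *    blob_init(&b);
+ *    blob_write_uint32(&b, 42);
+ *    blob_write_string(&b, "abc");
+ *
+ *    blob_reader_init(&r, b.data, b.size);
+ *    uint32_t v = blob_read_uint32(&r);   (reads back 42)
+ *    char *s = blob_read_string(&r);      (points into b.data)
+ *    blob_finish(&b);                     (invalidates s)
+ */
+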
+struct blob {
+ /* The data actually written to the blob. */
+ uint8_t *data;
+
+ /** Number of bytes that have been allocated for \c data. */
+ size_t allocated;
+
+ /** The number of bytes that have actual data written to them. */
+ size_t size;
+
+   /** True if \c data is a fixed allocation that we cannot resize
+ *
+ * \see blob_init_fixed
+ */
+ bool fixed_allocation;
+
+ /**
+    * True if we've ever failed to realloc or if we go past the end of a fixed
+ * allocation blob.
+ */
+ bool out_of_memory;
+};
+
+/* When done reading, the caller can ensure that everything was consumed by
+ * checking the following:
+ *
+ * 1. blob->current should be equal to blob->end, (if not, too little was
+ * read).
+ *
+ * 2. blob->overrun should be false, (otherwise, too much was read).
+ */
+struct blob_reader {
+ const uint8_t *data;
+ const uint8_t *end;
+ const uint8_t *current;
+ bool overrun;
+};
+
+/**
+ * Init a new, empty blob.
+ */
+void
+blob_init(struct blob *blob);
+
+/**
+ * Init a new, fixed-size blob.
+ *
+ * A fixed-size blob has a fixed block of data that will not be freed on
+ * blob_finish and will never be grown. If we hit the end, we simply start
+ * returning false from the write functions.
+ *
+ * If a fixed-size blob has a NULL data pointer then no data is actually
+ * written, but it otherwise operates normally (the size still advances).
+ * This can be used to determine the size that will be required to write a
+ * given data structure.
+ */
+void
+blob_init_fixed(struct blob *blob, void *data, size_t size);
+
+/**
+ * Finish a blob and free its memory.
+ *
+ * If \blob was initialized with blob_init_fixed, the data pointer is
+ * considered to be owned by the user and will not be freed.
+ */
+static inline void
+blob_finish(struct blob *blob)
+{
+ if (!blob->fixed_allocation)
+ free(blob->data);
+}
+
+void
+blob_finish_get_buffer(struct blob *blob, void **buffer, size_t *size);
+
+/**
+ * Add some unstructured, fixed-size data to a blob.
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_bytes(struct blob *blob, const void *bytes, size_t to_write);
+
+/**
+ * Reserve space in \blob for a number of bytes.
+ *
+ * Space will be allocated within the blob for these bytes, but the bytes will
+ * be left uninitialized. The caller is expected to use \sa
+ * blob_overwrite_bytes to write to these bytes.
+ *
+ * \return An offset to space allocated within \blob to which \to_write bytes
+ * can be written, (or -1 in case of any allocation error).
+ */
+intptr_t
+blob_reserve_bytes(struct blob *blob, size_t to_write);
+
+/**
+ * Similar to \sa blob_reserve_bytes, but only reserves a uint32_t worth of
+ * space. Note that this must be used if later reading with \sa
+ * blob_read_uint32, since it aligns the offset correctly.
+ */
+intptr_t
+blob_reserve_uint32(struct blob *blob);
+
+/**
+ * Similar to \sa blob_reserve_bytes, but only reserves an intptr_t worth of
+ * space. Note that this must be used if later reading with \sa
+ * blob_read_intptr, since it aligns the offset correctly.
+ */
+intptr_t
+blob_reserve_intptr(struct blob *blob);
+
+/**
+ * Overwrite some data previously written to the blob.
+ *
+ * Writes data to an existing portion of the blob at an offset of \offset.
+ * This data range must have previously been written to the blob by one of the
+ * blob_write_* calls.
+ *
+ * For example usage, see blob_overwrite_uint32
+ *
+ * \return True unless the requested offset or offset+to_write lie outside
+ * the current blob's size.
+ */
+bool
+blob_overwrite_bytes(struct blob *blob,
+ size_t offset,
+ const void *bytes,
+ size_t to_write);
+
+/**
+ * Add a uint8_t to a blob.
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_uint8(struct blob *blob, uint8_t value);
+
+/**
+ * Overwrite a uint8_t previously written to the blob.
+ *
+ * Writes a uint8_t value to an existing portion of the blob at an offset of
+ * \offset. This data range must have previously been written to the blob by
+ * one of the blob_write_* calls.
+ *
+ * \return True unless the requested position or position+to_write lie outside
+ * the current blob's size.
+ */
+bool
+blob_overwrite_uint8(struct blob *blob,
+ size_t offset,
+ uint8_t value);
+
+/**
+ * Add a uint16_t to a blob.
+ *
+ * \note This function will only write to a uint16_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be added to the
+ * blob if this write follows some unaligned write (such as
+ * blob_write_string).
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_uint16(struct blob *blob, uint16_t value);
+
+/**
+ * Add a uint32_t to a blob.
+ *
+ * \note This function will only write to a uint32_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be added to the
+ * blob if this write follows some unaligned write (such as
+ * blob_write_string).
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_uint32(struct blob *blob, uint32_t value);
+
+/**
+ * Overwrite a uint32_t previously written to the blob.
+ *
+ * Writes a uint32_t value to an existing portion of the blob at an offset of
+ * \offset. This data range must have previously been written to the blob by
+ * one of the blob_write_* calls.
+ *
+ *
+ *
+ * size_t offset;
+ *
+ * offset = blob_reserve_uint32(blob);
+ * ... various blob write calls, writing N items ...
+ * blob_overwrite_uint32 (blob, offset, N);
+ *
+ * \return True unless the requested position or position+to_write lie outside
+ * the current blob's size.
+ */
+bool
+blob_overwrite_uint32(struct blob *blob,
+ size_t offset,
+ uint32_t value);
+
+/**
+ * Add a uint64_t to a blob.
+ *
+ * \note This function will only write to a uint64_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be added to the
+ * blob if this write follows some unaligned write (such as
+ * blob_write_string).
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_uint64(struct blob *blob, uint64_t value);
+
+/**
+ * Add an intptr_t to a blob.
+ *
+ * \note This function will only write to an intptr_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be added to the
+ * blob if this write follows some unaligned write (such as
+ * blob_write_string).
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_intptr(struct blob *blob, intptr_t value);
+
+/**
+ * Overwrite an intptr_t previously written to the blob.
+ *
+ * Writes an intptr_t value to an existing portion of the blob at an offset of
+ * \offset. This data range must have previously been written to the blob by
+ * one of the blob_write_* calls.
+ *
+ * For example usage, see blob_overwrite_uint32
+ *
+ * \return True unless the requested position or position+to_write lie outside
+ * the current blob's size.
+ */
+bool
+blob_overwrite_intptr(struct blob *blob,
+ size_t offset,
+ intptr_t value);
+
+/**
+ * Add a NULL-terminated string to a blob, (including the NULL terminator).
+ *
+ * \return True unless allocation failed.
+ */
+bool
+blob_write_string(struct blob *blob, const char *str);
+
+/**
+ * Start reading a blob, (initializing the contents of \blob for reading).
+ *
+ * After this call, the caller can use the various blob_read_* functions to
+ * read elements from the data array.
+ *
+ * For all of the blob_read_* functions, if there is insufficient data
+ * remaining, the functions will do nothing, (perhaps returning default values
+ * such as 0). The caller can detect this by noting that the blob_reader's
+ * current value is unchanged before and after the call.
+ */
+void
+blob_reader_init(struct blob_reader *blob, const void *data, size_t size);
+
+/**
+ * Read some unstructured, fixed-size data from the current location, (and
+ * update the current location to just past this data).
+ *
+ * \note The memory returned belongs to the data underlying the blob reader. The
+ * caller must copy the data in order to use it after the lifetime of the data
+ * underlying the blob reader.
+ *
+ * \return The bytes read (see note above about memory lifetime).
+ */
+const void *
+blob_read_bytes(struct blob_reader *blob, size_t size);
+
+/**
+ * Read some unstructured, fixed-size data from the current location, copying
+ * it to \dest (and update the current location to just past this data)
+ */
+void
+blob_copy_bytes(struct blob_reader *blob, void *dest, size_t size);
+
+/**
+ * Skip \size bytes within the blob.
+ */
+void
+blob_skip_bytes(struct blob_reader *blob, size_t size);
+
+/**
+ * Read a uint8_t from the current location, (and update the current location
+ * to just past this uint8_t).
+ *
+ * \return The uint8_t read
+ */
+uint8_t
+blob_read_uint8(struct blob_reader *blob);
+
+/**
+ * Read a uint16_t from the current location, (and update the current location
+ * to just past this uint16_t).
+ *
+ * \note This function will only read from a uint16_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be skipped.
+ *
+ * \return The uint16_t read
+ */
+uint16_t
+blob_read_uint16(struct blob_reader *blob);
+
+/**
+ * Read a uint32_t from the current location, (and update the current location
+ * to just past this uint32_t).
+ *
+ * \note This function will only read from a uint32_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be skipped.
+ *
+ * \return The uint32_t read
+ */
+uint32_t
+blob_read_uint32(struct blob_reader *blob);
+
+/**
+ * Read a uint64_t from the current location, (and update the current location
+ * to just past this uint64_t).
+ *
+ * \note This function will only read from a uint64_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be skipped.
+ *
+ * \return The uint64_t read
+ */
+uint64_t
+blob_read_uint64(struct blob_reader *blob);
+
+/**
+ * Read an intptr_t value from the current location, (and update the
+ * current location to just past this intptr_t).
+ *
+ * \note This function will only read from an intptr_t-aligned offset from the
+ * beginning of the blob's data, so some padding bytes may be skipped.
+ *
+ * \return The intptr_t read
+ */
+intptr_t
+blob_read_intptr(struct blob_reader *blob);
+
+/**
+ * Read a NULL-terminated string from the current location, (and update the
+ * current location to just past this string).
+ *
+ * \note The memory returned belongs to the data underlying the blob reader. The
+ * caller must copy the string in order to use the string after the lifetime
+ * of the data underlying the blob reader.
+ *
+ * \return The string read (see note above about memory lifetime). However, if
+ * there is no NULL byte remaining within the blob, this function returns
+ * NULL.
+ */
+char *
+blob_read_string(struct blob_reader *blob);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* BLOB_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/compiler.h b/third_party/rust/glslopt/glsl-optimizer/src/util/compiler.h
new file mode 100644
index 0000000000..43a06b4313
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/compiler.h
@@ -0,0 +1,76 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2008 Brian Paul All Rights Reserved.
+ * Copyright (C) 2009 VMware, Inc. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+/**
+ * \file compiler.h
+ * Compiler-related stuff.
+ */
+
+
+#ifndef COMPILER_H
+#define COMPILER_H
+
+
+#include <assert.h>
+
+#include "util/macros.h"
+
+#include "c99_compat.h" /* inline, __func__, etc. */
+
+
+/**
+ * Either define MESA_BIG_ENDIAN or MESA_LITTLE_ENDIAN, and CPU_TO_LE32.
+ * Do not use these unless absolutely necessary!
+ * Try to use a runtime test instead.
+ * For now, only used by some DRI hardware drivers for color/texel packing.
+ */
+#if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && BYTE_ORDER == BIG_ENDIAN
+#if defined(__linux__)
+#include <byteswap.h>
+#define CPU_TO_LE32( x ) bswap_32( x )
+#elif defined(__APPLE__)
+#include <CoreFoundation/CFByteOrder.h>
+#define CPU_TO_LE32( x ) CFSwapInt32HostToLittle( x )
+#elif defined(__OpenBSD__)
+#include <sys/types.h>
+#define CPU_TO_LE32( x ) htole32( x )
+#else /*__linux__ */
+#include <sys/endian.h>
+#define CPU_TO_LE32( x ) bswap32( x )
+#endif /*__linux__*/
+#define MESA_BIG_ENDIAN 1
+#else
+#define CPU_TO_LE32( x ) ( x )
+#define MESA_LITTLE_ENDIAN 1
+#endif
+#define LE32_TO_CPU( x ) CPU_TO_LE32( x )
+
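+/* Worked example (illustrative): on a big-endian build,
+ * CPU_TO_LE32(0x11223344) yields 0x44332211; on a little-endian build it
+ * is the identity.
+ */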
+
+
+#define IEEE_ONE 0x3f800000
+
+
+#endif /* COMPILER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/crc32.c b/third_party/rust/glslopt/glsl-optimizer/src/util/crc32.c
new file mode 100644
index 0000000000..425046ab5f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/crc32.c
@@ -0,0 +1,134 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * CRC32 implementation.
+ *
+ * @author Jose Fonseca
+ */
+
+
+#ifdef HAVE_ZLIB
+#include <zlib.h>
+#endif
+#include "crc32.h"
+
+
+static const uint32_t
+util_crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+
+/**
+ * @sa http://www.w3.org/TR/PNG/#D-CRCAppendix
+ */
+uint32_t
+util_hash_crc32(const void *data, size_t size)
+{
+ const uint8_t *p = data;
+ uint32_t crc = 0xffffffff;
+
+#ifdef HAVE_ZLIB
+ /* Prefer zlib's implementation for better performance.
+ * zlib's uInt is always "unsigned int" while size_t can be 64-bit.
+ * Since 1.2.9 there's crc32_z that takes size_t, but use the more
+ * widely available function to avoid build system complications.
+ */
+ if ((uInt)size == size)
+ return ~crc32(0, data, size);
+#endif
+
+ while (size--)
+ crc = util_crc32_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
+
+ return crc;
+}
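+
+/* A minimal usage sketch (illustrative, not part of the original file):
+ * hash a buffer and compare against a previously stored checksum, much as
+ * the disk cache code does when validating entries. 'stored_crc' and
+ * 'report_corruption' are hypothetical.
+ *
+ *    const char msg[] = "hello";
+ *    uint32_t crc = util_hash_crc32(msg, sizeof(msg) - 1);
+ *    if (crc != stored_crc)
+ *       report_corruption();
+ */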
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/crc32.h b/third_party/rust/glslopt/glsl-optimizer/src/util/crc32.h
new file mode 100644
index 0000000000..b6a21f4170
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/crc32.h
@@ -0,0 +1,55 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * CRC32 function.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef CRC32_H_
+#define CRC32_H_
+
+#include <stdlib.h>
+#include <stdint.h>
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+uint32_t
+util_hash_crc32(const void *data, size_t size);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* CRC32_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/debug.c b/third_party/rust/glslopt/glsl-optimizer/src/util/debug.c
new file mode 100644
index 0000000000..89ae613107
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/debug.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include "debug.h"
+#include "u_string.h"
+
+uint64_t
+parse_debug_string(const char *debug,
+ const struct debug_control *control)
+{
+ uint64_t flag = 0;
+
+ if (debug != NULL) {
+ for (; control->string != NULL; control++) {
+ if (!strcmp(debug, "all")) {
+ flag |= control->flag;
+
+ } else {
+ const char *s = debug;
+ unsigned n;
+
+ for (; n = strcspn(s, ", "), *s; s += MAX2(1, n)) {
+ if (strlen(control->string) == n &&
+ !strncmp(control->string, s, n))
+ flag |= control->flag;
+ }
+ }
+ }
+ }
+
+ return flag;
+}
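+
+/* A usage sketch (illustrative; the table contents and the environment
+ * variable name are hypothetical): callers declare a NULL-terminated
+ * debug_control table and parse an environment variable against it.
+ *
+ *    static const struct debug_control debug_options[] = {
+ *       { "fs", 1ull << 0 },
+ *       { "vs", 1ull << 1 },
+ *       { NULL, 0 }
+ *    };
+ *    uint64_t flags = parse_debug_string(getenv("EXAMPLE_DEBUG"),
+ *                                        debug_options);
+ */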
+
+bool
+comma_separated_list_contains(const char *list, const char *s)
+{
+ assert(list);
+ const size_t len = strlen(s);
+
+ for (unsigned n; n = strcspn(list, ","), *list; list += MAX2(1, n)) {
+ if (n == len && !strncmp(list, s, n))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * Reads an environment variable and interprets its value as a boolean.
+ *
+ * Recognizes 1/true/y/yes and 0/false/n/no (the words case-insensitively).
+ * Any other value results in the default value.
+ */
+bool
+env_var_as_boolean(const char *var_name, bool default_value)
+{
+ const char *str = getenv(var_name);
+ if (str == NULL)
+ return default_value;
+
+ if (strcmp(str, "1") == 0 ||
+ strcasecmp(str, "true") == 0 ||
+ strcasecmp(str, "y") == 0 ||
+ strcasecmp(str, "yes") == 0) {
+ return true;
+ } else if (strcmp(str, "0") == 0 ||
+ strcasecmp(str, "false") == 0 ||
+ strcasecmp(str, "n") == 0 ||
+ strcasecmp(str, "no") == 0) {
+ return false;
+ } else {
+ return default_value;
+ }
+}
+
+/**
+ * Reads an environment variable and interprets its value as an unsigned integer.
+ */
+unsigned
+env_var_as_unsigned(const char *var_name, unsigned default_value)
+{
+ char *str = getenv(var_name);
+ if (str) {
+ char *end;
+ unsigned long result;
+
+ errno = 0;
+ result = strtoul(str, &end, 0);
+ if (errno == 0 && end != str && *end == '\0')
+ return result;
+ }
+ return default_value;
+}
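+
+/* Illustrative usage of the env helpers above (the variable names are
+ * hypothetical):
+ *
+ *    bool verbose = env_var_as_boolean("EXAMPLE_VERBOSE", false);
+ *    unsigned iterations = env_var_as_unsigned("EXAMPLE_ITERATIONS", 8);
+ */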
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/debug.h b/third_party/rust/glslopt/glsl-optimizer/src/util/debug.h
new file mode 100644
index 0000000000..bbcc197554
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/debug.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _UTIL_DEBUG_H
+#define _UTIL_DEBUG_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct debug_control {
+ const char * string;
+ uint64_t flag;
+};
+
+uint64_t
+parse_debug_string(const char *debug,
+ const struct debug_control *control);
+bool
+comma_separated_list_contains(const char *list, const char *s);
+bool
+env_var_as_boolean(const char *var_name, bool default_value);
+unsigned
+env_var_as_unsigned(const char *var_name, unsigned default_value);
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* _UTIL_DEBUG_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/detect_os.h b/third_party/rust/glslopt/glsl-optimizer/src/util/detect_os.h
new file mode 100644
index 0000000000..6506948e03
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/detect_os.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2008 VMware, Inc. */
+
+/**
+ * Auto-detect the operating system family.
+ *
+ * See also:
+ * - http://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html
+ * - echo | gcc -dM -E - | sort
+ * - http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+ *
+ * @author José Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef DETECT_OS_H
+#define DETECT_OS_H
+
+#if defined(__linux__)
+#define DETECT_OS_LINUX 1
+#define DETECT_OS_UNIX 1
+#endif
+
+/*
+ * Android defines __linux__, so DETECT_OS_LINUX and DETECT_OS_UNIX will
+ * also be defined.
+ */
+#if defined(ANDROID)
+#define DETECT_OS_ANDROID 1
+#endif
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+#define DETECT_OS_FREEBSD 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__OpenBSD__)
+#define DETECT_OS_OPENBSD 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__NetBSD__)
+#define DETECT_OS_NETBSD 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__DragonFly__)
+#define DETECT_OS_DRAGONFLY 1
+#define DETECT_OS_BSD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__GNU__)
+#define DETECT_OS_HURD 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__sun)
+#define DETECT_OS_SOLARIS 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__APPLE__)
+#define DETECT_OS_APPLE 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(_WIN32) || defined(WIN32)
+#define DETECT_OS_WINDOWS 1
+#endif
+
+#if defined(__HAIKU__)
+#define DETECT_OS_HAIKU 1
+#define DETECT_OS_UNIX 1
+#endif
+
+#if defined(__CYGWIN__)
+#define DETECT_OS_CYGWIN 1
+#define DETECT_OS_UNIX 1
+#endif
+
+
+/*
+ * Make sure DETECT_OS_* are always defined, so that they can be used with #if
+ */
+#ifndef DETECT_OS_ANDROID
+#define DETECT_OS_ANDROID 0
+#endif
+#ifndef DETECT_OS_APPLE
+#define DETECT_OS_APPLE 0
+#endif
+#ifndef DETECT_OS_BSD
+#define DETECT_OS_BSD 0
+#endif
+#ifndef DETECT_OS_CYGWIN
+#define DETECT_OS_CYGWIN 0
+#endif
+#ifndef DETECT_OS_DRAGONFLY
+#define DETECT_OS_DRAGONFLY 0
+#endif
+#ifndef DETECT_OS_FREEBSD
+#define DETECT_OS_FREEBSD 0
+#endif
+#ifndef DETECT_OS_HAIKU
+#define DETECT_OS_HAIKU 0
+#endif
+#ifndef DETECT_OS_HURD
+#define DETECT_OS_HURD 0
+#endif
+#ifndef DETECT_OS_LINUX
+#define DETECT_OS_LINUX 0
+#endif
+#ifndef DETECT_OS_NETBSD
+#define DETECT_OS_NETBSD 0
+#endif
+#ifndef DETECT_OS_OPENBSD
+#define DETECT_OS_OPENBSD 0
+#endif
+#ifndef DETECT_OS_SOLARIS
+#define DETECT_OS_SOLARIS 0
+#endif
+#ifndef DETECT_OS_UNIX
+#define DETECT_OS_UNIX 0
+#endif
+#ifndef DETECT_OS_WINDOWS
+#define DETECT_OS_WINDOWS 0
+#endif
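+
+/* Because every DETECT_OS_* macro is guaranteed to be defined as 0 or 1,
+ * callers can use them directly in #if expressions. For example (the
+ * helper functions are hypothetical):
+ *
+ *    #if DETECT_OS_WINDOWS
+ *       use_win32_path_separator();
+ *    #else
+ *       use_posix_path_separator();
+ *    #endif
+ */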
+
+#endif /* DETECT_OS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.c b/third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.c
new file mode 100644
index 0000000000..a92d621927
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.c
@@ -0,0 +1,1344 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifdef ENABLE_SHADER_CACHE
+
+#include <ctype.h>
+#include <ftw.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/file.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pwd.h>
+#include <errno.h>
+#include <dirent.h>
+#include <inttypes.h>
+#include "zlib.h"
+
+#ifdef HAVE_ZSTD
+#include "zstd.h"
+#endif
+
+#include "util/crc32.h"
+#include "util/debug.h"
+#include "util/rand_xor.h"
+#include "util/u_atomic.h"
+#include "util/u_queue.h"
+#include "util/mesa-sha1.h"
+#include "util/ralloc.h"
+#include "util/compiler.h"
+
+#include "disk_cache.h"
+
+/* Number of bits to mask off from a cache key to get an index. */
+#define CACHE_INDEX_KEY_BITS 16
+
+/* Mask for computing an index from a key. */
+#define CACHE_INDEX_KEY_MASK ((1 << CACHE_INDEX_KEY_BITS) - 1)
+
+/* The number of keys that can be stored in the index. */
+#define CACHE_INDEX_MAX_KEYS (1 << CACHE_INDEX_KEY_BITS)
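+
+/* For illustration: with the defaults above, the index maps the first
+ * 32 bits of a key to one of 2^16 slots, i.e.
+ *    i = CPU_TO_LE32(*(const uint32_t *)key) & CACHE_INDEX_KEY_MASK;
+ * which is exactly what disk_cache_put_key()/disk_cache_has_key() do below.
+ */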
+
+/* The cache version should be bumped whenever a change is made to the
+ * structure of cache entries or the index. This will give any 3rd party
+ * applications reading the cache entries a chance to adjust to the changes.
+ *
+ * - The cache version is checked internally when reading a cache entry. If we
+ * ever have a mismatch we are in big trouble as this means we had a cache
+ * collision. In case of such an event please check the skys for giant
+ * asteroids and that the entire Mesa team hasn't been eaten by wolves.
+ *
+ * - There is no strict requirement that cache versions be backwards
+ * compatible but effort should be taken to limit disruption where possible.
+ */
+#define CACHE_VERSION 1
+
+/* 3 is the recommended level, with 22 as the absolute maximum */
+#define ZSTD_COMPRESSION_LEVEL 3
+
+struct disk_cache {
+ /* The path to the cache directory. */
+ char *path;
+ bool path_init_failed;
+
+ /* Thread queue for compressing and writing cache entries to disk */
+ struct util_queue cache_queue;
+
+ /* Seed for rand, which is used to pick a random directory */
+ uint64_t seed_xorshift128plus[2];
+
+ /* A pointer to the mmapped index file within the cache directory. */
+ uint8_t *index_mmap;
+ size_t index_mmap_size;
+
+ /* Pointer to total size of all objects in cache (within index_mmap) */
+ uint64_t *size;
+
+ /* Pointer to stored keys, (within index_mmap). */
+ uint8_t *stored_keys;
+
+ /* Maximum size of all cached objects (in bytes). */
+ uint64_t max_size;
+
+ /* Driver cache keys. */
+ uint8_t *driver_keys_blob;
+ size_t driver_keys_blob_size;
+
+ disk_cache_put_cb blob_put_cb;
+ disk_cache_get_cb blob_get_cb;
+};
+
+struct disk_cache_put_job {
+ struct util_queue_fence fence;
+
+ struct disk_cache *cache;
+
+ cache_key key;
+
+ /* Copy of cache data to be compressed and written. */
+ void *data;
+
+ /* Size of data to be compressed and written. */
+ size_t size;
+
+ struct cache_item_metadata cache_item_metadata;
+};
+
+/* Create a directory named 'path' if it does not already exist.
+ *
+ * Returns: 0 if path already exists as a directory or if created.
+ * -1 in all other cases.
+ */
+static int
+mkdir_if_needed(const char *path)
+{
+ struct stat sb;
+
+ /* If the path exists already, then our work is done if it's a
+ * directory, but it's an error if it is not.
+ */
+ if (stat(path, &sb) == 0) {
+ if (S_ISDIR(sb.st_mode)) {
+ return 0;
+ } else {
+ fprintf(stderr, "Cannot use %s for shader cache (not a directory)"
+ "---disabling.\n", path);
+ return -1;
+ }
+ }
+
+ int ret = mkdir(path, 0755);
+ if (ret == 0 || (ret == -1 && errno == EEXIST))
+ return 0;
+
+ fprintf(stderr, "Failed to create %s for shader cache (%s)---disabling.\n",
+ path, strerror(errno));
+
+ return -1;
+}
+
+/* Concatenate an existing path and a new name to form a new path. If the new
+ * path does not exist as a directory, create it then return the resulting
+ * name of the new path (ralloc'ed off of 'ctx').
+ *
+ * Returns NULL on any error, such as:
+ *
+ * <path> does not exist or is not a directory
+ * <path>/<name> exists but is not a directory
+ * <path>/<name> cannot be created as a directory
+ */
+static char *
+concatenate_and_mkdir(void *ctx, const char *path, const char *name)
+{
+ char *new_path;
+ struct stat sb;
+
+ if (stat(path, &sb) != 0 || ! S_ISDIR(sb.st_mode))
+ return NULL;
+
+ new_path = ralloc_asprintf(ctx, "%s/%s", path, name);
+
+ if (mkdir_if_needed(new_path) == 0)
+ return new_path;
+ else
+ return NULL;
+}
+
+#define DRV_KEY_CPY(_dst, _src, _src_size) \
+do { \
+ memcpy(_dst, _src, _src_size); \
+ _dst += _src_size; \
+} while (0);
+
+struct disk_cache *
+disk_cache_create(const char *gpu_name, const char *driver_id,
+ uint64_t driver_flags)
+{
+ void *local;
+ struct disk_cache *cache = NULL;
+ char *path, *max_size_str;
+ uint64_t max_size;
+ int fd = -1;
+ struct stat sb;
+ size_t size;
+
+ uint8_t cache_version = CACHE_VERSION;
+ size_t cv_size = sizeof(cache_version);
+
+ /* If running as a user other than the real user, disable the cache */
+ if (geteuid() != getuid())
+ return NULL;
+
+ /* A ralloc context for transient data during this invocation. */
+ local = ralloc_context(NULL);
+ if (local == NULL)
+ goto fail;
+
+ /* At user request, disable shader cache entirely. */
+ if (env_var_as_boolean("MESA_GLSL_CACHE_DISABLE", false))
+ goto fail;
+
+ cache = rzalloc(NULL, struct disk_cache);
+ if (cache == NULL)
+ goto fail;
+
+ /* Assume failure. */
+ cache->path_init_failed = true;
+
+ /* Determine path for cache based on the first defined name as follows:
+ *
+ * $MESA_GLSL_CACHE_DIR
+ * $XDG_CACHE_HOME/mesa_shader_cache
+ * <pwd.pw_dir>/.cache/mesa_shader_cache
+ */
+ path = getenv("MESA_GLSL_CACHE_DIR");
+ if (path) {
+ if (mkdir_if_needed(path) == -1)
+ goto path_fail;
+
+ path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
+ if (path == NULL)
+ goto path_fail;
+ }
+
+ if (path == NULL) {
+ char *xdg_cache_home = getenv("XDG_CACHE_HOME");
+
+ if (xdg_cache_home) {
+ if (mkdir_if_needed(xdg_cache_home) == -1)
+ goto path_fail;
+
+ path = concatenate_and_mkdir(local, xdg_cache_home, CACHE_DIR_NAME);
+ if (path == NULL)
+ goto path_fail;
+ }
+ }
+
+ if (path == NULL) {
+ char *buf;
+ size_t buf_size;
+ struct passwd pwd, *result;
+
+ buf_size = sysconf(_SC_GETPW_R_SIZE_MAX);
+ if (buf_size == -1)
+ buf_size = 512;
+
+ /* Loop until buf_size is large enough to query the directory */
+ while (1) {
+ buf = ralloc_size(local, buf_size);
+
+ getpwuid_r(getuid(), &pwd, buf, buf_size, &result);
+ if (result)
+ break;
+
+ if (errno == ERANGE) {
+ ralloc_free(buf);
+ buf = NULL;
+ buf_size *= 2;
+ } else {
+ goto path_fail;
+ }
+ }
+
+ path = concatenate_and_mkdir(local, pwd.pw_dir, ".cache");
+ if (path == NULL)
+ goto path_fail;
+
+ path = concatenate_and_mkdir(local, path, CACHE_DIR_NAME);
+ if (path == NULL)
+ goto path_fail;
+ }
+
+ cache->path = ralloc_strdup(cache, path);
+ if (cache->path == NULL)
+ goto path_fail;
+
+ path = ralloc_asprintf(local, "%s/index", cache->path);
+ if (path == NULL)
+ goto path_fail;
+
+ fd = open(path, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
+ if (fd == -1)
+ goto path_fail;
+
+ if (fstat(fd, &sb) == -1)
+ goto path_fail;
+
+ /* Force the index file to be the expected size. */
+ size = sizeof(*cache->size) + CACHE_INDEX_MAX_KEYS * CACHE_KEY_SIZE;
+ if (sb.st_size != size) {
+ if (ftruncate(fd, size) == -1)
+ goto path_fail;
+ }
+
+ /* We map this shared so that other processes see updates that we
+ * make.
+ *
+ * Note: We do use atomic addition to ensure that multiple
+ * processes don't scramble the cache size recorded in the
+ * index. But we don't use any locking to prevent multiple
+ * processes from updating the same entry simultaneously. The idea
+ * is that if either result lands entirely in the index, then
+ * that's equivalent to a well-ordered write followed by an
+ * eviction and a write. On the other hand, if the simultaneous
+ * writes result in a corrupt entry, that's not really any
+ * different than both entries being evicted, (since within the
+ * guarantees of the cryptographic hash, a corrupt entry is
+ * unlikely to ever match a real cache key).
+ */
+ cache->index_mmap = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0);
+ if (cache->index_mmap == MAP_FAILED)
+ goto path_fail;
+ cache->index_mmap_size = size;
+
+ cache->size = (uint64_t *) cache->index_mmap;
+ cache->stored_keys = cache->index_mmap + sizeof(uint64_t);
+
+ max_size = 0;
+
+ max_size_str = getenv("MESA_GLSL_CACHE_MAX_SIZE");
+ if (max_size_str) {
+ char *end;
+ max_size = strtoul(max_size_str, &end, 10);
+ if (end == max_size_str) {
+ max_size = 0;
+ } else {
+ switch (*end) {
+ case 'K':
+ case 'k':
+ max_size *= 1024;
+ break;
+ case 'M':
+ case 'm':
+ max_size *= 1024*1024;
+ break;
+ case '\0':
+ case 'G':
+ case 'g':
+ default:
+ max_size *= 1024*1024*1024;
+ break;
+ }
+ }
+ }
+
+ /* Default to 1GB for maximum cache size. */
+ if (max_size == 0) {
+ max_size = 1024*1024*1024;
+ }
+
+ cache->max_size = max_size;
+
+ /* 4 threads were chosen below because just about all modern CPUs currently
+ * available that run Mesa have *at least* 4 cores. For these CPUs allowing
+ * more threads can result in the queue being processed faster, thus
+ * avoiding excessive memory use due to a backlog of cache entries building
+ * up in the queue. Since we set the UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY
+ * flag this should have little negative impact on low core systems.
+ *
+ * The queue will resize automatically when it's full, so adding new jobs
+ * doesn't stall.
+ */
+ util_queue_init(&cache->cache_queue, "disk$", 32, 4,
+ UTIL_QUEUE_INIT_RESIZE_IF_FULL |
+ UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY |
+ UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY);
+
+ cache->path_init_failed = false;
+
+ path_fail:
+
+ if (fd != -1)
+ close(fd);
+
+ cache->driver_keys_blob_size = cv_size;
+
+ /* Create driver id keys */
+ size_t id_size = strlen(driver_id) + 1;
+ size_t gpu_name_size = strlen(gpu_name) + 1;
+ cache->driver_keys_blob_size += id_size;
+ cache->driver_keys_blob_size += gpu_name_size;
+
+ /* We sometimes store entire structs that contain pointers in the cache;
+ * use the pointer size as a key to avoid hard-to-debug issues.
+ */
+ uint8_t ptr_size = sizeof(void *);
+ size_t ptr_size_size = sizeof(ptr_size);
+ cache->driver_keys_blob_size += ptr_size_size;
+
+ size_t driver_flags_size = sizeof(driver_flags);
+ cache->driver_keys_blob_size += driver_flags_size;
+
+ cache->driver_keys_blob =
+ ralloc_size(cache, cache->driver_keys_blob_size);
+ if (!cache->driver_keys_blob)
+ goto fail;
+
+ uint8_t *drv_key_blob = cache->driver_keys_blob;
+ DRV_KEY_CPY(drv_key_blob, &cache_version, cv_size)
+ DRV_KEY_CPY(drv_key_blob, driver_id, id_size)
+ DRV_KEY_CPY(drv_key_blob, gpu_name, gpu_name_size)
+ DRV_KEY_CPY(drv_key_blob, &ptr_size, ptr_size_size)
+ DRV_KEY_CPY(drv_key_blob, &driver_flags, driver_flags_size)
+
+ /* Seed our rand function */
+ s_rand_xorshift128plus(cache->seed_xorshift128plus, true);
+
+ ralloc_free(local);
+
+ return cache;
+
+ fail:
+ if (cache)
+ ralloc_free(cache);
+ ralloc_free(local);
+
+ return NULL;
+}
+
+void
+disk_cache_destroy(struct disk_cache *cache)
+{
+ if (cache && !cache->path_init_failed) {
+ util_queue_finish(&cache->cache_queue);
+ util_queue_destroy(&cache->cache_queue);
+ munmap(cache->index_mmap, cache->index_mmap_size);
+ }
+
+ ralloc_free(cache);
+}
+
+void
+disk_cache_wait_for_idle(struct disk_cache *cache)
+{
+ util_queue_finish(&cache->cache_queue);
+}
+
+/* Return a filename within the cache's directory corresponding to 'key'. The
+ * returned filename is ralloced with 'cache' as the parent context.
+ *
+ * Returns NULL if out of memory.
+ */
+static char *
+get_cache_file(struct disk_cache *cache, const cache_key key)
+{
+ char buf[41];
+ char *filename;
+
+ if (cache->path_init_failed)
+ return NULL;
+
+ _mesa_sha1_format(buf, key);
+ if (asprintf(&filename, "%s/%c%c/%s", cache->path, buf[0],
+ buf[1], buf + 2) == -1)
+ return NULL;
+
+ return filename;
+}
+
+/* Create the directory that will be needed for the cache file for \key.
+ *
+ * Obviously, the implementation here must closely match
+ * get_cache_file() above.
+ */
+static void
+make_cache_file_directory(struct disk_cache *cache, const cache_key key)
+{
+ char *dir;
+ char buf[41];
+
+ _mesa_sha1_format(buf, key);
+ if (asprintf(&dir, "%s/%c%c", cache->path, buf[0], buf[1]) == -1)
+ return;
+
+ mkdir_if_needed(dir);
+ free(dir);
+}
+
+/* Given a directory path and predicate function, find the entry with
+ * the oldest access time in that directory for which the predicate
+ * returns true.
+ *
+ * Returns: A malloc'ed string for the path to the chosen file, (or
+ * NULL on any error). The caller should free the string when
+ * finished.
+ */
+static char *
+choose_lru_file_matching(const char *dir_path,
+ bool (*predicate)(const char *dir_path,
+ const struct stat *,
+ const char *, const size_t))
+{
+ DIR *dir;
+ struct dirent *entry;
+ char *filename;
+ char *lru_name = NULL;
+ time_t lru_atime = 0;
+
+ dir = opendir(dir_path);
+ if (dir == NULL)
+ return NULL;
+
+ while (1) {
+ entry = readdir(dir);
+ if (entry == NULL)
+ break;
+
+ struct stat sb;
+ if (fstatat(dirfd(dir), entry->d_name, &sb, 0) == 0) {
+ if (!lru_atime || (sb.st_atime < lru_atime)) {
+ size_t len = strlen(entry->d_name);
+
+ if (!predicate(dir_path, &sb, entry->d_name, len))
+ continue;
+
+ char *tmp = realloc(lru_name, len + 1);
+ if (tmp) {
+ lru_name = tmp;
+ memcpy(lru_name, entry->d_name, len + 1);
+ lru_atime = sb.st_atime;
+ }
+ }
+ }
+ }
+
+ if (lru_name == NULL) {
+ closedir(dir);
+ return NULL;
+ }
+
+ if (asprintf(&filename, "%s/%s", dir_path, lru_name) < 0)
+ filename = NULL;
+
+ free(lru_name);
+ closedir(dir);
+
+ return filename;
+}
+
+/* Is the entry a regular file whose name does not have a trailing
+ * ".tmp" suffix?
+ */
+static bool
+is_regular_non_tmp_file(const char *path, const struct stat *sb,
+ const char *d_name, const size_t len)
+{
+ if (!S_ISREG(sb->st_mode))
+ return false;
+
+ if (len >= 4 && strcmp(&d_name[len-4], ".tmp") == 0)
+ return false;
+
+ return true;
+}
+
+/* Returns the size of the deleted file, (or 0 on any error). */
+static size_t
+unlink_lru_file_from_directory(const char *path)
+{
+ struct stat sb;
+ char *filename;
+
+ filename = choose_lru_file_matching(path, is_regular_non_tmp_file);
+ if (filename == NULL)
+ return 0;
+
+ if (stat(filename, &sb) == -1) {
+ free (filename);
+ return 0;
+ }
+
+ unlink(filename);
+ free (filename);
+
+ return sb.st_blocks * 512;
+}
+
+/* Is the entry a directory with a two-character name (and not the
+ * special name "..")? We also return false if the directory is empty.
+ */
+static bool
+is_two_character_sub_directory(const char *path, const struct stat *sb,
+ const char *d_name, const size_t len)
+{
+ if (!S_ISDIR(sb->st_mode))
+ return false;
+
+ if (len != 2)
+ return false;
+
+ if (strcmp(d_name, "..") == 0)
+ return false;
+
+ char *subdir;
+ if (asprintf(&subdir, "%s/%s", path, d_name) == -1)
+ return false;
+ DIR *dir = opendir(subdir);
+ free(subdir);
+
+ if (dir == NULL)
+ return false;
+
+ unsigned subdir_entries = 0;
+ struct dirent *d;
+ while ((d = readdir(dir)) != NULL) {
+ if(++subdir_entries > 2)
+ break;
+ }
+ closedir(dir);
+
+ /* If dir only contains '.' and '..' it must be empty */
+ if (subdir_entries <= 2)
+ return false;
+
+ return true;
+}
+
+static void
+evict_lru_item(struct disk_cache *cache)
+{
+ char *dir_path;
+
+ /* With a reasonably-sized, full cache, (and with keys generated
+ * from a cryptographic hash), we can choose two random hex digits
+ * and reasonably expect the directory to exist with a file in it.
+ * Provides pseudo-LRU eviction to reduce checking all cache files.
+ */
+ uint64_t rand64 = rand_xorshift128plus(cache->seed_xorshift128plus);
+ if (asprintf(&dir_path, "%s/%02" PRIx64 , cache->path, rand64 & 0xff) < 0)
+ return;
+
+ size_t size = unlink_lru_file_from_directory(dir_path);
+
+ free(dir_path);
+
+ if (size) {
+ p_atomic_add(cache->size, - (uint64_t)size);
+ return;
+ }
+
+ /* In the case where the random choice of directory didn't find
+ * something, we choose the least recently accessed from the
+ * existing directories.
+ *
+ * Really, the only reason this code exists is to allow the unit
+ * tests to work, (which use an artificially-small cache to be able
+ * to force a single cached item to be evicted).
+ */
+ dir_path = choose_lru_file_matching(cache->path,
+ is_two_character_sub_directory);
+ if (dir_path == NULL)
+ return;
+
+ size = unlink_lru_file_from_directory(dir_path);
+
+ free(dir_path);
+
+ if (size)
+ p_atomic_add(cache->size, - (uint64_t)size);
+}
+
+void
+disk_cache_remove(struct disk_cache *cache, const cache_key key)
+{
+ struct stat sb;
+
+ char *filename = get_cache_file(cache, key);
+ if (filename == NULL) {
+ return;
+ }
+
+ if (stat(filename, &sb) == -1) {
+ free(filename);
+ return;
+ }
+
+ unlink(filename);
+ free(filename);
+
+ if (sb.st_blocks)
+ p_atomic_add(cache->size, - (uint64_t)sb.st_blocks * 512);
+}
+
+static ssize_t
+read_all(int fd, void *buf, size_t count)
+{
+ char *in = buf;
+ ssize_t read_ret;
+ size_t done;
+
+ for (done = 0; done < count; done += read_ret) {
+ read_ret = read(fd, in + done, count - done);
+ if (read_ret == -1 || read_ret == 0)
+ return -1;
+ }
+ return done;
+}
+
+static ssize_t
+write_all(int fd, const void *buf, size_t count)
+{
+ const char *out = buf;
+ ssize_t written;
+ size_t done;
+
+ for (done = 0; done < count; done += written) {
+ written = write(fd, out + done, count - done);
+ if (written == -1)
+ return -1;
+ }
+ return done;
+}
+
+/* From the zlib docs:
+ * "If the memory is available, buffers sizes on the order of 128K or 256K
+ * bytes should be used."
+ */
+#define BUFSIZE (256 * 1024)
+
+/**
+ * Compresses cache entry in memory and writes it to disk. Returns the size
+ * of the data written to disk.
+ */
+static size_t
+deflate_and_write_to_disk(const void *in_data, size_t in_data_size, int dest,
+ const char *filename)
+{
+#ifdef HAVE_ZSTD
+ /* from the zstd docs (https://facebook.github.io/zstd/zstd_manual.html):
+ * compression runs faster if `dstCapacity` >= `ZSTD_compressBound(srcSize)`.
+ */
+ size_t out_size = ZSTD_compressBound(in_data_size);
+ void *out = malloc(out_size);
+ if (!out)
+ return 0;
+
+ size_t ret = ZSTD_compress(out, out_size, in_data, in_data_size,
+ ZSTD_COMPRESSION_LEVEL);
+ if (ZSTD_isError(ret)) {
+ free(out);
+ return 0;
+ }
+ ssize_t written = write_all(dest, out, ret);
+ if (written == -1) {
+ free(out);
+ return 0;
+ }
+ free(out);
+ return ret;
+#else
+ unsigned char *out;
+
+ /* allocate deflate state */
+ z_stream strm;
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.next_in = (uint8_t *) in_data;
+ strm.avail_in = in_data_size;
+
+ int ret = deflateInit(&strm, Z_BEST_COMPRESSION);
+ if (ret != Z_OK)
+ return 0;
+
+ /* compress until end of in_data */
+ size_t compressed_size = 0;
+ int flush;
+
+ out = malloc(BUFSIZE * sizeof(unsigned char));
+ if (out == NULL) {
+ (void)deflateEnd(&strm);
+ return 0;
+ }
+
+ do {
+ int remaining = in_data_size - BUFSIZE;
+ flush = remaining > 0 ? Z_NO_FLUSH : Z_FINISH;
+ in_data_size -= BUFSIZE;
+
+ /* Run deflate() on input until the output buffer is not full (which
+ * means there is no more data to deflate).
+ */
+ do {
+ strm.avail_out = BUFSIZE;
+ strm.next_out = out;
+
+ ret = deflate(&strm, flush); /* no bad return value */
+ assert(ret != Z_STREAM_ERROR); /* state not clobbered */
+
+ size_t have = BUFSIZE - strm.avail_out;
+ compressed_size += have;
+
+ ssize_t written = write_all(dest, out, have);
+ if (written == -1) {
+ (void)deflateEnd(&strm);
+ free(out);
+ return 0;
+ }
+ } while (strm.avail_out == 0);
+
+ /* all input should be used */
+ assert(strm.avail_in == 0);
+
+ } while (flush != Z_FINISH);
+
+ /* stream should be complete */
+ assert(ret == Z_STREAM_END);
+
+ /* clean up and return */
+ (void)deflateEnd(&strm);
+ free(out);
+ return compressed_size;
+#endif
+}
+
+static struct disk_cache_put_job *
+create_put_job(struct disk_cache *cache, const cache_key key,
+ const void *data, size_t size,
+ struct cache_item_metadata *cache_item_metadata)
+{
+ struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *)
+ malloc(sizeof(struct disk_cache_put_job) + size);
+
+ if (dc_job) {
+ dc_job->cache = cache;
+ memcpy(dc_job->key, key, sizeof(cache_key));
+ dc_job->data = dc_job + 1;
+ memcpy(dc_job->data, data, size);
+ dc_job->size = size;
+
+ /* Copy the cache item metadata */
+ if (cache_item_metadata) {
+ dc_job->cache_item_metadata.type = cache_item_metadata->type;
+ if (cache_item_metadata->type == CACHE_ITEM_TYPE_GLSL) {
+ dc_job->cache_item_metadata.num_keys =
+ cache_item_metadata->num_keys;
+ dc_job->cache_item_metadata.keys = (cache_key *)
+ malloc(cache_item_metadata->num_keys * sizeof(cache_key));
+
+ if (!dc_job->cache_item_metadata.keys)
+ goto fail;
+
+ memcpy(dc_job->cache_item_metadata.keys,
+ cache_item_metadata->keys,
+ sizeof(cache_key) * cache_item_metadata->num_keys);
+ }
+ } else {
+ dc_job->cache_item_metadata.type = CACHE_ITEM_TYPE_UNKNOWN;
+ dc_job->cache_item_metadata.keys = NULL;
+ }
+ }
+
+ return dc_job;
+
+fail:
+ free(dc_job);
+
+ return NULL;
+}
+
+static void
+destroy_put_job(void *job, int thread_index)
+{
+ if (job) {
+ struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
+ free(dc_job->cache_item_metadata.keys);
+
+ free(job);
+ }
+}
+
+struct cache_entry_file_data {
+ uint32_t crc32;
+ uint32_t uncompressed_size;
+};
+
+static void
+cache_put(void *job, int thread_index)
+{
+ assert(job);
+
+ int fd = -1, fd_final = -1, err, ret;
+ unsigned i = 0;
+ char *filename = NULL, *filename_tmp = NULL;
+ struct disk_cache_put_job *dc_job = (struct disk_cache_put_job *) job;
+
+ filename = get_cache_file(dc_job->cache, dc_job->key);
+ if (filename == NULL)
+ goto done;
+
+ /* If the cache is too large, evict something else first. */
+ while (*dc_job->cache->size + dc_job->size > dc_job->cache->max_size &&
+ i < 8) {
+ evict_lru_item(dc_job->cache);
+ i++;
+ }
+
+ /* Write to a temporary file to allow for an atomic rename to the
+ * final destination filename, (to prevent any readers from seeing
+ * a partially written file).
+ */
+ if (asprintf(&filename_tmp, "%s.tmp", filename) == -1)
+ goto done;
+
+ fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
+
+ /* Make the two-character subdirectory within the cache as needed. */
+ if (fd == -1) {
+ if (errno != ENOENT)
+ goto done;
+
+ make_cache_file_directory(dc_job->cache, dc_job->key);
+
+ fd = open(filename_tmp, O_WRONLY | O_CLOEXEC | O_CREAT, 0644);
+ if (fd == -1)
+ goto done;
+ }
+
+ /* With the temporary file open, we take an exclusive flock on
+ * it. If the flock fails, then another process still has the file
+ * open with the flock held. So just let that file be responsible
+ * for writing the file.
+ */
+#ifdef HAVE_FLOCK
+ err = flock(fd, LOCK_EX | LOCK_NB);
+#else
+ struct flock lock = {
+ .l_start = 0,
+ .l_len = 0, /* entire file */
+ .l_type = F_WRLCK,
+ .l_whence = SEEK_SET
+ };
+ err = fcntl(fd, F_SETLK, &lock);
+#endif
+ if (err == -1)
+ goto done;
+
+ /* Now that we have the lock on the open temporary file, we can
+ * check to see if the destination file already exists. If so,
+ * another process won the race between when we saw that the file
+ * didn't exist and now. In this case, we don't do anything more,
+ * (to ensure the size accounting of the cache doesn't get off).
+ */
+ fd_final = open(filename, O_RDONLY | O_CLOEXEC);
+ if (fd_final != -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ /* OK, we're now on the hook to write out a file that we know is
+ * not in the cache, and is also not being written out to the cache
+ * by some other process.
+ */
+
+ /* Write the driver_keys_blob; this can be used to find information about
+ * the Mesa version that produced the entry or deal with hash collisions,
+ * should that ever become a real problem.
+ */
+ ret = write_all(fd, dc_job->cache->driver_keys_blob,
+ dc_job->cache->driver_keys_blob_size);
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ /* Write the cache item metadata. This data can be used to deal with
+ * hash collisions, as well as providing useful information to 3rd party
+ * tools reading the cache files.
+ */
+ ret = write_all(fd, &dc_job->cache_item_metadata.type,
+ sizeof(uint32_t));
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ if (dc_job->cache_item_metadata.type == CACHE_ITEM_TYPE_GLSL) {
+ ret = write_all(fd, &dc_job->cache_item_metadata.num_keys,
+ sizeof(uint32_t));
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ ret = write_all(fd, dc_job->cache_item_metadata.keys[0],
+ dc_job->cache_item_metadata.num_keys *
+ sizeof(cache_key));
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+ }
+
+ /* Create CRC of the data. We will read this when restoring the cache and
+ * use it to check for corruption.
+ */
+ struct cache_entry_file_data cf_data;
+ cf_data.crc32 = util_hash_crc32(dc_job->data, dc_job->size);
+ cf_data.uncompressed_size = dc_job->size;
+
+ size_t cf_data_size = sizeof(cf_data);
+ ret = write_all(fd, &cf_data, cf_data_size);
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ /* Now, finally, write out the contents to the temporary file, then
+ * rename it atomically to the destination filename, and also
+ * perform an atomic increment of the total cache size.
+ */
+ size_t file_size = deflate_and_write_to_disk(dc_job->data, dc_job->size,
+ fd, filename_tmp);
+ if (file_size == 0) {
+ unlink(filename_tmp);
+ goto done;
+ }
+ ret = rename(filename_tmp, filename);
+ if (ret == -1) {
+ unlink(filename_tmp);
+ goto done;
+ }
+
+ struct stat sb;
+ if (stat(filename, &sb) == -1) {
+ /* Something went wrong; remove the file */
+ unlink(filename);
+ goto done;
+ }
+
+ p_atomic_add(dc_job->cache->size, sb.st_blocks * 512);
+
+ done:
+ if (fd_final != -1)
+ close(fd_final);
+ /* This close finally releases the flock, (now that the final file
+ * has been renamed into place and the size has been added).
+ */
+ if (fd != -1)
+ close(fd);
+ free(filename_tmp);
+ free(filename);
+}
+
+void
+disk_cache_put(struct disk_cache *cache, const cache_key key,
+ const void *data, size_t size,
+ struct cache_item_metadata *cache_item_metadata)
+{
+ if (cache->blob_put_cb) {
+ cache->blob_put_cb(key, CACHE_KEY_SIZE, data, size);
+ return;
+ }
+
+ if (cache->path_init_failed)
+ return;
+
+ struct disk_cache_put_job *dc_job =
+ create_put_job(cache, key, data, size, cache_item_metadata);
+
+ if (dc_job) {
+ util_queue_fence_init(&dc_job->fence);
+ util_queue_add_job(&cache->cache_queue, dc_job, &dc_job->fence,
+ cache_put, destroy_put_job, dc_job->size);
+ }
+}
+
+/**
+ * Decompresses cache entry, returns true if successful.
+ */
+static bool
+inflate_cache_data(uint8_t *in_data, size_t in_data_size,
+ uint8_t *out_data, size_t out_data_size)
+{
+#ifdef HAVE_ZSTD
+ size_t ret = ZSTD_decompress(out_data, out_data_size, in_data, in_data_size);
+ return !ZSTD_isError(ret);
+#else
+ z_stream strm;
+
+ /* allocate inflate state */
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+ strm.opaque = Z_NULL;
+ strm.next_in = in_data;
+ strm.avail_in = in_data_size;
+ strm.next_out = out_data;
+ strm.avail_out = out_data_size;
+
+ int ret = inflateInit(&strm);
+ if (ret != Z_OK)
+ return false;
+
+ ret = inflate(&strm, Z_NO_FLUSH);
+ assert(ret != Z_STREAM_ERROR); /* state not clobbered */
+
+ /* Unless there was an error we should have decompressed everything in one
+ * go as we know the uncompressed file size.
+ */
+ if (ret != Z_STREAM_END) {
+ (void)inflateEnd(&strm);
+ return false;
+ }
+ assert(strm.avail_out == 0);
+
+ /* clean up and return */
+ (void)inflateEnd(&strm);
+ return true;
+#endif
+}
+
+void *
+disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
+{
+ int fd = -1, ret;
+ struct stat sb;
+ char *filename = NULL;
+ uint8_t *data = NULL;
+ uint8_t *uncompressed_data = NULL;
+ uint8_t *file_header = NULL;
+
+ if (size)
+ *size = 0;
+
+ if (cache->blob_get_cb) {
+ /* This is what Android EGL defines as the maxValueSize in egl_cache_t
+ * class implementation.
+ */
+ const signed long max_blob_size = 64 * 1024;
+ void *blob = malloc(max_blob_size);
+ if (!blob)
+ return NULL;
+
+ signed long bytes =
+ cache->blob_get_cb(key, CACHE_KEY_SIZE, blob, max_blob_size);
+
+ if (!bytes) {
+ free(blob);
+ return NULL;
+ }
+
+ if (size)
+ *size = bytes;
+ return blob;
+ }
+
+ filename = get_cache_file(cache, key);
+ if (filename == NULL)
+ goto fail;
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ if (fd == -1)
+ goto fail;
+
+ if (fstat(fd, &sb) == -1)
+ goto fail;
+
+ data = malloc(sb.st_size);
+ if (data == NULL)
+ goto fail;
+
+ size_t ck_size = cache->driver_keys_blob_size;
+ file_header = malloc(ck_size);
+ if (!file_header)
+ goto fail;
+
+ if (sb.st_size < ck_size)
+ goto fail;
+
+ ret = read_all(fd, file_header, ck_size);
+ if (ret == -1)
+ goto fail;
+
+ /* Check for extremely unlikely hash collisions */
+ if (memcmp(cache->driver_keys_blob, file_header, ck_size) != 0) {
+ assert(!"Mesa cache keys mismatch!");
+ goto fail;
+ }
+
+ size_t cache_item_md_size = sizeof(uint32_t);
+ uint32_t md_type;
+ ret = read_all(fd, &md_type, cache_item_md_size);
+ if (ret == -1)
+ goto fail;
+
+ if (md_type == CACHE_ITEM_TYPE_GLSL) {
+ uint32_t num_keys;
+ cache_item_md_size += sizeof(uint32_t);
+ ret = read_all(fd, &num_keys, sizeof(uint32_t));
+ if (ret == -1)
+ goto fail;
+
+ /* The cache item metadata is currently just used for distributing
+ * precompiled shaders; it is not used by Mesa itself, so just skip it
+ * for now.
+ * TODO: pass the metadata back to the caller and do some basic
+ * validation.
+ */
+ cache_item_md_size += num_keys * sizeof(cache_key);
+ ret = lseek(fd, num_keys * sizeof(cache_key), SEEK_CUR);
+ if (ret == -1)
+ goto fail;
+ }
+
+ /* Load the CRC that was created when the file was written. */
+ struct cache_entry_file_data cf_data;
+ size_t cf_data_size = sizeof(cf_data);
+ ret = read_all(fd, &cf_data, cf_data_size);
+ if (ret == -1)
+ goto fail;
+
+ /* Load the actual cache data. */
+ size_t cache_data_size =
+ sb.st_size - cf_data_size - ck_size - cache_item_md_size;
+ ret = read_all(fd, data, cache_data_size);
+ if (ret == -1)
+ goto fail;
+
+ /* Uncompress the cache data */
+ uncompressed_data = malloc(cf_data.uncompressed_size);
+ if (!uncompressed_data)
+ goto fail;
+
+ if (!inflate_cache_data(data, cache_data_size, uncompressed_data,
+ cf_data.uncompressed_size))
+ goto fail;
+
+ /* Check the data for corruption */
+ if (cf_data.crc32 != util_hash_crc32(uncompressed_data,
+ cf_data.uncompressed_size))
+ goto fail;
+
+ free(data);
+ free(filename);
+ free(file_header);
+ close(fd);
+
+ if (size)
+ *size = cf_data.uncompressed_size;
+
+ return uncompressed_data;
+
+ fail:
+ if (data)
+ free(data);
+ if (uncompressed_data)
+ free(uncompressed_data);
+ if (filename)
+ free(filename);
+ if (file_header)
+ free(file_header);
+ if (fd != -1)
+ close(fd);
+
+ return NULL;
+}
+
+void
+disk_cache_put_key(struct disk_cache *cache, const cache_key key)
+{
+ const uint32_t *key_chunk = (const uint32_t *) key;
+ int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
+ unsigned char *entry;
+
+ if (cache->blob_put_cb) {
+ cache->blob_put_cb(key, CACHE_KEY_SIZE, key_chunk, sizeof(uint32_t));
+ return;
+ }
+
+ if (cache->path_init_failed)
+ return;
+
+ entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
+
+ memcpy(entry, key, CACHE_KEY_SIZE);
+}
+
+/* This function lets us test whether a given key was previously
+ * stored in the cache with disk_cache_put_key(). The implementation is
+ * efficient in that it avoids syscalls and disk access. It's not
+ * race-free, but the races are benign. If we race with someone else
+ * calling disk_cache_put_key, then that's just an extra cache miss and an
+ * extra recompile.
+ */
+bool
+disk_cache_has_key(struct disk_cache *cache, const cache_key key)
+{
+ const uint32_t *key_chunk = (const uint32_t *) key;
+ int i = CPU_TO_LE32(*key_chunk) & CACHE_INDEX_KEY_MASK;
+ unsigned char *entry;
+
+ if (cache->blob_get_cb) {
+ uint32_t blob;
+ return cache->blob_get_cb(key, CACHE_KEY_SIZE, &blob, sizeof(uint32_t));
+ }
+
+ if (cache->path_init_failed)
+ return false;
+
+ entry = &cache->stored_keys[i * CACHE_KEY_SIZE];
+
+ return memcmp(entry, key, CACHE_KEY_SIZE) == 0;
+}
+
+void
+disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
+ cache_key key)
+{
+ struct mesa_sha1 ctx;
+
+ _mesa_sha1_init(&ctx);
+ _mesa_sha1_update(&ctx, cache->driver_keys_blob,
+ cache->driver_keys_blob_size);
+ _mesa_sha1_update(&ctx, data, size);
+ _mesa_sha1_final(&ctx, key);
+}
+
+void
+disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
+ disk_cache_get_cb get)
+{
+ cache->blob_put_cb = put;
+ cache->blob_get_cb = get;
+}
+
+#endif /* ENABLE_SHADER_CACHE */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.h b/third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.h
new file mode 100644
index 0000000000..09b316e6e8
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/disk_cache.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef DISK_CACHE_H
+#define DISK_CACHE_H
+
+#ifdef HAVE_DLFCN_H
+#include <dlfcn.h>
+#include <stdio.h>
+#include "util/build_id.h"
+#endif
+#include <assert.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+#include "util/mesa-sha1.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Size of cache keys in bytes. */
+#define CACHE_KEY_SIZE 20
+
+#define CACHE_DIR_NAME "mesa_shader_cache"
+
+typedef uint8_t cache_key[CACHE_KEY_SIZE];
+
+/* WARNING: 3rd party applications might be reading the cache item metadata.
+ * Do not change these values without making the change widely known.
+ * Please contact Valve developers and make them aware of this change.
+ */
+#define CACHE_ITEM_TYPE_UNKNOWN 0x0
+#define CACHE_ITEM_TYPE_GLSL 0x1
+
+typedef void
+(*disk_cache_put_cb) (const void *key, signed long keySize,
+ const void *value, signed long valueSize);
+
+typedef signed long
+(*disk_cache_get_cb) (const void *key, signed long keySize,
+ void *value, signed long valueSize);
+
+struct cache_item_metadata {
+ /**
+ * The cache item type. This could be used to identify a GLSL cache item,
+ * a certain type of IR (tgsi, nir, etc), or signal that it is the final
+ * binary form of the shader.
+ */
+ uint32_t type;
+
+ /** GLSL cache item metadata */
+ cache_key *keys; /* sha1 list of shaders that make up the cache item */
+ uint32_t num_keys;
+};
+
+struct disk_cache;
+
+static inline char *
+disk_cache_format_hex_id(char *buf, const uint8_t *hex_id, unsigned size)
+{
+ static const char hex_digits[] = "0123456789abcdef";
+ unsigned i;
+
+ for (i = 0; i < size; i += 2) {
+ buf[i] = hex_digits[hex_id[i >> 1] >> 4];
+ buf[i + 1] = hex_digits[hex_id[i >> 1] & 0x0f];
+ }
+ buf[i] = '\0';
+
+ return buf;
+}
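+
+/* A usage sketch (illustrative): format a 20-byte cache_key 'key' as hex.
+ * The buffer must hold 'size' hex digits plus a terminating NUL, so for a
+ * full key that is CACHE_KEY_SIZE * 2 + 1 bytes.
+ *
+ *    char buf[CACHE_KEY_SIZE * 2 + 1];
+ *    disk_cache_format_hex_id(buf, key, CACHE_KEY_SIZE * 2);
+ */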
+
+#ifdef HAVE_DLADDR
+static inline bool
+disk_cache_get_function_timestamp(void *ptr, uint32_t* timestamp)
+{
+ Dl_info info;
+ struct stat st;
+ if (!dladdr(ptr, &info) || !info.dli_fname) {
+ return false;
+ }
+ if (stat(info.dli_fname, &st)) {
+ return false;
+ }
+
+ if (!st.st_mtime) {
+ fprintf(stderr, "Mesa: The provided filesystem timestamp for the cache "
+ "is bogus! Disabling On-disk cache.\n");
+ return false;
+ }
+
+ *timestamp = st.st_mtime;
+
+ return true;
+}
+
+static inline bool
+disk_cache_get_function_identifier(void *ptr, struct mesa_sha1 *ctx)
+{
+ uint32_t timestamp;
+
+#ifdef HAVE_DL_ITERATE_PHDR
+ const struct build_id_note *note = NULL;
+ if ((note = build_id_find_nhdr_for_addr(ptr))) {
+ _mesa_sha1_update(ctx, build_id_data(note), build_id_length(note));
+ } else
+#endif
+ if (disk_cache_get_function_timestamp(ptr, &timestamp)) {
+ _mesa_sha1_update(ctx, &timestamp, sizeof(timestamp));
+ } else
+ return false;
+ return true;
+}
+#endif
+
+/* Provide inlined stub functions if the shader cache is disabled. */
+
+#ifdef ENABLE_SHADER_CACHE
+
+/**
+ * Create a new cache object.
+ *
+ * This function creates the handle necessary for all subsequent cache_*
+ * functions.
+ *
+ * This cache provides two distinct operations:
+ *
+ * o Storage and retrieval of arbitrary objects by cryptographic
+ * name (or "key"). This is provided via disk_cache_put() and
+ * disk_cache_get().
+ *
+ * o The ability to store a key alone and check later whether the
+ * key was previously stored. This is provided via disk_cache_put_key()
+ * and disk_cache_has_key().
+ *
+ * The put_key()/has_key() operations are conceptually identical to
+ * put()/get() with no data, but are provided separately to allow for
+ * a more efficient implementation.
+ *
+ * In all cases, the keys are sequences of 20 bytes. It is anticipated
+ * that callers will compute appropriate SHA-1 signatures for keys,
+ * (though nothing in this implementation directly relies on how the
+ * names are computed). See mesa-sha1.h and _mesa_sha1_compute for
+ * assistance in computing SHA-1 signatures.
+ */
+struct disk_cache *
+disk_cache_create(const char *gpu_name, const char *driver_id,
+ uint64_t driver_flags);
+
+/**
+ * Destroy a cache object, (freeing all associated resources).
+ */
+void
+disk_cache_destroy(struct disk_cache *cache);
+
+/* Wait for all previous disk_cache_put() calls to be processed (used for unit
+ * testing).
+ */
+void
+disk_cache_wait_for_idle(struct disk_cache *cache);
+
+/**
+ * Remove the item in the cache under the name \key.
+ */
+void
+disk_cache_remove(struct disk_cache *cache, const cache_key key);
+
+/**
+ * Store an item in the cache under the name \key.
+ *
+ * The item can be retrieved later with disk_cache_get(), (unless the item has
+ * been evicted in the interim).
+ *
+ * Any call to disk_cache_put() may cause an existing, random item to be
+ * evicted from the cache.
+ */
+void
+disk_cache_put(struct disk_cache *cache, const cache_key key,
+ const void *data, size_t size,
+ struct cache_item_metadata *cache_item_metadata);
+
+/**
+ * Retrieve an item previously stored in the cache with the name <key>.
+ *
+ * The item must have been previously stored with a call to disk_cache_put().
+ *
+ * If \size is non-NULL, then, on successful return, it will be set to the
+ * size of the object.
+ *
+ * \return A pointer to the stored object if found. NULL if the object
+ * is not found, or if any error occurs, (memory allocation failure,
+ * filesystem error, etc.). The returned data is malloc'ed so the
+ * caller should free() it when finished.
+ */
+void *
+disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size);
+
+/**
+ * Store the name \key within the cache, (without any associated data).
+ *
+ * Later this key can be checked with disk_cache_has_key(), (unless the key
+ * has been evicted in the interim).
+ *
+ * Any call to disk_cache_put_key() may cause an existing, random key to be
+ * evicted from the cache.
+ */
+void
+disk_cache_put_key(struct disk_cache *cache, const cache_key key);
+
+/**
+ * Test whether the name \key was previously recorded in the cache.
+ *
+ * Return value: True if disk_cache_put_key() was previously called with
+ * \key, (and the key was not evicted in the interim).
+ *
+ * Note: disk_cache_has_key() will only return true for keys passed to
+ * disk_cache_put_key(). Specifically, a call to disk_cache_put() will not cause
+ * disk_cache_has_key() to return true for the same key.
+ */
+bool
+disk_cache_has_key(struct disk_cache *cache, const cache_key key);
+
+/**
+ * Compute the name \key from \data of given \size.
+ */
+void
+disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
+ cache_key key);
+
+void
+disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
+ disk_cache_get_cb get);
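+
+/* A typical put/get flow, sketched for illustration ("example-gpu" and
+ * "example-id" are hypothetical identifiers; 'blob' is a hypothetical
+ * shader binary of 'blob_size' bytes):
+ *
+ *    struct disk_cache *c = disk_cache_create("example-gpu", "example-id", 0);
+ *    if (c) {
+ *       cache_key key;
+ *       size_t size;
+ *       disk_cache_compute_key(c, blob, blob_size, key);
+ *       void *data = disk_cache_get(c, key, &size);
+ *       if (!data)
+ *          disk_cache_put(c, key, blob, blob_size, NULL);
+ *       free(data);
+ *       disk_cache_destroy(c);
+ *    }
+ */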
+
+#else
+
+static inline struct disk_cache *
+disk_cache_create(const char *gpu_name, const char *driver_id,
+ uint64_t driver_flags)
+{
+ return NULL;
+}
+
+static inline void
+disk_cache_destroy(struct disk_cache *cache) {
+ return;
+}
+
+static inline void
+disk_cache_put(struct disk_cache *cache, const cache_key key,
+ const void *data, size_t size,
+ struct cache_item_metadata *cache_item_metadata)
+{
+ return;
+}
+
+static inline void
+disk_cache_remove(struct disk_cache *cache, const cache_key key)
+{
+ return;
+}
+
+static inline uint8_t *
+disk_cache_get(struct disk_cache *cache, const cache_key key, size_t *size)
+{
+ return NULL;
+}
+
+static inline void
+disk_cache_put_key(struct disk_cache *cache, const cache_key key)
+{
+ return;
+}
+
+static inline bool
+disk_cache_has_key(struct disk_cache *cache, const cache_key key)
+{
+ return false;
+}
+
+static inline void
+disk_cache_compute_key(struct disk_cache *cache, const void *data, size_t size,
+ cache_key key)
+{
+ return;
+}
+
+static inline void
+disk_cache_set_callbacks(struct disk_cache *cache, disk_cache_put_cb put,
+ disk_cache_get_cb get)
+{
+ return;
+}
+
+#endif /* ENABLE_SHADER_CACHE */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* DISK_CACHE_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/fast_urem_by_const.h b/third_party/rust/glslopt/glsl-optimizer/src/util/fast_urem_by_const.h
new file mode 100644
index 0000000000..beb253d229
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/fast_urem_by_const.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright © 2010 Valve Software
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdint.h>
+
+/*
+ * Code for fast 32-bit unsigned remainder, based off of "Faster Remainder by
+ * Direct Computation: Applications to Compilers and Software Libraries,"
+ * available at https://arxiv.org/pdf/1902.01961.pdf.
+ *
+ * util_fast_urem32(n, d, REMAINDER_MAGIC(d)) returns the same thing as
+ * n % d for any unsigned n and d, however it compiles down to only a few
+ * multiplications, so it should be faster than plain uint32_t modulo if the
+ * same divisor is used many times.
+ */
+
+#define REMAINDER_MAGIC(divisor) \
+ ((uint64_t) ~0ull / (divisor) + 1)
+
+/*
+ * Get bits 64-96 of a 32x64-bit multiply. If __int128_t is available, we use
+ * it, which usually compiles down to one instruction on 64-bit architectures.
+ * Otherwise the fallback below does two 32x32->64 multiplies and a 64-bit
+ * add, which on 32-bit architectures compiles to a handful of instructions.
+ */
+
+static inline uint32_t
+_mul32by64_hi(uint32_t a, uint64_t b)
+{
+#ifdef HAVE_UINT128
+ return ((__uint128_t) b * a) >> 64;
+#else
+ /*
+ * Let b = b0 + 2^32 * b1. Then a * b = a * b0 + 2^32 * a * b1. We would
+ * have to do a 96-bit addition to get the full result, except that only
+ * one term has non-zero lower 32 bits, which means that to get the high 32
+ * bits, we only have to add the high 64 bits of each term. Unfortunately,
+ * we have to do the 64-bit addition in case the low 32 bits overflow.
+ */
+ uint32_t b0 = (uint32_t) b;
+ uint32_t b1 = b >> 32;
+ return ((((uint64_t) a * b0) >> 32) + (uint64_t) a * b1) >> 32;
+#endif
+}
+
+static inline uint32_t
+util_fast_urem32(uint32_t n, uint32_t d, uint64_t magic)
+{
+ uint64_t lowbits = magic * n;
+ uint32_t result = _mul32by64_hi(d, lowbits);
+ assert(result == n % d);
+ return result;
+}
+
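A short usage sketch (hypothetical function; assumes this header is included): the point of the magic constant is to pay for the division once per divisor and reuse it.

```c
#include <stdint.h>

/* Sum n % table_size over many keys, with the divisor hoisted out. */
static uint32_t
sum_of_remainders(const uint32_t *keys, unsigned n, uint32_t table_size)
{
   const uint64_t magic = REMAINDER_MAGIC(table_size); /* one division, once */
   uint32_t sum = 0;

   for (unsigned i = 0; i < n; i++)
      sum += util_fast_urem32(keys[i], table_size, magic);
   return sum;
}
```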
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/fnv1a.h b/third_party/rust/glslopt/glsl-optimizer/src/util/fnv1a.h
new file mode 100644
index 0000000000..0f92d0b0e4
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/fnv1a.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _FNV1A_H
+#define _FNV1A_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+enum {
+ _mesa_fnv32_1a_offset_bias = 2166136261u,
+};
+
+/**
+ * Quick FNV-1a hash implementation based on:
+ * http://www.isthe.com/chongo/tech/comp/fnv/
+ *
+ * FNV-1a may not be the best hash out there -- Jenkins's lookup3 is supposed
+ * to be quite good, and it probably beats FNV. But FNV has the advantage
+ * that it involves almost no code. For an improvement on both, see Paul
+ * Hsieh's http://www.azillionmonkeys.com/qed/hash.html
+ */
+static inline uint32_t
+_mesa_fnv32_1a_accumulate_block(uint32_t hash, const void *data, size_t size)
+{
+ const uint8_t *bytes = (const uint8_t *)data;
+
+ while (size-- != 0) {
+ hash ^= *bytes;
+ hash = hash * 0x01000193;
+ bytes++;
+ }
+
+ return hash;
+}
+
+#define _mesa_fnv32_1a_accumulate(hash, expr) \
+ _mesa_fnv32_1a_accumulate_block(hash, &(expr), sizeof(expr))
+
+#endif
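A small usage sketch of how the accumulate helpers compose (the `tex_state` struct and `hash_tex_state` are hypothetical; `<string.h>` is assumed for strlen):

```c
#include <string.h>

struct tex_state {
   uint32_t format;
   uint16_t width, height;
};

static uint32_t
hash_tex_state(const struct tex_state *s, const char *name)
{
   /* Seed with the offset bias, then fold in fixed- and variable-size data. */
   uint32_t hash = _mesa_fnv32_1a_offset_bias;
   hash = _mesa_fnv32_1a_accumulate(hash, *s);                 /* whole struct */
   hash = _mesa_fnv32_1a_accumulate_block(hash, name, strlen(name));
   return hash;
}
```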
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/format/u_format.h b/third_party/rust/glslopt/glsl-optimizer/src/util/format/u_format.h
new file mode 100644
index 0000000000..1b6f0b8828
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/format/u_format.h
@@ -0,0 +1,1665 @@
+/**************************************************************************
+ *
+ * Copyright 2009-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#ifndef U_FORMAT_H
+#define U_FORMAT_H
+
+
+#include "pipe/p_format.h"
+#include "pipe/p_defines.h"
+#include "util/u_debug.h"
+
+union pipe_color_union;
+struct pipe_screen;
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * Describe how to pack/unpack pixels into/from the prescribed format.
+ *
+ * XXX: This could be renamed to something like util_format_pack, or broken
+ * down into flags inside util_format_block that say exactly what we want.
+ */
+enum util_format_layout {
+ /**
+ * Formats with util_format_block::width == util_format_block::height == 1
+ * that can be described as an ordinary data structure.
+ */
+ UTIL_FORMAT_LAYOUT_PLAIN,
+
+ /**
+ * Formats with sub-sampled channels.
+ *
+ * This is for formats like YVYU where there is less than one sample per
+ * pixel.
+ */
+ UTIL_FORMAT_LAYOUT_SUBSAMPLED,
+
+ /**
+ * S3 Texture Compression formats.
+ */
+ UTIL_FORMAT_LAYOUT_S3TC,
+
+ /**
+ * Red-Green Texture Compression formats.
+ */
+ UTIL_FORMAT_LAYOUT_RGTC,
+
+ /**
+ * Ericsson Texture Compression
+ */
+ UTIL_FORMAT_LAYOUT_ETC,
+
+ /**
+ * BC6/7 Texture Compression
+ */
+ UTIL_FORMAT_LAYOUT_BPTC,
+
+ UTIL_FORMAT_LAYOUT_ASTC,
+
+ UTIL_FORMAT_LAYOUT_ATC,
+
+ /** Formats with 2 or more planes. */
+ UTIL_FORMAT_LAYOUT_PLANAR2,
+ UTIL_FORMAT_LAYOUT_PLANAR3,
+
+ UTIL_FORMAT_LAYOUT_FXT1 = 10,
+
+ /**
+ * Everything else that doesn't fit in any of the above layouts.
+ */
+ UTIL_FORMAT_LAYOUT_OTHER,
+};
+
+
+struct util_format_block
+{
+ /** Block width in pixels */
+ unsigned width;
+
+ /** Block height in pixels */
+ unsigned height;
+
+ /** Block depth in pixels */
+ unsigned depth;
+
+ /** Block size in bits */
+ unsigned bits;
+};
+
+
+enum util_format_type {
+ UTIL_FORMAT_TYPE_VOID = 0,
+ UTIL_FORMAT_TYPE_UNSIGNED = 1,
+ UTIL_FORMAT_TYPE_SIGNED = 2,
+ UTIL_FORMAT_TYPE_FIXED = 3,
+ UTIL_FORMAT_TYPE_FLOAT = 4
+};
+
+
+enum util_format_colorspace {
+ UTIL_FORMAT_COLORSPACE_RGB = 0,
+ UTIL_FORMAT_COLORSPACE_SRGB = 1,
+ UTIL_FORMAT_COLORSPACE_YUV = 2,
+ UTIL_FORMAT_COLORSPACE_ZS = 3
+};
+
+
+struct util_format_channel_description
+{
+ unsigned type:5; /**< UTIL_FORMAT_TYPE_x */
+ unsigned normalized:1;
+ unsigned pure_integer:1;
+ unsigned size:9; /**< bits per channel */
+   unsigned shift:16;          /**< number of bits from lsb */
+};
+
+
+struct util_format_description
+{
+ enum pipe_format format;
+
+ const char *name;
+
+ /**
+    * Short name, stripped of the prefix, lower case.
+ */
+ const char *short_name;
+
+ /**
+ * Pixel block dimensions.
+ */
+ struct util_format_block block;
+
+ enum util_format_layout layout;
+
+ /**
+ * The number of channels.
+ */
+ unsigned nr_channels:3;
+
+ /**
+ * Whether all channels have the same number of (whole) bytes and type.
+ */
+ unsigned is_array:1;
+
+ /**
+ * Whether the pixel format can be described as a bitfield structure.
+ *
+ * In particular:
+ * - pixel depth must be 8, 16, or 32 bits;
+ * - all channels must be unsigned, signed, or void
+ */
+ unsigned is_bitmask:1;
+
+ /**
+ * Whether channels have mixed types (ignoring UTIL_FORMAT_TYPE_VOID).
+ */
+ unsigned is_mixed:1;
+
+ /**
+ * Whether the format contains UNORM channels
+ */
+ unsigned is_unorm:1;
+
+ /**
+ * Whether the format contains SNORM channels
+ */
+ unsigned is_snorm:1;
+
+ /**
+ * Input channel description, in the order XYZW.
+ *
+ * Only valid for UTIL_FORMAT_LAYOUT_PLAIN formats.
+ *
+ * If each channel is accessed as an individual N-byte value, X is always
+ * at the lowest address in memory, Y is always next, and so on. For all
+ * currently-defined formats, the N-byte value has native endianness.
+ *
+ * If instead a group of channels is accessed as a single N-byte value,
+ * the order of the channels within that value depends on endianness.
+ * For big-endian targets, X is the most significant subvalue,
+ * otherwise it is the least significant one.
+ *
+ * For example, if X is 8 bits and Y is 24 bits, the memory order is:
+ *
+ * 0 1 2 3
+ * little-endian: X Yl Ym Yu (l = lower, m = middle, u = upper)
+ * big-endian: X Yu Ym Yl
+ *
+ * If X is 5 bits, Y is 5 bits, Z is 5 bits and W is 1 bit, the layout is:
+ *
+ * 0 1
+ * msb lsb msb lsb
+ * little-endian: YYYXXXXX WZZZZZYY
+ * big-endian: XXXXXYYY YYZZZZZW
+ */
+ struct util_format_channel_description channel[4];
+
+ /**
+ * Output channel swizzle.
+ *
+ * The order is either:
+ * - RGBA
+ * - YUV(A)
+ * - ZS
+ * depending on the colorspace.
+ */
+ unsigned char swizzle[4];
+
+ /**
+ * Colorspace transformation.
+ */
+ enum util_format_colorspace colorspace;
+
+ /**
+ * Unpack pixel blocks to R8G8B8A8_UNORM.
+ * Note: strides are in bytes.
+ *
+ * Only defined for non-depth-stencil formats.
+ */
+ void
+ (*unpack_rgba_8unorm)(uint8_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Pack pixel blocks from R8G8B8A8_UNORM.
+ * Note: strides are in bytes.
+ *
+ * Only defined for non-depth-stencil formats.
+ */
+ void
+ (*pack_rgba_8unorm)(uint8_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Fetch a single pixel (i, j) from a block.
+ *
+ * XXX: Only defined for a very few select formats.
+ */
+ void
+ (*fetch_rgba_8unorm)(uint8_t *dst,
+ const uint8_t *src,
+ unsigned i, unsigned j);
+
+ /**
+ * Unpack pixel blocks to R32G32B32A32_FLOAT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for non-depth-stencil formats.
+ */
+ void
+ (*unpack_rgba_float)(float *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Pack pixel blocks from R32G32B32A32_FLOAT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for non-depth-stencil formats.
+ */
+ void
+ (*pack_rgba_float)(uint8_t *dst, unsigned dst_stride,
+ const float *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Fetch a single pixel (i, j) from a block.
+ *
+ * Only defined for non-depth-stencil and non-integer formats.
+ */
+ void
+ (*fetch_rgba_float)(float *dst,
+ const uint8_t *src,
+ unsigned i, unsigned j);
+
+ /**
+ * Unpack pixels to Z32_UNORM.
+ * Note: strides are in bytes.
+ *
+ * Only defined for depth formats.
+ */
+ void
+ (*unpack_z_32unorm)(uint32_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Pack pixels from Z32_FLOAT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for depth formats.
+ */
+ void
+ (*pack_z_32unorm)(uint8_t *dst, unsigned dst_stride,
+ const uint32_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Unpack pixels to Z32_FLOAT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for depth formats.
+ */
+ void
+ (*unpack_z_float)(float *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Pack pixels from Z32_FLOAT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for depth formats.
+ */
+ void
+ (*pack_z_float)(uint8_t *dst, unsigned dst_stride,
+ const float *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Unpack pixels to S8_UINT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for stencil formats.
+ */
+ void
+ (*unpack_s_8uint)(uint8_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Pack pixels from S8_UINT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for stencil formats.
+ */
+ void
+ (*pack_s_8uint)(uint8_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Unpack pixel blocks to R32G32B32A32_UINT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for INT formats.
+ */
+ void
+ (*unpack_rgba_uint)(uint32_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ (*pack_rgba_uint)(uint8_t *dst, unsigned dst_stride,
+ const uint32_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Unpack pixel blocks to R32G32B32A32_SINT.
+ * Note: strides are in bytes.
+ *
+ * Only defined for INT formats.
+ */
+ void
+ (*unpack_rgba_sint)(int32_t *dst, unsigned dst_stride,
+ const uint8_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ void
+ (*pack_rgba_sint)(uint8_t *dst, unsigned dst_stride,
+ const int32_t *src, unsigned src_stride,
+ unsigned width, unsigned height);
+
+ /**
+ * Fetch a single pixel (i, j) from a block.
+ *
+ * Only defined for unsigned (pure) integer formats.
+ */
+ void
+ (*fetch_rgba_uint)(uint32_t *dst,
+ const uint8_t *src,
+ unsigned i, unsigned j);
+
+ /**
+ * Fetch a single pixel (i, j) from a block.
+ *
+ * Only defined for signed (pure) integer formats.
+ */
+ void
+ (*fetch_rgba_sint)(int32_t *dst,
+ const uint8_t *src,
+ unsigned i, unsigned j);
+};
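To make the descriptor fields concrete, here is a sketch (hypothetical checks, assuming <assert.h> is in scope) of what the description of a plain packed 16-bit format looks like:

```c
static void
inspect_b5g6r5(void)
{
   const struct util_format_description *desc =
      util_format_description(PIPE_FORMAT_B5G6R5_UNORM);

   /* A plain layout: one pixel per block, 16 bits, three channels. */
   assert(desc->layout == UTIL_FORMAT_LAYOUT_PLAIN);
   assert(desc->block.width == 1 && desc->block.height == 1);
   assert(desc->block.bits == 16);
   assert(desc->nr_channels == 3);
}
```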
+
+
+const struct util_format_description *
+util_format_description(enum pipe_format format);
+
+
+/*
+ * Format query functions.
+ */
+
+static inline const char *
+util_format_name(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return "PIPE_FORMAT_???";
+ }
+
+ return desc->name;
+}
+
+static inline const char *
+util_format_short_name(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return "???";
+ }
+
+ return desc->short_name;
+}
+
+/**
+ * Whether this format is plain, see UTIL_FORMAT_LAYOUT_PLAIN for more info.
+ */
+static inline boolean
+util_format_is_plain(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ if (!format) {
+ return FALSE;
+ }
+
+ return desc->layout == UTIL_FORMAT_LAYOUT_PLAIN ? TRUE : FALSE;
+}
+
+static inline boolean
+util_format_is_compressed(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return FALSE;
+ }
+
+ switch (desc->layout) {
+ case UTIL_FORMAT_LAYOUT_S3TC:
+ case UTIL_FORMAT_LAYOUT_RGTC:
+ case UTIL_FORMAT_LAYOUT_ETC:
+ case UTIL_FORMAT_LAYOUT_BPTC:
+ case UTIL_FORMAT_LAYOUT_ASTC:
+ case UTIL_FORMAT_LAYOUT_ATC:
+ case UTIL_FORMAT_LAYOUT_FXT1:
+ /* XXX add other formats in the future */
+ return TRUE;
+ default:
+ return FALSE;
+ }
+}
+
+static inline boolean
+util_format_is_s3tc(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return FALSE;
+ }
+
+ return desc->layout == UTIL_FORMAT_LAYOUT_S3TC ? TRUE : FALSE;
+}
+
+static inline boolean
+util_format_is_etc(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return FALSE;
+ }
+
+ return desc->layout == UTIL_FORMAT_LAYOUT_ETC ? TRUE : FALSE;
+}
+
+static inline boolean
+util_format_is_srgb(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+ return desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB;
+}
+
+static inline boolean
+util_format_has_depth(const struct util_format_description *desc)
+{
+ return desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS &&
+ desc->swizzle[0] != PIPE_SWIZZLE_NONE;
+}
+
+static inline boolean
+util_format_has_stencil(const struct util_format_description *desc)
+{
+ return desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS &&
+ desc->swizzle[1] != PIPE_SWIZZLE_NONE;
+}
+
+static inline boolean
+util_format_is_depth_or_stencil(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return FALSE;
+ }
+
+ return util_format_has_depth(desc) ||
+ util_format_has_stencil(desc);
+}
+
+static inline boolean
+util_format_is_depth_and_stencil(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return FALSE;
+ }
+
+ return util_format_has_depth(desc) &&
+ util_format_has_stencil(desc);
+}
+
+/**
+ * For depth-stencil formats, return the equivalent depth-only format.
+ */
+static inline enum pipe_format
+util_format_get_depth_only(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_Z24_UNORM_S8_UINT:
+ return PIPE_FORMAT_Z24X8_UNORM;
+
+ case PIPE_FORMAT_S8_UINT_Z24_UNORM:
+ return PIPE_FORMAT_X8Z24_UNORM;
+
+ case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
+ return PIPE_FORMAT_Z32_FLOAT;
+
+ default:
+ return format;
+ }
+}
+
+static inline boolean
+util_format_is_yuv(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return FALSE;
+ }
+
+ return desc->colorspace == UTIL_FORMAT_COLORSPACE_YUV;
+}
+
+/**
+ * Calculates the depth format type based upon the incoming format description.
+ */
+static inline unsigned
+util_get_depth_format_type(const struct util_format_description *desc)
+{
+ unsigned depth_channel = desc->swizzle[0];
+ if (desc->colorspace == UTIL_FORMAT_COLORSPACE_ZS &&
+ depth_channel != PIPE_SWIZZLE_NONE) {
+ return desc->channel[depth_channel].type;
+ } else {
+ return UTIL_FORMAT_TYPE_VOID;
+ }
+}
+
+
+/**
+ * Calculates the MRD for the depth format. MRD is used in depth bias
+ * for UNORM and unbound depth buffers. When the depth buffer is floating
+ * point, the depth bias calculation does not use the MRD. However, the
+ * default MRD will be 1.0 / ((1 << 24) - 1).
+ */
+double
+util_get_depth_format_mrd(const struct util_format_description *desc);
+
+
+/**
+ * Return whether this is an RGBA, Z, S, or combined ZS format.
+ * Useful for initializing pipe_blit_info::mask.
+ */
+static inline unsigned
+util_format_get_mask(enum pipe_format format)
+{
+ const struct util_format_description *desc =
+ util_format_description(format);
+
+ if (!desc)
+ return 0;
+
+ if (util_format_has_depth(desc)) {
+ if (util_format_has_stencil(desc)) {
+ return PIPE_MASK_ZS;
+ } else {
+ return PIPE_MASK_Z;
+ }
+ } else {
+ if (util_format_has_stencil(desc)) {
+ return PIPE_MASK_S;
+ } else {
+ return PIPE_MASK_RGBA;
+ }
+ }
+}
+
+/**
+ * Give the RGBA colormask of the channels that can be represented in this
+ * format.
+ *
+ * That is, the channels whose values are preserved.
+ */
+static inline unsigned
+util_format_colormask(const struct util_format_description *desc)
+{
+ unsigned colormask;
+ unsigned chan;
+
+ switch (desc->colorspace) {
+ case UTIL_FORMAT_COLORSPACE_RGB:
+ case UTIL_FORMAT_COLORSPACE_SRGB:
+ case UTIL_FORMAT_COLORSPACE_YUV:
+ colormask = 0;
+ for (chan = 0; chan < 4; ++chan) {
+ if (desc->swizzle[chan] < 4) {
+ colormask |= (1 << chan);
+ }
+ }
+ return colormask;
+ case UTIL_FORMAT_COLORSPACE_ZS:
+ return 0;
+ default:
+ assert(0);
+ return 0;
+ }
+}
+
+
+/**
+ * Checks if color mask covers every channel for the specified format
+ *
+ * @param desc a format description to check colormask with
+ * @param colormask a bit mask for channels, matches format of PIPE_MASK_RGBA
+ */
+static inline boolean
+util_format_colormask_full(const struct util_format_description *desc, unsigned colormask)
+{
+ return (~colormask & util_format_colormask(desc)) == 0;
+}
+
+
+boolean
+util_format_is_float(enum pipe_format format);
+
+
+boolean
+util_format_has_alpha(enum pipe_format format);
+
+
+boolean
+util_format_is_luminance(enum pipe_format format);
+
+boolean
+util_format_is_alpha(enum pipe_format format);
+
+boolean
+util_format_is_luminance_alpha(enum pipe_format format);
+
+
+boolean
+util_format_is_intensity(enum pipe_format format);
+
+boolean
+util_format_is_subsampled_422(enum pipe_format format);
+
+boolean
+util_format_is_pure_integer(enum pipe_format format);
+
+boolean
+util_format_is_pure_sint(enum pipe_format format);
+
+boolean
+util_format_is_pure_uint(enum pipe_format format);
+
+boolean
+util_format_is_snorm(enum pipe_format format);
+
+boolean
+util_format_is_unorm(enum pipe_format format);
+
+boolean
+util_format_is_snorm8(enum pipe_format format);
+
+/**
+ * Check if the src format can be blitted to the destination format with
+ * a simple memcpy. For example, blitting from RGBA to RGBx is OK, but not
+ * the reverse.
+ */
+boolean
+util_is_format_compatible(const struct util_format_description *src_desc,
+ const struct util_format_description *dst_desc);
+
+/**
+ * Whether this format is an rgba8 variant.
+ *
+ * That is, any format that matches the pattern
+ *
+ *   PIPE_FORMAT_?8?8?8?8_UNORM
+ */
+static inline boolean
+util_format_is_rgba8_variant(const struct util_format_description *desc)
+{
+ unsigned chan;
+
+ if(desc->block.width != 1 ||
+ desc->block.height != 1 ||
+ desc->block.bits != 32)
+ return FALSE;
+
+ for(chan = 0; chan < 4; ++chan) {
+ if(desc->channel[chan].type != UTIL_FORMAT_TYPE_UNSIGNED &&
+ desc->channel[chan].type != UTIL_FORMAT_TYPE_VOID)
+ return FALSE;
+ if(desc->channel[chan].type == UTIL_FORMAT_TYPE_UNSIGNED &&
+ !desc->channel[chan].normalized)
+ return FALSE;
+ if(desc->channel[chan].size != 8)
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/**
+ * Return total bits needed for the pixel format per block.
+ */
+static inline uint
+util_format_get_blocksizebits(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 0;
+ }
+
+ return desc->block.bits;
+}
+
+/**
+ * Return bytes per block (not pixel) for the given format.
+ */
+static inline uint
+util_format_get_blocksize(enum pipe_format format)
+{
+ uint bits = util_format_get_blocksizebits(format);
+ uint bytes = bits / 8;
+
+ assert(bits % 8 == 0);
+ assert(bytes > 0);
+ if (bytes == 0) {
+ bytes = 1;
+ }
+
+ return bytes;
+}
+
+static inline uint
+util_format_get_blockwidth(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 1;
+ }
+
+ return desc->block.width;
+}
+
+static inline uint
+util_format_get_blockheight(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 1;
+ }
+
+ return desc->block.height;
+}
+
+static inline uint
+util_format_get_blockdepth(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ assert(desc);
+ if (!desc) {
+ return 1;
+ }
+
+ return desc->block.depth;
+}
+
+static inline unsigned
+util_format_get_nblocksx(enum pipe_format format,
+ unsigned x)
+{
+ unsigned blockwidth = util_format_get_blockwidth(format);
+ return (x + blockwidth - 1) / blockwidth;
+}
+
+static inline unsigned
+util_format_get_nblocksy(enum pipe_format format,
+ unsigned y)
+{
+ unsigned blockheight = util_format_get_blockheight(format);
+ return (y + blockheight - 1) / blockheight;
+}
+
+static inline unsigned
+util_format_get_nblocksz(enum pipe_format format,
+ unsigned z)
+{
+ unsigned blockdepth = util_format_get_blockdepth(format);
+ return (z + blockdepth - 1) / blockdepth;
+}
+
+static inline unsigned
+util_format_get_nblocks(enum pipe_format format,
+ unsigned width,
+ unsigned height)
+{
+ assert(util_format_get_blockdepth(format) == 1);
+ return util_format_get_nblocksx(format, width) * util_format_get_nblocksy(format, height);
+}
+
+static inline size_t
+util_format_get_stride(enum pipe_format format,
+ unsigned width)
+{
+ return (size_t)util_format_get_nblocksx(format, width) * util_format_get_blocksize(format);
+}
+
+static inline size_t
+util_format_get_2d_size(enum pipe_format format,
+ size_t stride,
+ unsigned height)
+{
+ return util_format_get_nblocksy(format, height) * stride;
+}
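A worked example of the block arithmetic above, using DXT1 (4x4 blocks of 64 bits) with hypothetical dimensions:

```c
/* 256 / 4 = 64 blocks per row; 64 blocks * 8 bytes = 512-byte stride;
 * 128 / 4 = 32 block rows; 32 * 512 = 16384 bytes for the level. */
static size_t
dxt1_level_size(void)
{
   unsigned width = 256, height = 128;
   size_t stride = util_format_get_stride(PIPE_FORMAT_DXT1_RGB, width);

   return util_format_get_2d_size(PIPE_FORMAT_DXT1_RGB, stride, height);
}
```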
+
+static inline uint
+util_format_get_component_bits(enum pipe_format format,
+ enum util_format_colorspace colorspace,
+ uint component)
+{
+ const struct util_format_description *desc = util_format_description(format);
+ enum util_format_colorspace desc_colorspace;
+
+ assert(format);
+ if (!format) {
+ return 0;
+ }
+
+ assert(component < 4);
+
+ /* Treat RGB and SRGB as equivalent. */
+ if (colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
+ colorspace = UTIL_FORMAT_COLORSPACE_RGB;
+ }
+ if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
+ desc_colorspace = UTIL_FORMAT_COLORSPACE_RGB;
+ } else {
+ desc_colorspace = desc->colorspace;
+ }
+
+ if (desc_colorspace != colorspace) {
+ return 0;
+ }
+
+ switch (desc->swizzle[component]) {
+ case PIPE_SWIZZLE_X:
+ return desc->channel[0].size;
+ case PIPE_SWIZZLE_Y:
+ return desc->channel[1].size;
+ case PIPE_SWIZZLE_Z:
+ return desc->channel[2].size;
+ case PIPE_SWIZZLE_W:
+ return desc->channel[3].size;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Given a linear RGB colorspace format, return the corresponding SRGB
+ * format, or PIPE_FORMAT_NONE if none.
+ */
+static inline enum pipe_format
+util_format_srgb(enum pipe_format format)
+{
+ if (util_format_is_srgb(format))
+ return format;
+
+ switch (format) {
+ case PIPE_FORMAT_L8_UNORM:
+ return PIPE_FORMAT_L8_SRGB;
+ case PIPE_FORMAT_R8_UNORM:
+ return PIPE_FORMAT_R8_SRGB;
+ case PIPE_FORMAT_L8A8_UNORM:
+ return PIPE_FORMAT_L8A8_SRGB;
+ case PIPE_FORMAT_R8G8_UNORM:
+ return PIPE_FORMAT_R8G8_SRGB;
+ case PIPE_FORMAT_R8G8B8_UNORM:
+ return PIPE_FORMAT_R8G8B8_SRGB;
+ case PIPE_FORMAT_B8G8R8_UNORM:
+ return PIPE_FORMAT_B8G8R8_SRGB;
+ case PIPE_FORMAT_A8B8G8R8_UNORM:
+ return PIPE_FORMAT_A8B8G8R8_SRGB;
+ case PIPE_FORMAT_X8B8G8R8_UNORM:
+ return PIPE_FORMAT_X8B8G8R8_SRGB;
+ case PIPE_FORMAT_B8G8R8A8_UNORM:
+ return PIPE_FORMAT_B8G8R8A8_SRGB;
+ case PIPE_FORMAT_B8G8R8X8_UNORM:
+ return PIPE_FORMAT_B8G8R8X8_SRGB;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return PIPE_FORMAT_A8R8G8B8_SRGB;
+ case PIPE_FORMAT_X8R8G8B8_UNORM:
+ return PIPE_FORMAT_X8R8G8B8_SRGB;
+ case PIPE_FORMAT_R8G8B8A8_UNORM:
+ return PIPE_FORMAT_R8G8B8A8_SRGB;
+ case PIPE_FORMAT_R8G8B8X8_UNORM:
+ return PIPE_FORMAT_R8G8B8X8_SRGB;
+ case PIPE_FORMAT_DXT1_RGB:
+ return PIPE_FORMAT_DXT1_SRGB;
+ case PIPE_FORMAT_DXT1_RGBA:
+ return PIPE_FORMAT_DXT1_SRGBA;
+ case PIPE_FORMAT_DXT3_RGBA:
+ return PIPE_FORMAT_DXT3_SRGBA;
+ case PIPE_FORMAT_DXT5_RGBA:
+ return PIPE_FORMAT_DXT5_SRGBA;
+ case PIPE_FORMAT_B5G6R5_UNORM:
+ return PIPE_FORMAT_B5G6R5_SRGB;
+ case PIPE_FORMAT_BPTC_RGBA_UNORM:
+ return PIPE_FORMAT_BPTC_SRGBA;
+ case PIPE_FORMAT_ETC2_RGB8:
+ return PIPE_FORMAT_ETC2_SRGB8;
+ case PIPE_FORMAT_ETC2_RGB8A1:
+ return PIPE_FORMAT_ETC2_SRGB8A1;
+ case PIPE_FORMAT_ETC2_RGBA8:
+ return PIPE_FORMAT_ETC2_SRGBA8;
+ case PIPE_FORMAT_ASTC_4x4:
+ return PIPE_FORMAT_ASTC_4x4_SRGB;
+ case PIPE_FORMAT_ASTC_5x4:
+ return PIPE_FORMAT_ASTC_5x4_SRGB;
+ case PIPE_FORMAT_ASTC_5x5:
+ return PIPE_FORMAT_ASTC_5x5_SRGB;
+ case PIPE_FORMAT_ASTC_6x5:
+ return PIPE_FORMAT_ASTC_6x5_SRGB;
+ case PIPE_FORMAT_ASTC_6x6:
+ return PIPE_FORMAT_ASTC_6x6_SRGB;
+ case PIPE_FORMAT_ASTC_8x5:
+ return PIPE_FORMAT_ASTC_8x5_SRGB;
+ case PIPE_FORMAT_ASTC_8x6:
+ return PIPE_FORMAT_ASTC_8x6_SRGB;
+ case PIPE_FORMAT_ASTC_8x8:
+ return PIPE_FORMAT_ASTC_8x8_SRGB;
+ case PIPE_FORMAT_ASTC_10x5:
+ return PIPE_FORMAT_ASTC_10x5_SRGB;
+ case PIPE_FORMAT_ASTC_10x6:
+ return PIPE_FORMAT_ASTC_10x6_SRGB;
+ case PIPE_FORMAT_ASTC_10x8:
+ return PIPE_FORMAT_ASTC_10x8_SRGB;
+ case PIPE_FORMAT_ASTC_10x10:
+ return PIPE_FORMAT_ASTC_10x10_SRGB;
+ case PIPE_FORMAT_ASTC_12x10:
+ return PIPE_FORMAT_ASTC_12x10_SRGB;
+ case PIPE_FORMAT_ASTC_12x12:
+ return PIPE_FORMAT_ASTC_12x12_SRGB;
+ case PIPE_FORMAT_ASTC_3x3x3:
+ return PIPE_FORMAT_ASTC_3x3x3_SRGB;
+ case PIPE_FORMAT_ASTC_4x3x3:
+ return PIPE_FORMAT_ASTC_4x3x3_SRGB;
+ case PIPE_FORMAT_ASTC_4x4x3:
+ return PIPE_FORMAT_ASTC_4x4x3_SRGB;
+ case PIPE_FORMAT_ASTC_4x4x4:
+ return PIPE_FORMAT_ASTC_4x4x4_SRGB;
+ case PIPE_FORMAT_ASTC_5x4x4:
+ return PIPE_FORMAT_ASTC_5x4x4_SRGB;
+ case PIPE_FORMAT_ASTC_5x5x4:
+ return PIPE_FORMAT_ASTC_5x5x4_SRGB;
+ case PIPE_FORMAT_ASTC_5x5x5:
+ return PIPE_FORMAT_ASTC_5x5x5_SRGB;
+ case PIPE_FORMAT_ASTC_6x5x5:
+ return PIPE_FORMAT_ASTC_6x5x5_SRGB;
+ case PIPE_FORMAT_ASTC_6x6x5:
+ return PIPE_FORMAT_ASTC_6x6x5_SRGB;
+ case PIPE_FORMAT_ASTC_6x6x6:
+ return PIPE_FORMAT_ASTC_6x6x6_SRGB;
+
+ default:
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+/**
+ * Given an sRGB format, return the corresponding linear colorspace format.
+ * For non-sRGB formats, return the format unchanged.
+ */
+static inline enum pipe_format
+util_format_linear(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_L8_SRGB:
+ return PIPE_FORMAT_L8_UNORM;
+ case PIPE_FORMAT_R8_SRGB:
+ return PIPE_FORMAT_R8_UNORM;
+ case PIPE_FORMAT_L8A8_SRGB:
+ return PIPE_FORMAT_L8A8_UNORM;
+ case PIPE_FORMAT_R8G8_SRGB:
+ return PIPE_FORMAT_R8G8_UNORM;
+ case PIPE_FORMAT_R8G8B8_SRGB:
+ return PIPE_FORMAT_R8G8B8_UNORM;
+ case PIPE_FORMAT_B8G8R8_SRGB:
+ return PIPE_FORMAT_B8G8R8_UNORM;
+ case PIPE_FORMAT_A8B8G8R8_SRGB:
+ return PIPE_FORMAT_A8B8G8R8_UNORM;
+ case PIPE_FORMAT_X8B8G8R8_SRGB:
+ return PIPE_FORMAT_X8B8G8R8_UNORM;
+ case PIPE_FORMAT_B8G8R8A8_SRGB:
+ return PIPE_FORMAT_B8G8R8A8_UNORM;
+ case PIPE_FORMAT_B8G8R8X8_SRGB:
+ return PIPE_FORMAT_B8G8R8X8_UNORM;
+ case PIPE_FORMAT_A8R8G8B8_SRGB:
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+ case PIPE_FORMAT_X8R8G8B8_SRGB:
+ return PIPE_FORMAT_X8R8G8B8_UNORM;
+ case PIPE_FORMAT_R8G8B8A8_SRGB:
+ return PIPE_FORMAT_R8G8B8A8_UNORM;
+ case PIPE_FORMAT_R8G8B8X8_SRGB:
+ return PIPE_FORMAT_R8G8B8X8_UNORM;
+ case PIPE_FORMAT_DXT1_SRGB:
+ return PIPE_FORMAT_DXT1_RGB;
+ case PIPE_FORMAT_DXT1_SRGBA:
+ return PIPE_FORMAT_DXT1_RGBA;
+ case PIPE_FORMAT_DXT3_SRGBA:
+ return PIPE_FORMAT_DXT3_RGBA;
+ case PIPE_FORMAT_DXT5_SRGBA:
+ return PIPE_FORMAT_DXT5_RGBA;
+ case PIPE_FORMAT_B5G6R5_SRGB:
+ return PIPE_FORMAT_B5G6R5_UNORM;
+ case PIPE_FORMAT_BPTC_SRGBA:
+ return PIPE_FORMAT_BPTC_RGBA_UNORM;
+ case PIPE_FORMAT_ETC2_SRGB8:
+ return PIPE_FORMAT_ETC2_RGB8;
+ case PIPE_FORMAT_ETC2_SRGB8A1:
+ return PIPE_FORMAT_ETC2_RGB8A1;
+ case PIPE_FORMAT_ETC2_SRGBA8:
+ return PIPE_FORMAT_ETC2_RGBA8;
+ case PIPE_FORMAT_ASTC_4x4_SRGB:
+ return PIPE_FORMAT_ASTC_4x4;
+ case PIPE_FORMAT_ASTC_5x4_SRGB:
+ return PIPE_FORMAT_ASTC_5x4;
+ case PIPE_FORMAT_ASTC_5x5_SRGB:
+ return PIPE_FORMAT_ASTC_5x5;
+ case PIPE_FORMAT_ASTC_6x5_SRGB:
+ return PIPE_FORMAT_ASTC_6x5;
+ case PIPE_FORMAT_ASTC_6x6_SRGB:
+ return PIPE_FORMAT_ASTC_6x6;
+ case PIPE_FORMAT_ASTC_8x5_SRGB:
+ return PIPE_FORMAT_ASTC_8x5;
+ case PIPE_FORMAT_ASTC_8x6_SRGB:
+ return PIPE_FORMAT_ASTC_8x6;
+ case PIPE_FORMAT_ASTC_8x8_SRGB:
+ return PIPE_FORMAT_ASTC_8x8;
+ case PIPE_FORMAT_ASTC_10x5_SRGB:
+ return PIPE_FORMAT_ASTC_10x5;
+ case PIPE_FORMAT_ASTC_10x6_SRGB:
+ return PIPE_FORMAT_ASTC_10x6;
+ case PIPE_FORMAT_ASTC_10x8_SRGB:
+ return PIPE_FORMAT_ASTC_10x8;
+ case PIPE_FORMAT_ASTC_10x10_SRGB:
+ return PIPE_FORMAT_ASTC_10x10;
+ case PIPE_FORMAT_ASTC_12x10_SRGB:
+ return PIPE_FORMAT_ASTC_12x10;
+ case PIPE_FORMAT_ASTC_12x12_SRGB:
+ return PIPE_FORMAT_ASTC_12x12;
+ case PIPE_FORMAT_ASTC_3x3x3_SRGB:
+ return PIPE_FORMAT_ASTC_3x3x3;
+ case PIPE_FORMAT_ASTC_4x3x3_SRGB:
+ return PIPE_FORMAT_ASTC_4x3x3;
+ case PIPE_FORMAT_ASTC_4x4x3_SRGB:
+ return PIPE_FORMAT_ASTC_4x4x3;
+ case PIPE_FORMAT_ASTC_4x4x4_SRGB:
+ return PIPE_FORMAT_ASTC_4x4x4;
+ case PIPE_FORMAT_ASTC_5x4x4_SRGB:
+ return PIPE_FORMAT_ASTC_5x4x4;
+ case PIPE_FORMAT_ASTC_5x5x4_SRGB:
+ return PIPE_FORMAT_ASTC_5x5x4;
+ case PIPE_FORMAT_ASTC_5x5x5_SRGB:
+ return PIPE_FORMAT_ASTC_5x5x5;
+ case PIPE_FORMAT_ASTC_6x5x5_SRGB:
+ return PIPE_FORMAT_ASTC_6x5x5;
+ case PIPE_FORMAT_ASTC_6x6x5_SRGB:
+ return PIPE_FORMAT_ASTC_6x6x5;
+ case PIPE_FORMAT_ASTC_6x6x6_SRGB:
+ return PIPE_FORMAT_ASTC_6x6x6;
+ default:
+ return format;
+ }
+}
+
+/**
+ * Given a depth-stencil format, return the corresponding stencil-only format.
+ * For stencil-only formats, return the format unchanged.
+ */
+static inline enum pipe_format
+util_format_stencil_only(enum pipe_format format)
+{
+ switch (format) {
+ /* mask out the depth component */
+ case PIPE_FORMAT_Z24_UNORM_S8_UINT:
+ return PIPE_FORMAT_X24S8_UINT;
+ case PIPE_FORMAT_S8_UINT_Z24_UNORM:
+ return PIPE_FORMAT_S8X24_UINT;
+ case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
+ return PIPE_FORMAT_X32_S8X24_UINT;
+
+ /* stencil only formats */
+ case PIPE_FORMAT_X24S8_UINT:
+ case PIPE_FORMAT_S8X24_UINT:
+ case PIPE_FORMAT_X32_S8X24_UINT:
+ case PIPE_FORMAT_S8_UINT:
+ return format;
+
+ default:
+ assert(0);
+ return PIPE_FORMAT_NONE;
+ }
+}
+
+/**
+ * Converts PIPE_FORMAT_*I* to PIPE_FORMAT_*R*.
+ * This is identity for non-intensity formats.
+ */
+static inline enum pipe_format
+util_format_intensity_to_red(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_I8_UNORM:
+ return PIPE_FORMAT_R8_UNORM;
+ case PIPE_FORMAT_I8_SNORM:
+ return PIPE_FORMAT_R8_SNORM;
+ case PIPE_FORMAT_I16_UNORM:
+ return PIPE_FORMAT_R16_UNORM;
+ case PIPE_FORMAT_I16_SNORM:
+ return PIPE_FORMAT_R16_SNORM;
+ case PIPE_FORMAT_I16_FLOAT:
+ return PIPE_FORMAT_R16_FLOAT;
+ case PIPE_FORMAT_I32_FLOAT:
+ return PIPE_FORMAT_R32_FLOAT;
+ case PIPE_FORMAT_I8_UINT:
+ return PIPE_FORMAT_R8_UINT;
+ case PIPE_FORMAT_I8_SINT:
+ return PIPE_FORMAT_R8_SINT;
+ case PIPE_FORMAT_I16_UINT:
+ return PIPE_FORMAT_R16_UINT;
+ case PIPE_FORMAT_I16_SINT:
+ return PIPE_FORMAT_R16_SINT;
+ case PIPE_FORMAT_I32_UINT:
+ return PIPE_FORMAT_R32_UINT;
+ case PIPE_FORMAT_I32_SINT:
+ return PIPE_FORMAT_R32_SINT;
+ default:
+ assert(!util_format_is_intensity(format));
+ return format;
+ }
+}
+
+/**
+ * Converts PIPE_FORMAT_*L* to PIPE_FORMAT_*R*.
+ * This is identity for non-luminance formats.
+ */
+static inline enum pipe_format
+util_format_luminance_to_red(enum pipe_format format)
+{
+ switch (format) {
+ case PIPE_FORMAT_L8_UNORM:
+ return PIPE_FORMAT_R8_UNORM;
+ case PIPE_FORMAT_L8_SNORM:
+ return PIPE_FORMAT_R8_SNORM;
+ case PIPE_FORMAT_L16_UNORM:
+ return PIPE_FORMAT_R16_UNORM;
+ case PIPE_FORMAT_L16_SNORM:
+ return PIPE_FORMAT_R16_SNORM;
+ case PIPE_FORMAT_L16_FLOAT:
+ return PIPE_FORMAT_R16_FLOAT;
+ case PIPE_FORMAT_L32_FLOAT:
+ return PIPE_FORMAT_R32_FLOAT;
+ case PIPE_FORMAT_L8_UINT:
+ return PIPE_FORMAT_R8_UINT;
+ case PIPE_FORMAT_L8_SINT:
+ return PIPE_FORMAT_R8_SINT;
+ case PIPE_FORMAT_L16_UINT:
+ return PIPE_FORMAT_R16_UINT;
+ case PIPE_FORMAT_L16_SINT:
+ return PIPE_FORMAT_R16_SINT;
+ case PIPE_FORMAT_L32_UINT:
+ return PIPE_FORMAT_R32_UINT;
+ case PIPE_FORMAT_L32_SINT:
+ return PIPE_FORMAT_R32_SINT;
+
+ case PIPE_FORMAT_LATC1_UNORM:
+ return PIPE_FORMAT_RGTC1_UNORM;
+ case PIPE_FORMAT_LATC1_SNORM:
+ return PIPE_FORMAT_RGTC1_SNORM;
+
+ case PIPE_FORMAT_L4A4_UNORM:
+ return PIPE_FORMAT_R4A4_UNORM;
+
+ case PIPE_FORMAT_L8A8_UNORM:
+ return PIPE_FORMAT_R8A8_UNORM;
+ case PIPE_FORMAT_L8A8_SNORM:
+ return PIPE_FORMAT_R8A8_SNORM;
+ case PIPE_FORMAT_L16A16_UNORM:
+ return PIPE_FORMAT_R16A16_UNORM;
+ case PIPE_FORMAT_L16A16_SNORM:
+ return PIPE_FORMAT_R16A16_SNORM;
+ case PIPE_FORMAT_L16A16_FLOAT:
+ return PIPE_FORMAT_R16A16_FLOAT;
+ case PIPE_FORMAT_L32A32_FLOAT:
+ return PIPE_FORMAT_R32A32_FLOAT;
+ case PIPE_FORMAT_L8A8_UINT:
+ return PIPE_FORMAT_R8A8_UINT;
+ case PIPE_FORMAT_L8A8_SINT:
+ return PIPE_FORMAT_R8A8_SINT;
+ case PIPE_FORMAT_L16A16_UINT:
+ return PIPE_FORMAT_R16A16_UINT;
+ case PIPE_FORMAT_L16A16_SINT:
+ return PIPE_FORMAT_R16A16_SINT;
+ case PIPE_FORMAT_L32A32_UINT:
+ return PIPE_FORMAT_R32A32_UINT;
+ case PIPE_FORMAT_L32A32_SINT:
+ return PIPE_FORMAT_R32A32_SINT;
+
+ /* We don't have compressed red-alpha variants for these. */
+ case PIPE_FORMAT_LATC2_UNORM:
+ case PIPE_FORMAT_LATC2_SNORM:
+ return PIPE_FORMAT_NONE;
+
+ default:
+ assert(!util_format_is_luminance(format) &&
+ !util_format_is_luminance_alpha(format));
+ return format;
+ }
+}
+
+static inline unsigned
+util_format_get_num_planes(enum pipe_format format)
+{
+ switch (util_format_description(format)->layout) {
+ case UTIL_FORMAT_LAYOUT_PLANAR3:
+ return 3;
+ case UTIL_FORMAT_LAYOUT_PLANAR2:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static inline enum pipe_format
+util_format_get_plane_format(enum pipe_format format, unsigned plane)
+{
+ switch (format) {
+ case PIPE_FORMAT_YV12:
+ case PIPE_FORMAT_YV16:
+ case PIPE_FORMAT_IYUV:
+ return PIPE_FORMAT_R8_UNORM;
+ case PIPE_FORMAT_NV12:
+ return !plane ? PIPE_FORMAT_R8_UNORM : PIPE_FORMAT_RG88_UNORM;
+ case PIPE_FORMAT_NV21:
+ return !plane ? PIPE_FORMAT_R8_UNORM : PIPE_FORMAT_GR88_UNORM;
+ case PIPE_FORMAT_P010:
+ case PIPE_FORMAT_P016:
+ return !plane ? PIPE_FORMAT_R16_UNORM : PIPE_FORMAT_R16G16_UNORM;
+ default:
+ return format;
+ }
+}
+
+static inline unsigned
+util_format_get_plane_width(enum pipe_format format, unsigned plane,
+ unsigned width)
+{
+ switch (format) {
+ case PIPE_FORMAT_YV12:
+ case PIPE_FORMAT_YV16:
+ case PIPE_FORMAT_IYUV:
+ case PIPE_FORMAT_NV12:
+ case PIPE_FORMAT_NV21:
+ case PIPE_FORMAT_P010:
+ case PIPE_FORMAT_P016:
+ return !plane ? width : (width + 1) / 2;
+ default:
+ return width;
+ }
+}
+
+static inline unsigned
+util_format_get_plane_height(enum pipe_format format, unsigned plane,
+ unsigned height)
+{
+ switch (format) {
+ case PIPE_FORMAT_YV12:
+ case PIPE_FORMAT_IYUV:
+ case PIPE_FORMAT_NV12:
+ case PIPE_FORMAT_NV21:
+ case PIPE_FORMAT_P010:
+ case PIPE_FORMAT_P016:
+ return !plane ? height : (height + 1) / 2;
+ case PIPE_FORMAT_YV16:
+ default:
+ return height;
+ }
+}
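For instance (a sketch with hypothetical dimensions), the helpers above describe a 1920x1080 NV12 surface as a full-resolution luma plane plus a half-resolution interleaved chroma plane:

```c
static void
nv12_plane_layout(void)
{
   assert(util_format_get_num_planes(PIPE_FORMAT_NV12) == 2);

   /* Plane 1 (CbCr) is halved in both axes, rounding up. */
   assert(util_format_get_plane_width(PIPE_FORMAT_NV12, 1, 1920) == 960);
   assert(util_format_get_plane_height(PIPE_FORMAT_NV12, 1, 1080) == 540);
}
```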
+
+bool util_format_planar_is_supported(struct pipe_screen *screen,
+ enum pipe_format format,
+ enum pipe_texture_target target,
+ unsigned sample_count,
+ unsigned storage_sample_count,
+ unsigned bind);
+
+/**
+ * Return the number of components stored.
+ * Formats with block size != 1x1 will always have 1 component (the block).
+ */
+static inline unsigned
+util_format_get_nr_components(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+ return desc->nr_channels;
+}
+
+/**
+ * Return the index of the first non-void channel,
+ * or -1 if there are no non-void channels.
+ */
+static inline int
+util_format_get_first_non_void_channel(enum pipe_format format)
+{
+ const struct util_format_description *desc = util_format_description(format);
+ int i;
+
+ for (i = 0; i < 4; i++)
+ if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID)
+ break;
+
+ if (i == 4)
+ return -1;
+
+ return i;
+}
+
+/**
+ * Whether this format is any 8-bit UNORM variant. Looser than
+ * util_format_is_rgba8_variant (also includes alpha textures, for instance).
+ */
+
+static inline bool
+util_format_is_unorm8(const struct util_format_description *desc)
+{
+ int c = util_format_get_first_non_void_channel(desc->format);
+
+ if (c == -1)
+ return false;
+
+ return desc->is_unorm && desc->is_array && desc->channel[c].size == 8;
+}
+
+static inline void
+util_format_unpack_z_float(enum pipe_format format, float *dst,
+ const void *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->unpack_z_float(dst, 0, (const uint8_t *)src, 0, w, 1);
+}
+
+static inline void
+util_format_unpack_z_32unorm(enum pipe_format format, uint32_t *dst,
+ const void *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->unpack_z_32unorm(dst, 0, (const uint8_t *)src, 0, w, 1);
+}
+
+static inline void
+util_format_unpack_s_8uint(enum pipe_format format, uint8_t *dst,
+ const void *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->unpack_s_8uint(dst, 0, (const uint8_t *)src, 0, w, 1);
+}
+
+static inline void
+util_format_unpack_rgba_float(enum pipe_format format, float *dst,
+ const void *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->unpack_rgba_float(dst, 0, (const uint8_t *)src, 0, w, 1);
+}
+
+/**
+ * Unpacks a row of color data to 32-bit RGBA, either integers for pure
+ * integer formats (sign-extended for signed data), or 32-bit floats.
+ */
+static inline void
+util_format_unpack_rgba(enum pipe_format format, void *dst,
+ const void *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ if (util_format_is_pure_uint(format))
+ desc->unpack_rgba_uint((uint32_t *)dst, 0, (const uint8_t *)src, 0, w, 1);
+ else if (util_format_is_pure_sint(format))
+ desc->unpack_rgba_sint((int32_t *)dst, 0, (const uint8_t *)src, 0, w, 1);
+ else
+ desc->unpack_rgba_float((float *)dst, 0, (const uint8_t *)src, 0, w, 1);
+}
+
+static inline void
+util_format_pack_z_float(enum pipe_format format, void *dst,
+ const float *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->pack_z_float((uint8_t *)dst, 0, src, 0, w, 1);
+}
+
+static inline void
+util_format_pack_z_32unorm(enum pipe_format format, void *dst,
+ const uint32_t *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->pack_z_32unorm((uint8_t *)dst, 0, src, 0, w, 1);
+}
+
+static inline void
+util_format_pack_s_8uint(enum pipe_format format, void *dst,
+ const uint8_t *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ desc->pack_s_8uint((uint8_t *)dst, 0, src, 0, w, 1);
+}
+
+/**
+ * Packs a row of color data from 32-bit RGBA, either integers for pure
+ * integer formats, or 32-bit floats. Values are clamped to the packed
+ * representation's range.
+ */
+static inline void
+util_format_pack_rgba(enum pipe_format format, void *dst,
+ const void *src, unsigned w)
+{
+ const struct util_format_description *desc = util_format_description(format);
+
+ if (util_format_is_pure_uint(format))
+ desc->pack_rgba_uint((uint8_t *)dst, 0, (const uint32_t *)src, 0, w, 1);
+ else if (util_format_is_pure_sint(format))
+ desc->pack_rgba_sint((uint8_t *)dst, 0, (const int32_t *)src, 0, w, 1);
+ else
+ desc->pack_rgba_float((uint8_t *)dst, 0, (const float *)src, 0, w, 1);
+}
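A minimal sketch of the dispatch above: unpacking a single R8G8B8A8_UNORM texel, which is not a pure-integer format, so the float path is taken.

```c
static void
unpack_one_texel(void)
{
   const uint8_t texel[4] = { 255, 128, 0, 255 };
   float rgba[4];

   util_format_unpack_rgba(PIPE_FORMAT_R8G8B8A8_UNORM, rgba, texel, 1);
   /* rgba is now approximately { 1.0f, 0.502f, 0.0f, 1.0f }. */
}
```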
+
+/*
+ * Format access functions.
+ */
+
+void
+util_format_read_4f(enum pipe_format format,
+ float *dst, unsigned dst_stride,
+ const void *src, unsigned src_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_write_4f(enum pipe_format format,
+ const float *src, unsigned src_stride,
+ void *dst, unsigned dst_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_read_4ub(enum pipe_format format,
+ uint8_t *dst, unsigned dst_stride,
+ const void *src, unsigned src_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_write_4ub(enum pipe_format format,
+ const uint8_t *src, unsigned src_stride,
+ void *dst, unsigned dst_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_read_4ui(enum pipe_format format,
+ unsigned *dst, unsigned dst_stride,
+ const void *src, unsigned src_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_write_4ui(enum pipe_format format,
+ const unsigned int *src, unsigned src_stride,
+ void *dst, unsigned dst_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_read_4i(enum pipe_format format,
+ int *dst, unsigned dst_stride,
+ const void *src, unsigned src_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+void
+util_format_write_4i(enum pipe_format format,
+ const int *src, unsigned src_stride,
+ void *dst, unsigned dst_stride,
+ unsigned x, unsigned y, unsigned w, unsigned h);
+
+/*
+ * Generic format conversion.
+ */
+
+boolean
+util_format_fits_8unorm(const struct util_format_description *format_desc);
+
+boolean
+util_format_translate(enum pipe_format dst_format,
+ void *dst, unsigned dst_stride,
+ unsigned dst_x, unsigned dst_y,
+ enum pipe_format src_format,
+ const void *src, unsigned src_stride,
+ unsigned src_x, unsigned src_y,
+ unsigned width, unsigned height);
+
+boolean
+util_format_translate_3d(enum pipe_format dst_format,
+ void *dst, unsigned dst_stride,
+ unsigned dst_slice_stride,
+ unsigned dst_x, unsigned dst_y,
+ unsigned dst_z,
+ enum pipe_format src_format,
+ const void *src, unsigned src_stride,
+ unsigned src_slice_stride,
+ unsigned src_x, unsigned src_y,
+ unsigned src_z, unsigned width,
+ unsigned height, unsigned depth);
+
+/*
+ * Swizzle operations.
+ */
+
+/* Compose two sets of swizzles.
+ * If V is a 4D vector and the function parameters represent functions that
+ * swizzle vector components, this holds:
+ * swz2(swz1(V)) = dst(V)
+ */
+void util_format_compose_swizzles(const unsigned char swz1[4],
+ const unsigned char swz2[4],
+ unsigned char dst[4]);
+
+/* Apply the swizzle provided in \param swz (which is one of PIPE_SWIZZLE_x)
+ * to \param src and store the result in \param dst.
+ * \param is_integer determines the value written for PIPE_SWIZZLE_1.
+ */
+void util_format_apply_color_swizzle(union pipe_color_union *dst,
+ const union pipe_color_union *src,
+ const unsigned char swz[4],
+ const boolean is_integer);
+
+void pipe_swizzle_4f(float *dst, const float *src,
+ const unsigned char swz[4]);
+
+void util_format_unswizzle_4f(float *dst, const float *src,
+ const unsigned char swz[4]);
+
+enum pipe_format
+util_format_snorm8_to_sint8(enum pipe_format format);
+
+
+extern void
+util_copy_rect(ubyte * dst, enum pipe_format format,
+ unsigned dst_stride, unsigned dst_x, unsigned dst_y,
+ unsigned width, unsigned height, const ubyte * src,
+ int src_stride, unsigned src_x, unsigned src_y);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* ! U_FORMAT_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/futex.h b/third_party/rust/glslopt/glsl-optimizer/src/util/futex.h
new file mode 100644
index 0000000000..4d712e2ef2
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/futex.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright © 2015 Intel
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UTIL_FUTEX_H
+#define UTIL_FUTEX_H
+
+#if defined(HAVE_LINUX_FUTEX_H)
+#define UTIL_FUTEX_SUPPORTED 1
+
+#include <limits.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+
+static inline long sys_futex(void *addr1, int op, int val1, const struct timespec *timeout, void *addr2, int val3)
+{
+ return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
+}
+
+static inline int futex_wake(uint32_t *addr, int count)
+{
+ return sys_futex(addr, FUTEX_WAKE, count, NULL, NULL, 0);
+}
+
+static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
+{
+ /* FUTEX_WAIT_BITSET with FUTEX_BITSET_MATCH_ANY is equivalent to
+ * FUTEX_WAIT, except that it treats the timeout as absolute. */
+ return sys_futex(addr, FUTEX_WAIT_BITSET, value, timeout, NULL,
+ FUTEX_BITSET_MATCH_ANY);
+}
+
+#elif defined(__FreeBSD__)
+#define UTIL_FUTEX_SUPPORTED 1
+
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/umtx.h>
+#include <sys/time.h>
+
+static inline int futex_wake(uint32_t *addr, int count)
+{
+ assert(count == (int)(uint32_t)count); /* Check that bits weren't discarded */
+ return _umtx_op(addr, UMTX_OP_WAKE, (uint32_t)count, NULL, NULL) == -1 ? errno : 0;
+}
+
+static inline int futex_wait(uint32_t *addr, int32_t value, struct timespec *timeout)
+{
+ void *uaddr = NULL, *uaddr2 = NULL;
+ struct _umtx_time tmo = {
+ ._flags = UMTX_ABSTIME,
+ ._clockid = CLOCK_MONOTONIC
+ };
+
+ assert(value == (int)(uint32_t)value); /* Check that bits weren't discarded */
+
+ if (timeout != NULL) {
+ tmo._timeout = *timeout;
+ uaddr = (void *)(uintptr_t)sizeof(tmo);
+ uaddr2 = (void *)&tmo;
+ }
+
+ return _umtx_op(addr, UMTX_OP_WAIT_UINT, (uint32_t)value, uaddr, uaddr2) == -1 ? errno : 0;
+}
+
+#elif defined(__OpenBSD__)
+#define UTIL_FUTEX_SUPPORTED 1
+
+#include <sys/time.h>
+#include <sys/futex.h>
+
+static inline int futex_wake(uint32_t *addr, int count)
+{
+ return futex(addr, FUTEX_WAKE, count, NULL, NULL);
+}
+
+static inline int futex_wait(uint32_t *addr, int32_t value, const struct timespec *timeout)
+{
+   struct timespec tsrel, tsnow;
+
+   if (timeout == NULL)
+      return futex(addr, FUTEX_WAIT, value, NULL, NULL);
+
+   clock_gettime(CLOCK_MONOTONIC, &tsnow);
+   timespecsub(timeout, &tsnow, &tsrel);
+   return futex(addr, FUTEX_WAIT, value, &tsrel, NULL);
+}
+
+#else
+#define UTIL_FUTEX_SUPPORTED 0
+#endif
+
+#endif /* UTIL_FUTEX_H */
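As an illustration only (not part of the header, and assuming a platform where UTIL_FUTEX_SUPPORTED is 1 and GCC/Clang __atomic builtins are available): the wait/wake pair typically parks a thread until a shared word changes, with the predicate re-checked to guard against spurious wakeups.

```c
#include <limits.h>
#include <stdint.h>

static uint32_t ready;   /* 0 = not ready, 1 = ready */

static void
wait_until_ready(void)
{
   /* futex_wait returns when *addr != value or on a spurious wakeup,
    * so always re-check the predicate in a loop. */
   while (__atomic_load_n(&ready, __ATOMIC_ACQUIRE) == 0)
      futex_wait(&ready, 0, NULL);   /* NULL timeout: wait indefinitely */
}

static void
signal_ready(void)
{
   __atomic_store_n(&ready, 1, __ATOMIC_RELEASE);
   futex_wake(&ready, INT_MAX);      /* wake every waiter */
}
```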
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/half_float.c b/third_party/rust/glslopt/glsl-optimizer/src/util/half_float.c
new file mode 100644
index 0000000000..aae690a56a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/half_float.c
@@ -0,0 +1,213 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ * Copyright 2015 Philip Taylor <philip@zaynar.co.uk>
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * Copyright (C) 2018-2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <math.h>
+#include <assert.h>
+#include "half_float.h"
+#include "util/u_half.h"
+#include "rounding.h"
+#include "softfloat.h"
+#include "macros.h"
+
+typedef union { float f; int32_t i; uint32_t u; } fi_type;
+
+/**
+ * Convert a 4-byte float to a 2-byte half float.
+ *
+ * Not all float32 values can be represented exactly as a float16 value. We
+ * round such intermediate float32 values to the nearest float16. When the
+ * float32 lies exactly between to float16 values, we round to the one with
+ * an even mantissa.
+ *
+ * This rounding behavior has several benefits:
+ * - It has no sign bias.
+ *
+ * - It reproduces the behavior of real hardware: opcode F32TO16 in Intel's
+ * GPU ISA.
+ *
+ * - By reproducing the behavior of the GPU (at least on Intel hardware),
+ * compile-time evaluation of constant packHalf2x16 GLSL expressions will
+ * result in the same value as if the expression were executed on the GPU.
+ */
+uint16_t
+_mesa_float_to_half(float val)
+{
+ const fi_type fi = {val};
+ const int flt_m = fi.i & 0x7fffff;
+ const int flt_e = (fi.i >> 23) & 0xff;
+ const int flt_s = (fi.i >> 31) & 0x1;
+ int s, e, m = 0;
+ uint16_t result;
+
+ /* sign bit */
+ s = flt_s;
+
+ /* handle special cases */
+ if ((flt_e == 0) && (flt_m == 0)) {
+ /* zero */
+ /* m = 0; - already set */
+ e = 0;
+ }
+ else if ((flt_e == 0) && (flt_m != 0)) {
+ /* denorm -- denorm float maps to 0 half */
+ /* m = 0; - already set */
+ e = 0;
+ }
+ else if ((flt_e == 0xff) && (flt_m == 0)) {
+ /* infinity */
+ /* m = 0; - already set */
+ e = 31;
+ }
+ else if ((flt_e == 0xff) && (flt_m != 0)) {
+ /* NaN */
+ m = 1;
+ e = 31;
+ }
+ else {
+ /* regular number */
+ const int new_exp = flt_e - 127;
+ if (new_exp < -14) {
+ /* The float32 lies in the range (0.0, min_normal16) and is rounded
+ * to a nearby float16 value. The result will be either zero, subnormal,
+ * or normal.
+ */
+ e = 0;
+ m = _mesa_lroundevenf((1 << 24) * fabsf(fi.f));
+ }
+ else if (new_exp > 15) {
+ /* map this value to infinity */
+ /* m = 0; - already set */
+ e = 31;
+ }
+ else {
+ /* The float32 lies in the range
+ * [min_normal16, max_normal16 + max_step16)
+ * and is rounded to a nearby float16 value. The result will be
+ * either normal or infinite.
+ */
+ e = new_exp + 15;
+ m = _mesa_lroundevenf(flt_m / (float) (1 << 13));
+ }
+ }
+
+ assert(0 <= m && m <= 1024);
+ if (m == 1024) {
+ /* The float32 was rounded upwards into the range of the next exponent,
+ * so bump the exponent. This correctly handles the case where f32
+ * should be rounded up to float16 infinity.
+ */
+ ++e;
+ m = 0;
+ }
+
+ result = (s << 15) | (e << 10) | m;
+ return result;
+}
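Two spot checks of this rounding behavior (a sketch; assumes <assert.h>):

```c
static void
check_float_to_half(void)
{
   /* 1.0f is exactly representable: sign 0, biased exponent 15, mantissa 0. */
   assert(_mesa_float_to_half(1.0f) == 0x3c00);

   /* 2049.0f lies exactly halfway between fp16 2048.0 (0x6800) and
    * 2050.0 (0x6801); round-to-nearest-even keeps the even mantissa. */
   assert(_mesa_float_to_half(2049.0f) == 0x6800);
}
```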
+
+uint16_t
+_mesa_float_to_float16_rtz(float val)
+{
+ return _mesa_float_to_half_rtz(val);
+}
+
+/**
+ * Convert a 2-byte half float to a 4-byte float.
+ * Based on code from:
+ * http://www.opengl.org/discussion_boards/ubb/Forum3/HTML/008786.html
+ */
+float
+_mesa_half_to_float(uint16_t val)
+{
+ return util_half_to_float(val);
+}
+
+/**
+ * Convert 0.0 to 0x00, 1.0 to 0xff.
+ * Values outside the range [0.0, 1.0] will give undefined results.
+ */
+uint8_t _mesa_half_to_unorm8(uint16_t val)
+{
+ const int m = val & 0x3ff;
+ const int e = (val >> 10) & 0x1f;
+ ASSERTED const int s = (val >> 15) & 0x1;
+
+ /* v = round_to_nearest(1.mmmmmmmmmm * 2^(e-15) * 255)
+ * = round_to_nearest((1.mmmmmmmmmm * 255) * 2^(e-15))
+ * = round_to_nearest((1mmmmmmmmmm * 255) * 2^(e-25))
+ * = round_to_zero((1mmmmmmmmmm * 255) * 2^(e-25) + 0.5)
+ * = round_to_zero(((1mmmmmmmmmm * 255) * 2^(e-24) + 1) / 2)
+ *
+ * This happens to give the correct answer for zero/subnormals too
+ */
+ assert(s == 0 && val <= FP16_ONE); /* check 0 <= this <= 1 */
+ /* (implies e <= 15, which means the bit-shifts below are safe) */
+
+ uint32_t v = ((1 << 10) | m) * 255;
+ v = ((v >> (24 - e)) + 1) >> 1;
+ return v;
+}
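Plugging fp16 0.5 (0x3800, so e = 14 and m = 0) into the derivation above gives v = 1024 * 255 = 261120, then v >> (24 - 14) = 255, and (255 + 1) >> 1 = 128. A sketch of the corresponding checks:

```c
static void
check_half_to_unorm8(void)
{
   assert(_mesa_half_to_unorm8(0x3800) == 128);      /* 0.5 -> 128  */
   assert(_mesa_half_to_unorm8(FP16_ONE) == 255);    /* 1.0 -> 0xff */
   assert(_mesa_half_to_unorm8(FP16_ZERO) == 0);     /* 0.0 -> 0x00 */
}
```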
+
+/**
+ * Takes a uint16_t, divides by 65536, converts the infinite-precision
+ * result to fp16 with round-to-zero. Used by the ASTC decoder.
+ */
+uint16_t _mesa_uint16_div_64k_to_half(uint16_t v)
+{
+ /* Zero or subnormal. Set the mantissa to (v << 8) and return. */
+ if (v < 4)
+ return v << 8;
+
+ /* Count the leading 0s in the uint16_t */
+#ifdef HAVE___BUILTIN_CLZ
+ int n = __builtin_clz(v) - 16;
+#else
+ int n = 16;
+ for (int i = 15; i >= 0; i--) {
+ if (v & (1 << i)) {
+ n = 15 - i;
+ break;
+ }
+ }
+#endif
+
+ /* Shift the mantissa up so bit 16 is the hidden 1 bit,
+ * mask it off, then shift back down to 10 bits
+ */
+ int m = ( ((uint32_t)v << (n + 1)) & 0xffff ) >> 6;
+
+ /* (0{n} 1 X{15-n}) * 2^-16
+ * = 1.X * 2^(15-n-16)
+ * = 1.X * 2^(14-n - 15)
+ * which is the FP16 form with e = 14 - n
+ */
+ int e = 14 - n;
+
+ assert(e >= 1 && e <= 30);
+ assert(m >= 0 && m < 0x400);
+
+ return (e << 10) | m;
+}
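A worked example: for v = 0x8000 (32768/65536 = 0.5) there are no leading zeros, so n = 0; the shifted mantissa drops its hidden bit to give m = 0, and e = 14, yielding 0x3800, the fp16 encoding of 0.5. In code (illustrative only):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint16_t v = 0x8000;                       /* 32768 / 65536 = 0.5 */
    const int n = 0;                                 /* leading zeros in v */
    const int m = (((uint32_t)v << (n + 1)) & 0xffff) >> 6;
    const int e = 14 - n;
    assert(((e << 10) | m) == 0x3800);               /* fp16 bits of 0.5 */
    return 0;
}
```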
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/half_float.h b/third_party/rust/glslopt/glsl-optimizer/src/util/half_float.h
new file mode 100644
index 0000000000..c9fad9a940
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/half_float.h
@@ -0,0 +1,85 @@
+/*
+ * Mesa 3-D graphics library
+ *
+ * Copyright (C) 1999-2007 Brian Paul All Rights Reserved.
+ * Copyright (C) 2018-2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _HALF_FLOAT_H_
+#define _HALF_FLOAT_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FP16_ONE ((uint16_t) 0x3c00)
+#define FP16_ZERO ((uint16_t) 0)
+
+uint16_t _mesa_float_to_half(float val);
+float _mesa_half_to_float(uint16_t val);
+uint8_t _mesa_half_to_unorm8(uint16_t v);
+uint16_t _mesa_uint16_div_64k_to_half(uint16_t v);
+
+/*
+ * _mesa_float_to_float16_rtz is just a wrapper around the corresponding
+ * softfloat.h call. The softfloat.h conversion API is meant to stay
+ * private, so only use the API published here instead of calling the
+ * softfloat.h function directly.
+ */
+uint16_t _mesa_float_to_float16_rtz(float val);
+
+static inline uint16_t
+_mesa_float_to_float16_rtne(float val)
+{
+ return _mesa_float_to_half(val);
+}
+
+static inline bool
+_mesa_half_is_negative(uint16_t h)
+{
+ return !!(h & 0x8000);
+}
+
+
+#ifdef __cplusplus
+
+/* Helper class for disambiguating fp16 from uint16_t in C++ overloads */
+
+struct float16_t {
+ uint16_t bits;
+ float16_t(float f) : bits(_mesa_float_to_half(f)) {}
+ float16_t(double d) : bits(_mesa_float_to_half(d)) {}
+ float16_t(uint16_t bits) : bits(bits) {}
+ static float16_t one() { return float16_t(FP16_ONE); }
+ static float16_t zero() { return float16_t(FP16_ZERO); }
+};
+
+#endif
+
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* _HALF_FLOAT_H_ */
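A minimal round-trip through the public entry points declared above might look like the following; this is a sketch that assumes compiling and linking against the rest of the Mesa util sources, and the printed value reflects fp16 precision (3.14159 snaps to the nearest half, 3.140625):

```c
#include <stdio.h>
#include "util/half_float.h"

int main(void)
{
    uint16_t h = _mesa_float_to_half(3.14159f);
    float back = _mesa_half_to_float(h);
    printf("0x%04x -> %f\n", h, back);  /* expected: 0x4248 -> 3.140625 */
    return 0;
}
```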
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.c b/third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.c
new file mode 100644
index 0000000000..0b0077cc7f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.c
@@ -0,0 +1,859 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ * Copyright © 1988-2004 Keith Packard and Bart Massey.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the names of the authors
+ * or their institutions shall not be used in advertising or
+ * otherwise to promote the sale, use or other dealings in this
+ * Software without prior written authorization from the
+ * authors.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ */
+
+/**
+ * Implements an open-addressing hash table with double-hashing reprobing.
+ *
+ * For more information, see:
+ *
+ * http://cgit.freedesktop.org/~anholt/hash_table/tree/README
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "hash_table.h"
+#include "ralloc.h"
+#include "macros.h"
+#include "u_memory.h"
+#include "fast_urem_by_const.h"
+#include "util/u_memory.h"
+
+#define XXH_INLINE_ALL
+#include "xxhash.h"
+
+/**
+ * Magic number that gets stored outside of the struct hash_table.
+ *
+ * The hash table needs a particular pointer to be the marker for a key that
+ * was deleted from the table, along with NULL for the "never allocated in the
+ * table" marker. Legacy GL allows any GLuint to be used as a GL object name,
+ * and we use a 1:1 mapping from GLuints to key pointers, so we need to be
+ * able to track a GLuint that happens to match the deleted key outside of
+ * struct hash_table. We tell the hash table to use "1" as the deleted key
+ * value, so that we test the deleted-key-in-the-table path as best we can.
+ */
+#define DELETED_KEY_VALUE 1
+
+static inline void *
+uint_key(unsigned id)
+{
+ return (void *)(uintptr_t) id;
+}
+
+static const uint32_t deleted_key_value;
+
+/**
+ * From Knuth -- a good choice for hash/rehash values is p, p-2 where
+ * p and p-2 are both prime. These tables are sized to have an extra 10%
+ * free to avoid exponential performance degradation as the hash table fills
+ */
+static const struct {
+ uint32_t max_entries, size, rehash;
+ uint64_t size_magic, rehash_magic;
+} hash_sizes[] = {
+#define ENTRY(max_entries, size, rehash) \
+ { max_entries, size, rehash, \
+ REMAINDER_MAGIC(size), REMAINDER_MAGIC(rehash) }
+
+ ENTRY(2, 5, 3 ),
+ ENTRY(4, 7, 5 ),
+ ENTRY(8, 13, 11 ),
+ ENTRY(16, 19, 17 ),
+ ENTRY(32, 43, 41 ),
+ ENTRY(64, 73, 71 ),
+ ENTRY(128, 151, 149 ),
+ ENTRY(256, 283, 281 ),
+ ENTRY(512, 571, 569 ),
+ ENTRY(1024, 1153, 1151 ),
+ ENTRY(2048, 2269, 2267 ),
+ ENTRY(4096, 4519, 4517 ),
+ ENTRY(8192, 9013, 9011 ),
+ ENTRY(16384, 18043, 18041 ),
+ ENTRY(32768, 36109, 36107 ),
+ ENTRY(65536, 72091, 72089 ),
+ ENTRY(131072, 144409, 144407 ),
+ ENTRY(262144, 288361, 288359 ),
+ ENTRY(524288, 576883, 576881 ),
+ ENTRY(1048576, 1153459, 1153457 ),
+ ENTRY(2097152, 2307163, 2307161 ),
+ ENTRY(4194304, 4613893, 4613891 ),
+ ENTRY(8388608, 9227641, 9227639 ),
+ ENTRY(16777216, 18455029, 18455027 ),
+ ENTRY(33554432, 36911011, 36911009 ),
+ ENTRY(67108864, 73819861, 73819859 ),
+ ENTRY(134217728, 147639589, 147639587 ),
+ ENTRY(268435456, 295279081, 295279079 ),
+ ENTRY(536870912, 590559793, 590559791 ),
+ ENTRY(1073741824, 1181116273, 1181116271 ),
+ ENTRY(2147483648ul, 2362232233ul, 2362232231ul )
+};
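Because each size is prime and each rehash value is a smaller prime, the probe step 1 + hash % rehash always lies in [1, rehash] and is coprime to size, so the do/while probe loops below visit every slot exactly once before wrapping around. The standalone check below (illustrative only) demonstrates this for the smallest table size; plain % stands in for util_fast_urem32, which computes the same remainder via the precomputed magic constants:

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
    const uint32_t size = 5, rehash = 3;           /* first hash_sizes entry */
    for (uint32_t hash = 0; hash < 1000; hash++) {
        const uint32_t step = 1 + hash % rehash;   /* in [1, 3], coprime to 5 */
        bool seen[5] = { false };
        uint32_t addr = hash % size;
        for (int i = 0; i < 5; i++) {
            seen[addr] = true;
            addr += step;
            if (addr >= size)
                addr -= size;
        }
        for (int s = 0; s < 5; s++)
            assert(seen[s]);                       /* full cycle: every slot hit */
    }
    return 0;
}
```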
+
+ASSERTED static inline bool
+key_pointer_is_reserved(const struct hash_table *ht, const void *key)
+{
+ return key == NULL || key == ht->deleted_key;
+}
+
+static int
+entry_is_free(const struct hash_entry *entry)
+{
+ return entry->key == NULL;
+}
+
+static int
+entry_is_deleted(const struct hash_table *ht, struct hash_entry *entry)
+{
+ return entry->key == ht->deleted_key;
+}
+
+static int
+entry_is_present(const struct hash_table *ht, struct hash_entry *entry)
+{
+ return entry->key != NULL && entry->key != ht->deleted_key;
+}
+
+bool
+_mesa_hash_table_init(struct hash_table *ht,
+ void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ ht->size_index = 0;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->key_hash_function = key_hash_function;
+ ht->key_equals_function = key_equals_function;
+ ht->table = rzalloc_array(mem_ctx, struct hash_entry, ht->size);
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+ ht->deleted_key = &deleted_key_value;
+
+ return ht->table != NULL;
+}
+
+struct hash_table *
+_mesa_hash_table_create(void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ struct hash_table *ht;
+
+ /* mem_ctx is used to allocate the hash table, but the hash table is used
+ * to allocate all of the suballocations.
+ */
+ ht = ralloc(mem_ctx, struct hash_table);
+ if (ht == NULL)
+ return NULL;
+
+ if (!_mesa_hash_table_init(ht, ht, key_hash_function, key_equals_function)) {
+ ralloc_free(ht);
+ return NULL;
+ }
+
+ return ht;
+}
+
+struct hash_table *
+_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx)
+{
+ struct hash_table *ht;
+
+ ht = ralloc(dst_mem_ctx, struct hash_table);
+ if (ht == NULL)
+ return NULL;
+
+ memcpy(ht, src, sizeof(struct hash_table));
+
+ ht->table = ralloc_array(ht, struct hash_entry, ht->size);
+ if (ht->table == NULL) {
+ ralloc_free(ht);
+ return NULL;
+ }
+
+ memcpy(ht->table, src->table, ht->size * sizeof(struct hash_entry));
+
+ return ht;
+}
+
+/**
+ * Frees the given hash table.
+ *
+ * If delete_function is passed, it gets called on each entry present before
+ * freeing.
+ */
+void
+_mesa_hash_table_destroy(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ if (!ht)
+ return;
+
+ if (delete_function) {
+ hash_table_foreach(ht, entry) {
+ delete_function(entry);
+ }
+ }
+ ralloc_free(ht);
+}
+
+/**
+ * Deletes all entries of the given hash table without deleting the table
+ * itself or changing its structure.
+ *
+ * If delete_function is passed, it gets called on each entry present.
+ */
+void
+_mesa_hash_table_clear(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ struct hash_entry *entry;
+
+ for (entry = ht->table; entry != ht->table + ht->size; entry++) {
+ if (entry->key == NULL)
+ continue;
+
+ if (delete_function != NULL && entry->key != ht->deleted_key)
+ delete_function(entry);
+
+ entry->key = NULL;
+ }
+
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+}
+
+/** Sets the value of the key pointer used for deleted entries in the table.
+ *
+ * The assumption is that usually keys are actual pointers, so we use a
+ * default value of a pointer to an arbitrary piece of storage in the library.
+ * But in some cases a consumer wants to store some other sort of value in the
+ * table, like a uint32_t, in which case that pointer may conflict with one of
+ * their valid keys. This lets that user select a safe value.
+ *
+ * This must be called before any keys are actually deleted from the table.
+ */
+void
+_mesa_hash_table_set_deleted_key(struct hash_table *ht, const void *deleted_key)
+{
+ ht->deleted_key = deleted_key;
+}
+
+static struct hash_entry *
+hash_table_search(struct hash_table *ht, uint32_t hash, const void *key)
+{
+ assert(!key_pointer_is_reserved(ht, key));
+
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (entry_is_free(entry)) {
+ return NULL;
+ } else if (entry_is_present(ht, entry) && entry->hash == hash) {
+ if (ht->key_equals_function(key, entry->key)) {
+ return entry;
+ }
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_hash_address);
+
+ return NULL;
+}
+
+/**
+ * Finds a hash table entry with the given key and hash of that key.
+ *
+ * Returns NULL if no entry is found. Note that the data pointer may be
+ * modified by the user.
+ */
+struct hash_entry *
+_mesa_hash_table_search(struct hash_table *ht, const void *key)
+{
+ assert(ht->key_hash_function);
+ return hash_table_search(ht, ht->key_hash_function(key), key);
+}
+
+struct hash_entry *
+_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key)
+{
+ assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
+ return hash_table_search(ht, hash, key);
+}
+
+static struct hash_entry *
+hash_table_insert(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data);
+
+static void
+hash_table_insert_rehash(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (likely(entry->key == NULL)) {
+ entry->hash = hash;
+ entry->key = key;
+ entry->data = data;
+ return;
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (true);
+}
+
+static void
+_mesa_hash_table_rehash(struct hash_table *ht, unsigned new_size_index)
+{
+ struct hash_table old_ht;
+ struct hash_entry *table;
+
+ if (new_size_index >= ARRAY_SIZE(hash_sizes))
+ return;
+
+ table = rzalloc_array(ralloc_parent(ht->table), struct hash_entry,
+ hash_sizes[new_size_index].size);
+ if (table == NULL)
+ return;
+
+ old_ht = *ht;
+
+ ht->table = table;
+ ht->size_index = new_size_index;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+
+ hash_table_foreach(&old_ht, entry) {
+ hash_table_insert_rehash(ht, entry->hash, entry->key, entry->data);
+ }
+
+ ht->entries = old_ht.entries;
+
+ ralloc_free(old_ht.table);
+}
+
+static struct hash_entry *
+hash_table_insert(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ struct hash_entry *available_entry = NULL;
+
+ assert(!key_pointer_is_reserved(ht, key));
+
+ if (ht->entries >= ht->max_entries) {
+ _mesa_hash_table_rehash(ht, ht->size_index + 1);
+ } else if (ht->deleted_entries + ht->entries >= ht->max_entries) {
+ _mesa_hash_table_rehash(ht, ht->size_index);
+ }
+
+ uint32_t size = ht->size;
+ uint32_t start_hash_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = 1 + util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic);
+ uint32_t hash_address = start_hash_address;
+ do {
+ struct hash_entry *entry = ht->table + hash_address;
+
+ if (!entry_is_present(ht, entry)) {
+ /* Stash the first available entry we find */
+ if (available_entry == NULL)
+ available_entry = entry;
+ if (entry_is_free(entry))
+ break;
+ }
+
+ /* Implement replacement when another insert happens
+ * with a matching key. This is a relatively common
+ * feature of hash tables, with the alternative
+ * generally being "insert the new value as well, and
+ * return it first when the key is searched for".
+ *
+ * Note that the hash table doesn't have a delete
+ * callback. If freeing of old data pointers is
+ * required to avoid memory leaks, perform a search
+ * before inserting.
+ */
+ if (!entry_is_deleted(ht, entry) &&
+ entry->hash == hash &&
+ ht->key_equals_function(key, entry->key)) {
+ entry->key = key;
+ entry->data = data;
+ return entry;
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_hash_address);
+
+ if (available_entry) {
+ if (entry_is_deleted(ht, available_entry))
+ ht->deleted_entries--;
+ available_entry->hash = hash;
+ available_entry->key = key;
+ available_entry->data = data;
+ ht->entries++;
+ return available_entry;
+ }
+
+ /* We could hit here if a required resize failed. An unchecked-malloc
+ * application could ignore this result.
+ */
+ return NULL;
+}
+
+/**
+ * Inserts the key with the given hash into the table.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash,
+ * so previously found hash_entries are no longer valid after this function.
+ */
+struct hash_entry *
+_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data)
+{
+ assert(ht->key_hash_function);
+ return hash_table_insert(ht, ht->key_hash_function(key), key, data);
+}
+
+struct hash_entry *
+_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data)
+{
+ assert(ht->key_hash_function == NULL || hash == ht->key_hash_function(key));
+ return hash_table_insert(ht, hash, key, data);
+}
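The pre-hashed variants exist so a caller can hash a key once and reuse the value for a lookup followed by an insert. A hypothetical helper (assuming a table created with _mesa_hash_string and _mesa_key_string_equal):

```c
#include "util/hash_table.h"

static void *
lookup_or_add(struct hash_table *ht, const char *key, void *fresh)
{
    const uint32_t hash = _mesa_hash_string(key);
    struct hash_entry *e = _mesa_hash_table_search_pre_hashed(ht, hash, key);
    if (!e)
        e = _mesa_hash_table_insert_pre_hashed(ht, hash, key, fresh);
    return e ? e->data : NULL;
}
```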
+
+/**
+ * This function deletes the given hash table entry.
+ *
+ * Note that deletion doesn't otherwise modify the table, so an iteration over
+ * the table deleting entries is safe.
+ */
+void
+_mesa_hash_table_remove(struct hash_table *ht,
+ struct hash_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->key = ht->deleted_key;
+ ht->entries--;
+ ht->deleted_entries++;
+}
+
+/**
+ * Removes the entry with the corresponding key, if it exists.
+ */
+void _mesa_hash_table_remove_key(struct hash_table *ht,
+ const void *key)
+{
+ _mesa_hash_table_remove(ht, _mesa_hash_table_search(ht, key));
+}
+
+/**
+ * This function is an iterator over the hash table.
+ *
+ * Pass in NULL for the first entry, as in the start of a for loop. Note that
+ * an iteration over the table is O(table_size) not O(entries).
+ */
+struct hash_entry *
+_mesa_hash_table_next_entry(struct hash_table *ht,
+ struct hash_entry *entry)
+{
+ if (entry == NULL)
+ entry = ht->table;
+ else
+ entry = entry + 1;
+
+ for (; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(ht, entry)) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Returns a random entry from the hash table.
+ *
+ * This may be useful in implementing random replacement (as opposed
+ * to just removing everything) in caches based on this hash table
+ * implementation. @predicate may be used to filter entries, or may
+ * be set to NULL for no filtering.
+ */
+struct hash_entry *
+_mesa_hash_table_random_entry(struct hash_table *ht,
+ bool (*predicate)(struct hash_entry *entry))
+{
+ struct hash_entry *entry;
+ uint32_t i = rand() % ht->size;
+
+ if (ht->entries == 0)
+ return NULL;
+
+ for (entry = ht->table + i; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(ht, entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ for (entry = ht->table; entry != ht->table + i; entry++) {
+ if (entry_is_present(ht, entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+
+uint32_t
+_mesa_hash_data(const void *data, size_t size)
+{
+ return XXH32(data, size, 0);
+}
+
+uint32_t
+_mesa_hash_int(const void *key)
+{
+ return XXH32(key, sizeof(int), 0);
+}
+
+uint32_t
+_mesa_hash_uint(const void *key)
+{
+ return XXH32(key, sizeof(unsigned), 0);
+}
+
+uint32_t
+_mesa_hash_u32(const void *key)
+{
+ return XXH32(key, 4, 0);
+}
+
+/** FNV-1a string hash implementation */
+uint32_t
+_mesa_hash_string(const void *_key)
+{
+ uint32_t hash = _mesa_fnv32_1a_offset_bias;
+ const char *key = _key;
+
+ while (*key != 0) {
+ hash = _mesa_fnv32_1a_accumulate(hash, *key);
+ key++;
+ }
+
+ return hash;
+}
+
+uint32_t
+_mesa_hash_pointer(const void *pointer)
+{
+ uintptr_t num = (uintptr_t) pointer;
+ return (uint32_t) ((num >> 2) ^ (num >> 6) ^ (num >> 10) ^ (num >> 14));
+}
+
+bool
+_mesa_key_int_equal(const void *a, const void *b)
+{
+ return *((const int *)a) == *((const int *)b);
+}
+
+bool
+_mesa_key_uint_equal(const void *a, const void *b)
+{
+   return *((const unsigned *)a) == *((const unsigned *)b);
+}
+
+bool
+_mesa_key_u32_equal(const void *a, const void *b)
+{
+ return *((const uint32_t *)a) == *((const uint32_t *)b);
+}
+
+/**
+ * String compare function for use as the comparison callback in
+ * _mesa_hash_table_create().
+ */
+bool
+_mesa_key_string_equal(const void *a, const void *b)
+{
+ return strcmp(a, b) == 0;
+}
+
+bool
+_mesa_key_pointer_equal(const void *a, const void *b)
+{
+ return a == b;
+}
+
+/**
+ * Helper to create a hash table with pointer keys.
+ */
+struct hash_table *
+_mesa_pointer_hash_table_create(void *mem_ctx)
+{
+ return _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+}
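A minimal lifecycle sketch for the pointer-keyed convenience constructor (assumes linking against the Mesa util sources, including ralloc and xxhash):

```c
#include <assert.h>
#include "util/hash_table.h"

int main(void)
{
    struct hash_table *ht = _mesa_pointer_hash_table_create(NULL);
    int key_storage, value = 42;

    _mesa_hash_table_insert(ht, &key_storage, &value);

    struct hash_entry *e = _mesa_hash_table_search(ht, &key_storage);
    assert(e && *(int *)e->data == 42);

    _mesa_hash_table_destroy(ht, NULL);
    return 0;
}
```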
+
+/**
+ * Hash table wrapper which supports 64-bit keys.
+ *
+ * TODO: unify all hash table implementations.
+ */
+
+struct hash_key_u64 {
+ uint64_t value;
+};
+
+static uint32_t
+key_u64_hash(const void *key)
+{
+ return _mesa_hash_data(key, sizeof(struct hash_key_u64));
+}
+
+static bool
+key_u64_equals(const void *a, const void *b)
+{
+ const struct hash_key_u64 *aa = a;
+ const struct hash_key_u64 *bb = b;
+
+ return aa->value == bb->value;
+}
+
+#define FREED_KEY_VALUE 0
+
+struct hash_table_u64 *
+_mesa_hash_table_u64_create(void *mem_ctx)
+{
+ STATIC_ASSERT(FREED_KEY_VALUE != DELETED_KEY_VALUE);
+ struct hash_table_u64 *ht;
+
+ ht = CALLOC_STRUCT(hash_table_u64);
+ if (!ht)
+ return NULL;
+
+ if (sizeof(void *) == 8) {
+ ht->table = _mesa_hash_table_create(mem_ctx, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+ } else {
+ ht->table = _mesa_hash_table_create(mem_ctx, key_u64_hash,
+ key_u64_equals);
+ }
+
+ if (ht->table)
+ _mesa_hash_table_set_deleted_key(ht->table, uint_key(DELETED_KEY_VALUE));
+
+ return ht;
+}
+
+void
+_mesa_hash_table_u64_clear(struct hash_table_u64 *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ if (!ht)
+ return;
+
+ if (ht->deleted_key_data) {
+ if (delete_function) {
+ struct hash_table *table = ht->table;
+ struct hash_entry entry;
+
+ /* Create a fake entry for the delete function. */
+ if (sizeof(void *) == 8) {
+ entry.hash = table->key_hash_function(table->deleted_key);
+ } else {
+ struct hash_key_u64 _key = { .value = (uintptr_t)table->deleted_key };
+ entry.hash = table->key_hash_function(&_key);
+ }
+ entry.key = table->deleted_key;
+ entry.data = ht->deleted_key_data;
+
+ delete_function(&entry);
+ }
+ ht->deleted_key_data = NULL;
+ }
+
+ if (ht->freed_key_data) {
+ if (delete_function) {
+ struct hash_table *table = ht->table;
+ struct hash_entry entry;
+
+ /* Create a fake entry for the delete function. */
+ if (sizeof(void *) == 8) {
+ entry.hash = table->key_hash_function(uint_key(FREED_KEY_VALUE));
+ } else {
+ struct hash_key_u64 _key = { .value = (uintptr_t)FREED_KEY_VALUE };
+ entry.hash = table->key_hash_function(&_key);
+ }
+ entry.key = uint_key(FREED_KEY_VALUE);
+ entry.data = ht->freed_key_data;
+
+ delete_function(&entry);
+ }
+ ht->freed_key_data = NULL;
+ }
+
+ _mesa_hash_table_clear(ht->table, delete_function);
+}
+
+void
+_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht,
+ void (*delete_function)(struct hash_entry *entry))
+{
+ if (!ht)
+ return;
+
+ _mesa_hash_table_u64_clear(ht, delete_function);
+ _mesa_hash_table_destroy(ht->table, delete_function);
+ free(ht);
+}
+
+void
+_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
+ void *data)
+{
+ if (key == FREED_KEY_VALUE) {
+ ht->freed_key_data = data;
+ return;
+ }
+
+ if (key == DELETED_KEY_VALUE) {
+ ht->deleted_key_data = data;
+ return;
+ }
+
+ if (sizeof(void *) == 8) {
+ _mesa_hash_table_insert(ht->table, (void *)(uintptr_t)key, data);
+ } else {
+ struct hash_key_u64 *_key = CALLOC_STRUCT(hash_key_u64);
+
+ if (!_key)
+ return;
+ _key->value = key;
+
+ _mesa_hash_table_insert(ht->table, _key, data);
+ }
+}
+
+static struct hash_entry *
+hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key)
+{
+ if (sizeof(void *) == 8) {
+ return _mesa_hash_table_search(ht->table, (void *)(uintptr_t)key);
+ } else {
+ struct hash_key_u64 _key = { .value = key };
+ return _mesa_hash_table_search(ht->table, &_key);
+ }
+}
+
+void *
+_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key)
+{
+ struct hash_entry *entry;
+
+ if (key == FREED_KEY_VALUE)
+ return ht->freed_key_data;
+
+ if (key == DELETED_KEY_VALUE)
+ return ht->deleted_key_data;
+
+ entry = hash_table_u64_search(ht, key);
+ if (!entry)
+ return NULL;
+
+ return entry->data;
+}
+
+void
+_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key)
+{
+ struct hash_entry *entry;
+
+ if (key == FREED_KEY_VALUE) {
+ ht->freed_key_data = NULL;
+ return;
+ }
+
+ if (key == DELETED_KEY_VALUE) {
+ ht->deleted_key_data = NULL;
+ return;
+ }
+
+ entry = hash_table_u64_search(ht, key);
+ if (!entry)
+ return;
+
+ if (sizeof(void *) == 8) {
+ _mesa_hash_table_remove(ht->table, entry);
+ } else {
+      struct hash_key_u64 *_key = (struct hash_key_u64 *)entry->key;
+
+ _mesa_hash_table_remove(ht->table, entry);
+ free(_key);
+ }
+}
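Usage of the 64-bit wrapper mirrors the plain table, except that the reserved keys 0 (FREED_KEY_VALUE) and 1 (DELETED_KEY_VALUE) are stored out-of-band as shown above. An illustrative sketch:

```c
#include <assert.h>
#include "util/hash_table.h"

int main(void)
{
    struct hash_table_u64 *ht = _mesa_hash_table_u64_create(NULL);
    int a = 1, b = 2;

    _mesa_hash_table_u64_insert(ht, 0xdeadbeefcafeull, &a);
    _mesa_hash_table_u64_insert(ht, 0, &b);   /* takes the FREED_KEY_VALUE path */

    assert(_mesa_hash_table_u64_search(ht, 0xdeadbeefcafeull) == &a);
    assert(_mesa_hash_table_u64_search(ht, 0) == &b);

    _mesa_hash_table_u64_destroy(ht, NULL);
    return 0;
}
```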
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.h b/third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.h
new file mode 100644
index 0000000000..b1eb9d4e21
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/hash_table.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright © 2009,2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _HASH_TABLE_H
+#define _HASH_TABLE_H
+
+#include <stdlib.h>
+#include <inttypes.h>
+#include <stdbool.h>
+#include "c99_compat.h"
+#include "fnv1a.h"
+#include "macros.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct hash_entry {
+ uint32_t hash;
+ const void *key;
+ void *data;
+};
+
+struct hash_table {
+ struct hash_entry *table;
+ uint32_t (*key_hash_function)(const void *key);
+ bool (*key_equals_function)(const void *a, const void *b);
+ const void *deleted_key;
+ uint32_t size;
+ uint32_t rehash;
+ uint64_t size_magic;
+ uint64_t rehash_magic;
+ uint32_t max_entries;
+ uint32_t size_index;
+ uint32_t entries;
+ uint32_t deleted_entries;
+};
+
+struct hash_table *
+_mesa_hash_table_create(void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b));
+
+bool
+_mesa_hash_table_init(struct hash_table *ht,
+ void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b));
+
+struct hash_table *
+_mesa_hash_table_clone(struct hash_table *src, void *dst_mem_ctx);
+void _mesa_hash_table_destroy(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry));
+void _mesa_hash_table_clear(struct hash_table *ht,
+ void (*delete_function)(struct hash_entry *entry));
+void _mesa_hash_table_set_deleted_key(struct hash_table *ht,
+ const void *deleted_key);
+
+static inline uint32_t _mesa_hash_table_num_entries(struct hash_table *ht)
+{
+ return ht->entries;
+}
+
+struct hash_entry *
+_mesa_hash_table_insert(struct hash_table *ht, const void *key, void *data);
+struct hash_entry *
+_mesa_hash_table_insert_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key, void *data);
+struct hash_entry *
+_mesa_hash_table_search(struct hash_table *ht, const void *key);
+struct hash_entry *
+_mesa_hash_table_search_pre_hashed(struct hash_table *ht, uint32_t hash,
+ const void *key);
+void _mesa_hash_table_remove(struct hash_table *ht,
+ struct hash_entry *entry);
+void _mesa_hash_table_remove_key(struct hash_table *ht,
+ const void *key);
+
+struct hash_entry *_mesa_hash_table_next_entry(struct hash_table *ht,
+ struct hash_entry *entry);
+struct hash_entry *
+_mesa_hash_table_random_entry(struct hash_table *ht,
+ bool (*predicate)(struct hash_entry *entry));
+
+uint32_t _mesa_hash_data(const void *data, size_t size);
+
+uint32_t _mesa_hash_int(const void *key);
+uint32_t _mesa_hash_uint(const void *key);
+uint32_t _mesa_hash_u32(const void *key);
+uint32_t _mesa_hash_string(const void *key);
+uint32_t _mesa_hash_pointer(const void *pointer);
+
+bool _mesa_key_int_equal(const void *a, const void *b);
+bool _mesa_key_uint_equal(const void *a, const void *b);
+bool _mesa_key_u32_equal(const void *a, const void *b);
+bool _mesa_key_string_equal(const void *a, const void *b);
+bool _mesa_key_pointer_equal(const void *a, const void *b);
+
+struct hash_table *
+_mesa_pointer_hash_table_create(void *mem_ctx);
+
+/**
+ * This foreach function is safe against deletion (which just replaces
+ * an entry's key with the deleted marker), but not against insertion
+ * (which may rehash the table, making entry a dangling pointer).
+ */
+#define hash_table_foreach(ht, entry) \
+ for (struct hash_entry *entry = _mesa_hash_table_next_entry(ht, NULL); \
+ entry != NULL; \
+ entry = _mesa_hash_table_next_entry(ht, entry))
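For example, a hypothetical cleanup pass that relies on the deletion-safety just described:

```c
#include "util/hash_table.h"

/* Safe: _mesa_hash_table_remove only overwrites the key with the
 * deleted-key marker, so the iteration is undisturbed. */
static void
drop_null_data(struct hash_table *ht)
{
    hash_table_foreach(ht, entry) {
        if (entry->data == NULL)
            _mesa_hash_table_remove(ht, entry);
    }
}
```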
+
+static inline void
+hash_table_call_foreach(struct hash_table *ht,
+ void (*callback)(const void *key,
+ void *data,
+ void *closure),
+ void *closure)
+{
+ hash_table_foreach(ht, entry)
+ callback(entry->key, entry->data, closure);
+}
+
+/**
+ * Hash table wrapper which supports 64-bit keys.
+ */
+struct hash_table_u64 {
+ struct hash_table *table;
+ void *freed_key_data;
+ void *deleted_key_data;
+};
+
+struct hash_table_u64 *
+_mesa_hash_table_u64_create(void *mem_ctx);
+
+void
+_mesa_hash_table_u64_destroy(struct hash_table_u64 *ht,
+ void (*delete_function)(struct hash_entry *entry));
+
+void
+_mesa_hash_table_u64_insert(struct hash_table_u64 *ht, uint64_t key,
+ void *data);
+
+void *
+_mesa_hash_table_u64_search(struct hash_table_u64 *ht, uint64_t key);
+
+void
+_mesa_hash_table_u64_remove(struct hash_table_u64 *ht, uint64_t key);
+
+void
+_mesa_hash_table_u64_clear(struct hash_table_u64 *ht,
+ void (*delete_function)(struct hash_entry *entry));
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* _HASH_TABLE_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/list.h b/third_party/rust/glslopt/glsl-optimizer/src/util/list.h
new file mode 100644
index 0000000000..8a18c4b0d8
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/list.h
@@ -0,0 +1,249 @@
+/**************************************************************************
+ *
+ * Copyright 2006 VMware, Inc., Bismarck, ND. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ **************************************************************************/
+
+/**
+ * \file
+ * List macros heavily inspired by the Linux kernel
+ * list handling, including the looping iterators below.
+ *
+ * Not threadsafe: common operations need to
+ * be protected with an external mutex.
+ */
+
+#ifndef _UTIL_LIST_H_
+#define _UTIL_LIST_H_
+
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <assert.h>
+#include "c99_compat.h"
+
+#ifdef DEBUG
+# define list_assert(cond, msg) assert(cond && msg)
+#else
+# define list_assert(cond, msg) (void)(0 && (cond))
+#endif
+
+struct list_head
+{
+ struct list_head *prev;
+ struct list_head *next;
+};
+
+static inline void list_inithead(struct list_head *item)
+{
+ item->prev = item;
+ item->next = item;
+}
+
+static inline void list_add(struct list_head *item, struct list_head *list)
+{
+ item->prev = list;
+ item->next = list->next;
+ list->next->prev = item;
+ list->next = item;
+}
+
+static inline void list_addtail(struct list_head *item, struct list_head *list)
+{
+ item->next = list;
+ item->prev = list->prev;
+ list->prev->next = item;
+ list->prev = item;
+}
+
+static inline bool list_is_empty(const struct list_head *list);
+
+static inline void list_replace(struct list_head *from, struct list_head *to)
+{
+ if (list_is_empty(from)) {
+ list_inithead(to);
+ } else {
+ to->prev = from->prev;
+ to->next = from->next;
+ from->next->prev = to;
+ from->prev->next = to;
+ }
+}
+
+static inline void list_del(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+ item->prev = item->next = NULL;
+}
+
+static inline void list_delinit(struct list_head *item)
+{
+ item->prev->next = item->next;
+ item->next->prev = item->prev;
+ item->next = item;
+ item->prev = item;
+}
+
+static inline bool list_is_empty(const struct list_head *list)
+{
+ return list->next == list;
+}
+
+/**
+ * Returns whether the list has exactly one element.
+ */
+static inline bool list_is_singular(const struct list_head *list)
+{
+ return list->next != NULL && list->next != list && list->next->next == list;
+}
+
+static inline unsigned list_length(const struct list_head *list)
+{
+ struct list_head *node;
+ unsigned length = 0;
+ for (node = list->next; node != list; node = node->next)
+ length++;
+ return length;
+}
+
+static inline void list_splice(struct list_head *src, struct list_head *dst)
+{
+ if (list_is_empty(src))
+ return;
+
+ src->next->prev = dst;
+ src->prev->next = dst->next;
+ dst->next->prev = src->prev;
+ dst->next = src->next;
+}
+
+static inline void list_splicetail(struct list_head *src, struct list_head *dst)
+{
+ if (list_is_empty(src))
+ return;
+
+ src->prev->next = dst;
+ src->next->prev = dst->prev;
+ dst->prev->next = src->next;
+ dst->prev = src->prev;
+}
+
+static inline void list_validate(const struct list_head *list)
+{
+ struct list_head *node;
+ assert(list->next->prev == list && list->prev->next == list);
+ for (node = list->next; node != list; node = node->next)
+ assert(node->next->prev == node && node->prev->next == node);
+}
+
+#define LIST_ENTRY(__type, __item, __field) \
+ ((__type *)(((char *)(__item)) - offsetof(__type, __field)))
+
+/**
+ * Cast from a pointer to a member of a struct back to the containing struct.
+ *
+ * 'sample' MUST be initialized, or else the result is undefined!
+ */
+#ifndef container_of
+#define container_of(ptr, sample, member) \
+ (void *)((char *)(ptr) \
+ - ((char *)&(sample)->member - (char *)(sample)))
+#endif
+
+#define list_first_entry(ptr, type, member) \
+ LIST_ENTRY(type, (ptr)->next, member)
+
+#define list_last_entry(ptr, type, member) \
+ LIST_ENTRY(type, (ptr)->prev, member)
+
+
+#define LIST_FOR_EACH_ENTRY(pos, head, member) \
+ for (pos = NULL, pos = container_of((head)->next, pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.next, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE(pos, storage, head, member) \
+ for (pos = NULL, pos = container_of((head)->next, pos, member), \
+ storage = container_of(pos->member.next, pos, member); \
+ &pos->member != (head); \
+ pos = storage, storage = container_of(storage->member.next, storage, member))
+
+#define LIST_FOR_EACH_ENTRY_SAFE_REV(pos, storage, head, member) \
+ for (pos = NULL, pos = container_of((head)->prev, pos, member), \
+ storage = container_of(pos->member.prev, pos, member); \
+ &pos->member != (head); \
+ pos = storage, storage = container_of(storage->member.prev, storage, member))
+
+#define LIST_FOR_EACH_ENTRY_FROM(pos, start, head, member) \
+ for (pos = NULL, pos = container_of((start), pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.next, pos, member))
+
+#define LIST_FOR_EACH_ENTRY_FROM_REV(pos, start, head, member) \
+ for (pos = NULL, pos = container_of((start), pos, member); \
+ &pos->member != (head); \
+ pos = container_of(pos->member.prev, pos, member))
+
+#define list_for_each_entry(type, pos, head, member) \
+ for (type *pos = LIST_ENTRY(type, (head)->next, member), \
+ *__next = LIST_ENTRY(type, pos->member.next, member); \
+ &pos->member != (head); \
+ pos = LIST_ENTRY(type, pos->member.next, member), \
+ list_assert(pos == __next, "use _safe iterator"), \
+ __next = LIST_ENTRY(type, __next->member.next, member))
+
+#define list_for_each_entry_safe(type, pos, head, member) \
+ for (type *pos = LIST_ENTRY(type, (head)->next, member), \
+ *__next = LIST_ENTRY(type, pos->member.next, member); \
+ &pos->member != (head); \
+ pos = __next, \
+ __next = LIST_ENTRY(type, __next->member.next, member))
+
+#define list_for_each_entry_rev(type, pos, head, member) \
+ for (type *pos = LIST_ENTRY(type, (head)->prev, member), \
+ *__prev = LIST_ENTRY(type, pos->member.prev, member); \
+ &pos->member != (head); \
+ pos = LIST_ENTRY(type, pos->member.prev, member), \
+ list_assert(pos == __prev, "use _safe iterator"), \
+ __prev = LIST_ENTRY(type, __prev->member.prev, member))
+
+#define list_for_each_entry_safe_rev(type, pos, head, member) \
+ for (type *pos = LIST_ENTRY(type, (head)->prev, member), \
+ *__prev = LIST_ENTRY(type, pos->member.prev, member); \
+ &pos->member != (head); \
+ pos = __prev, \
+ __prev = LIST_ENTRY(type, __prev->member.prev, member))
+
+#define list_for_each_entry_from(type, pos, start, head, member) \
+ for (type *pos = LIST_ENTRY(type, (start), member); \
+ &pos->member != (head); \
+ pos = LIST_ENTRY(type, pos->member.next, member))
+
+#define list_for_each_entry_from_rev(type, pos, start, head, member) \
+ for (type *pos = LIST_ENTRY(type, (start), member); \
+ &pos->member != (head); \
+ pos = LIST_ENTRY(type, pos->member.prev, member))
+
+#endif /*_UTIL_LIST_H_*/
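An intrusive-list sketch (illustrative only): the list_head lives inside the element, and the iteration macros recover the containing struct via the offsetof arithmetic in LIST_ENTRY:

```c
#include <assert.h>
#include "util/list.h"

struct node {
    int value;
    struct list_head link;
};

int main(void)
{
    struct list_head head;
    struct node a = { .value = 1 }, b = { .value = 2 };

    list_inithead(&head);
    list_addtail(&a.link, &head);   /* head -> a */
    list_addtail(&b.link, &head);   /* head -> a -> b */

    int sum = 0;
    list_for_each_entry(struct node, n, &head, link)
        sum += n->value;

    assert(sum == 3 && list_length(&head) == 2);
    return 0;
}
```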
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/macros.h b/third_party/rust/glslopt/glsl-optimizer/src/util/macros.h
new file mode 100644
index 0000000000..fcace4e351
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/macros.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef UTIL_MACROS_H
+#define UTIL_MACROS_H
+
+#include <assert.h>
+
+#include "c99_compat.h"
+#include "c11_compat.h"
+
+/* Compute the size of an array */
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+/* For compatibility with Clang's __has_builtin() */
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+/**
+ * __builtin_expect macros
+ */
+#if !defined(HAVE___BUILTIN_EXPECT)
+# define __builtin_expect(x, y) (x)
+#endif
+
+#ifndef likely
+# ifdef HAVE___BUILTIN_EXPECT
+# define likely(x) __builtin_expect(!!(x), 1)
+# define unlikely(x) __builtin_expect(!!(x), 0)
+# else
+# define likely(x) (x)
+# define unlikely(x) (x)
+# endif
+#endif
+
+
+/**
+ * Static (compile-time) assertion.
+ * Basically, use COND to dimension an array. If COND is false/zero the
+ * array size will be -1 and we'll get a compilation error.
+ */
+#define STATIC_ASSERT(COND) \
+ do { \
+ (void) sizeof(char [1 - 2*!(COND)]); \
+ } while (0)
+
+
+/**
+ * Unreachable macro. Useful for suppressing "control reaches end of non-void
+ * function" warnings.
+ */
+#if defined(HAVE___BUILTIN_UNREACHABLE) || __has_builtin(__builtin_unreachable)
+#define unreachable(str) \
+do { \
+ assert(!str); \
+ __builtin_unreachable(); \
+} while (0)
+#elif defined (_MSC_VER)
+#define unreachable(str) \
+do { \
+ assert(!str); \
+ __assume(0); \
+} while (0)
+#else
+#define unreachable(str) assert(!str)
+#endif
+
+/**
+ * Assume macro. Useful for expressing our assumptions to the compiler,
+ * typically for purposes of silencing warnings.
+ */
+#if __has_builtin(__builtin_assume)
+#define assume(expr) \
+do { \
+ assert(expr); \
+ __builtin_assume(expr); \
+} while (0)
+#elif defined HAVE___BUILTIN_UNREACHABLE
+#define assume(expr) ((expr) ? ((void) 0) \
+ : (assert(!"assumption failed"), \
+ __builtin_unreachable()))
+#elif defined (_MSC_VER)
+#define assume(expr) __assume(expr)
+#else
+#define assume(expr) assert(expr)
+#endif
+
+/* Attribute const is used for functions that have no effects other than their
+ * return value, and only rely on the argument values to compute the return
+ * value. As a result, calls to it can be CSEed. Note that using memory
+ * pointed to by the arguments is not allowed for const functions.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_CONST
+#define ATTRIBUTE_CONST __attribute__((__const__))
+#else
+#define ATTRIBUTE_CONST
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FLATTEN
+#define FLATTEN __attribute__((__flatten__))
+#else
+#define FLATTEN
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_FORMAT
+#define PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a)))
+#else
+#define PRINTFLIKE(f, a)
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_MALLOC
+#define MALLOCLIKE __attribute__((__malloc__))
+#else
+#define MALLOCLIKE
+#endif
+
+/* Forced function inlining */
+/* Note: Clang also sets __GNUC__ (see other cases below) */
+#ifndef ALWAYS_INLINE
+# if defined(__GNUC__)
+# define ALWAYS_INLINE inline __attribute__((always_inline))
+# elif defined(_MSC_VER)
+# define ALWAYS_INLINE __forceinline
+# else
+# define ALWAYS_INLINE inline
+# endif
+#endif
+
+/* Used to optionally mark structures with misaligned elements or size as
+ * packed, to trade off performance for space.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_PACKED
+#define PACKED __attribute__((__packed__))
+#else
+#define PACKED
+#endif
+
+/* Attribute pure is used for functions that have no effects other than their
+ * return value. As a result, calls to it can be dead code eliminated.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_PURE
+#define ATTRIBUTE_PURE __attribute__((__pure__))
+#else
+#define ATTRIBUTE_PURE
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_RETURNS_NONNULL
+#define ATTRIBUTE_RETURNS_NONNULL __attribute__((__returns_nonnull__))
+#else
+#define ATTRIBUTE_RETURNS_NONNULL
+#endif
+
+#ifndef NORETURN
+# ifdef _MSC_VER
+# define NORETURN __declspec(noreturn)
+# elif defined HAVE_FUNC_ATTRIBUTE_NORETURN
+# define NORETURN __attribute__((__noreturn__))
+# else
+# define NORETURN
+# endif
+#endif
+
+#ifdef __cplusplus
+/**
+ * Macro function that evaluates to true if T is a trivially
+ * destructible type -- that is, if its (non-virtual) destructor
+ * performs no action and all member variables and base classes are
+ * trivially destructible themselves.
+ */
+# if (defined(__clang__) && defined(__has_feature))
+# if __has_feature(has_trivial_destructor)
+# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)
+# endif
+# elif defined(__GNUC__)
+# if ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3)))
+# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)
+# endif
+# elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+# define HAS_TRIVIAL_DESTRUCTOR(T) __has_trivial_destructor(T)
+# endif
+# ifndef HAS_TRIVIAL_DESTRUCTOR
+ /* It's always safe (if inefficient) to assume that a
+ * destructor is non-trivial.
+ */
+# define HAS_TRIVIAL_DESTRUCTOR(T) (false)
+# endif
+#endif
+
+/**
+ * PUBLIC/USED macros
+ *
+ * If we build the library with gcc's -fvisibility=hidden flag, we'll
+ * use the PUBLIC macro to mark functions that are to be exported.
+ *
+ * We also need to define a USED attribute, so the optimizer doesn't
+ * inline a static function that we later use in an alias. - ajax
+ */
+#ifndef PUBLIC
+# if defined(__GNUC__)
+# define PUBLIC __attribute__((visibility("default")))
+# define USED __attribute__((used))
+# elif defined(_MSC_VER)
+# define PUBLIC __declspec(dllexport)
+# define USED
+# else
+# define PUBLIC
+# define USED
+# endif
+#endif
+
+/**
+ * UNUSED marks variables (or sometimes functions) that have to be defined,
+ * but are sometimes (or always) unused beyond that. A common case is for
+ * a function parameter to be used in some build configurations but not others.
+ * Another case is fallback vfuncs that don't do anything with their params.
+ *
+ * Note that this should not be used for identifiers used in `assert()`;
+ * see ASSERTED below.
+ */
+#ifdef HAVE_FUNC_ATTRIBUTE_UNUSED
+#define UNUSED __attribute__((unused))
+#else
+#define UNUSED
+#endif
+
+/**
+ * Use ASSERTED to indicate that an identifier is unused outside of an `assert()`,
+ * so that assert-free builds don't get "unused variable" warnings.
+ */
+#ifdef NDEBUG
+#define ASSERTED UNUSED
+#else
+#define ASSERTED
+#endif
+
+#ifdef HAVE_FUNC_ATTRIBUTE_WARN_UNUSED_RESULT
+#define MUST_CHECK __attribute__((warn_unused_result))
+#else
+#define MUST_CHECK
+#endif
+
+#if defined(__GNUC__)
+#define ATTRIBUTE_NOINLINE __attribute__((noinline))
+#else
+#define ATTRIBUTE_NOINLINE
+#endif
+
+
+/**
+ * Check that STRUCT::FIELD can hold MAXVAL. We use a lot of bitfields
+ * in Mesa/gallium. We have to be sure they're of sufficient size to
+ * hold the largest expected value.
+ * Note that with MSVC, enums are signed and enum bitfields need one extra
+ * high bit (always zero) to ensure the max value is handled correctly.
+ * This macro will detect that with MSVC, but not GCC.
+ */
+#define ASSERT_BITFIELD_SIZE(STRUCT, FIELD, MAXVAL) \
+ do { \
+ ASSERTED STRUCT s; \
+ s.FIELD = (MAXVAL); \
+ assert((int) s.FIELD == (MAXVAL) && "Insufficient bitfield size!"); \
+ } while (0)
+
+
+/** Compute ceiling of integer quotient of A divided by B. */
+#define DIV_ROUND_UP( A, B ) ( ((A) + (B) - 1) / (B) )
+
+/** Clamp X to [MIN,MAX]. Turn NaN into MIN, arbitrarily. */
+#define CLAMP( X, MIN, MAX ) ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )
+
+/** Minimum of two values: */
+#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
+
+/** Maximum of two values: */
+#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
+
+/** Minimum and maximum of three values: */
+#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
+#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))
+
+/** Align a value to a power of two */
+#define ALIGN_POT(x, pot_align) (((x) + (pot_align) - 1) & ~((pot_align) - 1))
+
+/**
+ * Macro for declaring an explicit conversion operator. Defaults to an
+ * implicit conversion if C++11 is not supported.
+ */
+#if __cplusplus >= 201103L
+#define EXPLICIT_CONVERSION explicit
+#elif defined(__cplusplus)
+#define EXPLICIT_CONVERSION
+#endif
+
+/** Set a single bit */
+#define BITFIELD_BIT(b) (1u << (b))
+/** Set all bits up to excluding bit b */
+#define BITFIELD_MASK(b) \
+ ((b) == 32 ? (~0u) : BITFIELD_BIT((b) % 32) - 1)
+/** Set count bits starting from bit b */
+#define BITFIELD_RANGE(b, count) \
+ (BITFIELD_MASK((b) + (count)) & ~BITFIELD_MASK(b))
+
+/** Set a single bit */
+#define BITFIELD64_BIT(b) (1ull << (b))
+/** Set all bits up to excluding bit b */
+#define BITFIELD64_MASK(b) \
+ ((b) == 64 ? (~0ull) : BITFIELD64_BIT(b) - 1)
+/** Set count bits starting from bit b */
+#define BITFIELD64_RANGE(b, count) \
+ (BITFIELD64_MASK((b) + (count)) & ~BITFIELD64_MASK(b))
+
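A few worked values for the bitfield helpers (the 32-bit macros are duplicated verbatim so the check compiles standalone):

```c
#include <assert.h>

#define BITFIELD_BIT(b) (1u << (b))
#define BITFIELD_MASK(b) ((b) == 32 ? (~0u) : BITFIELD_BIT((b) % 32) - 1)
#define BITFIELD_RANGE(b, count) \
   (BITFIELD_MASK((b) + (count)) & ~BITFIELD_MASK(b))

int main(void)
{
    assert(BITFIELD_BIT(3) == 0x8u);           /* just bit 3 */
    assert(BITFIELD_MASK(4) == 0xfu);          /* bits 0..3 */
    assert(BITFIELD_RANGE(4, 8) == 0xff0u);    /* bits 4..11 */
    assert(BITFIELD_MASK(32) == 0xffffffffu);  /* full-width special case */
    return 0;
}
```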
+/* TODO: In future we should try to move this to u_debug.h once header
+ * dependencies are reorganised to allow this.
+ */
+enum pipe_debug_type
+{
+ PIPE_DEBUG_TYPE_OUT_OF_MEMORY = 1,
+ PIPE_DEBUG_TYPE_ERROR,
+ PIPE_DEBUG_TYPE_SHADER_INFO,
+ PIPE_DEBUG_TYPE_PERF_INFO,
+ PIPE_DEBUG_TYPE_INFO,
+ PIPE_DEBUG_TYPE_FALLBACK,
+ PIPE_DEBUG_TYPE_CONFORMANCE,
+};
+
+#endif /* UTIL_MACROS_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.c b/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.c
new file mode 100644
index 0000000000..fa9284627b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.c
@@ -0,0 +1,51 @@
+/* Copyright © 2007 Carl Worth
+ * Copyright © 2009 Jeremy Huddleston, Julien Cristau, and Matthieu Herrb
+ * Copyright © 2009-2010 Mikhail Gusarov
+ * Copyright © 2012 Yaakov Selkowitz and Keith Packard
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sha1/sha1.h"
+#include "mesa-sha1.h"
+
+void
+_mesa_sha1_compute(const void *data, size_t size, unsigned char result[20])
+{
+ struct mesa_sha1 ctx;
+
+ _mesa_sha1_init(&ctx);
+ _mesa_sha1_update(&ctx, data, size);
+ _mesa_sha1_final(&ctx, result);
+}
+
+void
+_mesa_sha1_format(char *buf, const unsigned char *sha1)
+{
+ static const char hex_digits[] = "0123456789abcdef";
+ int i;
+
+ for (i = 0; i < 40; i += 2) {
+ buf[i] = hex_digits[sha1[i >> 1] >> 4];
+ buf[i + 1] = hex_digits[sha1[i >> 1] & 0x0f];
+ }
+ buf[i] = '\0';
+}
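Typical usage of the two helpers, with the expected digest taken from the self-test vectors in mesa-sha1_test.c below (assumes the bundled SHA-1 implementation is linked in; note the format buffer needs 41 bytes: 40 hex digits plus the terminating NUL):

```c
#include <stdio.h>
#include <string.h>
#include "util/mesa-sha1.h"

int main(void)
{
    unsigned char sha1[20];
    char hex[41];

    _mesa_sha1_compute("Mesa Rocks! 273", strlen("Mesa Rocks! 273"), sha1);
    _mesa_sha1_format(hex, sha1);
    printf("%s\n", hex); /* 7fb99737373d65a73f049cdabc01e73aa6bc60f3 */
    return 0;
}
```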
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.h b/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.h
new file mode 100644
index 0000000000..bde50ba1eb
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1.h
@@ -0,0 +1,64 @@
+/* Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef MESA_SHA1_H
+#define MESA_SHA1_H
+
+#include <stdlib.h>
+#include "c99_compat.h"
+#include "sha1/sha1.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define mesa_sha1 _SHA1_CTX
+
+static inline void
+_mesa_sha1_init(struct mesa_sha1 *ctx)
+{
+ SHA1Init(ctx);
+}
+
+static inline void
+_mesa_sha1_update(struct mesa_sha1 *ctx, const void *data, size_t size)
+{
+ SHA1Update(ctx, (const unsigned char *)data, size);
+}
+
+static inline void
+_mesa_sha1_final(struct mesa_sha1 *ctx, unsigned char result[20])
+{
+ SHA1Final(result, ctx);
+}
+
+void
+_mesa_sha1_format(char *buf, const unsigned char *sha1);
+
+void
+_mesa_sha1_compute(const void *data, size_t size, unsigned char result[20]);
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1_test.c b/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1_test.c
new file mode 100644
index 0000000000..9b3b477c7f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/mesa-sha1_test.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include "macros.h"
+#include "mesa-sha1.h"
+
+#define SHA1_LENGTH 40
+
+int main(int argc, char *argv[])
+{
+ static const struct {
+ const char *string;
+ const char *sha1;
+ } test_data[] = {
+ {"Mesa Rocks! 273", "7fb99737373d65a73f049cdabc01e73aa6bc60f3"},
+ {"Mesa Rocks! 300", "b2180263e37d3bed6a4be0afe41b1a82ebbcf4c3"},
+ {"Mesa Rocks! 583", "7fb9734108a62503e8a149c1051facd7fb112d05"},
+ };
+
+ bool failed = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(test_data); i++) {
+ unsigned char sha1[20];
+ _mesa_sha1_compute(test_data[i].string, strlen(test_data[i].string),
+ sha1);
+
+ char buf[41];
+ _mesa_sha1_format(buf, sha1);
+
+ if (memcmp(test_data[i].sha1, buf, SHA1_LENGTH) != 0) {
+ printf("For string \"%s\", length %zu:\n"
+ "\tExpected: %s\n\t Got: %s\n",
+ test_data[i].string, strlen(test_data[i].string),
+ test_data[i].sha1, buf);
+ failed = true;
+ }
+ }
+
+ return failed;
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory.h b/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory.h
new file mode 100644
index 0000000000..b191cf2058
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory.h
@@ -0,0 +1,74 @@
+/**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * OS memory management abstractions
+ */
+
+
+#ifndef _OS_MEMORY_H_
+#define _OS_MEMORY_H_
+
+#if defined(EMBEDDED_DEVICE)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *
+os_malloc(size_t size);
+
+void *
+os_calloc(size_t count, size_t size);
+
+void
+os_free(void *ptr);
+
+void *
+os_realloc(void *ptr, size_t old_size, size_t new_size);
+
+void *
+os_malloc_aligned(size_t size, size_t alignment);
+
+void
+os_free_aligned(void *ptr);
+
+void *
+os_realloc_aligned(void *ptr, size_t oldsize, size_t newsize, size_t alignment);
+
+#ifdef __cplusplus
+}
+#endif
+
+#else
+
+# include "os_memory_stdc.h"
+
+#endif
+
+#endif /* _OS_MEMORY_H_ */
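
os_memory.h only routes between the two implementations: embedded builds
supply the out-of-line functions declared above, while everything else
falls through to the stdc macros. A short sketch of the caller-facing API
(hypothetical demo() helper, not part of the patch):

#include "os_memory.h"

void
demo(void)
{
   /* 64-byte-aligned buffer for 16 floats. */
   float *buf = (float *) os_malloc_aligned(16 * sizeof(float), 64);
   if (!buf)
      return;

   /* Grow it; both the old and new sizes are passed so the fallback
    * implementation knows how much to copy. */
   buf = (float *) os_realloc_aligned(buf, 16 * sizeof(float),
                                      32 * sizeof(float), 64);

   os_free_aligned(buf);   /* aligned allocations need the aligned free */
}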
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_aligned.h b/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_aligned.h
new file mode 100644
index 0000000000..08f12062a7
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_aligned.h
@@ -0,0 +1,128 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * Memory alignment wrappers.
+ */
+
+
+#ifndef _OS_MEMORY_H_
+#error "Must not be included directly. Include os_memory.h instead"
+#endif
+
+
+/**
+ * Add two size_t values with integer overflow check.
+ * TODO: leverage __builtin_add_overflow where available
+ */
+static inline bool
+add_overflow_size_t(size_t a, size_t b, size_t *res)
+{
+ *res = a + b;
+ return *res < a || *res < b;
+}
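+
+/* A hedged sketch for the TODO above (not part of this file): with
+ * GCC 5+ or Clang, the same check could be written as
+ *
+ *    static inline bool
+ *    add_overflow_size_t(size_t a, size_t b, size_t *res)
+ *    {
+ *       return __builtin_add_overflow(a, b, res);
+ *    }
+ */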
+
+
+#if defined(HAVE_POSIX_MEMALIGN)
+
+static inline void *
+os_malloc_aligned(size_t size, size_t alignment)
+{
+ void *ptr;
+ alignment = (alignment + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
+ if(posix_memalign(&ptr, alignment, size) != 0)
+ return NULL;
+ return ptr;
+}
+
+#define os_free_aligned(_ptr) free(_ptr)
+
+#else
+
+/**
+ * Return memory on given byte alignment
+ */
+static inline void *
+os_malloc_aligned(size_t size, size_t alignment)
+{
+ char *ptr, *buf;
+ size_t alloc_size;
+
+ /*
+ * Calculate
+ *
+ * alloc_size = size + alignment + sizeof(void *)
+ *
+ * while checking for overflow.
+ */
+ if (add_overflow_size_t(size, alignment, &alloc_size) ||
+ add_overflow_size_t(alloc_size, sizeof(void *), &alloc_size)) {
+ return NULL;
+ }
+
+ ptr = (char *) os_malloc(alloc_size);
+ if (!ptr)
+ return NULL;
+
+ buf = (char *)(((uintptr_t)ptr + sizeof(void *) + alignment - 1) & ~((uintptr_t)(alignment - 1)));
+ *(char **)(buf - sizeof(void *)) = ptr;
+
+ return buf;
+}
+
+
+/**
+ * Free memory returned by os_malloc_aligned().
+ */
+static inline void
+os_free_aligned(void *ptr)
+{
+ if (ptr) {
+ void **cubbyHole = (void **) ((char *) ptr - sizeof(void *));
+ void *realAddr = *cubbyHole;
+ os_free(realAddr);
+ }
+}
+
+#endif
+
+/**
+ * Reallocate memory, with alignment.
+ */
+static inline void *
+os_realloc_aligned(void *ptr, size_t oldsize, size_t newsize, size_t alignment)
+{
+ const size_t copySize = MIN2(oldsize, newsize);
+ void *newBuf = os_malloc_aligned(newsize, alignment);
+ if (newBuf && ptr && copySize > 0) {
+ memcpy(newBuf, ptr, copySize);
+ }
+
+ os_free_aligned(ptr);
+ return newBuf;
+}
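
The fallback path above over-allocates, aligns the returned address, and
stashes the original os_malloc() pointer in a "cubbyhole" directly before
the aligned block, where os_free_aligned() reads it back. A worked example
of the arithmetic, with hypothetical addresses on a 64-bit target:

/* size = 100, alignment = 16, sizeof(void *) = 8:
 *
 *    alloc_size = 100 + 16 + 8 = 124       (overflow-checked above)
 *    ptr = os_malloc(124)                  -> say 0x1004 (unaligned)
 *    buf = (0x1004 + 8 + 15) & ~15         -> 0x1010 (16-byte aligned)
 *    *(char **)(0x1010 - 8) = 0x1004       (the cubbyhole)
 *
 * os_free_aligned(0x1010) then loads 0x1004 from the cubbyhole at
 * 0x1008 and hands it to os_free().
 */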
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_stdc.h b/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_stdc.h
new file mode 100644
index 0000000000..bda5715998
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/os_memory_stdc.h
@@ -0,0 +1,60 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * OS memory management abstractions for the standard C library.
+ */
+
+
+#ifndef _OS_MEMORY_H_
+#error "Must not be included directly. Include os_memory.h instead"
+#endif
+
+#include <stdlib.h>
+
+
+#define os_malloc(_size) malloc(_size)
+#define os_calloc(_count, _size ) calloc(_count, _size )
+#define os_free(_ptr) free(_ptr)
+
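+/* Note: the "+ 0*(_old_size)" term below presumably exists only to
+ * reference _old_size, keeping the same four-argument shape as the
+ * aligned variants without leaving callers' old-size variables set but
+ * unused; it does not change the size passed to realloc().
+ */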
+#define os_realloc( _old_ptr, _old_size, _new_size) \
+ realloc(_old_ptr, _new_size + 0*(_old_size))
+
+#if DETECT_OS_WINDOWS
+
+#include <malloc.h>
+
+#define os_malloc_aligned(_size, _align) _aligned_malloc(_size, _align)
+#define os_free_aligned(_ptr) _aligned_free(_ptr)
+#define os_realloc_aligned(_ptr, _oldsize, _newsize, _alignment) _aligned_realloc(_ptr, _newsize, _alignment)
+
+#else
+
+#include "os_memory_aligned.h"
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.c b/third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.c
new file mode 100644
index 0000000000..e6894731b6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.c
@@ -0,0 +1,184 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "os_misc.h"
+
+#include <stdarg.h>
+
+
+#if DETECT_OS_WINDOWS
+
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers
+#endif
+#include <windows.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#else
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#endif
+
+
+#if DETECT_OS_ANDROID
+# define LOG_TAG "MESA"
+# include <unistd.h>
+# include <log/log.h>
+#elif DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS || DETECT_OS_HURD
+# include <unistd.h>
+#elif DETECT_OS_APPLE || DETECT_OS_BSD
+# include <sys/sysctl.h>
+#elif DETECT_OS_HAIKU
+# include <kernel/OS.h>
+#elif DETECT_OS_WINDOWS
+# include <windows.h>
+#else
+#error unexpected platform in os_sysinfo.c
+#endif
+
+
+void
+os_log_message(const char *message)
+{
+ /* If the GALLIUM_LOG_FILE environment variable is set to a valid filename,
+ * write all messages to that file.
+ */
+ static FILE *fout = NULL;
+
+ if (!fout) {
+#ifdef DEBUG
+ /* one-time init */
+ const char *filename = os_get_option("GALLIUM_LOG_FILE");
+ if (filename) {
+ const char *mode = "w";
+ if (filename[0] == '+') {
+ /* If the filename is prefixed with '+' then open the file for
+ * appending instead of normal writing.
+ */
+ mode = "a";
+ filename++; /* skip the '+' */
+ }
+ fout = fopen(filename, mode);
+ }
+#endif
+ if (!fout)
+ fout = stderr;
+ }
+
+#if DETECT_OS_WINDOWS
+ OutputDebugStringA(message);
+ if(GetConsoleWindow() && !IsDebuggerPresent()) {
+ fflush(stdout);
+ fputs(message, fout);
+ fflush(fout);
+ }
+ else if (fout != stderr) {
+ fputs(message, fout);
+ fflush(fout);
+ }
+#else /* !DETECT_OS_WINDOWS */
+ fflush(stdout);
+ fputs(message, fout);
+ fflush(fout);
+# if DETECT_OS_ANDROID
+ LOG_PRI(ANDROID_LOG_ERROR, LOG_TAG, "%s", message);
+# endif
+#endif
+}
+
+
+#if !defined(EMBEDDED_DEVICE)
+const char *
+os_get_option(const char *name)
+{
+ return getenv(name);
+}
+#endif /* !EMBEDDED_DEVICE */
+
+
+/**
+ * Return the size of the total physical memory.
+ * \param size returns the size of the total physical memory
+ * \return true for success, or false on failure
+ */
+bool
+os_get_total_physical_memory(uint64_t *size)
+{
+#if DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS || DETECT_OS_HURD
+ const long phys_pages = sysconf(_SC_PHYS_PAGES);
+ const long page_size = sysconf(_SC_PAGE_SIZE);
+
+ if (phys_pages <= 0 || page_size <= 0)
+ return false;
+
+ *size = (uint64_t)phys_pages * (uint64_t)page_size;
+ return true;
+#elif DETECT_OS_APPLE || DETECT_OS_BSD
+ size_t len = sizeof(*size);
+ int mib[2];
+
+ mib[0] = CTL_HW;
+#if DETECT_OS_APPLE
+ mib[1] = HW_MEMSIZE;
+#elif DETECT_OS_NETBSD || DETECT_OS_OPENBSD
+ mib[1] = HW_PHYSMEM64;
+#elif DETECT_OS_FREEBSD
+ mib[1] = HW_REALMEM;
+#elif DETECT_OS_DRAGONFLY
+ mib[1] = HW_PHYSMEM;
+#else
+#error Unsupported *BSD
+#endif
+
+ return (sysctl(mib, 2, size, &len, NULL, 0) == 0);
+#elif DETECT_OS_HAIKU
+ system_info info;
+ status_t ret;
+
+ ret = get_system_info(&info);
+ if (ret != B_OK || info.max_pages <= 0)
+ return false;
+
+ *size = (uint64_t)info.max_pages * (uint64_t)B_PAGE_SIZE;
+ return true;
+#elif DETECT_OS_WINDOWS
+ MEMORYSTATUSEX status;
+ BOOL ret;
+
+ status.dwLength = sizeof(status);
+ ret = GlobalMemoryStatusEx(&status);
+ *size = status.ullTotalPhys;
+ return (ret == TRUE);
+#else
+#error unexpected platform in os_sysinfo.c
+ return false;
+#endif
+}
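
The query above normalizes four very different platform interfaces
(sysconf, sysctl, get_system_info, GlobalMemoryStatusEx) behind one
bool-returning call. A minimal sketch of a caller (hypothetical
report_memory() helper, not part of the patch):

#include <stdio.h>
#include <inttypes.h>
#include "os_misc.h"

void
report_memory(void)
{
   uint64_t bytes;

   if (os_get_total_physical_memory(&bytes)) {
      char msg[64];
      snprintf(msg, sizeof(msg), "physical memory: %" PRIu64 " MiB\n",
               bytes / (1024 * 1024));
      os_log_message(msg);   /* honors GALLIUM_LOG_FILE in debug builds */
   }
}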
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.h b/third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.h
new file mode 100644
index 0000000000..19c8962d5d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/os_misc.h
@@ -0,0 +1,104 @@
+/**************************************************************************
+ *
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * Miscellaneous OS services.
+ */
+
+
+#ifndef _OS_MISC_H_
+#define _OS_MISC_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "detect_os.h"
+
+
+#if DETECT_OS_UNIX
+# include <signal.h> /* for kill() */
+# include <unistd.h> /* getpid() */
+#endif
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/*
+ * Trap into the debugger.
+ */
+#if (defined(PIPE_ARCH_X86) || defined(PIPE_ARCH_X86_64)) && defined(PIPE_CC_GCC)
+# define os_break() __asm("int3")
+#elif defined(PIPE_CC_MSVC)
+# define os_break() __debugbreak()
+#elif DETECT_OS_UNIX
+# define os_break() kill(getpid(), SIGTRAP)
+#else
+# define os_break() abort()
+#endif
+
+
+/*
+ * Abort the program.
+ */
+#if defined(DEBUG)
+# define os_abort() do { os_break(); abort(); } while(0)
+#else
+# define os_abort() abort()
+#endif
+
+
+/*
+ * Output a message. Message should preferably end in a newline.
+ */
+void
+os_log_message(const char *message);
+
+
+/*
+ * Get an option. Should return NULL if specified option is not set.
+ */
+const char *
+os_get_option(const char *name);
+
+
+/*
+ * Get the total amount of physical memory available on the system.
+ */
+bool
+os_get_total_physical_memory(uint64_t *size);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _OS_MISC_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/os_time.h b/third_party/rust/glslopt/glsl-optimizer/src/util/os_time.h
new file mode 100644
index 0000000000..049ab118db
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/os_time.h
@@ -0,0 +1,130 @@
+/**************************************************************************
+ *
+ * Copyright 2008-2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * OS independent time-manipulation functions.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef _OS_TIME_H_
+#define _OS_TIME_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* must be equal to PIPE_TIMEOUT_INFINITE */
+#define OS_TIMEOUT_INFINITE 0xffffffffffffffffull
+
+/*
+ * Get the current time in nanoseconds from an unknown base.
+ */
+int64_t
+os_time_get_nano(void);
+
+
+/*
+ * Get the current time in microseconds from an unknown base.
+ */
+static inline int64_t
+os_time_get(void)
+{
+ return os_time_get_nano() / 1000;
+}
+
+
+/*
+ * Sleep.
+ */
+void
+os_time_sleep(int64_t usecs);
+
+
+/*
+ * Helper function for detecting timeouts, taking overflow into account.
+ *
+ * Returns true if the current time has elapsed beyond the specified interval.
+ */
+static inline bool
+os_time_timeout(int64_t start,
+ int64_t end,
+ int64_t curr)
+{
+ if (start <= end)
+ return !(start <= curr && curr < end);
+ else
+ return !((start <= curr) || (curr < end));
+}
+
+
+/**
+ * Convert a relative timeout in nanoseconds into an absolute timeout;
+ * in other words, return current time + timeout.
+ * os_time_get_nano() must be monotonic.
+ * OS_TIMEOUT_INFINITE is passed through unchanged. If the calculation
+ * overflows, OS_TIMEOUT_INFINITE is returned.
+ */
+int64_t
+os_time_get_absolute_timeout(uint64_t timeout);
+
+
+/**
+ * Wait until the variable at the given memory location is zero.
+ *
+ * \param var variable
+ * \param timeout timeout in ns, can be anything from 0 (no wait) to
+ * OS_TIMEOUT_INFINITE (wait forever)
+ * \return true if the variable is zero
+ */
+bool
+os_wait_until_zero(volatile int *var, uint64_t timeout);
+
+
+/**
+ * Wait until the variable at the given memory location is zero.
+ * The timeout is the absolute time when the waiting should stop. If it is
+ * less than or equal to the current time, it only returns the status and
+ * doesn't wait. OS_TIMEOUT_INFINITE waits forever. This requires that
+ * os_time_get_nano is monotonic.
+ *
+ * \param var variable
+ * \param timeout the time in ns when the waiting should stop
+ * \return true if the variable is zero
+ */
+bool
+os_wait_until_zero_abs_timeout(volatile int *var, int64_t timeout);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _OS_TIME_H_ */
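
os_time_timeout() above treats [start, end) as the "still waiting" window
and handles the case where the deadline wrapped past INT64_MAX. A small
sketch of both cases (hypothetical timeout_examples(), assuming only this
header):

#include <assert.h>
#include <stdint.h>
#include "os_time.h"

void
timeout_examples(void)
{
   /* Normal window: waiting between t=100 and t=200. */
   assert(!os_time_timeout(100, 200, 150));   /* inside the window */
   assert(os_time_timeout(100, 200, 250));    /* deadline passed */

   /* Wrapped window: end overflowed and is now numerically below start. */
   assert(!os_time_timeout(INT64_MAX - 5, INT64_MIN + 5, INT64_MAX - 1));
   assert(os_time_timeout(INT64_MAX - 5, INT64_MIN + 5, 0));
}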
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.c b/third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.c
new file mode 100644
index 0000000000..7b7b018558
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.c
@@ -0,0 +1,921 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+
+/* Some versions of MinGW are missing _vscprintf's declaration, although they
+ * still provide the symbol in the import library. */
+#ifdef __MINGW32__
+_CRTIMP int _vscprintf(const char *format, va_list argptr);
+#endif
+
+#include "ralloc.h"
+
+#ifndef va_copy
+#ifdef __va_copy
+#define va_copy(dest, src) __va_copy((dest), (src))
+#else
+#define va_copy(dest, src) (dest) = (src)
+#endif
+#endif
+
+#define CANARY 0x5A1106
+
+/* Align the header's size so that ralloc() allocations will return with the
+ * same alignment as a libc malloc would have (8 on 32-bit GLIBC, 16 on
+ * 64-bit), avoiding performance penalties on x86 and alignment faults on
+ * ARM.
+ */
+struct
+#ifdef _MSC_VER
+#if _WIN64
+__declspec(align(16))
+#else
+ __declspec(align(8))
+#endif
+#elif defined(__LP64__)
+ __attribute__((aligned(16)))
+#else
+ __attribute__((aligned(8)))
+#endif
+ ralloc_header
+{
+#ifndef NDEBUG
+ /* A canary value used to determine whether a pointer is ralloc'd. */
+ unsigned canary;
+#endif
+
+ struct ralloc_header *parent;
+
+ /* The first child (head of a linked list) */
+ struct ralloc_header *child;
+
+ /* Linked list of siblings */
+ struct ralloc_header *prev;
+ struct ralloc_header *next;
+
+ void (*destructor)(void *);
+};
+
+typedef struct ralloc_header ralloc_header;
+
+static void unlink_block(ralloc_header *info);
+static void unsafe_free(ralloc_header *info);
+
+static ralloc_header *
+get_header(const void *ptr)
+{
+ ralloc_header *info = (ralloc_header *) (((char *) ptr) -
+ sizeof(ralloc_header));
+ assert(info->canary == CANARY);
+ return info;
+}
+
+#define PTR_FROM_HEADER(info) (((char *) info) + sizeof(ralloc_header))
+
+static void
+add_child(ralloc_header *parent, ralloc_header *info)
+{
+ if (parent != NULL) {
+ info->parent = parent;
+ info->next = parent->child;
+ parent->child = info;
+
+ if (info->next != NULL)
+ info->next->prev = info;
+ }
+}
+
+void *
+ralloc_context(const void *ctx)
+{
+ return ralloc_size(ctx, 0);
+}
+
+void *
+ralloc_size(const void *ctx, size_t size)
+{
+ void *block = malloc(size + sizeof(ralloc_header));
+ ralloc_header *info;
+ ralloc_header *parent;
+
+ if (unlikely(block == NULL))
+ return NULL;
+
+ info = (ralloc_header *) block;
+ /* measurements have shown that calloc is slower (because of
+ * the multiplication overflow checking?), so clear things
+ * manually
+ */
+ info->parent = NULL;
+ info->child = NULL;
+ info->prev = NULL;
+ info->next = NULL;
+ info->destructor = NULL;
+
+ parent = ctx != NULL ? get_header(ctx) : NULL;
+
+ add_child(parent, info);
+
+#ifndef NDEBUG
+ info->canary = CANARY;
+#endif
+
+ return PTR_FROM_HEADER(info);
+}
+
+void *
+rzalloc_size(const void *ctx, size_t size)
+{
+ void *ptr = ralloc_size(ctx, size);
+
+ if (likely(ptr))
+ memset(ptr, 0, size);
+
+ return ptr;
+}
+
+/* helper function - assumes ptr != NULL */
+static void *
+resize(void *ptr, size_t size)
+{
+ ralloc_header *child, *old, *info;
+
+ old = get_header(ptr);
+ info = realloc(old, size + sizeof(ralloc_header));
+
+ if (info == NULL)
+ return NULL;
+
+ /* Update parent and sibling's links to the reallocated node. */
+ if (info != old && info->parent != NULL) {
+ if (info->parent->child == old)
+ info->parent->child = info;
+
+ if (info->prev != NULL)
+ info->prev->next = info;
+
+ if (info->next != NULL)
+ info->next->prev = info;
+ }
+
+ /* Update child->parent links for all children */
+ for (child = info->child; child != NULL; child = child->next)
+ child->parent = info;
+
+ return PTR_FROM_HEADER(info);
+}
+
+void *
+reralloc_size(const void *ctx, void *ptr, size_t size)
+{
+ if (unlikely(ptr == NULL))
+ return ralloc_size(ctx, size);
+
+ assert(ralloc_parent(ptr) == ctx);
+ return resize(ptr, size);
+}
+
+void *
+rerzalloc_size(const void *ctx, void *ptr, size_t old_size, size_t new_size)
+{
+ if (unlikely(ptr == NULL))
+ return rzalloc_size(ctx, new_size);
+
+ assert(ralloc_parent(ptr) == ctx);
+ ptr = resize(ptr, new_size);
+
+ if (new_size > old_size)
+ memset((char *)ptr + old_size, 0, new_size - old_size);
+
+ return ptr;
+}
+
+void *
+ralloc_array_size(const void *ctx, size_t size, unsigned count)
+{
+ if (count > SIZE_MAX/size)
+ return NULL;
+
+ return ralloc_size(ctx, size * count);
+}
+
+void *
+rzalloc_array_size(const void *ctx, size_t size, unsigned count)
+{
+ if (count > SIZE_MAX/size)
+ return NULL;
+
+ return rzalloc_size(ctx, size * count);
+}
+
+void *
+reralloc_array_size(const void *ctx, void *ptr, size_t size, unsigned count)
+{
+ if (count > SIZE_MAX/size)
+ return NULL;
+
+ return reralloc_size(ctx, ptr, size * count);
+}
+
+void *
+rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
+ unsigned old_count, unsigned new_count)
+{
+ if (new_count > SIZE_MAX/size)
+ return NULL;
+
+ return rerzalloc_size(ctx, ptr, size * old_count, size * new_count);
+}
+
+void
+ralloc_free(void *ptr)
+{
+ ralloc_header *info;
+
+ if (ptr == NULL)
+ return;
+
+ info = get_header(ptr);
+ unlink_block(info);
+ unsafe_free(info);
+}
+
+static void
+unlink_block(ralloc_header *info)
+{
+ /* Unlink from parent & siblings */
+ if (info->parent != NULL) {
+ if (info->parent->child == info)
+ info->parent->child = info->next;
+
+ if (info->prev != NULL)
+ info->prev->next = info->next;
+
+ if (info->next != NULL)
+ info->next->prev = info->prev;
+ }
+ info->parent = NULL;
+ info->prev = NULL;
+ info->next = NULL;
+}
+
+static void
+unsafe_free(ralloc_header *info)
+{
+ /* Recursively free any children...don't waste time unlinking them. */
+ ralloc_header *temp;
+ while (info->child != NULL) {
+ temp = info->child;
+ info->child = temp->next;
+ unsafe_free(temp);
+ }
+
+ /* Free the block itself. Call the destructor first, if any. */
+ if (info->destructor != NULL)
+ info->destructor(PTR_FROM_HEADER(info));
+
+ free(info);
+}
+
+void
+ralloc_steal(const void *new_ctx, void *ptr)
+{
+ ralloc_header *info, *parent;
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ info = get_header(ptr);
+ parent = new_ctx ? get_header(new_ctx) : NULL;
+
+ unlink_block(info);
+
+ add_child(parent, info);
+}
+
+void
+ralloc_adopt(const void *new_ctx, void *old_ctx)
+{
+ ralloc_header *new_info, *old_info, *child;
+
+ if (unlikely(old_ctx == NULL))
+ return;
+
+ old_info = get_header(old_ctx);
+ new_info = get_header(new_ctx);
+
+ /* If there are no children, bail. */
+ if (unlikely(old_info->child == NULL))
+ return;
+
+ /* Set all the children's parent to new_ctx; get a pointer to the last child. */
+ for (child = old_info->child; child->next != NULL; child = child->next) {
+ child->parent = new_info;
+ }
+ child->parent = new_info;
+
+ /* Connect the two lists together; parent them to new_ctx; make old_ctx empty. */
+ child->next = new_info->child;
+ if (child->next)
+ child->next->prev = child;
+ new_info->child = old_info->child;
+ old_info->child = NULL;
+}
+
+void *
+ralloc_parent(const void *ptr)
+{
+ ralloc_header *info;
+
+ if (unlikely(ptr == NULL))
+ return NULL;
+
+ info = get_header(ptr);
+ return info->parent ? PTR_FROM_HEADER(info->parent) : NULL;
+}
+
+void
+ralloc_set_destructor(const void *ptr, void(*destructor)(void *))
+{
+ ralloc_header *info = get_header(ptr);
+ info->destructor = destructor;
+}
+
+char *
+ralloc_strdup(const void *ctx, const char *str)
+{
+ size_t n;
+ char *ptr;
+
+ if (unlikely(str == NULL))
+ return NULL;
+
+ n = strlen(str);
+ ptr = ralloc_array(ctx, char, n + 1);
+ memcpy(ptr, str, n);
+ ptr[n] = '\0';
+ return ptr;
+}
+
+char *
+ralloc_strndup(const void *ctx, const char *str, size_t max)
+{
+ size_t n;
+ char *ptr;
+
+ if (unlikely(str == NULL))
+ return NULL;
+
+ n = strnlen(str, max);
+ ptr = ralloc_array(ctx, char, n + 1);
+ memcpy(ptr, str, n);
+ ptr[n] = '\0';
+ return ptr;
+}
+
+/* helper routine for strcat/strncat - n is the exact amount to copy */
+static bool
+cat(char **dest, const char *str, size_t n)
+{
+ char *both;
+ size_t existing_length;
+ assert(dest != NULL && *dest != NULL);
+
+ existing_length = strlen(*dest);
+ both = resize(*dest, existing_length + n + 1);
+ if (unlikely(both == NULL))
+ return false;
+
+ memcpy(both + existing_length, str, n);
+ both[existing_length + n] = '\0';
+
+ *dest = both;
+ return true;
+}
+
+
+bool
+ralloc_strcat(char **dest, const char *str)
+{
+ return cat(dest, str, strlen(str));
+}
+
+bool
+ralloc_strncat(char **dest, const char *str, size_t n)
+{
+ return cat(dest, str, strnlen(str, n));
+}
+
+bool
+ralloc_str_append(char **dest, const char *str,
+ size_t existing_length, size_t str_size)
+{
+ char *both;
+ assert(dest != NULL && *dest != NULL);
+
+ both = resize(*dest, existing_length + str_size + 1);
+ if (unlikely(both == NULL))
+ return false;
+
+ memcpy(both + existing_length, str, str_size);
+ both[existing_length + str_size] = '\0';
+
+ *dest = both;
+
+ return true;
+}
+
+char *
+ralloc_asprintf(const void *ctx, const char *fmt, ...)
+{
+ char *ptr;
+ va_list args;
+ va_start(args, fmt);
+ ptr = ralloc_vasprintf(ctx, fmt, args);
+ va_end(args);
+ return ptr;
+}
+
+size_t
+printf_length(const char *fmt, va_list untouched_args)
+{
+ int size;
+ char junk;
+
+ /* Make a copy of the va_list so the original caller can still use it */
+ va_list args;
+ va_copy(args, untouched_args);
+
+#ifdef _WIN32
+ /* We need to use _vscprintf to calculate the size as vsnprintf returns -1
+ * if the number of characters to write is greater than count.
+ */
+ size = _vscprintf(fmt, args);
+ (void)junk;
+#else
+ size = vsnprintf(&junk, 1, fmt, args);
+#endif
+ assert(size >= 0);
+
+ va_end(args);
+
+ return size;
+}
+
+char *
+ralloc_vasprintf(const void *ctx, const char *fmt, va_list args)
+{
+ size_t size = printf_length(fmt, args) + 1;
+
+ char *ptr = ralloc_size(ctx, size);
+ if (ptr != NULL)
+ vsnprintf(ptr, size, fmt, args);
+
+ return ptr;
+}
+
+bool
+ralloc_asprintf_append(char **str, const char *fmt, ...)
+{
+ bool success;
+ va_list args;
+ va_start(args, fmt);
+ success = ralloc_vasprintf_append(str, fmt, args);
+ va_end(args);
+ return success;
+}
+
+bool
+ralloc_vasprintf_append(char **str, const char *fmt, va_list args)
+{
+ size_t existing_length;
+ assert(str != NULL);
+ existing_length = *str ? strlen(*str) : 0;
+ return ralloc_vasprintf_rewrite_tail(str, &existing_length, fmt, args);
+}
+
+bool
+ralloc_asprintf_rewrite_tail(char **str, size_t *start, const char *fmt, ...)
+{
+ bool success;
+ va_list args;
+ va_start(args, fmt);
+ success = ralloc_vasprintf_rewrite_tail(str, start, fmt, args);
+ va_end(args);
+ return success;
+}
+
+bool
+ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
+ va_list args)
+{
+ size_t new_length;
+ char *ptr;
+
+ assert(str != NULL);
+
+ if (unlikely(*str == NULL)) {
+ // Assuming a NULL context is probably bad, but it's expected behavior.
+ *str = ralloc_vasprintf(NULL, fmt, args);
+ *start = strlen(*str);
+ return true;
+ }
+
+ new_length = printf_length(fmt, args);
+
+ ptr = resize(*str, *start + new_length + 1);
+ if (unlikely(ptr == NULL))
+ return false;
+
+ vsnprintf(ptr + *start, new_length + 1, fmt, args);
+ *str = ptr;
+ *start += new_length;
+ return true;
+}
+
+/***************************************************************************
+ * Linear allocator for short-lived allocations.
+ ***************************************************************************
+ *
+ * The allocator consists of a parent node (2K buffer), which requires
+ * a ralloc parent, and child nodes (allocations). Child nodes can't be freed
+ * directly, because the parent doesn't track them. You have to release
+ * the parent node in order to release all its children.
+ *
+ * The allocator uses a fixed-size buffer with a monotonically increasing
+ * offset after each allocation. When the buffer is exhausted, another buffer
+ * is allocated, sharing the same ralloc parent, so all buffers are at
+ * the same level in the ralloc hierarchy.
+ *
+ * The linear parent node is always the first buffer and keeps track of all
+ * other buffers.
+ */
+
+#define MIN_LINEAR_BUFSIZE 2048
+#define SUBALLOC_ALIGNMENT 8
+#define LMAGIC 0x87b9c7d3
+
+struct
+#ifdef _MSC_VER
+ __declspec(align(8))
+#elif defined(__LP64__)
+ __attribute__((aligned(16)))
+#else
+ __attribute__((aligned(8)))
+#endif
+ linear_header {
+#ifndef NDEBUG
+ unsigned magic; /* for debugging */
+#endif
+ unsigned offset; /* points to the first unused byte in the buffer */
+ unsigned size; /* size of the buffer */
+ void *ralloc_parent; /* new buffers will use this */
+ struct linear_header *next; /* next buffer if we have more */
+ struct linear_header *latest; /* the only buffer that has free space */
+
+ /* After this structure, the buffer begins.
+ * Each suballocation consists of linear_size_chunk as its header followed
+ * by the suballocation, so it goes:
+ *
+ * - linear_size_chunk
+ * - allocated space
+ * - linear_size_chunk
+ * - allocated space
+ * etc.
+ *
+ * linear_size_chunk is only needed by linear_realloc.
+ */
+};
+
+struct linear_size_chunk {
+ unsigned size; /* for realloc */
+ unsigned _padding;
+};
+
+typedef struct linear_header linear_header;
+typedef struct linear_size_chunk linear_size_chunk;
+
+#define LINEAR_PARENT_TO_HEADER(parent) \
+ (linear_header*) \
+ ((char*)(parent) - sizeof(linear_size_chunk) - sizeof(linear_header))
+
+/* Allocate the linear buffer with its header. */
+static linear_header *
+create_linear_node(void *ralloc_ctx, unsigned min_size)
+{
+ linear_header *node;
+
+ min_size += sizeof(linear_size_chunk);
+
+ if (likely(min_size < MIN_LINEAR_BUFSIZE))
+ min_size = MIN_LINEAR_BUFSIZE;
+
+ node = ralloc_size(ralloc_ctx, sizeof(linear_header) + min_size);
+ if (unlikely(!node))
+ return NULL;
+
+#ifndef NDEBUG
+ node->magic = LMAGIC;
+#endif
+ node->offset = 0;
+ node->size = min_size;
+ node->ralloc_parent = ralloc_ctx;
+ node->next = NULL;
+ node->latest = node;
+ return node;
+}
+
+void *
+linear_alloc_child(void *parent, unsigned size)
+{
+ linear_header *first = LINEAR_PARENT_TO_HEADER(parent);
+ linear_header *latest = first->latest;
+ linear_header *new_node;
+ linear_size_chunk *ptr;
+ unsigned full_size;
+
+ assert(first->magic == LMAGIC);
+ assert(!latest->next);
+
+ size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);
+ full_size = sizeof(linear_size_chunk) + size;
+
+ if (unlikely(latest->offset + full_size > latest->size)) {
+ /* allocate a new node */
+ new_node = create_linear_node(latest->ralloc_parent, size);
+ if (unlikely(!new_node))
+ return NULL;
+
+ first->latest = new_node;
+ latest->latest = new_node;
+ latest->next = new_node;
+ latest = new_node;
+ }
+
+ ptr = (linear_size_chunk *)((char*)&latest[1] + latest->offset);
+ ptr->size = size;
+ latest->offset += full_size;
+
+ assert((uintptr_t)&ptr[1] % SUBALLOC_ALIGNMENT == 0);
+ return &ptr[1];
+}
+
+void *
+linear_alloc_parent(void *ralloc_ctx, unsigned size)
+{
+ linear_header *node;
+
+ if (unlikely(!ralloc_ctx))
+ return NULL;
+
+ size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);
+
+ node = create_linear_node(ralloc_ctx, size);
+ if (unlikely(!node))
+ return NULL;
+
+ return linear_alloc_child((char*)node +
+ sizeof(linear_header) +
+ sizeof(linear_size_chunk), size);
+}
+
+void *
+linear_zalloc_child(void *parent, unsigned size)
+{
+ void *ptr = linear_alloc_child(parent, size);
+
+ if (likely(ptr))
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+void *
+linear_zalloc_parent(void *parent, unsigned size)
+{
+ void *ptr = linear_alloc_parent(parent, size);
+
+ if (likely(ptr))
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+void
+linear_free_parent(void *ptr)
+{
+ linear_header *node;
+
+ if (unlikely(!ptr))
+ return;
+
+ node = LINEAR_PARENT_TO_HEADER(ptr);
+ assert(node->magic == LMAGIC);
+
+ while (node) {
+ void *ptr = node;
+
+ node = node->next;
+ ralloc_free(ptr);
+ }
+}
+
+void
+ralloc_steal_linear_parent(void *new_ralloc_ctx, void *ptr)
+{
+ linear_header *node;
+
+ if (unlikely(!ptr))
+ return;
+
+ node = LINEAR_PARENT_TO_HEADER(ptr);
+ assert(node->magic == LMAGIC);
+
+ while (node) {
+ ralloc_steal(new_ralloc_ctx, node);
+ node->ralloc_parent = new_ralloc_ctx;
+ node = node->next;
+ }
+}
+
+void *
+ralloc_parent_of_linear_parent(void *ptr)
+{
+ linear_header *node = LINEAR_PARENT_TO_HEADER(ptr);
+ assert(node->magic == LMAGIC);
+ return node->ralloc_parent;
+}
+
+void *
+linear_realloc(void *parent, void *old, unsigned new_size)
+{
+ unsigned old_size = 0;
+ void *new_ptr;
+
+ new_ptr = linear_alloc_child(parent, new_size);
+
+ if (unlikely(!old))
+ return new_ptr;
+
+ old_size = ((linear_size_chunk*)old)[-1].size;
+
+ if (likely(new_ptr && old_size))
+ memcpy(new_ptr, old, MIN2(old_size, new_size));
+
+ return new_ptr;
+}
+
+/* All code below is pretty much copied from ralloc and only the alloc
+ * calls are different.
+ */
+
+char *
+linear_strdup(void *parent, const char *str)
+{
+ unsigned n;
+ char *ptr;
+
+ if (unlikely(!str))
+ return NULL;
+
+ n = strlen(str);
+ ptr = linear_alloc_child(parent, n + 1);
+ if (unlikely(!ptr))
+ return NULL;
+
+ memcpy(ptr, str, n);
+ ptr[n] = '\0';
+ return ptr;
+}
+
+char *
+linear_asprintf(void *parent, const char *fmt, ...)
+{
+ char *ptr;
+ va_list args;
+ va_start(args, fmt);
+ ptr = linear_vasprintf(parent, fmt, args);
+ va_end(args);
+ return ptr;
+}
+
+char *
+linear_vasprintf(void *parent, const char *fmt, va_list args)
+{
+ unsigned size = printf_length(fmt, args) + 1;
+
+ char *ptr = linear_alloc_child(parent, size);
+ if (ptr != NULL)
+ vsnprintf(ptr, size, fmt, args);
+
+ return ptr;
+}
+
+bool
+linear_asprintf_append(void *parent, char **str, const char *fmt, ...)
+{
+ bool success;
+ va_list args;
+ va_start(args, fmt);
+ success = linear_vasprintf_append(parent, str, fmt, args);
+ va_end(args);
+ return success;
+}
+
+bool
+linear_vasprintf_append(void *parent, char **str, const char *fmt, va_list args)
+{
+ size_t existing_length;
+ assert(str != NULL);
+ existing_length = *str ? strlen(*str) : 0;
+ return linear_vasprintf_rewrite_tail(parent, str, &existing_length, fmt, args);
+}
+
+bool
+linear_asprintf_rewrite_tail(void *parent, char **str, size_t *start,
+ const char *fmt, ...)
+{
+ bool success;
+ va_list args;
+ va_start(args, fmt);
+ success = linear_vasprintf_rewrite_tail(parent, str, start, fmt, args);
+ va_end(args);
+ return success;
+}
+
+bool
+linear_vasprintf_rewrite_tail(void *parent, char **str, size_t *start,
+ const char *fmt, va_list args)
+{
+ size_t new_length;
+ char *ptr;
+
+ assert(str != NULL);
+
+ if (unlikely(*str == NULL)) {
+ *str = linear_vasprintf(parent, fmt, args);
+ *start = strlen(*str);
+ return true;
+ }
+
+ new_length = printf_length(fmt, args);
+
+ ptr = linear_realloc(parent, *str, *start + new_length + 1);
+ if (unlikely(ptr == NULL))
+ return false;
+
+ vsnprintf(ptr + *start, new_length + 1, fmt, args);
+ *str = ptr;
+ *start += new_length;
+ return true;
+}
+
+/* helper routine for strcat/strncat - n is the exact amount to copy */
+static bool
+linear_cat(void *parent, char **dest, const char *str, unsigned n)
+{
+ char *both;
+ unsigned existing_length;
+ assert(dest != NULL && *dest != NULL);
+
+ existing_length = strlen(*dest);
+ both = linear_realloc(parent, *dest, existing_length + n + 1);
+ if (unlikely(both == NULL))
+ return false;
+
+ memcpy(both + existing_length, str, n);
+ both[existing_length + n] = '\0';
+
+ *dest = both;
+ return true;
+}
+
+bool
+linear_strcat(void *parent, char **dest, const char *str)
+{
+ return linear_cat(parent, dest, str, strlen(str));
+}
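
A compact sketch of how the two allocators in this file compose
(hypothetical demo(), not from the patch): a ralloc context owns
everything, a linear parent carved out of it serves short-lived
suballocations, and a single ralloc_free() tears the whole tree down.

#include "ralloc.h"

void
demo(void)
{
   /* Root context with no storage of its own. */
   void *mem_ctx = ralloc_context(NULL);

   /* Hierarchical allocations: freeing mem_ctx frees these too. */
   char *name = ralloc_strdup(mem_ctx, "shader");
   int *vals = ralloc_array(mem_ctx, int, 16);
   (void) name;
   (void) vals;

   /* Linear sub-arena for short-lived suballocations. */
   void *lin = linear_alloc_parent(mem_ctx, 0);
   char *tmp = linear_asprintf(lin, "%s_%d", "pass", 3);
   (void) tmp;

   /* One call releases the entire tree, linear buffers included. */
   ralloc_free(mem_ctx);
}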
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.h b/third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.h
new file mode 100644
index 0000000000..e84ba0f8c6
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/ralloc.h
@@ -0,0 +1,609 @@
+/*
+ * Copyright © 2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file ralloc.h
+ *
+ * ralloc: a recursive memory allocator
+ *
+ * The ralloc memory allocator creates a hierarchy of allocated
+ * objects. Every allocation is in reference to some parent, and
+ * every allocated object can in turn be used as the parent of a
+ * subsequent allocation. This allows for extremely convenient
+ * discarding of an entire tree/sub-tree of allocations by calling
+ * ralloc_free on any particular object to free it and all of its
+ * children.
+ *
+ * The conceptual working of ralloc was directly inspired by Andrew
+ * Tridgell's talloc, but ralloc is an independent implementation
+ * released under the MIT license and tuned for Mesa.
+ *
+ * talloc is more sophisticated than ralloc in that it includes reference
+ * counting and useful debugging features. However, it is released under
+ * a non-permissive open source license.
+ */
+
+#ifndef RALLOC_H
+#define RALLOC_H
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <stdbool.h>
+
+#include "macros.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \def ralloc(ctx, type)
+ * Allocate a new object chained off of the given context.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) ralloc_size(ctx, sizeof(type)))
+ * \endcode
+ */
+#define ralloc(ctx, type) ((type *) ralloc_size(ctx, sizeof(type)))
+
+/**
+ * \def rzalloc(ctx, type)
+ * Allocate a new object out of the given context and initialize it to zero.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) rzalloc_size(ctx, sizeof(type)))
+ * \endcode
+ */
+#define rzalloc(ctx, type) ((type *) rzalloc_size(ctx, sizeof(type)))
+
+/**
+ * Allocate a new ralloc context.
+ *
+ * While any ralloc'd pointer can be used as a context, sometimes it is useful
+ * to simply allocate a context with no associated memory.
+ *
+ * It is equivalent to:
+ * \code
+ * ((type *) ralloc_size(ctx, 0))
+ * \endcode
+ */
+void *ralloc_context(const void *ctx);
+
+/**
+ * Allocate memory chained off of the given context.
+ *
+ * This is the core allocation routine which is used by all others. It
+ * simply allocates storage for \p size bytes and returns the pointer,
+ * similar to \c malloc.
+ */
+void *ralloc_size(const void *ctx, size_t size) MALLOCLIKE;
+
+/**
+ * Allocate zero-initialized memory chained off of the given context.
+ *
+ * This is similar to \c calloc with a size of 1.
+ */
+void *rzalloc_size(const void *ctx, size_t size) MALLOCLIKE;
+
+/**
+ * Resize a piece of ralloc-managed memory, preserving data.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the memory to be resized. May be NULL.
+ * \param size The amount of memory to allocate, in bytes.
+ */
+void *reralloc_size(const void *ctx, void *ptr, size_t size);
+
+/**
+ * Resize a ralloc-managed array, preserving data and initializing any newly
+ * allocated data to zero.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the memory to be resized. May be NULL.
+ * \param old_size The amount of memory in the previous allocation, in bytes.
+ * \param new_size The amount of memory to allocate, in bytes.
+ */
+void *rerzalloc_size(const void *ctx, void *ptr,
+ size_t old_size, size_t new_size);
+
+/// \defgroup array Array Allocators @{
+
+/**
+ * \def ralloc_array(ctx, type, count)
+ * Allocate an array of objects chained off the given context.
+ *
+ * Similar to \c calloc, but does not initialize the memory to zero.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) ralloc_array_size(ctx, sizeof(type), count))
+ * \endcode
+ */
+#define ralloc_array(ctx, type, count) \
+ ((type *) ralloc_array_size(ctx, sizeof(type), count))
+
+/**
+ * \def rzalloc_array(ctx, type, count)
+ * Allocate a zero-initialized array chained off the given context.
+ *
+ * Similar to \c calloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * This is equivalent to:
+ * \code
+ * ((type *) rzalloc_array_size(ctx, sizeof(type), count))
+ * \endcode
+ */
+#define rzalloc_array(ctx, type, count) \
+ ((type *) rzalloc_array_size(ctx, sizeof(type), count))
+
+/**
+ * \def reralloc(ctx, ptr, type, count)
+ * Resize a ralloc-managed array, preserving data.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param type The element type.
+ * \param count The number of elements to allocate.
+ */
+#define reralloc(ctx, ptr, type, count) \
+ ((type *) reralloc_array_size(ctx, ptr, sizeof(type), count))
+
+/**
+ * \def rerzalloc(ctx, ptr, type, count)
+ * Resize a ralloc-managed array, preserving data and initializing any newly
+ * allocated data to zero.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param type The element type.
+ * \param old_count The number of elements in the previous allocation.
+ * \param new_count The number of elements to allocate.
+ */
+#define rerzalloc(ctx, ptr, type, old_count, new_count) \
+ ((type *) rerzalloc_array_size(ctx, ptr, sizeof(type), old_count, new_count))
+
+/**
+ * Allocate memory for an array chained off the given context.
+ *
+ * Similar to \c calloc, but does not initialize the memory to zero.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \p size and \p count. This is necessary for security.
+ */
+void *ralloc_array_size(const void *ctx, size_t size, unsigned count) MALLOCLIKE;
+
+/**
+ * Allocate a zero-initialized array chained off the given context.
+ *
+ * Similar to \c calloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \p size and \p count. This is necessary for security.
+ */
+void *rzalloc_array_size(const void *ctx, size_t size, unsigned count) MALLOCLIKE;
+
+/**
+ * Resize a ralloc-managed array, preserving data.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param size The size of an individual element.
+ * \param count The number of elements to allocate.
+ *
+ * \return The resized array, or NULL if allocation failed.
+ */
+void *reralloc_array_size(const void *ctx, void *ptr, size_t size,
+ unsigned count);
+
+/**
+ * Resize a ralloc-managed array, preserving data and initializing any newly
+ * allocated data to zero.
+ *
+ * Similar to \c realloc. Unlike C89, passing 0 for \p size does not free the
+ * memory. Instead, it resizes it to a 0-byte ralloc context, just like
+ * calling ralloc_size(ctx, 0). This is different from talloc.
+ *
+ * More than a convenience function, this also checks for integer overflow when
+ * multiplying \c sizeof(type) and \p count. This is necessary for security.
+ *
+ * \param ctx The context to use for new allocation. If \p ptr != NULL,
+ * it must be the same as ralloc_parent(\p ptr).
+ * \param ptr Pointer to the array to be resized. May be NULL.
+ * \param size The size of an individual element.
+ * \param old_count The number of elements in the previous allocation.
+ * \param new_count The number of elements to allocate.
+ *
+ * \return The resized array, or NULL if allocation failed.
+ */
+void *rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
+ unsigned old_count, unsigned new_count);
+/// @}
+
+/**
+ * Free a piece of ralloc-managed memory.
+ *
+ * This will also free the memory of any children allocated to this context.
+ */
+void ralloc_free(void *ptr);
+
+/**
+ * "Steal" memory from one context, changing it to another.
+ *
+ * This changes \p ptr's context to \p new_ctx. This is quite useful if
+ * memory is allocated out of a temporary context.
+ */
+void ralloc_steal(const void *new_ctx, void *ptr);
+
+/**
+ * Reparent all children from one context to another.
+ *
+ * This effectively calls ralloc_steal(new_ctx, child) for all children of \p old_ctx.
+ */
+void ralloc_adopt(const void *new_ctx, void *old_ctx);
+
+/**
+ * Return the given pointer's ralloc context.
+ */
+void *ralloc_parent(const void *ptr);
+
+/**
+ * Set a callback to occur just before an object is freed.
+ */
+void ralloc_set_destructor(const void *ptr, void(*destructor)(void *));
+
+/// \defgroup string String Functions @{
+/**
+ * Duplicate a string, allocating the memory from the given context.
+ */
+char *ralloc_strdup(const void *ctx, const char *str) MALLOCLIKE;
+
+/**
+ * Duplicate a string, allocating the memory from the given context.
+ *
+ * Like \c strndup, at most \p n characters are copied. If \p str is longer
+ * than \p n characters, \p n are copied, and a terminating \c '\0' byte is added.
+ */
+char *ralloc_strndup(const void *ctx, const char *str, size_t n) MALLOCLIKE;
+
+/**
+ * Concatenate two strings, allocating the necessary space.
+ *
+ * This appends \p str to \p *dest, similar to \c strcat, reallocating
+ * \p *dest to the appropriate size. \p dest will be updated to the
+ * new pointer unless allocation fails.
+ *
+ * The result will always be null-terminated.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_strcat(char **dest, const char *str);
+
+/**
+ * Concatenate two strings, allocating the necessary space.
+ *
+ * This appends at most \p n bytes of \p str to \p *dest, reallocating
+ * \p *dest to the appropriate size. \p dest will be updated to the
+ * new pointer unless allocation fails.
+ *
+ * The result will always be null-terminated; \p str does not need to be null
+ * terminated if it is longer than \p n.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_strncat(char **dest, const char *str, size_t n);
+
+/**
+ * Concatenate two strings, allocating the necessary space.
+ *
+ * This appends \p n bytes of \p str to \p *dest, reallocating
+ * \p *dest to the appropriate size. \p dest will be updated to the
+ * new pointer unless allocation fails.
+ *
+ * The result will always be null-terminated.
+ *
+ * This function differs from ralloc_strcat() and ralloc_strncat() in that it
+ * does not do any strlen() calls which can become costly on large strings.
+ *
+ * \return True unless allocation failed.
+ */
+bool
+ralloc_str_append(char **dest, const char *str,
+ size_t existing_length, size_t str_size);
+
+/**
+ * Print to a string.
+ *
+ * This is analogous to \c sprintf, but allocates enough space (using \p ctx
+ * as the context) for the resulting string.
+ *
+ * \return The newly allocated string.
+ */
+char *ralloc_asprintf (const void *ctx, const char *fmt, ...) PRINTFLIKE(2, 3) MALLOCLIKE;
+
+/* Return the length of the string that would be generated by a printf-style
+ * format and argument list, not including the \0 byte.
+ */
+size_t printf_length(const char *fmt, va_list untouched_args);
+
+/**
+ * Print to a string, given a va_list.
+ *
+ * This is analogous to \c vsprintf, but allocates enough space (using \p ctx
+ * as the context) for the resulting string.
+ *
+ * \return The newly allocated string.
+ */
+char *ralloc_vasprintf(const void *ctx, const char *fmt, va_list args) MALLOCLIKE;
+
+/**
+ * Rewrite the tail of an existing string, starting at a given index.
+ *
+ * Overwrites the contents of *str starting at \p start with newly formatted
+ * text, including a new null-terminator. Allocates more memory as necessary.
+ *
+ * This can be used to append formatted text when the length of the existing
+ * string is already known, saving a strlen() call.
+ *
+ * \sa ralloc_asprintf_append
+ *
+ * \param str The string to be updated.
+ * \param start The index to start appending new data at.
+ * \param fmt A printf-style formatting string
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ * \p start will be increased by the length of the newly formatted text.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_asprintf_rewrite_tail(char **str, size_t *start,
+ const char *fmt, ...)
+ PRINTFLIKE(3, 4);
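+
+/* Illustrative example (not part of the original API documentation): appending
+ * repeatedly while tracking the length, so no strlen() is ever needed.  Here
+ * `mem_ctx` stands for any valid ralloc context:
+ *
+ *    size_t len = 0;
+ *    char *s = ralloc_strdup(mem_ctx, "");
+ *    ralloc_asprintf_rewrite_tail(&s, &len, "uniform %d; ", 0);
+ *    ralloc_asprintf_rewrite_tail(&s, &len, "uniform %d; ", 1);
+ *
+ * After both calls, len equals strlen(s) without strlen() ever being called.
+ */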
+
+/**
+ * Rewrite the tail of an existing string, starting at a given index.
+ *
+ * Overwrites the contents of *str starting at \p start with newly formatted
+ * text, including a new null-terminator. Allocates more memory as necessary.
+ *
+ * This can be used to append formatted text when the length of the existing
+ * string is already known, saving a strlen() call.
+ *
+ * \sa ralloc_vasprintf_append
+ *
+ * \param str The string to be updated.
+ * \param start The index to start appending new data at.
+ * \param fmt A printf-style formatting string
+ * \param args A va_list containing the data to be formatted
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ * \p start will be increased by the length of the newly formatted text.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
+ va_list args);
+
+/**
+ * Append formatted text to the supplied string.
+ *
+ * This is equivalent to
+ * \code
+ * ralloc_asprintf_rewrite_tail(str, strlen(*str), fmt, ...)
+ * \endcode
+ *
+ * \sa ralloc_asprintf
+ * \sa ralloc_asprintf_rewrite_tail
+ * \sa ralloc_strcat
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_asprintf_append(char **str, const char *fmt, ...)
+ PRINTFLIKE(2, 3);
+
+/**
+ * Append formatted text to the supplied string, given a va_list.
+ *
+ * This is equivalent to
+ * \code
+ * ralloc_vasprintf_rewrite_tail(str, strlen(*str), fmt, args)
+ * \endcode
+ *
+ * \sa ralloc_vasprintf
+ * \sa ralloc_vasprintf_rewrite_tail
+ * \sa ralloc_strcat
+ *
+ * \p str will be updated to the new pointer unless allocation fails.
+ *
+ * \return True unless allocation failed.
+ */
+bool ralloc_vasprintf_append(char **str, const char *fmt, va_list args);
+/// @}
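+
+/* Illustrative example (not part of the original API documentation): building
+ * a message with the string helpers above.  `mem_ctx`, `num_errors` and
+ * `shader_name` are placeholders, and allocation-failure checks are elided:
+ *
+ *    char *msg = ralloc_asprintf(mem_ctx, "%d errors", num_errors);
+ *    ralloc_strcat(&msg, " found");
+ *    ralloc_asprintf_append(&msg, " in %s", shader_name);
+ *
+ * Freeing mem_ctx, or calling ralloc_free(msg), releases the string.
+ */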
+
+/**
+ * Declare C++ new and delete operators which use ralloc.
+ *
+ * Placing this macro in the body of a class makes it possible to do:
+ *
+ * TYPE *var = new(mem_ctx) TYPE(...);
+ * delete var;
+ *
+ * which is more idiomatic in C++ than calling ralloc.
+ */
+#define DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(TYPE, ALLOC_FUNC) \
+private: \
+ static void _ralloc_destructor(void *p) \
+ { \
+ reinterpret_cast<TYPE *>(p)->TYPE::~TYPE(); \
+ } \
+public: \
+ static void* operator new(size_t size, void *mem_ctx) \
+ { \
+ void *p = ALLOC_FUNC(mem_ctx, size); \
+ assert(p != NULL); \
+ if (!HAS_TRIVIAL_DESTRUCTOR(TYPE)) \
+ ralloc_set_destructor(p, _ralloc_destructor); \
+ return p; \
+ } \
+ \
+ static void operator delete(void *p) \
+ { \
+ /* The object's destructor is guaranteed to have already been \
+ * called by the delete operator at this point -- Make sure it's \
+ * not called again. \
+ */ \
+ if (!HAS_TRIVIAL_DESTRUCTOR(TYPE)) \
+ ralloc_set_destructor(p, NULL); \
+ ralloc_free(p); \
+ }
+
+#define DECLARE_RALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, ralloc_size)
+
+#define DECLARE_RZALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, rzalloc_size)
+
+#define DECLARE_LINEAR_ALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_alloc_child)
+
+#define DECLARE_LINEAR_ZALLOC_CXX_OPERATORS(type) \
+ DECLARE_ALLOC_CXX_OPERATORS_TEMPLATE(type, linear_zalloc_child)
+
+
+/**
+ * Do a fast allocation from the linear buffer, also known as the child node
+ * from the allocator's point of view. It can't be freed directly. You have
+ * to free the parent or the ralloc parent.
+ *
+ * \param parent parent node of the linear allocator
+ * \param size size to allocate (max 32 bits)
+ */
+void *linear_alloc_child(void *parent, unsigned size);
+
+/**
+ * Allocate a parent node that will hold linear buffers. The returned
+ * allocation is actually the first child node, but it's also the handle
+ * of the parent node. Use it for all child node allocations.
+ *
+ * \param ralloc_ctx ralloc context, must not be NULL
+ * \param size size to allocate (max 32 bits)
+ */
+void *linear_alloc_parent(void *ralloc_ctx, unsigned size);
+
+/**
+ * Same as linear_alloc_child, but also clears memory.
+ */
+void *linear_zalloc_child(void *parent, unsigned size);
+
+/**
+ * Same as linear_alloc_parent, but also clears memory.
+ */
+void *linear_zalloc_parent(void *ralloc_ctx, unsigned size);
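+
+/* Illustrative example (not part of the original API documentation): typical
+ * linear-allocator usage.  `ralloc_ctx` and `struct foo` are placeholders:
+ *
+ *    void *lin = linear_alloc_parent(ralloc_ctx, 0);
+ *    struct foo *a = linear_alloc_child(lin, sizeof(struct foo));
+ *    struct foo *b = linear_zalloc_child(lin, sizeof(struct foo));
+ *    ...
+ *    linear_free_parent(lin);
+ *
+ * Freeing the parent releases both children; freeing ralloc_ctx would free
+ * everything as well.
+ */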
+
+/**
+ * Free the linear parent node. This will free all child nodes too.
+ * Freeing the ralloc parent will also free this.
+ */
+void linear_free_parent(void *ptr);
+
+/**
+ * Same as ralloc_steal, but steals the linear parent node.
+ */
+void ralloc_steal_linear_parent(void *new_ralloc_ctx, void *ptr);
+
+/**
+ * Return the ralloc parent of the linear parent node.
+ */
+void *ralloc_parent_of_linear_parent(void *ptr);
+
+/**
+ * Same as realloc except that the linear allocator doesn't free child nodes,
+ * so it's reduced to memory duplication. It's used in places where
+ * reallocation is required. Don't use it often. It's much slower than
+ * realloc.
+ */
+void *linear_realloc(void *parent, void *old, unsigned new_size);
+
+/* The functions below have the same semantics as their ralloc counterparts,
+ * except that they always allocate a linear child node.
+ */
+char *linear_strdup(void *parent, const char *str);
+char *linear_asprintf(void *parent, const char *fmt, ...);
+char *linear_vasprintf(void *parent, const char *fmt, va_list args);
+bool linear_asprintf_append(void *parent, char **str, const char *fmt, ...);
+bool linear_vasprintf_append(void *parent, char **str, const char *fmt,
+ va_list args);
+bool linear_asprintf_rewrite_tail(void *parent, char **str, size_t *start,
+ const char *fmt, ...);
+bool linear_vasprintf_rewrite_tail(void *parent, char **str, size_t *start,
+ const char *fmt, va_list args);
+bool linear_strcat(void *parent, char **dest, const char *str);
+
+#ifdef __cplusplus
+} /* end of extern "C" */
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h b/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h
new file mode 100644
index 0000000000..e329d43824
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _ROUNDING_H
+#define _ROUNDING_H
+
+#include "c99_math.h"
+
+#include <limits.h>
+#include <stdint.h>
+
+#if defined(__SSE__) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 1)) || defined(_M_X64)
+#include <xmmintrin.h>
+#include <emmintrin.h>
+#endif
+
+#ifdef __SSE4_1__
+#include <smmintrin.h>
+#endif
+
+/* The C standard library has functions round()/rint()/nearbyint() that round
+ * their arguments according to the rounding mode set in the floating-point
+ * control register. While there are trunc()/ceil()/floor() functions that do
+ * a specific operation without modifying the rounding mode, there is no
+ * roundeven() in any version of C.
+ *
+ * Technical Specification 18661 (ISO/IEC TS 18661-1:2014) adds roundeven(),
+ * but it's unfortunately not implemented by glibc.
+ *
+ * This implementation differs in that it does not raise the inexact exception.
+ *
+ * We use rint() to implement these functions, with the assumption that the
+ * floating-point rounding mode has not been changed from the default Round
+ * to Nearest.
+ */
+
+/**
+ * \brief Rounds \c x to the nearest integer, with ties to the even integer.
+ */
+static inline float
+_mesa_roundevenf(float x)
+{
+#ifdef __SSE4_1__
+ float ret;
+ __m128 m = _mm_load_ss(&x);
+ m = _mm_round_ss(m, m, _MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC);
+ _mm_store_ss(&ret, m);
+ return ret;
+#else
+ return rintf(x);
+#endif
+}
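+
+/* Worked examples, assuming the default round-to-nearest mode noted above:
+ * _mesa_roundevenf(0.5f) == 0.0f, _mesa_roundevenf(1.5f) == 2.0f,
+ * _mesa_roundevenf(2.5f) == 2.0f and _mesa_roundevenf(-1.5f) == -2.0f.
+ */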
+
+/**
+ * \brief Rounds \c x to the nearest integer, with ties to the even integer.
+ */
+static inline double
+_mesa_roundeven(double x)
+{
+#ifdef __SSE4_1__
+ double ret;
+ __m128d m = _mm_load_sd(&x);
+ m = _mm_round_sd(m, m, _MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC);
+ _mm_store_sd(&ret, m);
+ return ret;
+#else
+ return rint(x);
+#endif
+}
+
+/**
+ * \brief Rounds \c x to the nearest integer, with ties to the even integer,
+ * and returns the value as a long int.
+ */
+static inline long
+_mesa_lroundevenf(float x)
+{
+#if defined(__SSE__) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 1)) || defined(_M_X64)
+#if LONG_MAX == INT64_MAX
+ return _mm_cvtss_si64(_mm_load_ss(&x));
+#elif LONG_MAX == INT32_MAX
+ return _mm_cvtss_si32(_mm_load_ss(&x));
+#else
+#error "Unsupported long size"
+#endif
+#else
+ return lrintf(x);
+#endif
+}
+
+
+/**
+ * \brief Rounds \c x to the nearest integer, with ties to the even integer,
+ * and returns the value as a long int.
+ */
+static inline long
+_mesa_lroundeven(double x)
+{
+#if defined(__SSE2__) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 2)) || defined(_M_X64)
+#if LONG_MAX == INT64_MAX
+ return _mm_cvtsd_si64(_mm_load_sd(&x));
+#elif LONG_MAX == INT32_MAX
+ return _mm_cvtsd_si32(_mm_load_sd(&x));
+#else
+#error "Unsupported long size"
+#endif
+#else
+ return lrint(x);
+#endif
+}
+
+/**
+ * \brief Rounds \c x to the nearest integer, with ties to the even integer,
+ * and returns the value as an int64_t.
+ */
+static inline int64_t
+_mesa_i64roundevenf(float x)
+{
+#if LONG_MAX == INT64_MAX
+ return _mesa_lroundevenf(x);
+#elif LONG_MAX == INT32_MAX
+ return llrintf(x);
+#else
+#error "Unsupported long size"
+#endif
+}
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/set.c b/third_party/rust/glslopt/glsl-optimizer/src/util/set.c
new file mode 100644
index 0000000000..ffe0fe808e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/set.c
@@ -0,0 +1,572 @@
+/*
+ * Copyright © 2009-2012 Intel Corporation
+ * Copyright © 1988-2004 Keith Packard and Bart Massey.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Except as contained in this notice, the names of the authors
+ * or their institutions shall not be used in advertising or
+ * otherwise to promote the sale, use or other dealings in this
+ * Software without prior written authorization from the
+ * authors.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ */
+
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+
+#include "hash_table.h"
+#include "macros.h"
+#include "ralloc.h"
+#include "set.h"
+#include "fast_urem_by_const.h"
+
+/*
+ * From Knuth -- a good choice for hash/rehash values is p, p-2 where
+ * p and p-2 are both prime. These tables are sized to have an extra 10%
+ * free to avoid exponential performance degradation as the hash table fills.
+ */
+
+static const uint32_t deleted_key_value;
+static const void *deleted_key = &deleted_key_value;
+
+static const struct {
+ uint32_t max_entries, size, rehash;
+ uint64_t size_magic, rehash_magic;
+} hash_sizes[] = {
+#define ENTRY(max_entries, size, rehash) \
+ { max_entries, size, rehash, \
+ REMAINDER_MAGIC(size), REMAINDER_MAGIC(rehash) }
+
+ ENTRY(2, 5, 3 ),
+ ENTRY(4, 7, 5 ),
+ ENTRY(8, 13, 11 ),
+ ENTRY(16, 19, 17 ),
+ ENTRY(32, 43, 41 ),
+ ENTRY(64, 73, 71 ),
+ ENTRY(128, 151, 149 ),
+ ENTRY(256, 283, 281 ),
+ ENTRY(512, 571, 569 ),
+ ENTRY(1024, 1153, 1151 ),
+ ENTRY(2048, 2269, 2267 ),
+ ENTRY(4096, 4519, 4517 ),
+ ENTRY(8192, 9013, 9011 ),
+ ENTRY(16384, 18043, 18041 ),
+ ENTRY(32768, 36109, 36107 ),
+ ENTRY(65536, 72091, 72089 ),
+ ENTRY(131072, 144409, 144407 ),
+ ENTRY(262144, 288361, 288359 ),
+ ENTRY(524288, 576883, 576881 ),
+ ENTRY(1048576, 1153459, 1153457 ),
+ ENTRY(2097152, 2307163, 2307161 ),
+ ENTRY(4194304, 4613893, 4613891 ),
+ ENTRY(8388608, 9227641, 9227639 ),
+ ENTRY(16777216, 18455029, 18455027 ),
+ ENTRY(33554432, 36911011, 36911009 ),
+ ENTRY(67108864, 73819861, 73819859 ),
+ ENTRY(134217728, 147639589, 147639587 ),
+ ENTRY(268435456, 295279081, 295279079 ),
+ ENTRY(536870912, 590559793, 590559791 ),
+ ENTRY(1073741824, 1181116273, 1181116271 ),
+ ENTRY(2147483648ul, 2362232233ul, 2362232231ul )
+};
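+
+/* For example, the last entry above allows at most 2147483648 entries in a
+ * table of 2362232233 slots, which keeps roughly the extra 10% free described
+ * in the comment above (2147483648 * 1.1 is approximately 2362232233).
+ */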
+
+ASSERTED static inline bool
+key_pointer_is_reserved(const void *key)
+{
+ return key == NULL || key == deleted_key;
+}
+
+static int
+entry_is_free(struct set_entry *entry)
+{
+ return entry->key == NULL;
+}
+
+static int
+entry_is_deleted(struct set_entry *entry)
+{
+ return entry->key == deleted_key;
+}
+
+static int
+entry_is_present(struct set_entry *entry)
+{
+ return entry->key != NULL && entry->key != deleted_key;
+}
+
+struct set *
+_mesa_set_create(void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b))
+{
+ struct set *ht;
+
+ ht = ralloc(mem_ctx, struct set);
+ if (ht == NULL)
+ return NULL;
+
+ ht->size_index = 0;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->key_hash_function = key_hash_function;
+ ht->key_equals_function = key_equals_function;
+ ht->table = rzalloc_array(ht, struct set_entry, ht->size);
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+
+ if (ht->table == NULL) {
+ ralloc_free(ht);
+ return NULL;
+ }
+
+ return ht;
+}
+
+struct set *
+_mesa_set_clone(struct set *set, void *dst_mem_ctx)
+{
+ struct set *clone;
+
+ clone = ralloc(dst_mem_ctx, struct set);
+ if (clone == NULL)
+ return NULL;
+
+ memcpy(clone, set, sizeof(struct set));
+
+ clone->table = ralloc_array(clone, struct set_entry, clone->size);
+ if (clone->table == NULL) {
+ ralloc_free(clone);
+ return NULL;
+ }
+
+ memcpy(clone->table, set->table, clone->size * sizeof(struct set_entry));
+
+ return clone;
+}
+
+/**
+ * Frees the given set.
+ *
+ * If delete_function is passed, it gets called on each entry present before
+ * freeing.
+ */
+void
+_mesa_set_destroy(struct set *ht, void (*delete_function)(struct set_entry *entry))
+{
+ if (!ht)
+ return;
+
+ if (delete_function) {
+ set_foreach (ht, entry) {
+ delete_function(entry);
+ }
+ }
+ ralloc_free(ht->table);
+ ralloc_free(ht);
+}
+
+/**
+ * Clears all values from the given set.
+ *
+ * If delete_function is passed, it gets called on each entry present before
+ * the set is cleared.
+ */
+void
+_mesa_set_clear(struct set *set, void (*delete_function)(struct set_entry *entry))
+{
+ if (!set)
+ return;
+
+ set_foreach (set, entry) {
+ if (delete_function)
+ delete_function(entry);
+ entry->key = deleted_key;
+ }
+
+ set->entries = set->deleted_entries = 0;
+}
+
+/**
+ * Finds a set entry with the given key and hash of that key.
+ *
+ * Returns NULL if no entry is found.
+ */
+static struct set_entry *
+set_search(const struct set *ht, uint32_t hash, const void *key)
+{
+ assert(!key_pointer_is_reserved(key));
+
+ uint32_t size = ht->size;
+ uint32_t start_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic) + 1;
+ uint32_t hash_address = start_address;
+ do {
+ struct set_entry *entry = ht->table + hash_address;
+
+ if (entry_is_free(entry)) {
+ return NULL;
+ } else if (entry_is_present(entry) && entry->hash == hash) {
+ if (ht->key_equals_function(key, entry->key)) {
+ return entry;
+ }
+ }
+
+ hash_address += double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_address);
+
+ return NULL;
+}
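+
+/* Worked example of the double-hash probe in set_search(), using the smallest
+ * entry in hash_sizes (size 5, rehash 3): for hash == 12, start_address is
+ * 12 % 5 == 2 and double_hash is 12 % 3 + 1 == 1, so the probe visits slots
+ * 2, 3, 4, 0, 1 before concluding that the key is absent.
+ */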
+
+struct set_entry *
+_mesa_set_search(const struct set *set, const void *key)
+{
+ assert(set->key_hash_function);
+ return set_search(set, set->key_hash_function(key), key);
+}
+
+struct set_entry *
+_mesa_set_search_pre_hashed(const struct set *set, uint32_t hash,
+ const void *key)
+{
+ assert(set->key_hash_function == NULL ||
+ hash == set->key_hash_function(key));
+ return set_search(set, hash, key);
+}
+
+static void
+set_add_rehash(struct set *ht, uint32_t hash, const void *key)
+{
+ uint32_t size = ht->size;
+ uint32_t start_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic) + 1;
+ uint32_t hash_address = start_address;
+ do {
+ struct set_entry *entry = ht->table + hash_address;
+ if (likely(entry->key == NULL)) {
+ entry->hash = hash;
+ entry->key = key;
+ return;
+ }
+
+ hash_address = hash_address + double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (true);
+}
+
+static void
+set_rehash(struct set *ht, unsigned new_size_index)
+{
+ struct set old_ht;
+ struct set_entry *table;
+
+ if (new_size_index >= ARRAY_SIZE(hash_sizes))
+ return;
+
+ table = rzalloc_array(ht, struct set_entry,
+ hash_sizes[new_size_index].size);
+ if (table == NULL)
+ return;
+
+ old_ht = *ht;
+
+ ht->table = table;
+ ht->size_index = new_size_index;
+ ht->size = hash_sizes[ht->size_index].size;
+ ht->rehash = hash_sizes[ht->size_index].rehash;
+ ht->size_magic = hash_sizes[ht->size_index].size_magic;
+ ht->rehash_magic = hash_sizes[ht->size_index].rehash_magic;
+ ht->max_entries = hash_sizes[ht->size_index].max_entries;
+ ht->entries = 0;
+ ht->deleted_entries = 0;
+
+ set_foreach(&old_ht, entry) {
+ set_add_rehash(ht, entry->hash, entry->key);
+ }
+
+ ht->entries = old_ht.entries;
+
+ ralloc_free(old_ht.table);
+}
+
+void
+_mesa_set_resize(struct set *set, uint32_t entries)
+{
+ /* You can't shrink a set below its number of entries */
+ if (set->entries > entries)
+ entries = set->entries;
+
+ unsigned size_index = 0;
+ while (hash_sizes[size_index].max_entries < entries)
+ size_index++;
+
+ set_rehash(set, size_index);
+}
+
+/**
+ * Find a matching entry for the given key, or insert it if it doesn't already
+ * exist.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash, so
+ * previously found set_entry pointers are no longer valid after this
+ * function.
+ */
+static struct set_entry *
+set_search_or_add(struct set *ht, uint32_t hash, const void *key, bool *found)
+{
+ struct set_entry *available_entry = NULL;
+
+ assert(!key_pointer_is_reserved(key));
+
+ if (ht->entries >= ht->max_entries) {
+ set_rehash(ht, ht->size_index + 1);
+ } else if (ht->deleted_entries + ht->entries >= ht->max_entries) {
+ set_rehash(ht, ht->size_index);
+ }
+
+ uint32_t size = ht->size;
+ uint32_t start_address = util_fast_urem32(hash, size, ht->size_magic);
+ uint32_t double_hash = util_fast_urem32(hash, ht->rehash,
+ ht->rehash_magic) + 1;
+ uint32_t hash_address = start_address;
+ do {
+ struct set_entry *entry = ht->table + hash_address;
+
+ if (!entry_is_present(entry)) {
+ /* Stash the first available entry we find */
+ if (available_entry == NULL)
+ available_entry = entry;
+ if (entry_is_free(entry))
+ break;
+ }
+
+ if (!entry_is_deleted(entry) &&
+ entry->hash == hash &&
+ ht->key_equals_function(key, entry->key)) {
+ if (found)
+ *found = true;
+ return entry;
+ }
+
+ hash_address = hash_address + double_hash;
+ if (hash_address >= size)
+ hash_address -= size;
+ } while (hash_address != start_address);
+
+ if (available_entry) {
+ /* There is no matching entry, create it. */
+ if (entry_is_deleted(available_entry))
+ ht->deleted_entries--;
+ available_entry->hash = hash;
+ available_entry->key = key;
+ ht->entries++;
+ if (found)
+ *found = false;
+ return available_entry;
+ }
+
+   /* We only get here if a required resize failed. An application that
+    * doesn't check allocations could ignore this result.
+    */
+ return NULL;
+}
+
+/**
+ * Inserts the key with the given hash into the table.
+ *
+ * Note that insertion may rearrange the table on a resize or rehash, so
+ * previously found set_entry pointers are no longer valid after this
+ * function.
+ */
+static struct set_entry *
+set_add(struct set *ht, uint32_t hash, const void *key)
+{
+ struct set_entry *entry = set_search_or_add(ht, hash, key, NULL);
+
+ if (unlikely(!entry))
+ return NULL;
+
+ /* Note: If a matching entry already exists, this will replace it. This is
+ * a relatively common feature of hash tables, with the alternative
+ * generally being "insert the new value as well, and return it first when
+ * the key is searched for".
+ *
+ * Note that the hash table doesn't have a delete callback. If freeing of
+ * old keys is required to avoid memory leaks, use the alternative
+ * _mesa_set_search_or_add function and implement the replacement yourself.
+ */
+ entry->key = key;
+ return entry;
+}
+
+struct set_entry *
+_mesa_set_add(struct set *set, const void *key)
+{
+ assert(set->key_hash_function);
+ return set_add(set, set->key_hash_function(key), key);
+}
+
+struct set_entry *
+_mesa_set_add_pre_hashed(struct set *set, uint32_t hash, const void *key)
+{
+ assert(set->key_hash_function == NULL ||
+ hash == set->key_hash_function(key));
+ return set_add(set, hash, key);
+}
+
+struct set_entry *
+_mesa_set_search_and_add(struct set *set, const void *key, bool *replaced)
+{
+ assert(set->key_hash_function);
+ return _mesa_set_search_and_add_pre_hashed(set,
+ set->key_hash_function(key),
+ key, replaced);
+}
+
+struct set_entry *
+_mesa_set_search_and_add_pre_hashed(struct set *set, uint32_t hash,
+ const void *key, bool *replaced)
+{
+ assert(set->key_hash_function == NULL ||
+ hash == set->key_hash_function(key));
+ struct set_entry *entry = set_search_or_add(set, hash, key, replaced);
+
+ if (unlikely(!entry))
+ return NULL;
+
+ /* This implements the replacement, same as _mesa_set_add(). The user will
+ * be notified if we're overwriting a found entry.
+ */
+ entry->key = key;
+ return entry;
+}
+
+struct set_entry *
+_mesa_set_search_or_add(struct set *set, const void *key)
+{
+ assert(set->key_hash_function);
+ return set_search_or_add(set, set->key_hash_function(key), key, NULL);
+}
+
+struct set_entry *
+_mesa_set_search_or_add_pre_hashed(struct set *set, uint32_t hash,
+ const void *key)
+{
+ assert(set->key_hash_function == NULL ||
+ hash == set->key_hash_function(key));
+ return set_search_or_add(set, hash, key, NULL);
+}
+
+/**
+ * This function deletes the given hash table entry.
+ *
+ * Note that deletion doesn't otherwise modify the table, so an iteration over
+ * the table deleting entries is safe.
+ */
+void
+_mesa_set_remove(struct set *ht, struct set_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->key = deleted_key;
+ ht->entries--;
+ ht->deleted_entries++;
+}
+
+/**
+ * Removes the entry with the corresponding key, if it exists.
+ */
+void
+_mesa_set_remove_key(struct set *set, const void *key)
+{
+ _mesa_set_remove(set, _mesa_set_search(set, key));
+}
+
+/**
+ * This function is an iterator over the hash table.
+ *
+ * Pass in NULL for the first entry, as in the start of a for loop. Note that
+ * an iteration over the table is O(table_size) not O(entries).
+ */
+struct set_entry *
+_mesa_set_next_entry(const struct set *ht, struct set_entry *entry)
+{
+ if (entry == NULL)
+ entry = ht->table;
+ else
+ entry = entry + 1;
+
+ for (; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(entry)) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+struct set_entry *
+_mesa_set_random_entry(struct set *ht,
+ int (*predicate)(struct set_entry *entry))
+{
+ struct set_entry *entry;
+ uint32_t i = rand() % ht->size;
+
+ if (ht->entries == 0)
+ return NULL;
+
+ for (entry = ht->table + i; entry != ht->table + ht->size; entry++) {
+ if (entry_is_present(entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ for (entry = ht->table; entry != ht->table + i; entry++) {
+ if (entry_is_present(entry) &&
+ (!predicate || predicate(entry))) {
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * Helper to create a set with pointer keys.
+ */
+struct set *
+_mesa_pointer_set_create(void *mem_ctx)
+{
+ return _mesa_set_create(mem_ctx, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/set.h b/third_party/rust/glslopt/glsl-optimizer/src/util/set.h
new file mode 100644
index 0000000000..55857aca7a
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/set.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright © 2009-2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#ifndef _SET_H
+#define _SET_H
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct set_entry {
+ uint32_t hash;
+ const void *key;
+};
+
+struct set {
+ void *mem_ctx;
+ struct set_entry *table;
+ uint32_t (*key_hash_function)(const void *key);
+ bool (*key_equals_function)(const void *a, const void *b);
+ uint32_t size;
+ uint32_t rehash;
+ uint64_t size_magic;
+ uint64_t rehash_magic;
+ uint32_t max_entries;
+ uint32_t size_index;
+ uint32_t entries;
+ uint32_t deleted_entries;
+};
+
+struct set *
+_mesa_set_create(void *mem_ctx,
+ uint32_t (*key_hash_function)(const void *key),
+ bool (*key_equals_function)(const void *a,
+ const void *b));
+struct set *
+_mesa_set_clone(struct set *set, void *dst_mem_ctx);
+
+void
+_mesa_set_destroy(struct set *set,
+ void (*delete_function)(struct set_entry *entry));
+void
+_mesa_set_resize(struct set *set, uint32_t entries);
+void
+_mesa_set_clear(struct set *set,
+ void (*delete_function)(struct set_entry *entry));
+
+struct set_entry *
+_mesa_set_add(struct set *set, const void *key);
+struct set_entry *
+_mesa_set_add_pre_hashed(struct set *set, uint32_t hash, const void *key);
+
+struct set_entry *
+_mesa_set_search_or_add(struct set *set, const void *key);
+struct set_entry *
+_mesa_set_search_or_add_pre_hashed(struct set *set, uint32_t hash,
+ const void *key);
+
+struct set_entry *
+_mesa_set_search(const struct set *set, const void *key);
+struct set_entry *
+_mesa_set_search_pre_hashed(const struct set *set, uint32_t hash,
+ const void *key);
+
+struct set_entry *
+_mesa_set_search_and_add(struct set *set, const void *key, bool *replaced);
+struct set_entry *
+_mesa_set_search_and_add_pre_hashed(struct set *set, uint32_t hash,
+ const void *key, bool *replaced);
+
+void
+_mesa_set_remove(struct set *set, struct set_entry *entry);
+void
+_mesa_set_remove_key(struct set *set, const void *key);
+
+struct set_entry *
+_mesa_set_next_entry(const struct set *set, struct set_entry *entry);
+
+struct set_entry *
+_mesa_set_random_entry(struct set *set,
+ int (*predicate)(struct set_entry *entry));
+
+struct set *
+_mesa_pointer_set_create(void *mem_ctx);
+
+/**
+ * This foreach function is safe against deletion, but not against
+ * insertion (which may rehash the set, making entry a dangling
+ * pointer).
+ */
+#define set_foreach(set, entry) \
+ for (struct set_entry *entry = _mesa_set_next_entry(set, NULL); \
+ entry != NULL; \
+ entry = _mesa_set_next_entry(set, entry))
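+
+/* Illustrative example (not part of the original API documentation): a
+ * pointer set used to deduplicate visits.  `mem_ctx`, `node`, visit() and
+ * do_something() are placeholders:
+ *
+ *    struct set *seen = _mesa_pointer_set_create(mem_ctx);
+ *    if (!_mesa_set_search(seen, node)) {
+ *       _mesa_set_add(seen, node);
+ *       visit(node);
+ *    }
+ *    set_foreach(seen, entry)
+ *       do_something(entry->key);
+ *    _mesa_set_destroy(seen, NULL);
+ */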
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* _SET_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/README b/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/README
new file mode 100644
index 0000000000..f30acf984e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/README
@@ -0,0 +1,62 @@
+This is a local copy of a SHA1 implementation, based on the sources below.
+
+Why:
+ - Some libraries suffer from race conditions and other issues. For example,
+see commit ade3108bb5b0 ("util: Fix race condition on libgcrypt initialization").
+
+ - Fold away the configure-stage detection and handling of _eight_
+implementations and _seven_ different codepaths.
+
+ - Have a single, uniform, code used by developers, testers and users.
+
+ - Avoid conflicts when using software which ships with its own SHA1 library,
+which may conflict with the one Mesa is built against.
+
+
+
+Source:
+The SHA1 implementation is copied verbatim from the following links.
+At the time of checkout HEAD is 1.25 and 1.24 respectively.
+
+http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/lib/libc/hash/sha1.c?rev=HEAD
+http://cvsweb.openbsd.org/cgi-bin/cvsweb/~checkout~/src/include/sha1.h?rev=HEAD
+
+
+Notes:
+ - The files should not have any local changes. If there are any they should be
+clearly documented below and one should aim to upstream them where possible.
+
+ - Files will be periodically synchronised with the respective upstream sources.
+Updates will be made regularly, but since the code is _not_ intended as a
+cryptography solution, any issues found should not be considered security ones.
+
+
+Local changes:
+ - Removed __bounded__ attribute qualifiers. Unavailable on platforms targeted
+by Mesa. Upstream status: TBD (N/A ?)
+
+ - Pick the sha1.h header from the current folder, by using "" over <> in the
+include directive. Upstream status: TBD
+
+ - Remove unused function prototypes - SHA1End, SHA1File, SHA1FileChunk and
+SHA1Data. Upstream status: TBD
+
+ - Use stdint.h integer types - u_int{8,16,32}_t -> uint{8,16,32}_t and
+u_int -> uint32_t, change header include. Upstream status: TBD
+
+ - Revert sha1.c rev 1.26 change (introduce DEF_WEAK).
+Upstream status: TBD (N/A ?)
+
+ - Add stdint.h include in sha1.h for uint*_t types. Upstream status: TBD
+
+ - Add stddef.h include in sha1.h for size_t type. Upstream status: TBD
+
+ - Use memset over explicit_bzero, since memset_s isn't widely available.
+Upstream status: TBD (N/A ?)
+
+ - Manually expand __BEGIN_DECLS/__END_DECLS and make sure that they include
+the struct declaration.
+Upstream status: TBD
+
+ - Add non-typedef struct name.
+Upstream status: TBD
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.c b/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.c
new file mode 100644
index 0000000000..4fe2aa723c
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.c
@@ -0,0 +1,174 @@
+/* $OpenBSD: sha1.c,v 1.26 2015/09/11 09:18:27 guenther Exp $ */
+
+/*
+ * SHA-1 in C
+ * By Steve Reid <steve@edmweb.com>
+ * 100% Public Domain
+ *
+ * Test Vectors (from FIPS PUB 180-1)
+ * "abc"
+ * A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D
+ * "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"
+ * 84983E44 1C3BD26E BAAE4AA1 F95129E5 E54670F1
+ * A million repetitions of "a"
+ * 34AA973C D4C4DAA4 F61EEB2B DBAD2731 6534016F
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include "u_endian.h"
+#include "sha1.h"
+
+#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
+
+/*
+ * blk0() and blk() perform the initial expand.
+ * I got the idea of expanding during the round function from SSLeay
+ */
+#if UTIL_ARCH_LITTLE_ENDIAN
+# define blk0(i) (block->l[i] = (rol(block->l[i],24)&0xFF00FF00) \
+ |(rol(block->l[i],8)&0x00FF00FF))
+#else
+# define blk0(i) block->l[i]
+#endif
+#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] \
+ ^block->l[(i+2)&15]^block->l[i&15],1))
+
+/*
+ * (R0+R1), R2, R3, R4 are the different operations (rounds) used in SHA1
+ */
+#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
+#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
+#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
+#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
+
+typedef union {
+ uint8_t c[64];
+ uint32_t l[16];
+} CHAR64LONG16;
+
+/*
+ * Hash a single 512-bit block. This is the core of the algorithm.
+ */
+void
+SHA1Transform(uint32_t state[5], const uint8_t buffer[SHA1_BLOCK_LENGTH])
+{
+ uint32_t a, b, c, d, e;
+ uint8_t workspace[SHA1_BLOCK_LENGTH];
+ CHAR64LONG16 *block = (CHAR64LONG16 *)workspace;
+
+ (void)memcpy(block, buffer, SHA1_BLOCK_LENGTH);
+
+ /* Copy context->state[] to working vars */
+ a = state[0];
+ b = state[1];
+ c = state[2];
+ d = state[3];
+ e = state[4];
+
+ /* 4 rounds of 20 operations each. Loop unrolled. */
+ R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
+ R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
+ R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
+ R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
+ R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
+ R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
+ R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
+ R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
+ R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
+ R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
+ R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
+ R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
+ R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
+ R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
+ R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
+ R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
+ R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
+ R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
+ R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
+ R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
+
+ /* Add the working vars back into context.state[] */
+ state[0] += a;
+ state[1] += b;
+ state[2] += c;
+ state[3] += d;
+ state[4] += e;
+
+ /* Wipe variables */
+ a = b = c = d = e = 0;
+}
+
+
+/*
+ * SHA1Init - Initialize new context
+ */
+void
+SHA1Init(SHA1_CTX *context)
+{
+
+ /* SHA1 initialization constants */
+ context->count = 0;
+ context->state[0] = 0x67452301;
+ context->state[1] = 0xEFCDAB89;
+ context->state[2] = 0x98BADCFE;
+ context->state[3] = 0x10325476;
+ context->state[4] = 0xC3D2E1F0;
+}
+
+
+/*
+ * Run your data through this.
+ */
+void
+SHA1Update(SHA1_CTX *context, const uint8_t *data, size_t len)
+{
+ size_t i, j;
+
+ j = (size_t)((context->count >> 3) & 63);
+ context->count += (len << 3);
+ if ((j + len) > 63) {
+ (void)memcpy(&context->buffer[j], data, (i = 64-j));
+ SHA1Transform(context->state, context->buffer);
+ for ( ; i + 63 < len; i += 64)
+ SHA1Transform(context->state, (uint8_t *)&data[i]);
+ j = 0;
+ } else {
+ i = 0;
+ }
+ (void)memcpy(&context->buffer[j], &data[i], len - i);
+}
+
+
+/*
+ * Add padding and return the message digest.
+ */
+void
+SHA1Pad(SHA1_CTX *context)
+{
+ uint8_t finalcount[8];
+ uint32_t i;
+
+ for (i = 0; i < 8; i++) {
+ finalcount[i] = (uint8_t)((context->count >>
+ ((7 - (i & 7)) * 8)) & 255); /* Endian independent */
+ }
+ SHA1Update(context, (uint8_t *)"\200", 1);
+ while ((context->count & 504) != 448)
+ SHA1Update(context, (uint8_t *)"\0", 1);
+ SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
+}
+
+void
+SHA1Final(uint8_t digest[SHA1_DIGEST_LENGTH], SHA1_CTX *context)
+{
+ uint32_t i;
+
+ SHA1Pad(context);
+ for (i = 0; i < SHA1_DIGEST_LENGTH; i++) {
+ digest[i] = (uint8_t)
+ ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
+ }
+ memset(context, 0, sizeof(*context));
+}
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.h b/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.h
new file mode 100644
index 0000000000..029a0ae87f
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/sha1/sha1.h
@@ -0,0 +1,53 @@
+/* $OpenBSD: sha1.h,v 1.24 2012/12/05 23:19:57 deraadt Exp $ */
+
+/*
+ * SHA-1 in C
+ * By Steve Reid <steve@edmweb.com>
+ * 100% Public Domain
+ */
+
+#ifndef _SHA1_H
+#define _SHA1_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#define SHA1_BLOCK_LENGTH 64
+#define SHA1_DIGEST_LENGTH 20
+#define SHA1_DIGEST_STRING_LENGTH (SHA1_DIGEST_LENGTH * 2 + 1)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _SHA1_CTX {
+ uint32_t state[5];
+ uint64_t count;
+ uint8_t buffer[SHA1_BLOCK_LENGTH];
+} SHA1_CTX;
+
+void SHA1Init(SHA1_CTX *);
+void SHA1Pad(SHA1_CTX *);
+void SHA1Transform(uint32_t [5], const uint8_t [SHA1_BLOCK_LENGTH]);
+void SHA1Update(SHA1_CTX *, const uint8_t *, size_t);
+void SHA1Final(uint8_t [SHA1_DIGEST_LENGTH], SHA1_CTX *);
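+
+/* Illustrative example (not part of the original API documentation): one-shot
+ * hashing.  With the FIPS 180-1 vector "abc" (see sha1.c), the digest is
+ * A9993E36 4706816A BA3E2571 7850C26C 9CD0D89D:
+ *
+ *    SHA1_CTX ctx;
+ *    uint8_t digest[SHA1_DIGEST_LENGTH];
+ *    SHA1Init(&ctx);
+ *    SHA1Update(&ctx, (const uint8_t *)"abc", 3);
+ *    SHA1Final(digest, &ctx);
+ */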
+
+#define HTONDIGEST(x) do { \
+ x[0] = htonl(x[0]); \
+ x[1] = htonl(x[1]); \
+ x[2] = htonl(x[2]); \
+ x[3] = htonl(x[3]); \
+ x[4] = htonl(x[4]); } while (0)
+
+#define NTOHDIGEST(x) do { \
+ x[0] = ntohl(x[0]); \
+ x[1] = ntohl(x[1]); \
+ x[2] = ntohl(x[2]); \
+ x[3] = ntohl(x[3]); \
+ x[4] = ntohl(x[4]); } while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SHA1_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/simple_mtx.h b/third_party/rust/glslopt/glsl-optimizer/src/util/simple_mtx.h
new file mode 100644
index 0000000000..e332816b98
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/simple_mtx.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2015 Intel
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef _SIMPLE_MTX_H
+#define _SIMPLE_MTX_H
+
+#include "util/futex.h"
+#include "util/macros.h"
+
+#include "c11/threads.h"
+
+#if UTIL_FUTEX_SUPPORTED
+
+/* simple_mtx_t - Fast, simple mutex
+ *
+ * While modern pthread mutexes are very fast (implemented using futex), they
+ * still incur a call to an external DSO and the overhead of the generality
+ * and features of pthread mutexes.  Most mutexes in mesa only need
+ * lock/unlock, and the idea here is that we can inline the atomic operation
+ * and make the fast case just two instructions.  Mutexes are subtle and
+ * finicky to implement, so we carefully copy the implementation from Ulrich
+ * Drepper's well-written and well-reviewed paper:
+ *
+ *   "Futexes Are Tricky"
+ *   http://www.akkadia.org/drepper/futex.pdf
+ *
+ * We implement "mutex3", which gives us a mutex that has no syscalls on
+ * uncontended lock or unlock.  Further, the uncontended lock boils down to a
+ * locked cmpxchg and an untaken branch, and the uncontended unlock is just a
+ * locked decr and an untaken branch.  We use __builtin_expect() to indicate
+ * that contention is unlikely so that gcc will put the contention code out of
+ * the main code flow.
+ *
+ * A fast mutex only supports lock/unlock; it can't be recursive or used with
+ * condition variables.
+ */
+
+typedef struct {
+ uint32_t val;
+} simple_mtx_t;
+
+#define _SIMPLE_MTX_INITIALIZER_NP { 0 }
+
+#define _SIMPLE_MTX_INVALID_VALUE 0xd0d0d0d0
+
+static inline void
+simple_mtx_init(simple_mtx_t *mtx, ASSERTED int type)
+{
+ assert(type == mtx_plain);
+
+ mtx->val = 0;
+}
+
+static inline void
+simple_mtx_destroy(ASSERTED simple_mtx_t *mtx)
+{
+#ifndef NDEBUG
+ mtx->val = _SIMPLE_MTX_INVALID_VALUE;
+#endif
+}
+
+static inline void
+simple_mtx_lock(simple_mtx_t *mtx)
+{
+ uint32_t c;
+
+ c = __sync_val_compare_and_swap(&mtx->val, 0, 1);
+
+ assert(c != _SIMPLE_MTX_INVALID_VALUE);
+
+ if (__builtin_expect(c != 0, 0)) {
+ if (c != 2)
+ c = __sync_lock_test_and_set(&mtx->val, 2);
+ while (c != 0) {
+ futex_wait(&mtx->val, 2, NULL);
+ c = __sync_lock_test_and_set(&mtx->val, 2);
+ }
+ }
+}
+
+static inline void
+simple_mtx_unlock(simple_mtx_t *mtx)
+{
+ uint32_t c;
+
+ c = __sync_fetch_and_sub(&mtx->val, 1);
+
+ assert(c != _SIMPLE_MTX_INVALID_VALUE);
+
+ if (__builtin_expect(c != 1, 0)) {
+ mtx->val = 0;
+ futex_wake(&mtx->val, 1);
+ }
+}
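+
+/* Illustrative example (not part of the original API documentation): guarding
+ * shared state with a statically initialized fast mutex:
+ *
+ *    static simple_mtx_t lock = _SIMPLE_MTX_INITIALIZER_NP;
+ *    static unsigned counter;
+ *
+ *    simple_mtx_lock(&lock);
+ *    counter++;
+ *    simple_mtx_unlock(&lock);
+ */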
+
+#else
+
+typedef mtx_t simple_mtx_t;
+
+#define _SIMPLE_MTX_INITIALIZER_NP _MTX_INITIALIZER_NP
+
+static inline void
+simple_mtx_init(simple_mtx_t *mtx, int type)
+{
+ mtx_init(mtx, type);
+}
+
+static inline void
+simple_mtx_destroy(simple_mtx_t *mtx)
+{
+ mtx_destroy(mtx);
+}
+
+static inline void
+simple_mtx_lock(simple_mtx_t *mtx)
+{
+ mtx_lock(mtx);
+}
+
+static inline void
+simple_mtx_unlock(simple_mtx_t *mtx)
+{
+ mtx_unlock(mtx);
+}
+
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.c b/third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.c
new file mode 100644
index 0000000000..591128efd4
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.c
@@ -0,0 +1,1475 @@
+/*
+ * License for Berkeley SoftFloat Release 3e
+ *
+ * John R. Hauser
+ * 2018 January 20
+ *
+ * The following applies to the whole of SoftFloat Release 3e as well as to
+ * each source file individually.
+ *
+ * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
+ * University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions, and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions, and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * The functions listed in this file are modified versions of the ones
+ * from the Berkeley SoftFloat 3e Library.
+ *
+ * Their implementation correctness has been checked with the Berkeley
+ * TestFloat Release 3e tool for x86_64.
+ */
+
+#include "rounding.h"
+#include "bitscan.h"
+#include "softfloat.h"
+
+#if defined(BIG_ENDIAN)
+#define word_incr -1
+#define index_word(total, n) ((total) - 1 - (n))
+#define index_word_hi(total) 0
+#define index_word_lo(total) ((total) - 1)
+#define index_multiword_hi(total, n) 0
+#define index_multiword_lo(total, n) ((total) - (n))
+#define index_multiword_hi_but(total, n) 0
+#define index_multiword_lo_but(total, n) (n)
+#else
+#define word_incr 1
+#define index_word(total, n) (n)
+#define index_word_hi(total) ((total) - 1)
+#define index_word_lo(total) 0
+#define index_multiword_hi(total, n) ((total) - (n))
+#define index_multiword_lo(total, n) 0
+#define index_multiword_hi_but(total, n) (n)
+#define index_multiword_lo_but(total, n) 0
+#endif
+
+typedef union { double f; int64_t i; uint64_t u; } di_type;
+typedef union { float f; int32_t i; uint32_t u; } fi_type;
+
+const uint8_t count_leading_zeros8[256] = {
+ 8, 7, 6, 6, 5, 5, 5, 5, 4, 4, 4, 4, 4, 4, 4, 4,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+/**
+ * \brief Shifts 'a' right by the number of bits given in 'dist', which must be in
+ * the range 1 to 63. If any nonzero bits are shifted off, they are "jammed"
+ * into the least-significant bit of the shifted value by setting the
+ * least-significant bit to 1. This shifted-and-jammed value is returned.
+ *
+ * From softfloat_shortShiftRightJam64()
+ */
+static inline
+uint64_t _mesa_short_shift_right_jam64(uint64_t a, uint8_t dist)
+{
+ return a >> dist | ((a & (((uint64_t) 1 << dist) - 1)) != 0);
+}
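+
+/* Worked example: _mesa_short_shift_right_jam64(0xb, 2) == 0x3.  The plain
+ * shift gives 0x2, and because the bits shifted off (0x3) are nonzero, the
+ * least-significant bit is jammed to 1.
+ */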
+
+/**
+ * \brief Shifts 'a' right by the number of bits given in 'dist', which must not
+ * be zero. If any nonzero bits are shifted off, they are "jammed" into the
+ * least-significant bit of the shifted value by setting the least-significant
+ * bit to 1. This shifted-and-jammed value is returned.
+ * The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+ * greater than 64, the result will be either 0 or 1, depending on whether 'a'
+ * is zero or nonzero.
+ *
+ * From softfloat_shiftRightJam64()
+ */
+static inline
+uint64_t _mesa_shift_right_jam64(uint64_t a, uint32_t dist)
+{
+ return
+ (dist < 63) ? a >> dist | ((uint64_t) (a << (-dist & 63)) != 0) : (a != 0);
+}
+
+/**
+ * \brief Shifts 'a' right by the number of bits given in 'dist', which must not be
+ * zero. If any nonzero bits are shifted off, they are "jammed" into the
+ * least-significant bit of the shifted value by setting the least-significant
+ * bit to 1. This shifted-and-jammed value is returned.
+ * The value of 'dist' can be arbitrarily large. In particular, if 'dist' is
+ * greater than 32, the result will be either 0 or 1, depending on whether 'a'
+ * is zero or nonzero.
+ *
+ * From softfloat_shiftRightJam32()
+ */
+static inline
+uint32_t _mesa_shift_right_jam32(uint32_t a, uint16_t dist)
+{
+ return
+ (dist < 31) ? a >> dist | ((uint32_t) (a << (-dist & 31)) != 0) : (a != 0);
+}
+
+/**
+ * \brief Extracted from softfloat_roundPackToF64()
+ */
+static inline
+double _mesa_roundtozero_f64(int64_t s, int64_t e, int64_t m)
+{
+ di_type result;
+
+ if ((uint64_t) e >= 0x7fd) {
+ if (e < 0) {
+ m = _mesa_shift_right_jam64(m, -e);
+ e = 0;
+ } else if ((e > 0x7fd) || (0x8000000000000000 <= m)) {
+ e = 0x7ff;
+ m = 0;
+ result.u = (s << 63) + (e << 52) + m;
+ result.u -= 1;
+ return result.f;
+ }
+ }
+
+ m >>= 10;
+ if (m == 0)
+ e = 0;
+
+ result.u = (s << 63) + (e << 52) + m;
+ return result.f;
+}
+
+/**
+ * \brief Extracted from softfloat_roundPackToF32()
+ */
+static inline
+float _mesa_round_f32(int32_t s, int32_t e, int32_t m, bool rtz)
+{
+ fi_type result;
+ uint8_t round_increment = rtz ? 0 : 0x40;
+
+ if ((uint32_t) e >= 0xfd) {
+ if (e < 0) {
+ m = _mesa_shift_right_jam32(m, -e);
+ e = 0;
+ } else if ((e > 0xfd) || (0x80000000 <= m + round_increment)) {
+ e = 0xff;
+ m = 0;
+ result.u = (s << 31) + (e << 23) + m;
+ result.u -= !round_increment;
+ return result.f;
+ }
+ }
+
+ uint8_t round_bits;
+ round_bits = m & 0x7f;
+ m = ((uint32_t) m + round_increment) >> 7;
+ m &= ~(uint32_t) (! (round_bits ^ 0x40) & !rtz);
+ if (m == 0)
+ e = 0;
+
+ result.u = (s << 31) + (e << 23) + m;
+ return result.f;
+}
+
+/**
+ * \brief Extracted from softfloat_roundPackToF16()
+ */
+static inline
+uint16_t _mesa_roundtozero_f16(int16_t s, int16_t e, int16_t m)
+{
+ if ((uint16_t) e >= 0x1d) {
+ if (e < 0) {
+ m = _mesa_shift_right_jam32(m, -e);
+ e = 0;
+ } else if ((e > 0x1d) || (0x8000 <= m)) {
+ e = 0x1f;
+ m = 0;
+ return (s << 15) + (e << 10) + m - 1;
+ }
+ }
+
+ m >>= 4;
+ if (m == 0)
+ e = 0;
+
+ return (s << 15) + (e << 10) + m;
+}
+
+/**
+ * \brief Shifts the N-bit unsigned integer pointed to by 'a' left by the number of
+ * bits given in 'dist', where N = 'size_words' * 32. The value of 'dist'
+ * must be in the range 1 to 31. Any nonzero bits shifted off are lost. The
+ * shifted N-bit result is stored at the location pointed to by 'm_out'. Each
+ * of 'a' and 'm_out' points to a 'size_words'-long array of 32-bit elements
+ * that concatenate in the platform's normal endian order to form an N-bit
+ * integer.
+ *
+ * From softfloat_shortShiftLeftM()
+ */
+static inline void
+_mesa_short_shift_left_m(uint8_t size_words, const uint32_t *a, uint8_t dist, uint32_t *m_out)
+{
+ uint8_t neg_dist;
+ unsigned index, last_index;
+ uint32_t part_word, a_word;
+
+ neg_dist = -dist;
+ index = index_word_hi(size_words);
+ last_index = index_word_lo(size_words);
+ part_word = a[index] << dist;
+ while (index != last_index) {
+ a_word = a[index - word_incr];
+ m_out[index] = part_word | a_word >> (neg_dist & 31);
+ index -= word_incr;
+ part_word = a_word << dist;
+ }
+ m_out[index] = part_word;
+}
+
+/**
+ * \brief Shifts the N-bit unsigned integer pointed to by 'a' left by the number of
+ * bits given in 'dist', where N = 'size_words' * 32. The value of 'dist'
+ * must not be zero. Any nonzero bits shifted off are lost. The shifted
+ * N-bit result is stored at the location pointed to by 'm_out'. Each of 'a'
+ * and 'm_out' points to a 'size_words'-long array of 32-bit elements that
+ * concatenate in the platform's normal endian order to form an N-bit
+ * integer. The value of 'dist' can be arbitrarily large. In particular, if
+ * 'dist' is greater than N, the stored result will be 0.
+ *
+ * From softfloat_shiftLeftM()
+ */
+static inline void
+_mesa_shift_left_m(uint8_t size_words, const uint32_t *a, uint32_t dist, uint32_t *m_out)
+{
+ uint32_t word_dist;
+ uint8_t inner_dist;
+ uint8_t i;
+
+ word_dist = dist >> 5;
+ if (word_dist < size_words) {
+ a += index_multiword_lo_but(size_words, word_dist);
+ inner_dist = dist & 31;
+ if (inner_dist) {
+ _mesa_short_shift_left_m(size_words - word_dist, a, inner_dist,
+ m_out + index_multiword_hi_but(size_words, word_dist));
+ if (!word_dist)
+ return;
+ } else {
+ uint32_t *dest = m_out + index_word_hi(size_words);
+ a += index_word_hi(size_words - word_dist);
+ for (i = size_words - word_dist; i; --i) {
+ *dest = *a;
+ a -= word_incr;
+ dest -= word_incr;
+ }
+ }
+ m_out += index_multiword_lo(size_words, word_dist);
+ } else {
+ word_dist = size_words;
+ }
+ do {
+ *m_out++ = 0;
+ --word_dist;
+ } while (word_dist);
+}
+
+/**
+ * \brief Shifts the N-bit unsigned integer pointed to by 'a' right by the number of
+ * bits given in 'dist', where N = 'size_words' * 32. The value of 'dist'
+ * must be in the range 1 to 31. Any nonzero bits shifted off are lost. The
+ * shifted N-bit result is stored at the location pointed to by 'm_out'. Each
+ * of 'a' and 'm_out' points to a 'size_words'-long array of 32-bit elements
+ * that concatenate in the platform's normal endian order to form an N-bit
+ * integer.
+ *
+ * From softfloat_shortShiftRightM()
+ */
+static inline void
+_mesa_short_shift_right_m(uint8_t size_words, const uint32_t *a, uint8_t dist, uint32_t *m_out)
+{
+ uint8_t neg_dist;
+ unsigned index, last_index;
+ uint32_t part_word, a_word;
+
+ neg_dist = -dist;
+ index = index_word_lo(size_words);
+ last_index = index_word_hi(size_words);
+ part_word = a[index] >> dist;
+ while (index != last_index) {
+ a_word = a[index + word_incr];
+ m_out[index] = a_word << (neg_dist & 31) | part_word;
+ index += word_incr;
+ part_word = a_word >> dist;
+ }
+ m_out[index] = part_word;
+}
+
+/**
+ * \brief Shifts the N-bit unsigned integer pointed to by 'a' right by the number of
+ * bits given in 'dist', where N = 'size_words' * 32. The value of 'dist'
+ * must be in the range 1 to 31. If any nonzero bits are shifted off, they
+ * are "jammed" into the least-significant bit of the shifted value by setting
+ * the least-significant bit to 1. This shifted-and-jammed N-bit result is
+ * stored at the location pointed to by 'm_out'. Each of 'a' and 'm_out'
+ * points to a 'size_words'-long array of 32-bit elements that concatenate in
+ * the platform's normal endian order to form an N-bit integer.
+ *
+ *
+ * From softfloat_shortShiftRightJamM()
+ */
+static inline void
+_mesa_short_shift_right_jam_m(uint8_t size_words, const uint32_t *a, uint8_t dist, uint32_t *m_out)
+{
+ uint8_t neg_dist;
+ unsigned index, last_index;
+ uint64_t part_word, a_word;
+
+ neg_dist = -dist;
+ index = index_word_lo(size_words);
+ last_index = index_word_hi(size_words);
+ a_word = a[index];
+ part_word = a_word >> dist;
+ if (part_word << dist != a_word )
+ part_word |= 1;
+ while (index != last_index) {
+ a_word = a[index + word_incr];
+ m_out[index] = a_word << (neg_dist & 31) | part_word;
+ index += word_incr;
+ part_word = a_word >> dist;
+ }
+ m_out[index] = part_word;
+}
+
+/**
+ * \brief Shifts the N-bit unsigned integer pointed to by 'a' right by the number of
+ * bits given in 'dist', where N = 'size_words' * 32. The value of 'dist'
+ * must not be zero. If any nonzero bits are shifted off, they are "jammed"
+ * into the least-significant bit of the shifted value by setting the
+ * least-significant bit to 1. This shifted-and-jammed N-bit result is stored
+ * at the location pointed to by 'm_out'. Each of 'a' and 'm_out' points to a
+ * 'size_words'-long array of 32-bit elements that concatenate in the
+ * platform's normal endian order to form an N-bit integer. The value of
+ * 'dist' can be arbitrarily large. In particular, if 'dist' is greater than
+ * N, the stored result will be either 0 or 1, depending on whether the
+ * original N bits are all zeros.
+ *
+ * From softfloat_shiftRightJamM()
+ */
+static inline void
+_mesa_shift_right_jam_m(uint8_t size_words, const uint32_t *a, uint32_t dist, uint32_t *m_out)
+{
+ uint32_t word_jam, word_dist, *tmp;
+ uint8_t i, inner_dist;
+
+ word_jam = 0;
+ word_dist = dist >> 5;
+ if (word_dist) {
+ if (size_words < word_dist)
+ word_dist = size_words;
+ tmp = (uint32_t *) (a + index_multiword_lo(size_words, word_dist));
+ i = word_dist;
+ do {
+ word_jam = *tmp++;
+ if (word_jam)
+ break;
+ --i;
+ } while (i);
+ tmp = m_out;
+ }
+ if (word_dist < size_words) {
+ a += index_multiword_hi_but(size_words, word_dist);
+ inner_dist = dist & 31;
+ if (inner_dist) {
+ _mesa_short_shift_right_jam_m(size_words - word_dist, a, inner_dist,
+ m_out + index_multiword_lo_but(size_words, word_dist));
+ if (!word_dist) {
+ if (word_jam)
+ m_out[index_word_lo(size_words)] |= 1;
+ return;
+ }
+ } else {
+ a += index_word_lo(size_words - word_dist);
+ tmp = m_out + index_word_lo(size_words);
+ for (i = size_words - word_dist; i; --i) {
+ *tmp = *a;
+ a += word_incr;
+ tmp += word_incr;
+ }
+ }
+ tmp = m_out + index_multiword_hi(size_words, word_dist);
+ }
+ do {
+ *tmp++ = 0;
+ --word_dist;
+ } while (word_dist);
+ if (word_jam)
+ m_out[index_word_lo(size_words)] |= 1;
+}
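+
+/* Worked example (illustrative only, not part of the upstream file): when
+ * 'dist' exceeds N, the result collapses to 0 or 1 depending on whether any
+ * input bit was set. On a little-endian platform:
+ *
+ *    uint32_t a[2] = { 5, 0 };
+ *    uint32_t r[2];
+ *    _mesa_shift_right_jam_m(2, a, 70, r);
+ *    // r == { 1, 0 }: every input bit was shifted off, but some were
+ *    // nonzero, so a single jam bit remains.
+ */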
+
+/**
+ * \brief Calculate a + b, rounding to zero.
+ *
+ * Note that this mainly differs from the original Berkeley SoftFloat 3e
+ * implementation in that we don't fully handle NaNs, zeroes, or the
+ * signalling flags: any NaN payload is acceptable and the sign of a zero
+ * result is not significant.
+ *
+ * From f64_add()
+ */
+double
+_mesa_double_add_rtz(double a, double b)
+{
+ const di_type a_di = {a};
+ uint64_t a_flt_m = a_di.u & 0x0fffffffffffff;
+ uint64_t a_flt_e = (a_di.u >> 52) & 0x7ff;
+ uint64_t a_flt_s = (a_di.u >> 63) & 0x1;
+ const di_type b_di = {b};
+ uint64_t b_flt_m = b_di.u & 0x0fffffffffffff;
+ uint64_t b_flt_e = (b_di.u >> 52) & 0x7ff;
+ uint64_t b_flt_s = (b_di.u >> 63) & 0x1;
+ int64_t s, e, m = 0;
+
+ s = a_flt_s;
+
+ const int64_t exp_diff = a_flt_e - b_flt_e;
+
+ /* Handle special cases */
+
+ if (a_flt_s != b_flt_s) {
+ return _mesa_double_sub_rtz(a, -b);
+ } else if ((a_flt_e == 0) && (a_flt_m == 0)) {
+ /* 'a' is zero, return 'b' */
+ return b;
+ } else if ((b_flt_e == 0) && (b_flt_m == 0)) {
+ /* 'b' is zero, return 'a' */
+ return a;
+ } else if (a_flt_e == 0x7ff && a_flt_m != 0) {
+ /* 'a' is a NaN, return NaN */
+ return a;
+ } else if (b_flt_e == 0x7ff && b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ } else if (a_flt_e == 0x7ff && a_flt_m == 0) {
+ /* Inf + x = Inf */
+ return a;
+ } else if (b_flt_e == 0x7ff && b_flt_m == 0) {
+ /* x + Inf = Inf */
+ return b;
+ } else if (exp_diff == 0 && a_flt_e == 0) {
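+ /* Both operands are subnormal here, so adding 'b's raw mantissa to 'a's
+ * bit pattern yields the correctly packed sum; a carry out of the
+ * mantissa lands in the exponent field, which is exactly what a sum
+ * that becomes normal requires. (Interpretive comment, not from the
+ * upstream sources.)
+ */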
+ di_type result_di;
+ result_di.u = a_di.u + b_flt_m;
+ return result_di.f;
+ } else if (exp_diff == 0) {
+ e = a_flt_e;
+ m = 0x0020000000000000 + a_flt_m + b_flt_m;
+ m <<= 9;
+ } else if (exp_diff < 0) {
+ a_flt_m <<= 9;
+ b_flt_m <<= 9;
+ e = b_flt_e;
+
+ if (a_flt_e != 0)
+ a_flt_m += 0x2000000000000000;
+ else
+ a_flt_m <<= 1;
+
+ a_flt_m = _mesa_shift_right_jam64(a_flt_m, -exp_diff);
+ m = 0x2000000000000000 + a_flt_m + b_flt_m;
+ if (m < 0x4000000000000000) {
+ --e;
+ m <<= 1;
+ }
+ } else {
+ a_flt_m <<= 9;
+ b_flt_m <<= 9;
+ e = a_flt_e;
+
+ if (b_flt_e != 0)
+ b_flt_m += 0x2000000000000000;
+ else
+ b_flt_m <<= 1;
+
+ b_flt_m = _mesa_shift_right_jam64(b_flt_m, exp_diff);
+ m = 0x2000000000000000 + a_flt_m + b_flt_m;
+ if (m < 0x4000000000000000) {
+ --e;
+ m <<= 1;
+ }
+ }
+
+ return _mesa_roundtozero_f64(s, e, m);
+}
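+
+/* Example (illustrative only, not part of the upstream file): round-to-zero
+ * truncates where the default round-to-nearest-even would round up:
+ *
+ *    double r = _mesa_double_add_rtz(1.0, 0x1.8p-53);
+ *    // r == 1.0, whereas 1.0 + 0x1.8p-53 under round-to-nearest-even
+ *    // yields the next representable value, 0x1.0000000000001p0.
+ */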
+
+/**
+ * \brief Returns the number of leading 0 bits before the most-significant 1 bit of
+ * 'a'. If 'a' is zero, 64 is returned.
+ */
+static inline unsigned
+_mesa_count_leading_zeros64(uint64_t a)
+{
+ return 64 - util_last_bit64(a);
+}
+
+/**
+ * \brief Returns the number of leading 0 bits before the most-significant 1 bit of
+ * 'a'. If 'a' is zero, 32 is returned.
+ */
+static inline unsigned
+_mesa_count_leading_zeros32(uint32_t a)
+{
+ return 32 - util_last_bit(a);
+}
+
+static inline double
+_mesa_norm_round_pack_f64(int64_t s, int64_t e, int64_t m)
+{
+ int8_t shift_dist;
+
+ shift_dist = _mesa_count_leading_zeros64(m) - 1;
+ e -= shift_dist;
+ if ((10 <= shift_dist) && ((unsigned) e < 0x7fd)) {
+ di_type result;
+ result.u = (s << 63) + ((m ? e : 0) << 52) + (m << (shift_dist - 10));
+ return result.f;
+ } else {
+ return _mesa_roundtozero_f64(s, e, m << shift_dist);
+ }
+}
+
+/**
+ * \brief Replaces the N-bit unsigned integer pointed to by 'm_out' by the
+ * 2s-complement of itself, where N = 'size_words' * 32. Argument 'm_out'
+ * points to a 'size_words'-long array of 32-bit elements that concatenate in
+ * the platform's normal endian order to form an N-bit integer.
+ *
+ * From softfloat_negXM()
+ */
+static inline void
+_mesa_neg_x_m(uint8_t size_words, uint32_t *m_out)
+{
+ unsigned index, last_index;
+ uint8_t carry;
+ uint32_t word;
+
+ index = index_word_lo(size_words);
+ last_index = index_word_hi(size_words);
+ carry = 1;
+ for (;;) {
+ word = ~m_out[index] + carry;
+ m_out[index] = word;
+ if (index == last_index)
+ break;
+ index += word_incr;
+ if (word)
+ carry = 0;
+ }
+}
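+
+/* Worked example (illustrative only, not part of the upstream file):
+ * negating a 64-bit value of 1 in place on a little-endian platform:
+ *
+ *    uint32_t m[2] = { 1, 0 };
+ *    _mesa_neg_x_m(2, m);
+ *    // m == { 0xffffffff, 0xffffffff }, the two's complement of 1
+ */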
+
+/**
+ * \brief Adds the two N-bit integers pointed to by 'a' and 'b', where N =
+ * 'size_words' * 32. The addition is modulo 2^N, so any carry out is
+ * lost. The N-bit sum is stored at the location pointed to by 'm_out'. Each
+ * of 'a', 'b', and 'm_out' points to a 'size_words'-long array of 32-bit
+ * elements that concatenate in the platform's normal endian order to form an
+ * N-bit integer.
+ *
+ * From softfloat_addM()
+ */
+static inline void
+_mesa_add_m(uint8_t size_words, const uint32_t *a, const uint32_t *b, uint32_t *m_out)
+{
+ unsigned index, last_index;
+ uint8_t carry;
+ uint32_t a_word, word;
+
+ index = index_word_lo(size_words);
+ last_index = index_word_hi(size_words);
+ carry = 0;
+ for (;;) {
+ a_word = a[index];
+ word = a_word + b[index] + carry;
+ m_out[index] = word;
+ if (index == last_index)
+ break;
+ if (word != a_word)
+ carry = (word < a_word);
+ index += word_incr;
+ }
+}
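+
+/* Worked example (illustrative only, not part of the upstream file): a carry
+ * propagating across a word boundary on a little-endian platform:
+ *
+ *    uint32_t a[2] = { 0xffffffff, 0 };
+ *    uint32_t b[2] = { 1, 0 };
+ *    uint32_t r[2];
+ *    _mesa_add_m(2, a, b, r);
+ *    // r == { 0, 1 }, i.e. 0xffffffff + 1 == 0x100000000
+ */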
+
+/**
+ * \brief Subtracts the two N-bit integers pointed to by 'a' and 'b', where N =
+ * 'size_words' * 32. The subtraction is modulo 2^N, so any borrow out (carry
+ * out) is lost. The N-bit difference is stored at the location pointed to by
+ * 'm_out'. Each of 'a', 'b', and 'm_out' points to a 'size_words'-long array
+ * of 32-bit elements that concatenate in the platform's normal endian order
+ * to form an N-bit integer.
+ *
+ * From softfloat_subM()
+ */
+static inline void
+_mesa_sub_m(uint8_t size_words, const uint32_t *a, const uint32_t *b, uint32_t *m_out)
+{
+ unsigned index, last_index;
+ uint8_t borrow;
+ uint32_t a_word, b_word;
+
+ index = index_word_lo(size_words);
+ last_index = index_word_hi(size_words);
+ borrow = 0;
+ for (;;) {
+ a_word = a[index];
+ b_word = b[index];
+ m_out[index] = a_word - b_word - borrow;
+ if (index == last_index)
+ break;
+ borrow = borrow ? (a_word <= b_word) : (a_word < b_word);
+ index += word_incr;
+ }
+}
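+
+/* Worked example (illustrative only, not part of the upstream file): a
+ * borrow propagating across a word boundary on a little-endian platform:
+ *
+ *    uint32_t a[2] = { 0, 1 };        // 0x100000000
+ *    uint32_t b[2] = { 1, 0 };        // 1
+ *    uint32_t r[2];
+ *    _mesa_sub_m(2, a, b, r);
+ *    // r == { 0xffffffff, 0 }, i.e. 0x100000000 - 1
+ */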
+
+/**
+ * \brief Calculate a - b, rounding to zero.
+ *
+ * Note that this mainly differs from the original Berkeley SoftFloat 3e
+ * implementation in that we don't fully handle NaNs, zeroes, or the
+ * signalling flags: any NaN payload is acceptable and the sign of a zero
+ * result is not significant.
+ *
+ * From f64_sub()
+ */
+double
+_mesa_double_sub_rtz(double a, double b)
+{
+ const di_type a_di = {a};
+ uint64_t a_flt_m = a_di.u & 0x0fffffffffffff;
+ uint64_t a_flt_e = (a_di.u >> 52) & 0x7ff;
+ uint64_t a_flt_s = (a_di.u >> 63) & 0x1;
+ const di_type b_di = {b};
+ uint64_t b_flt_m = b_di.u & 0x0fffffffffffff;
+ uint64_t b_flt_e = (b_di.u >> 52) & 0x7ff;
+ uint64_t b_flt_s = (b_di.u >> 63) & 0x1;
+ int64_t s, e, m = 0;
+ int64_t m_diff = 0;
+ unsigned shift_dist = 0;
+
+ s = a_flt_s;
+
+ const int64_t exp_diff = a_flt_e - b_flt_e;
+
+ /* Handle special cases */
+
+ if (a_flt_s != b_flt_s) {
+ return _mesa_double_add_rtz(a, -b);
+ } else if ((a_flt_e == 0) && (a_flt_m == 0)) {
+ /* 'a' is zero, return '-b' */
+ return -b;
+ } else if ((b_flt_e == 0) && (b_flt_m == 0)) {
+ /* 'b' is zero, return 'a' */
+ return a;
+ } else if (a_flt_e == 0x7ff && a_flt_m != 0) {
+ /* 'a' is a NaN, return NaN */
+ return a;
+ } else if (b_flt_e == 0x7ff && b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ } else if (a_flt_e == 0x7ff && a_flt_m == 0) {
+ if (b_flt_e == 0x7ff && b_flt_m == 0) {
+ /* Inf - Inf = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+ /* Inf - x = Inf */
+ return a;
+ } else if (b_flt_e == 0x7ff && b_flt_m == 0) {
+ /* x - Inf = -Inf */
+ return -b;
+ } else if (exp_diff == 0) {
+ m_diff = a_flt_m - b_flt_m;
+
+ if (m_diff == 0)
+ return 0;
+ if (a_flt_e)
+ --a_flt_e;
+ if (m_diff < 0) {
+ s = !s;
+ m_diff = -m_diff;
+ }
+
+ shift_dist = _mesa_count_leading_zeros64(m_diff) - 11;
+ e = a_flt_e - shift_dist;
+ if (e < 0) {
+ shift_dist = a_flt_e;
+ e = 0;
+ }
+
+ di_type result;
+ result.u = (s << 63) + (e << 52) + (m_diff << shift_dist);
+ return result.f;
+ } else if (exp_diff < 0) {
+ a_flt_m <<= 10;
+ b_flt_m <<= 10;
+ s = !s;
+
+ a_flt_m += (a_flt_e) ? 0x4000000000000000 : a_flt_m;
+ a_flt_m = _mesa_shift_right_jam64(a_flt_m, -exp_diff);
+ b_flt_m |= 0x4000000000000000;
+ e = b_flt_e;
+ m = b_flt_m - a_flt_m;
+ } else {
+ a_flt_m <<= 10;
+ b_flt_m <<= 10;
+
+ b_flt_m += (b_flt_e) ? 0x4000000000000000 : b_flt_m;
+ b_flt_m = _mesa_shift_right_jam64(b_flt_m, exp_diff);
+ a_flt_m |= 0x4000000000000000;
+ e = a_flt_e;
+ m = a_flt_m - b_flt_m;
+ }
+
+ return _mesa_norm_round_pack_f64(s, e - 1, m);
+}
+
+static inline void
+_mesa_norm_subnormal_mantissa_f64(uint64_t m, uint64_t *exp, uint64_t *m_out)
+{
+ int shift_dist;
+
+ shift_dist = _mesa_count_leading_zeros64(m) - 11;
+ *exp = 1 - shift_dist;
+ *m_out = m << shift_dist;
+}
+
+static inline void
+_mesa_norm_subnormal_mantissa_f32(uint32_t m, uint32_t *exp, uint32_t *m_out)
+{
+ int shift_dist;
+
+ shift_dist = _mesa_count_leading_zeros32(m) - 8;
+ *exp = 1 - shift_dist;
+ *m_out = m << shift_dist;
+}
+
+/**
+ * \brief Multiplies 'a' and 'b' and stores the 128-bit product at the location
+ * pointed to by 'm_out'. Argument 'm_out' points to an array of four 32-bit
+ * elements that concatenate in the platform's normal endian order to form a
+ * 128-bit integer.
+ *
+ * From softfloat_mul64To128M()
+ */
+static inline void
+_mesa_softfloat_mul_f64_to_f128_m(uint64_t a, uint64_t b, uint32_t *m_out)
+{
+ uint32_t a32, a0, b32, b0;
+ uint64_t z0, mid1, z64, mid;
+
+ a32 = a >> 32;
+ a0 = a;
+ b32 = b >> 32;
+ b0 = b;
+ z0 = (uint64_t) a0 * b0;
+ mid1 = (uint64_t) a32 * b0;
+ mid = mid1 + (uint64_t) a0 * b32;
+ z64 = (uint64_t) a32 * b32;
+ z64 += (uint64_t) (mid < mid1) << 32 | mid >> 32;
+ mid <<= 32;
+ z0 += mid;
+ m_out[index_word(4, 1)] = z0 >> 32;
+ m_out[index_word(4, 0)] = z0;
+ z64 += (z0 < mid);
+ m_out[index_word(4, 3)] = z64 >> 32;
+ m_out[index_word(4, 2)] = z64;
+}
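+
+/* Worked example (illustrative only, not part of the upstream file):
+ * squaring 2^32, with the 128-bit product stored low-to-high on a
+ * little-endian platform:
+ *
+ *    uint32_t p[4];
+ *    _mesa_softfloat_mul_f64_to_f128_m(1ull << 32, 1ull << 32, p);
+ *    // p == { 0, 0, 1, 0 }, i.e. 2^64
+ */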
+
+/**
+ * \brief Calculate a * b, rounding to zero.
+ *
+ * Note that this mainly differs from the original Berkeley SoftFloat 3e
+ * implementation in that we don't fully handle NaNs, zeroes, or the
+ * signalling flags: any NaN payload is acceptable and the sign of a zero
+ * result is not significant.
+ *
+ * From f64_mul()
+ */
+double
+_mesa_double_mul_rtz(double a, double b)
+{
+ const di_type a_di = {a};
+ uint64_t a_flt_m = a_di.u & 0x0fffffffffffff;
+ uint64_t a_flt_e = (a_di.u >> 52) & 0x7ff;
+ uint64_t a_flt_s = (a_di.u >> 63) & 0x1;
+ const di_type b_di = {b};
+ uint64_t b_flt_m = b_di.u & 0x0fffffffffffff;
+ uint64_t b_flt_e = (b_di.u >> 52) & 0x7ff;
+ uint64_t b_flt_s = (b_di.u >> 63) & 0x1;
+ int64_t s, e, m = 0;
+
+ s = a_flt_s ^ b_flt_s;
+
+ if (a_flt_e == 0x7ff) {
+ if (a_flt_m != 0) {
+ /* 'a' is a NaN, return NaN */
+ return a;
+ } else if (b_flt_e == 0x7ff && b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ }
+
+ if (!(b_flt_e | b_flt_m)) {
+ /* Inf * 0 = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+ /* Inf * x = Inf */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0;
+ return result.f;
+ }
+
+ if (b_flt_e == 0x7ff) {
+ if (b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ }
+ if (!(a_flt_e | a_flt_m)) {
+ /* 0 * Inf = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+ /* x * Inf = Inf */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0;
+ return result.f;
+ }
+
+ if (a_flt_e == 0) {
+ if (a_flt_m == 0) {
+ /* 'a' is zero. Return zero */
+ di_type result;
+ result.u = (s << 63) + 0;
+ return result.f;
+ }
+ _mesa_norm_subnormal_mantissa_f64(a_flt_m, &a_flt_e, &a_flt_m);
+ }
+ if (b_flt_e == 0) {
+ if (b_flt_m == 0) {
+ /* 'b' is zero. Return zero */
+ di_type result;
+ result.u = (s << 63) + 0;
+ return result.f;
+ }
+ _mesa_norm_subnormal_mantissa_f64(b_flt_m, &b_flt_e, &b_flt_m);
+ }
+
+ e = a_flt_e + b_flt_e - 0x3ff;
+ a_flt_m = (a_flt_m | 0x0010000000000000) << 10;
+ b_flt_m = (b_flt_m | 0x0010000000000000) << 11;
+
+ uint32_t m_128[4];
+ _mesa_softfloat_mul_f64_to_f128_m(a_flt_m, b_flt_m, m_128);
+
+ m = (uint64_t) m_128[index_word(4, 3)] << 32 | m_128[index_word(4, 2)];
+ if (m_128[index_word(4, 1)] || m_128[index_word(4, 0)])
+ m |= 1;
+
+ if (m < 0x4000000000000000) {
+ --e;
+ m <<= 1;
+ }
+
+ return _mesa_roundtozero_f64(s, e, m);
+}
+
+
+/**
+ * \brief Calculate a * b + c, rounding to zero.
+ *
+ * Note that this mainly differs from the original Berkeley SoftFloat 3e
+ * implementation in that we don't fully handle NaNs, zeroes, or the
+ * signalling flags: any NaN payload is acceptable and the sign of a zero
+ * result is not significant.
+ *
+ * From f64_mulAdd()
+ */
+double
+_mesa_double_fma_rtz(double a, double b, double c)
+{
+ const di_type a_di = {a};
+ uint64_t a_flt_m = a_di.u & 0x0fffffffffffff;
+ uint64_t a_flt_e = (a_di.u >> 52) & 0x7ff;
+ uint64_t a_flt_s = (a_di.u >> 63) & 0x1;
+ const di_type b_di = {b};
+ uint64_t b_flt_m = b_di.u & 0x0fffffffffffff;
+ uint64_t b_flt_e = (b_di.u >> 52) & 0x7ff;
+ uint64_t b_flt_s = (b_di.u >> 63) & 0x1;
+ const di_type c_di = {c};
+ uint64_t c_flt_m = c_di.u & 0x0fffffffffffff;
+ uint64_t c_flt_e = (c_di.u >> 52) & 0x7ff;
+ uint64_t c_flt_s = (c_di.u >> 63) & 0x1;
+ int64_t s, e, m = 0;
+
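+ /* The '^ 0' terms here and in _mesa_float_fma_rtz() below are apparently
+ * placeholders for the negate flags carried by the 'op' argument of the
+ * original f64_mulAdd(); no negation is applied in this copy.
+ * (Interpretive comment, not from the upstream sources.)
+ */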
+ c_flt_s ^= 0;
+ s = a_flt_s ^ b_flt_s ^ 0;
+
+ if (a_flt_e == 0x7ff) {
+ if (a_flt_m != 0) {
+ /* 'a' is a NaN, return NaN */
+ return a;
+ } else if (b_flt_e == 0x7ff && b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ } else if (c_flt_e == 0x7ff && c_flt_m != 0) {
+ /* 'c' is a NaN, return NaN */
+ return c;
+ }
+
+ if (!(b_flt_e | b_flt_m)) {
+ /* Inf * 0 + y = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+
+ if ((c_flt_e == 0x7ff && c_flt_m == 0) && (s != c_flt_s)) {
+ /* Inf * x - Inf = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+
+ /* Inf * x + y = Inf */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0;
+ return result.f;
+ }
+
+ if (b_flt_e == 0x7ff) {
+ if (b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ } else if (c_flt_e == 0x7ff && c_flt_m != 0) {
+ /* 'c' is a NaN, return NaN */
+ return c;
+ }
+
+ if (!(a_flt_e | a_flt_m)) {
+ /* 0 * Inf + y = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+
+ if ((c_flt_e == 0x7ff && c_flt_m == 0) && (s != c_flt_s)) {
+ /* x * Inf - Inf = NaN */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0x1;
+ return result.f;
+ }
+
+ /* x * Inf + y = Inf */
+ di_type result;
+ e = 0x7ff;
+ result.u = (s << 63) + (e << 52) + 0;
+ return result.f;
+ }
+
+ if (c_flt_e == 0x7ff) {
+ if (c_flt_m != 0) {
+ /* 'c' is a NaN, return NaN */
+ return c;
+ }
+
+ /* x * y + Inf = Inf */
+ return c;
+ }
+
+ if (a_flt_e == 0) {
+ if (a_flt_m == 0) {
+ /* 'a' is zero, return 'c' */
+ return c;
+ }
+ _mesa_norm_subnormal_mantissa_f64(a_flt_m, &a_flt_e, &a_flt_m);
+ }
+
+ if (b_flt_e == 0) {
+ if (b_flt_m == 0) {
+ /* 'b' is zero, return 'c' */
+ return c;
+ }
+ _mesa_norm_subnormal_mantissa_f64(b_flt_m, &b_flt_e, &b_flt_m);
+ }
+
+ e = a_flt_e + b_flt_e - 0x3fe;
+ a_flt_m = (a_flt_m | 0x0010000000000000) << 10;
+ b_flt_m = (b_flt_m | 0x0010000000000000) << 11;
+
+ uint32_t m_128[4];
+ _mesa_softfloat_mul_f64_to_f128_m(a_flt_m, b_flt_m, m_128);
+
+ m = (uint64_t) m_128[index_word(4, 3)] << 32 | m_128[index_word(4, 2)];
+
+ int64_t shift_dist = 0;
+ if (!(m & 0x4000000000000000)) {
+ --e;
+ shift_dist = -1;
+ }
+
+ if (c_flt_e == 0) {
+ if (c_flt_m == 0) {
+ /* 'c' is zero, return 'a * b' */
+ if (shift_dist)
+ m <<= 1;
+
+ if (m_128[index_word(4, 1)] || m_128[index_word(4, 0)])
+ m |= 1;
+ return _mesa_roundtozero_f64(s, e - 1, m);
+ }
+ _mesa_norm_subnormal_mantissa_f64(c_flt_m, &c_flt_e, &c_flt_m);
+ }
+ c_flt_m = (c_flt_m | 0x0010000000000000) << 10;
+
+ uint32_t c_flt_m_128[4];
+ int64_t exp_diff = e - c_flt_e;
+ if (exp_diff < 0) {
+ e = c_flt_e;
+ if ((s == c_flt_s) || (exp_diff < -1)) {
+ shift_dist -= exp_diff;
+ if (shift_dist) {
+ m = _mesa_shift_right_jam64(m, shift_dist);
+ }
+ } else {
+ if (!shift_dist) {
+ _mesa_short_shift_right_m(4, m_128, 1, m_128);
+ }
+ }
+ } else {
+ if (shift_dist)
+ _mesa_add_m(4, m_128, m_128, m_128);
+ if (!exp_diff) {
+ m = (uint64_t) m_128[index_word(4, 3)] << 32
+ | m_128[index_word(4, 2)];
+ } else {
+ c_flt_m_128[index_word(4, 3)] = c_flt_m >> 32;
+ c_flt_m_128[index_word(4, 2)] = c_flt_m;
+ c_flt_m_128[index_word(4, 1)] = 0;
+ c_flt_m_128[index_word(4, 0)] = 0;
+ _mesa_shift_right_jam_m(4, c_flt_m_128, exp_diff, c_flt_m_128);
+ }
+ }
+
+ if (s == c_flt_s) {
+ if (exp_diff <= 0) {
+ m += c_flt_m;
+ } else {
+ _mesa_add_m(4, m_128, c_flt_m_128, m_128);
+ m = (uint64_t) m_128[index_word(4, 3)] << 32
+ | m_128[index_word(4, 2)];
+ }
+ if (m & 0x8000000000000000) {
+ e++;
+ m = _mesa_short_shift_right_jam64(m, 1);
+ }
+ } else {
+ if (exp_diff < 0) {
+ s = c_flt_s;
+ if (exp_diff < -1) {
+ m = c_flt_m - m;
+ if (m_128[index_word(4, 1)] || m_128[index_word(4, 0)]) {
+ m = (m - 1) | 1;
+ }
+ if (!(m & 0x4000000000000000)) {
+ --e;
+ m <<= 1;
+ }
+ return _mesa_roundtozero_f64(s, e - 1, m);
+ } else {
+ c_flt_m_128[index_word(4, 3)] = c_flt_m >> 32;
+ c_flt_m_128[index_word(4, 2)] = c_flt_m;
+ c_flt_m_128[index_word(4, 1)] = 0;
+ c_flt_m_128[index_word(4, 0)] = 0;
+ _mesa_sub_m(4, c_flt_m_128, m_128, m_128);
+ }
+ } else if (!exp_diff) {
+ m -= c_flt_m;
+ if (!m && !m_128[index_word(4, 1)] && !m_128[index_word(4, 0)]) {
+ /* Return zero */
+ di_type result;
+ result.u = (s << 63) + 0;
+ return result.f;
+ }
+ m_128[index_word(4, 3)] = m >> 32;
+ m_128[index_word(4, 2)] = m;
+ if (m & 0x8000000000000000) {
+ s = !s;
+ _mesa_neg_x_m(4, m_128);
+ }
+ } else {
+ _mesa_sub_m(4, m_128, c_flt_m_128, m_128);
+ if (1 < exp_diff) {
+ m = (uint64_t) m_128[index_word(4, 3)] << 32
+ | m_128[index_word(4, 2)];
+ if (!(m & 0x4000000000000000)) {
+ --e;
+ m <<= 1;
+ }
+ if (m_128[index_word(4, 1)] || m_128[index_word(4, 0)])
+ m |= 1;
+ return _mesa_roundtozero_f64(s, e - 1, m);
+ }
+ }
+
+ shift_dist = 0;
+ m = (uint64_t) m_128[index_word(4, 3)] << 32
+ | m_128[index_word(4, 2)];
+ if (!m) {
+ shift_dist = 64;
+ m = (uint64_t) m_128[index_word(4, 1)] << 32
+ | m_128[index_word(4, 0)];
+ }
+ shift_dist += _mesa_count_leading_zeros64(m) - 1;
+ if (shift_dist) {
+ e -= shift_dist;
+ _mesa_shift_left_m(4, m_128, shift_dist, m_128);
+ m = (uint64_t) m_128[index_word(4, 3)] << 32
+ | m_128[index_word(4, 2)];
+ }
+ }
+
+ if (m_128[index_word(4, 1)] || m_128[index_word(4, 0)])
+ m |= 1;
+ return _mesa_roundtozero_f64(s, e - 1, m);
+}
+
+
+/**
+ * \brief Calculate a * b + c, rounding to zero.
+ *
+ * Note that this mainly differs from the original Berkeley SoftFloat 3e
+ * implementation in that we don't fully handle NaNs, zeroes, or the
+ * signalling flags: any NaN payload is acceptable and the sign of a zero
+ * result is not significant.
+ *
+ * From f32_mulAdd()
+ */
+float
+_mesa_float_fma_rtz(float a, float b, float c)
+{
+ const fi_type a_fi = {a};
+ uint32_t a_flt_m = a_fi.u & 0x07fffff;
+ uint32_t a_flt_e = (a_fi.u >> 23) & 0xff;
+ uint32_t a_flt_s = (a_fi.u >> 31) & 0x1;
+ const fi_type b_fi = {b};
+ uint32_t b_flt_m = b_fi.u & 0x07fffff;
+ uint32_t b_flt_e = (b_fi.u >> 23) & 0xff;
+ uint32_t b_flt_s = (b_fi.u >> 31) & 0x1;
+ const fi_type c_fi = {c};
+ uint32_t c_flt_m = c_fi.u & 0x07fffff;
+ uint32_t c_flt_e = (c_fi.u >> 23) & 0xff;
+ uint32_t c_flt_s = (c_fi.u >> 31) & 0x1;
+ int32_t s, e, m = 0;
+
+ c_flt_s ^= 0;
+ s = a_flt_s ^ b_flt_s ^ 0;
+
+ if (a_flt_e == 0xff) {
+ if (a_flt_m != 0) {
+ /* 'a' is a NaN, return NaN */
+ return a;
+ } else if (b_flt_e == 0xff && b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ } else if (c_flt_e == 0xff && c_flt_m != 0) {
+ /* 'c' is a NaN, return NaN */
+ return c;
+ }
+
+ if (!(b_flt_e | b_flt_m)) {
+ /* Inf * 0 + y = NaN */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + 0x1;
+ return result.f;
+ }
+
+ if ((c_flt_e == 0xff && c_flt_m == 0) && (s != c_flt_s)) {
+ /* Inf * x - Inf = NaN */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + 0x1;
+ return result.f;
+ }
+
+ /* Inf * x + y = Inf */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + 0;
+ return result.f;
+ }
+
+ if (b_flt_e == 0xff) {
+ if (b_flt_m != 0) {
+ /* 'b' is a NaN, return NaN */
+ return b;
+ } else if (c_flt_e == 0xff && c_flt_m != 0) {
+ /* 'c' is a NaN, return NaN */
+ return c;
+ }
+
+ if (!(a_flt_e | a_flt_m)) {
+ /* 0 * Inf + y = NaN */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + 0x1;
+ return result.f;
+ }
+
+ if ((c_flt_e == 0xff && c_flt_m == 0) && (s != c_flt_s)) {
+ /* x * Inf - Inf = NaN */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + 0x1;
+ return result.f;
+ }
+
+ /* x * Inf + y = Inf */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + 0;
+ return result.f;
+ }
+
+ if (c_flt_e == 0xff) {
+ if (c_flt_m != 0) {
+ /* 'c' is a NaN, return NaN */
+ return c;
+ }
+
+ /* x * y + Inf = Inf */
+ return c;
+ }
+
+ if (a_flt_e == 0) {
+ if (a_flt_m == 0) {
+ /* 'a' is zero, return 'c' */
+ return c;
+ }
+ _mesa_norm_subnormal_mantissa_f32(a_flt_m, &a_flt_e, &a_flt_m);
+ }
+
+ if (b_flt_e == 0) {
+ if (b_flt_m == 0) {
+ /* 'b' is zero, return 'c' */
+ return c;
+ }
+ _mesa_norm_subnormal_mantissa_f32(b_flt_m, &b_flt_e, &b_flt_m);
+ }
+
+ e = a_flt_e + b_flt_e - 0x7e;
+ a_flt_m = (a_flt_m | 0x00800000) << 7;
+ b_flt_m = (b_flt_m | 0x00800000) << 7;
+
+ uint64_t m_64 = (uint64_t) a_flt_m * b_flt_m;
+ if (m_64 < 0x2000000000000000) {
+ --e;
+ m_64 <<= 1;
+ }
+
+ if (c_flt_e == 0) {
+ if (c_flt_m == 0) {
+ /* 'c' is zero, return 'a * b' */
+ m = _mesa_short_shift_right_jam64(m_64, 31);
+ return _mesa_round_f32(s, e - 1, m, true);
+ }
+ _mesa_norm_subnormal_mantissa_f32(c_flt_m, &c_flt_e, &c_flt_m);
+ }
+ c_flt_m = (c_flt_m | 0x00800000) << 6;
+
+ int16_t exp_diff = e - c_flt_e;
+ if (s == c_flt_s) {
+ if (exp_diff <= 0) {
+ e = c_flt_e;
+ m = c_flt_m + _mesa_shift_right_jam64(m_64, 32 - exp_diff);
+ } else {
+ m_64 += _mesa_shift_right_jam64((uint64_t) c_flt_m << 32, exp_diff);
+ m = _mesa_short_shift_right_jam64(m_64, 32);
+ }
+ if (m < 0x40000000) {
+ --e;
+ m <<= 1;
+ }
+ } else {
+ uint64_t c_flt_m_64 = (uint64_t) c_flt_m << 32;
+ if (exp_diff < 0) {
+ s = c_flt_s;
+ e = c_flt_e;
+ m_64 = c_flt_m_64 - _mesa_shift_right_jam64(m_64, -exp_diff);
+ } else if (!exp_diff) {
+ m_64 -= c_flt_m_64;
+ if (!m_64) {
+ /* Return zero */
+ fi_type result;
+ result.u = (s << 31) + 0;
+ return result.f;
+ }
+ if (m_64 & 0x8000000000000000) {
+ s = !s;
+ m_64 = -m_64;
+ }
+ } else {
+ m_64 -= _mesa_shift_right_jam64(c_flt_m_64, exp_diff);
+ }
+ int8_t shift_dist = _mesa_count_leading_zeros64(m_64) - 1;
+ e -= shift_dist;
+ shift_dist -= 32;
+ if (shift_dist < 0) {
+ m = _mesa_short_shift_right_jam64(m_64, -shift_dist);
+ } else {
+ m = (uint32_t) m_64 << shift_dist;
+ }
+ }
+
+ return _mesa_round_f32(s, e, m, true);
+}
+
+
+/**
+ * \brief Converts a 64-bit float to a 32-bit float, rounding to zero when
+ * 'rtz' is true and to nearest otherwise.
+ *
+ * From f64_to_f32()
+ */
+float
+_mesa_double_to_f32(double val, bool rtz)
+{
+ const di_type di = {val};
+ uint64_t flt_m = di.u & 0x0fffffffffffff;
+ uint64_t flt_e = (di.u >> 52) & 0x7ff;
+ uint64_t flt_s = (di.u >> 63) & 0x1;
+ int32_t s, e, m = 0;
+
+ s = flt_s;
+
+ if (flt_e == 0x7ff) {
+ if (flt_m != 0) {
+ /* 'val' is a NaN, return NaN */
+ fi_type result;
+ e = 0xff;
+ m = 0x1;
+ result.u = (s << 31) + (e << 23) + m;
+ return result.f;
+ }
+
+ /* 'val' is Inf, return Inf */
+ fi_type result;
+ e = 0xff;
+ result.u = (s << 31) + (e << 23) + m;
+ return result.f;
+ }
+
+ if (!(flt_e | flt_m)) {
+ /* 'val' is zero, return zero */
+ fi_type result;
+ e = 0;
+ result.u = (s << 31) + (e << 23) + m;
+ return result.f;
+ }
+
+ m = _mesa_short_shift_right_jam64(flt_m, 22);
+ if (!(flt_e | m)) {
+ /* 'val' is denorm, return zero */
+ fi_type result;
+ e = 0;
+ result.u = (s << 31) + (e << 23) + m;
+ return result.f;
+ }
+
+ return _mesa_round_f32(s, flt_e - 0x381, m | 0x40000000, rtz);
+}
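+
+/* Example (illustrative only, not part of the upstream file): the 'rtz'
+ * flag selects truncation instead of round-to-nearest:
+ *
+ *    float f = _mesa_double_to_f32(0x1.0000018p0, true);
+ *    // f == 1.0f; with rtz == false the same input rounds up to
+ *    // 0x1.000002p0f, the next representable float.
+ */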
+
+
+/**
+ * \brief Converts a 32-bit float to a 16-bit float, rounding the result to zero.
+ *
+ * From f32_to_f16()
+ */
+uint16_t
+_mesa_float_to_half_rtz(float val)
+{
+ const fi_type fi = {val};
+ const uint32_t flt_m = fi.u & 0x7fffff;
+ const uint32_t flt_e = (fi.u >> 23) & 0xff;
+ const uint32_t flt_s = (fi.u >> 31) & 0x1;
+ int16_t s, e, m = 0;
+
+ s = flt_s;
+
+ if (flt_e == 0xff) {
+ if (flt_m != 0) {
+ /* 'val' is a NaN, return NaN */
+ e = 0x1f;
+ m = 0x1;
+ return (s << 15) + (e << 10) + m;
+ }
+
+ /* 'val' is Inf, return Inf */
+ e = 0x1f;
+ return (s << 15) + (e << 10) + m;
+ }
+
+ if (!(flt_e | flt_m)) {
+ /* 'val' is zero, return zero */
+ e = 0;
+ return (s << 15) + (e << 10) + m;
+ }
+
+ m = flt_m >> 9 | ((flt_m & 0x1ff) != 0);
+ if (!(flt_e | m)) {
+ /* 'val' is denorm, return zero */
+ e = 0;
+ return (s << 15) + (e << 10) + m;
+ }
+
+ return _mesa_roundtozero_f16(s, flt_e - 0x71, m | 0x4000);
+}
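+
+/* Example (illustrative only, not part of the upstream file): exactly
+ * representable values convert losslessly:
+ *
+ *    uint16_t h = _mesa_float_to_half_rtz(1.0f);
+ *    // h == 0x3c00, the half-float encoding of 1.0
+ */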
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.h b/third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.h
new file mode 100644
index 0000000000..4e48c6548b
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/softfloat.h
@@ -0,0 +1,65 @@
+/*
+ * License for Berkeley SoftFloat Release 3e
+ *
+ * John R. Hauser
+ * 2018 January 20
+ *
+ * The following applies to the whole of SoftFloat Release 3e as well as to
+ * each source file individually.
+ *
+ * Copyright 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 The Regents of the
+ * University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions, and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions, and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * The functions listed in this file are modified versions of the ones
+ * from the Berkeley SoftFloat 3e Library.
+ */
+
+#ifndef _SOFTFLOAT_H_
+#define _SOFTFLOAT_H_
+
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+double _mesa_double_add_rtz(double a, double b);
+double _mesa_double_sub_rtz(double a, double b);
+double _mesa_double_mul_rtz(double a, double b);
+double _mesa_double_fma_rtz(double a, double b, double c);
+float _mesa_float_fma_rtz(float a, float b, float c);
+float _mesa_double_to_f32(double x, bool rtz);
+uint16_t _mesa_float_to_half_rtz(float x);
+
+#ifdef __cplusplus
+} /* extern C */
+#endif
+
+#endif /* _SOFTFLOAT_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.c b/third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.c
new file mode 100644
index 0000000000..31ebe3cb7e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2017 Thomas Helland
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "string_buffer.h"
+
+static bool
+ensure_capacity(struct _mesa_string_buffer *str, uint32_t needed_capacity)
+{
+ if (needed_capacity <= str->capacity)
+ return true;
+
+ /* Too small, double until we can fit the new string */
+ uint32_t new_capacity = str->capacity * 2;
+ while (needed_capacity > new_capacity)
+ new_capacity *= 2;
+
+ str->buf = reralloc_array_size(str, str->buf, sizeof(char), new_capacity);
+ if (str->buf == NULL)
+ return false;
+
+ str->capacity = new_capacity;
+ return true;
+}
+
+struct _mesa_string_buffer *
+_mesa_string_buffer_create(void *mem_ctx, uint32_t initial_capacity)
+{
+ struct _mesa_string_buffer *str;
+ str = ralloc(mem_ctx, struct _mesa_string_buffer);
+
+ if (str == NULL)
+ return NULL;
+
+ /* If no initial capacity is given, fall back to a small default */
+ str->capacity = initial_capacity ? initial_capacity : 32;
+ str->buf = ralloc_array(str, char, str->capacity);
+
+ if (!str->buf) {
+ ralloc_free(str);
+ return NULL;
+ }
+
+ str->length = 0;
+ str->buf[str->length] = '\0';
+ return str;
+}
+
+bool
+_mesa_string_buffer_append_all(struct _mesa_string_buffer *str,
+ uint32_t num_args, ...)
+{
+ uint32_t i;
+ char* s;
+ va_list args;
+ va_start(args, num_args);
+ for (i = 0; i < num_args; i++) {
+ s = va_arg(args, char*);
+ if (!_mesa_string_buffer_append_len(str, s, strlen(s))) {
+ va_end(args);
+ return false;
+ }
+ }
+ va_end(args);
+ return true;
+}
+
+bool
+_mesa_string_buffer_append_len(struct _mesa_string_buffer *str,
+ const char *c, uint32_t len)
+{
+ uint32_t needed_length = str->length + len + 1;
+
+ /* Check if we're overflowing uint32_t */
+ if (needed_length < str->length)
+ return false;
+
+ if (!ensure_capacity(str, needed_length))
+ return false;
+
+ memcpy(str->buf + str->length, c, len);
+ str->length += len;
+ str->buf[str->length] = '\0';
+ return true;
+}
+
+bool
+_mesa_string_buffer_vprintf(struct _mesa_string_buffer *str,
+ const char *format, va_list args)
+{
+ /* Loop at most twice to avoid duplicating code: the first pass may run
+ * out of space, in which case we grow the buffer and retry once. */
+ for (uint32_t i = 0; i < 2; i++) {
+ va_list arg_copy;
+ va_copy(arg_copy, args);
+ uint32_t space_left = str->capacity - str->length;
+
+ int32_t len = vsnprintf(str->buf + str->length,
+ space_left, format, arg_copy);
+ va_end(arg_copy);
+
+ /* Error in vsnprintf() or the needed length overflows uint32_t */
+ if (unlikely(len < 0 || str->length + len + 1 < str->length))
+ return false;
+
+ /* There was enough space for the string; we're done */
+ if (len < space_left) {
+ str->length += len;
+ return true;
+ }
+
+ /* Not enough space; grow the buffer and retry once */
+ if (!ensure_capacity(str, str->length + len + 1))
+ return false;
+ }
+
+ return false;
+}
+
+bool
+_mesa_string_buffer_printf(struct _mesa_string_buffer *str,
+ const char *format, ...)
+{
+ bool res;
+ va_list args;
+ va_start(args, format);
+ res = _mesa_string_buffer_vprintf(str, format, args);
+ va_end(args);
+ return res;
+}
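+
+/* Typical usage (illustrative only, not part of the upstream file):
+ *
+ *    struct _mesa_string_buffer *sb = _mesa_string_buffer_create(NULL, 32);
+ *    if (sb && _mesa_string_buffer_printf(sb, "%d + %d", 1, 2)) {
+ *       // sb->buf now holds "1 + 2" and sb->length == 5
+ *    }
+ *    _mesa_string_buffer_destroy(sb);
+ */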
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.h b/third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.h
new file mode 100644
index 0000000000..eaaf5f33df
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/string_buffer.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright © 2017 Thomas Helland
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#ifndef _STRING_BUFFER_H
+#define _STRING_BUFFER_H
+
+#include "ralloc.h"
+#include "u_string.h"
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct _mesa_string_buffer {
+ char *buf;
+ uint32_t length;
+ uint32_t capacity;
+};
+
+struct _mesa_string_buffer *
+_mesa_string_buffer_create(void *mem_ctx, uint32_t initial_capacity);
+
+static inline void
+_mesa_string_buffer_destroy(struct _mesa_string_buffer *str)
+{
+ ralloc_free(str);
+}
+
+bool
+_mesa_string_buffer_append_all(struct _mesa_string_buffer *str,
+ uint32_t num_args, ...);
+bool
+_mesa_string_buffer_append_len(struct _mesa_string_buffer *str,
+ const char *c, uint32_t len);
+
+static inline bool
+_mesa_string_buffer_append_char(struct _mesa_string_buffer *str, char c)
+{
+ return _mesa_string_buffer_append_len(str, &c, 1);
+}
+
+static inline bool
+_mesa_string_buffer_append(struct _mesa_string_buffer *str, const char *c)
+{
+ return _mesa_string_buffer_append_len(str, c, strlen(c));
+}
+
+static inline void
+_mesa_string_buffer_clear(struct _mesa_string_buffer *str)
+{
+ str->length = 0;
+ str->buf[str->length] = '\0';
+}
+
+static inline void
+_mesa_string_buffer_crimp_to_fit(struct _mesa_string_buffer *str)
+{
+ char *crimped =
+ (char *) reralloc_array_size(str, str->buf, sizeof(char),
+ str->length + 1);
+ if (!crimped)
+ return;
+
+ str->capacity = str->length + 1;
+ str->buf = crimped;
+}
+
+bool
+_mesa_string_buffer_vprintf(struct _mesa_string_buffer *str,
+ const char *format, va_list args);
+
+bool
+_mesa_string_buffer_printf(struct _mesa_string_buffer *str,
+ const char *format, ...);
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* _STRING_BUFFER_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/strndup.h b/third_party/rust/glslopt/glsl-optimizer/src/util/strndup.h
new file mode 100644
index 0000000000..dcaa429dbe
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/strndup.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef STRNDUP_H
+#define STRNDUP_H
+
+#if defined(_WIN32)
+
+#include <stdlib.h> // size_t
+#include <string.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline char *
+strndup(const char *str, size_t max)
+{
+ size_t n;
+ char *ptr;
+
+ if (!str)
+ return NULL;
+
+ n = strnlen(str, max);
+ ptr = (char *) calloc(n + 1, sizeof(char));
+ if (!ptr)
+ return NULL;
+
+ memcpy(ptr, str, n);
+ return ptr;
+}
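+
+/* Example (illustrative only, not part of the upstream file):
+ *
+ *    char *s = strndup("hello", 3);
+ *    // s == "hel", always NUL-terminated; free(s) when done
+ */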
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _WIN32 */
+
+#endif /* STRNDUP_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/strtod.c b/third_party/rust/glslopt/glsl-optimizer/src/util/strtod.c
new file mode 100644
index 0000000000..de695d64b4
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/strtod.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#include <stdlib.h>
+
+#if defined(_GNU_SOURCE) && defined(HAVE_STRTOD_L)
+#include <locale.h>
+#ifdef HAVE_XLOCALE_H
+#include <xlocale.h>
+#endif
+static locale_t loc;
+#endif
+
+#include "strtod.h"
+
+
+void
+_mesa_locale_init(void)
+{
+#if defined(_GNU_SOURCE) && defined(HAVE_STRTOD_L)
+ loc = newlocale(LC_CTYPE_MASK, "C", NULL);
+#endif
+}
+
+void
+_mesa_locale_fini(void)
+{
+#if defined(_GNU_SOURCE) && defined(HAVE_STRTOD_L)
+ freelocale(loc);
+#endif
+}
+
+/**
+ * Wrapper around strtod which uses the "C" locale so the decimal
+ * point is always '.'
+ */
+double
+_mesa_strtod(const char *s, char **end)
+{
+#if defined(_GNU_SOURCE) && defined(HAVE_STRTOD_L)
+ return strtod_l(s, end, loc);
+#else
+ return strtod(s, end);
+#endif
+}
+
+
+/**
+ * Wrapper around strtof which uses the "C" locale so the decimal
+ * point is always '.'
+ */
+float
+_mesa_strtof(const char *s, char **end)
+{
+#if defined(_GNU_SOURCE) && defined(HAVE_STRTOD_L)
+ return strtof_l(s, end, loc);
+#elif defined(HAVE_STRTOF)
+ return strtof(s, end);
+#else
+ return (float) strtod(s, end);
+#endif
+}
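+
+/* Example (illustrative only, not part of the upstream file): parsing is
+ * stable regardless of the process locale's decimal separator:
+ *
+ *    char *end;
+ *    double d = _mesa_strtod("1.5turns", &end);
+ *    // d == 1.5 and end points at "turns", even under e.g. a de_DE locale
+ */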
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/strtod.h b/third_party/rust/glslopt/glsl-optimizer/src/util/strtod.h
new file mode 100644
index 0000000000..60e15cfa0e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/strtod.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2010 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef STRTOD_H
+#define STRTOD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void
+_mesa_locale_init(void);
+
+extern void
+_mesa_locale_fini(void);
+
+extern double
+_mesa_strtod(const char *s, char **end);
+
+extern float
+_mesa_strtof(const char *s, char **end);
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_atomic.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_atomic.h
new file mode 100644
index 0000000000..db56835e9d
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_atomic.h
@@ -0,0 +1,268 @@
+/**
+ * Many similar implementations exist. See for example libwsbm
+ * or the Linux kernel include/atomic.h
+ *
+ * No copyright claimed on this file.
+ *
+ */
+
+#include "no_extern_c.h"
+
+#ifndef U_ATOMIC_H
+#define U_ATOMIC_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Favor OS-provided implementations.
+ *
+ * Where no OS-provided implementation is available, fall back to
+ * locally coded assembly, compiler intrinsic or ultimately a
+ * mutex-based implementation.
+ */
+#if defined(__sun)
+#define PIPE_ATOMIC_OS_SOLARIS
+#elif defined(_MSC_VER)
+#define PIPE_ATOMIC_MSVC_INTRINSIC
+#elif defined(__GNUC__)
+#define PIPE_ATOMIC_GCC_INTRINSIC
+#else
+#error "Unsupported platform"
+#endif
+
+
+/* Implementation using GCC-provided synchronization intrinsics
+ */
+#if defined(PIPE_ATOMIC_GCC_INTRINSIC)
+
+#define PIPE_ATOMIC "GCC Sync Intrinsics"
+
+#if defined(USE_GCC_ATOMIC_BUILTINS)
+
+/* The builtins with explicit memory model are available since GCC 4.7. */
+#define p_atomic_set(_v, _i) __atomic_store_n((_v), (_i), __ATOMIC_RELEASE)
+#define p_atomic_read(_v) __atomic_load_n((_v), __ATOMIC_ACQUIRE)
+#define p_atomic_dec_zero(v) (__atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL) == 0)
+#define p_atomic_inc(v) (void) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_dec(v) (void) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_add(v, i) (void) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
+#define p_atomic_inc_return(v) __atomic_add_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_dec_return(v) __atomic_sub_fetch((v), 1, __ATOMIC_ACQ_REL)
+#define p_atomic_add_return(v, i) __atomic_add_fetch((v), (i), __ATOMIC_ACQ_REL)
+#define p_atomic_xchg(v, i) __atomic_exchange_n((v), (i), __ATOMIC_ACQ_REL)
+#define PIPE_NATIVE_ATOMIC_XCHG
+
+#else
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+#define p_atomic_dec_zero(v) (__sync_sub_and_fetch((v), 1) == 0)
+#define p_atomic_inc(v) (void) __sync_add_and_fetch((v), 1)
+#define p_atomic_dec(v) (void) __sync_sub_and_fetch((v), 1)
+#define p_atomic_add(v, i) (void) __sync_add_and_fetch((v), (i))
+#define p_atomic_inc_return(v) __sync_add_and_fetch((v), 1)
+#define p_atomic_dec_return(v) __sync_sub_and_fetch((v), 1)
+#define p_atomic_add_return(v, i) __sync_add_and_fetch((v), (i))
+
+#endif
+
+/* There is no __atomic_* compare and exchange that returns the current value.
+ * Also, GCC 5.4 seems unable to optimize a compound statement expression that
+ * uses an additional stack variable with __atomic_compare_exchange[_n].
+ */
+#define p_atomic_cmpxchg(v, old, _new) \
+ __sync_val_compare_and_swap((v), (old), (_new))
+
+#endif
+
+
+
+/* Unlocked version for single threaded environments, such as some
+ * windows kernel modules.
+ */
+#if defined(PIPE_ATOMIC_OS_UNLOCKED)
+
+#define PIPE_ATOMIC "Unlocked"
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+#define p_atomic_dec_zero(_v) (p_atomic_dec_return(_v) == 0)
+#define p_atomic_inc(_v) ((void) p_atomic_inc_return(_v))
+#define p_atomic_dec(_v) ((void) p_atomic_dec_return(_v))
+#define p_atomic_add(_v, _i) ((void) p_atomic_add_return((_v), (_i)))
+#define p_atomic_inc_return(_v) (++(*(_v)))
+#define p_atomic_dec_return(_v) (--(*(_v)))
+#define p_atomic_add_return(_v, _i) (*(_v) = *(_v) + (_i))
+#define p_atomic_cmpxchg(_v, _old, _new) (*(_v) == (_old) ? (*(_v) = (_new), (_old)) : *(_v))
+
+#endif
+
+
+#if defined(PIPE_ATOMIC_MSVC_INTRINSIC)
+
+#define PIPE_ATOMIC "MSVC Intrinsics"
+
+/* We use the Windows header's Interlocked*64 functions instead of the
+ * _Interlocked*64 intrinsics wherever we can, as support for the latter varies
+ * with target CPU, whereas Windows headers take care of all portability
+ * issues: using intrinsics where available, falling back to library
+ * implementations where not.
+ */
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN 1
+#endif
+#include <windows.h>
+#include <intrin.h>
+#include <assert.h>
+
+/* MSVC supports the decltype keyword, but only in C++, and it doesn't quite
+ * work here; and if a C++-only solution were worthwhile, it would be better
+ * to use templates / function overloading instead of decltype magic.
+ * Therefore, we rely on implicit casting to LONGLONG for the functions that
+ * return a value.
+ */
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+
+#define p_atomic_dec_zero(_v) \
+ (p_atomic_dec_return(_v) == 0)
+
+#define p_atomic_inc(_v) \
+ ((void) p_atomic_inc_return(_v))
+
+#define p_atomic_inc_return(_v) (\
+ sizeof *(_v) == sizeof(short) ? _InterlockedIncrement16((short *) (_v)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedIncrement ((long *) (_v)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedIncrement64 ((__int64 *)(_v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_dec(_v) \
+ ((void) p_atomic_dec_return(_v))
+
+#define p_atomic_dec_return(_v) (\
+ sizeof *(_v) == sizeof(short) ? _InterlockedDecrement16((short *) (_v)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedDecrement ((long *) (_v)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedDecrement64 ((__int64 *)(_v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_add(_v, _i) \
+ ((void) p_atomic_add_return((_v), (_i)))
+
+#define p_atomic_add_return(_v, _i) (\
+ sizeof *(_v) == sizeof(char) ? _InterlockedExchangeAdd8 ((char *) (_v), (_i)) : \
+ sizeof *(_v) == sizeof(short) ? _InterlockedExchangeAdd16((short *) (_v), (_i)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedExchangeAdd ((long *) (_v), (_i)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedExchangeAdd64((__int64 *)(_v), (_i)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_cmpxchg(_v, _old, _new) (\
+ sizeof *(_v) == sizeof(char) ? _InterlockedCompareExchange8 ((char *) (_v), (char) (_new), (char) (_old)) : \
+ sizeof *(_v) == sizeof(short) ? _InterlockedCompareExchange16((short *) (_v), (short) (_new), (short) (_old)) : \
+ sizeof *(_v) == sizeof(long) ? _InterlockedCompareExchange ((long *) (_v), (long) (_new), (long) (_old)) : \
+ sizeof *(_v) == sizeof(__int64) ? InterlockedCompareExchange64 ((__int64 *)(_v), (__int64)(_new), (__int64)(_old)) : \
+ (assert(!"should not get here"), 0))
+
+#endif
+
+#if defined(PIPE_ATOMIC_OS_SOLARIS)
+
+#define PIPE_ATOMIC "Solaris OS atomic functions"
+
+#include <atomic.h>
+#include <assert.h>
+
+#define p_atomic_set(_v, _i) (*(_v) = (_i))
+#define p_atomic_read(_v) (*(_v))
+
+#define p_atomic_dec_zero(v) (\
+ sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) == 0 : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) == 0 : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) == 0 : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) == 0 : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_inc(v) (void) (\
+ sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8 ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_inc_return(v) (__typeof(*v))( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_inc_8_nv ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_inc_16_nv((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_inc_32_nv((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_inc_64_nv((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_dec(v) (void) ( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8 ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_dec_return(v) (__typeof(*v))( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_dec_8_nv ((uint8_t *)(v)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_dec_16_nv((uint16_t *)(v)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_dec_32_nv((uint32_t *)(v)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_dec_64_nv((uint64_t *)(v)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_add(v, i) (void) ( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_add_8 ((uint8_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_add_16((uint16_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_add_32((uint32_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_add_64((uint64_t *)(v), (i)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_add_return(v, i) (__typeof(*v)) ( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_add_8_nv ((uint8_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_add_16_nv((uint16_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_add_32_nv((uint32_t *)(v), (i)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_add_64_nv((uint64_t *)(v), (i)) : \
+ (assert(!"should not get here"), 0))
+
+#define p_atomic_cmpxchg(v, old, _new) (__typeof(*v))( \
+ sizeof(*v) == sizeof(uint8_t) ? atomic_cas_8 ((uint8_t *)(v), (uint8_t )(old), (uint8_t )(_new)) : \
+ sizeof(*v) == sizeof(uint16_t) ? atomic_cas_16((uint16_t *)(v), (uint16_t)(old), (uint16_t)(_new)) : \
+ sizeof(*v) == sizeof(uint32_t) ? atomic_cas_32((uint32_t *)(v), (uint32_t)(old), (uint32_t)(_new)) : \
+ sizeof(*v) == sizeof(uint64_t) ? atomic_cas_64((uint64_t *)(v), (uint64_t)(old), (uint64_t)(_new)) : \
+ (assert(!"should not get here"), 0))
+
+#endif
+
+#ifndef PIPE_ATOMIC
+#error "No pipe_atomic implementation selected"
+#endif
+
+#ifndef PIPE_NATIVE_ATOMIC_XCHG
+static inline uint32_t p_atomic_xchg_32(uint32_t *v, uint32_t i)
+{
+ uint32_t actual = p_atomic_read(v);
+ uint32_t expected;
+ do {
+ expected = actual;
+ actual = p_atomic_cmpxchg(v, expected, i);
+ } while (expected != actual);
+ return actual;
+}
+
+static inline uint64_t p_atomic_xchg_64(uint64_t *v, uint64_t i)
+{
+ uint64_t actual = p_atomic_read(v);
+ uint64_t expected;
+ do {
+ expected = actual;
+ actual = p_atomic_cmpxchg(v, expected, i);
+ } while (expected != actual);
+ return actual;
+}
+
+#define p_atomic_xchg(v, i) (__typeof(*(v)))( \
+ sizeof(*(v)) == sizeof(uint32_t) ? p_atomic_xchg_32((uint32_t *)(v), (uint32_t)(i)) : \
+ sizeof(*(v)) == sizeof(uint64_t) ? p_atomic_xchg_64((uint64_t *)(v), (uint64_t)(i)) : \
+ (assert(!"should not get here"), 0))
+#endif
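+
+/* Usage sketch (illustrative only, not part of the upstream file): the
+ * fallback above emulates an atomic exchange with a compare-and-swap retry
+ * loop:
+ *
+ *    static uint32_t flag;
+ *    uint32_t prev = p_atomic_xchg(&flag, 1);
+ *    // 'prev' is the value 'flag' held immediately before the exchange
+ */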
+
+#endif /* U_ATOMIC_H */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.c b/third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.c
new file mode 100644
index 0000000000..e1c8265965
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.c
@@ -0,0 +1,440 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * Copyright (c) 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+#include "pipe/p_config.h"
+
+#include "util/u_debug.h"
+#include "pipe/p_format.h"
+#include "pipe/p_state.h"
+#include "util/u_string.h"
+#include "util/u_math.h"
+#include <inttypes.h>
+
+#include <stdio.h>
+#include <limits.h> /* CHAR_BIT */
+#include <ctype.h> /* isalnum */
+
+#ifdef _WIN32
+#include <windows.h>
+#include <stdlib.h>
+#endif
+
+
+void
+_debug_vprintf(const char *format, va_list ap)
+{
+ static char buf[4096] = {'\0'};
+#if DETECT_OS_WINDOWS || defined(EMBEDDED_DEVICE)
+ /* We buffer until we find a newline. */
+ size_t len = strlen(buf);
+ int ret = vsnprintf(buf + len, sizeof(buf) - len, format, ap);
+ if (ret > (int)(sizeof(buf) - len - 1) || strchr(buf + len, '\n')) {
+ os_log_message(buf);
+ buf[0] = '\0';
+ }
+#else
+ vsnprintf(buf, sizeof(buf), format, ap);
+ os_log_message(buf);
+#endif
+}
+
+
+void
+_pipe_debug_message(struct pipe_debug_callback *cb,
+ unsigned *id,
+ enum pipe_debug_type type,
+ const char *fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ if (cb && cb->debug_message)
+ cb->debug_message(cb->data, id, type, fmt, args);
+ va_end(args);
+}
+
+
+void
+debug_disable_error_message_boxes(void)
+{
+#ifdef _WIN32
+ /* When Windows' error message boxes are disabled for this process (as is
+    * typically the case when running tests in an automated fashion), we
+    * disable the CRT message boxes too.
+ */
+ UINT uMode = SetErrorMode(0);
+ SetErrorMode(uMode);
+ if (uMode & SEM_FAILCRITICALERRORS) {
+ /* Disable assertion failure message box.
+ * http://msdn.microsoft.com/en-us/library/sas1dkb2.aspx
+ */
+ _set_error_mode(_OUT_TO_STDERR);
+#ifdef _MSC_VER
+ /* Disable abort message box.
+ * http://msdn.microsoft.com/en-us/library/e631wekh.aspx
+ */
+ _set_abort_behavior(0, _WRITE_ABORT_MSG | _CALL_REPORTFAULT);
+#endif
+ }
+#endif /* _WIN32 */
+}
+
+
+#ifdef DEBUG
+void
+debug_print_blob(const char *name, const void *blob, unsigned size)
+{
+ const unsigned *ublob = (const unsigned *)blob;
+ unsigned i;
+
+ debug_printf("%s (%d dwords%s)\n", name, size/4,
+ size%4 ? "... plus a few bytes" : "");
+
+ for (i = 0; i < size/4; i++) {
+ debug_printf("%d:\t%08x\n", i, ublob[i]);
+ }
+}
+#endif
+
+
+static bool
+debug_get_option_should_print(void)
+{
+ static bool first = true;
+ static bool value = false;
+
+ if (!first)
+ return value;
+
+   /* Note: debug_get_bool_option() will call back into this function,
+    * but that's fine since 'first' has already been set to false.
+    */
+ first = false;
+ value = debug_get_bool_option("GALLIUM_PRINT_OPTIONS", false);
+   /* XXX should we print this option? Currently it won't be printed. */
+ return value;
+}
+
+
+const char *
+debug_get_option(const char *name, const char *dfault)
+{
+ const char *result;
+
+ result = os_get_option(name);
+ if (!result)
+ result = dfault;
+
+ if (debug_get_option_should_print())
+ debug_printf("%s: %s = %s\n", __FUNCTION__, name,
+ result ? result : "(null)");
+
+ return result;
+}
+
+
+bool
+debug_get_bool_option(const char *name, bool dfault)
+{
+ const char *str = os_get_option(name);
+ bool result;
+
+ if (str == NULL)
+ result = dfault;
+ else if (!strcmp(str, "n"))
+ result = false;
+ else if (!strcmp(str, "no"))
+ result = false;
+ else if (!strcmp(str, "0"))
+ result = false;
+ else if (!strcmp(str, "f"))
+ result = false;
+ else if (!strcmp(str, "F"))
+ result = false;
+ else if (!strcmp(str, "false"))
+ result = false;
+ else if (!strcmp(str, "FALSE"))
+ result = false;
+ else
+ result = true;
+
+ if (debug_get_option_should_print())
+ debug_printf("%s: %s = %s\n", __FUNCTION__, name,
+ result ? "TRUE" : "FALSE");
+
+ return result;
+}
+
+
+long
+debug_get_num_option(const char *name, long dfault)
+{
+ long result;
+ const char *str;
+
+ str = os_get_option(name);
+ if (!str) {
+ result = dfault;
+ } else {
+ char *endptr;
+
+ result = strtol(str, &endptr, 0);
+ if (str == endptr) {
+ /* Restore the default value when no digits were found. */
+ result = dfault;
+ }
+ }
+
+ if (debug_get_option_should_print())
+ debug_printf("%s: %s = %li\n", __FUNCTION__, name, result);
+
+ return result;
+}
+
+
+static bool
+str_has_option(const char *str, const char *name)
+{
+ /* Empty string. */
+ if (!*str) {
+ return false;
+ }
+
+ /* OPTION=all */
+ if (!strcmp(str, "all")) {
+ return true;
+ }
+
+ /* Find 'name' in 'str' surrounded by non-alphanumeric characters. */
+ {
+ const char *start = str;
+ unsigned name_len = strlen(name);
+
+      /* 'start' is the beginning of the currently-parsed word;
+       * we increment 'str' each iteration.
+       * If we find either the end of the string or a non-alphanumeric
+       * character, we compare 'start' up to 'str-1' with 'name'. */
+
+ while (1) {
+ if (!*str || !(isalnum(*str) || *str == '_')) {
+ if (str-start == name_len &&
+ !memcmp(start, name, name_len)) {
+ return true;
+ }
+
+ if (!*str) {
+ return false;
+ }
+
+ start = str+1;
+ }
+
+ str++;
+ }
+ }
+
+ return false;
+}
+
+
+uint64_t
+debug_get_flags_option(const char *name,
+ const struct debug_named_value *flags,
+ uint64_t dfault)
+{
+ uint64_t result;
+ const char *str;
+ const struct debug_named_value *orig = flags;
+ unsigned namealign = 0;
+
+ str = os_get_option(name);
+ if (!str)
+ result = dfault;
+ else if (!strcmp(str, "help")) {
+ result = dfault;
+ _debug_printf("%s: help for %s:\n", __FUNCTION__, name);
+ for (; flags->name; ++flags)
+ namealign = MAX2(namealign, strlen(flags->name));
+ for (flags = orig; flags->name; ++flags)
+ _debug_printf("| %*s [0x%0*"PRIx64"]%s%s\n", namealign, flags->name,
+ (int)sizeof(uint64_t)*CHAR_BIT/4, flags->value,
+ flags->desc ? " " : "", flags->desc ? flags->desc : "");
+ }
+ else {
+ result = 0;
+ while (flags->name) {
+ if (str_has_option(str, flags->name))
+ result |= flags->value;
+ ++flags;
+ }
+ }
+
+ if (debug_get_option_should_print()) {
+ if (str) {
+ debug_printf("%s: %s = 0x%"PRIx64" (%s)\n",
+ __FUNCTION__, name, result, str);
+ } else {
+ debug_printf("%s: %s = 0x%"PRIx64"\n", __FUNCTION__, name, result);
+ }
+ }
+
+ return result;
+}
+
+
+void
+_debug_assert_fail(const char *expr, const char *file, unsigned line,
+ const char *function)
+{
+ _debug_printf("%s:%u:%s: Assertion `%s' failed.\n",
+ file, line, function, expr);
+ os_abort();
+}
+
+
+const char *
+debug_dump_enum(const struct debug_named_value *names,
+ unsigned long value)
+{
+ static char rest[64];
+
+ while (names->name) {
+ if (names->value == value)
+ return names->name;
+ ++names;
+ }
+
+ snprintf(rest, sizeof(rest), "0x%08lx", value);
+ return rest;
+}
+
+
+const char *
+debug_dump_enum_noprefix(const struct debug_named_value *names,
+ const char *prefix,
+ unsigned long value)
+{
+ static char rest[64];
+
+ while (names->name) {
+ if (names->value == value) {
+ const char *name = names->name;
+ while (*name == *prefix) {
+ name++;
+ prefix++;
+ }
+ return name;
+ }
+ ++names;
+ }
+
+ snprintf(rest, sizeof(rest), "0x%08lx", value);
+ return rest;
+}
+
+
+const char *
+debug_dump_flags(const struct debug_named_value *names, unsigned long value)
+{
+ static char output[4096];
+ static char rest[256];
+ int first = 1;
+
+ output[0] = '\0';
+
+ while (names->name) {
+ if ((names->value & value) == names->value) {
+ if (!first)
+ strncat(output, "|", sizeof(output) - strlen(output) - 1);
+ else
+ first = 0;
+ strncat(output, names->name, sizeof(output) - strlen(output) - 1);
+ output[sizeof(output) - 1] = '\0';
+ value &= ~names->value;
+ }
+ ++names;
+ }
+
+ if (value) {
+ if (!first)
+ strncat(output, "|", sizeof(output) - strlen(output) - 1);
+ else
+ first = 0;
+
+ snprintf(rest, sizeof(rest), "0x%08lx", value);
+ strncat(output, rest, sizeof(output) - strlen(output) - 1);
+ output[sizeof(output) - 1] = '\0';
+ }
+
+ if (first)
+ return "0";
+
+ return output;
+}
+
+
+
+#ifdef DEBUG
+int fl_indent = 0;
+const char* fl_function[1024];
+
+int
+debug_funclog_enter(const char* f, UNUSED const int line,
+ UNUSED const char* file)
+{
+ int i;
+
+ for (i = 0; i < fl_indent; i++)
+ debug_printf(" ");
+ debug_printf("%s\n", f);
+
+ assert(fl_indent < 1023);
+ fl_function[fl_indent++] = f;
+
+ return 0;
+}
+
+void
+debug_funclog_exit(const char* f, UNUSED const int line,
+ UNUSED const char* file)
+{
+ --fl_indent;
+ assert(fl_indent >= 0);
+ assert(fl_function[fl_indent] == f);
+}
+
+void
+debug_funclog_enter_exit(const char* f, UNUSED const int line,
+ UNUSED const char* file)
+{
+ int i;
+ for (i = 0; i < fl_indent; i++)
+ debug_printf(" ");
+ debug_printf("%s\n", f);
+}
+#endif
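
The option helpers above combine into a typical pattern: declare a table of
named flags, then parse an environment variable against it. A hedged usage
sketch; the GALLIUM_TRACE variable and the flag names are illustrative, not
defined by this file:

    #include "util/u_debug.h"

    static const struct debug_named_value trace_flags[] = {
       { "draw", 0x1, "log draw calls" },
       { "blit", 0x2, "log blit operations" },
       DEBUG_NAMED_VALUE_END
    };

    static void
    init_tracing(void)
    {
       /* GALLIUM_TRACE=draw,blit sets both bits, "all" sets every flag,
        * and "help" prints the table and returns the default. */
       uint64_t mask = debug_get_flags_option("GALLIUM_TRACE", trace_flags, 0);
       if (mask & 0x1)
          debug_printf("draw tracing enabled\n");
    }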
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.h
new file mode 100644
index 0000000000..a9e44bf24e
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_debug.h
@@ -0,0 +1,460 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Cross-platform debugging helpers.
+ *
+ * For now it just has assert and printf replacements, but it might be extended
+ * with stack trace reports and more advanced logging in the near future.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_DEBUG_H_
+#define U_DEBUG_H_
+
+#include <stdarg.h>
+#include <string.h>
+#include "util/os_misc.h"
+#include "util/detect_os.h"
+#include "util/macros.h"
+
+#if DETECT_OS_HAIKU
+/* Haiku provides debug_printf in libroot with OS.h */
+#include <OS.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#if defined(__GNUC__)
+#define _util_printf_format(fmt, list) __attribute__ ((format (printf, fmt, list)))
+#else
+#define _util_printf_format(fmt, list)
+#endif
+
+void _debug_vprintf(const char *format, va_list ap);
+
+
+static inline void
+_debug_printf(const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ _debug_vprintf(format, ap);
+ va_end(ap);
+}
+
+
+/**
+ * Print debug messages.
+ *
+ * The actual channel used to output debug messages is platform specific. To
+ * avoid misformatting or truncation, follow these rules of thumb:
+ * - output whole lines
+ * - avoid outputting large strings (512 bytes is the current maximum length
+ *   that is guaranteed to be printed on all platforms)
+ */
+#if !DETECT_OS_HAIKU
+static inline void
+debug_printf(const char *format, ...) _util_printf_format(1,2);
+
+static inline void
+debug_printf(const char *format, ...)
+{
+#ifdef DEBUG
+ va_list ap;
+ va_start(ap, format);
+ _debug_vprintf(format, ap);
+ va_end(ap);
+#else
+ (void) format; /* silence warning */
+#endif
+}
+#endif
+
+
+/*
+ * Variadic macro arguments aren't portable here, so we pass the argument list in parentheses.
+ *
+ * usage:
+ * debug_printf_once(("answer: %i\n", 42));
+ */
+#define debug_printf_once(args) \
+ do { \
+ static bool once = true; \
+ if (once) { \
+ once = false; \
+ debug_printf args; \
+ } \
+ } while (0)
+
+
+#ifdef DEBUG
+#define debug_vprintf(_format, _ap) _debug_vprintf(_format, _ap)
+#else
+#define debug_vprintf(_format, _ap) ((void)0)
+#endif
+
+
+#ifdef DEBUG
+/**
+ * Dump a blob in hex to the same place that debug_printf sends its
+ * messages.
+ */
+void debug_print_blob( const char *name, const void *blob, unsigned size );
+#else
+#define debug_print_blob(_name, _blob, _size) ((void)0)
+#endif
+
+
+/**
+ * Disable interactive error message boxes.
+ *
+ * Should be called as early as possible to be effective.
+ */
+void
+debug_disable_error_message_boxes(void);
+
+
+/**
+ * Hard-coded breakpoint.
+ */
+#ifdef DEBUG
+#define debug_break() os_break()
+#else /* !DEBUG */
+#define debug_break() ((void)0)
+#endif /* !DEBUG */
+
+
+long
+debug_get_num_option(const char *name, long dfault);
+
+#ifdef _MSC_VER
+__declspec(noreturn)
+#endif
+void _debug_assert_fail(const char *expr,
+ const char *file,
+ unsigned line,
+ const char *function)
+#if defined(__GNUC__) && !defined(DEBUG)
+ __attribute__((noreturn))
+#endif
+;
+
+
+/**
+ * Assert macro
+ *
+ * Do not expect that the assert call terminates -- errors must be handled
+ * regardless of assert behavior.
+ *
+ * For non-debug builds, the assert macro expands to a no-op, so do not
+ * call functions with side effects in the assert expression.
+ */
+#ifndef NDEBUG
+#define debug_assert(expr) ((expr) ? (void)0 : _debug_assert_fail(#expr, __FILE__, __LINE__, __FUNCTION__))
+#else
+#define debug_assert(expr) (void)(0 && (expr))
+#endif
+
+
+/** Override standard assert macro */
+#ifdef assert
+#undef assert
+#endif
+#define assert(expr) debug_assert(expr)
+
+
+/**
+ * Output the current function name.
+ */
+#ifdef DEBUG
+#define debug_checkpoint() \
+ _debug_printf("%s\n", __FUNCTION__)
+#else
+#define debug_checkpoint() \
+ ((void)0)
+#endif
+
+
+/**
+ * Output the full source code position.
+ */
+#ifdef DEBUG
+#define debug_checkpoint_full() \
+ _debug_printf("%s:%u:%s\n", __FILE__, __LINE__, __FUNCTION__)
+#else
+#define debug_checkpoint_full() \
+ ((void)0)
+#endif
+
+
+/**
+ * Output a warning message. Muted on release version.
+ */
+#ifdef DEBUG
+#define debug_warning(__msg) \
+ _debug_printf("%s:%u:%s: warning: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
+#else
+#define debug_warning(__msg) \
+ ((void)0)
+#endif
+
+
+/**
+ * Emit a warning message, but only once.
+ */
+#ifdef DEBUG
+#define debug_warn_once(__msg) \
+ do { \
+ static bool warned = false; \
+ if (!warned) { \
+ _debug_printf("%s:%u:%s: one time warning: %s\n", \
+ __FILE__, __LINE__, __FUNCTION__, __msg); \
+ warned = true; \
+ } \
+ } while (0)
+#else
+#define debug_warn_once(__msg) \
+ ((void)0)
+#endif
+
+
+/**
+ * Output an error message. Not muted on release version.
+ */
+#ifdef DEBUG
+#define debug_error(__msg) \
+ _debug_printf("%s:%u:%s: error: %s\n", __FILE__, __LINE__, __FUNCTION__, __msg)
+#else
+#define debug_error(__msg) \
+ _debug_printf("error: %s\n", __msg)
+#endif
+
+/**
+ * Output a debug log message to the debug info callback.
+ */
+#define pipe_debug_message(cb, type, fmt, ...) do { \
+ static unsigned id = 0; \
+ if ((cb) && (cb)->debug_message) { \
+ _pipe_debug_message(cb, &id, \
+ PIPE_DEBUG_TYPE_ ## type, \
+ fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+
+struct pipe_debug_callback;
+
+void
+_pipe_debug_message(
+ struct pipe_debug_callback *cb,
+ unsigned *id,
+ enum pipe_debug_type type,
+ const char *fmt, ...) _util_printf_format(4, 5);
+
+
+/**
+ * Used by debug_dump_enum and debug_dump_flags to describe symbols.
+ */
+struct debug_named_value
+{
+ const char *name;
+ uint64_t value;
+ const char *desc;
+};
+
+
+/**
+ * Some C pre-processor magic to simplify creating named values.
+ *
+ * Example:
+ * @code
+ * static const debug_named_value my_names[] = {
+ * DEBUG_NAMED_VALUE(MY_ENUM_VALUE_X),
+ * DEBUG_NAMED_VALUE(MY_ENUM_VALUE_Y),
+ * DEBUG_NAMED_VALUE(MY_ENUM_VALUE_Z),
+ * DEBUG_NAMED_VALUE_END
+ * };
+ *
+ * ...
+ * debug_printf("%s = %s\n",
+ * name,
+ * debug_dump_enum(my_names, my_value));
+ * ...
+ * @endcode
+ */
+#define DEBUG_NAMED_VALUE(__symbol) {#__symbol, (unsigned long)__symbol, NULL}
+#define DEBUG_NAMED_VALUE_WITH_DESCRIPTION(__symbol, __desc) {#__symbol, (unsigned long)__symbol, __desc}
+#define DEBUG_NAMED_VALUE_END {NULL, 0, NULL}
+
+
+/**
+ * Convert a enum value to a string.
+ */
+const char *
+debug_dump_enum(const struct debug_named_value *names,
+ unsigned long value);
+
+const char *
+debug_dump_enum_noprefix(const struct debug_named_value *names,
+ const char *prefix,
+ unsigned long value);
+
+
+/**
+ * Convert binary flags value to a string.
+ */
+const char *
+debug_dump_flags(const struct debug_named_value *names,
+ unsigned long value);
+
+
+/**
+ * Function enter exit loggers
+ */
+#ifdef DEBUG
+int debug_funclog_enter(const char* f, const int line, const char* file);
+void debug_funclog_exit(const char* f, const int line, const char* file);
+void debug_funclog_enter_exit(const char* f, const int line, const char* file);
+
+#define DEBUG_FUNCLOG_ENTER() \
+   int __debug_declaration_work_around = \
+      debug_funclog_enter(__FUNCTION__, __LINE__, __FILE__)
+#define DEBUG_FUNCLOG_EXIT() \
+   do { \
+      (void)__debug_declaration_work_around; \
+      debug_funclog_exit(__FUNCTION__, __LINE__, __FILE__); \
+      return; \
+   } while(0)
+#define DEBUG_FUNCLOG_EXIT_RET(ret) \
+   do { \
+      (void)__debug_declaration_work_around; \
+      debug_funclog_exit(__FUNCTION__, __LINE__, __FILE__); \
+      return ret; \
+   } while(0)
+#define DEBUG_FUNCLOG_ENTER_EXIT() \
+   debug_funclog_enter_exit(__FUNCTION__, __LINE__, __FILE__)
+
+#else
+#define DEBUG_FUNCLOG_ENTER() \
+   int __debug_declaration_work_around
+#define DEBUG_FUNCLOG_EXIT() \
+   do { (void)__debug_declaration_work_around; return; } while(0)
+#define DEBUG_FUNCLOG_EXIT_RET(ret) \
+   do { (void)__debug_declaration_work_around; return ret; } while(0)
+#define DEBUG_FUNCLOG_ENTER_EXIT()
+#endif
+
+
+/**
+ * Get option.
+ *
+ * It is an alias for getenv on Linux.
+ *
+ * On Windows it reads C:\gallium.cfg, a text file with CR+LF line
+ * endings and one option per line, in the form
+ *
+ * NAME=value
+ *
+ * This file must be terminated with an extra empty line.
+ */
+const char *
+debug_get_option(const char *name, const char *dfault);
+
+bool
+debug_get_bool_option(const char *name, bool dfault);
+
+long
+debug_get_num_option(const char *name, long dfault);
+
+uint64_t
+debug_get_flags_option(const char *name,
+ const struct debug_named_value *flags,
+ uint64_t dfault);
+
+#define DEBUG_GET_ONCE_OPTION(suffix, name, dfault) \
+static const char * \
+debug_get_option_ ## suffix (void) \
+{ \
+ static bool first = true; \
+ static const char * value; \
+ if (first) { \
+ first = false; \
+ value = debug_get_option(name, dfault); \
+ } \
+ return value; \
+}
+
+#define DEBUG_GET_ONCE_BOOL_OPTION(suffix, name, dfault) \
+static bool \
+debug_get_option_ ## suffix (void) \
+{ \
+   static bool first = true; \
+   static bool value; \
+   if (first) { \
+      first = false; \
+      value = debug_get_bool_option(name, dfault); \
+   } \
+   return value; \
+}
+
+#define DEBUG_GET_ONCE_NUM_OPTION(suffix, name, dfault) \
+static long \
+debug_get_option_ ## suffix (void) \
+{ \
+   static bool first = true; \
+   static long value; \
+   if (first) { \
+      first = false; \
+      value = debug_get_num_option(name, dfault); \
+   } \
+   return value; \
+}
+
+#define DEBUG_GET_ONCE_FLAGS_OPTION(suffix, name, flags, dfault) \
+static unsigned long \
+debug_get_option_ ## suffix (void) \
+{ \
+   static bool first = true; \
+   static unsigned long value; \
+   if (first) { \
+      first = false; \
+      value = debug_get_flags_option(name, flags, dfault); \
+   } \
+   return value; \
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_DEBUG_H_ */
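
The DEBUG_GET_ONCE_* macros at the end of this header each expand to a
static getter that consults the environment only once and caches the result.
A small sketch, assuming a hypothetical ST_DUMP_SHADERS variable:

    /* Generates debug_get_option_dump_shaders(), which reads the
     * environment on first call only. */
    DEBUG_GET_ONCE_BOOL_OPTION(dump_shaders, "ST_DUMP_SHADERS", false)

    static void
    maybe_dump(const char *ir)
    {
       if (debug_get_option_dump_shaders())
          debug_printf("%s\n", ir);
    }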
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_dynarray.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_dynarray.h
new file mode 100644
index 0000000000..000feaa834
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_dynarray.h
@@ -0,0 +1,214 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Luca Barbieri
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_DYNARRAY_H
+#define U_DYNARRAY_H
+
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include "ralloc.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* A zero-initialized version of this is guaranteed to represent an
+ * empty array.
+ *
+ * Also, size <= capacity, and data != 0 if and only if capacity != 0;
+ * capacity is always the allocation size of data.
+ */
+struct util_dynarray
+{
+ void *mem_ctx;
+ void *data;
+ unsigned size;
+ unsigned capacity;
+};
+
+static inline void
+util_dynarray_init(struct util_dynarray *buf, void *mem_ctx)
+{
+ memset(buf, 0, sizeof(*buf));
+ buf->mem_ctx = mem_ctx;
+}
+
+static inline void
+util_dynarray_fini(struct util_dynarray *buf)
+{
+ if (buf->data) {
+ if (buf->mem_ctx) {
+ ralloc_free(buf->data);
+ } else {
+ free(buf->data);
+ }
+ util_dynarray_init(buf, buf->mem_ctx);
+ }
+}
+
+static inline void
+util_dynarray_clear(struct util_dynarray *buf)
+{
+ buf->size = 0;
+}
+
+#define DYN_ARRAY_INITIAL_SIZE 64
+
+MUST_CHECK static inline void *
+util_dynarray_ensure_cap(struct util_dynarray *buf, unsigned newcap)
+{
+ if (newcap > buf->capacity) {
+ unsigned capacity = MAX3(DYN_ARRAY_INITIAL_SIZE, buf->capacity * 2, newcap);
+ void *data;
+
+ if (buf->mem_ctx) {
+ data = reralloc_size(buf->mem_ctx, buf->data, capacity);
+ } else {
+ data = realloc(buf->data, capacity);
+ }
+ if (!data)
+ return 0;
+
+ buf->data = data;
+ buf->capacity = capacity;
+ }
+
+ return (void *)((char *)buf->data + buf->size);
+}
+
+/* use util_dynarray_trim to reduce the allocated storage */
+MUST_CHECK static inline void *
+util_dynarray_resize_bytes(struct util_dynarray *buf, unsigned nelts, size_t eltsize)
+{
+ if (unlikely(nelts > UINT_MAX / eltsize))
+ return 0;
+
+ unsigned newsize = nelts * eltsize;
+ void *p = util_dynarray_ensure_cap(buf, newsize);
+ if (!p)
+ return 0;
+
+ buf->size = newsize;
+
+ return p;
+}
+
+static inline void
+util_dynarray_clone(struct util_dynarray *buf, void *mem_ctx,
+ struct util_dynarray *from_buf)
+{
+ util_dynarray_init(buf, mem_ctx);
+ if (util_dynarray_resize_bytes(buf, from_buf->size, 1))
+ memcpy(buf->data, from_buf->data, from_buf->size);
+}
+
+MUST_CHECK static inline void *
+util_dynarray_grow_bytes(struct util_dynarray *buf, unsigned ngrow, size_t eltsize)
+{
+ unsigned growbytes = ngrow * eltsize;
+
+ if (unlikely(ngrow > (UINT_MAX / eltsize) ||
+ growbytes > UINT_MAX - buf->size))
+ return 0;
+
+ unsigned newsize = buf->size + growbytes;
+ void *p = util_dynarray_ensure_cap(buf, newsize);
+ if (!p)
+ return 0;
+
+ buf->size = newsize;
+
+ return p;
+}
+
+static inline void
+util_dynarray_trim(struct util_dynarray *buf)
+{
+ if (buf->size != buf->capacity) {
+ if (buf->size) {
+ if (buf->mem_ctx) {
+ buf->data = reralloc_size(buf->mem_ctx, buf->data, buf->size);
+ } else {
+ buf->data = realloc(buf->data, buf->size);
+ }
+ buf->capacity = buf->size;
+ } else {
+ if (buf->mem_ctx) {
+ ralloc_free(buf->data);
+ } else {
+ free(buf->data);
+ }
+ buf->data = NULL;
+ buf->capacity = 0;
+ }
+ }
+}
+
+#define util_dynarray_append(buf, type, v) do {type __v = (v); memcpy(util_dynarray_grow_bytes((buf), 1, sizeof(type)), &__v, sizeof(type));} while(0)
+/* Returns a pointer to the space of the first new element (in case of growth) or NULL on failure. */
+#define util_dynarray_resize(buf, type, nelts) util_dynarray_resize_bytes(buf, (nelts), sizeof(type))
+#define util_dynarray_grow(buf, type, ngrow) util_dynarray_grow_bytes(buf, (ngrow), sizeof(type))
+#define util_dynarray_top_ptr(buf, type) (type*)((char*)(buf)->data + (buf)->size - sizeof(type))
+#define util_dynarray_top(buf, type) *util_dynarray_top_ptr(buf, type)
+#define util_dynarray_pop_ptr(buf, type) (type*)((char*)(buf)->data + ((buf)->size -= sizeof(type)))
+#define util_dynarray_pop(buf, type) *util_dynarray_pop_ptr(buf, type)
+#define util_dynarray_contains(buf, type) ((buf)->size >= sizeof(type))
+#define util_dynarray_element(buf, type, idx) ((type*)(buf)->data + (idx))
+#define util_dynarray_begin(buf) ((buf)->data)
+#define util_dynarray_end(buf) ((void*)util_dynarray_element((buf), char, (buf)->size))
+#define util_dynarray_num_elements(buf, type) ((buf)->size / sizeof(type))
+
+#define util_dynarray_foreach(buf, type, elem) \
+ for (type *elem = (type *)(buf)->data; \
+ elem < (type *)((char *)(buf)->data + (buf)->size); elem++)
+
+#define util_dynarray_foreach_reverse(buf, type, elem) \
+ if ((buf)->size > 0) \
+ for (type *elem = util_dynarray_top_ptr(buf, type); \
+ elem; \
+ elem = elem > (type *)(buf)->data ? elem - 1 : NULL)
+
+#define util_dynarray_delete_unordered(buf, type, v) \
+ do { \
+ unsigned num_elements = (buf)->size / sizeof(type); \
+ unsigned i; \
+ for (i = 0; i < num_elements; i++) { \
+ type __v = *util_dynarray_element((buf), type, (i)); \
+ if (v == __v) { \
+ memcpy(util_dynarray_element((buf), type, (i)), \
+ util_dynarray_pop_ptr((buf), type), sizeof(type)); \
+ break; \
+ } \
+ } \
+ } while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_DYNARRAY_H */
+
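
For reference, the typed macros above compose into a simple growable-array
workflow. A minimal usage sketch with a NULL mem_ctx (plain malloc/free):

    #include "util/u_dynarray.h"

    static void
    dynarray_demo(void)
    {
       struct util_dynarray ints;
       util_dynarray_init(&ints, NULL);

       for (int i = 0; i < 100; i++)
          util_dynarray_append(&ints, int, i);

       util_dynarray_foreach(&ints, int, elem) {
          /* elem is an int * into the contiguous backing store */
       }

       int top = util_dynarray_pop(&ints, int);   /* removes and yields 99 */
       (void)top;

       util_dynarray_fini(&ints);
    }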
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_endian.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_endian.h
new file mode 100644
index 0000000000..6bbae3c444
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_endian.h
@@ -0,0 +1,89 @@
+/**************************************************************************
+ *
+ * Copyright 2007-2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+#ifndef U_ENDIAN_H
+#define U_ENDIAN_H
+
+#ifdef HAVE_ENDIAN_H
+#include <endian.h>
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+# define UTIL_ARCH_LITTLE_ENDIAN 1
+# define UTIL_ARCH_BIG_ENDIAN 0
+#elif __BYTE_ORDER == __BIG_ENDIAN
+# define UTIL_ARCH_LITTLE_ENDIAN 0
+# define UTIL_ARCH_BIG_ENDIAN 1
+#endif
+
+#elif defined(__APPLE__)
+#include <machine/endian.h>
+
+#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN
+# define UTIL_ARCH_LITTLE_ENDIAN 1
+# define UTIL_ARCH_BIG_ENDIAN 0
+#elif __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
+# define UTIL_ARCH_LITTLE_ENDIAN 0
+# define UTIL_ARCH_BIG_ENDIAN 1
+#endif
+
+#elif defined(__sun)
+#include <sys/isa_defs.h>
+
+#if defined(_LITTLE_ENDIAN)
+# define UTIL_ARCH_LITTLE_ENDIAN 1
+# define UTIL_ARCH_BIG_ENDIAN 0
+#elif defined(_BIG_ENDIAN)
+# define UTIL_ARCH_LITTLE_ENDIAN 0
+# define UTIL_ARCH_BIG_ENDIAN 1
+#endif
+
+#elif defined(__OpenBSD__) || defined(__NetBSD__) || \
+ defined(__FreeBSD__) || defined(__DragonFly__)
+#include <sys/types.h>
+#include <machine/endian.h>
+
+#if _BYTE_ORDER == _LITTLE_ENDIAN
+# define UTIL_ARCH_LITTLE_ENDIAN 1
+# define UTIL_ARCH_BIG_ENDIAN 0
+#elif _BYTE_ORDER == _BIG_ENDIAN
+# define UTIL_ARCH_LITTLE_ENDIAN 0
+# define UTIL_ARCH_BIG_ENDIAN 1
+#endif
+
+#elif defined(_WIN32) || defined(ANDROID)
+
+#define UTIL_ARCH_LITTLE_ENDIAN 1
+#define UTIL_ARCH_BIG_ENDIAN 0
+
+#endif
+
+#if !defined(UTIL_ARCH_LITTLE_ENDIAN) || !defined(UTIL_ARCH_BIG_ENDIAN)
+# error "UTIL_ARCH_LITTLE_ENDIAN and/or UTIL_ARCH_BIG_ENDIAN were unset."
+#elif UTIL_ARCH_LITTLE_ENDIAN == UTIL_ARCH_BIG_ENDIAN
+# error "UTIL_ARCH_LITTLE_ENDIAN and UTIL_ARCH_BIG_ENDIAN must not both be 1 or 0."
+#endif
+
+#endif
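
Because UTIL_ARCH_LITTLE_ENDIAN and UTIL_ARCH_BIG_ENDIAN are always defined
to 0 or 1, byte-order branches can be ordinary preprocessor conditionals that
compile away. A sketch of a hypothetical little-endian decoder built on them:

    #include <stdint.h>
    #include <string.h>
    #include "util/u_endian.h"

    /* Decode a little-endian u32 from an arbitrary byte stream. */
    static inline uint32_t
    read_le32(const void *p)
    {
       uint32_t v;
       memcpy(&v, p, sizeof(v));   /* safe for unaligned input */
    #if UTIL_ARCH_BIG_ENDIAN
       v = (v >> 24) | ((v >> 8) & 0x0000ff00) |
           ((v << 8) & 0x00ff0000) | (v << 24);
    #endif
       return v;
    }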
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.c b/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.c
new file mode 100644
index 0000000000..9a8a9ecbbd
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.c
@@ -0,0 +1,139 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+
+#include "pipe/p_config.h"
+#include "util/u_math.h"
+#include "util/u_cpu_detect.h"
+
+#if defined(PIPE_ARCH_SSE)
+#include <xmmintrin.h>
+/* This is defined in pmmintrin.h, but it can only be included when -msse3 is
+ * used, so just define it here to avoid requiring that flag. */
+#ifndef _MM_DENORMALS_ZERO_MASK
+#define _MM_DENORMALS_ZERO_MASK 0x0040
+#endif
+#endif
+
+
+/** 2^x, for x in [-1.0, 1.0) */
+float pow2_table[POW2_TABLE_SIZE];
+
+
+static void
+init_pow2_table(void)
+{
+ int i;
+ for (i = 0; i < POW2_TABLE_SIZE; i++)
+ pow2_table[i] = exp2f((i - POW2_TABLE_OFFSET) / POW2_TABLE_SCALE);
+}
+
+
+/** log2(x), for x in [1.0, 2.0) */
+float log2_table[LOG2_TABLE_SIZE];
+
+
+static void
+init_log2_table(void)
+{
+ unsigned i;
+ for (i = 0; i < LOG2_TABLE_SIZE; i++)
+ log2_table[i] = (float) log2(1.0 + i * (1.0 / LOG2_TABLE_SCALE));
+}
+
+
+/**
+ * One time init for math utilities.
+ */
+void
+util_init_math(void)
+{
+ static bool initialized = false;
+ if (!initialized) {
+ init_pow2_table();
+ init_log2_table();
+ initialized = true;
+ }
+}
+
+/**
+ * Fetches the contents of the fpstate (mxcsr on x86) register.
+ *
+ * On platforms without support for it, this just returns 0.
+ */
+unsigned
+util_fpstate_get(void)
+{
+ unsigned mxcsr = 0;
+
+#if defined(PIPE_ARCH_SSE)
+ if (util_cpu_caps.has_sse) {
+ mxcsr = _mm_getcsr();
+ }
+#endif
+
+ return mxcsr;
+}
+
+/**
+ * Make sure that the FPU treats denormalized floating-point
+ * numbers as zero.
+ *
+ * This is the behavior required by D3D10. OpenGL doesn't care.
+ */
+unsigned
+util_fpstate_set_denorms_to_zero(unsigned current_mxcsr)
+{
+#if defined(PIPE_ARCH_SSE)
+ if (util_cpu_caps.has_sse) {
+ /* Enable flush to zero mode */
+ current_mxcsr |= _MM_FLUSH_ZERO_MASK;
+ if (util_cpu_caps.has_daz) {
+ /* Enable denormals are zero mode */
+ current_mxcsr |= _MM_DENORMALS_ZERO_MASK;
+ }
+ util_fpstate_set(current_mxcsr);
+ }
+#endif
+ return current_mxcsr;
+}
+
+/**
+ * Set the state of the fpstate (mxcsr on x86) register.
+ *
+ * On platforms without support for it, this is a no-op.
+ */
+void
+util_fpstate_set(unsigned mxcsr)
+{
+#if defined(PIPE_ARCH_SSE)
+ if (util_cpu_caps.has_sse) {
+ _mm_setcsr(mxcsr);
+ }
+#endif
+}
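
Callers are expected to pair these functions in a save/modify/restore
pattern around code that is sensitive to denormals. A short sketch; the
wrapper function is illustrative:

    static void
    run_with_denorms_flushed(void (*body)(void))
    {
       unsigned saved = util_fpstate_get();
       util_fpstate_set_denorms_to_zero(saved);
       body();
       util_fpstate_set(saved);   /* restore the caller's MXCSR */
    }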
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h
new file mode 100644
index 0000000000..59266c1692
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h
@@ -0,0 +1,828 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/**
+ * Math utilities and approximations for common math functions.
+ * Reduced precision is usually acceptable in shaders...
+ *
+ * "fast" is used in the names of functions which are low-precision,
+ * or at least lower-precision than the normal C lib functions.
+ */
+
+
+#ifndef U_MATH_H
+#define U_MATH_H
+
+
+#include "c99_math.h"
+#include <assert.h>
+#include <float.h>
+#include <stdarg.h>
+
+#include "bitscan.h"
+#include "u_endian.h" /* for UTIL_ARCH_BIG_ENDIAN */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880
+#endif
+
+#define POW2_TABLE_SIZE_LOG2 9
+#define POW2_TABLE_SIZE (1 << POW2_TABLE_SIZE_LOG2)
+#define POW2_TABLE_OFFSET (POW2_TABLE_SIZE/2)
+#define POW2_TABLE_SCALE ((float)(POW2_TABLE_SIZE/2))
+extern float pow2_table[POW2_TABLE_SIZE];
+
+
+/**
+ * Initialize math module. This should be called before using any
+ * other functions in this module.
+ */
+extern void
+util_init_math(void);
+
+
+union fi {
+ float f;
+ int32_t i;
+ uint32_t ui;
+};
+
+
+union di {
+ double d;
+ int64_t i;
+ uint64_t ui;
+};
+
+
+/**
+ * Extract the IEEE float32 exponent.
+ */
+static inline signed
+util_get_float32_exponent(float x)
+{
+ union fi f;
+
+ f.f = x;
+
+ return ((f.ui >> 23) & 0xff) - 127;
+}
+
+
+/**
+ * Fast version of 2^x
+ * Identity: exp2(a + b) = exp2(a) * exp2(b)
+ * Let ipart = int(x)
+ * Let fpart = x - ipart;
+ * So, exp2(x) = exp2(ipart) * exp2(fpart)
+ * Compute exp2(ipart) with 1 << ipart
+ * Compute exp2(fpart) with a lookup table.
+ */
+static inline float
+util_fast_exp2(float x)
+{
+ int32_t ipart;
+ float fpart, mpart;
+ union fi epart;
+
+   if (x > 129.00000f)
+ return 3.402823466e+38f;
+
+ if (x < -126.99999f)
+ return 0.0f;
+
+ ipart = (int32_t) x;
+ fpart = x - (float) ipart;
+
+ /* same as
+ * epart.f = (float) (1 << ipart)
+ * but faster and without integer overflow for ipart > 31
+ */
+ epart.i = (ipart + 127 ) << 23;
+
+ mpart = pow2_table[POW2_TABLE_OFFSET + (int)(fpart * POW2_TABLE_SCALE)];
+
+ return epart.f * mpart;
+}
+
+
+/**
+ * Fast approximation to exp(x).
+ */
+static inline float
+util_fast_exp(float x)
+{
+ const float k = 1.44269f; /* = log2(e) */
+ return util_fast_exp2(k * x);
+}
+
+
+#define LOG2_TABLE_SIZE_LOG2 16
+#define LOG2_TABLE_SCALE (1 << LOG2_TABLE_SIZE_LOG2)
+#define LOG2_TABLE_SIZE (LOG2_TABLE_SCALE + 1)
+extern float log2_table[LOG2_TABLE_SIZE];
+
+
+/**
+ * Fast approximation to log2(x).
+ */
+static inline float
+util_fast_log2(float x)
+{
+ union fi num;
+ float epart, mpart;
+ num.f = x;
+ epart = (float)(((num.i & 0x7f800000) >> 23) - 127);
+ /* mpart = log2_table[mantissa*LOG2_TABLE_SCALE + 0.5] */
+ mpart = log2_table[((num.i & 0x007fffff) + (1 << (22 - LOG2_TABLE_SIZE_LOG2))) >> (23 - LOG2_TABLE_SIZE_LOG2)];
+ return epart + mpart;
+}
+
+
+/**
+ * Fast approximation to x^y.
+ */
+static inline float
+util_fast_pow(float x, float y)
+{
+ return util_fast_exp2(util_fast_log2(x) * y);
+}
+
+
+/**
+ * Floor(x), returned as int.
+ */
+static inline int
+util_ifloor(float f)
+{
+#if defined(USE_X86_ASM) && defined(__GNUC__) && defined(__i386__)
+ /*
+ * IEEE floor for computers that round to nearest or even.
+ * 'f' must be between -4194304 and 4194303.
+ * This floor operation is done by "(iround(f + .5) + iround(f - .5)) >> 1",
+ * but uses some IEEE specific tricks for better speed.
+ * Contributed by Josh Vanderhoof
+ */
+ int ai, bi;
+ double af, bf;
+ af = (3 << 22) + 0.5 + (double)f;
+ bf = (3 << 22) + 0.5 - (double)f;
+ /* GCC generates an extra fstp/fld without this. */
+ __asm__ ("fstps %0" : "=m" (ai) : "t" (af) : "st");
+ __asm__ ("fstps %0" : "=m" (bi) : "t" (bf) : "st");
+ return (ai - bi) >> 1;
+#else
+ int ai, bi;
+ double af, bf;
+ union fi u;
+ af = (3 << 22) + 0.5 + (double) f;
+ bf = (3 << 22) + 0.5 - (double) f;
+ u.f = (float) af; ai = u.i;
+ u.f = (float) bf; bi = u.i;
+ return (ai - bi) >> 1;
+#endif
+}
+
+
+/**
+ * Round float to nearest int.
+ */
+static inline int
+util_iround(float f)
+{
+#if defined(PIPE_CC_GCC) && defined(PIPE_ARCH_X86)
+ int r;
+ __asm__ ("fistpl %0" : "=m" (r) : "t" (f) : "st");
+ return r;
+#elif defined(PIPE_CC_MSVC) && defined(PIPE_ARCH_X86)
+ int r;
+ _asm {
+ fld f
+ fistp r
+ }
+ return r;
+#else
+ if (f >= 0.0f)
+ return (int) (f + 0.5f);
+ else
+ return (int) (f - 0.5f);
+#endif
+}
+
+
+/**
+ * Approximate floating point comparison
+ */
+static inline bool
+util_is_approx(float a, float b, float tol)
+{
+ return fabsf(b - a) <= tol;
+}
+
+
+/**
+ * util_is_X_inf_or_nan = test if x is NaN or +/- Inf
+ * util_is_X_nan = test if x is NaN
+ * util_X_inf_sign = return +1 for +Inf, -1 for -Inf, or 0 for not Inf
+ *
+ * NaN can be checked with x != x; however, this fails when fast-math flags are enabled.
+ **/
+
+
+/**
+ * Single-float
+ */
+static inline bool
+util_is_inf_or_nan(float x)
+{
+ union fi tmp;
+ tmp.f = x;
+ return (tmp.ui & 0x7f800000) == 0x7f800000;
+}
+
+
+static inline bool
+util_is_nan(float x)
+{
+ union fi tmp;
+ tmp.f = x;
+ return (tmp.ui & 0x7fffffff) > 0x7f800000;
+}
+
+
+static inline int
+util_inf_sign(float x)
+{
+ union fi tmp;
+ tmp.f = x;
+ if ((tmp.ui & 0x7fffffff) != 0x7f800000) {
+ return 0;
+ }
+
+ return (x < 0) ? -1 : 1;
+}
+
+
+/**
+ * Double-float
+ */
+static inline bool
+util_is_double_inf_or_nan(double x)
+{
+ union di tmp;
+ tmp.d = x;
+ return (tmp.ui & 0x7ff0000000000000ULL) == 0x7ff0000000000000ULL;
+}
+
+
+static inline bool
+util_is_double_nan(double x)
+{
+ union di tmp;
+ tmp.d = x;
+ return (tmp.ui & 0x7fffffffffffffffULL) > 0x7ff0000000000000ULL;
+}
+
+
+static inline int
+util_double_inf_sign(double x)
+{
+ union di tmp;
+ tmp.d = x;
+ if ((tmp.ui & 0x7fffffffffffffffULL) != 0x7ff0000000000000ULL) {
+ return 0;
+ }
+
+ return (x < 0) ? -1 : 1;
+}
+
+
+/**
+ * Half-float
+ */
+static inline bool
+util_is_half_inf_or_nan(int16_t x)
+{
+ return (x & 0x7c00) == 0x7c00;
+}
+
+
+static inline bool
+util_is_half_nan(int16_t x)
+{
+ return (x & 0x7fff) > 0x7c00;
+}
+
+
+static inline int
+util_half_inf_sign(int16_t x)
+{
+ if ((x & 0x7fff) != 0x7c00) {
+ return 0;
+ }
+
+ return (x < 0) ? -1 : 1;
+}
+
+
+/**
+ * Return float bits.
+ */
+static inline unsigned
+fui( float f )
+{
+ union fi fi;
+ fi.f = f;
+ return fi.ui;
+}
+
+static inline float
+uif(uint32_t ui)
+{
+ union fi fi;
+ fi.ui = ui;
+ return fi.f;
+}
+
+
+/**
+ * Convert uint8_t to float in [0, 1].
+ */
+static inline float
+ubyte_to_float(uint8_t ub)
+{
+ return (float) ub * (1.0f / 255.0f);
+}
+
+
+/**
+ * Convert float in [0,1] to uint8_t in [0,255] with clamping.
+ */
+static inline uint8_t
+float_to_ubyte(float f)
+{
+ /* return 0 for NaN too */
+ if (!(f > 0.0f)) {
+ return (uint8_t) 0;
+ }
+ else if (f >= 1.0f) {
+ return (uint8_t) 255;
+ }
+ else {
+ union fi tmp;
+ tmp.f = f;
+ tmp.f = tmp.f * (255.0f/256.0f) + 32768.0f;
+ return (uint8_t) tmp.i;
+ }
+}
+
+/**
+ * Convert uint16_t to float in [0, 1].
+ */
+static inline float
+ushort_to_float(uint16_t us)
+{
+ return (float) us * (1.0f / 65535.0f);
+}
+
+
+/**
+ * Convert float in [0,1] to uint16_t in [0,65535] with clamping.
+ */
+static inline uint16_t
+float_to_ushort(float f)
+{
+ /* return 0 for NaN too */
+ if (!(f > 0.0f)) {
+ return (uint16_t) 0;
+ }
+ else if (f >= 1.0f) {
+ return (uint16_t) 65535;
+ }
+ else {
+ union fi tmp;
+ tmp.f = f;
+ tmp.f = tmp.f * (65535.0f/65536.0f) + 128.0f;
+ return (uint16_t) tmp.i;
+ }
+}
+
+static inline float
+byte_to_float_tex(int8_t b)
+{
+ return (b == -128) ? -1.0F : b * 1.0F / 127.0F;
+}
+
+static inline int8_t
+float_to_byte_tex(float f)
+{
+ return (int8_t) (127.0F * f);
+}
+
+/**
+ * Calc log base 2
+ */
+static inline unsigned
+util_logbase2(unsigned n)
+{
+#if defined(HAVE___BUILTIN_CLZ)
+ return ((sizeof(unsigned) * 8 - 1) - __builtin_clz(n | 1));
+#else
+ unsigned pos = 0;
+ if (n >= 1<<16) { n >>= 16; pos += 16; }
+ if (n >= 1<< 8) { n >>= 8; pos += 8; }
+ if (n >= 1<< 4) { n >>= 4; pos += 4; }
+ if (n >= 1<< 2) { n >>= 2; pos += 2; }
+ if (n >= 1<< 1) { pos += 1; }
+ return pos;
+#endif
+}
+
+static inline uint64_t
+util_logbase2_64(uint64_t n)
+{
+#if defined(HAVE___BUILTIN_CLZLL)
+ return ((sizeof(uint64_t) * 8 - 1) - __builtin_clzll(n | 1));
+#else
+ uint64_t pos = 0ull;
+ if (n >= 1ull<<32) { n >>= 32; pos += 32; }
+ if (n >= 1ull<<16) { n >>= 16; pos += 16; }
+ if (n >= 1ull<< 8) { n >>= 8; pos += 8; }
+ if (n >= 1ull<< 4) { n >>= 4; pos += 4; }
+ if (n >= 1ull<< 2) { n >>= 2; pos += 2; }
+ if (n >= 1ull<< 1) { pos += 1; }
+ return pos;
+#endif
+}
+
+/**
+ * Returns the ceiling of log n base 2, and 0 when n == 0. Equivalently,
+ * returns the smallest x such that n <= 2**x.
+ */
+static inline unsigned
+util_logbase2_ceil(unsigned n)
+{
+ if (n <= 1)
+ return 0;
+
+ return 1 + util_logbase2(n - 1);
+}
+
+static inline uint64_t
+util_logbase2_ceil64(uint64_t n)
+{
+ if (n <= 1)
+ return 0;
+
+ return 1ull + util_logbase2_64(n - 1);
+}
+
+/**
+ * Returns the smallest power of two >= x
+ */
+static inline unsigned
+util_next_power_of_two(unsigned x)
+{
+#if defined(HAVE___BUILTIN_CLZ)
+ if (x <= 1)
+ return 1;
+
+ return (1 << ((sizeof(unsigned) * 8) - __builtin_clz(x - 1)));
+#else
+ unsigned val = x;
+
+ if (x <= 1)
+ return 1;
+
+ if (util_is_power_of_two_or_zero(x))
+ return x;
+
+ val--;
+ val = (val >> 1) | val;
+ val = (val >> 2) | val;
+ val = (val >> 4) | val;
+ val = (val >> 8) | val;
+ val = (val >> 16) | val;
+ val++;
+ return val;
+#endif
+}
+
+static inline uint64_t
+util_next_power_of_two64(uint64_t x)
+{
+#if defined(HAVE___BUILTIN_CLZLL)
+ if (x <= 1)
+ return 1;
+
+ return (1ull << ((sizeof(uint64_t) * 8) - __builtin_clzll(x - 1)));
+#else
+ uint64_t val = x;
+
+ if (x <= 1)
+ return 1;
+
+ if (util_is_power_of_two_or_zero64(x))
+ return x;
+
+ val--;
+ val = (val >> 1) | val;
+ val = (val >> 2) | val;
+ val = (val >> 4) | val;
+ val = (val >> 8) | val;
+ val = (val >> 16) | val;
+ val = (val >> 32) | val;
+ val++;
+ return val;
+#endif
+}
+
+/**
+ * Reverse bits in n
+ * Algorithm taken from:
+ * http://stackoverflow.com/questions/9144800/c-reverse-bits-in-unsigned-integer
+ */
+static inline unsigned
+util_bitreverse(unsigned n)
+{
+ n = ((n >> 1) & 0x55555555u) | ((n & 0x55555555u) << 1);
+ n = ((n >> 2) & 0x33333333u) | ((n & 0x33333333u) << 2);
+ n = ((n >> 4) & 0x0f0f0f0fu) | ((n & 0x0f0f0f0fu) << 4);
+ n = ((n >> 8) & 0x00ff00ffu) | ((n & 0x00ff00ffu) << 8);
+ n = ((n >> 16) & 0xffffu) | ((n & 0xffffu) << 16);
+ return n;
+}
+
+/**
+ * Convert from little endian to CPU byte order.
+ */
+
+#if UTIL_ARCH_BIG_ENDIAN
+#define util_le64_to_cpu(x) util_bswap64(x)
+#define util_le32_to_cpu(x) util_bswap32(x)
+#define util_le16_to_cpu(x) util_bswap16(x)
+#else
+#define util_le64_to_cpu(x) (x)
+#define util_le32_to_cpu(x) (x)
+#define util_le16_to_cpu(x) (x)
+#endif
+
+#define util_cpu_to_le64(x) util_le64_to_cpu(x)
+#define util_cpu_to_le32(x) util_le32_to_cpu(x)
+#define util_cpu_to_le16(x) util_le16_to_cpu(x)
+
+/**
+ * Reverse byte order of a 32 bit word.
+ */
+static inline uint32_t
+util_bswap32(uint32_t n)
+{
+#if defined(HAVE___BUILTIN_BSWAP32)
+ return __builtin_bswap32(n);
+#else
+ return (n >> 24) |
+ ((n >> 8) & 0x0000ff00) |
+ ((n << 8) & 0x00ff0000) |
+ (n << 24);
+#endif
+}
+
+/**
+ * Reverse byte order of a 64bit word.
+ */
+static inline uint64_t
+util_bswap64(uint64_t n)
+{
+#if defined(HAVE___BUILTIN_BSWAP64)
+ return __builtin_bswap64(n);
+#else
+ return ((uint64_t)util_bswap32((uint32_t)n) << 32) |
+ util_bswap32((n >> 32));
+#endif
+}
+
+
+/**
+ * Reverse byte order of a 16 bit word.
+ */
+static inline uint16_t
+util_bswap16(uint16_t n)
+{
+ return (n >> 8) |
+ (n << 8);
+}
+
+static inline void*
+util_memcpy_cpu_to_le32(void * restrict dest, const void * restrict src, size_t n)
+{
+#if UTIL_ARCH_BIG_ENDIAN
+ size_t i, e;
+ assert(n % 4 == 0);
+
+ for (i = 0, e = n / 4; i < e; i++) {
+ uint32_t * restrict d = (uint32_t* restrict)dest;
+ const uint32_t * restrict s = (const uint32_t* restrict)src;
+ d[i] = util_bswap32(s[i]);
+ }
+ return dest;
+#else
+ return memcpy(dest, src, n);
+#endif
+}
+
+/**
+ * Clamp X to [MIN, MAX].
+ * This is a macro to allow float, int, uint, etc. types.
+ * We arbitrarily turn NaN into MIN.
+ */
+#define CLAMP( X, MIN, MAX ) ( (X)>(MIN) ? ((X)>(MAX) ? (MAX) : (X)) : (MIN) )
+
+#define MIN2( A, B ) ( (A)<(B) ? (A) : (B) )
+#define MAX2( A, B ) ( (A)>(B) ? (A) : (B) )
+
+#define MIN3( A, B, C ) ((A) < (B) ? MIN2(A, C) : MIN2(B, C))
+#define MAX3( A, B, C ) ((A) > (B) ? MAX2(A, C) : MAX2(B, C))
+
+#define MIN4( A, B, C, D ) ((A) < (B) ? MIN3(A, C, D) : MIN3(B, C, D))
+#define MAX4( A, B, C, D ) ((A) > (B) ? MAX3(A, C, D) : MAX3(B, C, D))
+
+
+/**
+ * Align a value up to an alignment value
+ *
+ * If \c value is not already aligned to the requested alignment value, it
+ * will be rounded up.
+ *
+ * \param value Value to be rounded
+ * \param alignment Alignment value to be used. This must be a power of two.
+ *
+ * \sa ROUND_DOWN_TO()
+ */
+static inline uintptr_t
+ALIGN(uintptr_t value, int32_t alignment)
+{
+ assert(util_is_power_of_two_nonzero(alignment));
+ return (((value) + (alignment) - 1) & ~((alignment) - 1));
+}
+
+/**
+ * Like ALIGN(), but works with a non-power-of-two alignment.
+ */
+static inline uintptr_t
+ALIGN_NPOT(uintptr_t value, int32_t alignment)
+{
+ assert(alignment > 0);
+ return (value + alignment - 1) / alignment * alignment;
+}
+
+/**
+ * Align a value down to an alignment value
+ *
+ * If \c value is not already aligned to the requested alignment value, it
+ * will be rounded down.
+ *
+ * \param value Value to be rounded
+ * \param alignment Alignment value to be used. This must be a power of two.
+ *
+ * \sa ALIGN()
+ */
+static inline uintptr_t
+ROUND_DOWN_TO(uintptr_t value, int32_t alignment)
+{
+ assert(util_is_power_of_two_nonzero(alignment));
+ return ((value) & ~(alignment - 1));
+}
+
+/**
+ * Align a value; only works for power-of-two alignments.
+ */
+static inline int
+align(int value, int alignment)
+{
+ return (value + alignment - 1) & ~(alignment - 1);
+}
+
+static inline uint64_t
+align64(uint64_t value, unsigned alignment)
+{
+ return (value + alignment - 1) & ~((uint64_t)alignment - 1);
+}
+
+/**
+ * Works like align(), but also handles non-power-of-two alignments.
+ */
+static inline size_t
+util_align_npot(size_t value, size_t alignment)
+{
+ if (value % alignment)
+ return value + (alignment - (value % alignment));
+ return value;
+}
+
+static inline unsigned
+u_minify(unsigned value, unsigned levels)
+{
+ return MAX2(1, value >> levels);
+}
+
+#ifndef COPY_4V
+#define COPY_4V( DST, SRC ) \
+do { \
+ (DST)[0] = (SRC)[0]; \
+ (DST)[1] = (SRC)[1]; \
+ (DST)[2] = (SRC)[2]; \
+ (DST)[3] = (SRC)[3]; \
+} while (0)
+#endif
+
+
+#ifndef COPY_4FV
+#define COPY_4FV( DST, SRC ) COPY_4V(DST, SRC)
+#endif
+
+
+#ifndef ASSIGN_4V
+#define ASSIGN_4V( DST, V0, V1, V2, V3 ) \
+do { \
+ (DST)[0] = (V0); \
+ (DST)[1] = (V1); \
+ (DST)[2] = (V2); \
+ (DST)[3] = (V3); \
+} while (0)
+#endif
+
+
+static inline uint32_t
+util_unsigned_fixed(float value, unsigned frac_bits)
+{
+ return value < 0 ? 0 : (uint32_t)(value * (1<<frac_bits));
+}
+
+static inline int32_t
+util_signed_fixed(float value, unsigned frac_bits)
+{
+ return (int32_t)(value * (1<<frac_bits));
+}
+
+unsigned
+util_fpstate_get(void);
+unsigned
+util_fpstate_set_denorms_to_zero(unsigned current_fpstate);
+void
+util_fpstate_set(unsigned fpstate);
+
+/**
+ * For indexed draw calls, return true if the vertex count to be drawn is
+ * much lower than the vertex count that has to be uploaded, meaning
+ * that the driver should flatten indices instead of trying to upload
+ * an excessively large range.
+ *
+ * This is used by vertex upload code in u_vbuf and glthread.
+ */
+static inline bool
+util_is_vbo_upload_ratio_too_large(unsigned draw_vertex_count,
+ unsigned upload_vertex_count)
+{
+ if (draw_vertex_count > 1024)
+ return upload_vertex_count > draw_vertex_count * 4;
+ else if (draw_vertex_count > 32)
+ return upload_vertex_count > draw_vertex_count * 8;
+ else
+ return upload_vertex_count > draw_vertex_count * 16;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_MATH_H */
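
A few worked examples of the bit-math helpers above, useful as a quick
reference (the function is only a sketch to hold the assertions):

    static void
    bit_helper_examples(void)
    {
       assert(util_logbase2(255) == 7);          /* floor(log2(255)) */
       assert(util_logbase2_ceil(255) == 8);     /* ceil(log2(255))  */
       assert(util_next_power_of_two(255) == 256);
       assert(ALIGN(13, 8) == 16);
       assert(ROUND_DOWN_TO(13, 8) == 8);
       assert(u_minify(256, 3) == 32);           /* mip level 3 of 256 */
    }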
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_memory.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_memory.h
new file mode 100644
index 0000000000..4cdccb66aa
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_memory.h
@@ -0,0 +1,99 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+
+/*
+ * Memory functions
+ */
+
+
+#ifndef U_MEMORY_H
+#define U_MEMORY_H
+
+#include "util/u_debug.h"
+#include "util/os_memory.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+#define MALLOC(_size) os_malloc(_size)
+
+#define CALLOC(_count, _size) os_calloc(_count, _size)
+
+#define FREE(_ptr ) os_free(_ptr)
+
+#define REALLOC(_ptr, _old_size, _size) os_realloc(_ptr, _old_size, _size)
+
+#define MALLOC_STRUCT(T) (struct T *) MALLOC(sizeof(struct T))
+
+#define CALLOC_STRUCT(T) (struct T *) CALLOC(1, sizeof(struct T))
+
+#define CALLOC_VARIANT_LENGTH_STRUCT(T,more_size) ((struct T *) CALLOC(1, sizeof(struct T) + more_size))
+
+
+#define align_malloc(_size, _alignment) os_malloc_aligned(_size, _alignment)
+#define align_free(_ptr) os_free_aligned(_ptr)
+#define align_realloc(_ptr, _oldsize, _newsize, _alignment) os_realloc_aligned(_ptr, _oldsize, _newsize, _alignment)
+
+static inline void *
+align_calloc(size_t size, unsigned long alignment)
+{
+ void *ptr = align_malloc(size, alignment);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
+
+/**
+ * Duplicate a block of memory.
+ */
+static inline void *
+mem_dup(const void *src, size_t size)
+{
+ void *dup = MALLOC(size);
+ if (dup)
+ memcpy(dup, src, size);
+ return dup;
+}
+
+
+/**
+ * Offset of a field in a struct, in bytes.
+ */
+#define Offset(TYPE, MEMBER) ((uintptr_t)&(((TYPE *)NULL)->MEMBER))
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* U_MEMORY_H */
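
The wrappers are meant to be paired (MALLOC/CALLOC with FREE, align_malloc
with align_free). A sketch of the usual create/destroy shape; struct my_ctx
is hypothetical:

    struct my_ctx {
       int width, height;
       float *data;
    };

    static struct my_ctx *
    my_ctx_create(void)
    {
       struct my_ctx *ctx = CALLOC_STRUCT(my_ctx);   /* zero-initialized */
       if (!ctx)
          return NULL;
       ctx->data = align_malloc(64 * sizeof(float), 16);
       if (!ctx->data) {
          FREE(ctx);
          return NULL;
       }
       return ctx;
    }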
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_queue.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_queue.h
new file mode 100644
index 0000000000..5943df4fcb
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_queue.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright © 2016 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+
+/* Job queue with execution in a separate thread.
+ *
+ * Jobs can be added from any thread. After that, the wait call can be used
+ * to wait for completion of the job.
+ */
+
+#ifndef U_QUEUE_H
+#define U_QUEUE_H
+
+#include <string.h>
+
+#include "util/futex.h"
+#include "util/list.h"
+#include "util/macros.h"
+#include "util/os_time.h"
+#include "util/u_atomic.h"
+#include "util/u_thread.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY (1 << 0)
+#define UTIL_QUEUE_INIT_RESIZE_IF_FULL (1 << 1)
+#define UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY (1 << 2)
+
+#if UTIL_FUTEX_SUPPORTED
+#define UTIL_QUEUE_FENCE_FUTEX
+#else
+#define UTIL_QUEUE_FENCE_STANDARD
+#endif
+
+#ifdef UTIL_QUEUE_FENCE_FUTEX
+/* Job completion fence.
+ * Put this into your job structure.
+ */
+struct util_queue_fence {
+ /* The fence can be in one of three states:
+ * 0 - signaled
+ * 1 - unsignaled
+ * 2 - unsignaled, may have waiters
+ */
+ uint32_t val;
+};
+
+static inline void
+util_queue_fence_init(struct util_queue_fence *fence)
+{
+ fence->val = 0;
+}
+
+static inline void
+util_queue_fence_destroy(struct util_queue_fence *fence)
+{
+ assert(fence->val == 0);
+ /* no-op */
+}
+
+static inline void
+util_queue_fence_signal(struct util_queue_fence *fence)
+{
+ uint32_t val = p_atomic_xchg(&fence->val, 0);
+
+ assert(val != 0);
+
+ if (val == 2)
+ futex_wake(&fence->val, INT_MAX);
+}
+
+/**
+ * Move \p fence back into unsignalled state.
+ *
+ * \warning The caller must ensure that no other thread may currently be
+ * waiting (or about to wait) on the fence.
+ */
+static inline void
+util_queue_fence_reset(struct util_queue_fence *fence)
+{
+#ifdef NDEBUG
+ fence->val = 1;
+#else
+ uint32_t v = p_atomic_xchg(&fence->val, 1);
+ assert(v == 0);
+#endif
+}
+
+static inline bool
+util_queue_fence_is_signalled(struct util_queue_fence *fence)
+{
+ return fence->val == 0;
+}
+#endif
+
+#ifdef UTIL_QUEUE_FENCE_STANDARD
+/* Job completion fence.
+ * Put this into your job structure.
+ */
+struct util_queue_fence {
+ mtx_t mutex;
+ cnd_t cond;
+ int signalled;
+};
+
+void util_queue_fence_init(struct util_queue_fence *fence);
+void util_queue_fence_destroy(struct util_queue_fence *fence);
+void util_queue_fence_signal(struct util_queue_fence *fence);
+
+/**
+ * Move \p fence back into unsignalled state.
+ *
+ * \warning The caller must ensure that no other thread may currently be
+ * waiting (or about to wait) on the fence.
+ */
+static inline void
+util_queue_fence_reset(struct util_queue_fence *fence)
+{
+ assert(fence->signalled);
+ fence->signalled = 0;
+}
+
+static inline bool
+util_queue_fence_is_signalled(struct util_queue_fence *fence)
+{
+ return fence->signalled != 0;
+}
+#endif
+
+void
+_util_queue_fence_wait(struct util_queue_fence *fence);
+
+static inline void
+util_queue_fence_wait(struct util_queue_fence *fence)
+{
+ if (unlikely(!util_queue_fence_is_signalled(fence)))
+ _util_queue_fence_wait(fence);
+}
+
+bool
+_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
+ int64_t abs_timeout);
+
+/**
+ * Wait for the fence to be signaled with a timeout.
+ *
+ * \param fence the fence
+ * \param abs_timeout the absolute timeout in nanoseconds, relative to the
+ * clock provided by os_time_get_nano.
+ *
+ * \return true if the fence was signaled, false if the timeout occurred.
+ */
+static inline bool
+util_queue_fence_wait_timeout(struct util_queue_fence *fence,
+ int64_t abs_timeout)
+{
+ if (util_queue_fence_is_signalled(fence))
+ return true;
+
+ if (abs_timeout == (int64_t)OS_TIMEOUT_INFINITE) {
+ _util_queue_fence_wait(fence);
+ return true;
+ }
+
+ return _util_queue_fence_wait_timeout(fence, abs_timeout);
+}
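+
+/* Illustrative: wait at most 1 ms by computing an absolute deadline on the
+ * clock the implementation compares against (os_time_get_nano):
+ *
+ *    int64_t deadline = os_time_get_nano() + 1000000;
+ *    bool done = util_queue_fence_wait_timeout(&fence, deadline);
+ */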
+
+typedef void (*util_queue_execute_func)(void *job, int thread_index);
+
+struct util_queue_job {
+ void *job;
+ size_t job_size;
+ struct util_queue_fence *fence;
+ util_queue_execute_func execute;
+ util_queue_execute_func cleanup;
+};
+
+/* Put this into your context. */
+struct util_queue {
+ char name[14]; /* 13 characters = the thread name without the index */
+ mtx_t finish_lock; /* for util_queue_finish and protects threads/num_threads */
+ mtx_t lock;
+ cnd_t has_queued_cond;
+ cnd_t has_space_cond;
+ thrd_t *threads;
+ unsigned flags;
+ int num_queued;
+ unsigned max_threads;
+ unsigned num_threads; /* decreasing this number will terminate threads */
+ int max_jobs;
+ int write_idx, read_idx; /* ring buffer pointers */
+ size_t total_jobs_size; /* memory use of all jobs in the queue */
+ struct util_queue_job *jobs;
+
+ /* for cleanup at exit(), protected by exit_mutex */
+ struct list_head head;
+};
+
+bool util_queue_init(struct util_queue *queue,
+ const char *name,
+ unsigned max_jobs,
+ unsigned num_threads,
+ unsigned flags);
+void util_queue_destroy(struct util_queue *queue);
+
+/* optional cleanup callback is called after fence is signaled: */
+void util_queue_add_job(struct util_queue *queue,
+ void *job,
+ struct util_queue_fence *fence,
+ util_queue_execute_func execute,
+ util_queue_execute_func cleanup,
+ const size_t job_size);
+void util_queue_drop_job(struct util_queue *queue,
+ struct util_queue_fence *fence);
+
+void util_queue_finish(struct util_queue *queue);
+
+/* Adjust the number of active threads. The new number of threads can't be
+ * greater than the initial number of threads at the creation of the queue,
+ * and it can't be less than 1.
+ */
+void
+util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads);
+
+int64_t util_queue_get_thread_time_nano(struct util_queue *queue,
+ unsigned thread_index);
+
+/* util_queue needs to be cleared to zeroes for this to work */
+static inline bool
+util_queue_is_initialized(struct util_queue *queue)
+{
+ return queue->threads != NULL;
+}
+
+/* Convenient structure for monitoring the queue externally and passing
+ * the structure between Mesa components. The queue doesn't use it directly.
+ */
+struct util_queue_monitoring
+{
+ /* For querying the thread busyness. */
+ struct util_queue *queue;
+
+ /* Counters updated by the user of the queue. */
+ unsigned num_offloaded_items;
+ unsigned num_direct_items;
+ unsigned num_syncs;
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_string.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_string.h
new file mode 100644
index 0000000000..88df2cceda
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_string.h
@@ -0,0 +1,130 @@
+/**************************************************************************
+ *
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+/**
+ * @file
+ * Platform independent functions for string manipulation.
+ *
+ * @author Jose Fonseca <jfonseca@vmware.com>
+ */
+
+#ifndef U_STRING_H_
+#define U_STRING_H_
+
+#if !defined(XF86_LIBC_H)
+#include <stdio.h>
+#endif
+#include <stdlib.h>
+#include <stddef.h>
+#include <stdarg.h>
+#include <string.h>
+
+#include "util/macros.h" // PRINTFLIKE
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(_GNU_SOURCE) || defined(__APPLE__)
+
+#define strchrnul util_strchrnul
+static inline char *
+util_strchrnul(const char *s, char c)
+{
+ for (; *s && *s != c; ++s);
+
+ return (char *)s;
+}
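+
+/* e.g. util_strchrnul("a=b", '=') points at '='; util_strchrnul("ab", '=')
+ * points at the terminating NUL rather than returning NULL. */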
+
+#endif
+
+#ifdef _WIN32
+
+#define sprintf util_sprintf
+static inline void
+ PRINTFLIKE(2, 3)
+util_sprintf(char *str, const char *format, ...)
+{
+ va_list ap;
+ va_start(ap, format);
+ vsnprintf(str, (size_t)-1, format, ap);
+ va_end(ap);
+}
+
+#define vasprintf util_vasprintf
+static inline int
+util_vasprintf(char **ret, const char *format, va_list ap)
+{
+ va_list ap_copy;
+
+ /* Compute length of output string first */
+ va_copy(ap_copy, ap);
+ int r = vsnprintf(NULL, 0, format, ap_copy);
+ va_end(ap_copy);
+
+ if (r < 0)
+ return -1;
+
+ *ret = (char *) malloc(r + 1);
+ if (!*ret)
+ return -1;
+
+ /* Print to buffer */
+ return vsnprintf(*ret, r + 1, format, ap);
+}
+
+#define asprintf util_asprintf
+static inline int
+util_asprintf(char **str, const char *fmt, ...)
+{
+ int ret;
+ va_list args;
+ va_start(args, fmt);
+ ret = vasprintf(str, fmt, args);
+ va_end(args);
+ return ret;
+}
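+
+/* Illustrative use of this Windows asprintf shim (msg is a hypothetical
+ * variable):
+ *
+ *    char *msg;
+ *    if (asprintf(&msg, "%d jobs", num_jobs) >= 0) {
+ *       ...
+ *       free(msg);
+ *    }
+ */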
+
+#ifndef strcasecmp
+#define strcasecmp stricmp
+#endif
+
+#define strdup _strdup
+
+#if defined(_WIN32) && !defined(HAVE_STRTOK_R)
+#define strtok_r strtok_s
+#endif
+
+#endif
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* U_STRING_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_thread.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_thread.h
new file mode 100644
index 0000000000..b91d05e4cf
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_thread.h
@@ -0,0 +1,256 @@
+/**************************************************************************
+ *
+ * Copyright 1999-2006 Brian Paul
+ * Copyright 2008 VMware, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef U_THREAD_H_
+#define U_THREAD_H_
+
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "c11/threads.h"
+#include "detect_os.h"
+
+#ifdef HAVE_PTHREAD
+#include <signal.h>
+#ifdef PTHREAD_SETAFFINITY_IN_NP_HEADER
+#include <pthread_np.h>
+#endif
+#endif
+
+#ifdef __HAIKU__
+#include <OS.h>
+#endif
+
+#ifdef __FreeBSD__
+/* pthread_np.h -> sys/param.h -> machine/param.h
+ * - defines ALIGN which clashes with our ALIGN
+ */
+#undef ALIGN
+#define cpu_set_t cpuset_t
+#endif
+
+static inline thrd_t u_thread_create(int (*routine)(void *), void *param)
+{
+ thrd_t thread;
+#ifdef HAVE_PTHREAD
+ sigset_t saved_set, new_set;
+ int ret;
+
+ sigfillset(&new_set);
+ sigdelset(&new_set, SIGSYS);
+ pthread_sigmask(SIG_BLOCK, &new_set, &saved_set);
+ ret = thrd_create( &thread, routine, param );
+ pthread_sigmask(SIG_SETMASK, &saved_set, NULL);
+#else
+ int ret;
+ ret = thrd_create( &thread, routine, param );
+#endif
+ if (ret)
+ return 0;
+
+ return thread;
+}
+
+static inline void u_thread_setname( const char *name )
+{
+#if defined(HAVE_PTHREAD)
+#if DETECT_OS_LINUX || DETECT_OS_CYGWIN || DETECT_OS_SOLARIS
+ pthread_setname_np(pthread_self(), name);
+#elif DETECT_OS_FREEBSD || DETECT_OS_OPENBSD
+ pthread_set_name_np(pthread_self(), name);
+#elif DETECT_OS_NETBSD
+ pthread_setname_np(pthread_self(), "%s", (void *)name);
+#elif DETECT_OS_APPLE
+ pthread_setname_np(name);
+#elif DETECT_OS_HAIKU
+ rename_thread(find_thread(NULL), name);
+#else
+#warning Not sure how to call pthread_setname_np
+#endif
+#endif
+ (void)name;
+}
+
+/**
+ * An AMD Zen CPU consists of multiple modules where each module has its own L3
+ * cache. Inter-thread communication such as locks and atomics between modules
+ * is very expensive. It's desirable to pin a group of closely cooperating
+ * threads to one group of cores sharing L3.
+ *
+ * \param thread thread
+ * \param L3_index index of the L3 cache
+ * \param cores_per_L3 number of CPU cores shared by one L3
+ */
+static inline void
+util_pin_thread_to_L3(thrd_t thread, unsigned L3_index, unsigned cores_per_L3)
+{
+#if defined(HAVE_PTHREAD_SETAFFINITY)
+ cpu_set_t cpuset;
+
+ CPU_ZERO(&cpuset);
+ for (unsigned i = 0; i < cores_per_L3; i++)
+ CPU_SET(L3_index * cores_per_L3 + i, &cpuset);
+ pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset);
+#endif
+}
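+
+/* Illustrative (assumes a CPU topology with 8 cores per L3): pin a worker
+ * thread to the first L3 cluster so it shares cache with its peers:
+ *
+ *    util_pin_thread_to_L3(worker_thread, 0, 8);
+ */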
+
+/**
+ * Return the index of L3 that the thread is pinned to. If the thread is
+ * pinned to multiple L3 caches, return -1.
+ *
+ * \param thread thread
+ * \param cores_per_L3 number of CPU cores shared by one L3
+ */
+static inline int
+util_get_L3_for_pinned_thread(thrd_t thread, unsigned cores_per_L3)
+{
+#if defined(HAVE_PTHREAD_SETAFFINITY)
+ cpu_set_t cpuset;
+
+ if (pthread_getaffinity_np(thread, sizeof(cpuset), &cpuset) == 0) {
+ int L3_index = -1;
+
+ for (unsigned i = 0; i < CPU_SETSIZE; i++) {
+ if (CPU_ISSET(i, &cpuset)) {
+ int x = i / cores_per_L3;
+
+ if (L3_index != x) {
+ if (L3_index == -1)
+ L3_index = x;
+ else
+ return -1; /* multiple L3s are set */
+ }
+ }
+ }
+ return L3_index;
+ }
+#endif
+ return -1;
+}
+
+/*
+ * Thread statistics.
+ */
+
+/* Return the current value of the thread's CPU-time clock, in nanoseconds. */
+static inline int64_t
+u_thread_get_time_nano(thrd_t thread)
+{
+#if defined(HAVE_PTHREAD) && !defined(__APPLE__) && !defined(__HAIKU__)
+ struct timespec ts;
+ clockid_t cid;
+
+ pthread_getcpuclockid(thread, &cid);
+ clock_gettime(cid, &ts);
+ return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
+#else
+ return 0;
+#endif
+}
+
+static inline bool u_thread_is_self(thrd_t thread)
+{
+#if defined(HAVE_PTHREAD)
+ return pthread_equal(pthread_self(), thread);
+#endif
+ return false;
+}
+
+/*
+ * util_barrier
+ */
+
+#if defined(HAVE_PTHREAD) && !defined(__APPLE__)
+
+typedef pthread_barrier_t util_barrier;
+
+static inline void util_barrier_init(util_barrier *barrier, unsigned count)
+{
+ pthread_barrier_init(barrier, NULL, count);
+}
+
+static inline void util_barrier_destroy(util_barrier *barrier)
+{
+ pthread_barrier_destroy(barrier);
+}
+
+static inline void util_barrier_wait(util_barrier *barrier)
+{
+ pthread_barrier_wait(barrier);
+}
+
+
+#else /* If the OS doesn't have its own, implement barriers using a mutex and a condvar */
+
+typedef struct {
+ unsigned count;
+ unsigned waiters;
+ uint64_t sequence;
+ mtx_t mutex;
+ cnd_t condvar;
+} util_barrier;
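+
+/* The sequence counter identifies barrier generations: waiters sleep until
+ * the sequence advances, which tolerates spurious wakeups and lets the
+ * barrier be reused immediately after it releases. */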
+
+static inline void util_barrier_init(util_barrier *barrier, unsigned count)
+{
+ barrier->count = count;
+ barrier->waiters = 0;
+ barrier->sequence = 0;
+ (void) mtx_init(&barrier->mutex, mtx_plain);
+ cnd_init(&barrier->condvar);
+}
+
+static inline void util_barrier_destroy(util_barrier *barrier)
+{
+ assert(barrier->waiters == 0);
+ mtx_destroy(&barrier->mutex);
+ cnd_destroy(&barrier->condvar);
+}
+
+static inline void util_barrier_wait(util_barrier *barrier)
+{
+ mtx_lock(&barrier->mutex);
+
+ assert(barrier->waiters < barrier->count);
+ barrier->waiters++;
+
+ if (barrier->waiters < barrier->count) {
+ uint64_t sequence = barrier->sequence;
+
+ do {
+ cnd_wait(&barrier->condvar, &barrier->mutex);
+ } while (sequence == barrier->sequence);
+ } else {
+ barrier->waiters = 0;
+ barrier->sequence++;
+ cnd_broadcast(&barrier->condvar);
+ }
+
+ mtx_unlock(&barrier->mutex);
+}
+
+#endif
+
+#endif /* U_THREAD_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/xxhash.h b/third_party/rust/glslopt/glsl-optimizer/src/util/xxhash.h
new file mode 100644
index 0000000000..c0c8f44b60
--- /dev/null
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/xxhash.h
@@ -0,0 +1,1435 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo @3GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MurmurHash 3a 2.7 GB/s 10 Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+Note : SMHasher's CRC32 implementation is not the fastest one.
+Other speed-oriented implementations can be faster,
+especially in combination with PCLMUL instruction :
+http://fastcompression.blogspot.com/2019/03/presenting-xxh3.html?showComment=1552696407071#c3490092340461170735
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name Speed on 64 bits Speed on 32 bits
+XXH64 13.8 GB/s 1.9 GB/s
+XXH32 6.8 GB/s 6.0 GB/s
+*/
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+/* ****************************
+ * API modifier
+ ******************************/
+/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
+ * This build macro includes xxhash functions in `static` mode
+ * in order to inline them, and removes their symbols from the public list.
+ * Inlining offers great performance improvement on small keys,
+ * and dramatic ones when length is expressed as a compile-time constant.
+ * See https://fastcompression.blogspot.com/2018/03/xxhash-for-small-keys-impressive-power.html .
+ * Methodology :
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * `xxhash.c` is automatically included.
+ * It's not useful to compile and link it as a separate object.
+ */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# ifndef XXH_STATIC_LINKING_ONLY
+# define XXH_STATIC_LINKING_ONLY
+# endif
+# if defined(__GNUC__)
+# define XXH_PUBLIC_API static __inline __attribute__((unused))
+# elif defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# define XXH_PUBLIC_API static inline
+# elif defined(_MSC_VER)
+# define XXH_PUBLIC_API static __inline
+# else
+ /* this version may generate warnings for unused static functions */
+# define XXH_PUBLIC_API static
+# endif
+#else
+# if defined(WIN32) && defined(_MSC_VER) && (defined(XXH_IMPORT) || defined(XXH_EXPORT))
+# ifdef XXH_EXPORT
+# define XXH_PUBLIC_API __declspec(dllexport)
+# elif XXH_IMPORT
+# define XXH_PUBLIC_API __declspec(dllimport)
+# endif
+# else
+# define XXH_PUBLIC_API /* do nothing */
+# endif
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+
+/*! XXH_NAMESPACE, aka Namespace Emulation :
+ *
+ * If you want to include _and expose_ xxHash functions from within your own library,
+ * but also want to avoid symbol collisions with other libraries which may also include xxHash,
+ *
+ * you can use XXH_NAMESPACE to automatically prefix any public symbol from the xxhash library
+ * with the value of XXH_NAMESPACE (therefore, avoid NULL and numeric values).
+ *
+ * Note that no change is required within the calling program as long as it includes `xxhash.h` :
+ * regular symbol names will be automatically translated by this header.
+ */
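+/* Illustrative: building with -DXXH_NAMESPACE=mylib_ (hypothetical prefix)
+ * exposes mylib_XXH32() etc. from the object file, while calling code keeps
+ * writing XXH32(); the macros below rewrite the names during preprocessing. */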
+#ifdef XXH_NAMESPACE
+# define XXH_CAT(A,B) A##B
+# define XXH_NAME2(A,B) XXH_CAT(A,B)
+# define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+# define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+# define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+# define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+# define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+# define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+# define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+# define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+# define XXH32_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+# define XXH32_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+# define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+# define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+# define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+# define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+# define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+# define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+# define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+# define XXH64_canonicalFromHash XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+# define XXH64_hashFromCanonical XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
+
+
+/* *************************************
+* Version
+***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 7
+#define XXH_VERSION_RELEASE 2
+#define XXH_VERSION_NUMBER (XXH_VERSION_MAJOR *100*100 + XXH_VERSION_MINOR *100 + XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber (void);
+
+
+/* ****************************
+* Definitions
+******************************/
+#include <stddef.h> /* size_t */
+typedef enum { XXH_OK=0, XXH_ERROR } XXH_errorcode;
+
+
+/*-**********************************************************************
+* 32-bit hash
+************************************************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint32_t XXH32_hash_t;
+#else
+# include <limits.h>
+# if UINT_MAX == 0xFFFFFFFFUL
+ typedef unsigned int XXH32_hash_t;
+# else
+# if ULONG_MAX == 0xFFFFFFFFUL
+ typedef unsigned long XXH32_hash_t;
+# else
+# error "unsupported platform : need a 32-bit type"
+# endif
+# endif
+#endif
+
+/*! XXH32() :
+ Calculate the 32-bit hash of sequence "length" bytes stored at memory address "input".
+ The memory between input & input+length must be valid (allocated and read-accessible).
+ "seed" can be used to alter the result predictably.
+ Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s */
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t length, XXH32_hash_t seed);
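+
+/* e.g. (illustrative): XXH32_hash_t h = XXH32(buf, buf_len, 0); */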
+
+/******* Streaming *******/
+
+/*
+ * Streaming functions generate the xxHash value from an incremental input.
+ * This method is slower than single-call functions, due to state management.
+ * For small inputs, prefer `XXH32()` and `XXH64()`, which are better optimized.
+ *
+ * XXH state must first be allocated, using XXH*_createState() .
+ *
+ * Start a new hash by initializing state with a seed, using XXH*_reset().
+ *
+ * Then, feed the hash state by calling XXH*_update() as many times as necessary.
+ * The function returns an error code, with 0 meaning OK, and any other value meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using XXH*_digest().
+ * This function returns the nn-bit hash as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a digest,
+ * and generate new hash values later on by invoking XXH*_digest() again.
+ *
+ * When done, release the state, using XXH*_freeState().
+ */
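+
+/* Illustrative streaming sketch (error handling elided; buf1/buf2 are
+ * hypothetical inputs):
+ *
+ *    XXH32_state_t* s = XXH32_createState();
+ *    XXH32_reset(s, 0);
+ *    XXH32_update(s, buf1, len1);
+ *    XXH32_update(s, buf2, len2);
+ *    XXH32_hash_t h = XXH32_digest(s);  // equals XXH32() over the whole input
+ *    XXH32_freeState(s);
+ */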
+
+typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr);
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dst_state, const XXH32_state_t* src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset (XXH32_state_t* statePtr, XXH32_hash_t seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update (XXH32_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* statePtr);
+
+/******* Canonical representation *******/
+
+/* Default return values from XXH functions are basic unsigned 32 and 64 bits.
+ * This is the simplest and fastest format for further post-processing.
+ * However, this leaves open the question of byte order,
+ * since little- and big-endian conventions will write the same number differently.
+ *
+ * The canonical representation settles this issue,
+ * by mandating big-endian convention,
+ * i.e. the same convention as human-readable numbers (large digits first).
+ * When writing hash values to storage, sending them over a network, or printing them,
+ * it's highly recommended to use the canonical representation,
+ * to ensure portability across a wider range of systems, present and future.
+ *
+ * The following functions allow transformation of hash values into and from canonical format.
+ */
+
+typedef struct { unsigned char digest[4]; } XXH32_canonical_t;
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash);
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src);
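+
+/* Illustrative: store a hash portably (f is a hypothetical FILE*):
+ *
+ *    XXH32_canonical_t c;
+ *    XXH32_canonicalFromHash(&c, h);
+ *    fwrite(c.digest, 1, sizeof(c.digest), f);
+ */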
+
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+* 64-bit hash
+************************************************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint64_t XXH64_hash_t;
+#else
+ /* the following type must have a width of 64-bit */
+ typedef unsigned long long XXH64_hash_t;
+#endif
+
+/*! XXH64() :
+ * Returns the 64-bit hash of the sequence of @length bytes stored at memory address @input.
+ * @seed can be used to alter the result predictably.
+ * This function runs faster on 64-bit systems, but slower on 32-bit systems (see benchmark).
+ */
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t length, XXH64_hash_t seed);
+
+/******* Streaming *******/
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dst_state, const XXH64_state_t* src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset (XXH64_state_t* statePtr, XXH64_hash_t seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update (XXH64_state_t* statePtr, const void* input, size_t length);
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* statePtr);
+
+/******* Canonical representation *******/
+typedef struct { unsigned char digest[8]; } XXH64_canonical_t;
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash);
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src);
+
+
+#endif /* XXH_NO_LONG_LONG */
+
+#endif /* XXHASH_H_5627135585666179 */
+
+
+
+#if defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742)
+#define XXHASH_H_STATIC_13879238742
+/* ************************************************************************************************
+ This section contains declarations which are not guaranteed to remain stable.
+ They may change in future versions, becoming incompatible with a different version of the library.
+ These declarations should only be used with static linking.
+ Never use them in association with dynamic linking !
+*************************************************************************************************** */
+
+/* These definitions are only present to allow
+ * static allocation of XXH state, on stack or in a struct for example.
+ * Never **ever** use members directly. */
+
+struct XXH32_state_s {
+ XXH32_hash_t total_len_32;
+ XXH32_hash_t large_len;
+ XXH32_hash_t v1;
+ XXH32_hash_t v2;
+ XXH32_hash_t v3;
+ XXH32_hash_t v4;
+ XXH32_hash_t mem32[4];
+ XXH32_hash_t memsize;
+ XXH32_hash_t reserved; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH32_state_t */
+
+
+#ifndef XXH_NO_LONG_LONG /* defined when there is no 64-bit support */
+
+struct XXH64_state_s {
+ XXH64_hash_t total_len;
+ XXH64_hash_t v1;
+ XXH64_hash_t v2;
+ XXH64_hash_t v3;
+ XXH64_hash_t v4;
+ XXH64_hash_t mem64[4];
+ XXH32_hash_t memsize;
+ XXH32_hash_t reserved32; /* required for padding anyway */
+ XXH64_hash_t reserved64; /* never read nor write, might be removed in a future version */
+}; /* typedef'd to XXH64_state_t */
+
+#endif /* XXH_NO_LONG_LONG */
+
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+# define XXH_IMPLEMENTATION
+#endif
+
+#endif /* defined(XXH_STATIC_LINKING_ONLY) && !defined(XXHASH_H_STATIC_13879238742) */
+
+
+
+/*-**********************************************************************
+* xxHash implementation
+* Functions implementation used to be hosted within xxhash.c .
+* However, code inlining requires to place implementation in the header file.
+* As a consequence, xxhash.c used to be included within xxhash.h .
+* But some build systems don't like *.c inclusions.
+* So the implementation is now directly integrated within xxhash.h .
+* Another small advantage is that xxhash.c is no longer required in /includes .
+************************************************************************/
+
+#if ( defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API) \
+ || defined(XXH_IMPLEMENTATION) ) && !defined(XXH_IMPLEM_13a8737387)
+# define XXH_IMPLEM_13a8737387
+
+/* *************************************
+* Tuning parameters
+***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The switch below allows selecting a different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method doesn't depend on the compiler but violates the C standard.
+ * It can generate buggy code on targets which do not support unaligned memory accesses.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See http://stackoverflow.com/a/32095106/646947 for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
+# if !defined(__clang__) && defined(__GNUC__) && defined(__ARM_FEATURE_UNALIGNED) && defined(__ARM_ARCH) && (__ARM_ARCH == 6)
+# define XXH_FORCE_MEMORY_ACCESS 2
+# elif !defined(__clang__) && ((defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+ (defined(__GNUC__) && (defined(__ARM_ARCH) && __ARM_ARCH >= 7)))
+# define XXH_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If input pointer is NULL, xxHash default behavior is to dereference it, triggering a segfault.
+ * When this macro is enabled, xxHash actively checks the input for a null pointer.
+ * If it is NULL, the result is the same as for a zero-length input.
+ */
+#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
+# define XXH_ACCEPT_NULL_INPUT_POINTER 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+# if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
+# define XXH_FORCE_ALIGN_CHECK 0
+# else
+# define XXH_FORCE_ALIGN_CHECK 1
+# endif
+#endif
+
+/*!XXH_REROLL:
+ * Whether to reroll XXH32_finalize and XXH64_finalize,
+ * instead of using an unrolled jump table / if-statement loop.
+ *
+ * This is automatically defined on -Os/-Oz on GCC and Clang. */
+#ifndef XXH_REROLL
+# if defined(__OPTIMIZE_SIZE__)
+# define XXH_REROLL 1
+# else
+# define XXH_REROLL 0
+# endif
+#endif
+
+
+/* *************************************
+* Includes & Memory related functions
+***************************************/
+/*! Modify the local functions below should you wish to use some other memory routines
+* for malloc(), free() */
+#include <stdlib.h>
+static void* XXH_malloc(size_t s) { return malloc(s); }
+static void XXH_free (void* p) { free(p); }
+/*! and for memcpy() */
+#include <string.h>
+static void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
+
+#include <limits.h> /* ULLONG_MAX */
+
+
+/* *************************************
+* Compiler Specific Options
+***************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+# define XXH_FORCE_INLINE static __forceinline
+# define XXH_NO_INLINE static __declspec(noinline)
+#else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define XXH_FORCE_INLINE static inline __attribute__((always_inline))
+# define XXH_NO_INLINE static __attribute__((noinline))
+# else
+# define XXH_FORCE_INLINE static inline
+# define XXH_NO_INLINE static
+# endif
+# else
+# define XXH_FORCE_INLINE static
+# define XXH_NO_INLINE static
+# endif /* __STDC_VERSION__ */
+#endif
+
+
+
+/* *************************************
+* Debug
+***************************************/
+/* DEBUGLEVEL is expected to be defined externally,
+ * typically through the compiler command line.
+ * The value must be a number. */
+#ifndef DEBUGLEVEL
+# define DEBUGLEVEL 0
+#endif
+
+#if (DEBUGLEVEL>=1)
+# include <assert.h> /* note : can still be disabled with NDEBUG */
+# define XXH_ASSERT(c) assert(c)
+#else
+# define XXH_ASSERT(c) ((void)0)
+#endif
+
+/* note : use after variable declarations */
+#define XXH_STATIC_ASSERT(c) { enum { XXH_sa = 1/(int)(!!(c)) }; }
+
+
+/* *************************************
+* Basic Types
+***************************************/
+#if !defined (__VMS) \
+ && (defined (__cplusplus) \
+ || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t xxh_u8;
+#else
+ typedef unsigned char xxh_u8;
+#endif
+typedef XXH32_hash_t xxh_u32;
+
+
+/* *** Memory access *** */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static xxh_u32 XXH_read32(const void* memPtr) { return *(const xxh_u32*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __packed attributes are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { xxh_u32 u32; } __attribute__((packed)) unalign;
+static xxh_u32 XXH_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static xxh_u32 XXH_read32(const void* memPtr)
+{
+ xxh_u32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+
+/* *** Endianness *** */
+typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+# if defined(_WIN32) /* Windows is always little endian */ \
+ || defined(__LITTLE_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 1
+# elif defined(__BIG_ENDIAN__) \
+ || (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
+# define XXH_CPU_LITTLE_ENDIAN 0
+# else
+static int XXH_isLittleEndian(void)
+{
+ const union { xxh_u32 u; xxh_u8 c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+# define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+# endif
+#endif
+
+
+
+
+/* ****************************************
+* Compiler-specific Functions and Macros
+******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#if !defined(NO_CLANG_BUILTIN) && __has_builtin(__builtin_rotateleft32) && __has_builtin(__builtin_rotateleft64)
+# define XXH_rotl32 __builtin_rotateleft32
+# define XXH_rotl64 __builtin_rotateleft64
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems poor */
+#elif defined(_MSC_VER)
+# define XXH_rotl32(x,r) _rotl(x,r)
+# define XXH_rotl64(x,r) _rotl64(x,r)
+#else
+# define XXH_rotl32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
+# define XXH_rotl64(x,r) (((x) << (r)) | ((x) >> (64 - (r))))
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap32 __builtin_bswap32
+#else
+static xxh_u32 XXH_swap32 (xxh_u32 x)
+{
+ return ((x << 24) & 0xff000000 ) |
+ ((x << 8) & 0x00ff0000 ) |
+ ((x >> 8) & 0x0000ff00 ) |
+ ((x >> 24) & 0x000000ff );
+}
+#endif
+
+
+/* ***************************
+* Memory reads
+*****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+XXH_FORCE_INLINE xxh_u32 XXH_readLE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read32(ptr) : XXH_swap32(XXH_read32(ptr));
+}
+
+static xxh_u32 XXH_readBE32(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr)) : XXH_read32(ptr);
+}
+
+XXH_FORCE_INLINE xxh_u32
+XXH_readLE32_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned) {
+ return XXH_readLE32(ptr);
+ } else {
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u32*)ptr : XXH_swap32(*(const xxh_u32*)ptr);
+ }
+}
+
+
+/* *************************************
+* Misc
+***************************************/
+XXH_PUBLIC_API unsigned XXH_versionNumber (void) { return XXH_VERSION_NUMBER; }
+
+
+/* *******************************************************************
+* 32-bit hash functions
+*********************************************************************/
+static const xxh_u32 PRIME32_1 = 0x9E3779B1U; /* 0b10011110001101110111100110110001 */
+static const xxh_u32 PRIME32_2 = 0x85EBCA77U; /* 0b10000101111010111100101001110111 */
+static const xxh_u32 PRIME32_3 = 0xC2B2AE3DU; /* 0b11000010101100101010111000111101 */
+static const xxh_u32 PRIME32_4 = 0x27D4EB2FU; /* 0b00100111110101001110101100101111 */
+static const xxh_u32 PRIME32_5 = 0x165667B1U; /* 0b00010110010101100110011110110001 */
+
+static xxh_u32 XXH32_round(xxh_u32 acc, xxh_u32 input)
+{
+ acc += input * PRIME32_2;
+ acc = XXH_rotl32(acc, 13);
+ acc *= PRIME32_1;
+#if defined(__GNUC__) && defined(__SSE4_1__) && !defined(XXH_ENABLE_AUTOVECTORIZE)
+ /* UGLY HACK:
+ * This inline assembly hack forces acc into a normal register. This is the
+ * only thing that prevents GCC and Clang from autovectorizing the XXH32 loop
+ * (pragmas and attributes don't work for some reason) without globally
+ * disabling SSE4.1.
+ *
+ * The reason we want to avoid vectorization is because despite working on
+ * 4 integers at a time, there are multiple factors slowing XXH32 down on
+ * SSE4:
+ * - There's a ridiculous amount of lag from pmulld (10 cycles of latency on newer chips!)
+ * making it slightly slower to multiply four integers at once compared to four
+ * integers independently. Even when pmulld was fastest, Sandy/Ivy Bridge, it is
+ * still not worth it to go into SSE just to multiply unless doing a long operation.
+ *
+ * - Four instructions are required to rotate,
+ *      movdqa tmp, v // not required with VEX encoding
+ * pslld tmp, 13 // tmp <<= 13
+ * psrld v, 19 // x >>= 19
+ * por v, tmp // x |= tmp
+ * compared to one for scalar:
+ * roll v, 13 // reliably fast across the board
+ * shldl v, v, 13 // Sandy Bridge and later prefer this for some reason
+ *
+ * - Instruction level parallelism is actually more beneficial here because the
+ * SIMD actually serializes this operation: While v1 is rotating, v2 can load data,
+ * while v3 can multiply. SSE forces them to operate together.
+ *
+ * How this hack works:
+ * __asm__("" // Declare an assembly block but don't declare any instructions
+ * : // However, as an Input/Output Operand,
+ * "+r" // constrain a read/write operand (+) as a general purpose register (r).
+ * (acc) // and set acc as the operand
+ * );
+ *
+ * Because of the 'r', the compiler has promised that acc will be in a
+ * general purpose register and the '+' says that it will be 'read/write',
+ * so it has to assume it has changed. It is like volatile without all the
+ * loads and stores.
+ *
+ * Since the argument has to be in a normal register (not an SSE register),
+ * each time XXH32_round is called, it is impossible to vectorize. */
+ __asm__("" : "+r" (acc));
+#endif
+ return acc;
+}
+
+/* mix all bits */
+static xxh_u32 XXH32_avalanche(xxh_u32 h32)
+{
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+ return(h32);
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, align)
+
+static xxh_u32
+XXH32_finalize(xxh_u32 h32, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+#define PROCESS1 \
+ h32 += (*ptr++) * PRIME32_5; \
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
+
+#define PROCESS4 \
+ h32 += XXH_get32bits(ptr) * PRIME32_3; \
+ ptr+=4; \
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
+
+ /* Compact rerolled version */
+ if (XXH_REROLL) {
+ len &= 15;
+ while (len >= 4) {
+ PROCESS4;
+ len -= 4;
+ }
+ while (len > 0) {
+ PROCESS1;
+ --len;
+ }
+ return XXH32_avalanche(h32);
+ } else {
+ switch(len&15) /* or switch(bEnd - p) */ {
+ case 12: PROCESS4;
+ /* fallthrough */
+ case 8: PROCESS4;
+ /* fallthrough */
+ case 4: PROCESS4;
+ return XXH32_avalanche(h32);
+
+ case 13: PROCESS4;
+ /* fallthrough */
+ case 9: PROCESS4;
+ /* fallthrough */
+ case 5: PROCESS4;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 14: PROCESS4;
+ /* fallthrough */
+ case 10: PROCESS4;
+ /* fallthrough */
+ case 6: PROCESS4;
+ PROCESS1;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 15: PROCESS4;
+ /* fallthrough */
+ case 11: PROCESS4;
+ /* fallthrough */
+ case 7: PROCESS4;
+ /* fallthrough */
+ case 3: PROCESS1;
+ /* fallthrough */
+ case 2: PROCESS1;
+ /* fallthrough */
+ case 1: PROCESS1;
+ /* fallthrough */
+ case 0: return XXH32_avalanche(h32);
+ }
+ XXH_ASSERT(0);
+ return h32; /* reaching this point is deemed impossible */
+ }
+}
+
+XXH_FORCE_INLINE xxh_u32
+XXH32_endian_align(const xxh_u8* input, size_t len, xxh_u32 seed, XXH_alignment align)
+{
+ const xxh_u8* bEnd = input + len;
+ xxh_u32 h32;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ if (input==NULL) {
+ len=0;
+ bEnd=input=(const xxh_u8*)(size_t)16;
+ }
+#endif
+
+ if (len>=16) {
+ const xxh_u8* const limit = bEnd - 15;
+ xxh_u32 v1 = seed + PRIME32_1 + PRIME32_2;
+ xxh_u32 v2 = seed + PRIME32_2;
+ xxh_u32 v3 = seed + 0;
+ xxh_u32 v4 = seed - PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(input)); input += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(input)); input += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(input)); input += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(input)); input += 4;
+ } while (input < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7)
+ + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (xxh_u32)len;
+
+ return XXH32_finalize(h32, input, len&15, align);
+}
+
+
+XXH_PUBLIC_API XXH32_hash_t XXH32 (const void* input, size_t len, XXH32_hash_t seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, (const xxh_u8*)input, len);
+ return XXH32_digest(&state);
+
+#else
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) == 0) { /* Input is 4-bytes aligned, leverage the speed benefit */
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ return XXH32_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+#endif
+}
+
+
+
+/******* Hash streaming *******/
+
+XXH_PUBLIC_API XXH32_state_t* XXH32_createState(void)
+{
+ return (XXH32_state_t*)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t* dstState, const XXH32_state_t* srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t* statePtr, XXH32_hash_t seed)
+{
+ XXH32_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ /* do not write into reserved, planned to be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode
+XXH32_update(XXH32_state_t* state, const void* input, size_t len)
+{
+ if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len_32 += (XXH32_hash_t)len;
+ state->large_len |= (XXH32_hash_t)((len>=16) | (state->total_len_32>=16));
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, len);
+ state->memsize += (XXH32_hash_t)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((xxh_u8*)(state->mem32) + state->memsize, input, 16-state->memsize);
+ { const xxh_u32* p32 = state->mem32;
+ state->v1 = XXH32_round(state->v1, XXH_readLE32(p32)); p32++;
+ state->v2 = XXH32_round(state->v2, XXH_readLE32(p32)); p32++;
+ state->v3 = XXH32_round(state->v3, XXH_readLE32(p32)); p32++;
+ state->v4 = XXH32_round(state->v4, XXH_readLE32(p32));
+ }
+ p += 16-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd-16) {
+ const xxh_u8* const limit = bEnd - 16;
+ xxh_u32 v1 = state->v1;
+ xxh_u32 v2 = state->v2;
+ xxh_u32 v3 = state->v3;
+ xxh_u32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p)); p+=4;
+ v2 = XXH32_round(v2, XXH_readLE32(p)); p+=4;
+ v3 = XXH32_round(v3, XXH_readLE32(p)); p+=4;
+ v4 = XXH32_round(v4, XXH_readLE32(p)); p+=4;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest (const XXH32_state_t* state)
+{
+ xxh_u32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v1, 1)
+ + XXH_rotl32(state->v2, 7)
+ + XXH_rotl32(state->v3, 12)
+ + XXH_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ return XXH32_finalize(h32, (const xxh_u8*)state->mem32, state->memsize, XXH_aligned);
+}
+
+
+/******* Canonical representation *******/
+
+/*! Default XXH result types are basic unsigned 32 and 64 bits.
+* The canonical representation follows the human-readable write convention, i.e. big-endian (large digits first).
+* These functions allow transformation of hash result into and from its canonical format.
+* This way, hash values can be written into a file or buffer, remaining comparable across different systems.
+*/
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t* dst, XXH32_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t XXH32_hashFromCanonical(const XXH32_canonical_t* src)
+{
+ return XXH_readBE32(src);
+}
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+* 64-bit hash functions
+*********************************************************************/
+
+/******* Memory access *******/
+
+typedef XXH64_hash_t xxh_u64;
+
+
+/*! XXH_REROLL_XXH64:
+ * Whether to reroll the XXH64_finalize() loop.
+ *
+ * Just like XXH32, we can unroll the XXH64_finalize() loop. This can be a performance gain
+ * on 64-bit hosts, as only one jump is required.
+ *
+ * However, on 32-bit hosts, because arithmetic needs to be done with two 32-bit registers,
+ * and 64-bit arithmetic needs to be simulated, it isn't beneficial to unroll. The code becomes
+ * ridiculously large (the largest function in the binary on i386!), and rerolling it saves
+ * anywhere from 3kB to 20kB. It is also slightly faster because it fits into cache better
+ * and is more likely to be inlined by the compiler.
+ *
+ * If XXH_REROLL is defined, this is ignored and the loop is always rerolled. */
+#ifndef XXH_REROLL_XXH64
+# if (defined(__ILP32__) || defined(_ILP32)) /* ILP32 is often defined on 32-bit GCC family */ \
+ || !(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) /* x86-64 */ \
+ || defined(_M_ARM64) || defined(__aarch64__) || defined(__arm64__) /* aarch64 */ \
+ || defined(__PPC64__) || defined(__PPC64LE__) || defined(__ppc64__) || defined(__powerpc64__) /* ppc64 */ \
+ || defined(__mips64__) || defined(__mips64)) /* mips64 */ \
+ || (!defined(SIZE_MAX) || SIZE_MAX < ULLONG_MAX) /* check limits */
+# define XXH_REROLL_XXH64 1
+# else
+# define XXH_REROLL_XXH64 0
+# endif
+#endif /* !defined(XXH_REROLL_XXH64) */
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==2))
+
+/* Force direct memory access. Only works on CPUs which support unaligned memory access in hardware */
+static xxh_u64 XXH_read64(const void* memPtr) { return *(const xxh_u64*) memPtr; }
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS==1))
+
+/* __packed attributes are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { xxh_u32 u32; xxh_u64 u64; } __attribute__((packed)) unalign64;
+static xxh_u64 XXH_read64(const void* ptr) { return ((const unalign64*)ptr)->u64; }
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static xxh_u64 XXH_read64(const void* memPtr)
+{
+ xxh_u64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+# define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+# define XXH_swap64 __builtin_bswap64
+#else
+static xxh_u64 XXH_swap64 (xxh_u64 x)
+{
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+XXH_FORCE_INLINE xxh_u64 XXH_readLE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_read64(ptr) : XXH_swap64(XXH_read64(ptr));
+}
+
+static xxh_u64 XXH_readBE64(const void* ptr)
+{
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr)) : XXH_read64(ptr);
+}
+
+XXH_FORCE_INLINE xxh_u64
+XXH_readLE64_align(const void* ptr, XXH_alignment align)
+{
+ if (align==XXH_unaligned)
+ return XXH_readLE64(ptr);
+ else
+ return XXH_CPU_LITTLE_ENDIAN ? *(const xxh_u64*)ptr : XXH_swap64(*(const xxh_u64*)ptr);
+}
+
+
+/******* xxh64 *******/
+
+static const xxh_u64 PRIME64_1 = 0x9E3779B185EBCA87ULL; /* 0b1001111000110111011110011011000110000101111010111100101010000111 */
+static const xxh_u64 PRIME64_2 = 0xC2B2AE3D27D4EB4FULL; /* 0b1100001010110010101011100011110100100111110101001110101101001111 */
+static const xxh_u64 PRIME64_3 = 0x165667B19E3779F9ULL; /* 0b0001011001010110011001111011000110011110001101110111100111111001 */
+static const xxh_u64 PRIME64_4 = 0x85EBCA77C2B2AE63ULL; /* 0b1000010111101011110010100111011111000010101100101010111001100011 */
+static const xxh_u64 PRIME64_5 = 0x27D4EB2F165667C5ULL; /* 0b0010011111010100111010110010111100010110010101100110011111000101 */
+
+static xxh_u64 XXH64_round(xxh_u64 acc, xxh_u64 input)
+{
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static xxh_u64 XXH64_mergeRound(xxh_u64 acc, xxh_u64 val)
+{
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+static xxh_u64 XXH64_avalanche(xxh_u64 h64)
+{
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, align)
+
+static xxh_u64
+XXH64_finalize(xxh_u64 h64, const xxh_u8* ptr, size_t len, XXH_alignment align)
+{
+#define PROCESS1_64 \
+ h64 ^= (*ptr++) * PRIME64_5; \
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+
+#define PROCESS4_64 \
+ h64 ^= (xxh_u64)(XXH_get32bits(ptr)) * PRIME64_1; \
+ ptr+=4; \
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+
+#define PROCESS8_64 { \
+ xxh_u64 const k1 = XXH64_round(0, XXH_get64bits(ptr)); \
+ ptr+=8; \
+ h64 ^= k1; \
+ h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4; \
+}
+
+ /* Rerolled version for 32-bit targets is faster and much smaller. */
+ if (XXH_REROLL || XXH_REROLL_XXH64) {
+ len &= 31;
+ while (len >= 8) {
+ PROCESS8_64;
+ len -= 8;
+ }
+ if (len >= 4) {
+ PROCESS4_64;
+ len -= 4;
+ }
+ while (len > 0) {
+ PROCESS1_64;
+ --len;
+ }
+ return XXH64_avalanche(h64);
+ } else {
+ switch(len & 31) {
+ case 24: PROCESS8_64;
+ /* fallthrough */
+ case 16: PROCESS8_64;
+ /* fallthrough */
+ case 8: PROCESS8_64;
+ return XXH64_avalanche(h64);
+
+ case 28: PROCESS8_64;
+ /* fallthrough */
+ case 20: PROCESS8_64;
+ /* fallthrough */
+ case 12: PROCESS8_64;
+ /* fallthrough */
+ case 4: PROCESS4_64;
+ return XXH64_avalanche(h64);
+
+ case 25: PROCESS8_64;
+ /* fallthrough */
+ case 17: PROCESS8_64;
+ /* fallthrough */
+ case 9: PROCESS8_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 29: PROCESS8_64;
+ /* fallthrough */
+ case 21: PROCESS8_64;
+ /* fallthrough */
+ case 13: PROCESS8_64;
+ /* fallthrough */
+ case 5: PROCESS4_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 26: PROCESS8_64;
+ /* fallthrough */
+ case 18: PROCESS8_64;
+ /* fallthrough */
+ case 10: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 30: PROCESS8_64;
+ /* fallthrough */
+ case 22: PROCESS8_64;
+ /* fallthrough */
+ case 14: PROCESS8_64;
+ /* fallthrough */
+ case 6: PROCESS4_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 27: PROCESS8_64;
+ /* fallthrough */
+ case 19: PROCESS8_64;
+ /* fallthrough */
+ case 11: PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 31: PROCESS8_64;
+ /* fallthrough */
+ case 23: PROCESS8_64;
+ /* fallthrough */
+ case 15: PROCESS8_64;
+ /* fallthrough */
+ case 7: PROCESS4_64;
+ /* fallthrough */
+ case 3: PROCESS1_64;
+ /* fallthrough */
+ case 2: PROCESS1_64;
+ /* fallthrough */
+ case 1: PROCESS1_64;
+ /* fallthrough */
+ case 0: return XXH64_avalanche(h64);
+ }
+ }
+ /* impossible to reach */
+ XXH_ASSERT(0);
+ return 0; /* unreachable, but some compilers complain without it */
+}
+
+XXH_FORCE_INLINE xxh_u64
+XXH64_endian_align(const xxh_u8* input, size_t len, xxh_u64 seed, XXH_alignment align)
+{
+ const xxh_u8* bEnd = input + len;
+ xxh_u64 h64;
+
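+ /* When enabled, treat a NULL input as empty: zero the length and
+  * substitute a small non-NULL dummy address that is never dereferenced. */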
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ if (input==NULL) {
+ len=0;
+ bEnd=input=(const xxh_u8*)(size_t)32;
+ }
+#endif
+
+ if (len>=32) {
+ const xxh_u8* const limit = bEnd - 32;
+ xxh_u64 v1 = seed + PRIME64_1 + PRIME64_2;
+ xxh_u64 v2 = seed + PRIME64_2;
+ xxh_u64 v3 = seed + 0;
+ xxh_u64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(input)); input+=8;
+ v2 = XXH64_round(v2, XXH_get64bits(input)); input+=8;
+ v3 = XXH64_round(v3, XXH_get64bits(input)); input+=8;
+ v4 = XXH64_round(v4, XXH_get64bits(input)); input+=8;
+ } while (input<=limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (xxh_u64) len;
+
+ return XXH64_finalize(h64, input, len, align);
+}
+
+
+XXH_PUBLIC_API XXH64_hash_t XXH64 (const void* input, size_t len, XXH64_hash_t seed)
+{
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, (const xxh_u8*)input, len);
+ return XXH64_digest(&state);
+
+#else
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7)==0) { /* Input is aligned, let's leverage the speed advantage */
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_aligned);
+ } }
+
+ return XXH64_endian_align((const xxh_u8*)input, len, seed, XXH_unaligned);
+
+#endif
+}
+
+/******* Hash Streaming *******/
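+
+/*
+ * Typical streaming round trip (a sketch built only from the API below;
+ * `chunk`, `chunk_len` and `more_data` are placeholders, and error
+ * handling is omitted):
+ *
+ *     XXH64_state_t* const state = XXH64_createState();
+ *     XXH64_reset(state, seed);
+ *     while (more_data)
+ *         XXH64_update(state, chunk, chunk_len);
+ *     XXH64_hash_t const hash = XXH64_digest(state);
+ *     XXH64_freeState(state);
+ *
+ * XXH64_digest() does not disturb the state, so intermediate hashes can
+ * be taken between updates.
+ */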
+
+XXH_PUBLIC_API XXH64_state_t* XXH64_createState(void)
+{
+ return (XXH64_state_t*)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t* statePtr)
+{
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t* dstState, const XXH64_state_t* srcState)
+{
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t* statePtr, XXH64_hash_t seed)
+{
+ XXH64_state_t state; /* using a local state to memcpy() in order to avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ /* do not write into reserved64; it might be removed in a future version */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved64));
+ return XXH_OK;
+}
+
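+/*
+ * Buffers partial input in state->mem64 until a full 32-byte stripe is
+ * available, then runs the same 4-lane rounds as the single-shot path.
+ * Up to 31 trailing bytes are kept in the buffer for XXH64_digest().
+ */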
+XXH_PUBLIC_API XXH_errorcode
+XXH64_update (XXH64_state_t* state, const void* input, size_t len)
+{
+ if (input==NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && (XXH_ACCEPT_NULL_INPUT_POINTER>=1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ { const xxh_u8* p = (const xxh_u8*)input;
+ const xxh_u8* const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, len);
+ state->memsize += (xxh_u32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((xxh_u8*)state->mem64) + state->memsize, input, 32-state->memsize);
+ state->v1 = XXH64_round(state->v1, XXH_readLE64(state->mem64+0));
+ state->v2 = XXH64_round(state->v2, XXH_readLE64(state->mem64+1));
+ state->v3 = XXH64_round(state->v3, XXH_readLE64(state->mem64+2));
+ state->v4 = XXH64_round(state->v4, XXH_readLE64(state->mem64+3));
+ p += 32-state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p+32 <= bEnd) {
+ const xxh_u8* const limit = bEnd - 32;
+ xxh_u64 v1 = state->v1;
+ xxh_u64 v2 = state->v2;
+ xxh_u64 v3 = state->v3;
+ xxh_u64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p)); p+=8;
+ v2 = XXH64_round(v2, XXH_readLE64(p)); p+=8;
+ v3 = XXH64_round(v3, XXH_readLE64(p)); p+=8;
+ v4 = XXH64_round(v4, XXH_readLE64(p)); p+=8;
+ } while (p<=limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd-p));
+ state->memsize = (unsigned)(bEnd-p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
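+/*
+ * Produces the hash of everything fed so far without modifying the
+ * state, so it can be called repeatedly as more data arrives.
+ */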
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest (const XXH64_state_t* state)
+{
+ xxh_u64 h64;
+
+ if (state->total_len >= 32) {
+ xxh_u64 const v1 = state->v1;
+ xxh_u64 const v2 = state->v2;
+ xxh_u64 const v3 = state->v3;
+ xxh_u64 const v4 = state->v4;
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) + XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 /*seed*/ + PRIME64_5;
+ }
+
+ h64 += (xxh_u64) state->total_len;
+
+ return XXH64_finalize(h64, (const xxh_u8*)state->mem64, (size_t)state->total_len, XXH_aligned);
+}
+
+
+/******* Canonical representation *******/
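+
+/* The canonical form stores the hash big-endian, so serialized digests
+ * are byte-for-byte identical across platforms of either endianness. */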
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t* dst, XXH64_hash_t hash)
+{
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN) hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH64_hash_t XXH64_hashFromCanonical(const XXH64_canonical_t* src)
+{
+ return XXH_readBE64(src);
+}
+
+
+
+/* *********************************************************************
+* XXH3
+* New generation hash designed for speed on small keys and vectorization
+************************************************************************ */
+
+/* #include "xxh3.h" */
+
+
+#endif /* XXH_NO_LONG_LONG */
+
+
+#endif /* XXH_IMPLEMENTATION */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/third_party/rust/glslopt/src/bindings.rs b/third_party/rust/glslopt/src/bindings.rs
new file mode 100644
index 0000000000..f4e5bebd08
--- /dev/null
+++ b/third_party/rust/glslopt/src/bindings.rs
@@ -0,0 +1,135 @@
+/* automatically generated by rust-bindgen */
+
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct glslopt_shader {
+ _unused: [u8; 0],
+}
+#[repr(C)]
+#[derive(Debug, Copy, Clone)]
+pub struct glslopt_ctx {
+ _unused: [u8; 0],
+}
+pub const glslopt_shader_type_kGlslOptShaderVertex: glslopt_shader_type = 0;
+pub const glslopt_shader_type_kGlslOptShaderFragment: glslopt_shader_type = 1;
+pub type glslopt_shader_type = u32;
+pub const glslopt_options_kGlslOptionSkipPreprocessor: glslopt_options = 1;
+pub const glslopt_options_kGlslOptionNotFullShader: glslopt_options = 2;
+pub type glslopt_options = u32;
+pub const glslopt_target_kGlslTargetOpenGL: glslopt_target = 0;
+pub const glslopt_target_kGlslTargetOpenGLES20: glslopt_target = 1;
+pub const glslopt_target_kGlslTargetOpenGLES30: glslopt_target = 2;
+pub const glslopt_target_kGlslTargetMetal: glslopt_target = 3;
+pub type glslopt_target = u32;
+pub const glslopt_basic_type_kGlslTypeFloat: glslopt_basic_type = 0;
+pub const glslopt_basic_type_kGlslTypeInt: glslopt_basic_type = 1;
+pub const glslopt_basic_type_kGlslTypeBool: glslopt_basic_type = 2;
+pub const glslopt_basic_type_kGlslTypeTex2D: glslopt_basic_type = 3;
+pub const glslopt_basic_type_kGlslTypeTex3D: glslopt_basic_type = 4;
+pub const glslopt_basic_type_kGlslTypeTexCube: glslopt_basic_type = 5;
+pub const glslopt_basic_type_kGlslTypeTex2DShadow: glslopt_basic_type = 6;
+pub const glslopt_basic_type_kGlslTypeTex2DArray: glslopt_basic_type = 7;
+pub const glslopt_basic_type_kGlslTypeOther: glslopt_basic_type = 8;
+pub const glslopt_basic_type_kGlslTypeCount: glslopt_basic_type = 9;
+pub type glslopt_basic_type = u32;
+pub const glslopt_precision_kGlslPrecHigh: glslopt_precision = 0;
+pub const glslopt_precision_kGlslPrecMedium: glslopt_precision = 1;
+pub const glslopt_precision_kGlslPrecLow: glslopt_precision = 2;
+pub const glslopt_precision_kGlslPrecCount: glslopt_precision = 3;
+pub type glslopt_precision = u32;
+extern "C" {
+ pub fn glslopt_initialize(target: glslopt_target) -> *mut glslopt_ctx;
+}
+extern "C" {
+ pub fn glslopt_cleanup(ctx: *mut glslopt_ctx);
+}
+extern "C" {
+ pub fn glslopt_set_max_unroll_iterations(
+ ctx: *mut glslopt_ctx,
+ iterations: ::std::os::raw::c_uint,
+ );
+}
+extern "C" {
+ pub fn glslopt_optimize(
+ ctx: *mut glslopt_ctx,
+ type_: glslopt_shader_type,
+ shaderSource: *const ::std::os::raw::c_char,
+ options: ::std::os::raw::c_uint,
+ ) -> *mut glslopt_shader;
+}
+extern "C" {
+ pub fn glslopt_get_status(shader: *mut glslopt_shader) -> bool;
+}
+extern "C" {
+ pub fn glslopt_get_output(shader: *mut glslopt_shader) -> *const ::std::os::raw::c_char;
+}
+extern "C" {
+ pub fn glslopt_get_raw_output(shader: *mut glslopt_shader) -> *const ::std::os::raw::c_char;
+}
+extern "C" {
+ pub fn glslopt_get_log(shader: *mut glslopt_shader) -> *const ::std::os::raw::c_char;
+}
+extern "C" {
+ pub fn glslopt_shader_delete(shader: *mut glslopt_shader);
+}
+extern "C" {
+ pub fn glslopt_shader_get_input_count(shader: *mut glslopt_shader) -> ::std::os::raw::c_int;
+}
+extern "C" {
+ pub fn glslopt_shader_get_input_desc(
+ shader: *mut glslopt_shader,
+ index: ::std::os::raw::c_int,
+ outName: *mut *const ::std::os::raw::c_char,
+ outType: *mut glslopt_basic_type,
+ outPrec: *mut glslopt_precision,
+ outVecSize: *mut ::std::os::raw::c_int,
+ outMatSize: *mut ::std::os::raw::c_int,
+ outArraySize: *mut ::std::os::raw::c_int,
+ outLocation: *mut ::std::os::raw::c_int,
+ );
+}
+extern "C" {
+ pub fn glslopt_shader_get_uniform_count(shader: *mut glslopt_shader) -> ::std::os::raw::c_int;
+}
+extern "C" {
+ pub fn glslopt_shader_get_uniform_total_size(
+ shader: *mut glslopt_shader,
+ ) -> ::std::os::raw::c_int;
+}
+extern "C" {
+ pub fn glslopt_shader_get_uniform_desc(
+ shader: *mut glslopt_shader,
+ index: ::std::os::raw::c_int,
+ outName: *mut *const ::std::os::raw::c_char,
+ outType: *mut glslopt_basic_type,
+ outPrec: *mut glslopt_precision,
+ outVecSize: *mut ::std::os::raw::c_int,
+ outMatSize: *mut ::std::os::raw::c_int,
+ outArraySize: *mut ::std::os::raw::c_int,
+ outLocation: *mut ::std::os::raw::c_int,
+ );
+}
+extern "C" {
+ pub fn glslopt_shader_get_texture_count(shader: *mut glslopt_shader) -> ::std::os::raw::c_int;
+}
+extern "C" {
+ pub fn glslopt_shader_get_texture_desc(
+ shader: *mut glslopt_shader,
+ index: ::std::os::raw::c_int,
+ outName: *mut *const ::std::os::raw::c_char,
+ outType: *mut glslopt_basic_type,
+ outPrec: *mut glslopt_precision,
+ outVecSize: *mut ::std::os::raw::c_int,
+ outMatSize: *mut ::std::os::raw::c_int,
+ outArraySize: *mut ::std::os::raw::c_int,
+ outLocation: *mut ::std::os::raw::c_int,
+ );
+}
+extern "C" {
+ pub fn glslopt_shader_get_stats(
+ shader: *mut glslopt_shader,
+ approxMath: *mut ::std::os::raw::c_int,
+ approxTex: *mut ::std::os::raw::c_int,
+ approxFlow: *mut ::std::os::raw::c_int,
+ );
+}
diff --git a/third_party/rust/glslopt/src/lib.rs b/third_party/rust/glslopt/src/lib.rs
new file mode 100644
index 0000000000..e06ec1a4c6
--- /dev/null
+++ b/third_party/rust/glslopt/src/lib.rs
@@ -0,0 +1,102 @@
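+//! Thin Rust wrapper over the glsl-optimizer C API exposed by
+//! `bindings.rs`. A [`Context`] owns the underlying optimizer instance;
+//! each call to [`Context::optimize`] yields a [`Shader`] that owns its
+//! result and releases it on drop.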
+use std::ffi::{CString, CStr};
+use std::ptr;
+
+#[allow(dead_code)]
+#[allow(non_camel_case_types)]
+#[allow(non_upper_case_globals)]
+mod bindings;
+
+pub enum ShaderType {
+ Vertex,
+ Fragment,
+}
+
+pub enum Target {
+ OpenGl,
+ OpenGles20,
+ OpenGles30,
+ Metal,
+}
+
+pub struct Context {
+ ctx: *mut bindings::glslopt_ctx,
+}
+
+impl Context {
+ pub fn new(target: Target) -> Self {
+ let target = match target {
+ Target::OpenGl => bindings::glslopt_target_kGlslTargetOpenGL,
+ Target::OpenGles20 => bindings::glslopt_target_kGlslTargetOpenGLES20,
+ Target::OpenGles30 => bindings::glslopt_target_kGlslTargetOpenGLES30,
+ Target::Metal => bindings::glslopt_target_kGlslTargetMetal,
+ };
+
+ let ctx = unsafe { bindings::glslopt_initialize(target) };
+
+ Self {
+ ctx,
+ }
+ }
+
+ pub fn optimize(&self, shader_type: ShaderType, source: String) -> Shader {
+ let shader_type = match shader_type {
+ ShaderType::Vertex => bindings::glslopt_shader_type_kGlslOptShaderVertex,
+ ShaderType::Fragment => bindings::glslopt_shader_type_kGlslOptShaderFragment,
+ };
+ let source = CString::new(source).unwrap();
+
+ let shader = unsafe { bindings::glslopt_optimize(self.ctx, shader_type, source.as_ptr(), 0) };
+ assert_ne!(shader, ptr::null_mut());
+ Shader {
+ shader,
+ }
+ }
+}
+
+impl Drop for Context {
+ fn drop(&mut self) {
+ unsafe {
+ bindings::glslopt_cleanup(self.ctx);
+ }
+ }
+}
+
+pub struct Shader {
+ shader: *mut bindings::glslopt_shader,
+}
+
+impl Shader {
+ pub fn get_status(&self) -> bool {
+ unsafe { bindings::glslopt_get_status(self.shader) }
+ }
+
+ pub fn get_output(&self) -> Result<&str, ()> {
+ unsafe {
+ let cstr = bindings::glslopt_get_output(self.shader);
+ if cstr.is_null() {
+ Err(())
+ } else {
+ Ok(CStr::from_ptr(cstr).to_str().unwrap())
+ }
+ }
+ }
+
+ pub fn get_log(&self) -> &str {
+ unsafe {
+ let cstr = bindings::glslopt_get_log(self.shader);
+ if cstr.is_null() {
+ ""
+ } else {
+ CStr::from_ptr(cstr).to_str().unwrap()
+ }
+ }
+ }
+}
+
+impl Drop for Shader {
+ fn drop(&mut self) {
+ unsafe {
+ bindings::glslopt_shader_delete(self.shader);
+ }
+ }
+}
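+
+// Example round trip (a sketch; `source` stands in for real GLSL text):
+//
+//     let ctx = Context::new(Target::OpenGles30);
+//     let shader = ctx.optimize(ShaderType::Fragment, source);
+//     if shader.get_status() {
+//         let optimized = shader.get_output().expect("no output");
+//     } else {
+//         eprintln!("optimization failed: {}", shader.get_log());
+//     }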
diff --git a/third_party/rust/glslopt/wrapper.hpp b/third_party/rust/glslopt/wrapper.hpp
new file mode 100644
index 0000000000..5491b3d8a6
--- /dev/null
+++ b/third_party/rust/glslopt/wrapper.hpp
@@ -0,0 +1 @@
+#include "glsl-optimizer/src/compiler/glsl/glsl_optimizer.h"