summaryrefslogtreecommitdiffstats
path: root/third_party/rust/wgpu-hal/src/gles
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-07 19:33:14 +0000
commit36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/rust/wgpu-hal/src/gles
parentInitial commit. (diff)
downloadfirefox-esr-upstream.tar.xz
firefox-esr-upstream.zip
Adding upstream version 115.7.0esr.upstream/115.7.0esrupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--third_party/rust/wgpu-hal/src/gles/adapter.rs972
-rw-r--r--third_party/rust/wgpu-hal/src/gles/command.rs1063
-rw-r--r--third_party/rust/wgpu-hal/src/gles/conv.rs501
-rw-r--r--third_party/rust/wgpu-hal/src/gles/device.rs1328
-rw-r--r--third_party/rust/wgpu-hal/src/gles/egl.rs1310
-rw-r--r--third_party/rust/wgpu-hal/src/gles/emscripten.rs26
-rw-r--r--third_party/rust/wgpu-hal/src/gles/mod.rs861
-rw-r--r--third_party/rust/wgpu-hal/src/gles/queue.rs1532
-rw-r--r--third_party/rust/wgpu-hal/src/gles/shaders/clear.frag9
-rw-r--r--third_party/rust/wgpu-hal/src/gles/shaders/clear.vert11
-rw-r--r--third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.frag16
-rw-r--r--third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.vert18
-rw-r--r--third_party/rust/wgpu-hal/src/gles/web.rs404
13 files changed, 8051 insertions, 0 deletions
diff --git a/third_party/rust/wgpu-hal/src/gles/adapter.rs b/third_party/rust/wgpu-hal/src/gles/adapter.rs
new file mode 100644
index 0000000000..b14857ae22
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/adapter.rs
@@ -0,0 +1,972 @@
+use glow::HasContext;
+use std::sync::Arc;
+use wgt::AstcChannel;
+
+use crate::auxil::db;
+
+// https://webgl2fundamentals.org/webgl/lessons/webgl-data-textures.html
+
+const GL_UNMASKED_VENDOR_WEBGL: u32 = 0x9245;
+const GL_UNMASKED_RENDERER_WEBGL: u32 = 0x9246;
+
+impl super::Adapter {
+ /// According to the OpenGL specification, the version information is
+ /// expected to follow the following syntax:
+ ///
+ /// ~~~bnf
+ /// <major> ::= <number>
+ /// <minor> ::= <number>
+ /// <revision> ::= <number>
+ /// <vendor-info> ::= <string>
+ /// <release> ::= <major> "." <minor> ["." <release>]
+ /// <version> ::= <release> [" " <vendor-info>]
+ /// ~~~
+ ///
+ /// Note that this function is intentionally lenient in regards to parsing,
+ /// and will try to recover at least the first two version numbers without
+ /// resulting in an `Err`.
+ /// # Notes
+ /// `WebGL 2` version returned as `OpenGL ES 3.0`
+ fn parse_version(mut src: &str) -> Result<(u8, u8), crate::InstanceError> {
+ let webgl_sig = "WebGL ";
+ // According to the WebGL specification
+ // VERSION WebGL<space>1.0<space><vendor-specific information>
+ // SHADING_LANGUAGE_VERSION WebGL<space>GLSL<space>ES<space>1.0<space><vendor-specific information>
+ let is_webgl = src.starts_with(webgl_sig);
+ if is_webgl {
+ let pos = src.rfind(webgl_sig).unwrap_or(0);
+ src = &src[pos + webgl_sig.len()..];
+ } else {
+ let es_sig = " ES ";
+ match src.rfind(es_sig) {
+ Some(pos) => {
+ src = &src[pos + es_sig.len()..];
+ }
+ None => {
+ log::warn!("ES not found in '{}'", src);
+ return Err(crate::InstanceError);
+ }
+ }
+ };
+
+ let glsl_es_sig = "GLSL ES ";
+ let is_glsl = match src.find(glsl_es_sig) {
+ Some(pos) => {
+ src = &src[pos + glsl_es_sig.len()..];
+ true
+ }
+ None => false,
+ };
+
+ let (version, _vendor_info) = match src.find(' ') {
+ Some(i) => (&src[..i], src[i + 1..].to_string()),
+ None => (src, String::new()),
+ };
+
+ // TODO: make this even more lenient so that we can also accept
+ // `<major> "." <minor> [<???>]`
+ let mut it = version.split('.');
+ let major = it.next().and_then(|s| s.parse().ok());
+ let minor = it.next().and_then(|s| {
+ let trimmed = if s.starts_with('0') {
+ "0"
+ } else {
+ s.trim_end_matches('0')
+ };
+ trimmed.parse().ok()
+ });
+
+ match (major, minor) {
+ (Some(major), Some(minor)) => Ok((
+ // Return WebGL 2.0 version as OpenGL ES 3.0
+ if is_webgl && !is_glsl {
+ major + 1
+ } else {
+ major
+ },
+ minor,
+ )),
+ _ => {
+ log::warn!("Unable to extract the version from '{}'", version);
+ Err(crate::InstanceError)
+ }
+ }
+ }
+
+ fn make_info(vendor_orig: String, renderer_orig: String) -> wgt::AdapterInfo {
+ let vendor = vendor_orig.to_lowercase();
+ let renderer = renderer_orig.to_lowercase();
+
+ // opengl has no way to discern device_type, so we can try to infer it from the renderer string
+ let strings_that_imply_integrated = [
+ " xpress", // space here is on purpose so we don't match express
+ "amd renoir",
+ "radeon hd 4200",
+ "radeon hd 4250",
+ "radeon hd 4290",
+ "radeon hd 4270",
+ "radeon hd 4225",
+ "radeon hd 3100",
+ "radeon hd 3200",
+ "radeon hd 3000",
+ "radeon hd 3300",
+ "radeon(tm) r4 graphics",
+ "radeon(tm) r5 graphics",
+ "radeon(tm) r6 graphics",
+ "radeon(tm) r7 graphics",
+ "radeon r7 graphics",
+ "nforce", // all nvidia nforce are integrated
+ "tegra", // all nvidia tegra are integrated
+ "shield", // all nvidia shield are integrated
+ "igp",
+ "mali",
+ "intel",
+ "v3d",
+ "apple m", // all apple m are integrated
+ ];
+ let strings_that_imply_cpu = ["mesa offscreen", "swiftshader", "llvmpipe"];
+
+ //TODO: handle Intel Iris XE as discreet
+ let inferred_device_type = if vendor.contains("qualcomm")
+ || vendor.contains("intel")
+ || strings_that_imply_integrated
+ .iter()
+ .any(|&s| renderer.contains(s))
+ {
+ wgt::DeviceType::IntegratedGpu
+ } else if strings_that_imply_cpu.iter().any(|&s| renderer.contains(s)) {
+ wgt::DeviceType::Cpu
+ } else {
+ // At this point the Device type is Unknown.
+ // It's most likely DiscreteGpu, but we do not know for sure.
+ // Use "Other" to avoid possibly making incorrect assumptions.
+ // Note that if this same device is available under some other API (ex: Vulkan),
+ // It will mostly likely get a different device type (probably DiscreteGpu).
+ wgt::DeviceType::Other
+ };
+
+ // source: Sascha Willems at Vulkan
+ let vendor_id = if vendor.contains("amd") {
+ db::amd::VENDOR
+ } else if vendor.contains("imgtec") {
+ db::imgtec::VENDOR
+ } else if vendor.contains("nvidia") {
+ db::nvidia::VENDOR
+ } else if vendor.contains("arm") {
+ db::arm::VENDOR
+ } else if vendor.contains("qualcomm") {
+ db::qualcomm::VENDOR
+ } else if vendor.contains("intel") {
+ db::intel::VENDOR
+ } else if vendor.contains("broadcom") {
+ db::broadcom::VENDOR
+ } else if vendor.contains("mesa") {
+ db::mesa::VENDOR
+ } else if vendor.contains("apple") {
+ db::apple::VENDOR
+ } else {
+ 0
+ };
+
+ wgt::AdapterInfo {
+ name: renderer_orig,
+ vendor: vendor_id,
+ device: 0,
+ device_type: inferred_device_type,
+ driver: String::new(),
+ driver_info: String::new(),
+ backend: wgt::Backend::Gl,
+ }
+ }
+
+ pub(super) unsafe fn expose(
+ context: super::AdapterContext,
+ ) -> Option<crate::ExposedAdapter<super::Api>> {
+ let gl = context.lock();
+ let extensions = gl.supported_extensions();
+
+ let (vendor_const, renderer_const) = if extensions.contains("WEBGL_debug_renderer_info") {
+ // emscripten doesn't enable "WEBGL_debug_renderer_info" extension by default. so, we do it manually.
+ // See https://github.com/gfx-rs/wgpu/issues/3245 for context
+ #[cfg(target_os = "emscripten")]
+ if unsafe { super::emscripten::enable_extension("WEBGL_debug_renderer_info\0") } {
+ (GL_UNMASKED_VENDOR_WEBGL, GL_UNMASKED_RENDERER_WEBGL)
+ } else {
+ (glow::VENDOR, glow::RENDERER)
+ }
+ // glow already enables WEBGL_debug_renderer_info on wasm32-unknown-unknown target by default.
+ #[cfg(not(target_os = "emscripten"))]
+ (GL_UNMASKED_VENDOR_WEBGL, GL_UNMASKED_RENDERER_WEBGL)
+ } else {
+ (glow::VENDOR, glow::RENDERER)
+ };
+
+ let (vendor, renderer) = {
+ let vendor = unsafe { gl.get_parameter_string(vendor_const) };
+ let renderer = unsafe { gl.get_parameter_string(renderer_const) };
+
+ (vendor, renderer)
+ };
+ let version = unsafe { gl.get_parameter_string(glow::VERSION) };
+ log::info!("Vendor: {}", vendor);
+ log::info!("Renderer: {}", renderer);
+ log::info!("Version: {}", version);
+
+ log::debug!("Extensions: {:#?}", extensions);
+
+ let ver = Self::parse_version(&version).ok()?;
+ if ver < (3, 0) {
+ log::warn!(
+ "Returned GLES context is {}.{}, when 3.0+ was requested",
+ ver.0,
+ ver.1
+ );
+ return None;
+ }
+
+ let supports_storage = ver >= (3, 1);
+ let supports_work_group_params = ver >= (3, 1);
+
+ let shading_language_version = {
+ let sl_version = unsafe { gl.get_parameter_string(glow::SHADING_LANGUAGE_VERSION) };
+ log::info!("SL version: {}", &sl_version);
+ let (sl_major, sl_minor) = Self::parse_version(&sl_version).ok()?;
+ let value = sl_major as u16 * 100 + sl_minor as u16 * 10;
+ naga::back::glsl::Version::Embedded {
+ version: value,
+ is_webgl: cfg!(target_arch = "wasm32"),
+ }
+ };
+
+ // ANGLE provides renderer strings like: "ANGLE (Apple, Apple M1 Pro, OpenGL 4.1)"
+ let is_angle = renderer.contains("ANGLE");
+
+ let vertex_shader_storage_blocks = if supports_storage {
+ (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_SHADER_STORAGE_BLOCKS) } as u32)
+ } else {
+ 0
+ };
+ let fragment_shader_storage_blocks = if supports_storage {
+ (unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_SHADER_STORAGE_BLOCKS) } as u32)
+ } else {
+ 0
+ };
+ let vertex_shader_storage_textures = if supports_storage {
+ (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_IMAGE_UNIFORMS) } as u32)
+ } else {
+ 0
+ };
+ let fragment_shader_storage_textures = if supports_storage {
+ (unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_IMAGE_UNIFORMS) } as u32)
+ } else {
+ 0
+ };
+ let max_storage_block_size = if supports_storage {
+ (unsafe { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) } as u32)
+ } else {
+ 0
+ };
+ let max_element_index = unsafe { gl.get_parameter_i32(glow::MAX_ELEMENT_INDEX) } as u32;
+
+ // WORKAROUND: In order to work around an issue with GL on RPI4 and similar, we ignore a
+ // zero vertex ssbo count if there are vertex sstos. (more info:
+ // https://github.com/gfx-rs/wgpu/pull/1607#issuecomment-874938961) The hardware does not
+ // want us to write to these SSBOs, but GLES cannot express that. We detect this case and
+ // disable writing to SSBOs.
+ let vertex_ssbo_false_zero =
+ vertex_shader_storage_blocks == 0 && vertex_shader_storage_textures != 0;
+ if vertex_ssbo_false_zero {
+ // We only care about fragment here as the 0 is a lie.
+ log::warn!("Max vertex shader SSBO == 0 and SSTO != 0. Interpreting as false zero.");
+ }
+
+ let max_storage_buffers_per_shader_stage = if vertex_shader_storage_blocks == 0 {
+ fragment_shader_storage_blocks
+ } else {
+ vertex_shader_storage_blocks.min(fragment_shader_storage_blocks)
+ };
+ let max_storage_textures_per_shader_stage = if vertex_shader_storage_textures == 0 {
+ fragment_shader_storage_textures
+ } else {
+ vertex_shader_storage_textures.min(fragment_shader_storage_textures)
+ };
+
+ let mut downlevel_flags = wgt::DownlevelFlags::empty()
+ | wgt::DownlevelFlags::NON_POWER_OF_TWO_MIPMAPPED_TEXTURES
+ | wgt::DownlevelFlags::CUBE_ARRAY_TEXTURES
+ | wgt::DownlevelFlags::COMPARISON_SAMPLERS;
+ downlevel_flags.set(wgt::DownlevelFlags::COMPUTE_SHADERS, ver >= (3, 1));
+ downlevel_flags.set(
+ wgt::DownlevelFlags::FRAGMENT_WRITABLE_STORAGE,
+ max_storage_block_size != 0,
+ );
+ downlevel_flags.set(wgt::DownlevelFlags::INDIRECT_EXECUTION, ver >= (3, 1));
+ //TODO: we can actually support positive `base_vertex` in the same way
+ // as we emulate the `start_instance`. But we can't deal with negatives...
+ downlevel_flags.set(wgt::DownlevelFlags::BASE_VERTEX, ver >= (3, 2));
+ downlevel_flags.set(
+ wgt::DownlevelFlags::INDEPENDENT_BLEND,
+ ver >= (3, 2) || extensions.contains("GL_EXT_draw_buffers_indexed"),
+ );
+ downlevel_flags.set(
+ wgt::DownlevelFlags::VERTEX_STORAGE,
+ max_storage_block_size != 0
+ && max_storage_buffers_per_shader_stage != 0
+ && (vertex_shader_storage_blocks != 0 || vertex_ssbo_false_zero),
+ );
+ downlevel_flags.set(wgt::DownlevelFlags::FRAGMENT_STORAGE, supports_storage);
+ if extensions.contains("EXT_texture_filter_anisotropic") {
+ let max_aniso =
+ unsafe { gl.get_parameter_i32(glow::MAX_TEXTURE_MAX_ANISOTROPY_EXT) } as u32;
+ downlevel_flags.set(wgt::DownlevelFlags::ANISOTROPIC_FILTERING, max_aniso >= 16);
+ }
+ downlevel_flags.set(
+ wgt::DownlevelFlags::BUFFER_BINDINGS_NOT_16_BYTE_ALIGNED,
+ !(cfg!(target_arch = "wasm32") || is_angle),
+ );
+ // see https://registry.khronos.org/webgl/specs/latest/2.0/#BUFFER_OBJECT_BINDING
+ downlevel_flags.set(
+ wgt::DownlevelFlags::UNRESTRICTED_INDEX_BUFFER,
+ !cfg!(target_arch = "wasm32"),
+ );
+ downlevel_flags.set(
+ wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES,
+ !cfg!(target_arch = "wasm32"),
+ );
+ downlevel_flags.set(
+ wgt::DownlevelFlags::FULL_DRAW_INDEX_UINT32,
+ max_element_index == u32::MAX,
+ );
+ downlevel_flags.set(
+ wgt::DownlevelFlags::MULTISAMPLED_SHADING,
+ ver >= (3, 2) || extensions.contains("OES_sample_variables"),
+ );
+
+ let mut features = wgt::Features::empty()
+ | wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
+ | wgt::Features::CLEAR_TEXTURE
+ | wgt::Features::PUSH_CONSTANTS;
+ features.set(
+ wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER | wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO,
+ extensions.contains("GL_EXT_texture_border_clamp"),
+ );
+ features.set(
+ wgt::Features::DEPTH_CLIP_CONTROL,
+ extensions.contains("GL_EXT_depth_clamp"),
+ );
+ features.set(
+ wgt::Features::VERTEX_WRITABLE_STORAGE,
+ downlevel_flags.contains(wgt::DownlevelFlags::VERTEX_STORAGE)
+ && vertex_shader_storage_textures != 0,
+ );
+ features.set(
+ wgt::Features::MULTIVIEW,
+ extensions.contains("OVR_multiview2"),
+ );
+ features.set(
+ wgt::Features::SHADER_PRIMITIVE_INDEX,
+ ver >= (3, 2) || extensions.contains("OES_geometry_shader"),
+ );
+ features.set(wgt::Features::SHADER_EARLY_DEPTH_TEST, ver >= (3, 1));
+ let gles_bcn_exts = [
+ "GL_EXT_texture_compression_s3tc_srgb",
+ "GL_EXT_texture_compression_rgtc",
+ "GL_EXT_texture_compression_bptc",
+ ];
+ let webgl_bcn_exts = [
+ "WEBGL_compressed_texture_s3tc",
+ "WEBGL_compressed_texture_s3tc_srgb",
+ "EXT_texture_compression_rgtc",
+ "EXT_texture_compression_bptc",
+ ];
+ let bcn_exts = if cfg!(target_arch = "wasm32") {
+ &webgl_bcn_exts[..]
+ } else {
+ &gles_bcn_exts[..]
+ };
+ features.set(
+ wgt::Features::TEXTURE_COMPRESSION_BC,
+ bcn_exts.iter().all(|&ext| extensions.contains(ext)),
+ );
+ features.set(
+ wgt::Features::TEXTURE_COMPRESSION_ETC2,
+ // This is a part of GLES-3 but not WebGL2 core
+ !cfg!(target_arch = "wasm32") || extensions.contains("WEBGL_compressed_texture_etc"),
+ );
+ // `OES_texture_compression_astc` provides 2D + 3D, LDR + HDR support
+ if extensions.contains("WEBGL_compressed_texture_astc")
+ || extensions.contains("GL_OES_texture_compression_astc")
+ {
+ features.insert(wgt::Features::TEXTURE_COMPRESSION_ASTC);
+ features.insert(wgt::Features::TEXTURE_COMPRESSION_ASTC_HDR);
+ } else {
+ features.set(
+ wgt::Features::TEXTURE_COMPRESSION_ASTC,
+ extensions.contains("GL_KHR_texture_compression_astc_ldr"),
+ );
+ features.set(
+ wgt::Features::TEXTURE_COMPRESSION_ASTC_HDR,
+ extensions.contains("GL_KHR_texture_compression_astc_hdr"),
+ );
+ }
+
+ let mut private_caps = super::PrivateCapabilities::empty();
+ private_caps.set(
+ super::PrivateCapabilities::BUFFER_ALLOCATION,
+ extensions.contains("GL_EXT_buffer_storage"),
+ );
+ private_caps.set(
+ super::PrivateCapabilities::SHADER_BINDING_LAYOUT,
+ ver >= (3, 1),
+ );
+ private_caps.set(
+ super::PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD,
+ extensions.contains("GL_EXT_texture_shadow_lod"),
+ );
+ private_caps.set(super::PrivateCapabilities::MEMORY_BARRIERS, ver >= (3, 1));
+ private_caps.set(
+ super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT,
+ ver >= (3, 1),
+ );
+ private_caps.set(
+ super::PrivateCapabilities::INDEX_BUFFER_ROLE_CHANGE,
+ !cfg!(target_arch = "wasm32"),
+ );
+ private_caps.set(
+ super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER,
+ !cfg!(target_arch = "wasm32"),
+ );
+ private_caps.set(
+ super::PrivateCapabilities::GET_BUFFER_SUB_DATA,
+ cfg!(target_arch = "wasm32"),
+ );
+ let color_buffer_float = extensions.contains("GL_EXT_color_buffer_float")
+ || extensions.contains("EXT_color_buffer_float");
+ let color_buffer_half_float = extensions.contains("GL_EXT_color_buffer_half_float");
+ private_caps.set(
+ super::PrivateCapabilities::COLOR_BUFFER_HALF_FLOAT,
+ color_buffer_half_float || color_buffer_float,
+ );
+ private_caps.set(
+ super::PrivateCapabilities::COLOR_BUFFER_FLOAT,
+ color_buffer_float,
+ );
+ private_caps.set(
+ super::PrivateCapabilities::TEXTURE_FLOAT_LINEAR,
+ extensions.contains("OES_texture_float_linear"),
+ );
+
+ let max_texture_size = unsafe { gl.get_parameter_i32(glow::MAX_TEXTURE_SIZE) } as u32;
+ let max_texture_3d_size = unsafe { gl.get_parameter_i32(glow::MAX_3D_TEXTURE_SIZE) } as u32;
+
+ let min_uniform_buffer_offset_alignment =
+ (unsafe { gl.get_parameter_i32(glow::UNIFORM_BUFFER_OFFSET_ALIGNMENT) } as u32);
+ let min_storage_buffer_offset_alignment = if ver >= (3, 1) {
+ (unsafe { gl.get_parameter_i32(glow::SHADER_STORAGE_BUFFER_OFFSET_ALIGNMENT) } as u32)
+ } else {
+ 256
+ };
+ let max_uniform_buffers_per_shader_stage =
+ unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_UNIFORM_BLOCKS) }
+ .min(unsafe { gl.get_parameter_i32(glow::MAX_FRAGMENT_UNIFORM_BLOCKS) })
+ as u32;
+
+ let max_compute_workgroups_per_dimension = if supports_work_group_params {
+ unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 0) }
+ .min(unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 1) })
+ .min(unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_COUNT, 2) })
+ as u32
+ } else {
+ 0
+ };
+
+ let limits = wgt::Limits {
+ max_texture_dimension_1d: max_texture_size,
+ max_texture_dimension_2d: max_texture_size,
+ max_texture_dimension_3d: max_texture_3d_size,
+ max_texture_array_layers: unsafe {
+ gl.get_parameter_i32(glow::MAX_ARRAY_TEXTURE_LAYERS)
+ } as u32,
+ max_bind_groups: crate::MAX_BIND_GROUPS as u32,
+ max_bindings_per_bind_group: 65535,
+ max_dynamic_uniform_buffers_per_pipeline_layout: max_uniform_buffers_per_shader_stage,
+ max_dynamic_storage_buffers_per_pipeline_layout: max_storage_buffers_per_shader_stage,
+ max_sampled_textures_per_shader_stage: super::MAX_TEXTURE_SLOTS as u32,
+ max_samplers_per_shader_stage: super::MAX_SAMPLERS as u32,
+ max_storage_buffers_per_shader_stage,
+ max_storage_textures_per_shader_stage,
+ max_uniform_buffers_per_shader_stage,
+ max_uniform_buffer_binding_size: unsafe {
+ gl.get_parameter_i32(glow::MAX_UNIFORM_BLOCK_SIZE)
+ } as u32,
+ max_storage_buffer_binding_size: if ver >= (3, 1) {
+ unsafe { gl.get_parameter_i32(glow::MAX_SHADER_STORAGE_BLOCK_SIZE) }
+ } else {
+ 0
+ } as u32,
+ max_vertex_buffers: if private_caps
+ .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT)
+ {
+ (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_BINDINGS) } as u32)
+ } else {
+ 16 // should this be different?
+ },
+ max_vertex_attributes: (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIBS) }
+ as u32)
+ .min(super::MAX_VERTEX_ATTRIBUTES as u32),
+ max_vertex_buffer_array_stride: if private_caps
+ .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT)
+ {
+ (unsafe { gl.get_parameter_i32(glow::MAX_VERTEX_ATTRIB_STRIDE) } as u32)
+ } else {
+ !0
+ },
+ max_push_constant_size: super::MAX_PUSH_CONSTANTS as u32 * 4,
+ min_uniform_buffer_offset_alignment,
+ min_storage_buffer_offset_alignment,
+ max_inter_stage_shader_components: unsafe {
+ gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS)
+ } as u32,
+ max_compute_workgroup_storage_size: if supports_work_group_params {
+ (unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) } as u32)
+ } else {
+ 0
+ },
+ max_compute_invocations_per_workgroup: if supports_work_group_params {
+ (unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_WORK_GROUP_INVOCATIONS) } as u32)
+ } else {
+ 0
+ },
+ max_compute_workgroup_size_x: if supports_work_group_params {
+ (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 0) }
+ as u32)
+ } else {
+ 0
+ },
+ max_compute_workgroup_size_y: if supports_work_group_params {
+ (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 1) }
+ as u32)
+ } else {
+ 0
+ },
+ max_compute_workgroup_size_z: if supports_work_group_params {
+ (unsafe { gl.get_parameter_indexed_i32(glow::MAX_COMPUTE_WORK_GROUP_SIZE, 2) }
+ as u32)
+ } else {
+ 0
+ },
+ max_compute_workgroups_per_dimension,
+ max_buffer_size: i32::MAX as u64,
+ };
+
+ let mut workarounds = super::Workarounds::empty();
+
+ workarounds.set(
+ super::Workarounds::EMULATE_BUFFER_MAP,
+ cfg!(target_arch = "wasm32"),
+ );
+
+ let r = renderer.to_lowercase();
+ // Check for Mesa sRGB clear bug. See
+ // [`super::PrivateCapabilities::MESA_I915_SRGB_SHADER_CLEAR`].
+ if context.is_owned()
+ && r.contains("mesa")
+ && r.contains("intel")
+ && r.split(&[' ', '(', ')'][..])
+ .any(|substr| substr.len() == 3 && substr.chars().nth(2) == Some('l'))
+ {
+ log::warn!(
+ "Detected skylake derivative running on mesa i915. Clears to srgb textures will \
+ use manual shader clears."
+ );
+ workarounds.set(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR, true);
+ }
+
+ let downlevel_defaults = wgt::DownlevelLimits {};
+
+ // Drop the GL guard so we can move the context into AdapterShared
+ // ( on WASM the gl handle is just a ref so we tell clippy to allow
+ // dropping the ref )
+ #[allow(clippy::drop_ref)]
+ drop(gl);
+
+ Some(crate::ExposedAdapter {
+ adapter: super::Adapter {
+ shared: Arc::new(super::AdapterShared {
+ context,
+ private_caps,
+ workarounds,
+ features,
+ shading_language_version,
+ max_texture_size,
+ next_shader_id: Default::default(),
+ program_cache: Default::default(),
+ }),
+ },
+ info: Self::make_info(vendor, renderer),
+ features,
+ capabilities: crate::Capabilities {
+ limits,
+ downlevel: wgt::DownlevelCapabilities {
+ flags: downlevel_flags,
+ limits: downlevel_defaults,
+ shader_model: wgt::ShaderModel::Sm5,
+ },
+ alignments: crate::Alignments {
+ buffer_copy_offset: wgt::BufferSize::new(4).unwrap(),
+ buffer_copy_pitch: wgt::BufferSize::new(4).unwrap(),
+ },
+ },
+ })
+ }
+
+ unsafe fn create_shader_clear_program(
+ gl: &glow::Context,
+ ) -> (glow::Program, glow::UniformLocation) {
+ let program = unsafe { gl.create_program() }.expect("Could not create shader program");
+ let vertex =
+ unsafe { gl.create_shader(glow::VERTEX_SHADER) }.expect("Could not create shader");
+ unsafe { gl.shader_source(vertex, include_str!("./shaders/clear.vert")) };
+ unsafe { gl.compile_shader(vertex) };
+ let fragment =
+ unsafe { gl.create_shader(glow::FRAGMENT_SHADER) }.expect("Could not create shader");
+ unsafe { gl.shader_source(fragment, include_str!("./shaders/clear.frag")) };
+ unsafe { gl.compile_shader(fragment) };
+ unsafe { gl.attach_shader(program, vertex) };
+ unsafe { gl.attach_shader(program, fragment) };
+ unsafe { gl.link_program(program) };
+ let color_uniform_location = unsafe { gl.get_uniform_location(program, "color") }
+ .expect("Could not find color uniform in shader clear shader");
+ unsafe { gl.delete_shader(vertex) };
+ unsafe { gl.delete_shader(fragment) };
+
+ (program, color_uniform_location)
+ }
+}
+
+impl crate::Adapter<super::Api> for super::Adapter {
+ unsafe fn open(
+ &self,
+ features: wgt::Features,
+ _limits: &wgt::Limits,
+ ) -> Result<crate::OpenDevice<super::Api>, crate::DeviceError> {
+ let gl = &self.shared.context.lock();
+ unsafe { gl.pixel_store_i32(glow::UNPACK_ALIGNMENT, 1) };
+ unsafe { gl.pixel_store_i32(glow::PACK_ALIGNMENT, 1) };
+ let main_vao =
+ unsafe { gl.create_vertex_array() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
+ unsafe { gl.bind_vertex_array(Some(main_vao)) };
+
+ let zero_buffer =
+ unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
+ unsafe { gl.bind_buffer(glow::COPY_READ_BUFFER, Some(zero_buffer)) };
+ let zeroes = vec![0u8; super::ZERO_BUFFER_SIZE];
+ unsafe { gl.buffer_data_u8_slice(glow::COPY_READ_BUFFER, &zeroes, glow::STATIC_DRAW) };
+
+ // Compile the shader program we use for doing manual clears to work around Mesa fastclear
+ // bug.
+ let (shader_clear_program, shader_clear_program_color_uniform_location) =
+ unsafe { Self::create_shader_clear_program(gl) };
+
+ Ok(crate::OpenDevice {
+ device: super::Device {
+ shared: Arc::clone(&self.shared),
+ main_vao,
+ #[cfg(all(not(target_arch = "wasm32"), feature = "renderdoc"))]
+ render_doc: Default::default(),
+ },
+ queue: super::Queue {
+ shared: Arc::clone(&self.shared),
+ features,
+ draw_fbo: unsafe { gl.create_framebuffer() }
+ .map_err(|_| crate::DeviceError::OutOfMemory)?,
+ copy_fbo: unsafe { gl.create_framebuffer() }
+ .map_err(|_| crate::DeviceError::OutOfMemory)?,
+ shader_clear_program,
+ shader_clear_program_color_uniform_location,
+ zero_buffer,
+ temp_query_results: Vec::new(),
+ draw_buffer_count: 1,
+ current_index_buffer: None,
+ },
+ })
+ }
+
+ unsafe fn texture_format_capabilities(
+ &self,
+ format: wgt::TextureFormat,
+ ) -> crate::TextureFormatCapabilities {
+ use crate::TextureFormatCapabilities as Tfc;
+ use wgt::TextureFormat as Tf;
+
+ let sample_count = {
+ let max_samples = unsafe {
+ self.shared
+ .context
+ .lock()
+ .get_parameter_i32(glow::MAX_SAMPLES)
+ };
+ if max_samples >= 16 {
+ Tfc::MULTISAMPLE_X2
+ | Tfc::MULTISAMPLE_X4
+ | Tfc::MULTISAMPLE_X8
+ | Tfc::MULTISAMPLE_X16
+ } else if max_samples >= 8 {
+ Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4 | Tfc::MULTISAMPLE_X8
+ } else if max_samples >= 4 {
+ Tfc::MULTISAMPLE_X2 | Tfc::MULTISAMPLE_X4
+ } else {
+ Tfc::MULTISAMPLE_X2
+ }
+ };
+
+ // Base types are pulled from the table in the OpenGLES 3.0 spec in section 3.8.
+ //
+ // The storage types are based on table 8.26, in section
+ // "TEXTURE IMAGE LOADS AND STORES" of OpenGLES-3.2 spec.
+ let empty = Tfc::empty();
+ let base = Tfc::COPY_SRC | Tfc::COPY_DST;
+ let unfilterable = base | Tfc::SAMPLED;
+ let depth = base | Tfc::SAMPLED | sample_count | Tfc::DEPTH_STENCIL_ATTACHMENT;
+ let filterable = unfilterable | Tfc::SAMPLED_LINEAR;
+ let renderable =
+ unfilterable | Tfc::COLOR_ATTACHMENT | sample_count | Tfc::MULTISAMPLE_RESOLVE;
+ let filterable_renderable = filterable | renderable | Tfc::COLOR_ATTACHMENT_BLEND;
+ let storage = base | Tfc::STORAGE | Tfc::STORAGE_READ_WRITE;
+
+ let feature_fn = |f, caps| {
+ if self.shared.features.contains(f) {
+ caps
+ } else {
+ empty
+ }
+ };
+
+ let bcn_features = feature_fn(wgt::Features::TEXTURE_COMPRESSION_BC, filterable);
+ let etc2_features = feature_fn(wgt::Features::TEXTURE_COMPRESSION_ETC2, filterable);
+ let astc_features = feature_fn(wgt::Features::TEXTURE_COMPRESSION_ASTC, filterable);
+ let astc_hdr_features = feature_fn(wgt::Features::TEXTURE_COMPRESSION_ASTC_HDR, filterable);
+
+ let private_caps_fn = |f, caps| {
+ if self.shared.private_caps.contains(f) {
+ caps
+ } else {
+ empty
+ }
+ };
+
+ let half_float_renderable = private_caps_fn(
+ super::PrivateCapabilities::COLOR_BUFFER_HALF_FLOAT,
+ Tfc::COLOR_ATTACHMENT
+ | Tfc::COLOR_ATTACHMENT_BLEND
+ | sample_count
+ | Tfc::MULTISAMPLE_RESOLVE,
+ );
+
+ let float_renderable = private_caps_fn(
+ super::PrivateCapabilities::COLOR_BUFFER_FLOAT,
+ Tfc::COLOR_ATTACHMENT
+ | Tfc::COLOR_ATTACHMENT_BLEND
+ | sample_count
+ | Tfc::MULTISAMPLE_RESOLVE,
+ );
+
+ let texture_float_linear =
+ private_caps_fn(super::PrivateCapabilities::TEXTURE_FLOAT_LINEAR, filterable);
+
+ match format {
+ Tf::R8Unorm => filterable_renderable,
+ Tf::R8Snorm => filterable,
+ Tf::R8Uint => renderable,
+ Tf::R8Sint => renderable,
+ Tf::R16Uint => renderable,
+ Tf::R16Sint => renderable,
+ Tf::R16Unorm => empty,
+ Tf::R16Snorm => empty,
+ Tf::R16Float => filterable | half_float_renderable,
+ Tf::Rg8Unorm => filterable_renderable,
+ Tf::Rg8Snorm => filterable,
+ Tf::Rg8Uint => renderable,
+ Tf::Rg8Sint => renderable,
+ Tf::R32Uint => renderable | storage,
+ Tf::R32Sint => renderable | storage,
+ Tf::R32Float => unfilterable | storage | float_renderable | texture_float_linear,
+ Tf::Rg16Uint => renderable,
+ Tf::Rg16Sint => renderable,
+ Tf::Rg16Unorm => empty,
+ Tf::Rg16Snorm => empty,
+ Tf::Rg16Float => filterable | half_float_renderable,
+ Tf::Rgba8Unorm | Tf::Rgba8UnormSrgb => filterable_renderable | storage,
+ Tf::Bgra8Unorm | Tf::Bgra8UnormSrgb => filterable_renderable,
+ Tf::Rgba8Snorm => filterable,
+ Tf::Rgba8Uint => renderable | storage,
+ Tf::Rgba8Sint => renderable | storage,
+ Tf::Rgb10a2Unorm => filterable_renderable,
+ Tf::Rg11b10Float => filterable | float_renderable,
+ Tf::Rg32Uint => renderable,
+ Tf::Rg32Sint => renderable,
+ Tf::Rg32Float => unfilterable | float_renderable | texture_float_linear,
+ Tf::Rgba16Uint => renderable | storage,
+ Tf::Rgba16Sint => renderable | storage,
+ Tf::Rgba16Unorm => empty,
+ Tf::Rgba16Snorm => empty,
+ Tf::Rgba16Float => filterable | storage | half_float_renderable,
+ Tf::Rgba32Uint => renderable | storage,
+ Tf::Rgba32Sint => renderable | storage,
+ Tf::Rgba32Float => unfilterable | storage | float_renderable | texture_float_linear,
+ Tf::Stencil8
+ | Tf::Depth16Unorm
+ | Tf::Depth32Float
+ | Tf::Depth32FloatStencil8
+ | Tf::Depth24Plus
+ | Tf::Depth24PlusStencil8 => depth,
+ Tf::Rgb9e5Ufloat => filterable,
+ Tf::Bc1RgbaUnorm
+ | Tf::Bc1RgbaUnormSrgb
+ | Tf::Bc2RgbaUnorm
+ | Tf::Bc2RgbaUnormSrgb
+ | Tf::Bc3RgbaUnorm
+ | Tf::Bc3RgbaUnormSrgb
+ | Tf::Bc4RUnorm
+ | Tf::Bc4RSnorm
+ | Tf::Bc5RgUnorm
+ | Tf::Bc5RgSnorm
+ | Tf::Bc6hRgbFloat
+ | Tf::Bc6hRgbUfloat
+ | Tf::Bc7RgbaUnorm
+ | Tf::Bc7RgbaUnormSrgb => bcn_features,
+ Tf::Etc2Rgb8Unorm
+ | Tf::Etc2Rgb8UnormSrgb
+ | Tf::Etc2Rgb8A1Unorm
+ | Tf::Etc2Rgb8A1UnormSrgb
+ | Tf::Etc2Rgba8Unorm
+ | Tf::Etc2Rgba8UnormSrgb
+ | Tf::EacR11Unorm
+ | Tf::EacR11Snorm
+ | Tf::EacRg11Unorm
+ | Tf::EacRg11Snorm => etc2_features,
+ Tf::Astc {
+ block: _,
+ channel: AstcChannel::Unorm | AstcChannel::UnormSrgb,
+ } => astc_features,
+ Tf::Astc {
+ block: _,
+ channel: AstcChannel::Hdr,
+ } => astc_hdr_features,
+ }
+ }
+
+ unsafe fn surface_capabilities(
+ &self,
+ surface: &super::Surface,
+ ) -> Option<crate::SurfaceCapabilities> {
+ if surface.presentable {
+ let mut formats = vec![
+ wgt::TextureFormat::Rgba8Unorm,
+ #[cfg(not(target_arch = "wasm32"))]
+ wgt::TextureFormat::Bgra8Unorm,
+ ];
+ if surface.supports_srgb() {
+ formats.extend([
+ wgt::TextureFormat::Rgba8UnormSrgb,
+ #[cfg(not(target_arch = "wasm32"))]
+ wgt::TextureFormat::Bgra8UnormSrgb,
+ ])
+ }
+ if self
+ .shared
+ .private_caps
+ .contains(super::PrivateCapabilities::COLOR_BUFFER_HALF_FLOAT)
+ {
+ formats.push(wgt::TextureFormat::Rgba16Float)
+ }
+
+ Some(crate::SurfaceCapabilities {
+ formats,
+ present_modes: vec![wgt::PresentMode::Fifo], //TODO
+ composite_alpha_modes: vec![wgt::CompositeAlphaMode::Opaque], //TODO
+ swap_chain_sizes: 2..=2,
+ current_extent: None,
+ extents: wgt::Extent3d {
+ width: 4,
+ height: 4,
+ depth_or_array_layers: 1,
+ }..=wgt::Extent3d {
+ width: self.shared.max_texture_size,
+ height: self.shared.max_texture_size,
+ depth_or_array_layers: 1,
+ },
+ usage: crate::TextureUses::COLOR_TARGET,
+ })
+ } else {
+ None
+ }
+ }
+
+ unsafe fn get_presentation_timestamp(&self) -> wgt::PresentationTimestamp {
+ wgt::PresentationTimestamp::INVALID_TIMESTAMP
+ }
+}
+
+impl super::AdapterShared {
+ pub(super) unsafe fn get_buffer_sub_data(
+ &self,
+ gl: &glow::Context,
+ target: u32,
+ offset: i32,
+ dst_data: &mut [u8],
+ ) {
+ if self
+ .private_caps
+ .contains(super::PrivateCapabilities::GET_BUFFER_SUB_DATA)
+ {
+ unsafe { gl.get_buffer_sub_data(target, offset, dst_data) };
+ } else {
+ log::error!("Fake map");
+ let length = dst_data.len();
+ let buffer_mapping =
+ unsafe { gl.map_buffer_range(target, offset, length as _, glow::MAP_READ_BIT) };
+
+ unsafe { std::ptr::copy_nonoverlapping(buffer_mapping, dst_data.as_mut_ptr(), length) };
+
+ unsafe { gl.unmap_buffer(target) };
+ }
+ }
+}
+
+// SAFE: WASM doesn't have threads
+#[cfg(target_arch = "wasm32")]
+unsafe impl Sync for super::Adapter {}
+#[cfg(target_arch = "wasm32")]
+unsafe impl Send for super::Adapter {}
+
+#[cfg(test)]
+mod tests {
+ use super::super::Adapter;
+
+ #[test]
+ fn test_version_parse() {
+ let error = Err(crate::InstanceError);
+ assert_eq!(Adapter::parse_version("1"), error);
+ assert_eq!(Adapter::parse_version("1."), error);
+ assert_eq!(Adapter::parse_version("1 h3l1o. W0rld"), error);
+ assert_eq!(Adapter::parse_version("1. h3l1o. W0rld"), error);
+ assert_eq!(Adapter::parse_version("1.2.3"), error);
+ assert_eq!(Adapter::parse_version("OpenGL ES 3.1"), Ok((3, 1)));
+ assert_eq!(
+ Adapter::parse_version("OpenGL ES 2.0 Google Nexus"),
+ Ok((2, 0))
+ );
+ assert_eq!(Adapter::parse_version("GLSL ES 1.1"), Ok((1, 1)));
+ assert_eq!(Adapter::parse_version("OpenGL ES GLSL ES 3.20"), Ok((3, 2)));
+ assert_eq!(
+ // WebGL 2.0 should parse as OpenGL ES 3.0
+ Adapter::parse_version("WebGL 2.0 (OpenGL ES 3.0 Chromium)"),
+ Ok((3, 0))
+ );
+ assert_eq!(
+ Adapter::parse_version("WebGL GLSL ES 3.00 (OpenGL ES GLSL ES 3.0 Chromium)"),
+ Ok((3, 0))
+ );
+ }
+}
diff --git a/third_party/rust/wgpu-hal/src/gles/command.rs b/third_party/rust/wgpu-hal/src/gles/command.rs
new file mode 100644
index 0000000000..5e3a1c52c8
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/command.rs
@@ -0,0 +1,1063 @@
+use super::{conv, Command as C};
+use arrayvec::ArrayVec;
+use std::{mem, ops::Range};
+
+/// What is bound to one texture unit: the GL bind target of the texture and,
+/// optionally, the index of the sampler the current pipeline pairs with it.
+#[derive(Clone, Copy, Debug, Default)]
+struct TextureSlotDesc {
+    // GL bind target the slot's texture binds to.
+    tex_target: super::BindTarget,
+    // Index into `State::samplers`, if the pipeline pairs this texture unit
+    // with a sampler (see `set_pipeline_inner` / `rebind_sampler_states`).
+    sampler_index: Option<u8>,
+}
+
+/// Encoder-side state tracked while recording commands. Used to lazily emit
+/// state-change commands and to rebind vertex data when the first-instance
+/// value changes.
+#[derive(Default)]
+pub(super) struct State {
+    // GL primitive topology used by subsequent draw commands.
+    topology: u32,
+    primitive: super::PrimitiveState,
+    index_format: wgt::IndexFormat,
+    // Byte offset into the currently bound index buffer.
+    index_offset: wgt::BufferAddress,
+    vertex_buffers:
+        [(super::VertexBufferDesc, Option<super::BufferBinding>); crate::MAX_VERTEX_BUFFERS],
+    vertex_attributes: ArrayVec<super::AttributeDesc, { super::MAX_VERTEX_ATTRIBUTES }>,
+    color_targets: ArrayVec<super::ColorTargetDesc, { crate::MAX_COLOR_ATTACHMENTS }>,
+    stencil: super::StencilState,
+    depth_bias: wgt::DepthBiasState,
+    alpha_to_coverage_enabled: bool,
+    samplers: [Option<glow::Sampler>; super::MAX_SAMPLERS],
+    texture_slots: [TextureSlotDesc; super::MAX_TEXTURE_SLOTS],
+    // Extent of the current render pass; used when resolving attachments.
+    render_size: wgt::Extent3d,
+    // (attachment, destination view) pairs resolved at end of pass.
+    resolve_attachments: ArrayVec<(u32, super::TextureView), { crate::MAX_COLOR_ATTACHMENTS }>,
+    // Attachments to invalidate at end of pass (ops without STORE); +2 for
+    // the depth and stencil attachments.
+    invalidate_attachments: ArrayVec<u32, { crate::MAX_COLOR_ATTACHMENTS + 2 }>,
+    has_pass_label: bool,
+    // Bit i set => vertex buffer i steps per instance.
+    instance_vbuf_mask: usize,
+    // Bit i set => vertex buffer i must be rebound before the next draw.
+    dirty_vbuf_mask: usize,
+    active_first_instance: u32,
+    // Maps each 4-byte push-constant slot to the uniform starting there.
+    push_offset_to_uniform: ArrayVec<super::UniformDesc, { super::MAX_PUSH_CONSTANTS }>,
+}
+
+impl super::CommandBuffer {
+    /// Resets the command buffer so it can be re-recorded, keeping the
+    /// allocations of its internal vectors.
+    fn clear(&mut self) {
+        self.label = None;
+        self.commands.clear();
+        self.data_bytes.clear();
+        self.queries.clear();
+    }
+
+    /// Appends a debug-marker string to the data blob and returns the byte
+    /// range it occupies, for retrieval during command playback.
+    fn add_marker(&mut self, marker: &str) -> Range<u32> {
+        let start = self.data_bytes.len() as u32;
+        self.data_bytes.extend(marker.as_bytes());
+        start..self.data_bytes.len() as u32
+    }
+
+    /// Appends raw push-constant words to the data blob (native byte order)
+    /// and returns the byte range they occupy.
+    fn add_push_constant_data(&mut self, data: &[u32]) -> Range<u32> {
+        let start = self.data_bytes.len();
+        assert!(start < u32::MAX as usize);
+        // Store each word as its native-endian bytes. This produces exactly
+        // the in-memory byte layout of `data`, replacing the previous
+        // `unsafe` pointer reinterpretation with safe code.
+        self.data_bytes
+            .extend(data.iter().flat_map(|word| word.to_ne_bytes()));
+        let end = self.data_bytes.len();
+        assert!(end < u32::MAX as usize);
+        (start as u32)..(end as u32)
+    }
+}
+
+impl super::CommandEncoder {
+    /// Emits `SetStencilFunc` commands reflecting `self.state.stencil`,
+    /// collapsing front/back into a single FRONT_AND_BACK command when both
+    /// faces share the same function, read mask, and reference.
+    fn rebind_stencil_func(&mut self) {
+        // Builds one SetStencilFunc command for the given face.
+        fn make(s: &super::StencilSide, face: u32) -> C {
+            C::SetStencilFunc {
+                face,
+                function: s.function,
+                reference: s.reference,
+                read_mask: s.mask_read,
+            }
+        }
+
+        let s = &self.state.stencil;
+        if s.front.function == s.back.function
+            && s.front.mask_read == s.back.mask_read
+            && s.front.reference == s.back.reference
+        {
+            self.cmd_buffer
+                .commands
+                .push(make(&s.front, glow::FRONT_AND_BACK));
+        } else {
+            self.cmd_buffer.commands.push(make(&s.front, glow::FRONT));
+            self.cmd_buffer.commands.push(make(&s.back, glow::BACK));
+        }
+    }
+
+    /// Re-emits bindings for every vertex buffer flagged in
+    /// `state.dirty_vbuf_mask`, folding `first_instance` into the byte
+    /// offsets of per-instance buffers/attributes (the draw commands carry
+    /// no base-instance parameter).
+    fn rebind_vertex_data(&mut self, first_instance: u32) {
+        if self
+            .private_caps
+            .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT)
+        {
+            // Separate vertex-buffer layout is supported: only the buffer
+            // bindings need to be re-emitted.
+            for (index, pair) in self.state.vertex_buffers.iter().enumerate() {
+                if self.state.dirty_vbuf_mask & (1 << index) == 0 {
+                    continue;
+                }
+                let (buffer_desc, vb) = match *pair {
+                    // Not all dirty bindings are necessarily filled. Some may be unused.
+                    (_, None) => continue,
+                    (ref vb_desc, Some(ref vb)) => (vb_desc.clone(), vb),
+                };
+                let instance_offset = match buffer_desc.step {
+                    wgt::VertexStepMode::Vertex => 0,
+                    wgt::VertexStepMode::Instance => first_instance * buffer_desc.stride,
+                };
+
+                self.cmd_buffer.commands.push(C::SetVertexBuffer {
+                    index: index as u32,
+                    buffer: super::BufferBinding {
+                        raw: vb.raw,
+                        offset: vb.offset + instance_offset as wgt::BufferAddress,
+                    },
+                    buffer_desc,
+                });
+                self.state.dirty_vbuf_mask ^= 1 << index;
+            }
+        } else {
+            // No separate layout: attributes carry the buffer offset, so
+            // every attribute sourced from a dirty buffer is re-specified.
+            let mut vbuf_mask = 0;
+            for attribute in self.state.vertex_attributes.iter() {
+                if self.state.dirty_vbuf_mask & (1 << attribute.buffer_index) == 0 {
+                    continue;
+                }
+                let (buffer_desc, vb) =
+                    match self.state.vertex_buffers[attribute.buffer_index as usize] {
+                        // Not all dirty bindings are necessarily filled. Some may be unused.
+                        (_, None) => continue,
+                        (ref vb_desc, Some(ref vb)) => (vb_desc.clone(), vb),
+                    };
+
+                let mut attribute_desc = attribute.clone();
+                attribute_desc.offset += vb.offset as u32;
+                if buffer_desc.step == wgt::VertexStepMode::Instance {
+                    attribute_desc.offset += buffer_desc.stride * first_instance;
+                }
+
+                self.cmd_buffer.commands.push(C::SetVertexAttribute {
+                    buffer: Some(vb.raw),
+                    buffer_desc,
+                    attribute_desc,
+                });
+                vbuf_mask |= 1 << attribute.buffer_index;
+            }
+            self.state.dirty_vbuf_mask ^= vbuf_mask;
+        }
+    }
+
+    /// Emits `BindSampler` for every texture slot whose texture or
+    /// associated sampler is flagged dirty in the given bitmasks.
+    fn rebind_sampler_states(&mut self, dirty_textures: u32, dirty_samplers: u32) {
+        for (texture_index, slot) in self.state.texture_slots.iter().enumerate() {
+            if dirty_textures & (1 << texture_index) != 0
+                || slot
+                    .sampler_index
+                    .map_or(false, |si| dirty_samplers & (1 << si) != 0)
+            {
+                let sampler = slot
+                    .sampler_index
+                    .and_then(|si| self.state.samplers[si as usize]);
+                self.cmd_buffer
+                    .commands
+                    .push(C::BindSampler(texture_index as u32, sampler));
+            }
+        }
+    }
+
+    /// Called before every draw: flushes dirty vertex bindings, rebinding
+    /// all per-instance buffers whenever the first-instance value changes.
+    fn prepare_draw(&mut self, first_instance: u32) {
+        if first_instance != self.state.active_first_instance {
+            // rebind all per-instance buffers on first-instance change
+            self.state.dirty_vbuf_mask |= self.state.instance_vbuf_mask;
+            self.state.active_first_instance = first_instance;
+        }
+        if self.state.dirty_vbuf_mask != 0 {
+            self.rebind_vertex_data(first_instance);
+        }
+    }
+
+    /// Common part of binding a render or compute pipeline: activates the
+    /// program, captures its push-constant layout, and re-pairs texture
+    /// slots with the samplers the program expects.
+    fn set_pipeline_inner(&mut self, inner: &super::PipelineInner) {
+        self.cmd_buffer.commands.push(C::SetProgram(inner.program));
+
+        self.state.push_offset_to_uniform.clear();
+        self.state
+            .push_offset_to_uniform
+            .extend(inner.uniforms.iter().cloned());
+
+        // rebind textures, if needed
+        let mut dirty_textures = 0u32;
+        for (texture_index, (slot, &sampler_index)) in self
+            .state
+            .texture_slots
+            .iter_mut()
+            .zip(inner.sampler_map.iter())
+            .enumerate()
+        {
+            if slot.sampler_index != sampler_index {
+                slot.sampler_index = sampler_index;
+                dirty_textures |= 1 << texture_index;
+            }
+        }
+        if dirty_textures != 0 {
+            self.rebind_sampler_states(dirty_textures, 0);
+        }
+    }
+}
+
+impl crate::CommandEncoder<super::Api> for super::CommandEncoder {
+    unsafe fn begin_encoding(&mut self, label: crate::Label) -> Result<(), crate::DeviceError> {
+        // Start each recording session from a fresh tracked state.
+        self.state = State::default();
+        self.cmd_buffer.label = label.map(str::to_string);
+        Ok(())
+    }
+    unsafe fn discard_encoding(&mut self) {
+        self.cmd_buffer.clear();
+    }
+    unsafe fn end_encoding(&mut self) -> Result<super::CommandBuffer, crate::DeviceError> {
+        // Hand the recorded buffer to the caller, leaving an empty one behind.
+        Ok(mem::take(&mut self.cmd_buffer))
+    }
+    unsafe fn reset_all<I>(&mut self, _command_buffers: I) {
+        //TODO: could re-use the allocations in all these command buffers
+    }
+
+    unsafe fn transition_buffers<'a, T>(&mut self, barriers: T)
+    where
+        T: Iterator<Item = crate::BufferBarrier<'a, super::Api>>,
+    {
+        // Without the MEMORY_BARRIERS capability there is nothing to emit.
+        if !self
+            .private_caps
+            .contains(super::PrivateCapabilities::MEMORY_BARRIERS)
+        {
+            return;
+        }
+        for bar in barriers {
+            // GLES only synchronizes storage -> anything explicitly
+            if !bar
+                .usage
+                .start
+                .contains(crate::BufferUses::STORAGE_READ_WRITE)
+            {
+                continue;
+            }
+            self.cmd_buffer
+                .commands
+                .push(C::BufferBarrier(bar.buffer.raw.unwrap(), bar.usage.end));
+        }
+    }
+
+    unsafe fn transition_textures<'a, T>(&mut self, barriers: T)
+    where
+        T: Iterator<Item = crate::TextureBarrier<'a, super::Api>>,
+    {
+        if !self
+            .private_caps
+            .contains(super::PrivateCapabilities::MEMORY_BARRIERS)
+        {
+            return;
+        }
+
+        // Union the destination usages of all storage transitions and emit
+        // a single combined barrier command.
+        let mut combined_usage = crate::TextureUses::empty();
+        for bar in barriers {
+            // GLES only synchronizes storage -> anything explicitly
+            if !bar
+                .usage
+                .start
+                .contains(crate::TextureUses::STORAGE_READ_WRITE)
+            {
+                continue;
+            }
+            // unlike buffers, there is no need for a concrete texture
+            // object to be bound anywhere for a barrier
+            combined_usage |= bar.usage.end;
+        }
+
+        if !combined_usage.is_empty() {
+            self.cmd_buffer
+                .commands
+                .push(C::TextureBarrier(combined_usage));
+        }
+    }
+
+    unsafe fn clear_buffer(&mut self, buffer: &super::Buffer, range: crate::MemoryRange) {
+        self.cmd_buffer.commands.push(C::ClearBuffer {
+            dst: buffer.clone(),
+            dst_target: buffer.target,
+            range,
+        });
+    }
+
+    unsafe fn copy_buffer_to_buffer<T>(
+        &mut self,
+        src: &super::Buffer,
+        dst: &super::Buffer,
+        regions: T,
+    ) where
+        T: Iterator<Item = crate::BufferCopy>,
+    {
+        // When both buffers use the same bind target, fall back to the
+        // dedicated COPY_READ/COPY_WRITE targets — presumably so src and dst
+        // can be bound simultaneously during playback (NOTE(review): confirm).
+        let (src_target, dst_target) = if src.target == dst.target {
+            (glow::COPY_READ_BUFFER, glow::COPY_WRITE_BUFFER)
+        } else {
+            (src.target, dst.target)
+        };
+        for copy in regions {
+            self.cmd_buffer.commands.push(C::CopyBufferToBuffer {
+                src: src.clone(),
+                src_target,
+                dst: dst.clone(),
+                dst_target,
+                copy,
+            })
+        }
+    }
+
+    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+    unsafe fn copy_external_image_to_texture<T>(
+        &mut self,
+        src: &wgt::ImageCopyExternalImage,
+        dst: &super::Texture,
+        dst_premultiplication: bool,
+        regions: T,
+    ) where
+        T: Iterator<Item = crate::TextureCopy>,
+    {
+        let (dst_raw, dst_target) = dst.inner.as_native();
+        for copy in regions {
+            self.cmd_buffer
+                .commands
+                .push(C::CopyExternalImageToTexture {
+                    src: src.clone(),
+                    dst: dst_raw,
+                    dst_target,
+                    dst_format: dst.format,
+                    dst_premultiplication,
+                    copy,
+                })
+        }
+    }
+
+    unsafe fn copy_texture_to_texture<T>(
+        &mut self,
+        src: &super::Texture,
+        _src_usage: crate::TextureUses,
+        dst: &super::Texture,
+        regions: T,
+    ) where
+        T: Iterator<Item = crate::TextureCopy>,
+    {
+        let (src_raw, src_target) = src.inner.as_native();
+        let (dst_raw, dst_target) = dst.inner.as_native();
+        for mut copy in regions {
+            // Clamp the copy extent to both textures' virtual copy sizes.
+            copy.clamp_size_to_virtual(&src.copy_size, &dst.copy_size);
+            self.cmd_buffer.commands.push(C::CopyTextureToTexture {
+                src: src_raw,
+                src_target,
+                dst: dst_raw,
+                dst_target,
+                copy,
+                dst_is_cubemap: dst.is_cubemap,
+            })
+        }
+    }
+
+    unsafe fn copy_buffer_to_texture<T>(
+        &mut self,
+        src: &super::Buffer,
+        dst: &super::Texture,
+        regions: T,
+    ) where
+        T: Iterator<Item = crate::BufferTextureCopy>,
+    {
+        let (dst_raw, dst_target) = dst.inner.as_native();
+
+        for mut copy in regions {
+            copy.clamp_size_to_virtual(&dst.copy_size);
+            self.cmd_buffer.commands.push(C::CopyBufferToTexture {
+                src: src.clone(),
+                src_target: src.target,
+                dst: dst_raw,
+                dst_target,
+                dst_format: dst.format,
+                copy,
+            })
+        }
+    }
+
+    unsafe fn copy_texture_to_buffer<T>(
+        &mut self,
+        src: &super::Texture,
+        _src_usage: crate::TextureUses,
+        dst: &super::Buffer,
+        regions: T,
+    ) where
+        T: Iterator<Item = crate::BufferTextureCopy>,
+    {
+        let (src_raw, src_target) = src.inner.as_native();
+        for mut copy in regions {
+            copy.clamp_size_to_virtual(&src.copy_size);
+            self.cmd_buffer.commands.push(C::CopyTextureToBuffer {
+                src: src_raw,
+                src_target,
+                src_format: src.format,
+                dst: dst.clone(),
+                dst_target: dst.target,
+                copy,
+            })
+        }
+    }
+
+    unsafe fn begin_query(&mut self, set: &super::QuerySet, index: u32) {
+        let query = set.queries[index as usize];
+        self.cmd_buffer
+            .commands
+            .push(C::BeginQuery(query, set.target));
+    }
+    unsafe fn end_query(&mut self, set: &super::QuerySet, _index: u32) {
+        // The query index is implicit: EndQuery closes whichever query was
+        // started on this target by the preceding BeginQuery.
+        self.cmd_buffer.commands.push(C::EndQuery(set.target));
+    }
+    unsafe fn write_timestamp(&mut self, _set: &super::QuerySet, _index: u32) {
+        unimplemented!()
+    }
+    unsafe fn reset_queries(&mut self, _set: &super::QuerySet, _range: Range<u32>) {
+        //TODO: what do we do here?
+    }
+    unsafe fn copy_query_results(
+        &mut self,
+        set: &super::QuerySet,
+        range: Range<u32>,
+        buffer: &super::Buffer,
+        offset: wgt::BufferAddress,
+        _stride: wgt::BufferSize,
+    ) {
+        // Stash the query handles in the command buffer and reference them
+        // in the command by index range.
+        let start = self.cmd_buffer.queries.len();
+        self.cmd_buffer
+            .queries
+            .extend_from_slice(&set.queries[range.start as usize..range.end as usize]);
+        let query_range = start as u32..self.cmd_buffer.queries.len() as u32;
+        self.cmd_buffer.commands.push(C::CopyQueryResults {
+            query_range,
+            dst: buffer.clone(),
+            dst_target: buffer.target,
+            dst_offset: offset,
+        });
+    }
+
+ // render
+
+    /// Records framebuffer setup, viewport/scissor defaults, and load-op
+    /// clears for a render pass.
+    unsafe fn begin_render_pass(&mut self, desc: &crate::RenderPassDescriptor<super::Api>) {
+        self.state.render_size = desc.extent;
+        self.state.resolve_attachments.clear();
+        self.state.invalidate_attachments.clear();
+        if let Some(label) = desc.label {
+            let range = self.cmd_buffer.add_marker(label);
+            self.cmd_buffer.commands.push(C::PushDebugGroup(range));
+            self.state.has_pass_label = true;
+        }
+
+        // External framebuffers only exist on the web backend.
+        let rendering_to_external_framebuffer = desc
+            .color_attachments
+            .iter()
+            .filter_map(|at| at.as_ref())
+            .any(|at| match at.target.view.inner {
+                #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+                super::TextureInner::ExternalFramebuffer { .. } => true,
+                _ => false,
+            });
+
+        if rendering_to_external_framebuffer && desc.color_attachments.len() != 1 {
+            panic!("Multiple render attachments with external framebuffers are not supported.");
+        }
+
+        match desc
+            .color_attachments
+            .first()
+            .filter(|at| at.is_some())
+            .and_then(|at| at.as_ref().map(|at| &at.target.view.inner))
+        {
+            // default framebuffer (provided externally)
+            Some(&super::TextureInner::DefaultRenderbuffer) => {
+                self.cmd_buffer
+                    .commands
+                    .push(C::ResetFramebuffer { is_default: true });
+            }
+            _ => {
+                // set the framebuffer
+                self.cmd_buffer
+                    .commands
+                    .push(C::ResetFramebuffer { is_default: false });
+
+                // Attach color targets, remembering resolve targets and
+                // which attachments can be invalidated at end of pass.
+                for (i, cat) in desc.color_attachments.iter().enumerate() {
+                    if let Some(cat) = cat.as_ref() {
+                        let attachment = glow::COLOR_ATTACHMENT0 + i as u32;
+                        self.cmd_buffer.commands.push(C::BindAttachment {
+                            attachment,
+                            view: cat.target.view.clone(),
+                        });
+                        if let Some(ref rat) = cat.resolve_target {
+                            self.state
+                                .resolve_attachments
+                                .push((attachment, rat.view.clone()));
+                        }
+                        if !cat.ops.contains(crate::AttachmentOps::STORE) {
+                            self.state.invalidate_attachments.push(attachment);
+                        }
+                    }
+                }
+                if let Some(ref dsat) = desc.depth_stencil_attachment {
+                    let aspects = dsat.target.view.aspects;
+                    let attachment = match aspects {
+                        crate::FormatAspects::DEPTH => glow::DEPTH_ATTACHMENT,
+                        crate::FormatAspects::STENCIL => glow::STENCIL_ATTACHMENT,
+                        _ => glow::DEPTH_STENCIL_ATTACHMENT,
+                    };
+                    self.cmd_buffer.commands.push(C::BindAttachment {
+                        attachment,
+                        view: dsat.target.view.clone(),
+                    });
+                    if aspects.contains(crate::FormatAspects::DEPTH)
+                        && !dsat.depth_ops.contains(crate::AttachmentOps::STORE)
+                    {
+                        self.state
+                            .invalidate_attachments
+                            .push(glow::DEPTH_ATTACHMENT);
+                    }
+                    if aspects.contains(crate::FormatAspects::STENCIL)
+                        && !dsat.stencil_ops.contains(crate::AttachmentOps::STORE)
+                    {
+                        self.state
+                            .invalidate_attachments
+                            .push(glow::STENCIL_ATTACHMENT);
+                    }
+                }
+
+                if !rendering_to_external_framebuffer {
+                    // set the draw buffers and states
+                    self.cmd_buffer
+                        .commands
+                        .push(C::SetDrawColorBuffers(desc.color_attachments.len() as u8));
+                }
+            }
+        }
+
+        // Default viewport/scissor covering the full pass extent.
+        let rect = crate::Rect {
+            x: 0,
+            y: 0,
+            w: desc.extent.width as i32,
+            h: desc.extent.height as i32,
+        };
+        self.cmd_buffer.commands.push(C::SetScissor(rect.clone()));
+        self.cmd_buffer.commands.push(C::SetViewport {
+            rect,
+            depth: 0.0..1.0,
+        });
+
+        // issue the clears
+        for (i, cat) in desc
+            .color_attachments
+            .iter()
+            .filter_map(|at| at.as_ref())
+            .enumerate()
+        {
+            if !cat.ops.contains(crate::AttachmentOps::LOAD) {
+                let c = &cat.clear_value;
+                self.cmd_buffer.commands.push(
+                    // Clear with the float/uint/sint variant matching the
+                    // attachment's sample type.
+                    match cat.target.view.format.sample_type(None).unwrap() {
+                        wgt::TextureSampleType::Float { .. } => C::ClearColorF {
+                            draw_buffer: i as u32,
+                            color: [c.r as f32, c.g as f32, c.b as f32, c.a as f32],
+                            is_srgb: cat.target.view.format.is_srgb(),
+                        },
+                        wgt::TextureSampleType::Uint => C::ClearColorU(
+                            i as u32,
+                            [c.r as u32, c.g as u32, c.b as u32, c.a as u32],
+                        ),
+                        wgt::TextureSampleType::Sint => C::ClearColorI(
+                            i as u32,
+                            [c.r as i32, c.g as i32, c.b as i32, c.a as i32],
+                        ),
+                        wgt::TextureSampleType::Depth => unreachable!(),
+                    },
+                );
+            }
+        }
+        if let Some(ref dsat) = desc.depth_stencil_attachment {
+            let clear_depth = !dsat.depth_ops.contains(crate::AttachmentOps::LOAD);
+            let clear_stencil = !dsat.stencil_ops.contains(crate::AttachmentOps::LOAD);
+
+            if clear_depth && clear_stencil {
+                self.cmd_buffer.commands.push(C::ClearDepthAndStencil(
+                    dsat.clear_value.0,
+                    dsat.clear_value.1,
+                ));
+            } else if clear_depth {
+                self.cmd_buffer
+                    .commands
+                    .push(C::ClearDepth(dsat.clear_value.0));
+            } else if clear_stencil {
+                self.cmd_buffer
+                    .commands
+                    .push(C::ClearStencil(dsat.clear_value.1));
+            }
+        }
+    }
+    /// Records end-of-pass work (resolves, invalidation, label pop) and
+    /// resets the per-pass encoder state.
+    unsafe fn end_render_pass(&mut self) {
+        // Resolve multisampled attachments collected in begin_render_pass.
+        for (attachment, dst) in self.state.resolve_attachments.drain(..) {
+            self.cmd_buffer.commands.push(C::ResolveAttachment {
+                attachment,
+                dst,
+                size: self.state.render_size,
+            });
+        }
+        // Invalidate attachments whose contents need not be stored.
+        if !self.state.invalidate_attachments.is_empty() {
+            self.cmd_buffer.commands.push(C::InvalidateAttachments(
+                self.state.invalidate_attachments.clone(),
+            ));
+            self.state.invalidate_attachments.clear();
+        }
+        if self.state.has_pass_label {
+            self.cmd_buffer.commands.push(C::PopDebugGroup);
+            self.state.has_pass_label = false;
+        }
+        self.state.instance_vbuf_mask = 0;
+        self.state.dirty_vbuf_mask = 0;
+        self.state.active_first_instance = 0;
+        self.state.color_targets.clear();
+        for vat in &self.state.vertex_attributes {
+            self.cmd_buffer
+                .commands
+                .push(C::UnsetVertexAttribute(vat.location));
+        }
+        self.state.vertex_attributes.clear();
+        self.state.primitive = super::PrimitiveState::default();
+    }
+
+    unsafe fn set_bind_group(
+        &mut self,
+        layout: &super::PipelineLayout,
+        index: u32,
+        group: &super::BindGroup,
+        dynamic_offsets: &[wgt::DynamicOffset],
+    ) {
+        // Dynamic offsets are consumed in binding order.
+        let mut do_index = 0;
+        let mut dirty_textures = 0u32;
+        let mut dirty_samplers = 0u32;
+        let group_info = &layout.group_infos[index as usize];
+
+        for (binding_layout, raw_binding) in group_info.entries.iter().zip(group.contents.iter()) {
+            // Map the binding number to a flat GL slot via the layout table.
+            let slot = group_info.binding_to_slot[binding_layout.binding as usize] as u32;
+            match *raw_binding {
+                super::RawBinding::Buffer {
+                    raw,
+                    offset: base_offset,
+                    size,
+                } => {
+                    let mut offset = base_offset;
+                    let target = match binding_layout.ty {
+                        wgt::BindingType::Buffer {
+                            ty,
+                            has_dynamic_offset,
+                            min_binding_size: _,
+                        } => {
+                            if has_dynamic_offset {
+                                offset += dynamic_offsets[do_index] as i32;
+                                do_index += 1;
+                            }
+                            match ty {
+                                wgt::BufferBindingType::Uniform => glow::UNIFORM_BUFFER,
+                                wgt::BufferBindingType::Storage { .. } => {
+                                    glow::SHADER_STORAGE_BUFFER
+                                }
+                            }
+                        }
+                        _ => unreachable!(),
+                    };
+                    self.cmd_buffer.commands.push(C::BindBuffer {
+                        target,
+                        slot,
+                        buffer: raw,
+                        offset,
+                        size,
+                    });
+                }
+                super::RawBinding::Sampler(sampler) => {
+                    // Samplers are tracked in state and emitted below.
+                    dirty_samplers |= 1 << slot;
+                    self.state.samplers[slot as usize] = Some(sampler);
+                }
+                super::RawBinding::Texture {
+                    raw,
+                    target,
+                    aspects,
+                } => {
+                    dirty_textures |= 1 << slot;
+                    self.state.texture_slots[slot as usize].tex_target = target;
+                    self.cmd_buffer.commands.push(C::BindTexture {
+                        slot,
+                        texture: raw,
+                        target,
+                        aspects,
+                    });
+                }
+                super::RawBinding::Image(ref binding) => {
+                    self.cmd_buffer.commands.push(C::BindImage {
+                        slot,
+                        binding: binding.clone(),
+                    });
+                }
+            }
+        }
+
+        // Texture/sampler pairings may have changed; refresh sampler binds.
+        self.rebind_sampler_states(dirty_textures, dirty_samplers);
+    }
+
+    unsafe fn set_push_constants(
+        &mut self,
+        _layout: &super::PipelineLayout,
+        _stages: wgt::ShaderStages,
+        start_offset: u32,
+        data: &[u32],
+    ) {
+        // Copy the raw words into the command buffer's data blob; commands
+        // reference them by byte offset within that blob.
+        let range = self.cmd_buffer.add_push_constant_data(data);
+
+        let end = start_offset + data.len() as u32 * 4;
+        let mut offset = start_offset;
+        while offset < end {
+            // `push_offset_to_uniform` maps each 4-byte push-constant slot
+            // to the uniform starting there (filled in set_pipeline_inner).
+            let uniform = self.state.push_offset_to_uniform[offset as usize / 4].clone();
+            let size = uniform.size;
+            if uniform.location.is_none() {
+                panic!("No uniform for push constant");
+            }
+            self.cmd_buffer.commands.push(C::SetPushConstants {
+                uniform,
+                offset: range.start + offset,
+            });
+            // NOTE(review): assumes `size` is non-zero — a zero-sized
+            // uniform would loop forever here.
+            offset += size;
+        }
+    }
+
+    /// Records a one-off debug marker string into the command stream.
+    unsafe fn insert_debug_marker(&mut self, label: &str) {
+        let marker_bytes = self.cmd_buffer.add_marker(label);
+        self.cmd_buffer
+            .commands
+            .push(C::InsertDebugMarker(marker_bytes));
+    }
+    /// Opens a named debug group; closed again by `end_debug_marker`.
+    unsafe fn begin_debug_marker(&mut self, group_label: &str) {
+        let marker_bytes = self.cmd_buffer.add_marker(group_label);
+        self.cmd_buffer
+            .commands
+            .push(C::PushDebugGroup(marker_bytes));
+    }
+    /// Closes the innermost open debug group.
+    unsafe fn end_debug_marker(&mut self) {
+        self.cmd_buffer.commands.push(C::PopDebugGroup);
+    }
+
+    /// Records all state changes implied by binding a render pipeline:
+    /// vertex layout, program, primitive, depth/stencil, multisample, and
+    /// blend state — emitting commands only where the state differs.
+    unsafe fn set_render_pipeline(&mut self, pipeline: &super::RenderPipeline) {
+        self.state.topology = conv::map_primitive_topology(pipeline.primitive.topology);
+
+        if self
+            .private_caps
+            .contains(super::PrivateCapabilities::VERTEX_BUFFER_LAYOUT)
+        {
+            for vat in pipeline.vertex_attributes.iter() {
+                let vb = &pipeline.vertex_buffers[vat.buffer_index as usize];
+                // set the layout
+                self.cmd_buffer.commands.push(C::SetVertexAttribute {
+                    buffer: None,
+                    buffer_desc: vb.clone(),
+                    attribute_desc: vat.clone(),
+                });
+            }
+        } else {
+            // No separate layout support: unset the old attributes and mark
+            // every buffer used by the new pipeline dirty for rebinding.
+            for vat in &self.state.vertex_attributes {
+                self.cmd_buffer
+                    .commands
+                    .push(C::UnsetVertexAttribute(vat.location));
+            }
+            self.state.vertex_attributes.clear();
+
+            self.state.dirty_vbuf_mask = 0;
+            // copy vertex attributes
+            for vat in pipeline.vertex_attributes.iter() {
+                //Note: we can invalidate more carefully here.
+                self.state.dirty_vbuf_mask |= 1 << vat.buffer_index;
+                self.state.vertex_attributes.push(vat.clone());
+            }
+        }
+
+        self.state.instance_vbuf_mask = 0;
+        // copy vertex state
+        for (index, (&mut (ref mut state_desc, _), pipe_desc)) in self
+            .state
+            .vertex_buffers
+            .iter_mut()
+            .zip(pipeline.vertex_buffers.iter())
+            .enumerate()
+        {
+            if pipe_desc.step == wgt::VertexStepMode::Instance {
+                self.state.instance_vbuf_mask |= 1 << index;
+            }
+            if state_desc != pipe_desc {
+                self.state.dirty_vbuf_mask |= 1 << index;
+                *state_desc = pipe_desc.clone();
+            }
+        }
+
+        self.set_pipeline_inner(&pipeline.inner);
+
+        // set primitive state
+        let prim_state = conv::map_primitive_state(&pipeline.primitive);
+        if prim_state != self.state.primitive {
+            self.cmd_buffer
+                .commands
+                .push(C::SetPrimitive(prim_state.clone()));
+            self.state.primitive = prim_state;
+        }
+
+        // set depth/stencil states
+        let mut aspects = crate::FormatAspects::empty();
+        if pipeline.depth_bias != self.state.depth_bias {
+            self.state.depth_bias = pipeline.depth_bias;
+            self.cmd_buffer
+                .commands
+                .push(C::SetDepthBias(pipeline.depth_bias));
+        }
+        if let Some(ref depth) = pipeline.depth {
+            aspects |= crate::FormatAspects::DEPTH;
+            self.cmd_buffer.commands.push(C::SetDepth(depth.clone()));
+        }
+        if let Some(ref stencil) = pipeline.stencil {
+            aspects |= crate::FormatAspects::STENCIL;
+            self.state.stencil = stencil.clone();
+            self.rebind_stencil_func();
+            // Collapse front/back into FRONT_AND_BACK when they agree.
+            if stencil.front.ops == stencil.back.ops
+                && stencil.front.mask_write == stencil.back.mask_write
+            {
+                self.cmd_buffer.commands.push(C::SetStencilOps {
+                    face: glow::FRONT_AND_BACK,
+                    write_mask: stencil.front.mask_write,
+                    ops: stencil.front.ops.clone(),
+                });
+            } else {
+                self.cmd_buffer.commands.push(C::SetStencilOps {
+                    face: glow::FRONT,
+                    write_mask: stencil.front.mask_write,
+                    ops: stencil.front.ops.clone(),
+                });
+                self.cmd_buffer.commands.push(C::SetStencilOps {
+                    face: glow::BACK,
+                    write_mask: stencil.back.mask_write,
+                    ops: stencil.back.ops.clone(),
+                });
+            }
+        }
+        self.cmd_buffer
+            .commands
+            .push(C::ConfigureDepthStencil(aspects));
+
+        // set multisampling state
+        if pipeline.alpha_to_coverage_enabled != self.state.alpha_to_coverage_enabled {
+            self.state.alpha_to_coverage_enabled = pipeline.alpha_to_coverage_enabled;
+            self.cmd_buffer
+                .commands
+                .push(C::SetAlphaToCoverage(pipeline.alpha_to_coverage_enabled));
+        }
+
+        // set blend states
+        if self.state.color_targets[..] != pipeline.color_targets[..] {
+            // When all targets share one blend state, emit a single command
+            // with no draw-buffer index; otherwise one command per target.
+            if pipeline
+                .color_targets
+                .iter()
+                .skip(1)
+                .any(|ct| *ct != pipeline.color_targets[0])
+            {
+                for (index, ct) in pipeline.color_targets.iter().enumerate() {
+                    self.cmd_buffer.commands.push(C::SetColorTarget {
+                        draw_buffer_index: Some(index as u32),
+                        desc: ct.clone(),
+                    });
+                }
+            } else {
+                self.cmd_buffer.commands.push(C::SetColorTarget {
+                    draw_buffer_index: None,
+                    desc: pipeline.color_targets.first().cloned().unwrap_or_default(),
+                });
+            }
+        }
+        self.state.color_targets.clear();
+        for ct in pipeline.color_targets.iter() {
+            self.state.color_targets.push(ct.clone());
+        }
+    }
+
+    unsafe fn set_index_buffer<'a>(
+        &mut self,
+        binding: crate::BufferBinding<'a, super::Api>,
+        format: wgt::IndexFormat,
+    ) {
+        // Record the offset/format in state; they are applied per draw call
+        // (see draw_indexed), only the buffer bind is emitted now.
+        self.state.index_offset = binding.offset;
+        self.state.index_format = format;
+        self.cmd_buffer
+            .commands
+            .push(C::SetIndexBuffer(binding.buffer.raw.unwrap()));
+    }
+    unsafe fn set_vertex_buffer<'a>(
+        &mut self,
+        index: u32,
+        binding: crate::BufferBinding<'a, super::Api>,
+    ) {
+        // Defer the actual bind to `prepare_draw` via the dirty mask.
+        self.state.dirty_vbuf_mask |= 1 << index;
+        let (_, ref mut vb) = self.state.vertex_buffers[index as usize];
+        *vb = Some(super::BufferBinding {
+            raw: binding.buffer.raw.unwrap(),
+            offset: binding.offset,
+        });
+    }
+    /// Converts the float viewport rect to integer pixel coordinates and
+    /// records it together with the depth range.
+    unsafe fn set_viewport(&mut self, rect: &crate::Rect<f32>, depth: Range<f32>) {
+        let pixel_rect = crate::Rect {
+            x: rect.x as i32,
+            y: rect.y as i32,
+            w: rect.w as i32,
+            h: rect.h as i32,
+        };
+        self.cmd_buffer.commands.push(C::SetViewport {
+            rect: pixel_rect,
+            depth,
+        });
+    }
+    /// Records a scissor rectangle, converting to signed coordinates.
+    unsafe fn set_scissor_rect(&mut self, rect: &crate::Rect<u32>) {
+        let pixel_rect = crate::Rect {
+            x: rect.x as i32,
+            y: rect.y as i32,
+            w: rect.w as i32,
+            h: rect.h as i32,
+        };
+        self.cmd_buffer.commands.push(C::SetScissor(pixel_rect));
+    }
+    /// Applies the same stencil reference to both faces and re-emits the
+    /// stencil function commands.
+    unsafe fn set_stencil_reference(&mut self, value: u32) {
+        self.state.stencil.front.reference = value;
+        self.state.stencil.back.reference = value;
+        self.rebind_stencil_func();
+    }
+    /// Records the constant blend color.
+    unsafe fn set_blend_constants(&mut self, color: &[f32; 4]) {
+        self.cmd_buffer.commands.push(C::SetBlendConstant(*color));
+    }
+
+    unsafe fn draw(
+        &mut self,
+        start_vertex: u32,
+        vertex_count: u32,
+        start_instance: u32,
+        instance_count: u32,
+    ) {
+        // `start_instance` is folded into vertex-buffer offsets here; the
+        // emitted Draw command carries no base-instance value.
+        self.prepare_draw(start_instance);
+        self.cmd_buffer.commands.push(C::Draw {
+            topology: self.state.topology,
+            start_vertex,
+            vertex_count,
+            instance_count,
+        });
+    }
+    unsafe fn draw_indexed(
+        &mut self,
+        start_index: u32,
+        index_count: u32,
+        base_vertex: i32,
+        start_instance: u32,
+        instance_count: u32,
+    ) {
+        self.prepare_draw(start_instance);
+        let (index_size, index_type) = match self.state.index_format {
+            wgt::IndexFormat::Uint16 => (2, glow::UNSIGNED_SHORT),
+            wgt::IndexFormat::Uint32 => (4, glow::UNSIGNED_INT),
+        };
+        // Byte offset of the first index within the bound index buffer.
+        let index_offset = self.state.index_offset + index_size * start_index as wgt::BufferAddress;
+        self.cmd_buffer.commands.push(C::DrawIndexed {
+            topology: self.state.topology,
+            index_type,
+            index_offset,
+            index_count,
+            base_vertex,
+            instance_count,
+        });
+    }
+    unsafe fn draw_indirect(
+        &mut self,
+        buffer: &super::Buffer,
+        offset: wgt::BufferAddress,
+        draw_count: u32,
+    ) {
+        self.prepare_draw(0);
+        // One indirect-draw command per argument struct in the buffer.
+        for draw in 0..draw_count as wgt::BufferAddress {
+            let indirect_offset =
+                offset + draw * mem::size_of::<wgt::DrawIndirectArgs>() as wgt::BufferAddress;
+            self.cmd_buffer.commands.push(C::DrawIndirect {
+                topology: self.state.topology,
+                indirect_buf: buffer.raw.unwrap(),
+                indirect_offset,
+            });
+        }
+    }
+    unsafe fn draw_indexed_indirect(
+        &mut self,
+        buffer: &super::Buffer,
+        offset: wgt::BufferAddress,
+        draw_count: u32,
+    ) {
+        self.prepare_draw(0);
+        let index_type = match self.state.index_format {
+            wgt::IndexFormat::Uint16 => glow::UNSIGNED_SHORT,
+            wgt::IndexFormat::Uint32 => glow::UNSIGNED_INT,
+        };
+        for draw in 0..draw_count as wgt::BufferAddress {
+            let indirect_offset = offset
+                + draw * mem::size_of::<wgt::DrawIndexedIndirectArgs>() as wgt::BufferAddress;
+            self.cmd_buffer.commands.push(C::DrawIndexedIndirect {
+                topology: self.state.topology,
+                index_type,
+                indirect_buf: buffer.raw.unwrap(),
+                indirect_offset,
+            });
+        }
+    }
+    unsafe fn draw_indirect_count(
+        &mut self,
+        _buffer: &super::Buffer,
+        _offset: wgt::BufferAddress,
+        _count_buffer: &super::Buffer,
+        _count_offset: wgt::BufferAddress,
+        _max_count: u32,
+    ) {
+        // NOTE(review): presumably the corresponding feature is never
+        // advertised by this backend, so callers cannot reach this — confirm.
+        unreachable!()
+    }
+    unsafe fn draw_indexed_indirect_count(
+        &mut self,
+        _buffer: &super::Buffer,
+        _offset: wgt::BufferAddress,
+        _count_buffer: &super::Buffer,
+        _count_offset: wgt::BufferAddress,
+        _max_count: u32,
+    ) {
+        // Same reasoning as draw_indirect_count above: not reachable here.
+        unreachable!()
+    }
+
+ // compute
+
+    unsafe fn begin_compute_pass(&mut self, desc: &crate::ComputePassDescriptor) {
+        // Compute passes only need an optional debug label group.
+        if let Some(label) = desc.label {
+            let range = self.cmd_buffer.add_marker(label);
+            self.cmd_buffer.commands.push(C::PushDebugGroup(range));
+            self.state.has_pass_label = true;
+        }
+    }
+    unsafe fn end_compute_pass(&mut self) {
+        if self.state.has_pass_label {
+            self.cmd_buffer.commands.push(C::PopDebugGroup);
+            self.state.has_pass_label = false;
+        }
+    }
+
+    unsafe fn set_compute_pipeline(&mut self, pipeline: &super::ComputePipeline) {
+        // Shares program/push-constant/sampler setup with render pipelines.
+        self.set_pipeline_inner(&pipeline.inner);
+    }
+
+    unsafe fn dispatch(&mut self, count: [u32; 3]) {
+        self.cmd_buffer.commands.push(C::Dispatch(count));
+    }
+    unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) {
+        self.cmd_buffer.commands.push(C::DispatchIndirect {
+            indirect_buf: buffer.raw.unwrap(),
+            indirect_offset: offset,
+        });
+    }
+}
diff --git a/third_party/rust/wgpu-hal/src/gles/conv.rs b/third_party/rust/wgpu-hal/src/gles/conv.rs
new file mode 100644
index 0000000000..86ff3b60b0
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/conv.rs
@@ -0,0 +1,501 @@
+impl super::AdapterShared {
+    /// Maps a WebGPU texture format to the GL triple of
+    /// `(internal format, external format, data type)`.
+    ///
+    /// Compressed formats carry `0` as the data type: they have no
+    /// per-texel component type and are treated as opaque blocks.
+    pub(super) fn describe_texture_format(
+        &self,
+        texture_format: wgt::TextureFormat,
+    ) -> super::TextureFormatDesc {
+        use wgt::TextureFormat as Tf;
+        use wgt::{AstcBlock, AstcChannel};
+
+        let (internal, external, data_type) = match texture_format {
+            Tf::R8Unorm => (glow::R8, glow::RED, glow::UNSIGNED_BYTE),
+            Tf::R8Snorm => (glow::R8_SNORM, glow::RED, glow::BYTE),
+            Tf::R8Uint => (glow::R8UI, glow::RED_INTEGER, glow::UNSIGNED_BYTE),
+            Tf::R8Sint => (glow::R8I, glow::RED_INTEGER, glow::BYTE),
+            Tf::R16Uint => (glow::R16UI, glow::RED_INTEGER, glow::UNSIGNED_SHORT),
+            Tf::R16Sint => (glow::R16I, glow::RED_INTEGER, glow::SHORT),
+            Tf::R16Unorm => (glow::R16, glow::RED, glow::UNSIGNED_SHORT),
+            Tf::R16Snorm => (glow::R16_SNORM, glow::RED, glow::SHORT),
+            Tf::R16Float => (glow::R16F, glow::RED, glow::HALF_FLOAT),
+            Tf::Rg8Unorm => (glow::RG8, glow::RG, glow::UNSIGNED_BYTE),
+            Tf::Rg8Snorm => (glow::RG8_SNORM, glow::RG, glow::BYTE),
+            Tf::Rg8Uint => (glow::RG8UI, glow::RG_INTEGER, glow::UNSIGNED_BYTE),
+            Tf::Rg8Sint => (glow::RG8I, glow::RG_INTEGER, glow::BYTE),
+            Tf::R32Uint => (glow::R32UI, glow::RED_INTEGER, glow::UNSIGNED_INT),
+            Tf::R32Sint => (glow::R32I, glow::RED_INTEGER, glow::INT),
+            Tf::R32Float => (glow::R32F, glow::RED, glow::FLOAT),
+            Tf::Rg16Uint => (glow::RG16UI, glow::RG_INTEGER, glow::UNSIGNED_SHORT),
+            Tf::Rg16Sint => (glow::RG16I, glow::RG_INTEGER, glow::SHORT),
+            Tf::Rg16Unorm => (glow::RG16, glow::RG, glow::UNSIGNED_SHORT),
+            Tf::Rg16Snorm => (glow::RG16_SNORM, glow::RG, glow::SHORT),
+            Tf::Rg16Float => (glow::RG16F, glow::RG, glow::HALF_FLOAT),
+            Tf::Rgba8Unorm => (glow::RGBA8, glow::RGBA, glow::UNSIGNED_BYTE),
+            Tf::Rgba8UnormSrgb => (glow::SRGB8_ALPHA8, glow::RGBA, glow::UNSIGNED_BYTE),
+            Tf::Bgra8UnormSrgb => (glow::SRGB8_ALPHA8, glow::BGRA, glow::UNSIGNED_BYTE), //TODO?
+            Tf::Rgba8Snorm => (glow::RGBA8_SNORM, glow::RGBA, glow::BYTE),
+            Tf::Bgra8Unorm => (glow::RGBA8, glow::BGRA, glow::UNSIGNED_BYTE), //TODO?
+            Tf::Rgba8Uint => (glow::RGBA8UI, glow::RGBA_INTEGER, glow::UNSIGNED_BYTE),
+            Tf::Rgba8Sint => (glow::RGBA8I, glow::RGBA_INTEGER, glow::BYTE),
+            Tf::Rgb10a2Unorm => (
+                glow::RGB10_A2,
+                glow::RGBA,
+                glow::UNSIGNED_INT_2_10_10_10_REV,
+            ),
+            Tf::Rg11b10Float => (
+                glow::R11F_G11F_B10F,
+                glow::RGB,
+                glow::UNSIGNED_INT_10F_11F_11F_REV,
+            ),
+            Tf::Rg32Uint => (glow::RG32UI, glow::RG_INTEGER, glow::UNSIGNED_INT),
+            Tf::Rg32Sint => (glow::RG32I, glow::RG_INTEGER, glow::INT),
+            Tf::Rg32Float => (glow::RG32F, glow::RG, glow::FLOAT),
+            Tf::Rgba16Uint => (glow::RGBA16UI, glow::RGBA_INTEGER, glow::UNSIGNED_SHORT),
+            Tf::Rgba16Sint => (glow::RGBA16I, glow::RGBA_INTEGER, glow::SHORT),
+            Tf::Rgba16Unorm => (glow::RGBA16, glow::RGBA, glow::UNSIGNED_SHORT),
+            Tf::Rgba16Snorm => (glow::RGBA16_SNORM, glow::RGBA, glow::SHORT),
+            Tf::Rgba16Float => (glow::RGBA16F, glow::RGBA, glow::HALF_FLOAT),
+            Tf::Rgba32Uint => (glow::RGBA32UI, glow::RGBA_INTEGER, glow::UNSIGNED_INT),
+            Tf::Rgba32Sint => (glow::RGBA32I, glow::RGBA_INTEGER, glow::INT),
+            Tf::Rgba32Float => (glow::RGBA32F, glow::RGBA, glow::FLOAT),
+            Tf::Stencil8 => (
+                glow::STENCIL_INDEX8,
+                glow::STENCIL_INDEX,
+                glow::UNSIGNED_BYTE,
+            ),
+            Tf::Depth16Unorm => (
+                glow::DEPTH_COMPONENT16,
+                glow::DEPTH_COMPONENT,
+                glow::UNSIGNED_SHORT,
+            ),
+            Tf::Depth32Float => (glow::DEPTH_COMPONENT32F, glow::DEPTH_COMPONENT, glow::FLOAT),
+            Tf::Depth32FloatStencil8 => (
+                glow::DEPTH32F_STENCIL8,
+                glow::DEPTH_STENCIL,
+                glow::FLOAT_32_UNSIGNED_INT_24_8_REV,
+            ),
+            Tf::Depth24Plus => (
+                glow::DEPTH_COMPONENT24,
+                glow::DEPTH_COMPONENT,
+                glow::UNSIGNED_INT,
+            ),
+            Tf::Depth24PlusStencil8 => (
+                glow::DEPTH24_STENCIL8,
+                glow::DEPTH_STENCIL,
+                glow::UNSIGNED_INT_24_8,
+            ),
+            Tf::Rgb9e5Ufloat => (glow::RGB9_E5, glow::RGB, glow::UNSIGNED_INT_5_9_9_9_REV),
+            Tf::Bc1RgbaUnorm => (glow::COMPRESSED_RGBA_S3TC_DXT1_EXT, glow::RGBA, 0),
+            Tf::Bc1RgbaUnormSrgb => (glow::COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT, glow::RGBA, 0),
+            Tf::Bc2RgbaUnorm => (glow::COMPRESSED_RGBA_S3TC_DXT3_EXT, glow::RGBA, 0),
+            Tf::Bc2RgbaUnormSrgb => (glow::COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT, glow::RGBA, 0),
+            Tf::Bc3RgbaUnorm => (glow::COMPRESSED_RGBA_S3TC_DXT5_EXT, glow::RGBA, 0),
+            Tf::Bc3RgbaUnormSrgb => (glow::COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT, glow::RGBA, 0),
+            Tf::Bc4RUnorm => (glow::COMPRESSED_RED_RGTC1, glow::RED, 0),
+            Tf::Bc4RSnorm => (glow::COMPRESSED_SIGNED_RED_RGTC1, glow::RED, 0),
+            Tf::Bc5RgUnorm => (glow::COMPRESSED_RG_RGTC2, glow::RG, 0),
+            Tf::Bc5RgSnorm => (glow::COMPRESSED_SIGNED_RG_RGTC2, glow::RG, 0),
+            Tf::Bc6hRgbUfloat => (glow::COMPRESSED_RGB_BPTC_UNSIGNED_FLOAT, glow::RGB, 0),
+            Tf::Bc6hRgbFloat => (glow::COMPRESSED_RGB_BPTC_SIGNED_FLOAT, glow::RGB, 0),
+            Tf::Bc7RgbaUnorm => (glow::COMPRESSED_RGBA_BPTC_UNORM, glow::RGBA, 0),
+            Tf::Bc7RgbaUnormSrgb => (glow::COMPRESSED_SRGB_ALPHA_BPTC_UNORM, glow::RGBA, 0),
+            Tf::Etc2Rgb8Unorm => (glow::COMPRESSED_RGB8_ETC2, glow::RGB, 0),
+            Tf::Etc2Rgb8UnormSrgb => (glow::COMPRESSED_SRGB8_ETC2, glow::RGB, 0),
+            Tf::Etc2Rgb8A1Unorm => (
+                glow::COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2,
+                glow::RGBA,
+                0,
+            ),
+            Tf::Etc2Rgb8A1UnormSrgb => (
+                glow::COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2,
+                glow::RGBA,
+                0,
+            ),
+            Tf::Etc2Rgba8Unorm => (
+                //TODO: this is a lie, it's not sRGB
+                glow::COMPRESSED_SRGB8_ALPHA8_ETC2_EAC,
+                glow::RGBA,
+                0,
+            ),
+            Tf::Etc2Rgba8UnormSrgb => (glow::COMPRESSED_SRGB8_ALPHA8_ETC2_EAC, glow::RGBA, 0),
+            Tf::EacR11Unorm => (glow::COMPRESSED_R11_EAC, glow::RED, 0),
+            Tf::EacR11Snorm => (glow::COMPRESSED_SIGNED_R11_EAC, glow::RED, 0),
+            Tf::EacRg11Unorm => (glow::COMPRESSED_RG11_EAC, glow::RG, 0),
+            Tf::EacRg11Snorm => (glow::COMPRESSED_SIGNED_RG11_EAC, glow::RG, 0),
+            // ASTC LDR and HDR share the same GL enums; only sRGB differs.
+            Tf::Astc { block, channel } => match channel {
+                AstcChannel::Unorm | AstcChannel::Hdr => match block {
+                    AstcBlock::B4x4 => (glow::COMPRESSED_RGBA_ASTC_4x4_KHR, glow::RGBA, 0),
+                    AstcBlock::B5x4 => (glow::COMPRESSED_RGBA_ASTC_5x4_KHR, glow::RGBA, 0),
+                    AstcBlock::B5x5 => (glow::COMPRESSED_RGBA_ASTC_5x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B6x5 => (glow::COMPRESSED_RGBA_ASTC_6x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B6x6 => (glow::COMPRESSED_RGBA_ASTC_6x6_KHR, glow::RGBA, 0),
+                    AstcBlock::B8x5 => (glow::COMPRESSED_RGBA_ASTC_8x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B8x6 => (glow::COMPRESSED_RGBA_ASTC_8x6_KHR, glow::RGBA, 0),
+                    AstcBlock::B8x8 => (glow::COMPRESSED_RGBA_ASTC_8x8_KHR, glow::RGBA, 0),
+                    AstcBlock::B10x5 => (glow::COMPRESSED_RGBA_ASTC_10x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B10x6 => (glow::COMPRESSED_RGBA_ASTC_10x6_KHR, glow::RGBA, 0),
+                    AstcBlock::B10x8 => (glow::COMPRESSED_RGBA_ASTC_10x8_KHR, glow::RGBA, 0),
+                    AstcBlock::B10x10 => (glow::COMPRESSED_RGBA_ASTC_10x10_KHR, glow::RGBA, 0),
+                    AstcBlock::B12x10 => (glow::COMPRESSED_RGBA_ASTC_12x10_KHR, glow::RGBA, 0),
+                    AstcBlock::B12x12 => (glow::COMPRESSED_RGBA_ASTC_12x12_KHR, glow::RGBA, 0),
+                },
+                AstcChannel::UnormSrgb => match block {
+                    AstcBlock::B4x4 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR, glow::RGBA, 0),
+                    AstcBlock::B5x4 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR, glow::RGBA, 0),
+                    AstcBlock::B5x5 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B6x5 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B6x6 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR, glow::RGBA, 0),
+                    AstcBlock::B8x5 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR, glow::RGBA, 0),
+                    AstcBlock::B8x6 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR, glow::RGBA, 0),
+                    AstcBlock::B8x8 => (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR, glow::RGBA, 0),
+                    AstcBlock::B10x5 => {
+                        (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR, glow::RGBA, 0)
+                    }
+                    AstcBlock::B10x6 => {
+                        (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR, glow::RGBA, 0)
+                    }
+                    AstcBlock::B10x8 => {
+                        (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR, glow::RGBA, 0)
+                    }
+                    AstcBlock::B10x10 => {
+                        (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR, glow::RGBA, 0)
+                    }
+                    AstcBlock::B12x10 => {
+                        (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR, glow::RGBA, 0)
+                    }
+                    AstcBlock::B12x12 => {
+                        (glow::COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR, glow::RGBA, 0)
+                    }
+                },
+            },
+        };
+
+        super::TextureFormatDesc {
+            internal,
+            external,
+            data_type,
+        }
+    }
+}
+
+/// Maps a WebGPU vertex format to its GL attribute description: component
+/// count, component GL type, and whether the attribute is consumed through
+/// the float path (normalized/converted) or the integer path.
+pub(super) fn describe_vertex_format(vertex_format: wgt::VertexFormat) -> super::VertexFormatDesc {
+    use super::VertexAttribKind as Vak;
+    use wgt::VertexFormat as Vf;
+
+    let (element_count, element_format, attrib_kind) = match vertex_format {
+        Vf::Unorm8x2 => (2, glow::UNSIGNED_BYTE, Vak::Float),
+        Vf::Snorm8x2 => (2, glow::BYTE, Vak::Float),
+        Vf::Uint8x2 => (2, glow::UNSIGNED_BYTE, Vak::Integer),
+        Vf::Sint8x2 => (2, glow::BYTE, Vak::Integer),
+        Vf::Unorm8x4 => (4, glow::UNSIGNED_BYTE, Vak::Float),
+        Vf::Snorm8x4 => (4, glow::BYTE, Vak::Float),
+        Vf::Uint8x4 => (4, glow::UNSIGNED_BYTE, Vak::Integer),
+        Vf::Sint8x4 => (4, glow::BYTE, Vak::Integer),
+        Vf::Unorm16x2 => (2, glow::UNSIGNED_SHORT, Vak::Float),
+        Vf::Snorm16x2 => (2, glow::SHORT, Vak::Float),
+        Vf::Uint16x2 => (2, glow::UNSIGNED_SHORT, Vak::Integer),
+        Vf::Sint16x2 => (2, glow::SHORT, Vak::Integer),
+        Vf::Float16x2 => (2, glow::HALF_FLOAT, Vak::Float),
+        Vf::Unorm16x4 => (4, glow::UNSIGNED_SHORT, Vak::Float),
+        Vf::Snorm16x4 => (4, glow::SHORT, Vak::Float),
+        Vf::Uint16x4 => (4, glow::UNSIGNED_SHORT, Vak::Integer),
+        Vf::Sint16x4 => (4, glow::SHORT, Vak::Integer),
+        Vf::Float16x4 => (4, glow::HALF_FLOAT, Vak::Float),
+        Vf::Uint32 => (1, glow::UNSIGNED_INT, Vak::Integer),
+        Vf::Sint32 => (1, glow::INT, Vak::Integer),
+        Vf::Float32 => (1, glow::FLOAT, Vak::Float),
+        Vf::Uint32x2 => (2, glow::UNSIGNED_INT, Vak::Integer),
+        Vf::Sint32x2 => (2, glow::INT, Vak::Integer),
+        Vf::Float32x2 => (2, glow::FLOAT, Vak::Float),
+        Vf::Uint32x3 => (3, glow::UNSIGNED_INT, Vak::Integer),
+        Vf::Sint32x3 => (3, glow::INT, Vak::Integer),
+        Vf::Float32x3 => (3, glow::FLOAT, Vak::Float),
+        Vf::Uint32x4 => (4, glow::UNSIGNED_INT, Vak::Integer),
+        Vf::Sint32x4 => (4, glow::INT, Vak::Integer),
+        Vf::Float32x4 => (4, glow::FLOAT, Vak::Float),
+        // Double-precision attributes are not supported by this backend.
+        Vf::Float64 | Vf::Float64x2 | Vf::Float64x3 | Vf::Float64x4 => unimplemented!(),
+    };
+
+    super::VertexFormatDesc {
+        element_count,
+        element_format,
+        attrib_kind,
+    }
+}
+
+/// Translates WebGPU min/mag/mip filter modes into the pair of GL sampler
+/// parameters `(MIN_FILTER, MAG_FILTER)`.
+pub fn map_filter_modes(
+    min: wgt::FilterMode,
+    mag: wgt::FilterMode,
+    mip: wgt::FilterMode,
+) -> (u32, u32) {
+    use wgt::FilterMode as Fm;
+
+    // GL folds the mipmap filter into the minification filter enum.
+    let min_filter = match min {
+        Fm::Nearest => match mip {
+            Fm::Nearest => glow::NEAREST_MIPMAP_NEAREST,
+            Fm::Linear => glow::NEAREST_MIPMAP_LINEAR,
+        },
+        Fm::Linear => match mip {
+            Fm::Nearest => glow::LINEAR_MIPMAP_NEAREST,
+            Fm::Linear => glow::LINEAR_MIPMAP_LINEAR,
+        },
+    };
+
+    let mag_filter = match mag {
+        Fm::Nearest => glow::NEAREST,
+        Fm::Linear => glow::LINEAR,
+    };
+
+    (min_filter, mag_filter)
+}
+
+/// Translates a WebGPU sampler address mode to the GL wrap-mode enum.
+pub fn map_address_mode(mode: wgt::AddressMode) -> u32 {
+    use wgt::AddressMode as Am;
+    match mode {
+        Am::Repeat => glow::REPEAT,
+        Am::MirrorRepeat => glow::MIRRORED_REPEAT,
+        Am::ClampToEdge => glow::CLAMP_TO_EDGE,
+        Am::ClampToBorder => glow::CLAMP_TO_BORDER,
+        //Am::MirrorClamp => glow::MIRROR_CLAMP_TO_EDGE,
+    }
+}
+
+/// Translates a WebGPU comparison function to the GL enum used for depth
+/// tests, stencil tests and comparison samplers.
+pub fn map_compare_func(fun: wgt::CompareFunction) -> u32 {
+    match fun {
+        wgt::CompareFunction::Never => glow::NEVER,
+        wgt::CompareFunction::Always => glow::ALWAYS,
+        wgt::CompareFunction::Less => glow::LESS,
+        wgt::CompareFunction::LessEqual => glow::LEQUAL,
+        wgt::CompareFunction::Greater => glow::GREATER,
+        wgt::CompareFunction::GreaterEqual => glow::GEQUAL,
+        wgt::CompareFunction::Equal => glow::EQUAL,
+        wgt::CompareFunction::NotEqual => glow::NOTEQUAL,
+    }
+}
+
+/// Translates a WebGPU primitive topology to the GL draw-mode enum.
+pub fn map_primitive_topology(topology: wgt::PrimitiveTopology) -> u32 {
+    match topology {
+        wgt::PrimitiveTopology::PointList => glow::POINTS,
+        wgt::PrimitiveTopology::LineList => glow::LINES,
+        wgt::PrimitiveTopology::LineStrip => glow::LINE_STRIP,
+        wgt::PrimitiveTopology::TriangleList => glow::TRIANGLES,
+        wgt::PrimitiveTopology::TriangleStrip => glow::TRIANGLE_STRIP,
+    }
+}
+
+/// Translates WebGPU primitive state to the backend's rasterizer state.
+pub(super) fn map_primitive_state(state: &wgt::PrimitiveState) -> super::PrimitiveState {
+    //Note: state.polygon_mode is not supported, see `Features::POLYGON_MODE_LINE` and
+    //`Features::POLYGON_MODE_POINT`
+    super::PrimitiveState {
+        //Note: we are flipping the front face, so that
+        // the Y-flip in the generated GLSL keeps the same visibility.
+        // See `naga::back::glsl::WriterFlags::ADJUST_COORDINATE_SPACE`.
+        front_face: match state.front_face {
+            wgt::FrontFace::Cw => glow::CCW,
+            wgt::FrontFace::Ccw => glow::CW,
+        },
+        cull_face: match state.cull_mode {
+            Some(wgt::Face::Front) => glow::FRONT,
+            Some(wgt::Face::Back) => glow::BACK,
+            // 0 is used as the "no culling" sentinel.
+            None => 0,
+        },
+        unclipped_depth: state.unclipped_depth,
+    }
+}
+
+/// Translates a WebGPU texture view dimension to a GL texture target.
+pub fn _map_view_dimension(dim: wgt::TextureViewDimension) -> u32 {
+    use wgt::TextureViewDimension as Tvd;
+    match dim {
+        Tvd::D3 => glow::TEXTURE_3D,
+        Tvd::Cube => glow::TEXTURE_CUBE_MAP,
+        Tvd::CubeArray => glow::TEXTURE_CUBE_MAP_ARRAY,
+        Tvd::D2Array => glow::TEXTURE_2D_ARRAY,
+        // 1D views are bound through the 2D target.
+        Tvd::D1 | Tvd::D2 => glow::TEXTURE_2D,
+    }
+}
+
+/// Translates a WebGPU stencil operation to its GL enum.
+fn map_stencil_op(operation: wgt::StencilOperation) -> u32 {
+    match operation {
+        wgt::StencilOperation::Keep => glow::KEEP,
+        wgt::StencilOperation::Zero => glow::ZERO,
+        wgt::StencilOperation::Replace => glow::REPLACE,
+        wgt::StencilOperation::Invert => glow::INVERT,
+        wgt::StencilOperation::IncrementClamp => glow::INCR,
+        wgt::StencilOperation::IncrementWrap => glow::INCR_WRAP,
+        wgt::StencilOperation::DecrementClamp => glow::DECR,
+        wgt::StencilOperation::DecrementWrap => glow::DECR_WRAP,
+    }
+}
+
+/// Bundles the three per-face stencil operations into the backend's form.
+fn map_stencil_ops(face: &wgt::StencilFaceState) -> super::StencilOps {
+    let pass = map_stencil_op(face.pass_op);
+    let fail = map_stencil_op(face.fail_op);
+    let depth_fail = map_stencil_op(face.depth_fail_op);
+    super::StencilOps {
+        pass,
+        fail,
+        depth_fail,
+    }
+}
+
+/// Translates the full WebGPU stencil state into the backend representation.
+pub(super) fn map_stencil(state: &wgt::StencilState) -> super::StencilState {
+    // Both faces share the read/write masks; only the compare function and
+    // the per-face operations differ. The reference value starts at zero.
+    let make_side = |face: &wgt::StencilFaceState| super::StencilSide {
+        function: map_compare_func(face.compare),
+        mask_read: state.read_mask,
+        mask_write: state.write_mask,
+        reference: 0,
+        ops: map_stencil_ops(face),
+    };
+    super::StencilState {
+        front: make_side(&state.front),
+        back: make_side(&state.back),
+    }
+}
+
+/// Translates a WebGPU blend factor to the GL blend-func enum.
+fn map_blend_factor(factor: wgt::BlendFactor) -> u32 {
+    use wgt::BlendFactor as Bf;
+    match factor {
+        Bf::Zero => glow::ZERO,
+        Bf::One => glow::ONE,
+        Bf::Src => glow::SRC_COLOR,
+        Bf::SrcAlpha => glow::SRC_ALPHA,
+        Bf::SrcAlphaSaturated => glow::SRC_ALPHA_SATURATE,
+        Bf::OneMinusSrc => glow::ONE_MINUS_SRC_COLOR,
+        Bf::OneMinusSrcAlpha => glow::ONE_MINUS_SRC_ALPHA,
+        Bf::Dst => glow::DST_COLOR,
+        Bf::DstAlpha => glow::DST_ALPHA,
+        Bf::OneMinusDst => glow::ONE_MINUS_DST_COLOR,
+        Bf::OneMinusDstAlpha => glow::ONE_MINUS_DST_ALPHA,
+        Bf::Constant => glow::CONSTANT_COLOR,
+        Bf::OneMinusConstant => glow::ONE_MINUS_CONSTANT_COLOR,
+    }
+}
+
+/// Translates one WebGPU blend component (color or alpha) into the pair of
+/// GL blend factors plus the blend equation.
+fn map_blend_component(component: &wgt::BlendComponent) -> super::BlendComponent {
+    use wgt::BlendOperation as Bo;
+    let equation = match component.operation {
+        Bo::Add => glow::FUNC_ADD,
+        Bo::Subtract => glow::FUNC_SUBTRACT,
+        Bo::ReverseSubtract => glow::FUNC_REVERSE_SUBTRACT,
+        Bo::Min => glow::MIN,
+        Bo::Max => glow::MAX,
+    };
+    super::BlendComponent {
+        src: map_blend_factor(component.src_factor),
+        dst: map_blend_factor(component.dst_factor),
+        equation,
+    }
+}
+
+/// Translates a WebGPU blend state into the backend blend descriptor.
+pub(super) fn map_blend(blend: &wgt::BlendState) -> super::BlendDesc {
+    let color = map_blend_component(&blend.color);
+    let alpha = map_blend_component(&blend.alpha);
+    super::BlendDesc { color, alpha }
+}
+
+/// Translates a WebGPU storage-texture access mode to the GL image access enum.
+pub(super) fn map_storage_access(access: wgt::StorageTextureAccess) -> u32 {
+    use wgt::StorageTextureAccess as Sta;
+    match access {
+        Sta::ReadOnly => glow::READ_ONLY,
+        Sta::WriteOnly => glow::WRITE_ONLY,
+        Sta::ReadWrite => glow::READ_WRITE,
+    }
+}
+
+/// Returns true if the GLSL uniform type is any sampler type
+/// (float/signed/unsigned × 1D/2D/3D/cube/rect, array, shadow, multisample).
+pub(super) fn is_sampler(glsl_uniform_type: u32) -> bool {
+    match glsl_uniform_type {
+        glow::INT_SAMPLER_1D
+        | glow::INT_SAMPLER_1D_ARRAY
+        | glow::INT_SAMPLER_2D
+        | glow::INT_SAMPLER_2D_ARRAY
+        | glow::INT_SAMPLER_2D_MULTISAMPLE
+        | glow::INT_SAMPLER_2D_MULTISAMPLE_ARRAY
+        | glow::INT_SAMPLER_2D_RECT
+        | glow::INT_SAMPLER_3D
+        | glow::INT_SAMPLER_CUBE
+        | glow::INT_SAMPLER_CUBE_MAP_ARRAY
+        | glow::UNSIGNED_INT_SAMPLER_1D
+        | glow::UNSIGNED_INT_SAMPLER_1D_ARRAY
+        | glow::UNSIGNED_INT_SAMPLER_2D
+        | glow::UNSIGNED_INT_SAMPLER_2D_ARRAY
+        | glow::UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE
+        | glow::UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
+        | glow::UNSIGNED_INT_SAMPLER_2D_RECT
+        | glow::UNSIGNED_INT_SAMPLER_3D
+        | glow::UNSIGNED_INT_SAMPLER_CUBE
+        | glow::UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY
+        | glow::SAMPLER_1D
+        | glow::SAMPLER_1D_SHADOW
+        | glow::SAMPLER_1D_ARRAY
+        | glow::SAMPLER_1D_ARRAY_SHADOW
+        | glow::SAMPLER_2D
+        | glow::SAMPLER_2D_SHADOW
+        | glow::SAMPLER_2D_ARRAY
+        | glow::SAMPLER_2D_ARRAY_SHADOW
+        | glow::SAMPLER_2D_MULTISAMPLE
+        | glow::SAMPLER_2D_MULTISAMPLE_ARRAY
+        | glow::SAMPLER_2D_RECT
+        | glow::SAMPLER_2D_RECT_SHADOW
+        | glow::SAMPLER_3D
+        | glow::SAMPLER_CUBE
+        | glow::SAMPLER_CUBE_MAP_ARRAY
+        | glow::SAMPLER_CUBE_MAP_ARRAY_SHADOW
+        | glow::SAMPLER_CUBE_SHADOW => true,
+        _ => false,
+    }
+}
+
+/// Returns true if the GLSL uniform type is any image (storage texture) type.
+pub(super) fn is_image(glsl_uniform_type: u32) -> bool {
+    match glsl_uniform_type {
+        glow::INT_IMAGE_1D
+        | glow::INT_IMAGE_1D_ARRAY
+        | glow::INT_IMAGE_2D
+        | glow::INT_IMAGE_2D_ARRAY
+        | glow::INT_IMAGE_2D_MULTISAMPLE
+        | glow::INT_IMAGE_2D_MULTISAMPLE_ARRAY
+        | glow::INT_IMAGE_2D_RECT
+        | glow::INT_IMAGE_3D
+        | glow::INT_IMAGE_CUBE
+        | glow::INT_IMAGE_CUBE_MAP_ARRAY
+        | glow::UNSIGNED_INT_IMAGE_1D
+        | glow::UNSIGNED_INT_IMAGE_1D_ARRAY
+        | glow::UNSIGNED_INT_IMAGE_2D
+        | glow::UNSIGNED_INT_IMAGE_2D_ARRAY
+        | glow::UNSIGNED_INT_IMAGE_2D_MULTISAMPLE
+        | glow::UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
+        | glow::UNSIGNED_INT_IMAGE_2D_RECT
+        | glow::UNSIGNED_INT_IMAGE_3D
+        | glow::UNSIGNED_INT_IMAGE_CUBE
+        | glow::UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY
+        | glow::IMAGE_1D
+        | glow::IMAGE_1D_ARRAY
+        | glow::IMAGE_2D
+        | glow::IMAGE_2D_ARRAY
+        | glow::IMAGE_2D_MULTISAMPLE
+        | glow::IMAGE_2D_MULTISAMPLE_ARRAY
+        | glow::IMAGE_2D_RECT
+        | glow::IMAGE_3D
+        | glow::IMAGE_CUBE
+        | glow::IMAGE_CUBE_MAP_ARRAY => true,
+        _ => false,
+    }
+}
+
+/// Returns true if the GLSL uniform type is an atomic counter.
+pub(super) fn is_atomic_counter(glsl_uniform_type: u32) -> bool {
+    glsl_uniform_type == glow::UNSIGNED_INT_ATOMIC_COUNTER
+}
+
+/// Returns true for opaque GLSL types (samplers, images, atomic counters),
+/// which have no byte size and are skipped when sizing plain uniforms.
+pub(super) fn is_opaque_type(glsl_uniform_type: u32) -> bool {
+    is_sampler(glsl_uniform_type)
+        || is_image(glsl_uniform_type)
+        || is_atomic_counter(glsl_uniform_type)
+}
+
+/// Byte size of a plain GLSL uniform, used to lay out the push-constant
+/// uniform table. Panics on unsupported types.
+// NOTE(review): FLOAT_MAT3 = 36 assumes tight packing (9 × 4 bytes) rather
+// than std140's 48 — confirm this matches naga's uniform emission.
+pub(super) fn uniform_byte_size(glsl_uniform_type: u32) -> u32 {
+    match glsl_uniform_type {
+        glow::FLOAT | glow::INT => 4,
+        glow::FLOAT_VEC2 | glow::INT_VEC2 => 8,
+        glow::FLOAT_VEC3 | glow::INT_VEC3 => 12,
+        glow::FLOAT_VEC4 | glow::INT_VEC4 => 16,
+        glow::FLOAT_MAT2 => 16,
+        glow::FLOAT_MAT3 => 36,
+        glow::FLOAT_MAT4 => 64,
+        _ => panic!("Unsupported uniform datatype! {glsl_uniform_type:#X}"),
+    }
+}
diff --git a/third_party/rust/wgpu-hal/src/gles/device.rs b/third_party/rust/wgpu-hal/src/gles/device.rs
new file mode 100644
index 0000000000..0a1cfaf241
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/device.rs
@@ -0,0 +1,1328 @@
+use super::conv;
+use crate::auxil::map_naga_stage;
+use glow::HasContext;
+use std::{
+ convert::TryInto,
+ ptr,
+ sync::{Arc, Mutex},
+};
+
+use arrayvec::ArrayVec;
+#[cfg(not(target_arch = "wasm32"))]
+use std::mem;
+use std::sync::atomic::Ordering;
+
+// A shader stage paired with the programmable-stage description to compile.
+type ShaderStage<'a> = (
+    naga::ShaderStage,
+    &'a crate::ProgrammableStage<'a, super::Api>,
+);
+// GLSL uniform/block name -> (binding register, linear slot index).
+type NameBindingMap = rustc_hash::FxHashMap<String, (super::BindingRegister, u8)>;
+
+/// Context threaded through shader translation; collects reflection results
+/// into the caller's sampler and name→binding maps.
+struct CompilationContext<'a> {
+    layout: &'a super::PipelineLayout,
+    // Texture slot -> sampler slot pairing discovered from reflection.
+    sampler_map: &'a mut super::SamplerBindMap,
+    // GLSL uniform name -> (register, slot), filled by `consume_reflection`.
+    name_binding_map: &'a mut NameBindingMap,
+    multiview: Option<std::num::NonZeroU32>,
+}
+
+impl CompilationContext<'_> {
+    /// Folds naga's GLSL reflection info into the context's maps: buffer
+    /// globals become name -> (register, slot) entries, textures record
+    /// their slot plus the sampler slot they are combined with.
+    /// Globals unused by this entry point are skipped.
+    fn consume_reflection(
+        self,
+        module: &naga::Module,
+        ep_info: &naga::valid::FunctionInfo,
+        reflection_info: naga::back::glsl::ReflectionInfo,
+    ) {
+        for (handle, var) in module.global_variables.iter() {
+            if ep_info[handle].is_empty() {
+                continue;
+            }
+            // Only buffer-like address spaces occupy binding registers here.
+            let register = match var.space {
+                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
+                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
+                _ => continue,
+            };
+
+            let br = var.binding.as_ref().unwrap();
+            let slot = self.layout.get_slot(br);
+
+            let name = match reflection_info.uniforms.get(&handle) {
+                Some(name) => name.clone(),
+                None => continue,
+            };
+            log::debug!(
+                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
+                var.name.as_ref(),
+                &name,
+                register,
+                slot
+            );
+            self.name_binding_map.insert(name, (register, slot));
+        }
+
+        // Texture (and storage image) bindings, plus the sampler each
+        // texture is statically combined with in the generated GLSL.
+        for (name, mapping) in reflection_info.texture_mapping {
+            let var = &module.global_variables[mapping.texture];
+            let register = match module.types[var.ty].inner {
+                naga::TypeInner::Image {
+                    class: naga::ImageClass::Storage { .. },
+                    ..
+                } => super::BindingRegister::Images,
+                _ => super::BindingRegister::Textures,
+            };
+
+            let tex_br = var.binding.as_ref().unwrap();
+            let texture_linear_index = self.layout.get_slot(tex_br);
+
+            self.name_binding_map
+                .insert(name, (register, texture_linear_index));
+            if let Some(sampler_handle) = mapping.sampler {
+                let sam_br = module.global_variables[sampler_handle]
+                    .binding
+                    .as_ref()
+                    .unwrap();
+                let sampler_linear_index = self.layout.get_slot(sam_br);
+                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
+            }
+        }
+    }
+}
+
+impl super::Device {
+    /// Wraps an existing GL texture object into a [`super::Texture`].
+    ///
+    /// # Safety
+    ///
+    /// - `name` must be created respecting `desc`
+    /// - `name` must be a texture
+    /// - If `drop_guard` is [`None`], wgpu-hal will take ownership of the texture. If `drop_guard` is
+    ///   [`Some`], the texture must be valid until the drop implementation
+    ///   of the drop guard is called.
+    #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
+    pub unsafe fn texture_from_raw(
+        &self,
+        name: std::num::NonZeroU32,
+        desc: &crate::TextureDescriptor,
+        drop_guard: Option<crate::DropGuard>,
+    ) -> super::Texture {
+        // Derive the GL target and cubemap-ness from the descriptor.
+        let (target, _, is_cubemap) = super::Texture::get_info_from_desc(desc);
+
+        super::Texture {
+            inner: super::TextureInner::Texture {
+                raw: glow::NativeTexture(name),
+                target,
+            },
+            drop_guard,
+            mip_level_count: desc.mip_level_count,
+            array_layer_count: desc.array_layer_count(),
+            format: desc.format,
+            format_desc: self.shared.describe_texture_format(desc.format),
+            copy_size: desc.copy_extent(),
+            is_cubemap,
+        }
+    }
+
+    /// Wraps an existing GL renderbuffer object into a [`super::Texture`].
+    ///
+    /// # Safety
+    ///
+    /// - `name` must be created respecting `desc`
+    /// - `name` must be a renderbuffer
+    /// - If `drop_guard` is [`None`], wgpu-hal will take ownership of the renderbuffer. If `drop_guard` is
+    ///   [`Some`], the renderbuffer must be valid until the drop implementation
+    ///   of the drop guard is called.
+    #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
+    pub unsafe fn texture_from_raw_renderbuffer(
+        &self,
+        name: std::num::NonZeroU32,
+        desc: &crate::TextureDescriptor,
+        drop_guard: Option<crate::DropGuard>,
+    ) -> super::Texture {
+        super::Texture {
+            inner: super::TextureInner::Renderbuffer {
+                raw: glow::NativeRenderbuffer(name),
+            },
+            drop_guard,
+            mip_level_count: desc.mip_level_count,
+            array_layer_count: desc.array_layer_count(),
+            format: desc.format,
+            format_desc: self.shared.describe_texture_format(desc.format),
+            copy_size: desc.copy_extent(),
+            // Renderbuffers cannot be cubemaps.
+            is_cubemap: false,
+        }
+    }
+
+    /// Compiles one translated GLSL source into a GL shader object of the
+    /// stage's kind, attaching `label` as a debug name when supported.
+    /// Returns `PipelineError::Linkage` with the driver's info log on failure.
+    unsafe fn compile_shader(
+        gl: &glow::Context,
+        shader: &str,
+        naga_stage: naga::ShaderStage,
+        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
+    ) -> Result<glow::Shader, crate::PipelineError> {
+        let target = match naga_stage {
+            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
+            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
+            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
+        };
+
+        let raw = unsafe { gl.create_shader(target) }.unwrap();
+        #[cfg(not(target_arch = "wasm32"))]
+        if gl.supports_debug() {
+            //TODO: remove all transmutes from `object_label`
+            // https://github.com/grovesNL/glow/issues/186
+            let name = unsafe { mem::transmute(raw) };
+            unsafe { gl.object_label(glow::SHADER, name, label) };
+        }
+
+        unsafe { gl.shader_source(raw, shader) };
+        unsafe { gl.compile_shader(raw) };
+
+        log::info!("\tCompiled shader {:?}", raw);
+
+        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
+        let msg = unsafe { gl.get_shader_info_log(raw) };
+        if compiled_ok {
+            // The driver may still emit warnings for a successful compile.
+            if !msg.is_empty() {
+                log::warn!("\tCompile: {}", msg);
+            }
+            Ok(raw)
+        } else {
+            Err(crate::PipelineError::Linkage(
+                map_naga_stage(naga_stage),
+                msg,
+            ))
+        }
+    }
+
+    /// Translates the naga IR for `stage` into GLSL using the layout's
+    /// options, records reflection results into `context`, then compiles the
+    /// generated source via `compile_shader`.
+    fn create_shader(
+        gl: &glow::Context,
+        naga_stage: naga::ShaderStage,
+        stage: &crate::ProgrammableStage<super::Api>,
+        context: CompilationContext,
+    ) -> Result<glow::Shader, crate::PipelineError> {
+        use naga::back::glsl;
+        let pipeline_options = glsl::PipelineOptions {
+            shader_stage: naga_stage,
+            entry_point: stage.entry_point.to_string(),
+            multiview: context.multiview,
+        };
+
+        let shader = &stage.module.naga;
+        let entry_point_index = shader
+            .module
+            .entry_points
+            .iter()
+            .position(|ep| ep.name.as_str() == stage.entry_point)
+            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;
+
+        use naga::proc::BoundsCheckPolicy;
+        // The image bounds checks require the TEXTURE_LEVELS feature available in GL core 1.3+.
+        let version = gl.version();
+        let image_check = if !version.is_embedded && (version.major, version.minor) >= (1, 3) {
+            BoundsCheckPolicy::ReadZeroSkipWrite
+        } else {
+            BoundsCheckPolicy::Unchecked
+        };
+
+        // Other bounds check are either provided by glsl or not implemented yet.
+        let policies = naga::proc::BoundsCheckPolicies {
+            index: BoundsCheckPolicy::Unchecked,
+            buffer: BoundsCheckPolicy::Unchecked,
+            image: image_check,
+            binding_array: BoundsCheckPolicy::Unchecked,
+        };
+
+        let mut output = String::new();
+        let mut writer = glsl::Writer::new(
+            &mut output,
+            &shader.module,
+            &shader.info,
+            &context.layout.naga_options,
+            &pipeline_options,
+            policies,
+        )
+        .map_err(|e| {
+            let msg = format!("{e}");
+            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
+        })?;
+
+        let reflection_info = writer.write().map_err(|e| {
+            let msg = format!("{e}");
+            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
+        })?;
+
+        log::debug!("Naga generated shader:\n{}", output);
+
+        // Fold name/binding reflection into the caller's maps before the
+        // source is handed to the GL compiler.
+        context.consume_reflection(
+            &shader.module,
+            shader.info.get_entry_point(entry_point_index),
+            reflection_info,
+        );
+
+        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
+    }
+
+    /// Returns a linked program for the given shader set, going through the
+    /// shared program cache keyed by (stages, binding layout) so identical
+    /// pipelines are not relinked.
+    unsafe fn create_pipeline<'a>(
+        &self,
+        gl: &glow::Context,
+        shaders: ArrayVec<ShaderStage<'a>, 3>,
+        layout: &super::PipelineLayout,
+        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
+        multiview: Option<std::num::NonZeroU32>,
+    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
+        let mut program_stages = ArrayVec::new();
+        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
+        for group in &*layout.group_infos {
+            group_to_binding_to_slot.push(group.binding_to_slot.clone());
+        }
+        for &(naga_stage, stage) in &shaders {
+            program_stages.push(super::ProgramStage {
+                naga_stage: naga_stage.to_owned(),
+                shader_id: stage.module.id,
+                entry_point: stage.entry_point.to_owned(),
+            });
+        }
+        let glsl_version = match self.shared.shading_language_version {
+            naga::back::glsl::Version::Embedded { version, .. } => version,
+            naga::back::glsl::Version::Desktop(_) => unreachable!(),
+        };
+        let mut guard = self
+            .shared
+            .program_cache
+            .try_lock()
+            .expect("Couldn't acquire program_cache lock");
+        // This guard ensures that we can't accidentally destroy a program whilst we're about to reuse it
+        // The only place that destroys a pipeline is also locking on `program_cache`
+        let program = guard
+            .entry(super::ProgramCacheKey {
+                stages: program_stages,
+                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
+            })
+            .or_insert_with(|| unsafe {
+                Self::create_program(
+                    gl,
+                    shaders,
+                    layout,
+                    label,
+                    multiview,
+                    glsl_version,
+                    self.shared.private_caps,
+                )
+            })
+            .to_owned()?;
+        drop(guard);
+
+        Ok(program)
+    }
+
+    /// Compiles and links all stages into one GL program, remaps bindings by
+    /// name when explicit layout qualifiers are unavailable, and gathers
+    /// plain uniforms used for push-constant emulation.
+    unsafe fn create_program<'a>(
+        gl: &glow::Context,
+        shaders: ArrayVec<ShaderStage<'a>, 3>,
+        layout: &super::PipelineLayout,
+        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
+        multiview: Option<std::num::NonZeroU32>,
+        glsl_version: u16,
+        private_caps: super::PrivateCapabilities,
+    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
+        let program = unsafe { gl.create_program() }.unwrap();
+        #[cfg(not(target_arch = "wasm32"))]
+        if let Some(label) = label {
+            if gl.supports_debug() {
+                let name = unsafe { mem::transmute(program) };
+                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
+            }
+        }
+
+        let mut name_binding_map = NameBindingMap::default();
+        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
+        let mut has_stages = wgt::ShaderStages::empty();
+        let mut shaders_to_delete = arrayvec::ArrayVec::<_, 3>::new();
+
+        for (naga_stage, stage) in shaders {
+            has_stages |= map_naga_stage(naga_stage);
+            let context = CompilationContext {
+                layout,
+                sampler_map: &mut sampler_map,
+                name_binding_map: &mut name_binding_map,
+                multiview,
+            };
+
+            let shader = Self::create_shader(gl, naga_stage, stage, context)?;
+            shaders_to_delete.push(shader);
+        }
+
+        // Create empty fragment shader if only vertex shader is present
+        if has_stages == wgt::ShaderStages::VERTEX {
+            let shader_src = format!("#version {glsl_version} es \n void main(void) {{}}",);
+            log::info!("Only vertex shader is present. Creating an empty fragment shader",);
+            let shader = unsafe {
+                Self::compile_shader(
+                    gl,
+                    &shader_src,
+                    naga::ShaderStage::Fragment,
+                    Some("(wgpu internal) dummy fragment shader"),
+                )
+            }?;
+            shaders_to_delete.push(shader);
+        }
+
+        for &shader in shaders_to_delete.iter() {
+            unsafe { gl.attach_shader(program, shader) };
+        }
+        unsafe { gl.link_program(program) };
+
+        // Shader objects are no longer needed once the program is linked.
+        for shader in shaders_to_delete {
+            unsafe { gl.delete_shader(shader) };
+        }
+
+        log::info!("\tLinked program {:?}", program);
+
+        let linked_ok = unsafe { gl.get_program_link_status(program) };
+        let msg = unsafe { gl.get_program_info_log(program) };
+        if !linked_ok {
+            return Err(crate::PipelineError::Linkage(has_stages, msg));
+        }
+        if !msg.is_empty() {
+            log::warn!("\tLink: {}", msg);
+        }
+
+        if !private_caps.contains(super::PrivateCapabilities::SHADER_BINDING_LAYOUT) {
+            // This remapping is only needed if we aren't able to put the binding layout
+            // in the shader. We can't remap storage buffers this way.
+            unsafe { gl.use_program(Some(program)) };
+            for (ref name, (register, slot)) in name_binding_map {
+                log::trace!("Get binding {:?} from program {:?}", name, program);
+                match register {
+                    super::BindingRegister::UniformBuffers => {
+                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
+                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
+                    }
+                    super::BindingRegister::StorageBuffers => {
+                        // Storage blocks cannot be rebound at runtime; this
+                        // configuration is unsupported, so fail the pipeline.
+                        let index =
+                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
+                        log::error!(
+                            "Unable to re-map shader storage block {} to {}",
+                            name,
+                            index
+                        );
+                        return Err(crate::DeviceError::Lost.into());
+                    }
+                    super::BindingRegister::Textures | super::BindingRegister::Images => {
+                        let location = unsafe { gl.get_uniform_location(program, name) };
+                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
+                    }
+                }
+            }
+        }
+
+        // Scan active plain uniforms (skipping opaque types) to lay out the
+        // push-constant emulation table at 4-byte granularity.
+        let mut uniforms: [super::UniformDesc; super::MAX_PUSH_CONSTANTS] =
+            [None; super::MAX_PUSH_CONSTANTS].map(|_: Option<()>| Default::default());
+        let count = unsafe { gl.get_active_uniforms(program) };
+        let mut offset = 0;
+
+        for uniform in 0..count {
+            let glow::ActiveUniform { utype, name, .. } =
+                unsafe { gl.get_active_uniform(program, uniform) }.unwrap();
+
+            if conv::is_opaque_type(utype) {
+                continue;
+            }
+
+            if let Some(location) = unsafe { gl.get_uniform_location(program, &name) } {
+                if uniforms[offset / 4].location.is_some() {
+                    panic!("Offset already occupied")
+                }
+
+                // `size` will always be 1 so we need to guess the real size from the type
+                let uniform_size = conv::uniform_byte_size(utype);
+
+                uniforms[offset / 4] = super::UniformDesc {
+                    location: Some(location),
+                    size: uniform_size,
+                    utype,
+                };
+
+                offset += uniform_size as usize;
+            }
+        }
+
+        Ok(Arc::new(super::PipelineInner {
+            program,
+            sampler_map,
+            uniforms,
+        }))
+    }
+}
+
+impl crate::Device<super::Api> for super::Device {
+    // Tears down device- and queue-owned GL objects: the device's main VAO,
+    // plus the queue's draw/copy framebuffers and its zero-fill buffer.
+    unsafe fn exit(self, queue: super::Queue) {
+        let gl = &self.shared.context.lock();
+        unsafe { gl.delete_vertex_array(self.main_vao) };
+        unsafe { gl.delete_framebuffer(queue.draw_fbo) };
+        unsafe { gl.delete_framebuffer(queue.copy_fbo) };
+        unsafe { gl.delete_buffer(queue.zero_buffer) };
+    }
+
+    // Creates a buffer resource.
+    //
+    // When mapping must be emulated (the EMULATE_BUFFER_MAP workaround is
+    // active, or BUFFER_ALLOCATION / glBufferStorage is unavailable) and the
+    // usage includes MAP_WRITE, no GL buffer object is created at all: the
+    // contents live entirely in a CPU-side `Vec` (`raw: None`). Emulated
+    // MAP_READ buffers keep a CPU shadow allocation (`data`) next to a real
+    // GL buffer. Returns `OutOfMemory` if GL object creation or the size
+    // conversion fails.
+    unsafe fn create_buffer(
+        &self,
+        desc: &crate::BufferDescriptor,
+    ) -> Result<super::Buffer, crate::DeviceError> {
+        // Pick the binding point used for allocation/mapping; INDEX buffers
+        // must use ELEMENT_ARRAY_BUFFER, everything else uses ARRAY_BUFFER.
+        let target = if desc.usage.contains(crate::BufferUses::INDEX) {
+            glow::ELEMENT_ARRAY_BUFFER
+        } else {
+            glow::ARRAY_BUFFER
+        };
+
+        let emulate_map = self
+            .shared
+            .workarounds
+            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
+            || !self
+                .shared
+                .private_caps
+                .contains(super::PrivateCapabilities::BUFFER_ALLOCATION);
+
+        if emulate_map && desc.usage.intersects(crate::BufferUses::MAP_WRITE) {
+            return Ok(super::Buffer {
+                raw: None,
+                target,
+                size: desc.size,
+                map_flags: 0,
+                data: Some(Arc::new(Mutex::new(vec![0; desc.size as usize]))),
+            });
+        }
+
+        let gl = &self.shared.context.lock();
+
+        // Note: `target` was already computed above; the original code
+        // recomputed the identical expression here, which was redundant.
+
+        let is_host_visible = desc
+            .usage
+            .intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE);
+        let is_coherent = desc
+            .memory_flags
+            .contains(crate::MemoryFlags::PREFER_COHERENT);
+
+        let mut map_flags = 0;
+        if desc.usage.contains(crate::BufferUses::MAP_READ) {
+            map_flags |= glow::MAP_READ_BIT;
+        }
+        if desc.usage.contains(crate::BufferUses::MAP_WRITE) {
+            map_flags |= glow::MAP_WRITE_BIT;
+        }
+
+        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
+        unsafe { gl.bind_buffer(target, raw) };
+        let raw_size = desc
+            .size
+            .try_into()
+            .map_err(|_| crate::DeviceError::OutOfMemory)?;
+
+        if self
+            .shared
+            .private_caps
+            .contains(super::PrivateCapabilities::BUFFER_ALLOCATION)
+        {
+            // Immutable storage path: persistent (and optionally coherent)
+            // mapping flags must be baked in at allocation time.
+            if is_host_visible {
+                map_flags |= glow::MAP_PERSISTENT_BIT;
+                if is_coherent {
+                    map_flags |= glow::MAP_COHERENT_BIT;
+                }
+            }
+            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
+        } else {
+            assert!(!is_coherent);
+            let usage = if is_host_visible {
+                if desc.usage.contains(crate::BufferUses::MAP_READ) {
+                    glow::STREAM_READ
+                } else {
+                    glow::DYNAMIC_DRAW
+                }
+            } else {
+                // Even if the usage doesn't contain SRC_READ, we update it internally at least once
+                // Some vendors take usage very literally and STATIC_DRAW will freeze us with an empty buffer
+                // https://github.com/gfx-rs/wgpu/issues/3371
+                glow::DYNAMIC_DRAW
+            };
+            unsafe { gl.buffer_data_size(target, raw_size, usage) };
+        }
+
+        unsafe { gl.bind_buffer(target, None) };
+
+        if !is_coherent && desc.usage.contains(crate::BufferUses::MAP_WRITE) {
+            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
+        }
+        //TODO: do we need `glow::MAP_UNSYNCHRONIZED_BIT`?
+
+        #[cfg(not(target_arch = "wasm32"))]
+        if let Some(label) = desc.label {
+            if gl.supports_debug() {
+                let name = unsafe { mem::transmute(raw) };
+                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
+            }
+        }
+
+        // Shadow allocation used by `map_buffer` to service emulated reads.
+        let data = if emulate_map && desc.usage.contains(crate::BufferUses::MAP_READ) {
+            Some(Arc::new(Mutex::new(vec![0; desc.size as usize])))
+        } else {
+            None
+        };
+
+        Ok(super::Buffer {
+            raw,
+            target,
+            size: desc.size,
+            map_flags,
+            data,
+        })
+    }
+    // Deletes the GL buffer object, if one exists. Fully CPU-emulated buffers
+    // have `raw == None` and nothing to free on the GL side.
+    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
+        if let Some(raw) = buffer.raw {
+            let gl = &self.shared.context.lock();
+            unsafe { gl.delete_buffer(raw) };
+        }
+    }
+
+    // Maps a range of the buffer for CPU access. Three cases:
+    //  * `raw == None`: fully CPU-emulated buffer -- return a pointer straight
+    //    into the shadow `Vec`.
+    //  * real GL buffer with a `data` shadow: emulated MAP_READ -- copy the GL
+    //    contents into the shadow first, then hand out a pointer to it.
+    //  * otherwise: map the GL buffer directly via `map_buffer_range`.
+    // NOTE(review): the returned pointer outlives the MutexGuard taken on the
+    // shadow Vec; soundness relies on callers respecting the map/unmap
+    // discipline -- confirm.
+    unsafe fn map_buffer(
+        &self,
+        buffer: &super::Buffer,
+        range: crate::MemoryRange,
+    ) -> Result<crate::BufferMapping, crate::DeviceError> {
+        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
+        let ptr = match buffer.raw {
+            None => {
+                let mut vec = buffer.data.as_ref().unwrap().lock().unwrap();
+                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
+                slice.as_mut_ptr()
+            }
+            Some(raw) => {
+                let gl = &self.shared.context.lock();
+                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
+                let ptr = if let Some(ref map_read_allocation) = buffer.data {
+                    let mut guard = map_read_allocation.lock().unwrap();
+                    let slice = guard.as_mut_slice();
+                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
+                    slice.as_mut_ptr()
+                } else {
+                    unsafe {
+                        gl.map_buffer_range(
+                            buffer.target,
+                            range.start as i32,
+                            (range.end - range.start) as i32,
+                            buffer.map_flags,
+                        )
+                    }
+                };
+                unsafe { gl.bind_buffer(buffer.target, None) };
+                ptr
+            }
+        };
+        Ok(crate::BufferMapping {
+            // A null pointer from GL means the mapping failed -> device lost.
+            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
+            is_coherent,
+        })
+    }
+    // Only buffers that were mapped directly through GL (real `raw`, no `data`
+    // shadow) need `unmap_buffer`; emulated paths have nothing to release.
+    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) -> Result<(), crate::DeviceError> {
+        if let Some(raw) = buffer.raw {
+            if buffer.data.is_none() {
+                let gl = &self.shared.context.lock();
+                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
+                unsafe { gl.unmap_buffer(buffer.target) };
+                unsafe { gl.bind_buffer(buffer.target, None) };
+            }
+        }
+        Ok(())
+    }
+    // Flushes written sub-ranges of a non-coherent, directly-mapped buffer
+    // (these were mapped with MAP_FLUSH_EXPLICIT_BIT). No-op for emulated
+    // buffers (`raw == None`).
+    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
+    where
+        I: Iterator<Item = crate::MemoryRange>,
+    {
+        if let Some(raw) = buffer.raw {
+            let gl = &self.shared.context.lock();
+            unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
+            for range in ranges {
+                unsafe {
+                    gl.flush_mapped_buffer_range(
+                        buffer.target,
+                        range.start as i32,
+                        (range.end - range.start) as i32,
+                    )
+                };
+            }
+        }
+    }
+    // Invalidation is currently a no-op on this backend.
+    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
+        //TODO: do we need to do anything?
+    }
+
+    // Creates a texture. If the texture is only ever used as a render-target
+    // attachment (its usage is a subset of `render_usage`), is 2D, and has a
+    // single layer, a renderbuffer is allocated instead of a texture object --
+    // such a resource is never sampled, so a full texture is unnecessary.
+    // Otherwise immutable texture storage is allocated (2D, 2D-multisample,
+    // or 3D/array depending on the descriptor).
+    unsafe fn create_texture(
+        &self,
+        desc: &crate::TextureDescriptor,
+    ) -> Result<super::Texture, crate::DeviceError> {
+        let gl = &self.shared.context.lock();
+
+        let render_usage = crate::TextureUses::COLOR_TARGET
+            | crate::TextureUses::DEPTH_STENCIL_WRITE
+            | crate::TextureUses::DEPTH_STENCIL_READ;
+        let format_desc = self.shared.describe_texture_format(desc.format);
+
+        let (inner, is_cubemap) = if render_usage.contains(desc.usage)
+            && desc.dimension == wgt::TextureDimension::D2
+            && desc.size.depth_or_array_layers == 1
+        {
+            let raw = unsafe { gl.create_renderbuffer().unwrap() };
+            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
+            if desc.sample_count > 1 {
+                unsafe {
+                    gl.renderbuffer_storage_multisample(
+                        glow::RENDERBUFFER,
+                        desc.sample_count as i32,
+                        format_desc.internal,
+                        desc.size.width as i32,
+                        desc.size.height as i32,
+                    )
+                };
+            } else {
+                unsafe {
+                    gl.renderbuffer_storage(
+                        glow::RENDERBUFFER,
+                        format_desc.internal,
+                        desc.size.width as i32,
+                        desc.size.height as i32,
+                    )
+                };
+            }
+
+            #[cfg(not(target_arch = "wasm32"))]
+            if let Some(label) = desc.label {
+                if gl.supports_debug() {
+                    let name = unsafe { mem::transmute(raw) };
+                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
+                }
+            }
+
+            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
+            (super::TextureInner::Renderbuffer { raw }, false)
+        } else {
+            let raw = unsafe { gl.create_texture().unwrap() };
+            let (target, is_3d, is_cubemap) = super::Texture::get_info_from_desc(desc);
+
+            unsafe { gl.bind_texture(target, Some(raw)) };
+            //Note: this has to be done before defining the storage!
+            match desc.format.sample_type(None) {
+                Some(
+                    wgt::TextureSampleType::Float { filterable: false }
+                    | wgt::TextureSampleType::Uint
+                    | wgt::TextureSampleType::Sint,
+                ) => {
+                    // reset default filtering mode
+                    unsafe {
+                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
+                    };
+                    unsafe {
+                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
+                    };
+                }
+                _ => {}
+            }
+
+            if is_3d {
+                unsafe {
+                    gl.tex_storage_3d(
+                        target,
+                        desc.mip_level_count as i32,
+                        format_desc.internal,
+                        desc.size.width as i32,
+                        desc.size.height as i32,
+                        desc.size.depth_or_array_layers as i32,
+                    )
+                };
+            } else if desc.sample_count > 1 {
+                unsafe {
+                    gl.tex_storage_2d_multisample(
+                        target,
+                        desc.sample_count as i32,
+                        format_desc.internal,
+                        desc.size.width as i32,
+                        desc.size.height as i32,
+                        true,
+                    )
+                };
+            } else {
+                unsafe {
+                    gl.tex_storage_2d(
+                        target,
+                        desc.mip_level_count as i32,
+                        format_desc.internal,
+                        desc.size.width as i32,
+                        desc.size.height as i32,
+                    )
+                };
+            }
+
+            #[cfg(not(target_arch = "wasm32"))]
+            if let Some(label) = desc.label {
+                if gl.supports_debug() {
+                    let name = unsafe { mem::transmute(raw) };
+                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
+                }
+            }
+
+            unsafe { gl.bind_texture(target, None) };
+            (super::TextureInner::Texture { raw, target }, is_cubemap)
+        };
+
+        Ok(super::Texture {
+            inner,
+            drop_guard: None,
+            mip_level_count: desc.mip_level_count,
+            array_layer_count: desc.array_layer_count(),
+            format: desc.format,
+            format_desc,
+            copy_size: desc.copy_extent(),
+            is_cubemap,
+        })
+    }
+    // Deletes the underlying GL object unless a drop guard indicates the
+    // handle is externally owned (in which case nothing is deleted here).
+    unsafe fn destroy_texture(&self, texture: super::Texture) {
+        if texture.drop_guard.is_none() {
+            let gl = &self.shared.context.lock();
+            match texture.inner {
+                super::TextureInner::Renderbuffer { raw, .. } => {
+                    unsafe { gl.delete_renderbuffer(raw) };
+                }
+                super::TextureInner::DefaultRenderbuffer => {}
+                super::TextureInner::Texture { raw, .. } => {
+                    unsafe { gl.delete_texture(raw) };
+                }
+                #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+                super::TextureInner::ExternalFramebuffer { .. } => {}
+            }
+        }
+
+        // For clarity, we explicitly drop the drop guard. Although this has no real semantic effect as the
+        // end of the scope will drop the drop guard since this function takes ownership of the texture.
+        drop(texture.drop_guard);
+    }
+
+    // Texture views do not create any GL objects: a view is the texture's
+    // inner handle plus sub-range metadata (aspects, mip levels, layers),
+    // which is why `destroy_texture_view` below is a no-op.
+    unsafe fn create_texture_view(
+        &self,
+        texture: &super::Texture,
+        desc: &crate::TextureViewDescriptor,
+    ) -> Result<super::TextureView, crate::DeviceError> {
+        Ok(super::TextureView {
+            //TODO: use `conv::map_view_dimension(desc.dimension)`?
+            inner: texture.inner.clone(),
+            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
+            mip_levels: desc.range.mip_range(texture.mip_level_count),
+            array_layers: desc.range.layer_range(texture.array_layer_count),
+            format: texture.format,
+        })
+    }
+    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {}
+
+    // Creates a GL sampler object with all parameters (filtering, wrapping,
+    // border color, LOD clamp, anisotropy, comparison) baked in at creation.
+    unsafe fn create_sampler(
+        &self,
+        desc: &crate::SamplerDescriptor,
+    ) -> Result<super::Sampler, crate::DeviceError> {
+        let gl = &self.shared.context.lock();
+
+        let raw = unsafe { gl.create_sampler().unwrap() };
+
+        let (min, mag) =
+            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);
+
+        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
+        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };
+
+        unsafe {
+            gl.sampler_parameter_i32(
+                raw,
+                glow::TEXTURE_WRAP_S,
+                conv::map_address_mode(desc.address_modes[0]) as i32,
+            )
+        };
+        unsafe {
+            gl.sampler_parameter_i32(
+                raw,
+                glow::TEXTURE_WRAP_T,
+                conv::map_address_mode(desc.address_modes[1]) as i32,
+            )
+        };
+        unsafe {
+            gl.sampler_parameter_i32(
+                raw,
+                glow::TEXTURE_WRAP_R,
+                conv::map_address_mode(desc.address_modes[2]) as i32,
+            )
+        };
+
+        // `Zero` is treated the same as `TransparentBlack` here.
+        if let Some(border_color) = desc.border_color {
+            let border = match border_color {
+                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
+                    [0.0; 4]
+                }
+                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
+                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
+            };
+            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
+        }
+
+        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
+        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };
+
+        // If clamp is not 1, we know anisotropy is supported up to 16x
+        if desc.anisotropy_clamp != 1 {
+            unsafe {
+                gl.sampler_parameter_i32(
+                    raw,
+                    glow::TEXTURE_MAX_ANISOTROPY,
+                    desc.anisotropy_clamp as i32,
+                )
+            };
+        }
+
+        //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0);
+
+        if let Some(compare) = desc.compare {
+            unsafe {
+                gl.sampler_parameter_i32(
+                    raw,
+                    glow::TEXTURE_COMPARE_MODE,
+                    glow::COMPARE_REF_TO_TEXTURE as i32,
+                )
+            };
+            unsafe {
+                gl.sampler_parameter_i32(
+                    raw,
+                    glow::TEXTURE_COMPARE_FUNC,
+                    conv::map_compare_func(compare) as i32,
+                )
+            };
+        }
+
+        #[cfg(not(target_arch = "wasm32"))]
+        if let Some(label) = desc.label {
+            if gl.supports_debug() {
+                let name = unsafe { mem::transmute(raw) };
+                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
+            }
+        }
+
+        Ok(super::Sampler { raw })
+    }
+    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
+        let gl = &self.shared.context.lock();
+        unsafe { gl.delete_sampler(sampler.raw) };
+    }
+
+    // Command encoders record into a CPU-side `CommandBuffer`; no GL context
+    // access is needed to create (or destroy) one.
+    unsafe fn create_command_encoder(
+        &self,
+        _desc: &crate::CommandEncoderDescriptor<super::Api>,
+    ) -> Result<super::CommandEncoder, crate::DeviceError> {
+        Ok(super::CommandEncoder {
+            cmd_buffer: super::CommandBuffer::default(),
+            state: Default::default(),
+            private_caps: self.shared.private_caps,
+        })
+    }
+    unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}
+
+    // A bind group layout is just a shared copy of the entry descriptors; no
+    // GL objects are involved, so destruction is a no-op.
+    unsafe fn create_bind_group_layout(
+        &self,
+        desc: &crate::BindGroupLayoutDescriptor,
+    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
+        Ok(super::BindGroupLayout {
+            entries: Arc::from(desc.entries),
+        })
+    }
+    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {}
+
+    // Flattens all bind groups into linear slot ranges, one counter per
+    // register class (samplers, textures, images, uniform buffers, storage
+    // buffers), and records the resulting (group, binding) -> slot mapping
+    // both per group (`binding_to_slot`) and in the naga GLSL `binding_map`
+    // used when translating shaders.
+    unsafe fn create_pipeline_layout(
+        &self,
+        desc: &crate::PipelineLayoutDescriptor<super::Api>,
+    ) -> Result<super::PipelineLayout, crate::DeviceError> {
+        use naga::back::glsl;
+
+        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
+        let mut num_samplers = 0u8;
+        let mut num_textures = 0u8;
+        let mut num_images = 0u8;
+        let mut num_uniform_buffers = 0u8;
+        let mut num_storage_buffers = 0u8;
+
+        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
+        writer_flags.set(
+            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
+            self.shared
+                .private_caps
+                .contains(super::PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
+        );
+        // We always force point size to be written and it will be ignored by the driver if it's not a point list primitive.
+        // https://github.com/gfx-rs/wgpu/pull/3440/files#r1095726950
+        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
+        let mut binding_map = glsl::BindingMap::default();
+
+        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
+            // create a vector with the size enough to hold all the bindings, filled with `!0`
+            let mut binding_to_slot = vec![
+                !0;
+                bg_layout
+                    .entries
+                    .last()
+                    .map_or(0, |b| b.binding as usize + 1)
+            ]
+            .into_boxed_slice();
+
+            for entry in bg_layout.entries.iter() {
+                let counter = match entry.ty {
+                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
+                    wgt::BindingType::Texture { .. } => &mut num_textures,
+                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
+                    wgt::BindingType::Buffer {
+                        ty: wgt::BufferBindingType::Uniform,
+                        ..
+                    } => &mut num_uniform_buffers,
+                    wgt::BindingType::Buffer {
+                        ty: wgt::BufferBindingType::Storage { .. },
+                        ..
+                    } => &mut num_storage_buffers,
+                };
+
+                binding_to_slot[entry.binding as usize] = *counter;
+                let br = naga::ResourceBinding {
+                    group: group_index as u32,
+                    binding: entry.binding,
+                };
+                binding_map.insert(br, *counter);
+                // Arrayed bindings consume one slot per element.
+                *counter += entry.count.map_or(1, |c| c.get() as u8);
+            }
+
+            group_infos.push(super::BindGroupLayoutInfo {
+                entries: Arc::clone(&bg_layout.entries),
+                binding_to_slot,
+            });
+        }
+
+        Ok(super::PipelineLayout {
+            group_infos: group_infos.into_boxed_slice(),
+            naga_options: glsl::Options {
+                version: self.shared.shading_language_version,
+                writer_flags,
+                binding_map,
+                zero_initialize_workgroup_memory: true,
+            },
+        })
+    }
+    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {}
+
+    // Resolves each bind group entry against its layout into a `RawBinding`
+    // (raw GL handle plus the metadata needed at bind time). No GL calls are
+    // made here; binding happens later when the group is set.
+    unsafe fn create_bind_group(
+        &self,
+        desc: &crate::BindGroupDescriptor<super::Api>,
+    ) -> Result<super::BindGroup, crate::DeviceError> {
+        let mut contents = Vec::new();
+
+        for (entry, layout) in desc.entries.iter().zip(desc.layout.entries.iter()) {
+            let binding = match layout.ty {
+                wgt::BindingType::Buffer { .. } => {
+                    let bb = &desc.buffers[entry.resource_index as usize];
+                    super::RawBinding::Buffer {
+                        raw: bb.buffer.raw.unwrap(),
+                        offset: bb.offset as i32,
+                        // A missing size means "bind to the end of the buffer".
+                        size: match bb.size {
+                            Some(s) => s.get() as i32,
+                            None => (bb.buffer.size - bb.offset) as i32,
+                        },
+                    }
+                }
+                wgt::BindingType::Sampler { .. } => {
+                    let sampler = desc.samplers[entry.resource_index as usize];
+                    super::RawBinding::Sampler(sampler.raw)
+                }
+                wgt::BindingType::Texture { .. } => {
+                    let view = desc.textures[entry.resource_index as usize].view;
+                    if view.mip_levels.start != 0 || view.array_layers.start != 0 {
+                        log::error!("Unable to create a sampled texture binding for non-zero mipmap level or array layer.\n{}",
+                            "This is an implementation problem of wgpu-hal/gles backend.")
+                    }
+                    let (raw, target) = view.inner.as_native();
+                    super::RawBinding::Texture {
+                        raw,
+                        target,
+                        aspects: view.aspects,
+                    }
+                }
+                wgt::BindingType::StorageTexture {
+                    access,
+                    format,
+                    view_dimension,
+                } => {
+                    let view = desc.textures[entry.resource_index as usize].view;
+                    let format_desc = self.shared.describe_texture_format(format);
+                    let (raw, _target) = view.inner.as_native();
+                    super::RawBinding::Image(super::ImageBinding {
+                        raw,
+                        mip_level: view.mip_levels.start,
+                        // Array dimensions bind the whole layer range (layered
+                        // image binding); other dimensions bind a single layer.
+                        array_layer: match view_dimension {
+                            wgt::TextureViewDimension::D2Array
+                            | wgt::TextureViewDimension::CubeArray => None,
+                            _ => Some(view.array_layers.start),
+                        },
+                        access: conv::map_storage_access(access),
+                        format: format_desc.internal,
+                    })
+                }
+            };
+            contents.push(binding);
+        }
+
+        Ok(super::BindGroup {
+            contents: contents.into_boxed_slice(),
+        })
+    }
+    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {}
+
+    // Stores the naga IR and a fresh id; actual GLSL translation and GL
+    // compilation are deferred to pipeline creation (see `create_pipeline`).
+    // SPIR-V passthrough is not supported by this backend, hence the panic.
+    unsafe fn create_shader_module(
+        &self,
+        desc: &crate::ShaderModuleDescriptor,
+        shader: crate::ShaderInput,
+    ) -> Result<super::ShaderModule, crate::ShaderError> {
+        Ok(super::ShaderModule {
+            naga: match shader {
+                crate::ShaderInput::SpirV(_) => {
+                    panic!("`Features::SPIRV_SHADER_PASSTHROUGH` is not enabled")
+                }
+                crate::ShaderInput::Naga(naga) => naga,
+            },
+            label: desc.label.map(|str| str.to_string()),
+            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
+        })
+    }
+    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {}
+
+    // Links the vertex (and optional fragment) stages into a GL program via
+    // the shared program cache, then snapshots the fixed-function state
+    // (vertex layout, blend/write masks, depth/stencil, multisample) from the
+    // descriptor.
+    unsafe fn create_render_pipeline(
+        &self,
+        desc: &crate::RenderPipelineDescriptor<super::Api>,
+    ) -> Result<super::RenderPipeline, crate::PipelineError> {
+        let gl = &self.shared.context.lock();
+        let mut shaders = ArrayVec::new();
+        shaders.push((naga::ShaderStage::Vertex, &desc.vertex_stage));
+        if let Some(ref fs) = desc.fragment_stage {
+            shaders.push((naga::ShaderStage::Fragment, fs));
+        }
+        let inner =
+            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;
+
+        // Flatten the vertex buffer layouts into parallel buffer/attribute
+        // descriptor lists keyed by buffer index.
+        let (vertex_buffers, vertex_attributes) = {
+            let mut buffers = Vec::new();
+            let mut attributes = Vec::new();
+            for (index, vb_layout) in desc.vertex_buffers.iter().enumerate() {
+                buffers.push(super::VertexBufferDesc {
+                    step: vb_layout.step_mode,
+                    stride: vb_layout.array_stride as u32,
+                });
+                for vat in vb_layout.attributes.iter() {
+                    let format_desc = conv::describe_vertex_format(vat.format);
+                    attributes.push(super::AttributeDesc {
+                        location: vat.shader_location,
+                        offset: vat.offset as u32,
+                        buffer_index: index as u32,
+                        format_desc,
+                    });
+                }
+            }
+            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
+        };
+
+        let color_targets = {
+            let mut targets = Vec::new();
+            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
+                targets.push(super::ColorTargetDesc {
+                    mask: ct.write_mask,
+                    blend: ct.blend.as_ref().map(conv::map_blend),
+                });
+            }
+            //Note: if any of the states are different, and `INDEPENDENT_BLEND` flag
+            // is not exposed, then this pipeline will not bind correctly.
+            targets.into_boxed_slice()
+        };
+
+        Ok(super::RenderPipeline {
+            inner,
+            primitive: desc.primitive,
+            vertex_buffers,
+            vertex_attributes,
+            color_targets,
+            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
+                function: conv::map_compare_func(ds.depth_compare),
+                mask: ds.depth_write_enabled,
+            }),
+            depth_bias: desc
+                .depth_stencil
+                .as_ref()
+                .map(|ds| ds.bias)
+                .unwrap_or_default(),
+            stencil: desc
+                .depth_stencil
+                .as_ref()
+                .map(|ds| conv::map_stencil(&ds.stencil)),
+            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
+        })
+    }
+    // Deletes the GL program only once the cache entry is the last other
+    // holder of it (cached programs are shared between identical pipelines).
+    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
+        let mut program_cache = self.shared.program_cache.lock();
+        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`
+        // This is safe to assume as long as:
+        // - `RenderPipeline` can't be cloned
+        // - The only place that we can get a new reference is during `program_cache.lock()`
+        if Arc::strong_count(&pipeline.inner) == 2 {
+            program_cache.retain(|_, v| match *v {
+                Ok(ref p) => p.program != pipeline.inner.program,
+                Err(_) => false,
+            });
+            let gl = &self.shared.context.lock();
+            unsafe { gl.delete_program(pipeline.inner.program) };
+        }
+    }
+
+    // Links the single compute stage into a GL program via the shared program
+    // cache (`create_pipeline`); no fixed-function state to capture.
+    unsafe fn create_compute_pipeline(
+        &self,
+        desc: &crate::ComputePipelineDescriptor<super::Api>,
+    ) -> Result<super::ComputePipeline, crate::PipelineError> {
+        let gl = &self.shared.context.lock();
+        let mut shaders = ArrayVec::new();
+        shaders.push((naga::ShaderStage::Compute, &desc.stage));
+        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;
+
+        Ok(super::ComputePipeline { inner })
+    }
+    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
+        let mut program_cache = self.shared.program_cache.lock();
+        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`
+        // This is safe to assume as long as:
+        // - `ComputePipeline` can't be cloned
+        // - The only place that we can get a new reference is during `program_cache.lock()`
+        if Arc::strong_count(&pipeline.inner) == 2 {
+            program_cache.retain(|_, v| match *v {
+                Ok(ref p) => p.program != pipeline.inner.program,
+                Err(_) => false,
+            });
+            let gl = &self.shared.context.lock();
+            unsafe { gl.delete_program(pipeline.inner.program) };
+        }
+    }
+
+    // Allocates one GL query object per requested query. Only occlusion
+    // queries (ANY_SAMPLES_PASSED) are implemented; other query types hit
+    // `unimplemented!`.
+    #[cfg_attr(target_arch = "wasm32", allow(unused))]
+    unsafe fn create_query_set(
+        &self,
+        desc: &wgt::QuerySetDescriptor<crate::Label>,
+    ) -> Result<super::QuerySet, crate::DeviceError> {
+        let gl = &self.shared.context.lock();
+        // Reused scratch string for per-query debug labels ("label[i]").
+        let mut temp_string = String::new();
+
+        let mut queries = Vec::with_capacity(desc.count as usize);
+        for i in 0..desc.count {
+            let query =
+                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
+            #[cfg(not(target_arch = "wasm32"))]
+            if gl.supports_debug() {
+                use std::fmt::Write;
+
+                if let Some(label) = desc.label {
+                    temp_string.clear();
+                    let _ = write!(temp_string, "{label}[{i}]");
+                    let name = unsafe { mem::transmute(query) };
+                    unsafe { gl.object_label(glow::QUERY, name, Some(&temp_string)) };
+                }
+            }
+            queries.push(query);
+        }
+
+        Ok(super::QuerySet {
+            queries: queries.into_boxed_slice(),
+            target: match desc.ty {
+                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED,
+                _ => unimplemented!(),
+            },
+        })
+    }
+    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
+        let gl = &self.shared.context.lock();
+        for &query in set.queries.iter() {
+            unsafe { gl.delete_query(query) };
+        }
+    }
+    // Fences are a CPU-side list of (value, GL sync object) pairs plus the
+    // last value known to have completed; creation touches no GL state.
+    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
+        Ok(super::Fence {
+            last_completed: 0,
+            pending: Vec::new(),
+        })
+    }
+    unsafe fn destroy_fence(&self, fence: super::Fence) {
+        let gl = &self.shared.context.lock();
+        for (_, sync) in fence.pending {
+            unsafe { gl.delete_sync(sync) };
+        }
+    }
+    // Polls the fence's pending sync objects for the most recent value that
+    // has signaled (delegated to `Fence::get_latest`).
+    unsafe fn get_fence_value(
+        &self,
+        fence: &super::Fence,
+    ) -> Result<crate::FenceValue, crate::DeviceError> {
+        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
+        Ok(fence.get_latest(&self.shared.context.lock()))
+    }
+    // Blocks until the fence reaches `wait_value` or the timeout elapses.
+    // Returns Ok(true) when signaled, Ok(false) on timeout. On wasm the
+    // timeout is forced to 0 (non-blocking poll).
+    // NOTE(review): the timeout is clamped to u32::MAX nanoseconds and then
+    // cast `as i32`, which goes negative for timeouts above ~2.1s -- confirm
+    // this matches glow's expectations for `client_wait_sync`.
+    unsafe fn wait(
+        &self,
+        fence: &super::Fence,
+        wait_value: crate::FenceValue,
+        timeout_ms: u32,
+    ) -> Result<bool, crate::DeviceError> {
+        if fence.last_completed < wait_value {
+            let gl = &self.shared.context.lock();
+            let timeout_ns = if cfg!(target_arch = "wasm32") {
+                0
+            } else {
+                (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
+            };
+            // Assumes some pending sync covers `wait_value`; panics otherwise.
+            let &(_, sync) = fence
+                .pending
+                .iter()
+                .find(|&&(value, _)| value >= wait_value)
+                .unwrap();
+            match unsafe {
+                gl.client_wait_sync(sync, glow::SYNC_FLUSH_COMMANDS_BIT, timeout_ns as i32)
+            } {
+                // for some reason firefox returns WAIT_FAILED, to investigate
+                #[cfg(target_arch = "wasm32")]
+                glow::WAIT_FAILED => {
+                    log::warn!("wait failed!");
+                    Ok(false)
+                }
+                glow::TIMEOUT_EXPIRED => Ok(false),
+                glow::CONDITION_SATISFIED | glow::ALREADY_SIGNALED => Ok(true),
+                _ => Err(crate::DeviceError::Lost),
+            }
+        } else {
+            // Already known to be complete; no GL round-trip needed.
+            Ok(true)
+        }
+    }
+
+    // Frame-capture hooks for RenderDoc; only active on native builds with the
+    // `renderdoc` feature. Otherwise `start_capture` reports false.
+    unsafe fn start_capture(&self) -> bool {
+        #[cfg(all(not(target_arch = "wasm32"), feature = "renderdoc"))]
+        return unsafe {
+            self.render_doc
+                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
+        };
+        #[allow(unreachable_code)]
+        false
+    }
+    unsafe fn stop_capture(&self) {
+        #[cfg(all(not(target_arch = "wasm32"), feature = "renderdoc"))]
+        unsafe {
+            self.render_doc
+                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
+        }
+    }
+}
+
+// SAFE: WASM doesn't have threads
+// NOTE(review): these impls rely on wasm32 being single-threaded, since the
+// underlying WebGL context handles are not otherwise thread-safe -- confirm
+// this holds if wasm threads/atomics are ever enabled.
+#[cfg(target_arch = "wasm32")]
+unsafe impl Sync for super::Device {}
+#[cfg(target_arch = "wasm32")]
+unsafe impl Send for super::Device {}
diff --git a/third_party/rust/wgpu-hal/src/gles/egl.rs b/third_party/rust/wgpu-hal/src/gles/egl.rs
new file mode 100644
index 0000000000..13e217f9d9
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/egl.rs
@@ -0,0 +1,1310 @@
+use glow::HasContext;
+use parking_lot::{Mutex, MutexGuard};
+
+use std::{ffi, os::raw, ptr, sync::Arc, time::Duration};
+
+/// The amount of time to wait while trying to obtain a lock to the adapter context
+const CONTEXT_LOCK_TIMEOUT_SECS: u64 = 1;
+
+// EGL enum values that the `khronos_egl` crate does not expose.
+const EGL_CONTEXT_FLAGS_KHR: i32 = 0x30FC;
+const EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR: i32 = 0x0001;
+const EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT: i32 = 0x30BF;
+const EGL_PLATFORM_WAYLAND_KHR: u32 = 0x31D8;
+const EGL_PLATFORM_X11_KHR: u32 = 0x31D5;
+const EGL_PLATFORM_ANGLE_ANGLE: u32 = 0x3202;
+const EGL_PLATFORM_ANGLE_NATIVE_PLATFORM_TYPE_ANGLE: u32 = 0x348F;
+const EGL_PLATFORM_ANGLE_DEBUG_LAYERS_ENABLED: u32 = 0x3451;
+const EGL_PLATFORM_SURFACELESS_MESA: u32 = 0x31DD;
+const EGL_GL_COLORSPACE_KHR: u32 = 0x309D;
+const EGL_GL_COLORSPACE_SRGB_KHR: u32 = 0x3089;
+
+// Signatures of the X11 / Wayland entry points that are resolved at
+// runtime with `libloading` rather than linked directly.
+type XOpenDisplayFun =
+ unsafe extern "system" fn(display_name: *const raw::c_char) -> *mut raw::c_void;
+
+type WlDisplayConnectFun =
+ unsafe extern "system" fn(display_name: *const raw::c_char) -> *mut raw::c_void;
+
+type WlDisplayDisconnectFun = unsafe extern "system" fn(display: *const raw::c_void);
+
+// EGL is loaded dynamically everywhere except emscripten, where the
+// platform provides it statically.
+#[cfg(not(target_os = "emscripten"))]
+type EglInstance = khronos_egl::DynamicInstance<khronos_egl::EGL1_4>;
+
+#[cfg(target_os = "emscripten")]
+type EglInstance = khronos_egl::Instance<khronos_egl::Static>;
+
+type WlEglWindowCreateFun = unsafe extern "system" fn(
+ surface: *const raw::c_void,
+ width: raw::c_int,
+ height: raw::c_int,
+) -> *mut raw::c_void;
+
+type WlEglWindowResizeFun = unsafe extern "system" fn(
+ window: *const raw::c_void,
+ width: raw::c_int,
+ height: raw::c_int,
+ dx: raw::c_int,
+ dy: raw::c_int,
+);
+
+type WlEglWindowDestroyFun = unsafe extern "system" fn(window: *const raw::c_void);
+
+#[cfg(target_os = "android")]
+extern "C" {
+ pub fn ANativeWindow_setBuffersGeometry(
+ window: *mut raw::c_void,
+ width: i32,
+ height: i32,
+ format: i32,
+ ) -> i32;
+}
+
+type EglLabel = *const raw::c_void;
+
+// Callback type for EGL_KHR_debug message reporting.
+#[allow(clippy::upper_case_acronyms)]
+type EGLDEBUGPROCKHR = Option<
+ unsafe extern "system" fn(
+ error: khronos_egl::Enum,
+ command: *const raw::c_char,
+ message_type: u32,
+ thread_label: EglLabel,
+ object_label: EglLabel,
+ message: *const raw::c_char,
+ ),
+>;
+
+const EGL_DEBUG_MSG_CRITICAL_KHR: u32 = 0x33B9;
+const EGL_DEBUG_MSG_ERROR_KHR: u32 = 0x33BA;
+const EGL_DEBUG_MSG_WARN_KHR: u32 = 0x33BB;
+const EGL_DEBUG_MSG_INFO_KHR: u32 = 0x33BC;
+
+type EglDebugMessageControlFun = unsafe extern "system" fn(
+ proc: EGLDEBUGPROCKHR,
+ attrib_list: *const khronos_egl::Attrib,
+) -> raw::c_int;
+
+/// EGL_KHR_debug callback: maps EGL message severity onto a `log` level
+/// and logs the command name, error code, and message text.
+unsafe extern "system" fn egl_debug_proc(
+ error: khronos_egl::Enum,
+ command_raw: *const raw::c_char,
+ message_type: u32,
+ _thread_label: EglLabel,
+ _object_label: EglLabel,
+ message_raw: *const raw::c_char,
+) {
+ let log_severity = match message_type {
+ EGL_DEBUG_MSG_CRITICAL_KHR | EGL_DEBUG_MSG_ERROR_KHR => log::Level::Error,
+ EGL_DEBUG_MSG_WARN_KHR => log::Level::Warn,
+ EGL_DEBUG_MSG_INFO_KHR => log::Level::Info,
+ _ => log::Level::Debug,
+ };
+ // NOTE(review): `command_raw` is dereferenced without a null check,
+ // unlike `message_raw` below -- confirm EGL guarantees it non-null.
+ let command = unsafe { ffi::CStr::from_ptr(command_raw) }.to_string_lossy();
+ let message = if message_raw.is_null() {
+ "".into()
+ } else {
+ unsafe { ffi::CStr::from_ptr(message_raw) }.to_string_lossy()
+ };
+
+ log::log!(
+ log_severity,
+ "EGL '{}' code 0x{:x}: {}",
+ command,
+ error,
+ message,
+ );
+}
+
+/// Opens the default X11 display via a dynamically loaded libX11.
+///
+/// Returns the display pointer together with the library handle, which
+/// must be kept alive for the pointer to remain usable.
+fn open_x_display() -> Option<(ptr::NonNull<raw::c_void>, libloading::Library)> {
+ log::info!("Loading X11 library to get the current display");
+ unsafe {
+ let library = libloading::Library::new("libX11.so").ok()?;
+ let func: libloading::Symbol<XOpenDisplayFun> = library.get(b"XOpenDisplay").unwrap();
+ let result = func(ptr::null());
+ ptr::NonNull::new(result).map(|ptr| (ptr, library))
+ }
+}
+
+/// Tries each of `paths` in order and returns the first library that loads.
+unsafe fn find_library(paths: &[&str]) -> Option<libloading::Library> {
+ for path in paths {
+ match unsafe { libloading::Library::new(path) } {
+ Ok(lib) => return Some(lib),
+ _ => continue,
+ };
+ }
+ None
+}
+
+/// Probes for an active Wayland session and, if one exists, returns the
+/// `libwayland-egl` library needed to create EGL windows on it.
+fn test_wayland_display() -> Option<libloading::Library> {
+ /* We try to connect and disconnect here to simply ensure there
+ * is an active wayland display available.
+ */
+ log::info!("Loading Wayland library to get the current display");
+ let library = unsafe {
+ let client_library = find_library(&["libwayland-client.so.0", "libwayland-client.so"])?;
+ let wl_display_connect: libloading::Symbol<WlDisplayConnectFun> =
+ client_library.get(b"wl_display_connect").unwrap();
+ let wl_display_disconnect: libloading::Symbol<WlDisplayDisconnectFun> =
+ client_library.get(b"wl_display_disconnect").unwrap();
+ let display = ptr::NonNull::new(wl_display_connect(ptr::null()))?;
+ wl_display_disconnect(display.as_ptr());
+ find_library(&["libwayland-egl.so.1", "libwayland-egl.so"])?
+ };
+ Some(library)
+}
+
+/// How (and whether) an sRGB framebuffer can be requested from EGL.
+#[derive(Clone, Copy, Debug)]
+enum SrgbFrameBufferKind {
+ /// No support for SRGB surface
+ None,
+ /// Using EGL 1.5's support for colorspaces
+ Core,
+ /// Using EGL_KHR_gl_colorspace
+ Khr,
+}
+
+/// Choose GLES framebuffer configuration.
+///
+/// Returns the selected config together with a flag saying whether it is
+/// considered good enough for native presentation (`tier_threshold` below).
+fn choose_config(
+ egl: &EglInstance,
+ display: khronos_egl::Display,
+ srgb_kind: SrgbFrameBufferKind,
+) -> Result<(khronos_egl::Config, bool), crate::InstanceError> {
+ //TODO: EGL_SLOW_CONFIG
+ let tiers = [
+ (
+ "off-screen",
+ &[
+ khronos_egl::SURFACE_TYPE,
+ khronos_egl::PBUFFER_BIT,
+ khronos_egl::RENDERABLE_TYPE,
+ khronos_egl::OPENGL_ES2_BIT,
+ ][..],
+ ),
+ (
+ "presentation",
+ &[khronos_egl::SURFACE_TYPE, khronos_egl::WINDOW_BIT][..],
+ ),
+ #[cfg(not(target_os = "android"))]
+ (
+ "native-render",
+ &[khronos_egl::NATIVE_RENDERABLE, khronos_egl::TRUE as _][..],
+ ),
+ ];
+
+ let mut attributes = Vec::with_capacity(9);
+ // Try the most demanding cumulative attribute set first, dropping one
+ // tier per iteration until a config matches.
+ for tier_max in (0..tiers.len()).rev() {
+ let name = tiers[tier_max].0;
+ log::info!("\tTrying {}", name);
+
+ attributes.clear();
+ for &(_, tier_attr) in tiers[..=tier_max].iter() {
+ attributes.extend_from_slice(tier_attr);
+ }
+ // make sure the Alpha is enough to support sRGB
+ match srgb_kind {
+ SrgbFrameBufferKind::None => {}
+ _ => {
+ attributes.push(khronos_egl::ALPHA_SIZE);
+ attributes.push(8);
+ }
+ }
+ attributes.push(khronos_egl::NONE);
+
+ match egl.choose_first_config(display, &attributes) {
+ Ok(Some(config)) => {
+ if tier_max == 1 {
+ //Note: this has been confirmed to malfunction on Intel+NV laptops,
+ // but also on Angle.
+ log::warn!("EGL says it can present to the window but not natively",);
+ }
+ // Android emulator can't natively present either.
+ let tier_threshold = if cfg!(target_os = "android") || cfg!(windows) {
+ 1
+ } else {
+ 2
+ };
+ return Ok((config, tier_max >= tier_threshold));
+ }
+ Ok(None) => {
+ log::warn!("No config found!");
+ }
+ Err(e) => {
+ log::error!("error in choose_first_config: {:?}", e);
+ }
+ }
+ }
+
+ Err(crate::InstanceError)
+}
+
+/// GL `KHR_debug` callback: maps the raw source/type/severity enums to
+/// readable strings and forwards the message to the `log` crate.
+fn gl_debug_message_callback(source: u32, gltype: u32, id: u32, severity: u32, message: &str) {
+ let source_str = match source {
+ glow::DEBUG_SOURCE_API => "API",
+ glow::DEBUG_SOURCE_WINDOW_SYSTEM => "Window System",
+ glow::DEBUG_SOURCE_SHADER_COMPILER => "ShaderCompiler",
+ glow::DEBUG_SOURCE_THIRD_PARTY => "Third Party",
+ glow::DEBUG_SOURCE_APPLICATION => "Application",
+ glow::DEBUG_SOURCE_OTHER => "Other",
+ _ => unreachable!(),
+ };
+
+ let log_severity = match severity {
+ glow::DEBUG_SEVERITY_HIGH => log::Level::Error,
+ glow::DEBUG_SEVERITY_MEDIUM => log::Level::Warn,
+ glow::DEBUG_SEVERITY_LOW => log::Level::Info,
+ glow::DEBUG_SEVERITY_NOTIFICATION => log::Level::Trace,
+ _ => unreachable!(),
+ };
+
+ let type_str = match gltype {
+ glow::DEBUG_TYPE_DEPRECATED_BEHAVIOR => "Deprecated Behavior",
+ glow::DEBUG_TYPE_ERROR => "Error",
+ glow::DEBUG_TYPE_MARKER => "Marker",
+ glow::DEBUG_TYPE_OTHER => "Other",
+ glow::DEBUG_TYPE_PERFORMANCE => "Performance",
+ glow::DEBUG_TYPE_POP_GROUP => "Pop Group",
+ glow::DEBUG_TYPE_PORTABILITY => "Portability",
+ glow::DEBUG_TYPE_PUSH_GROUP => "Push Group",
+ glow::DEBUG_TYPE_UNDEFINED_BEHAVIOR => "Undefined Behavior",
+ _ => unreachable!(),
+ };
+
+ // Don't let a panicking logger unwind out of this callback.
+ let _ = std::panic::catch_unwind(|| {
+ log::log!(
+ log_severity,
+ "GLES: [{}/{}] ID {} : {}",
+ source_str,
+ type_str,
+ id,
+ message
+ );
+ });
+
+ if cfg!(debug_assertions) && log_severity == log::Level::Error {
+ // Set canary and continue
+ crate::VALIDATION_CANARY.set();
+ }
+}
+
+/// An EGL context plus the handles required to make it current: the EGL
+/// instance, display, and optional dummy pbuffer surface.
+#[derive(Clone, Debug)]
+struct EglContext {
+ instance: Arc<EglInstance>,
+ version: (i32, i32),
+ display: khronos_egl::Display,
+ raw: khronos_egl::Context,
+ pbuffer: Option<khronos_egl::Surface>,
+}
+
+impl EglContext {
+ /// Binds this context (and its pbuffer, when present) on the calling thread.
+ fn make_current(&self) {
+ self.instance
+ .make_current(self.display, self.pbuffer, self.pbuffer, Some(self.raw))
+ .unwrap();
+ }
+ /// Unbinds any surface and context from the calling thread.
+ fn unmake_current(&self) {
+ self.instance
+ .make_current(self.display, None, None, None)
+ .unwrap();
+ }
+}
+
+/// A wrapper around a [`glow::Context`] and the required EGL context that uses locking to guarantee
+/// exclusive access when shared with multiple threads.
+pub struct AdapterContext {
+ glow: Mutex<glow::Context>,
+ // `None` when the GL context was supplied externally (see `new_external`).
+ egl: Option<EglContext>,
+}
+
+// NOTE(review): these impls assert the raw EGL/GL handles may cross
+// threads because all access is funneled through the mutex -- confirm.
+unsafe impl Sync for AdapterContext {}
+unsafe impl Send for AdapterContext {}
+
+impl AdapterContext {
+ /// Whether this context owns its own EGL context (as opposed to being
+ /// created around an externally supplied GL context).
+ pub fn is_owned(&self) -> bool {
+ self.egl.is_some()
+ }
+
+ /// Returns the EGL instance.
+ ///
+ /// This provides access to EGL functions and the ability to load GL and EGL extension functions.
+ pub fn egl_instance(&self) -> Option<&EglInstance> {
+ self.egl.as_ref().map(|egl| &*egl.instance)
+ }
+
+ /// Returns the EGLDisplay corresponding to the adapter context.
+ ///
+ /// Returns [`None`] if the adapter was externally created.
+ pub fn raw_display(&self) -> Option<&khronos_egl::Display> {
+ self.egl.as_ref().map(|egl| &egl.display)
+ }
+
+ /// Returns the EGL version the adapter context was created with.
+ ///
+ /// Returns [`None`] if the adapter was externally created.
+ pub fn egl_version(&self) -> Option<(i32, i32)> {
+ self.egl.as_ref().map(|egl| egl.version)
+ }
+
+ /// Raw EGL context pointer, or null when externally created.
+ pub fn raw_context(&self) -> *mut raw::c_void {
+ match self.egl {
+ Some(ref egl) => egl.raw.as_ptr(),
+ None => ptr::null_mut(),
+ }
+ }
+}
+
+/// Borrowed EGL handles kept by [`AdapterContextLock`] so it can release
+/// the EGL context when the guard drops.
+struct EglContextLock<'a> {
+ instance: &'a Arc<EglInstance>,
+ display: khronos_egl::Display,
+}
+
+/// A guard containing a lock to an [`AdapterContext`]
+pub struct AdapterContextLock<'a> {
+ glow: MutexGuard<'a, glow::Context>,
+ egl: Option<EglContextLock<'a>>,
+}
+
+impl<'a> std::ops::Deref for AdapterContextLock<'a> {
+ type Target = glow::Context;
+
+ fn deref(&self) -> &Self::Target {
+ &self.glow
+ }
+}
+
+impl<'a> Drop for AdapterContextLock<'a> {
+ // Unbind the EGL context before the glow mutex guard is released.
+ fn drop(&mut self) {
+ if let Some(egl) = self.egl.take() {
+ egl.instance
+ .make_current(egl.display, None, None, None)
+ .unwrap();
+ }
+ }
+}
+
+impl AdapterContext {
+ /// Get's the [`glow::Context`] without waiting for a lock
+ ///
+ /// # Safety
+ ///
+ /// This should only be called when you have manually made sure that the current thread has made
+ /// the EGL context current and that no other thread also has the EGL context current.
+ /// Additionally, you must manually make the EGL context **not** current after you are done with
+ /// it, so that future calls to `lock()` will not fail.
+ ///
+ /// > **Note:** Calling this function **will** still lock the [`glow::Context`] which adds an
+ /// > extra safe-guard against accidental concurrent access to the context.
+ pub unsafe fn get_without_egl_lock(&self) -> MutexGuard<glow::Context> {
+ self.glow
+ .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS))
+ .expect("Could not lock adapter context. This is most-likely a deadlock.")
+ }
+
+ /// Obtain a lock to the EGL context and get handle to the [`glow::Context`] that can be used to
+ /// do rendering.
+ ///
+ /// The EGL context is made current here and released again when the
+ /// returned guard drops.
+ #[track_caller]
+ pub fn lock<'a>(&'a self) -> AdapterContextLock<'a> {
+ let glow = self
+ .glow
+ // Don't lock forever. If it takes longer than 1 second to get the lock we've got a
+ // deadlock and should panic to show where we got stuck
+ .try_lock_for(Duration::from_secs(CONTEXT_LOCK_TIMEOUT_SECS))
+ .expect("Could not lock adapter context. This is most-likely a deadlock.");
+
+ let egl = self.egl.as_ref().map(|egl| {
+ egl.make_current();
+ EglContextLock {
+ instance: &egl.instance,
+ display: egl.display,
+ }
+ });
+
+ AdapterContextLock { glow, egl }
+ }
+}
+
+/// Instance state guarded by `Instance::inner`: the owned EGL context and
+/// the display configuration it was created with.
+#[derive(Debug)]
+struct Inner {
+ /// Note: the context contains a dummy pbuffer (1x1).
+ /// Required for `eglMakeCurrent` on platforms that doesn't supports `EGL_KHR_surfaceless_context`.
+ egl: EglContext,
+ #[allow(unused)]
+ version: (i32, i32),
+ /// Whether the chosen config reached the "native render" tier.
+ supports_native_window: bool,
+ config: khronos_egl::Config,
+ #[cfg_attr(target_os = "emscripten", allow(dead_code))]
+ /// The Wayland display this instance was (re-)initialized for, if any.
+ wl_display: Option<*mut raw::c_void>,
+ /// Method by which the framebuffer should support srgb
+ srgb_kind: SrgbFrameBufferKind,
+}
+
+impl Inner {
+ /// Initializes the EGL display and builds an owned GLES 3.x context.
+ ///
+ /// Detects sRGB surface support, chooses a framebuffer config, requests
+ /// debug/robustness context attributes where available, and creates a
+ /// 1x1 dummy pbuffer when surfaceless contexts are unsupported.
+ fn create(
+ flags: crate::InstanceFlags,
+ egl: Arc<EglInstance>,
+ display: khronos_egl::Display,
+ ) -> Result<Self, crate::InstanceError> {
+ let version = egl.initialize(display).map_err(|_| crate::InstanceError)?;
+ let vendor = egl
+ .query_string(Some(display), khronos_egl::VENDOR)
+ .unwrap();
+ let display_extensions = egl
+ .query_string(Some(display), khronos_egl::EXTENSIONS)
+ .unwrap()
+ .to_string_lossy();
+ log::info!("Display vendor {:?}, version {:?}", vendor, version,);
+ log::debug!(
+ "Display extensions: {:#?}",
+ display_extensions.split_whitespace().collect::<Vec<_>>()
+ );
+
+ // Prefer core EGL 1.5 colorspace support, fall back to the KHR extension.
+ let srgb_kind = if version >= (1, 5) {
+ log::info!("\tEGL surface: +srgb");
+ SrgbFrameBufferKind::Core
+ } else if display_extensions.contains("EGL_KHR_gl_colorspace") {
+ log::info!("\tEGL surface: +srgb khr");
+ SrgbFrameBufferKind::Khr
+ } else {
+ log::warn!("\tEGL surface: -srgb");
+ SrgbFrameBufferKind::None
+ };
+
+ if log::max_level() >= log::LevelFilter::Trace {
+ log::trace!("Configurations:");
+ let config_count = egl.get_config_count(display).unwrap();
+ let mut configurations = Vec::with_capacity(config_count);
+ egl.get_configs(display, &mut configurations).unwrap();
+ for &config in configurations.iter() {
+ log::trace!("\tCONFORMANT=0x{:X}, RENDERABLE=0x{:X}, NATIVE_RENDERABLE=0x{:X}, SURFACE_TYPE=0x{:X}, ALPHA_SIZE={}",
+ egl.get_config_attrib(display, config, khronos_egl::CONFORMANT).unwrap(),
+ egl.get_config_attrib(display, config, khronos_egl::RENDERABLE_TYPE).unwrap(),
+ egl.get_config_attrib(display, config, khronos_egl::NATIVE_RENDERABLE).unwrap(),
+ egl.get_config_attrib(display, config, khronos_egl::SURFACE_TYPE).unwrap(),
+ egl.get_config_attrib(display, config, khronos_egl::ALPHA_SIZE).unwrap(),
+ );
+ }
+ }
+
+ let (config, supports_native_window) = choose_config(&egl, display, srgb_kind)?;
+ egl.bind_api(khronos_egl::OPENGL_ES_API).unwrap();
+
+ let needs_robustness = true;
+ let mut khr_context_flags = 0;
+ let supports_khr_context = display_extensions.contains("EGL_KHR_create_context");
+
+ //TODO: make it so `Device` == EGL Context
+ let mut context_attributes = vec![
+ khronos_egl::CONTEXT_CLIENT_VERSION,
+ 3, // Request GLES 3.0 or higher
+ ];
+ if flags.contains(crate::InstanceFlags::DEBUG) {
+ if version >= (1, 5) {
+ log::info!("\tEGL context: +debug");
+ context_attributes.push(khronos_egl::CONTEXT_OPENGL_DEBUG);
+ context_attributes.push(khronos_egl::TRUE as _);
+ } else if supports_khr_context {
+ log::info!("\tEGL context: +debug KHR");
+ khr_context_flags |= EGL_CONTEXT_OPENGL_DEBUG_BIT_KHR;
+ } else {
+ log::info!("\tEGL context: -debug");
+ }
+ }
+ if needs_robustness {
+ //Note: the core version can fail if robustness is not supported
+ // (regardless of whether the extension is supported!).
+ // In fact, Angle does precisely that awful behavior, so we don't try it there.
+ if version >= (1, 5) && !display_extensions.contains("EGL_ANGLE_") {
+ log::info!("\tEGL context: +robust access");
+ context_attributes.push(khronos_egl::CONTEXT_OPENGL_ROBUST_ACCESS);
+ context_attributes.push(khronos_egl::TRUE as _);
+ } else if display_extensions.contains("EGL_EXT_create_context_robustness") {
+ log::info!("\tEGL context: +robust access EXT");
+ context_attributes.push(EGL_CONTEXT_OPENGL_ROBUST_ACCESS_EXT);
+ context_attributes.push(khronos_egl::TRUE as _);
+ } else {
+ //Note: we aren't trying `EGL_CONTEXT_OPENGL_ROBUST_ACCESS_BIT_KHR`
+ // because it's for desktop GL only, not GLES.
+ log::warn!("\tEGL context: -robust access");
+ }
+
+ //TODO do we need `khronos_egl::CONTEXT_OPENGL_NOTIFICATION_STRATEGY_EXT`?
+ }
+ if khr_context_flags != 0 {
+ context_attributes.push(EGL_CONTEXT_FLAGS_KHR);
+ context_attributes.push(khr_context_flags);
+ }
+ context_attributes.push(khronos_egl::NONE);
+ let context = match egl.create_context(display, config, None, &context_attributes) {
+ Ok(context) => context,
+ Err(e) => {
+ log::warn!("unable to create GLES 3.x context: {:?}", e);
+ return Err(crate::InstanceError);
+ }
+ };
+
+ // Testing if context can be binded without surface
+ // and creating dummy pbuffer surface if not.
+ let pbuffer = if version >= (1, 5)
+ || display_extensions.contains("EGL_KHR_surfaceless_context")
+ || cfg!(target_os = "emscripten")
+ {
+ log::info!("\tEGL context: +surfaceless");
+ None
+ } else {
+ let attributes = [
+ khronos_egl::WIDTH,
+ 1,
+ khronos_egl::HEIGHT,
+ 1,
+ khronos_egl::NONE,
+ ];
+ egl.create_pbuffer_surface(display, config, &attributes)
+ .map(Some)
+ .map_err(|e| {
+ log::warn!("Error in create_pbuffer_surface: {:?}", e);
+ crate::InstanceError
+ })?
+ };
+
+ Ok(Self {
+ egl: EglContext {
+ instance: egl,
+ display,
+ raw: context,
+ pbuffer,
+ version,
+ },
+ version,
+ supports_native_window,
+ config,
+ wl_display: None,
+ srgb_kind,
+ })
+ }
+}
+
+impl Drop for Inner {
+ fn drop(&mut self) {
+ // Destroy the context, then terminate the display. Failures are
+ // logged rather than panicking inside `drop`.
+ if let Err(e) = self
+ .egl
+ .instance
+ .destroy_context(self.egl.display, self.egl.raw)
+ {
+ log::warn!("Error in destroy_context: {:?}", e);
+ }
+ if let Err(e) = self.egl.instance.terminate(self.egl.display) {
+ log::warn!("Error in terminate: {:?}", e);
+ }
+ }
+}
+
+/// Which windowing system the EGL display was initialized for.
+#[derive(Clone, Copy, Debug, PartialEq)]
+enum WindowKind {
+ Wayland,
+ X11,
+ AngleX11,
+ Unknown,
+}
+
+/// The dynamically loaded window-system library (if any) and the window
+/// kind it corresponds to.
+#[derive(Clone, Debug)]
+struct WindowSystemInterface {
+ library: Option<Arc<libloading::Library>>,
+ kind: WindowKind,
+}
+
+/// GLES/EGL instance: owns the EGL display state behind a mutex.
+pub struct Instance {
+ wsi: WindowSystemInterface,
+ flags: crate::InstanceFlags,
+ inner: Mutex<Inner>,
+}
+
+impl Instance {
+ /// Returns the raw EGL display handle.
+ pub fn raw_display(&self) -> khronos_egl::Display {
+ self.inner
+ .try_lock()
+ .expect("Could not lock instance. This is most-likely a deadlock.")
+ .egl
+ .display
+ }
+
+ /// Returns the version of the EGL display.
+ pub fn egl_version(&self) -> (i32, i32) {
+ self.inner
+ .try_lock()
+ .expect("Could not lock instance. This is most-likely a deadlock.")
+ .version
+ }
+}
+
+// NOTE(review): relies on all raw-handle access going through the
+// `Mutex<Inner>` -- confirm before adding unguarded state.
+unsafe impl Send for Instance {}
+unsafe impl Sync for Instance {}
+
+impl crate::Instance<super::Api> for Instance {
+ /// Loads libEGL, selects a platform display (Wayland, X11, Angle-on-X11,
+ /// surfaceless Mesa, or the default), optionally enables EGL debug
+ /// output, and initializes the instance state via [`Inner::create`].
+ unsafe fn init(desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
+ #[cfg(target_os = "emscripten")]
+ let egl_result: Result<EglInstance, khronos_egl::Error> =
+ Ok(khronos_egl::Instance::new(khronos_egl::Static));
+
+ // Pick the platform-specific library name for dynamic loading.
+ #[cfg(not(target_os = "emscripten"))]
+ let egl_result = if cfg!(windows) {
+ unsafe {
+ khronos_egl::DynamicInstance::<khronos_egl::EGL1_4>::load_required_from_filename(
+ "libEGL.dll",
+ )
+ }
+ } else if cfg!(any(target_os = "macos", target_os = "ios")) {
+ unsafe {
+ khronos_egl::DynamicInstance::<khronos_egl::EGL1_4>::load_required_from_filename(
+ "libEGL.dylib",
+ )
+ }
+ } else {
+ unsafe { khronos_egl::DynamicInstance::<khronos_egl::EGL1_4>::load_required() }
+ };
+ let egl = match egl_result {
+ Ok(egl) => Arc::new(egl),
+ Err(e) => {
+ log::info!("Unable to open libEGL: {:?}", e);
+ return Err(crate::InstanceError);
+ }
+ };
+
+ let client_extensions = egl.query_string(None, khronos_egl::EXTENSIONS);
+
+ let client_ext_str = match client_extensions {
+ Ok(ext) => ext.to_string_lossy().into_owned(),
+ Err(_) => String::new(),
+ };
+ log::debug!(
+ "Client extensions: {:#?}",
+ client_ext_str.split_whitespace().collect::<Vec<_>>()
+ );
+
+ // Probe the window systems advertised by the client extensions.
+ let wayland_library = if client_ext_str.contains("EGL_EXT_platform_wayland") {
+ test_wayland_display()
+ } else {
+ None
+ };
+ let x11_display_library = if client_ext_str.contains("EGL_EXT_platform_x11") {
+ open_x_display()
+ } else {
+ None
+ };
+ let angle_x11_display_library = if client_ext_str.contains("EGL_ANGLE_platform_angle") {
+ open_x_display()
+ } else {
+ None
+ };
+
+ // `get_platform_display` needs EGL 1.5 (or the static emscripten EGL).
+ #[cfg(not(target_os = "emscripten"))]
+ let egl1_5 = egl.upcast::<khronos_egl::EGL1_5>();
+
+ #[cfg(target_os = "emscripten")]
+ let egl1_5: Option<&Arc<EglInstance>> = Some(&egl);
+
+ let (display, wsi_library, wsi_kind) = if let (Some(library), Some(egl)) =
+ (wayland_library, egl1_5)
+ {
+ log::info!("Using Wayland platform");
+ let display_attributes = [khronos_egl::ATTRIB_NONE];
+ let display = egl
+ .get_platform_display(
+ EGL_PLATFORM_WAYLAND_KHR,
+ khronos_egl::DEFAULT_DISPLAY,
+ &display_attributes,
+ )
+ .unwrap();
+ (display, Some(Arc::new(library)), WindowKind::Wayland)
+ } else if let (Some((display, library)), Some(egl)) = (x11_display_library, egl1_5) {
+ log::info!("Using X11 platform");
+ let display_attributes = [khronos_egl::ATTRIB_NONE];
+ let display = egl
+ .get_platform_display(EGL_PLATFORM_X11_KHR, display.as_ptr(), &display_attributes)
+ .unwrap();
+ (display, Some(Arc::new(library)), WindowKind::X11)
+ } else if let (Some((display, library)), Some(egl)) = (angle_x11_display_library, egl1_5) {
+ log::info!("Using Angle platform with X11");
+ let display_attributes = [
+ EGL_PLATFORM_ANGLE_NATIVE_PLATFORM_TYPE_ANGLE as khronos_egl::Attrib,
+ EGL_PLATFORM_X11_KHR as khronos_egl::Attrib,
+ EGL_PLATFORM_ANGLE_DEBUG_LAYERS_ENABLED as khronos_egl::Attrib,
+ // 1 enables the ANGLE debug layers when validation is requested.
+ usize::from(desc.flags.contains(crate::InstanceFlags::VALIDATION)),
+ khronos_egl::ATTRIB_NONE,
+ ];
+ let display = egl
+ .get_platform_display(
+ EGL_PLATFORM_ANGLE_ANGLE,
+ display.as_ptr(),
+ &display_attributes,
+ )
+ .unwrap();
+ (display, Some(Arc::new(library)), WindowKind::AngleX11)
+ } else if client_ext_str.contains("EGL_MESA_platform_surfaceless") {
+ log::info!("No windowing system present. Using surfaceless platform");
+ let egl = egl1_5.expect("Failed to get EGL 1.5 for surfaceless");
+ let display = egl
+ .get_platform_display(
+ EGL_PLATFORM_SURFACELESS_MESA,
+ std::ptr::null_mut(),
+ &[khronos_egl::ATTRIB_NONE],
+ )
+ .unwrap();
+ (display, None, WindowKind::Unknown)
+ } else {
+ log::info!("EGL_MESA_platform_surfaceless not available. Using default platform");
+ let display = egl.get_display(khronos_egl::DEFAULT_DISPLAY).unwrap();
+ (display, None, WindowKind::Unknown)
+ };
+
+ if desc.flags.contains(crate::InstanceFlags::VALIDATION)
+ && client_ext_str.contains("EGL_KHR_debug")
+ {
+ log::info!("Enabling EGL debug output");
+ let function: EglDebugMessageControlFun = {
+ let addr = egl.get_proc_address("eglDebugMessageControlKHR").unwrap();
+ unsafe { std::mem::transmute(addr) }
+ };
+ // Enable every message severity for `egl_debug_proc`.
+ let attributes = [
+ EGL_DEBUG_MSG_CRITICAL_KHR as khronos_egl::Attrib,
+ 1,
+ EGL_DEBUG_MSG_ERROR_KHR as khronos_egl::Attrib,
+ 1,
+ EGL_DEBUG_MSG_WARN_KHR as khronos_egl::Attrib,
+ 1,
+ EGL_DEBUG_MSG_INFO_KHR as khronos_egl::Attrib,
+ 1,
+ khronos_egl::ATTRIB_NONE,
+ ];
+ unsafe { (function)(Some(egl_debug_proc), attributes.as_ptr()) };
+ }
+
+ let inner = Inner::create(desc.flags, egl, display)?;
+
+ Ok(Instance {
+ wsi: WindowSystemInterface {
+ library: wsi_library,
+ kind: wsi_kind,
+ },
+ flags: desc.flags,
+ inner: Mutex::new(inner),
+ })
+ }
+
+ /// Validates the window/display handle pair and builds a [`Surface`].
+ ///
+ /// Wayland handles may force a re-initialization of the whole context,
+ /// because Wayland displays are not shareable between surfaces.
+ #[cfg_attr(target_os = "macos", allow(unused, unused_mut, unreachable_code))]
+ unsafe fn create_surface(
+ &self,
+ display_handle: raw_window_handle::RawDisplayHandle,
+ window_handle: raw_window_handle::RawWindowHandle,
+ ) -> Result<Surface, crate::InstanceError> {
+ use raw_window_handle::RawWindowHandle as Rwh;
+
+ #[cfg_attr(
+ any(target_os = "android", target_os = "emscripten"),
+ allow(unused_mut)
+ )]
+ let mut inner = self.inner.lock();
+
+ match (window_handle, display_handle) {
+ (Rwh::Xlib(_), _) => {}
+ (Rwh::Xcb(_), _) => {}
+ (Rwh::Win32(_), _) => {}
+ (Rwh::AppKit(_), _) => {}
+ #[cfg(target_os = "android")]
+ (Rwh::AndroidNdk(handle), _) => {
+ // Match the native window's buffer format to the EGL config.
+ let format = inner
+ .egl
+ .instance
+ .get_config_attrib(
+ inner.egl.display,
+ inner.config,
+ khronos_egl::NATIVE_VISUAL_ID,
+ )
+ .unwrap();
+
+ let ret = unsafe {
+ ANativeWindow_setBuffersGeometry(handle.a_native_window, 0, 0, format)
+ };
+
+ if ret != 0 {
+ log::error!("Error returned from ANativeWindow_setBuffersGeometry");
+ return Err(crate::InstanceError);
+ }
+ }
+ #[cfg(not(target_os = "emscripten"))]
+ (Rwh::Wayland(_), raw_window_handle::RawDisplayHandle::Wayland(display_handle)) => {
+ if inner
+ .wl_display
+ .map(|ptr| ptr != display_handle.display)
+ .unwrap_or(true)
+ {
+ /* Wayland displays are not sharable between surfaces so if the
+ * surface we receive from this handle is from a different
+ * display, we must re-initialize the context.
+ *
+ * See gfx-rs/gfx#3545
+ */
+ log::warn!("Re-initializing Gles context due to Wayland window");
+
+ use std::ops::DerefMut;
+ let display_attributes = [khronos_egl::ATTRIB_NONE];
+
+ let display = inner
+ .egl
+ .instance
+ .upcast::<khronos_egl::EGL1_5>()
+ .unwrap()
+ .get_platform_display(
+ EGL_PLATFORM_WAYLAND_KHR,
+ display_handle.display,
+ &display_attributes,
+ )
+ .unwrap();
+
+ let new_inner =
+ Inner::create(self.flags, Arc::clone(&inner.egl.instance), display)
+ .map_err(|_| crate::InstanceError)?;
+
+ let old_inner = std::mem::replace(inner.deref_mut(), new_inner);
+ inner.wl_display = Some(display_handle.display);
+
+ drop(old_inner);
+ }
+ }
+ #[cfg(target_os = "emscripten")]
+ (Rwh::Web(_), _) => {}
+ other => {
+ log::error!("Unsupported window: {:?}", other);
+ return Err(crate::InstanceError);
+ }
+ };
+
+ inner.egl.unmake_current();
+
+ Ok(Surface {
+ egl: inner.egl.clone(),
+ wsi: self.wsi.clone(),
+ config: inner.config,
+ presentable: inner.supports_native_window,
+ raw_window_handle: window_handle,
+ swapchain: None,
+ srgb_kind: inner.srgb_kind,
+ })
+ }
+ // Intentionally a no-op: dropping `Surface` releases its handles.
+ unsafe fn destroy_surface(&self, _surface: Surface) {}
+
+ /// Creates a glow context from the EGL loader and exposes it as the
+ /// single adapter, enabling GL debug output when requested.
+ unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<super::Api>> {
+ let inner = self.inner.lock();
+ inner.egl.make_current();
+
+ let gl = unsafe {
+ glow::Context::from_loader_function(|name| {
+ inner
+ .egl
+ .instance
+ .get_proc_address(name)
+ .map_or(ptr::null(), |p| p as *const _)
+ })
+ };
+
+ if self.flags.contains(crate::InstanceFlags::DEBUG) && gl.supports_debug() {
+ log::info!("Max label length: {}", unsafe {
+ gl.get_parameter_i32(glow::MAX_LABEL_LENGTH)
+ });
+ }
+
+ if self.flags.contains(crate::InstanceFlags::VALIDATION) && gl.supports_debug() {
+ log::info!("Enabling GLES debug output");
+ unsafe { gl.enable(glow::DEBUG_OUTPUT) };
+ unsafe { gl.debug_message_callback(gl_debug_message_callback) };
+ }
+
+ inner.egl.unmake_current();
+
+ unsafe {
+ super::Adapter::expose(AdapterContext {
+ glow: Mutex::new(gl),
+ egl: Some(inner.egl.clone()),
+ })
+ }
+ .into_iter()
+ .collect()
+ }
+}
+
+impl super::Adapter {
+ /// Creates a new external adapter using the specified loader function.
+ ///
+ /// # Safety
+ ///
+ /// - The underlying OpenGL ES context must be current.
+ /// - The underlying OpenGL ES context must be current when interfacing with any objects returned by
+ /// wgpu-hal from this adapter.
+ pub unsafe fn new_external(
+ fun: impl FnMut(&str) -> *const ffi::c_void,
+ ) -> Option<crate::ExposedAdapter<super::Api>> {
+ let context = unsafe { glow::Context::from_loader_function(fun) };
+ // `egl: None` marks the context as externally owned (`is_owned` == false).
+ unsafe {
+ Self::expose(AdapterContext {
+ glow: Mutex::new(context),
+ egl: None,
+ })
+ }
+ }
+
+ /// Returns the adapter's shared GL/EGL context wrapper.
+ pub fn adapter_context(&self) -> &AdapterContext {
+ &self.shared.context
+ }
+}
+
+impl super::Device {
+ /// Returns the underlying EGL context.
+ pub fn context(&self) -> &AdapterContext {
+ &self.shared.context
+ }
+}
+
+/// Per-surface swapchain state: the EGL surface plus the off-screen
+/// framebuffer/renderbuffer that rendering targets before presentation.
+#[derive(Debug)]
+pub struct Swapchain {
+ surface: khronos_egl::Surface,
+ /// Wayland only: the `wl_egl_window` backing `surface`, if any.
+ wl_window: Option<*mut raw::c_void>,
+ framebuffer: glow::Framebuffer,
+ renderbuffer: glow::Renderbuffer,
+ /// Extent because the window lies
+ extent: wgt::Extent3d,
+ format: wgt::TextureFormat,
+ format_desc: super::TextureFormatDesc,
+ #[allow(unused)]
+ sample_type: wgt::TextureSampleType,
+}
+
+/// A presentation surface: the raw window handle, the EGL context used to
+/// present to it, and the currently configured swapchain (if any).
+#[derive(Debug)]
+pub struct Surface {
+ egl: EglContext,
+ wsi: WindowSystemInterface,
+ config: khronos_egl::Config,
+ pub(super) presentable: bool,
+ raw_window_handle: raw_window_handle::RawWindowHandle,
+ swapchain: Option<Swapchain>,
+ srgb_kind: SrgbFrameBufferKind,
+}
+
+// NOTE(review): asserts the raw window/EGL handles may cross threads --
+// confirm all mutation goes through `&mut self` methods.
+unsafe impl Send for Surface {}
+unsafe impl Sync for Surface {}
+
+impl Surface {
+ /// Presents the current swapchain: blits the off-screen framebuffer to
+ /// the EGL window surface (Y-flipped) and swaps buffers.
+ pub(super) unsafe fn present(
+ &mut self,
+ _suf_texture: super::Texture,
+ gl: &glow::Context,
+ ) -> Result<(), crate::SurfaceError> {
+ let sc = self.swapchain.as_ref().unwrap();
+
+ // Bind the window surface on this thread for the duration of the blit.
+ self.egl
+ .instance
+ .make_current(
+ self.egl.display,
+ Some(sc.surface),
+ Some(sc.surface),
+ Some(self.egl.raw),
+ )
+ .map_err(|e| {
+ log::error!("make_current(surface) failed: {}", e);
+ crate::SurfaceError::Lost
+ })?;
+
+ // Make sure leftover scissor/mask state cannot clip the blit.
+ unsafe { gl.disable(glow::SCISSOR_TEST) };
+ unsafe { gl.color_mask(true, true, true, true) };
+
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) };
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(sc.framebuffer)) };
+ // Note the Y-flipping here. GL's presentation is not flipped,
+ // but main rendering is. Therefore, we Y-flip the output positions
+ // in the shader, and also this blit.
+ unsafe {
+ gl.blit_framebuffer(
+ 0,
+ sc.extent.height as i32,
+ sc.extent.width as i32,
+ 0,
+ 0,
+ 0,
+ sc.extent.width as i32,
+ sc.extent.height as i32,
+ glow::COLOR_BUFFER_BIT,
+ glow::NEAREST,
+ )
+ };
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) };
+
+ self.egl
+ .instance
+ .swap_buffers(self.egl.display, sc.surface)
+ .map_err(|e| {
+ log::error!("swap_buffers failed: {}", e);
+ crate::SurfaceError::Lost
+ })?;
+ // Release the context so other threads can lock it again.
+ self.egl
+ .instance
+ .make_current(self.egl.display, None, None, None)
+ .map_err(|e| {
+ log::error!("make_current(null) failed: {}", e);
+ crate::SurfaceError::Lost
+ })?;
+
+ Ok(())
+ }
+
+ /// Tears down the GL objects of the current swapchain, if any, and
+ /// returns the EGL surface and Wayland window for the caller to free.
+ unsafe fn unconfigure_impl(
+ &mut self,
+ device: &super::Device,
+ ) -> Option<(khronos_egl::Surface, Option<*mut raw::c_void>)> {
+ let gl = &device.shared.context.lock();
+ match self.swapchain.take() {
+ Some(sc) => {
+ unsafe { gl.delete_renderbuffer(sc.renderbuffer) };
+ unsafe { gl.delete_framebuffer(sc.framebuffer) };
+ Some((sc.surface, sc.wl_window))
+ }
+ None => None,
+ }
+ }
+
+ /// Whether an sRGB framebuffer can be requested for this surface.
+ pub fn supports_srgb(&self) -> bool {
+ match self.srgb_kind {
+ SrgbFrameBufferKind::None => false,
+ _ => true,
+ }
+ }
+}
+
impl crate::Surface<super::Api> for Surface {
    /// (Re)creates the swapchain for this surface.
    ///
    /// If the surface was configured before, the previous EGL surface and
    /// Wayland window are reused; otherwise a native EGL window surface is
    /// created for the platform window handle this surface wraps. A
    /// renderbuffer + framebuffer pair of the requested size and format is
    /// then allocated to serve as the swapchain image.
    unsafe fn configure(
        &mut self,
        device: &super::Device,
        config: &crate::SurfaceConfiguration,
    ) -> Result<(), crate::SurfaceError> {
        use raw_window_handle::RawWindowHandle as Rwh;

        let (surface, wl_window) = match unsafe { self.unconfigure_impl(device) } {
            Some(pair) => pair,
            None => {
                let mut wl_window = None;
                let (mut temp_xlib_handle, mut temp_xcb_handle);
                // Resolve the platform window handle into the raw pointer
                // shape that EGL expects for each windowing system.
                #[allow(trivial_casts)]
                let native_window_ptr = match (self.wsi.kind, self.raw_window_handle) {
                    (WindowKind::Unknown | WindowKind::X11, Rwh::Xlib(handle)) => {
                        // EGL wants a pointer to the X11 window id here,
                        // not the id itself; keep it alive in a local.
                        temp_xlib_handle = handle.window;
                        &mut temp_xlib_handle as *mut _ as *mut std::ffi::c_void
                    }
                    (WindowKind::AngleX11, Rwh::Xlib(handle)) => {
                        handle.window as *mut std::ffi::c_void
                    }
                    (WindowKind::Unknown | WindowKind::X11, Rwh::Xcb(handle)) => {
                        temp_xcb_handle = handle.window;
                        &mut temp_xcb_handle as *mut _ as *mut std::ffi::c_void
                    }
                    (WindowKind::AngleX11, Rwh::Xcb(handle)) => {
                        handle.window as *mut std::ffi::c_void
                    }
                    (WindowKind::Unknown, Rwh::AndroidNdk(handle)) => handle.a_native_window,
                    (WindowKind::Wayland, Rwh::Wayland(handle)) => {
                        // Create a fresh `wl_egl_window` at a placeholder
                        // size; it is resized to the configured extent below.
                        let library = self.wsi.library.as_ref().unwrap();
                        let wl_egl_window_create: libloading::Symbol<WlEglWindowCreateFun> =
                            unsafe { library.get(b"wl_egl_window_create") }.unwrap();
                        let window = unsafe { wl_egl_window_create(handle.surface, 640, 480) }
                            as *mut _ as *mut std::ffi::c_void;
                        wl_window = Some(window);
                        window
                    }
                    #[cfg(target_os = "emscripten")]
                    (WindowKind::Unknown, Rwh::Web(handle)) => handle.id as *mut std::ffi::c_void,
                    (WindowKind::Unknown, Rwh::Win32(handle)) => handle.hwnd,
                    (WindowKind::Unknown, Rwh::AppKit(handle)) => {
                        #[cfg(not(target_os = "macos"))]
                        let window_ptr = handle.ns_view;
                        #[cfg(target_os = "macos")]
                        let window_ptr = {
                            use objc::{msg_send, runtime::Object, sel, sel_impl};
                            // ns_view always has a layer, so there is no need
                            // to verify that it exists.
                            let layer: *mut Object =
                                msg_send![handle.ns_view as *mut Object, layer];
                            layer as *mut ffi::c_void
                        };
                        window_ptr
                    }
                    _ => {
                        log::warn!(
                            "Initialized platform {:?} doesn't work with window {:?}",
                            self.wsi.kind,
                            self.raw_window_handle
                        );
                        return Err(crate::SurfaceError::Other("incompatible window kind"));
                    }
                };

                let mut attributes = vec![
                    khronos_egl::RENDER_BUFFER,
                    // We don't want any of the buffering done by the driver, because we
                    // manage a swapchain on our side.
                    // Some drivers just fail on surface creation seeing `EGL_SINGLE_BUFFER`.
                    if cfg!(any(target_os = "android", target_os = "macos"))
                        || cfg!(windows)
                        || self.wsi.kind == WindowKind::AngleX11
                    {
                        khronos_egl::BACK_BUFFER
                    } else {
                        khronos_egl::SINGLE_BUFFER
                    },
                ];
                // Request an sRGB-capable surface via whichever mechanism
                // (core 1.5 attribute or KHR extension) was detected.
                match self.srgb_kind {
                    SrgbFrameBufferKind::None => {}
                    SrgbFrameBufferKind::Core => {
                        attributes.push(khronos_egl::GL_COLORSPACE);
                        attributes.push(khronos_egl::GL_COLORSPACE_SRGB);
                    }
                    SrgbFrameBufferKind::Khr => {
                        attributes.push(EGL_GL_COLORSPACE_KHR as i32);
                        attributes.push(EGL_GL_COLORSPACE_SRGB_KHR as i32);
                    }
                }
                attributes.push(khronos_egl::ATTRIB_NONE as i32);

                #[cfg(not(target_os = "emscripten"))]
                let egl1_5 = self.egl.instance.upcast::<khronos_egl::EGL1_5>();

                #[cfg(target_os = "emscripten")]
                let egl1_5: Option<&Arc<EglInstance>> = Some(&self.egl.instance);

                // Careful, we can still be in 1.4 version even if `upcast` succeeds
                let raw_result = match egl1_5 {
                    Some(egl) if self.wsi.kind != WindowKind::Unknown => {
                        // EGL 1.5 takes `usize`-typed attribute lists.
                        let attributes_usize = attributes
                            .into_iter()
                            .map(|v| v as usize)
                            .collect::<Vec<_>>();
                        egl.create_platform_window_surface(
                            self.egl.display,
                            self.config,
                            native_window_ptr,
                            &attributes_usize,
                        )
                    }
                    _ => unsafe {
                        self.egl.instance.create_window_surface(
                            self.egl.display,
                            self.config,
                            native_window_ptr,
                            Some(&attributes),
                        )
                    },
                };

                match raw_result {
                    Ok(raw) => (raw, wl_window),
                    Err(e) => {
                        log::warn!("Error in create_window_surface: {:?}", e);
                        return Err(crate::SurfaceError::Lost);
                    }
                }
            }
        };

        // Wayland windows carry their own size; bring it up to date with
        // the requested swapchain extent.
        if let Some(window) = wl_window {
            let library = self.wsi.library.as_ref().unwrap();
            let wl_egl_window_resize: libloading::Symbol<WlEglWindowResizeFun> =
                unsafe { library.get(b"wl_egl_window_resize") }.unwrap();
            unsafe {
                wl_egl_window_resize(
                    window,
                    config.extent.width as i32,
                    config.extent.height as i32,
                    0,
                    0,
                )
            };
        }

        // Allocate the renderbuffer + framebuffer pair that acts as the
        // swapchain image we hand out from `acquire_texture`.
        let format_desc = device.shared.describe_texture_format(config.format);
        let gl = &device.shared.context.lock();
        let renderbuffer = unsafe { gl.create_renderbuffer() }.map_err(|error| {
            log::error!("Internal swapchain renderbuffer creation failed: {error}");
            crate::DeviceError::OutOfMemory
        })?;
        unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(renderbuffer)) };
        unsafe {
            gl.renderbuffer_storage(
                glow::RENDERBUFFER,
                format_desc.internal,
                config.extent.width as _,
                config.extent.height as _,
            )
        };
        let framebuffer = unsafe { gl.create_framebuffer() }.map_err(|error| {
            log::error!("Internal swapchain framebuffer creation failed: {error}");
            crate::DeviceError::OutOfMemory
        })?;
        unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) };
        unsafe {
            gl.framebuffer_renderbuffer(
                glow::READ_FRAMEBUFFER,
                glow::COLOR_ATTACHMENT0,
                glow::RENDERBUFFER,
                Some(renderbuffer),
            )
        };
        // Leave the context with no lingering bindings.
        unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
        unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) };

        self.swapchain = Some(Swapchain {
            surface,
            wl_window,
            renderbuffer,
            framebuffer,
            extent: config.extent,
            format: config.format,
            format_desc,
            sample_type: wgt::TextureSampleType::Float { filterable: false },
        });

        Ok(())
    }

    /// Fully tears down the swapchain: the GL objects (via
    /// `unconfigure_impl`), the EGL surface, and any Wayland window wrapper.
    unsafe fn unconfigure(&mut self, device: &super::Device) {
        if let Some((surface, wl_window)) = unsafe { self.unconfigure_impl(device) } {
            self.egl
                .instance
                .destroy_surface(self.egl.display, surface)
                .unwrap();
            if let Some(window) = wl_window {
                let library = self.wsi.library.as_ref().expect("unsupported window");
                let wl_egl_window_destroy: libloading::Symbol<WlEglWindowDestroyFun> =
                    unsafe { library.get(b"wl_egl_window_destroy") }.unwrap();
                unsafe { wl_egl_window_destroy(window) };
            }
        }
    }

    /// Wraps the swapchain renderbuffer in a `Texture` and hands it out.
    ///
    /// Never blocks; the timeout parameter is currently ignored. Panics if
    /// called before `configure` succeeded (no swapchain present).
    unsafe fn acquire_texture(
        &mut self,
        _timeout_ms: Option<Duration>, //TODO
    ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
        let sc = self.swapchain.as_ref().unwrap();
        let texture = super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: sc.renderbuffer,
            },
            drop_guard: None,
            array_layer_count: 1,
            mip_level_count: 1,
            format: sc.format,
            format_desc: sc.format_desc.clone(),
            copy_size: crate::CopyExtent {
                width: sc.extent.width,
                height: sc.extent.height,
                depth: 1,
            },
            is_cubemap: false,
        };
        Ok(Some(crate::AcquiredSurfaceTexture {
            texture,
            suboptimal: false,
        }))
    }
    // Nothing to release: the acquired texture only aliases the swapchain
    // renderbuffer, which is owned by the swapchain itself.
    unsafe fn discard_texture(&mut self, _texture: super::Texture) {}
}
diff --git a/third_party/rust/wgpu-hal/src/gles/emscripten.rs b/third_party/rust/wgpu-hal/src/gles/emscripten.rs
new file mode 100644
index 0000000000..7372dbd369
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/emscripten.rs
@@ -0,0 +1,26 @@
+extern "C" {
+ /// returns 1 if success. 0 if failure. extension name must be null terminated
+ fn emscripten_webgl_enable_extension(
+ context: std::ffi::c_int,
+ extension: *const std::ffi::c_char,
+ ) -> std::ffi::c_int;
+ fn emscripten_webgl_get_current_context() -> std::ffi::c_int;
+}
+/// Webgl requires extensions to be enabled before using them.
+/// This function can be used to enable webgl extension on emscripten target.
+///
+/// returns true on success
+///
+/// # Safety:
+///
+/// - opengl context MUST BE current
+/// - extension_name_null_terminated argument must be a valid string with null terminator.
+/// - extension must be present. check `glow_context.supported_extensions()`
+pub unsafe fn enable_extension(extension_name_null_terminated: &str) -> bool {
+ unsafe {
+ emscripten_webgl_enable_extension(
+ emscripten_webgl_get_current_context(),
+ extension_name_null_terminated.as_ptr() as _,
+ ) == 1
+ }
+}
diff --git a/third_party/rust/wgpu-hal/src/gles/mod.rs b/third_party/rust/wgpu-hal/src/gles/mod.rs
new file mode 100644
index 0000000000..f11286833d
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/mod.rs
@@ -0,0 +1,861 @@
+/*!
+# OpenGL ES3 API (aka GLES3).
+
+Designed to work on Linux and Android, with context provided by EGL.
+
+## Texture views
+
+GLES3 doesn't really have separate texture view objects. We have to remember the
+original texture and the sub-range into it. Problem is, however, that there is
+no way to expose a subset of array layers or mip levels of a sampled texture.
+
+## Binding model
+
+Binding model is very different from WebGPU, especially with regards to samplers.
+GLES3 has sampler objects, but they aren't separately bindable to the shaders.
+Each sampled texture is exposed to the shader as a combined texture-sampler binding.
+
+When building the pipeline layout, we linearize binding entries based on the groups
+(uniform/storage buffers, uniform/storage textures), and record the mapping into
+`BindGroupLayoutInfo`.
+When a pipeline gets created, we track all the texture-sampler associations
+from their static use in the shader.
+We only support at most one sampler used with each texture so far. The linear index
+of this sampler is stored per texture slot in `SamplerBindMap` array.
+
+The texture-sampler pairs get potentially invalidated in 2 places:
+ - when a new pipeline is set, we update the linear indices of associated samplers
+ - when a new bind group is set, we update both the textures and the samplers
+
+We expect that the changes to sampler states between any 2 pipelines of the same layout
+will be minimal, if any.
+
+## Vertex data
+
+Generally, vertex buffers are marked as dirty and lazily bound on draw.
+
+GLES3 doesn't support "base instance" semantics. However, it's easy to support,
+since we are forced to do late binding anyway. We just adjust the offsets
+into the vertex data.
+
+### Old path
+
+In GLES-3.0 and WebGL2, vertex buffer layout is provided
+together with the actual buffer binding.
+We invalidate the attributes on the vertex buffer change, and re-bind them.
+
+### New path
+
+In GLES-3.1 and higher, the vertex buffer layout can be declared separately
+from the vertex data itself. This mostly matches WebGPU, however there is a catch:
+`stride` needs to be specified with the data, not as a part of the layout.
+
+To address this, we invalidate the vertex buffers based on:
+ - whether or not `start_instance` is used
+ - stride has changed
+
+*/
+
+///cbindgen:ignore
+#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
+mod egl;
+#[cfg(target_os = "emscripten")]
+mod emscripten;
+#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+mod web;
+
+mod adapter;
+mod command;
+mod conv;
+mod device;
+mod queue;
+
+use crate::{CopyExtent, TextureDescriptor};
+
+#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
+pub use self::egl::{AdapterContext, AdapterContextLock};
+#[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
+use self::egl::{Instance, Surface};
+
+#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+pub use self::web::AdapterContext;
+#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+use self::web::{Instance, Surface};
+
+use arrayvec::ArrayVec;
+
+use glow::HasContext;
+
+use naga::FastHashMap;
+use parking_lot::Mutex;
+use std::sync::atomic::AtomicU32;
+use std::{fmt, ops::Range, sync::Arc};
+
/// Marker type selecting the GLES/WebGL backend in the HAL `Api` family.
#[derive(Clone)]
pub struct Api;
+
//Note: we can support more samplers if not every one of them is used at a time,
// but it probably isn't worth it.
const MAX_TEXTURE_SLOTS: usize = 16;
const MAX_SAMPLERS: usize = 16;
const MAX_VERTEX_ATTRIBUTES: usize = 16;
// Size of the internal zero-filled buffer (256 KiB) used to implement
// `ClearBuffer` by copying zeroes out of it.
const ZERO_BUFFER_SIZE: usize = 256 << 10;
const MAX_PUSH_CONSTANTS: usize = 64;
+
// Wires this backend's concrete resource types into the generic HAL API.
impl crate::Api for Api {
    type Instance = Instance;
    type Surface = Surface;
    type Adapter = Adapter;
    type Device = Device;

    type Queue = Queue;
    type CommandEncoder = CommandEncoder;
    type CommandBuffer = CommandBuffer;

    type Buffer = Buffer;
    type Texture = Texture;
    type SurfaceTexture = Texture;
    type TextureView = TextureView;
    type Sampler = Sampler;
    type QuerySet = QuerySet;
    type Fence = Fence;

    type BindGroupLayout = BindGroupLayout;
    type BindGroup = BindGroup;
    type PipelineLayout = PipelineLayout;
    type ShaderModule = ShaderModule;
    type RenderPipeline = RenderPipeline;
    type ComputePipeline = ComputePipeline;
}
+
bitflags::bitflags! {
    /// Flags that affect internal code paths but do not
    /// change the exposed feature set.
    #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
    struct PrivateCapabilities: u32 {
        /// Indicates support for `glBufferStorage` allocation.
        const BUFFER_ALLOCATION = 1 << 0;
        /// Support explicit layouts in shader.
        const SHADER_BINDING_LAYOUT = 1 << 1;
        /// Support extended shadow sampling instructions.
        const SHADER_TEXTURE_SHADOW_LOD = 1 << 2;
        /// Support memory barriers.
        const MEMORY_BARRIERS = 1 << 3;
        /// Vertex buffer layouts separate from the data.
        const VERTEX_BUFFER_LAYOUT = 1 << 4;
        /// Indicates that buffers used as `GL_ELEMENT_ARRAY_BUFFER` may be created / initialized / used
        /// as other targets, if not present they must not be mixed with other targets.
        const INDEX_BUFFER_ROLE_CHANGE = 1 << 5;
        /// Indicates that the device supports disabling draw buffers
        const CAN_DISABLE_DRAW_BUFFER = 1 << 6;
        /// Supports `glGetBufferSubData`
        const GET_BUFFER_SUB_DATA = 1 << 7;
        /// Supports `f16` color buffers
        const COLOR_BUFFER_HALF_FLOAT = 1 << 8;
        /// Supports `f11/f10` and `f32` color buffers
        const COLOR_BUFFER_FLOAT = 1 << 9;
        /// Supports linear filtering of `f32` textures.
        const TEXTURE_FLOAT_LINEAR = 1 << 10;
    }
}
+
bitflags::bitflags! {
    /// Flags that indicate necessary workarounds for specific devices or driver bugs
    #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
    struct Workarounds: u32 {
        // Needs workaround for Intel Mesa bug:
        // https://gitlab.freedesktop.org/mesa/mesa/-/issues/2565.
        //
        // This comment
        // (https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/4972/diffs?diff_id=75888#22f5d1004713c9bbf857988c7efb81631ab88f99_323_327)
        // seems to indicate all skylake models are affected.
        const MESA_I915_SRGB_SHADER_CLEAR = 1 << 0;
        /// Buffer map must be emulated because it is not supported natively
        const EMULATE_BUFFER_MAP = 1 << 1;
    }
}
+
/// A raw GL binding-target enum value (e.g. `glow::ARRAY_BUFFER`,
/// `glow::TEXTURE_2D`), stored as `u32`.
type BindTarget = u32;

/// Which `glVertexAttrib*Pointer` entry point an attribute must use.
#[derive(Debug, Clone, Copy)]
enum VertexAttribKind {
    Float, // glVertexAttribPointer
    Integer, // glVertexAttribIPointer
    //Double, // glVertexAttribLPointer
}

impl Default for VertexAttribKind {
    fn default() -> Self {
        Self::Float
    }
}
+
/// How a wgpu texture format maps onto GL format enums.
#[derive(Clone, Debug)]
pub struct TextureFormatDesc {
    /// Sized internal format (e.g. what is passed to `glRenderbufferStorage`).
    pub internal: u32,
    /// External (client memory) pixel format for transfers.
    pub external: u32,
    /// Component data type for pixel transfers.
    pub data_type: u32,
}

/// State shared between the adapter, the device, and the queue.
struct AdapterShared {
    context: AdapterContext,
    private_caps: PrivateCapabilities,
    features: wgt::Features,
    workarounds: Workarounds,
    shading_language_version: naga::back::glsl::Version,
    max_texture_size: u32,
    // Monotonic source of `ShaderId`s handed to new shader modules.
    next_shader_id: AtomicU32,
    // Cache of linked GL programs, keyed by shader stages + binding layout.
    program_cache: Mutex<ProgramCache>,
}

pub struct Adapter {
    shared: Arc<AdapterShared>,
}

pub struct Device {
    shared: Arc<AdapterShared>,
    // The device's vertex array object.
    main_vao: glow::VertexArray,
    #[cfg(all(not(target_arch = "wasm32"), feature = "renderdoc"))]
    render_doc: crate::auxil::renderdoc::RenderDoc,
}
+
pub struct Queue {
    shared: Arc<AdapterShared>,
    features: wgt::Features,
    // Internal framebuffers: one for render-pass drawing, one for copies.
    draw_fbo: glow::Framebuffer,
    copy_fbo: glow::Framebuffer,
    /// Shader program used to clear the screen for [`Workarounds::MESA_I915_SRGB_SHADER_CLEAR`]
    /// devices.
    shader_clear_program: glow::Program,
    /// The uniform location of the color uniform in the shader clear program
    shader_clear_program_color_uniform_location: glow::UniformLocation,
    /// Keep a reasonably large buffer filled with zeroes, so that we can implement `ClearBuffer` of
    /// zeroes by copying from it.
    zero_buffer: glow::Buffer,
    temp_query_results: Vec<u64>,
    // Number of color draw buffers currently enabled; used to restore
    // `glDrawBuffers` state after a workaround shader clear.
    draw_buffer_count: u8,
    current_index_buffer: Option<glow::Buffer>,
}
+
#[derive(Clone, Debug)]
pub struct Buffer {
    // NOTE(review): appears to be `None` when the buffer is emulated in
    // host memory via `data` — confirm against `device.rs`.
    raw: Option<glow::Buffer>,
    target: BindTarget,
    size: wgt::BufferAddress,
    map_flags: u32,
    // Host-side shadow storage for buffer-map emulation
    // (see `Workarounds::EMULATE_BUFFER_MAP`).
    data: Option<Arc<std::sync::Mutex<Vec<u8>>>>,
}

// Safe: WASM doesn't have threads
#[cfg(target_arch = "wasm32")]
unsafe impl Sync for Buffer {}
#[cfg(target_arch = "wasm32")]
unsafe impl Send for Buffer {}

/// The GL object backing a texture.
#[derive(Clone, Debug)]
pub enum TextureInner {
    Renderbuffer {
        raw: glow::Renderbuffer,
    },
    /// Backed by the window system's default framebuffer.
    DefaultRenderbuffer,
    Texture {
        raw: glow::Texture,
        target: BindTarget,
    },
    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
    ExternalFramebuffer {
        inner: web_sys::WebGlFramebuffer,
    },
}

// SAFE: WASM doesn't have threads
#[cfg(target_arch = "wasm32")]
unsafe impl Send for TextureInner {}
#[cfg(target_arch = "wasm32")]
unsafe impl Sync for TextureInner {}
+
+impl TextureInner {
+ fn as_native(&self) -> (glow::Texture, BindTarget) {
+ match *self {
+ Self::Renderbuffer { .. } | Self::DefaultRenderbuffer => {
+ panic!("Unexpected renderbuffer");
+ }
+ Self::Texture { raw, target } => (raw, target),
+ #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+ Self::ExternalFramebuffer { .. } => panic!("Unexpected external framebuffer"),
+ }
+ }
+}
+
#[derive(Debug)]
pub struct Texture {
    pub inner: TextureInner,
    // When set, the underlying GL object is externally owned and must not
    // be deleted by us.
    pub drop_guard: Option<crate::DropGuard>,
    pub mip_level_count: u32,
    pub array_layer_count: u32,
    pub format: wgt::TextureFormat,
    #[allow(unused)]
    pub format_desc: TextureFormatDesc,
    pub copy_size: CopyExtent,
    pub is_cubemap: bool,
}
+
+impl Texture {
+ pub fn default_framebuffer(format: wgt::TextureFormat) -> Self {
+ Self {
+ inner: TextureInner::DefaultRenderbuffer,
+ drop_guard: None,
+ mip_level_count: 1,
+ array_layer_count: 1,
+ format,
+ format_desc: TextureFormatDesc {
+ internal: 0,
+ external: 0,
+ data_type: 0,
+ },
+ copy_size: CopyExtent {
+ width: 0,
+ height: 0,
+ depth: 0,
+ },
+ is_cubemap: false,
+ }
+ }
+
+ /// Returns the `target`, whether the image is 3d and whether the image is a cubemap.
+ fn get_info_from_desc(desc: &TextureDescriptor) -> (u32, bool, bool) {
+ match desc.dimension {
+ wgt::TextureDimension::D1 => (glow::TEXTURE_2D, false, false),
+ wgt::TextureDimension::D2 => {
+ // HACK: detect a cube map; forces cube compatible textures to be cube textures
+ match (desc.is_cube_compatible(), desc.size.depth_or_array_layers) {
+ (false, 1) => (glow::TEXTURE_2D, false, false),
+ (false, _) => (glow::TEXTURE_2D_ARRAY, true, false),
+ (true, 6) => (glow::TEXTURE_CUBE_MAP, false, true),
+ (true, _) => (glow::TEXTURE_CUBE_MAP_ARRAY, true, true),
+ }
+ }
+ wgt::TextureDimension::D3 => (glow::TEXTURE_3D, true, false),
+ }
+ }
+}
+
/// A texture together with the subresource range (aspects, mips, layers) it
/// exposes; GLES has no separate texture-view objects (see module docs).
#[derive(Clone, Debug)]
pub struct TextureView {
    inner: TextureInner,
    aspects: crate::FormatAspects,
    mip_levels: Range<u32>,
    array_layers: Range<u32>,
    format: wgt::TextureFormat,
}

#[derive(Debug)]
pub struct Sampler {
    raw: glow::Sampler,
}

pub struct BindGroupLayout {
    entries: Arc<[wgt::BindGroupLayoutEntry]>,
}

struct BindGroupLayoutInfo {
    entries: Arc<[wgt::BindGroupLayoutEntry]>,
    /// Mapping of resources, indexed by `binding`, into the whole layout space.
    /// For texture resources, the value is the texture slot index.
    /// For sampler resources, the value is the index of the sampler in the whole layout.
    /// For buffers, the value is the uniform or storage slot index.
    /// For unused bindings, the value is `!0`
    binding_to_slot: Box<[u8]>,
}

pub struct PipelineLayout {
    group_infos: Box<[BindGroupLayoutInfo]>,
    naga_options: naga::back::glsl::Options,
}
+
+impl PipelineLayout {
+ fn get_slot(&self, br: &naga::ResourceBinding) -> u8 {
+ let group_info = &self.group_infos[br.group as usize];
+ group_info.binding_to_slot[br.binding as usize]
+ }
+}
+
/// Which linear binding space a resource occupies (see module docs on the
/// binding model).
#[derive(Debug)]
enum BindingRegister {
    UniformBuffers,
    StorageBuffers,
    Textures,
    Images,
}

/// A fully resolved resource binding, ready to be applied to the context.
#[derive(Debug)]
enum RawBinding {
    Buffer {
        raw: glow::Buffer,
        offset: i32,
        size: i32,
    },
    Texture {
        raw: glow::Texture,
        target: BindTarget,
        aspects: crate::FormatAspects,
        //TODO: mip levels, array layers
    },
    Image(ImageBinding),
    Sampler(glow::Sampler),
}

#[derive(Debug)]
pub struct BindGroup {
    contents: Box<[RawBinding]>,
}

/// Identifier for a shader module, drawn from `AdapterShared::next_shader_id`.
type ShaderId = u32;

#[derive(Debug)]
pub struct ShaderModule {
    naga: crate::NagaShader,
    label: Option<String>,
    id: ShaderId,
}
+
/// GL-side description of one vertex attribute's data format.
#[derive(Clone, Debug, Default)]
struct VertexFormatDesc {
    element_count: i32,
    element_format: u32,
    attrib_kind: VertexAttribKind,
}

#[derive(Clone, Debug, Default)]
struct AttributeDesc {
    location: u32,
    offset: u32,
    buffer_index: u32,
    format_desc: VertexFormatDesc,
}

#[derive(Clone, Debug)]
struct BufferBinding {
    raw: glow::Buffer,
    offset: wgt::BufferAddress,
}

/// Parameters for a `glBindImageTexture`-style storage image binding.
#[derive(Clone, Debug)]
struct ImageBinding {
    raw: glow::Texture,
    mip_level: u32,
    array_layer: Option<u32>,
    access: u32,
    format: u32,
}

#[derive(Clone, Debug, Default, PartialEq)]
struct VertexBufferDesc {
    step: wgt::VertexStepMode,
    stride: u32,
}

/// A push-constant uniform: its GL location plus size/type info.
#[derive(Clone, Debug, Default)]
struct UniformDesc {
    location: Option<glow::UniformLocation>,
    size: u32,
    utype: u32,
}

// Safe: WASM doesn't have threads
#[cfg(target_arch = "wasm32")]
unsafe impl Sync for UniformDesc {}
#[cfg(target_arch = "wasm32")]
unsafe impl Send for UniformDesc {}

/// For each texture in the pipeline layout, store the index of the only
/// sampler (in this layout) that the texture is used with.
type SamplerBindMap = [Option<u8>; MAX_TEXTURE_SLOTS];

/// Parts shared by render and compute pipelines: the linked program and
/// its sampler/uniform tables.
struct PipelineInner {
    program: glow::Program,
    sampler_map: SamplerBindMap,
    uniforms: [UniformDesc; MAX_PUSH_CONSTANTS],
}
+
// Note: the `u32` fields in the state structs below hold raw GL enum values.

#[derive(Clone, Debug)]
struct DepthState {
    function: u32,
    mask: bool,
}

#[derive(Clone, Debug, PartialEq)]
struct BlendComponent {
    src: u32,
    dst: u32,
    equation: u32,
}

#[derive(Clone, Debug, PartialEq)]
struct BlendDesc {
    alpha: BlendComponent,
    color: BlendComponent,
}

#[derive(Clone, Debug, Default, PartialEq)]
struct ColorTargetDesc {
    mask: wgt::ColorWrites,
    blend: Option<BlendDesc>,
}

/// One shader stage's identity, used as part of the program-cache key.
#[derive(PartialEq, Eq, Hash)]
struct ProgramStage {
    naga_stage: naga::ShaderStage,
    shader_id: ShaderId,
    entry_point: String,
}

#[derive(PartialEq, Eq, Hash)]
struct ProgramCacheKey {
    stages: ArrayVec<ProgramStage, 3>,
    group_to_binding_to_slot: Box<[Box<[u8]>]>,
}

/// Cache of linked programs; link errors are cached too, as `Err`.
type ProgramCache = FastHashMap<ProgramCacheKey, Result<Arc<PipelineInner>, crate::PipelineError>>;

pub struct RenderPipeline {
    inner: Arc<PipelineInner>,
    primitive: wgt::PrimitiveState,
    vertex_buffers: Box<[VertexBufferDesc]>,
    vertex_attributes: Box<[AttributeDesc]>,
    color_targets: Box<[ColorTargetDesc]>,
    depth: Option<DepthState>,
    depth_bias: wgt::DepthBiasState,
    stencil: Option<StencilState>,
    alpha_to_coverage_enabled: bool,
}

// SAFE: WASM doesn't have threads
#[cfg(target_arch = "wasm32")]
unsafe impl Send for RenderPipeline {}
#[cfg(target_arch = "wasm32")]
unsafe impl Sync for RenderPipeline {}

pub struct ComputePipeline {
    inner: Arc<PipelineInner>,
}

// SAFE: WASM doesn't have threads
#[cfg(target_arch = "wasm32")]
unsafe impl Send for ComputePipeline {}
#[cfg(target_arch = "wasm32")]
unsafe impl Sync for ComputePipeline {}

#[derive(Debug)]
pub struct QuerySet {
    queries: Box<[glow::Query]>,
    target: BindTarget,
}

/// Timeline fence built out of GL sync objects, one per signaled value.
#[derive(Debug)]
pub struct Fence {
    last_completed: crate::FenceValue,
    // Pairs of (fence value, GL sync object) not yet known to be complete.
    pending: Vec<(crate::FenceValue, glow::Fence)>,
}

unsafe impl Send for Fence {}
unsafe impl Sync for Fence {}
+
+impl Fence {
+ fn get_latest(&self, gl: &glow::Context) -> crate::FenceValue {
+ let mut max_value = self.last_completed;
+ for &(value, sync) in self.pending.iter() {
+ let status = unsafe { gl.get_sync_status(sync) };
+ if status == glow::SIGNALED {
+ max_value = value;
+ }
+ }
+ max_value
+ }
+
+ fn maintain(&mut self, gl: &glow::Context) {
+ let latest = self.get_latest(gl);
+ for &(value, sync) in self.pending.iter() {
+ if value <= latest {
+ unsafe {
+ gl.delete_sync(sync);
+ }
+ }
+ }
+ self.pending.retain(|&(value, _)| value > latest);
+ self.last_completed = latest;
+ }
+}
+
#[derive(Clone, Debug, PartialEq)]
struct StencilOps {
    pass: u32,
    fail: u32,
    depth_fail: u32,
}

impl Default for StencilOps {
    // GL's default stencil ops: keep the existing value in all cases.
    fn default() -> Self {
        Self {
            pass: glow::KEEP,
            fail: glow::KEEP,
            depth_fail: glow::KEEP,
        }
    }
}

/// Stencil configuration for one face (front or back).
#[derive(Clone, Debug, PartialEq)]
struct StencilSide {
    function: u32,
    mask_read: u32,
    mask_write: u32,
    reference: u32,
    ops: StencilOps,
}

impl Default for StencilSide {
    // Always-pass comparison with fully open masks.
    fn default() -> Self {
        Self {
            function: glow::ALWAYS,
            mask_read: 0xFF,
            mask_write: 0xFF,
            reference: 0,
            ops: StencilOps::default(),
        }
    }
}

#[derive(Clone, Default)]
struct StencilState {
    front: StencilSide,
    back: StencilSide,
}

#[derive(Clone, Debug, Default, PartialEq)]
struct PrimitiveState {
    front_face: u32,
    cull_face: u32,
    unclipped_depth: bool,
}

// Color attachments plus depth and stencil.
type InvalidatedAttachments = ArrayVec<u32, { crate::MAX_COLOR_ATTACHMENTS + 2 }>;
+
/// One deferred GL command, recorded by the command encoder and executed
/// later by the queue. Variable-size payloads (push-constant bytes, debug
/// strings) live in the command buffer's `data_bytes` and are referenced
/// here by `Range<u32>`.
#[derive(Debug)]
enum Command {
    Draw {
        topology: u32,
        start_vertex: u32,
        vertex_count: u32,
        instance_count: u32,
    },
    DrawIndexed {
        topology: u32,
        index_type: u32,
        index_count: u32,
        index_offset: wgt::BufferAddress,
        base_vertex: i32,
        instance_count: u32,
    },
    DrawIndirect {
        topology: u32,
        indirect_buf: glow::Buffer,
        indirect_offset: wgt::BufferAddress,
    },
    DrawIndexedIndirect {
        topology: u32,
        index_type: u32,
        indirect_buf: glow::Buffer,
        indirect_offset: wgt::BufferAddress,
    },
    Dispatch([u32; 3]),
    DispatchIndirect {
        indirect_buf: glow::Buffer,
        indirect_offset: wgt::BufferAddress,
    },
    ClearBuffer {
        dst: Buffer,
        dst_target: BindTarget,
        range: crate::MemoryRange,
    },
    CopyBufferToBuffer {
        src: Buffer,
        src_target: BindTarget,
        dst: Buffer,
        dst_target: BindTarget,
        copy: crate::BufferCopy,
    },
    #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
    CopyExternalImageToTexture {
        src: wgt::ImageCopyExternalImage,
        dst: glow::Texture,
        dst_target: BindTarget,
        dst_format: wgt::TextureFormat,
        dst_premultiplication: bool,
        copy: crate::TextureCopy,
    },
    CopyTextureToTexture {
        src: glow::Texture,
        src_target: BindTarget,
        dst: glow::Texture,
        dst_target: BindTarget,
        copy: crate::TextureCopy,
        dst_is_cubemap: bool,
    },
    CopyBufferToTexture {
        src: Buffer,
        #[allow(unused)]
        src_target: BindTarget,
        dst: glow::Texture,
        dst_target: BindTarget,
        dst_format: wgt::TextureFormat,
        copy: crate::BufferTextureCopy,
    },
    CopyTextureToBuffer {
        src: glow::Texture,
        src_target: BindTarget,
        src_format: wgt::TextureFormat,
        dst: Buffer,
        #[allow(unused)]
        dst_target: BindTarget,
        copy: crate::BufferTextureCopy,
    },
    SetIndexBuffer(glow::Buffer),
    BeginQuery(glow::Query, BindTarget),
    EndQuery(BindTarget),
    CopyQueryResults {
        query_range: Range<u32>,
        dst: Buffer,
        dst_target: BindTarget,
        dst_offset: wgt::BufferAddress,
    },
    ResetFramebuffer {
        is_default: bool,
    },
    BindAttachment {
        attachment: u32,
        view: TextureView,
    },
    ResolveAttachment {
        attachment: u32,
        dst: TextureView,
        size: wgt::Extent3d,
    },
    InvalidateAttachments(InvalidatedAttachments),
    SetDrawColorBuffers(u8),
    ClearColorF {
        draw_buffer: u32,
        color: [f32; 4],
        is_srgb: bool,
    },
    ClearColorU(u32, [u32; 4]),
    ClearColorI(u32, [i32; 4]),
    ClearDepth(f32),
    ClearStencil(u32),
    // Clearing both the depth and stencil buffer individually appears to
    // result in the stencil buffer failing to clear, at least in WebGL.
    // It is also more efficient to emit a single command instead of two for
    // this.
    ClearDepthAndStencil(f32, u32),
    BufferBarrier(glow::Buffer, crate::BufferUses),
    TextureBarrier(crate::TextureUses),
    SetViewport {
        rect: crate::Rect<i32>,
        depth: Range<f32>,
    },
    SetScissor(crate::Rect<i32>),
    SetStencilFunc {
        face: u32,
        function: u32,
        reference: u32,
        read_mask: u32,
    },
    SetStencilOps {
        face: u32,
        write_mask: u32,
        ops: StencilOps,
    },
    SetDepth(DepthState),
    SetDepthBias(wgt::DepthBiasState),
    ConfigureDepthStencil(crate::FormatAspects),
    SetAlphaToCoverage(bool),
    SetVertexAttribute {
        buffer: Option<glow::Buffer>,
        buffer_desc: VertexBufferDesc,
        attribute_desc: AttributeDesc,
    },
    UnsetVertexAttribute(u32),
    SetVertexBuffer {
        index: u32,
        buffer: BufferBinding,
        buffer_desc: VertexBufferDesc,
    },
    SetProgram(glow::Program),
    SetPrimitive(PrimitiveState),
    SetBlendConstant([f32; 4]),
    SetColorTarget {
        draw_buffer_index: Option<u32>,
        desc: ColorTargetDesc,
    },
    BindBuffer {
        target: BindTarget,
        slot: u32,
        buffer: glow::Buffer,
        offset: i32,
        size: i32,
    },
    BindSampler(u32, Option<glow::Sampler>),
    BindTexture {
        slot: u32,
        texture: glow::Texture,
        target: BindTarget,
        aspects: crate::FormatAspects,
    },
    BindImage {
        slot: u32,
        binding: ImageBinding,
    },
    // The ranges index into the command buffer's `data_bytes`.
    InsertDebugMarker(Range<u32>),
    PushDebugGroup(Range<u32>),
    PopDebugGroup,
    SetPushConstants {
        uniform: UniformDesc,
        /// Offset from the start of the `data_bytes`
        offset: u32,
    },
}
+
/// A recorded, replayable list of commands plus the side-band data they
/// reference by range/index.
#[derive(Default)]
pub struct CommandBuffer {
    label: Option<String>,
    commands: Vec<Command>,
    // Byte arena for variable-size payloads (push constants, debug marker
    // strings), referenced by `Range<u32>` from `Command` variants.
    data_bytes: Vec<u8>,
    queries: Vec<glow::Query>,
}
+
+impl fmt::Debug for CommandBuffer {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let mut builder = f.debug_struct("CommandBuffer");
+ if let Some(ref label) = self.label {
+ builder.field("label", label);
+ }
+ builder.finish()
+ }
+}
+
//TODO: we would have something like `Arc<typed_arena::Arena>`
// here and in the command buffers. So that everything grows
// inside the encoder and stays there until `reset_all`.

pub struct CommandEncoder {
    cmd_buffer: CommandBuffer,
    // Encoder-side state tracking used while translating passes into commands.
    state: command::State,
    private_caps: PrivateCapabilities,
}
+
+impl fmt::Debug for CommandEncoder {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("CommandEncoder")
+ .field("cmd_buffer", &self.cmd_buffer)
+ .finish()
+ }
+}
diff --git a/third_party/rust/wgpu-hal/src/gles/queue.rs b/third_party/rust/wgpu-hal/src/gles/queue.rs
new file mode 100644
index 0000000000..87a82524fc
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/queue.rs
@@ -0,0 +1,1532 @@
+use super::Command as C;
+use arrayvec::ArrayVec;
+use glow::HasContext;
+use std::{mem, slice, sync::Arc};
+
// NOTE(review): appears to be the source id passed to native GL debug
// marker/group calls (use site is elsewhere in this file) — confirm there.
#[cfg(not(target_arch = "wasm32"))]
const DEBUG_ID: u32 = 0;

// Cubemap face targets indexed by array layer, in the standard GL order
// (`TEXTURE_CUBE_MAP_POSITIVE_X + i`): +X, -X, +Y, -Y, +Z, -Z.
const CUBEMAP_FACES: [u32; 6] = [
    glow::TEXTURE_CUBE_MAP_POSITIVE_X,
    glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
    glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
    glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
    glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
    glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
];
+
/// Resolves a debug-marker string stored as a byte `range` into `data`.
///
/// Panics if the range is out of bounds or the bytes are not valid UTF-8.
#[cfg(not(target_arch = "wasm32"))]
fn extract_marker<'a>(data: &'a [u8], range: &std::ops::Range<u32>) -> &'a str {
    let (start, end) = (range.start as usize, range.end as usize);
    std::str::from_utf8(&data[start..end]).unwrap()
}
+
+fn is_layered_target(target: super::BindTarget) -> bool {
+ match target {
+ glow::TEXTURE_2D_ARRAY | glow::TEXTURE_3D | glow::TEXTURE_CUBE_MAP_ARRAY => true,
+ _ => false,
+ }
+}
+
+impl super::Queue {
    /// Performs a manual shader clear, used as a workaround for a clearing bug on mesa.
    ///
    /// Draws a triangle with a dedicated clear program into the given color
    /// attachment of the currently bound draw framebuffer, then restores the
    /// previous draw-buffer set.
    unsafe fn perform_shader_clear(&self, gl: &glow::Context, draw_buffer: u32, color: [f32; 4]) {
        // Bind the clear program and upload the clear color to its uniform.
        unsafe { gl.use_program(Some(self.shader_clear_program)) };
        unsafe {
            gl.uniform_4_f32(
                Some(&self.shader_clear_program_color_uniform_location),
                color[0],
                color[1],
                color[2],
                color[3],
            )
        };
        // Disable all fixed-function state that could clip, blend away, or
        // discard fragments of the clear primitive.
        unsafe { gl.disable(glow::DEPTH_TEST) };
        unsafe { gl.disable(glow::STENCIL_TEST) };
        unsafe { gl.disable(glow::SCISSOR_TEST) };
        unsafe { gl.disable(glow::BLEND) };
        unsafe { gl.disable(glow::CULL_FACE) };
        // Restrict rasterization to the single attachment being cleared and
        // draw one triangle (3 vertices; no vertex buffer is bound here, so
        // positions are presumably generated in the clear vertex shader).
        unsafe { gl.draw_buffers(&[glow::COLOR_ATTACHMENT0 + draw_buffer]) };
        unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) };

        if self.draw_buffer_count != 0 {
            // Reset the draw buffers to what they were before the clear
            let indices = (0..self.draw_buffer_count as u32)
                .map(|i| glow::COLOR_ATTACHMENT0 + i)
                .collect::<ArrayVec<_, { crate::MAX_COLOR_ATTACHMENTS }>>();
            unsafe { gl.draw_buffers(&indices) };
        }
        // Re-disable per-draw-buffer blending, mirroring what the
        // `SetDrawColorBuffers` command does after changing the set.
        #[cfg(not(target_arch = "wasm32"))]
        for draw_buffer in 0..self.draw_buffer_count as u32 {
            unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) };
        }
    }
+
    /// Resets the GL context to a known baseline before replaying commands:
    /// no program, default framebuffer, all optional tests/features disabled,
    /// and no element-array buffer bound.
    unsafe fn reset_state(&mut self, gl: &glow::Context) {
        unsafe { gl.use_program(None) };
        unsafe { gl.bind_framebuffer(glow::FRAMEBUFFER, None) };
        unsafe { gl.disable(glow::DEPTH_TEST) };
        unsafe { gl.disable(glow::STENCIL_TEST) };
        unsafe { gl.disable(glow::SCISSOR_TEST) };
        unsafe { gl.disable(glow::BLEND) };
        unsafe { gl.disable(glow::CULL_FACE) };
        unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) };
        unsafe { gl.disable(glow::SAMPLE_ALPHA_TO_COVERAGE) };
        // DEPTH_CLAMP is only touched when the depth-clip-control feature is
        // present, matching the `SetPrimitive` command handling.
        if self.features.contains(wgt::Features::DEPTH_CLIP_CONTROL) {
            unsafe { gl.disable(glow::DEPTH_CLAMP) };
        }

        // Also drop the cached index-buffer binding so stale state is not
        // restored after buffer copies (see `CopyBufferToBuffer`).
        unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, None) };
        self.current_index_buffer = None;
    }
+
    /// Attaches `view` to `attachment` of the framebuffer bound at
    /// `fbo_target`, choosing the GL attachment call that matches the
    /// view's underlying resource and texture target.
    unsafe fn set_attachment(
        &self,
        gl: &glow::Context,
        fbo_target: u32,
        attachment: u32,
        view: &super::TextureView,
    ) {
        match view.inner {
            // Renderbuffers attach directly.
            super::TextureInner::Renderbuffer { raw } => {
                unsafe {
                    gl.framebuffer_renderbuffer(
                        fbo_target,
                        attachment,
                        glow::RENDERBUFFER,
                        Some(raw),
                    )
                };
            }
            // The default (window-system) renderbuffer cannot be attached to
            // a user framebuffer; reaching it here is a caller bug.
            super::TextureInner::DefaultRenderbuffer => panic!("Unexpected default RBO"),
            super::TextureInner::Texture { raw, target } => {
                let num_layers = view.array_layers.end - view.array_layers.start;
                if num_layers > 1 {
                    // Multi-layer views are attached via OVR_multiview.
                    // NOTE(review): this branch is compiled only for
                    // wasm32/unknown — on other targets it attaches nothing;
                    // confirm multiview is intentionally web-only here.
                    #[cfg(all(target_arch = "wasm32", target_os = "unknown"))]
                    unsafe {
                        gl.framebuffer_texture_multiview_ovr(
                            fbo_target,
                            attachment,
                            Some(raw),
                            view.mip_levels.start as i32,
                            view.array_layers.start as i32,
                            num_layers as i32,
                        )
                    };
                } else if is_layered_target(target) {
                    // 2D-array / 3D / cube-array targets: attach one layer.
                    unsafe {
                        gl.framebuffer_texture_layer(
                            fbo_target,
                            attachment,
                            Some(raw),
                            view.mip_levels.start as i32,
                            view.array_layers.start as i32,
                        )
                    };
                } else if target == glow::TEXTURE_CUBE_MAP {
                    // Cubemaps attach a single face, selected by the view's
                    // first array layer.
                    unsafe {
                        gl.framebuffer_texture_2d(
                            fbo_target,
                            attachment,
                            CUBEMAP_FACES[view.array_layers.start as usize],
                            Some(raw),
                            view.mip_levels.start as i32,
                        )
                    };
                } else {
                    // Plain (non-layered, non-cube) texture target.
                    unsafe {
                        gl.framebuffer_texture_2d(
                            fbo_target,
                            attachment,
                            target,
                            Some(raw),
                            view.mip_levels.start as i32,
                        )
                    };
                }
            }
            // External canvas framebuffers replace the whole binding rather
            // than attaching to it.
            #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
            super::TextureInner::ExternalFramebuffer { ref inner } => unsafe {
                gl.bind_external_framebuffer(glow::FRAMEBUFFER, inner);
            },
        }
    }
+
+ unsafe fn process(
+ &mut self,
+ gl: &glow::Context,
+ command: &C,
+ #[cfg_attr(target_arch = "wasm32", allow(unused))] data_bytes: &[u8],
+ queries: &[glow::Query],
+ ) {
+ match *command {
+ C::Draw {
+ topology,
+ start_vertex,
+ vertex_count,
+ instance_count,
+ } => {
+ // Don't use `gl.draw_arrays` for `instance_count == 1`.
+ // Angle has a bug where it doesn't consider the instance divisor when `DYNAMIC_DRAW` is used in `draw_arrays`.
+ // See https://github.com/gfx-rs/wgpu/issues/3578
+ unsafe {
+ gl.draw_arrays_instanced(
+ topology,
+ start_vertex as i32,
+ vertex_count as i32,
+ instance_count as i32,
+ )
+ };
+ }
+ C::DrawIndexed {
+ topology,
+ index_type,
+ index_count,
+ index_offset,
+ base_vertex,
+ instance_count,
+ } => {
+ match base_vertex {
+ // Don't use `gl.draw_elements`/`gl.draw_elements_base_vertex` for `instance_count == 1`.
+ // Angle has a bug where it doesn't consider the instance divisor when `DYNAMIC_DRAW` is used in `gl.draw_elements`/`gl.draw_elements_base_vertex`.
+ // See https://github.com/gfx-rs/wgpu/issues/3578
+ 0 => unsafe {
+ gl.draw_elements_instanced(
+ topology,
+ index_count as i32,
+ index_type,
+ index_offset as i32,
+ instance_count as i32,
+ )
+ },
+ _ => unsafe {
+ gl.draw_elements_instanced_base_vertex(
+ topology,
+ index_count as _,
+ index_type,
+ index_offset as i32,
+ instance_count as i32,
+ base_vertex,
+ )
+ },
+ }
+ }
+ C::DrawIndirect {
+ topology,
+ indirect_buf,
+ indirect_offset,
+ } => {
+ unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)) };
+ unsafe { gl.draw_arrays_indirect_offset(topology, indirect_offset as i32) };
+ }
+ C::DrawIndexedIndirect {
+ topology,
+ index_type,
+ indirect_buf,
+ indirect_offset,
+ } => {
+ unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(indirect_buf)) };
+ unsafe {
+ gl.draw_elements_indirect_offset(topology, index_type, indirect_offset as i32)
+ };
+ }
+ C::Dispatch(group_counts) => {
+ unsafe { gl.dispatch_compute(group_counts[0], group_counts[1], group_counts[2]) };
+ }
+ C::DispatchIndirect {
+ indirect_buf,
+ indirect_offset,
+ } => {
+ unsafe { gl.bind_buffer(glow::DISPATCH_INDIRECT_BUFFER, Some(indirect_buf)) };
+ unsafe { gl.dispatch_compute_indirect(indirect_offset as i32) };
+ }
+ C::ClearBuffer {
+ ref dst,
+ dst_target,
+ ref range,
+ } => match dst.raw {
+ Some(buffer) => {
+ // When `INDEX_BUFFER_ROLE_CHANGE` isn't available, we can't copy into the
+ // index buffer from the zero buffer. This would fail in Chrome with the
+ // following message:
+ //
+ // > Cannot copy into an element buffer destination from a non-element buffer
+ // > source
+ //
+ // Instead, we'll upload zeroes into the buffer.
+ let can_use_zero_buffer = self
+ .shared
+ .private_caps
+ .contains(super::PrivateCapabilities::INDEX_BUFFER_ROLE_CHANGE)
+ || dst_target != glow::ELEMENT_ARRAY_BUFFER;
+
+ if can_use_zero_buffer {
+ unsafe { gl.bind_buffer(glow::COPY_READ_BUFFER, Some(self.zero_buffer)) };
+ unsafe { gl.bind_buffer(dst_target, Some(buffer)) };
+ let mut dst_offset = range.start;
+ while dst_offset < range.end {
+ let size = (range.end - dst_offset).min(super::ZERO_BUFFER_SIZE as u64);
+ unsafe {
+ gl.copy_buffer_sub_data(
+ glow::COPY_READ_BUFFER,
+ dst_target,
+ 0,
+ dst_offset as i32,
+ size as i32,
+ )
+ };
+ dst_offset += size;
+ }
+ } else {
+ unsafe { gl.bind_buffer(dst_target, Some(buffer)) };
+ let zeroes = vec![0u8; (range.end - range.start) as usize];
+ unsafe {
+ gl.buffer_sub_data_u8_slice(dst_target, range.start as i32, &zeroes)
+ };
+ }
+ }
+ None => {
+ dst.data.as_ref().unwrap().lock().unwrap().as_mut_slice()
+ [range.start as usize..range.end as usize]
+ .fill(0);
+ }
+ },
+ C::CopyBufferToBuffer {
+ ref src,
+ src_target,
+ ref dst,
+ dst_target,
+ copy,
+ } => {
+ let copy_src_target = glow::COPY_READ_BUFFER;
+ let is_index_buffer_only_element_dst = !self
+ .shared
+ .private_caps
+ .contains(super::PrivateCapabilities::INDEX_BUFFER_ROLE_CHANGE)
+ && dst_target == glow::ELEMENT_ARRAY_BUFFER
+ || src_target == glow::ELEMENT_ARRAY_BUFFER;
+
+ // WebGL not allowed to copy data from other targets to element buffer and can't copy element data to other buffers
+ let copy_dst_target = if is_index_buffer_only_element_dst {
+ glow::ELEMENT_ARRAY_BUFFER
+ } else {
+ glow::COPY_WRITE_BUFFER
+ };
+ let size = copy.size.get() as usize;
+ match (src.raw, dst.raw) {
+ (Some(ref src), Some(ref dst)) => {
+ unsafe { gl.bind_buffer(copy_src_target, Some(*src)) };
+ unsafe { gl.bind_buffer(copy_dst_target, Some(*dst)) };
+ unsafe {
+ gl.copy_buffer_sub_data(
+ copy_src_target,
+ copy_dst_target,
+ copy.src_offset as _,
+ copy.dst_offset as _,
+ copy.size.get() as _,
+ )
+ };
+ }
+ (Some(src), None) => {
+ let mut data = dst.data.as_ref().unwrap().lock().unwrap();
+ let dst_data = &mut data.as_mut_slice()
+ [copy.dst_offset as usize..copy.dst_offset as usize + size];
+
+ unsafe { gl.bind_buffer(copy_src_target, Some(src)) };
+ unsafe {
+ self.shared.get_buffer_sub_data(
+ gl,
+ copy_src_target,
+ copy.src_offset as i32,
+ dst_data,
+ )
+ };
+ }
+ (None, Some(dst)) => {
+ let data = src.data.as_ref().unwrap().lock().unwrap();
+ let src_data = &data.as_slice()
+ [copy.src_offset as usize..copy.src_offset as usize + size];
+ unsafe { gl.bind_buffer(copy_dst_target, Some(dst)) };
+ unsafe {
+ gl.buffer_sub_data_u8_slice(
+ copy_dst_target,
+ copy.dst_offset as i32,
+ src_data,
+ )
+ };
+ }
+ (None, None) => {
+ todo!()
+ }
+ }
+ unsafe { gl.bind_buffer(copy_src_target, None) };
+ if is_index_buffer_only_element_dst {
+ unsafe {
+ gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, self.current_index_buffer)
+ };
+ } else {
+ unsafe { gl.bind_buffer(copy_dst_target, None) };
+ }
+ }
+ #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+ C::CopyExternalImageToTexture {
+ ref src,
+ dst,
+ dst_target,
+ dst_format,
+ dst_premultiplication,
+ ref copy,
+ } => {
+ const UNPACK_FLIP_Y_WEBGL: u32 =
+ web_sys::WebGl2RenderingContext::UNPACK_FLIP_Y_WEBGL;
+ const UNPACK_PREMULTIPLY_ALPHA_WEBGL: u32 =
+ web_sys::WebGl2RenderingContext::UNPACK_PREMULTIPLY_ALPHA_WEBGL;
+
+ unsafe {
+ if src.flip_y {
+ gl.pixel_store_bool(UNPACK_FLIP_Y_WEBGL, true);
+ }
+ if dst_premultiplication {
+ gl.pixel_store_bool(UNPACK_PREMULTIPLY_ALPHA_WEBGL, true);
+ }
+ }
+
+ unsafe { gl.bind_texture(dst_target, Some(dst)) };
+ let format_desc = self.shared.describe_texture_format(dst_format);
+ if is_layered_target(dst_target) {
+ let z_offset =
+ if let glow::TEXTURE_2D_ARRAY | glow::TEXTURE_CUBE_MAP_ARRAY = dst_target {
+ copy.dst_base.array_layer as i32
+ } else {
+ copy.dst_base.origin.z as i32
+ };
+
+ match src.source {
+ wgt::ExternalImageSource::ImageBitmap(ref b) => unsafe {
+ gl.tex_sub_image_3d_with_image_bitmap(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ z_offset,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ copy.size.depth as i32,
+ format_desc.external,
+ format_desc.data_type,
+ b,
+ );
+ },
+ wgt::ExternalImageSource::HTMLVideoElement(ref v) => unsafe {
+ gl.tex_sub_image_3d_with_html_video_element(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ z_offset,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ copy.size.depth as i32,
+ format_desc.external,
+ format_desc.data_type,
+ v,
+ );
+ },
+ wgt::ExternalImageSource::HTMLCanvasElement(ref c) => unsafe {
+ gl.tex_sub_image_3d_with_html_canvas_element(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ z_offset,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ copy.size.depth as i32,
+ format_desc.external,
+ format_desc.data_type,
+ c,
+ );
+ },
+ wgt::ExternalImageSource::OffscreenCanvas(_) => unreachable!(),
+ }
+ } else {
+ let dst_target = if let glow::TEXTURE_CUBE_MAP = dst_target {
+ CUBEMAP_FACES[copy.dst_base.array_layer as usize]
+ } else {
+ dst_target
+ };
+
+ match src.source {
+ wgt::ExternalImageSource::ImageBitmap(ref b) => unsafe {
+ gl.tex_sub_image_2d_with_image_bitmap_and_width_and_height(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ format_desc.external,
+ format_desc.data_type,
+ b,
+ );
+ },
+ wgt::ExternalImageSource::HTMLVideoElement(ref v) => unsafe {
+ gl.tex_sub_image_2d_with_html_video_and_width_and_height(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ format_desc.external,
+ format_desc.data_type,
+ v,
+ )
+ },
+ wgt::ExternalImageSource::HTMLCanvasElement(ref c) => unsafe {
+ gl.tex_sub_image_2d_with_html_canvas_and_width_and_height(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ format_desc.external,
+ format_desc.data_type,
+ c,
+ )
+ },
+ wgt::ExternalImageSource::OffscreenCanvas(_) => unreachable!(),
+ }
+ }
+
+ unsafe {
+ if src.flip_y {
+ gl.pixel_store_bool(UNPACK_FLIP_Y_WEBGL, false);
+ }
+ if dst_premultiplication {
+ gl.pixel_store_bool(UNPACK_PREMULTIPLY_ALPHA_WEBGL, false);
+ }
+ }
+ }
+ C::CopyTextureToTexture {
+ src,
+ src_target,
+ dst,
+ dst_target,
+ dst_is_cubemap,
+ ref copy,
+ } => {
+ //TODO: handle 3D copies
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)) };
+ if is_layered_target(src_target) {
+ //TODO: handle GLES without framebuffer_texture_3d
+ unsafe {
+ gl.framebuffer_texture_layer(
+ glow::READ_FRAMEBUFFER,
+ glow::COLOR_ATTACHMENT0,
+ Some(src),
+ copy.src_base.mip_level as i32,
+ copy.src_base.array_layer as i32,
+ )
+ };
+ } else {
+ unsafe {
+ gl.framebuffer_texture_2d(
+ glow::READ_FRAMEBUFFER,
+ glow::COLOR_ATTACHMENT0,
+ src_target,
+ Some(src),
+ copy.src_base.mip_level as i32,
+ )
+ };
+ }
+
+ unsafe { gl.bind_texture(dst_target, Some(dst)) };
+ if dst_is_cubemap {
+ unsafe {
+ gl.copy_tex_sub_image_2d(
+ CUBEMAP_FACES[copy.dst_base.array_layer as usize],
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ copy.src_base.origin.x as i32,
+ copy.src_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ )
+ };
+ } else if is_layered_target(dst_target) {
+ unsafe {
+ gl.copy_tex_sub_image_3d(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ if let glow::TEXTURE_2D_ARRAY | glow::TEXTURE_CUBE_MAP_ARRAY =
+ dst_target
+ {
+ copy.dst_base.array_layer as i32
+ } else {
+ copy.dst_base.origin.z as i32
+ },
+ copy.src_base.origin.x as i32,
+ copy.src_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ )
+ };
+ } else {
+ unsafe {
+ gl.copy_tex_sub_image_2d(
+ dst_target,
+ copy.dst_base.mip_level as i32,
+ copy.dst_base.origin.x as i32,
+ copy.dst_base.origin.y as i32,
+ copy.src_base.origin.x as i32,
+ copy.src_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ )
+ };
+ }
+ }
+ C::CopyBufferToTexture {
+ ref src,
+ src_target: _,
+ dst,
+ dst_target,
+ dst_format,
+ ref copy,
+ } => {
+ let (block_width, block_height) = dst_format.block_dimensions();
+ let block_size = dst_format.block_size(None).unwrap();
+ let format_desc = self.shared.describe_texture_format(dst_format);
+ let row_texels = copy
+ .buffer_layout
+ .bytes_per_row
+ .map_or(0, |bpr| block_width * bpr / block_size);
+ let column_texels = copy
+ .buffer_layout
+ .rows_per_image
+ .map_or(0, |rpi| block_height * rpi);
+
+ unsafe { gl.bind_texture(dst_target, Some(dst)) };
+ unsafe { gl.pixel_store_i32(glow::UNPACK_ROW_LENGTH, row_texels as i32) };
+ unsafe { gl.pixel_store_i32(glow::UNPACK_IMAGE_HEIGHT, column_texels as i32) };
+ let mut unbind_unpack_buffer = false;
+ if !dst_format.is_compressed() {
+ let buffer_data;
+ let unpack_data = match src.raw {
+ Some(buffer) => {
+ unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)) };
+ unbind_unpack_buffer = true;
+ glow::PixelUnpackData::BufferOffset(copy.buffer_layout.offset as u32)
+ }
+ None => {
+ buffer_data = src.data.as_ref().unwrap().lock().unwrap();
+ let src_data =
+ &buffer_data.as_slice()[copy.buffer_layout.offset as usize..];
+ glow::PixelUnpackData::Slice(src_data)
+ }
+ };
+ if is_layered_target(dst_target) {
+ unsafe {
+ gl.tex_sub_image_3d(
+ dst_target,
+ copy.texture_base.mip_level as i32,
+ copy.texture_base.origin.x as i32,
+ copy.texture_base.origin.y as i32,
+ if let glow::TEXTURE_2D_ARRAY | glow::TEXTURE_CUBE_MAP_ARRAY =
+ dst_target
+ {
+ copy.texture_base.array_layer as i32
+ } else {
+ copy.texture_base.origin.z as i32
+ },
+ copy.size.width as i32,
+ copy.size.height as i32,
+ copy.size.depth as i32,
+ format_desc.external,
+ format_desc.data_type,
+ unpack_data,
+ )
+ };
+ } else {
+ unsafe {
+ gl.tex_sub_image_2d(
+ if let glow::TEXTURE_CUBE_MAP = dst_target {
+ CUBEMAP_FACES[copy.texture_base.array_layer as usize]
+ } else {
+ dst_target
+ },
+ copy.texture_base.mip_level as i32,
+ copy.texture_base.origin.x as i32,
+ copy.texture_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ format_desc.external,
+ format_desc.data_type,
+ unpack_data,
+ )
+ };
+ }
+ } else {
+ let bytes_per_row = copy
+ .buffer_layout
+ .bytes_per_row
+ .unwrap_or(copy.size.width * block_size);
+ let minimum_rows_per_image =
+ (copy.size.height + block_height - 1) / block_height;
+ let rows_per_image = copy
+ .buffer_layout
+ .rows_per_image
+ .unwrap_or(minimum_rows_per_image);
+
+ let bytes_per_image = bytes_per_row * rows_per_image;
+ let minimum_bytes_per_image = bytes_per_row * minimum_rows_per_image;
+ let bytes_in_upload =
+ (bytes_per_image * (copy.size.depth - 1)) + minimum_bytes_per_image;
+ let offset = copy.buffer_layout.offset as u32;
+
+ let buffer_data;
+ let unpack_data = match src.raw {
+ Some(buffer) => {
+ unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(buffer)) };
+ unbind_unpack_buffer = true;
+ glow::CompressedPixelUnpackData::BufferRange(
+ offset..offset + bytes_in_upload,
+ )
+ }
+ None => {
+ buffer_data = src.data.as_ref().unwrap().lock().unwrap();
+ let src_data = &buffer_data.as_slice()
+ [(offset as usize)..(offset + bytes_in_upload) as usize];
+ glow::CompressedPixelUnpackData::Slice(src_data)
+ }
+ };
+
+ if is_layered_target(dst_target) {
+ unsafe {
+ gl.compressed_tex_sub_image_3d(
+ dst_target,
+ copy.texture_base.mip_level as i32,
+ copy.texture_base.origin.x as i32,
+ copy.texture_base.origin.y as i32,
+ if let glow::TEXTURE_2D_ARRAY | glow::TEXTURE_CUBE_MAP_ARRAY =
+ dst_target
+ {
+ copy.texture_base.array_layer as i32
+ } else {
+ copy.texture_base.origin.z as i32
+ },
+ copy.size.width as i32,
+ copy.size.height as i32,
+ copy.size.depth as i32,
+ format_desc.internal,
+ unpack_data,
+ )
+ };
+ } else {
+ unsafe {
+ gl.compressed_tex_sub_image_2d(
+ if let glow::TEXTURE_CUBE_MAP = dst_target {
+ CUBEMAP_FACES[copy.texture_base.array_layer as usize]
+ } else {
+ dst_target
+ },
+ copy.texture_base.mip_level as i32,
+ copy.texture_base.origin.x as i32,
+ copy.texture_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ format_desc.internal,
+ unpack_data,
+ )
+ };
+ }
+ }
+ if unbind_unpack_buffer {
+ unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, None) };
+ }
+ }
+ C::CopyTextureToBuffer {
+ src,
+ src_target,
+ src_format,
+ ref dst,
+ dst_target: _,
+ ref copy,
+ } => {
+ let block_size = src_format.block_size(None).unwrap();
+ if src_format.is_compressed() {
+ log::error!("Not implemented yet: compressed texture copy to buffer");
+ return;
+ }
+ if src_target == glow::TEXTURE_CUBE_MAP
+ || src_target == glow::TEXTURE_CUBE_MAP_ARRAY
+ {
+ log::error!("Not implemented yet: cubemap texture copy to buffer");
+ return;
+ }
+ let format_desc = self.shared.describe_texture_format(src_format);
+ let row_texels = copy
+ .buffer_layout
+ .bytes_per_row
+ .map_or(copy.size.width, |bpr| bpr / block_size);
+ let column_texels = copy
+ .buffer_layout
+ .rows_per_image
+ .unwrap_or(copy.size.height);
+
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.copy_fbo)) };
+
+ let read_pixels = |offset| {
+ let mut buffer_data;
+ let unpack_data = match dst.raw {
+ Some(buffer) => {
+ unsafe { gl.pixel_store_i32(glow::PACK_ROW_LENGTH, row_texels as i32) };
+ unsafe { gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(buffer)) };
+ glow::PixelPackData::BufferOffset(offset as u32)
+ }
+ None => {
+ buffer_data = dst.data.as_ref().unwrap().lock().unwrap();
+ let dst_data = &mut buffer_data.as_mut_slice()[offset as usize..];
+ glow::PixelPackData::Slice(dst_data)
+ }
+ };
+ unsafe {
+ gl.read_pixels(
+ copy.texture_base.origin.x as i32,
+ copy.texture_base.origin.y as i32,
+ copy.size.width as i32,
+ copy.size.height as i32,
+ format_desc.external,
+ format_desc.data_type,
+ unpack_data,
+ )
+ };
+ };
+
+ match src_target {
+ glow::TEXTURE_2D => {
+ unsafe {
+ gl.framebuffer_texture_2d(
+ glow::READ_FRAMEBUFFER,
+ glow::COLOR_ATTACHMENT0,
+ src_target,
+ Some(src),
+ copy.texture_base.mip_level as i32,
+ )
+ };
+ read_pixels(copy.buffer_layout.offset);
+ }
+ glow::TEXTURE_2D_ARRAY => {
+ unsafe {
+ gl.framebuffer_texture_layer(
+ glow::READ_FRAMEBUFFER,
+ glow::COLOR_ATTACHMENT0,
+ Some(src),
+ copy.texture_base.mip_level as i32,
+ copy.texture_base.array_layer as i32,
+ )
+ };
+ read_pixels(copy.buffer_layout.offset);
+ }
+ glow::TEXTURE_3D => {
+ for z in copy.texture_base.origin.z..copy.size.depth {
+ unsafe {
+ gl.framebuffer_texture_layer(
+ glow::READ_FRAMEBUFFER,
+ glow::COLOR_ATTACHMENT0,
+ Some(src),
+ copy.texture_base.mip_level as i32,
+ z as i32,
+ )
+ };
+ let offset = copy.buffer_layout.offset
+ + (z * block_size * row_texels * column_texels) as u64;
+ read_pixels(offset);
+ }
+ }
+ glow::TEXTURE_CUBE_MAP | glow::TEXTURE_CUBE_MAP_ARRAY => unimplemented!(),
+ _ => unreachable!(),
+ }
+ }
+ C::SetIndexBuffer(buffer) => {
+ unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(buffer)) };
+ self.current_index_buffer = Some(buffer);
+ }
+ C::BeginQuery(query, target) => {
+ unsafe { gl.begin_query(target, query) };
+ }
+ C::EndQuery(target) => {
+ unsafe { gl.end_query(target) };
+ }
+ C::CopyQueryResults {
+ ref query_range,
+ ref dst,
+ dst_target,
+ dst_offset,
+ } => {
+ self.temp_query_results.clear();
+ for &query in queries[query_range.start as usize..query_range.end as usize].iter() {
+ let result = unsafe { gl.get_query_parameter_u32(query, glow::QUERY_RESULT) };
+ self.temp_query_results.push(result as u64);
+ }
+ let query_data = unsafe {
+ slice::from_raw_parts(
+ self.temp_query_results.as_ptr() as *const u8,
+ self.temp_query_results.len() * mem::size_of::<u64>(),
+ )
+ };
+ match dst.raw {
+ Some(buffer) => {
+ unsafe { gl.bind_buffer(dst_target, Some(buffer)) };
+ unsafe {
+ gl.buffer_sub_data_u8_slice(dst_target, dst_offset as i32, query_data)
+ };
+ }
+ None => {
+ let data = &mut dst.data.as_ref().unwrap().lock().unwrap();
+ let len = query_data.len().min(data.len());
+ data[..len].copy_from_slice(&query_data[..len]);
+ }
+ }
+ }
+ C::ResetFramebuffer { is_default } => {
+ if is_default {
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) };
+ } else {
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)) };
+ unsafe {
+ gl.framebuffer_texture_2d(
+ glow::DRAW_FRAMEBUFFER,
+ glow::DEPTH_STENCIL_ATTACHMENT,
+ glow::TEXTURE_2D,
+ None,
+ 0,
+ )
+ };
+ for i in 0..crate::MAX_COLOR_ATTACHMENTS {
+ let target = glow::COLOR_ATTACHMENT0 + i as u32;
+ unsafe {
+ gl.framebuffer_texture_2d(
+ glow::DRAW_FRAMEBUFFER,
+ target,
+ glow::TEXTURE_2D,
+ None,
+ 0,
+ )
+ };
+ }
+ }
+ unsafe { gl.color_mask(true, true, true, true) };
+ unsafe { gl.depth_mask(true) };
+ unsafe { gl.stencil_mask(!0) };
+ unsafe { gl.disable(glow::DEPTH_TEST) };
+ unsafe { gl.disable(glow::STENCIL_TEST) };
+ unsafe { gl.disable(glow::SCISSOR_TEST) };
+ }
+ C::BindAttachment {
+ attachment,
+ ref view,
+ } => {
+ unsafe { self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, attachment, view) };
+ }
+ C::ResolveAttachment {
+ attachment,
+ ref dst,
+ ref size,
+ } => {
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(self.draw_fbo)) };
+ unsafe { gl.read_buffer(attachment) };
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.copy_fbo)) };
+ unsafe {
+ self.set_attachment(gl, glow::DRAW_FRAMEBUFFER, glow::COLOR_ATTACHMENT0, dst)
+ };
+ unsafe {
+ gl.blit_framebuffer(
+ 0,
+ 0,
+ size.width as i32,
+ size.height as i32,
+ 0,
+ 0,
+ size.width as i32,
+ size.height as i32,
+ glow::COLOR_BUFFER_BIT,
+ glow::NEAREST,
+ )
+ };
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, None) };
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, Some(self.draw_fbo)) };
+ }
+ C::InvalidateAttachments(ref list) => {
+ unsafe { gl.invalidate_framebuffer(glow::DRAW_FRAMEBUFFER, list) };
+ }
+ C::SetDrawColorBuffers(count) => {
+ self.draw_buffer_count = count;
+ let indices = (0..count as u32)
+ .map(|i| glow::COLOR_ATTACHMENT0 + i)
+ .collect::<ArrayVec<_, { crate::MAX_COLOR_ATTACHMENTS }>>();
+ unsafe { gl.draw_buffers(&indices) };
+
+ if self
+ .shared
+ .private_caps
+ .contains(super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER)
+ {
+ for draw_buffer in 0..count as u32 {
+ unsafe { gl.disable_draw_buffer(glow::BLEND, draw_buffer) };
+ }
+ }
+ }
+ C::ClearColorF {
+ draw_buffer,
+ ref color,
+ is_srgb,
+ } => {
+ if self
+ .shared
+ .workarounds
+ .contains(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR)
+ && is_srgb
+ {
+ unsafe { self.perform_shader_clear(gl, draw_buffer, *color) };
+ } else {
+ unsafe { gl.clear_buffer_f32_slice(glow::COLOR, draw_buffer, color) };
+ }
+ }
+ C::ClearColorU(draw_buffer, ref color) => {
+ unsafe { gl.clear_buffer_u32_slice(glow::COLOR, draw_buffer, color) };
+ }
+ C::ClearColorI(draw_buffer, ref color) => {
+ unsafe { gl.clear_buffer_i32_slice(glow::COLOR, draw_buffer, color) };
+ }
+ C::ClearDepth(depth) => {
+ unsafe { gl.clear_buffer_f32_slice(glow::DEPTH, 0, &[depth]) };
+ }
+ C::ClearStencil(value) => {
+ unsafe { gl.clear_buffer_i32_slice(glow::STENCIL, 0, &[value as i32]) };
+ }
+ C::ClearDepthAndStencil(depth, stencil_value) => {
+ unsafe {
+ gl.clear_buffer_depth_stencil(
+ glow::DEPTH_STENCIL,
+ 0,
+ depth,
+ stencil_value as i32,
+ )
+ };
+ }
+ C::BufferBarrier(raw, usage) => {
+ let mut flags = 0;
+ if usage.contains(crate::BufferUses::VERTEX) {
+ flags |= glow::VERTEX_ATTRIB_ARRAY_BARRIER_BIT;
+ unsafe { gl.bind_buffer(glow::ARRAY_BUFFER, Some(raw)) };
+ unsafe { gl.vertex_attrib_pointer_f32(0, 1, glow::BYTE, true, 0, 0) };
+ }
+ if usage.contains(crate::BufferUses::INDEX) {
+ flags |= glow::ELEMENT_ARRAY_BARRIER_BIT;
+ unsafe { gl.bind_buffer(glow::ELEMENT_ARRAY_BUFFER, Some(raw)) };
+ }
+ if usage.contains(crate::BufferUses::UNIFORM) {
+ flags |= glow::UNIFORM_BARRIER_BIT;
+ }
+ if usage.contains(crate::BufferUses::INDIRECT) {
+ flags |= glow::COMMAND_BARRIER_BIT;
+ unsafe { gl.bind_buffer(glow::DRAW_INDIRECT_BUFFER, Some(raw)) };
+ }
+ if usage.contains(crate::BufferUses::COPY_SRC) {
+ flags |= glow::PIXEL_BUFFER_BARRIER_BIT;
+ unsafe { gl.bind_buffer(glow::PIXEL_UNPACK_BUFFER, Some(raw)) };
+ }
+ if usage.contains(crate::BufferUses::COPY_DST) {
+ flags |= glow::PIXEL_BUFFER_BARRIER_BIT;
+ unsafe { gl.bind_buffer(glow::PIXEL_PACK_BUFFER, Some(raw)) };
+ }
+ if usage.intersects(crate::BufferUses::MAP_READ | crate::BufferUses::MAP_WRITE) {
+ flags |= glow::BUFFER_UPDATE_BARRIER_BIT;
+ }
+ if usage.intersects(
+ crate::BufferUses::STORAGE_READ | crate::BufferUses::STORAGE_READ_WRITE,
+ ) {
+ flags |= glow::SHADER_STORAGE_BARRIER_BIT;
+ }
+ unsafe { gl.memory_barrier(flags) };
+ }
+ C::TextureBarrier(usage) => {
+ let mut flags = 0;
+ if usage.contains(crate::TextureUses::RESOURCE) {
+ flags |= glow::TEXTURE_FETCH_BARRIER_BIT;
+ }
+ if usage.intersects(
+ crate::TextureUses::STORAGE_READ | crate::TextureUses::STORAGE_READ_WRITE,
+ ) {
+ flags |= glow::SHADER_IMAGE_ACCESS_BARRIER_BIT;
+ }
+ if usage.contains(crate::TextureUses::COPY_DST) {
+ flags |= glow::TEXTURE_UPDATE_BARRIER_BIT;
+ }
+ if usage.intersects(
+ crate::TextureUses::COLOR_TARGET
+ | crate::TextureUses::DEPTH_STENCIL_READ
+ | crate::TextureUses::DEPTH_STENCIL_WRITE,
+ ) {
+ flags |= glow::FRAMEBUFFER_BARRIER_BIT;
+ }
+ unsafe { gl.memory_barrier(flags) };
+ }
+ C::SetViewport {
+ ref rect,
+ ref depth,
+ } => {
+ unsafe { gl.viewport(rect.x, rect.y, rect.w, rect.h) };
+ unsafe { gl.depth_range_f32(depth.start, depth.end) };
+ }
+ C::SetScissor(ref rect) => {
+ unsafe { gl.scissor(rect.x, rect.y, rect.w, rect.h) };
+ unsafe { gl.enable(glow::SCISSOR_TEST) };
+ }
+ C::SetStencilFunc {
+ face,
+ function,
+ reference,
+ read_mask,
+ } => {
+ unsafe { gl.stencil_func_separate(face, function, reference as i32, read_mask) };
+ }
+ C::SetStencilOps {
+ face,
+ write_mask,
+ ref ops,
+ } => {
+ unsafe { gl.stencil_mask_separate(face, write_mask) };
+ unsafe { gl.stencil_op_separate(face, ops.fail, ops.depth_fail, ops.pass) };
+ }
+ C::SetVertexAttribute {
+ buffer,
+ ref buffer_desc,
+ attribute_desc: ref vat,
+ } => {
+ unsafe { gl.bind_buffer(glow::ARRAY_BUFFER, buffer) };
+ unsafe { gl.enable_vertex_attrib_array(vat.location) };
+
+ if buffer.is_none() {
+ match vat.format_desc.attrib_kind {
+ super::VertexAttribKind::Float => unsafe {
+ gl.vertex_attrib_format_f32(
+ vat.location,
+ vat.format_desc.element_count,
+ vat.format_desc.element_format,
+ true, // always normalized
+ vat.offset,
+ )
+ },
+ super::VertexAttribKind::Integer => unsafe {
+ gl.vertex_attrib_format_i32(
+ vat.location,
+ vat.format_desc.element_count,
+ vat.format_desc.element_format,
+ vat.offset,
+ )
+ },
+ }
+
+ //Note: there is apparently a bug on AMD 3500U:
+ // this call is ignored if the current array is disabled.
+ unsafe { gl.vertex_attrib_binding(vat.location, vat.buffer_index) };
+ } else {
+ match vat.format_desc.attrib_kind {
+ super::VertexAttribKind::Float => unsafe {
+ gl.vertex_attrib_pointer_f32(
+ vat.location,
+ vat.format_desc.element_count,
+ vat.format_desc.element_format,
+ true, // always normalized
+ buffer_desc.stride as i32,
+ vat.offset as i32,
+ )
+ },
+ super::VertexAttribKind::Integer => unsafe {
+ gl.vertex_attrib_pointer_i32(
+ vat.location,
+ vat.format_desc.element_count,
+ vat.format_desc.element_format,
+ buffer_desc.stride as i32,
+ vat.offset as i32,
+ )
+ },
+ }
+ unsafe { gl.vertex_attrib_divisor(vat.location, buffer_desc.step as u32) };
+ }
+ }
+ C::UnsetVertexAttribute(location) => {
+ unsafe { gl.disable_vertex_attrib_array(location) };
+ }
+ C::SetVertexBuffer {
+ index,
+ ref buffer,
+ ref buffer_desc,
+ } => {
+ unsafe { gl.vertex_binding_divisor(index, buffer_desc.step as u32) };
+ unsafe {
+ gl.bind_vertex_buffer(
+ index,
+ Some(buffer.raw),
+ buffer.offset as i32,
+ buffer_desc.stride as i32,
+ )
+ };
+ }
+ C::SetDepth(ref depth) => {
+ unsafe { gl.depth_func(depth.function) };
+ unsafe { gl.depth_mask(depth.mask) };
+ }
+ C::SetDepthBias(bias) => {
+ if bias.is_enabled() {
+ unsafe { gl.enable(glow::POLYGON_OFFSET_FILL) };
+ unsafe { gl.polygon_offset(bias.constant as f32, bias.slope_scale) };
+ } else {
+ unsafe { gl.disable(glow::POLYGON_OFFSET_FILL) };
+ }
+ }
+ C::ConfigureDepthStencil(aspects) => {
+ if aspects.contains(crate::FormatAspects::DEPTH) {
+ unsafe { gl.enable(glow::DEPTH_TEST) };
+ } else {
+ unsafe { gl.disable(glow::DEPTH_TEST) };
+ }
+ if aspects.contains(crate::FormatAspects::STENCIL) {
+ unsafe { gl.enable(glow::STENCIL_TEST) };
+ } else {
+ unsafe { gl.disable(glow::STENCIL_TEST) };
+ }
+ }
+ C::SetAlphaToCoverage(enabled) => {
+ if enabled {
+ unsafe { gl.enable(glow::SAMPLE_ALPHA_TO_COVERAGE) };
+ } else {
+ unsafe { gl.disable(glow::SAMPLE_ALPHA_TO_COVERAGE) };
+ }
+ }
+ C::SetProgram(program) => {
+ unsafe { gl.use_program(Some(program)) };
+ }
+ C::SetPrimitive(ref state) => {
+ unsafe { gl.front_face(state.front_face) };
+ if state.cull_face != 0 {
+ unsafe { gl.enable(glow::CULL_FACE) };
+ unsafe { gl.cull_face(state.cull_face) };
+ } else {
+ unsafe { gl.disable(glow::CULL_FACE) };
+ }
+ if self.features.contains(wgt::Features::DEPTH_CLIP_CONTROL) {
+ //Note: this is a bit tricky, since we are controlling the clip, not the clamp.
+ if state.unclipped_depth {
+ unsafe { gl.enable(glow::DEPTH_CLAMP) };
+ } else {
+ unsafe { gl.disable(glow::DEPTH_CLAMP) };
+ }
+ }
+ }
+ C::SetBlendConstant(c) => {
+ unsafe { gl.blend_color(c[0], c[1], c[2], c[3]) };
+ }
+ C::SetColorTarget {
+ draw_buffer_index,
+ desc: super::ColorTargetDesc { mask, ref blend },
+ } => {
+ use wgt::ColorWrites as Cw;
+ if let Some(index) = draw_buffer_index {
+ unsafe {
+ gl.color_mask_draw_buffer(
+ index,
+ mask.contains(Cw::RED),
+ mask.contains(Cw::GREEN),
+ mask.contains(Cw::BLUE),
+ mask.contains(Cw::ALPHA),
+ )
+ };
+ if let Some(ref blend) = *blend {
+ unsafe { gl.enable_draw_buffer(index, glow::BLEND) };
+ if blend.color != blend.alpha {
+ unsafe {
+ gl.blend_equation_separate_draw_buffer(
+ index,
+ blend.color.equation,
+ blend.alpha.equation,
+ )
+ };
+ unsafe {
+ gl.blend_func_separate_draw_buffer(
+ index,
+ blend.color.src,
+ blend.color.dst,
+ blend.alpha.src,
+ blend.alpha.dst,
+ )
+ };
+ } else {
+ unsafe { gl.blend_equation_draw_buffer(index, blend.color.equation) };
+ unsafe {
+ gl.blend_func_draw_buffer(index, blend.color.src, blend.color.dst)
+ };
+ }
+ } else if self
+ .shared
+ .private_caps
+ .contains(super::PrivateCapabilities::CAN_DISABLE_DRAW_BUFFER)
+ {
+ unsafe { gl.disable_draw_buffer(index, glow::BLEND) };
+ }
+ } else {
+ unsafe {
+ gl.color_mask(
+ mask.contains(Cw::RED),
+ mask.contains(Cw::GREEN),
+ mask.contains(Cw::BLUE),
+ mask.contains(Cw::ALPHA),
+ )
+ };
+ if let Some(ref blend) = *blend {
+ unsafe { gl.enable(glow::BLEND) };
+ if blend.color != blend.alpha {
+ unsafe {
+ gl.blend_equation_separate(
+ blend.color.equation,
+ blend.alpha.equation,
+ )
+ };
+ unsafe {
+ gl.blend_func_separate(
+ blend.color.src,
+ blend.color.dst,
+ blend.alpha.src,
+ blend.alpha.dst,
+ )
+ };
+ } else {
+ unsafe { gl.blend_equation(blend.color.equation) };
+ unsafe { gl.blend_func(blend.color.src, blend.color.dst) };
+ }
+ } else {
+ unsafe { gl.disable(glow::BLEND) };
+ }
+ }
+ }
+ C::BindBuffer {
+ target,
+ slot,
+ buffer,
+ offset,
+ size,
+ } => {
+ unsafe { gl.bind_buffer_range(target, slot, Some(buffer), offset, size) };
+ }
+ C::BindSampler(texture_index, sampler) => {
+ unsafe { gl.bind_sampler(texture_index, sampler) };
+ }
+ C::BindTexture {
+ slot,
+ texture,
+ target,
+ aspects,
+ } => {
+ unsafe { gl.active_texture(glow::TEXTURE0 + slot) };
+ unsafe { gl.bind_texture(target, Some(texture)) };
+
+ let version = gl.version();
+ let is_min_es_3_1 = version.is_embedded && (version.major, version.minor) >= (3, 1);
+ let is_min_4_3 = !version.is_embedded && (version.major, version.minor) >= (4, 3);
+ if is_min_es_3_1 || is_min_4_3 {
+ let mode = match aspects {
+ crate::FormatAspects::DEPTH => Some(glow::DEPTH_COMPONENT),
+ crate::FormatAspects::STENCIL => Some(glow::STENCIL_INDEX),
+ _ => None,
+ };
+ if let Some(mode) = mode {
+ unsafe {
+ gl.tex_parameter_i32(
+ target,
+ glow::DEPTH_STENCIL_TEXTURE_MODE,
+ mode as _,
+ )
+ };
+ }
+ }
+ }
+ C::BindImage { slot, ref binding } => {
+ unsafe {
+ gl.bind_image_texture(
+ slot,
+ binding.raw,
+ binding.mip_level as i32,
+ binding.array_layer.is_none(),
+ binding.array_layer.unwrap_or_default() as i32,
+ binding.access,
+ binding.format,
+ )
+ };
+ }
+ #[cfg(not(target_arch = "wasm32"))]
+ C::InsertDebugMarker(ref range) => {
+ let marker = extract_marker(data_bytes, range);
+ unsafe {
+ gl.debug_message_insert(
+ glow::DEBUG_SOURCE_APPLICATION,
+ glow::DEBUG_TYPE_MARKER,
+ DEBUG_ID,
+ glow::DEBUG_SEVERITY_NOTIFICATION,
+ marker,
+ )
+ };
+ }
+ #[cfg(target_arch = "wasm32")]
+ C::InsertDebugMarker(_) => (),
+ #[cfg_attr(target_arch = "wasm32", allow(unused))]
+ C::PushDebugGroup(ref range) => {
+ #[cfg(not(target_arch = "wasm32"))]
+ let marker = extract_marker(data_bytes, range);
+ #[cfg(not(target_arch = "wasm32"))]
+ unsafe {
+ gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, marker)
+ };
+ }
+ C::PopDebugGroup => {
+ #[cfg(not(target_arch = "wasm32"))]
+ unsafe {
+ gl.pop_debug_group()
+ };
+ }
+ C::SetPushConstants {
+ ref uniform,
+ offset,
+ } => {
+ fn get_data<T>(data: &[u8], offset: u32) -> &[T] {
+ let raw = &data[(offset as usize)..];
+ unsafe {
+ slice::from_raw_parts(
+ raw.as_ptr() as *const _,
+ raw.len() / mem::size_of::<T>(),
+ )
+ }
+ }
+
+ let location = uniform.location.as_ref();
+
+ match uniform.utype {
+ glow::FLOAT => {
+ let data = get_data::<f32>(data_bytes, offset)[0];
+ unsafe { gl.uniform_1_f32(location, data) };
+ }
+ glow::FLOAT_VEC2 => {
+ let data = get_data::<[f32; 2]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_2_f32_slice(location, &data) };
+ }
+ glow::FLOAT_VEC3 => {
+ let data = get_data::<[f32; 3]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_3_f32_slice(location, &data) };
+ }
+ glow::FLOAT_VEC4 => {
+ let data = get_data::<[f32; 4]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_4_f32_slice(location, &data) };
+ }
+ glow::INT => {
+ let data = get_data::<i32>(data_bytes, offset)[0];
+ unsafe { gl.uniform_1_i32(location, data) };
+ }
+ glow::INT_VEC2 => {
+ let data = get_data::<[i32; 2]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_2_i32_slice(location, &data) };
+ }
+ glow::INT_VEC3 => {
+ let data = get_data::<[i32; 3]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_3_i32_slice(location, &data) };
+ }
+ glow::INT_VEC4 => {
+ let data = get_data::<[i32; 4]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_4_i32_slice(location, &data) };
+ }
+ glow::FLOAT_MAT2 => {
+ let data = get_data::<[f32; 4]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_matrix_2_f32_slice(location, false, &data) };
+ }
+ glow::FLOAT_MAT3 => {
+ let data = get_data::<[f32; 9]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_matrix_3_f32_slice(location, false, &data) };
+ }
+ glow::FLOAT_MAT4 => {
+ let data = get_data::<[f32; 16]>(data_bytes, offset)[0];
+ unsafe { gl.uniform_matrix_4_f32_slice(location, false, &data) };
+ }
+ _ => panic!("Unsupported uniform datatype!"),
+ }
+ }
+ }
+ }
+}
+
+impl crate::Queue<super::Api> for super::Queue {
+ /// Executes the recorded command buffers immediately on the GL context.
+ /// GLES has no deferred submission: each command is replayed here via
+ /// `self.process`, so "submit" is synchronous command interpretation.
+ unsafe fn submit(
+ &mut self,
+ command_buffers: &[&super::CommandBuffer],
+ signal_fence: Option<(&mut super::Fence, crate::FenceValue)>,
+ ) -> Result<(), crate::DeviceError> {
+ // Clone the Arc first so the context lock borrows from a local,
+ // not from `self` (which `reset_state`/`process` also borrow).
+ let shared = Arc::clone(&self.shared);
+ let gl = &shared.context.lock();
+ for cmd_buf in command_buffers.iter() {
+ // The command encoder assumes a default state when encoding the command buffer.
+ // Always reset the state between command_buffers to reflect this assumption. Do
+ // this at the beginning of the loop in case something outside of wgpu modified
+ // this state prior to commit.
+ unsafe { self.reset_state(gl) };
+ // Wrap the whole buffer replay in a debug group when it has a label
+ // (native only: glow's debug-group API is unavailable on wasm).
+ #[cfg(not(target_arch = "wasm32"))]
+ if let Some(ref label) = cmd_buf.label {
+ unsafe { gl.push_debug_group(glow::DEBUG_SOURCE_APPLICATION, DEBUG_ID, label) };
+ }
+
+ // Replay every encoded command against the live GL context.
+ for command in cmd_buf.commands.iter() {
+ unsafe { self.process(gl, command, &cmd_buf.data_bytes, &cmd_buf.queries) };
+ }
+
+ #[cfg(not(target_arch = "wasm32"))]
+ if cmd_buf.label.is_some() {
+ unsafe { gl.pop_debug_group() };
+ }
+ }
+
+ if let Some((fence, value)) = signal_fence {
+ // Drop already-signalled sync objects, then queue a new fence sync
+ // that will signal `value` once the GPU reaches this point.
+ fence.maintain(gl);
+ let sync = unsafe { gl.fence_sync(glow::SYNC_GPU_COMMANDS_COMPLETE, 0) }
+ .map_err(|_| crate::DeviceError::OutOfMemory)?;
+ fence.pending.push((value, sync));
+ }
+
+ Ok(())
+ }
+
+ /// Presents `texture` to `surface`. Only the GL handle acquisition differs
+ /// per platform; the actual work is delegated to `Surface::present`.
+ unsafe fn present(
+ &mut self,
+ surface: &mut super::Surface,
+ texture: super::Texture,
+ ) -> Result<(), crate::SurfaceError> {
+ // EGL path: bypass the usual context lock — presentation manages the
+ // context/surface binding itself. NOTE(review): assumes the caller does
+ // not hold the AdapterContext lock concurrently — confirm at call sites.
+ #[cfg(any(not(target_arch = "wasm32"), target_os = "emscripten"))]
+ let gl = unsafe { &self.shared.context.get_without_egl_lock() };
+
+ // WebGL path: the context is directly accessible, no locking exists.
+ #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
+ let gl = &self.shared.context.glow_context;
+
+ unsafe { surface.present(texture, gl) }
+ }
+
+ /// Timestamp period in nanoseconds per tick; GL timestamps are already
+ /// in nanoseconds, hence the constant 1.0.
+ unsafe fn get_timestamp_period(&self) -> f32 {
+ 1.0
+ }
+}
+
+// SAFETY: wasm32 without threads is single-threaded, so these impls can never
+// actually be exercised across threads; they only satisfy trait bounds.
+#[cfg(target_arch = "wasm32")]
+unsafe impl Sync for super::Queue {}
+#[cfg(target_arch = "wasm32")]
+unsafe impl Send for super::Queue {}
diff --git a/third_party/rust/wgpu-hal/src/gles/shaders/clear.frag b/third_party/rust/wgpu-hal/src/gles/shaders/clear.frag
new file mode 100644
index 0000000000..7766c12d9f
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/shaders/clear.frag
@@ -0,0 +1,9 @@
+#version 300 es
+// Fragment shader used by the GLES backend to clear a color attachment by
+// drawing a fullscreen triangle (paired with clear.vert).
+precision lowp float;
+uniform vec4 color;
+//Hack: Some WebGL implementations don't find "color" otherwise.
+uniform vec4 color_workaround;
+out vec4 frag;
+void main() {
+ // Only one of the two uniforms is expected to be set; summing lets either
+ // binding path supply the clear color.
+ frag = color + color_workaround;
+}
diff --git a/third_party/rust/wgpu-hal/src/gles/shaders/clear.vert b/third_party/rust/wgpu-hal/src/gles/shaders/clear.vert
new file mode 100644
index 0000000000..ac655e7f31
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/shaders/clear.vert
@@ -0,0 +1,11 @@
+#version 300 es
+// Vertex shader producing a single oversized triangle whose interior covers
+// all of clip space, so one 3-vertex draw clears the whole target.
+precision lowp float;
+// A triangle that fills the whole screen
+const vec2[3] TRIANGLE_POS = vec2[](
+ vec2( 0.0, -3.0),
+ vec2(-3.0, 1.0),
+ vec2( 3.0, 1.0)
+);
+void main() {
+ // gl_VertexID selects the corner; no vertex buffers are needed.
+ gl_Position = vec4(TRIANGLE_POS[gl_VertexID], 0.0, 1.0);
+} \ No newline at end of file
diff --git a/third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.frag b/third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.frag
new file mode 100644
index 0000000000..853f82a6ae
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.frag
@@ -0,0 +1,16 @@
+#version 300 es
+// Fragment shader for presenting an sRGB swapchain: samples the internal
+// linear texture and applies the sRGB transfer function manually, since the
+// default framebuffer may not perform the encoding itself.
+precision mediump float;
+in vec2 uv;
+uniform sampler2D present_texture;
+out vec4 frag;
+// Piecewise linear->sRGB encode (IEC 61966-2-1); 0.41666 approximates 1/2.4.
+vec4 linear_to_srgb(vec4 linear) {
+ vec3 color_linear = linear.rgb;
+ vec3 selector = ceil(color_linear - 0.0031308); // 0 if under value, 1 if over
+ vec3 under = 12.92 * color_linear;
+ vec3 over = 1.055 * pow(color_linear, vec3(0.41666)) - 0.055;
+ vec3 result = mix(under, over, selector);
+ // Alpha stays linear; only the color channels are encoded.
+ return vec4(result, linear.a);
+}
+void main() {
+ frag = linear_to_srgb(texture(present_texture, uv));
+} \ No newline at end of file
diff --git a/third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.vert b/third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.vert
new file mode 100644
index 0000000000..922f2a1848
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/shaders/srgb_present.vert
@@ -0,0 +1,18 @@
+#version 300 es
+// Vertex shader for the sRGB present blit: one screen-covering triangle with
+// matching UVs. The UVs are vertically mirrored relative to the positions to
+// perform the Y-flip between wgpu's render orientation and GL presentation.
+precision mediump float;
+// A triangle that fills the whole screen
+const vec2[3] TRIANGLE_POS = vec2[](
+ vec2( 0.0, -3.0),
+ vec2(-3.0, 1.0),
+ vec2( 3.0, 1.0)
+);
+const vec2[3] TRIANGLE_UV = vec2[](
+ vec2( 0.5, 1.),
+ vec2( -1.0, -1.0),
+ vec2( 2.0, -1.0)
+);
+out vec2 uv;
+void main() {
+ // gl_VertexID indexes both tables; no vertex buffers are bound.
+ uv = TRIANGLE_UV[gl_VertexID];
+ gl_Position = vec4(TRIANGLE_POS[gl_VertexID], 0.0, 1.0);
+} \ No newline at end of file
diff --git a/third_party/rust/wgpu-hal/src/gles/web.rs b/third_party/rust/wgpu-hal/src/gles/web.rs
new file mode 100644
index 0000000000..565ffcab18
--- /dev/null
+++ b/third_party/rust/wgpu-hal/src/gles/web.rs
@@ -0,0 +1,404 @@
+use glow::HasContext;
+use parking_lot::Mutex;
+use wasm_bindgen::JsCast;
+
+use super::TextureFormatDesc;
+
+/// A wrapper around a [`glow::Context`] to provide a fake `lock()` api that makes it compatible
+/// with the `AdapterContext` API from the EGL implementation.
+pub struct AdapterContext {
+ /// The raw glow context; stored directly since WebGL needs no real locking.
+ pub glow_context: glow::Context,
+}
+
+impl AdapterContext {
+ /// Always `false` on the web backend: the context is created from a canvas
+ /// by the embedder, never owned by wgpu-hal itself.
+ pub fn is_owned(&self) -> bool {
+ false
+ }
+
+ /// Obtain a lock to the EGL context and get handle to the [`glow::Context`] that can be used to
+ /// do rendering.
+ ///
+ /// On the web this performs no actual locking — wasm is single-threaded —
+ /// and simply returns a borrow of the context.
+ #[track_caller]
+ pub fn lock(&self) -> &glow::Context {
+ &self.glow_context
+ }
+}
+
+/// WebGL2 instance. Holds the context of the most recently created surface so
+/// that `enumerate_adapters` can expose an adapter backed by it.
+#[derive(Debug)]
+pub struct Instance {
+ // Set by `create_surface_from_context`, cleared by `destroy_surface`.
+ webgl2_context: Mutex<Option<web_sys::WebGl2RenderingContext>>,
+}
+
+impl Instance {
+ /// Creates a surface from an on-screen `<canvas>` element by requesting a
+ /// `"webgl2"` context with antialiasing disabled.
+ pub fn create_surface_from_canvas(
+ &self,
+ canvas: web_sys::HtmlCanvasElement,
+ ) -> Result<Surface, crate::InstanceError> {
+ let result =
+ canvas.get_context_with_context_options("webgl2", &Self::create_context_options());
+ self.create_surface_from_context(Canvas::Canvas(canvas), result)
+ }
+
+ /// Same as [`Self::create_surface_from_canvas`] but for an `OffscreenCanvas`
+ /// (e.g. for use inside a web worker).
+ pub fn create_surface_from_offscreen_canvas(
+ &self,
+ canvas: web_sys::OffscreenCanvas,
+ ) -> Result<Surface, crate::InstanceError> {
+ let result =
+ canvas.get_context_with_context_options("webgl2", &Self::create_context_options());
+ self.create_surface_from_context(Canvas::Offscreen(canvas), result)
+ }
+
+ /// Common portion of public `create_surface_from_*` functions.
+ ///
+ /// Note: Analogous code also exists in the WebGPU backend at
+ /// `wgpu::backend::web::Context`.
+ fn create_surface_from_context(
+ &self,
+ canvas: Canvas,
+ context_result: Result<Option<js_sys::Object>, wasm_bindgen::JsValue>,
+ ) -> Result<Surface, crate::InstanceError> {
+ let context_object: js_sys::Object = match context_result {
+ Ok(Some(context)) => context,
+ Ok(None) => {
+ // <https://html.spec.whatwg.org/multipage/canvas.html#dom-canvas-getcontext-dev>
+ // A getContext() call “returns null if contextId is not supported, or if the
+ // canvas has already been initialized with another context type”. Additionally,
+ // “not supported” could include “insufficient GPU resources” or “the GPU process
+ // previously crashed”. So, we must return it as an `Err` since it could occur
+ // for circumstances outside the application author's control.
+ return Err(crate::InstanceError);
+ }
+ Err(js_error) => {
+ // <https://html.spec.whatwg.org/multipage/canvas.html#dom-canvas-getcontext>
+ // A thrown exception indicates misuse of the canvas state. Ideally we wouldn't
+ // panic in this case, but for now, `InstanceError` conveys no detail, so it
+ // is more informative to panic with a specific message.
+ panic!("canvas.getContext() threw {js_error:?}")
+ }
+ };
+
+ // Not returning this error because it is a type error that shouldn't happen unless
+ // the browser, JS builtin objects, or wasm bindings are misbehaving somehow.
+ let webgl2_context: web_sys::WebGl2RenderingContext = context_object
+ .dyn_into()
+ .expect("canvas context is not a WebGl2RenderingContext");
+
+ // Remember the context so `enumerate_adapters` can expose it. NOTE(review):
+ // a second surface overwrites the previous context here — confirm that
+ // only one surface per instance is supported.
+ *self.webgl2_context.lock() = Some(webgl2_context.clone());
+
+ Ok(Surface {
+ canvas,
+ webgl2_context,
+ srgb_present_program: None,
+ swapchain: None,
+ texture: None,
+ presentable: true,
+ })
+ }
+
+ /// Builds the JS options object passed to `getContext`, disabling the
+ /// browser's built-in antialiasing (wgpu manages MSAA itself).
+ fn create_context_options() -> js_sys::Object {
+ let context_options = js_sys::Object::new();
+ js_sys::Reflect::set(
+ &context_options,
+ &"antialias".into(),
+ &wasm_bindgen::JsValue::FALSE,
+ )
+ .expect("Cannot create context options");
+ context_options
+ }
+}
+
+// SAFETY: wasm without threads is single-threaded, so cross-thread access to
+// the non-Send JS handles inside `Instance` cannot occur.
+unsafe impl Sync for Instance {}
+unsafe impl Send for Instance {}
+
+impl crate::Instance<super::Api> for Instance {
+ /// Creates an empty instance; the WebGL2 context is attached later when a
+ /// surface is created.
+ unsafe fn init(_desc: &crate::InstanceDescriptor) -> Result<Self, crate::InstanceError> {
+ Ok(Instance {
+ webgl2_context: Mutex::new(None),
+ })
+ }
+
+ /// Exposes at most one adapter, backed by the context of the last created
+ /// surface; returns an empty list if no surface exists yet.
+ unsafe fn enumerate_adapters(&self) -> Vec<crate::ExposedAdapter<super::Api>> {
+ let context_guard = self.webgl2_context.lock();
+ let gl = match *context_guard {
+ Some(ref webgl2_context) => glow::Context::from_webgl2_context(webgl2_context.clone()),
+ None => return Vec::new(),
+ };
+
+ // `expose` returns Option; Option-as-iterator yields zero or one adapter.
+ unsafe { super::Adapter::expose(AdapterContext { glow_context: gl }) }
+ .into_iter()
+ .collect()
+ }
+
+ /// Creates a surface from a raw window handle by locating the DOM canvas
+ /// tagged with `data-raw-handle` matching the handle id.
+ unsafe fn create_surface(
+ &self,
+ _display_handle: raw_window_handle::RawDisplayHandle,
+ window_handle: raw_window_handle::RawWindowHandle,
+ ) -> Result<Surface, crate::InstanceError> {
+ if let raw_window_handle::RawWindowHandle::Web(handle) = window_handle {
+ let canvas: web_sys::HtmlCanvasElement = web_sys::window()
+ .and_then(|win| win.document())
+ .expect("Cannot get document")
+ .query_selector(&format!("canvas[data-raw-handle=\"{}\"]", handle.id))
+ .expect("Cannot query for canvas")
+ .expect("Canvas is not found")
+ .dyn_into()
+ .expect("Failed to downcast to canvas type");
+
+ self.create_surface_from_canvas(canvas)
+ } else {
+ // Only `RawWindowHandle::Web` is meaningful on this backend.
+ Err(crate::InstanceError)
+ }
+ }
+
+ /// Drops the instance's reference to the surface's context, but only if it
+ /// is the same context this instance currently holds.
+ unsafe fn destroy_surface(&self, surface: Surface) {
+ let mut context_option_ref = self.webgl2_context.lock();
+
+ if let Some(context) = context_option_ref.as_ref() {
+ if context == &surface.webgl2_context {
+ *context_option_ref = None;
+ }
+ }
+ }
+}
+
+/// A WebGL2-backed presentation surface. Rendering goes into an internal
+/// texture which is copied to the canvas's default framebuffer on present.
+#[derive(Clone, Debug)]
+pub struct Surface {
+ // The canvas (on-screen or offscreen) this surface presents into.
+ canvas: Canvas,
+ // Kept so `Instance::destroy_surface` can identify this surface's context.
+ webgl2_context: web_sys::WebGl2RenderingContext,
+ // Created by `configure`; `None` until the surface is configured.
+ pub(super) swapchain: Option<Swapchain>,
+ // Internal render-target texture handed out by `acquire_texture`.
+ texture: Option<glow::Texture>,
+ pub(super) presentable: bool,
+ // Lazily-built program used to sRGB-encode on present (srgb_present.*).
+ srgb_present_program: Option<glow::Program>,
+}
+
+/// The two kinds of canvas a surface can present to: a DOM `<canvas>` element
+/// or an `OffscreenCanvas` (e.g. in a worker).
+#[derive(Clone, Debug)]
+enum Canvas {
+ Canvas(web_sys::HtmlCanvasElement),
+ Offscreen(web_sys::OffscreenCanvas),
+}
+
+// SAFETY: the web target is single-threaded, so the non-Send JS handles inside
+// `Surface` are never actually shared across threads.
+unsafe impl Sync for Surface {}
+unsafe impl Send for Surface {}
+
+/// State created by `Surface::configure`: the off-screen render target and the
+/// framebuffer used to blit it to the canvas on present.
+#[derive(Clone, Debug)]
+pub struct Swapchain {
+ // Configured size of the swapchain image.
+ pub(crate) extent: wgt::Extent3d,
+ // pub(crate) channel: f::ChannelType,
+ pub(super) format: wgt::TextureFormat,
+ // Read-framebuffer wrapping the surface texture, used as blit source.
+ pub(super) framebuffer: glow::Framebuffer,
+ pub(super) format_desc: TextureFormatDesc,
+}
+
+impl Surface {
+ /// Copies the internal swapchain texture to the canvas's default
+ /// framebuffer. sRGB formats are drawn through the encode shader; linear
+ /// formats use a plain framebuffer blit. The passed texture is ignored —
+ /// `self.texture` is always the source.
+ pub(super) unsafe fn present(
+ &mut self,
+ _suf_texture: super::Texture,
+ gl: &glow::Context,
+ ) -> Result<(), crate::SurfaceError> {
+ let swapchain = self.swapchain.as_ref().ok_or(crate::SurfaceError::Other(
+ "need to configure surface before presenting",
+ ))?;
+
+ if swapchain.format.is_srgb() {
+ // Important to set the viewport since we don't know in what state the user left it.
+ unsafe {
+ gl.viewport(
+ 0,
+ 0,
+ swapchain.extent.width as _,
+ swapchain.extent.height as _,
+ )
+ };
+ // Target the default (canvas) framebuffer and neutralize any state
+ // left over from user rendering that could affect the fullscreen draw.
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) };
+ unsafe { gl.bind_sampler(0, None) };
+ unsafe { gl.active_texture(glow::TEXTURE0) };
+ unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) };
+ unsafe { gl.use_program(self.srgb_present_program) };
+ unsafe { gl.disable(glow::DEPTH_TEST) };
+ unsafe { gl.disable(glow::STENCIL_TEST) };
+ unsafe { gl.disable(glow::SCISSOR_TEST) };
+ unsafe { gl.disable(glow::BLEND) };
+ unsafe { gl.disable(glow::CULL_FACE) };
+ unsafe { gl.draw_buffers(&[glow::BACK]) };
+ // One 3-vertex fullscreen triangle (see srgb_present.vert).
+ unsafe { gl.draw_arrays(glow::TRIANGLES, 0, 3) };
+ } else {
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(swapchain.framebuffer)) };
+ unsafe { gl.bind_framebuffer(glow::DRAW_FRAMEBUFFER, None) };
+ // Note the Y-flipping here. GL's presentation is not flipped,
+ // but main rendering is. Therefore, we Y-flip the output positions
+ // in the shader, and also this blit.
+ unsafe {
+ gl.blit_framebuffer(
+ 0,
+ swapchain.extent.height as i32,
+ swapchain.extent.width as i32,
+ 0,
+ 0,
+ 0,
+ swapchain.extent.width as i32,
+ swapchain.extent.height as i32,
+ glow::COLOR_BUFFER_BIT,
+ glow::NEAREST,
+ )
+ };
+ }
+
+ Ok(())
+ }
+
+ /// Compiles and links the sRGB-encode present program from the embedded
+ /// shader sources. NOTE(review): compile/link status is never queried, so
+ /// a shader error would only surface as a broken draw — consider checking
+ /// `get_shader_compile_status`/`get_program_link_status` in debug builds.
+ unsafe fn create_srgb_present_program(gl: &glow::Context) -> glow::Program {
+ let program = unsafe { gl.create_program() }.expect("Could not create shader program");
+ let vertex =
+ unsafe { gl.create_shader(glow::VERTEX_SHADER) }.expect("Could not create shader");
+ unsafe { gl.shader_source(vertex, include_str!("./shaders/srgb_present.vert")) };
+ unsafe { gl.compile_shader(vertex) };
+ let fragment =
+ unsafe { gl.create_shader(glow::FRAGMENT_SHADER) }.expect("Could not create shader");
+ unsafe { gl.shader_source(fragment, include_str!("./shaders/srgb_present.frag")) };
+ unsafe { gl.compile_shader(fragment) };
+ unsafe { gl.attach_shader(program, vertex) };
+ unsafe { gl.attach_shader(program, fragment) };
+ unsafe { gl.link_program(program) };
+ // Shaders can be deleted once linked; GL defers the actual delete.
+ unsafe { gl.delete_shader(vertex) };
+ unsafe { gl.delete_shader(fragment) };
+ unsafe { gl.bind_texture(glow::TEXTURE_2D, None) };
+
+ program
+ }
+
+ pub fn supports_srgb(&self) -> bool {
+ // present.frag takes care of handling srgb conversion
+ true
+ }
+}
+
+impl crate::Surface<super::Api> for Surface {
+ /// (Re)builds the swapchain: resizes the canvas, recreates the internal
+ /// render-target texture and its read-framebuffer, and lazily builds the
+ /// sRGB present program when the format requires it. Safe to call again
+ /// to reconfigure — old GL objects are deleted first.
+ unsafe fn configure(
+ &mut self,
+ device: &super::Device,
+ config: &crate::SurfaceConfiguration,
+ ) -> Result<(), crate::SurfaceError> {
+ // Resize the backing canvas to match the requested extent.
+ match self.canvas {
+ Canvas::Canvas(ref canvas) => {
+ canvas.set_width(config.extent.width);
+ canvas.set_height(config.extent.height);
+ }
+ Canvas::Offscreen(ref canvas) => {
+ canvas.set_width(config.extent.width);
+ canvas.set_height(config.extent.height);
+ }
+ }
+
+ let gl = &device.shared.context.lock();
+
+ if let Some(swapchain) = self.swapchain.take() {
+ // delete all frame buffers already allocated
+ unsafe { gl.delete_framebuffer(swapchain.framebuffer) };
+ }
+
+ // Program is built once and reused across reconfigurations.
+ if self.srgb_present_program.is_none() && config.format.is_srgb() {
+ self.srgb_present_program = Some(unsafe { Self::create_srgb_present_program(gl) });
+ }
+
+ if let Some(texture) = self.texture.take() {
+ unsafe { gl.delete_texture(texture) };
+ }
+
+ // `?` converts DeviceError into SurfaceError here.
+ self.texture = Some(unsafe { gl.create_texture() }.map_err(|error| {
+ log::error!("Internal swapchain texture creation failed: {error}");
+ crate::DeviceError::OutOfMemory
+ })?);
+
+ let desc = device.shared.describe_texture_format(config.format);
+ unsafe { gl.bind_texture(glow::TEXTURE_2D, self.texture) };
+ // Nearest filtering: the texture is only ever sampled/blitted 1:1.
+ unsafe {
+ gl.tex_parameter_i32(
+ glow::TEXTURE_2D,
+ glow::TEXTURE_MIN_FILTER,
+ glow::NEAREST as _,
+ )
+ };
+ unsafe {
+ gl.tex_parameter_i32(
+ glow::TEXTURE_2D,
+ glow::TEXTURE_MAG_FILTER,
+ glow::NEAREST as _,
+ )
+ };
+ // Immutable single-mip storage at the configured size.
+ unsafe {
+ gl.tex_storage_2d(
+ glow::TEXTURE_2D,
+ 1,
+ desc.internal,
+ config.extent.width as i32,
+ config.extent.height as i32,
+ )
+ };
+
+ // Framebuffer wrapping the texture, used as the blit source on present.
+ let framebuffer = unsafe { gl.create_framebuffer() }.map_err(|error| {
+ log::error!("Internal swapchain framebuffer creation failed: {error}");
+ crate::DeviceError::OutOfMemory
+ })?;
+ unsafe { gl.bind_framebuffer(glow::READ_FRAMEBUFFER, Some(framebuffer)) };
+ unsafe {
+ gl.framebuffer_texture_2d(
+ glow::READ_FRAMEBUFFER,
+ glow::COLOR_ATTACHMENT0,
+ glow::TEXTURE_2D,
+ self.texture,
+ 0,
+ )
+ };
+ unsafe { gl.bind_texture(glow::TEXTURE_2D, None) };
+
+ self.swapchain = Some(Swapchain {
+ extent: config.extent,
+ // channel: config.format.base_format().1,
+ format: config.format,
+ format_desc: desc,
+ framebuffer,
+ });
+ Ok(())
+ }
+
+ /// Deletes the swapchain's GL objects; idempotent thanks to `take()`.
+ unsafe fn unconfigure(&mut self, device: &super::Device) {
+ let gl = device.shared.context.lock();
+ if let Some(swapchain) = self.swapchain.take() {
+ unsafe { gl.delete_framebuffer(swapchain.framebuffer) };
+ }
+ if let Some(renderbuffer) = self.texture.take() {
+ unsafe { gl.delete_texture(renderbuffer) };
+ }
+ }
+
+ /// Wraps the internal texture as the acquired swapchain image. Panics
+ /// (via `unwrap`) if the surface has not been configured — the hal
+ /// contract requires `configure` before `acquire_texture`.
+ unsafe fn acquire_texture(
+ &mut self,
+ _timeout_ms: Option<std::time::Duration>, //TODO
+ ) -> Result<Option<crate::AcquiredSurfaceTexture<super::Api>>, crate::SurfaceError> {
+ let sc = self.swapchain.as_ref().unwrap();
+ let texture = super::Texture {
+ inner: super::TextureInner::Texture {
+ raw: self.texture.unwrap(),
+ target: glow::TEXTURE_2D,
+ },
+ drop_guard: None,
+ array_layer_count: 1,
+ mip_level_count: 1,
+ format: sc.format,
+ format_desc: sc.format_desc.clone(),
+ copy_size: crate::CopyExtent {
+ width: sc.extent.width,
+ height: sc.extent.height,
+ depth: 1,
+ },
+ is_cubemap: false,
+ };
+ Ok(Some(crate::AcquiredSurfaceTexture {
+ texture,
+ // There is only one "image"; it can never be suboptimal.
+ suboptimal: false,
+ }))
+ }
+
+ // Nothing to do: the texture is owned by the surface, not the caller.
+ unsafe fn discard_texture(&mut self, _texture: super::Texture) {}
+}