author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:14:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 01:14:29 +0000
commit     fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8 (patch)
tree       4c1ccaf5486d4f2009f9a338a98a83e886e29c97 /third_party/rust
parent     Releasing progress-linux version 124.0.1-1~progress7.99u1. (diff)
download   firefox-fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8.tar.xz
           firefox-fbaf0bb26397aa498eb9156f06d5a6fe34dd7dd8.zip
Merging upstream version 125.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust')
-rw-r--r--  third_party/rust/aa-stroke/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/aa-stroke/src/bezierflattener.rs | 40
-rw-r--r--  third_party/rust/aa-stroke/src/lib.rs | 56
-rw-r--r--  third_party/rust/ahash/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/ahash/Cargo.toml | 61
-rw-r--r--  third_party/rust/ahash/build.rs | 4
-rw-r--r--  third_party/rust/ahash/src/aes_hash.rs | 13
-rw-r--r--  third_party/rust/ahash/src/hash_quality_test.rs | 36
-rw-r--r--  third_party/rust/ahash/src/random_state.rs | 7
-rw-r--r--  third_party/rust/cc/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/cc/Cargo.lock | 110
-rw-r--r--  third_party/rust/cc/Cargo.toml | 22
-rw-r--r--  third_party/rust/cc/README.md | 212
-rw-r--r--  third_party/rust/cc/src/bin/gcc-shim.rs | 48
-rw-r--r--  third_party/rust/cc/src/command_helpers.rs | 433
-rw-r--r--  third_party/rust/cc/src/lib.rs | 2668
-rw-r--r--  third_party/rust/cc/src/parallel/async_executor.rs | 118
-rw-r--r--  third_party/rust/cc/src/parallel/job_token.rs | 255
-rw-r--r--  third_party/rust/cc/src/parallel/mod.rs | 20
-rw-r--r--  third_party/rust/cc/src/parallel/stderr.rs | 90
-rw-r--r--  third_party/rust/cc/src/tool.rs | 399
-rw-r--r--  third_party/rust/cc/src/windows/com.rs (renamed from third_party/rust/cc/src/com.rs) | 42
-rw-r--r--  third_party/rust/cc/src/windows/find_tools.rs (renamed from third_party/rust/cc/src/windows_registry.rs) | 456
-rw-r--r--  third_party/rust/cc/src/windows/mod.rs | 20
-rw-r--r--  third_party/rust/cc/src/windows/registry.rs (renamed from third_party/rust/cc/src/registry.rs) | 94
-rw-r--r--  third_party/rust/cc/src/windows/setup_config.rs (renamed from third_party/rust/cc/src/setup_config.rs) | 34
-rw-r--r--  third_party/rust/cc/src/windows/vs_instances.rs (renamed from third_party/rust/cc/src/vs_instances.rs) | 2
-rw-r--r--  third_party/rust/cc/src/windows/winapi.rs (renamed from third_party/rust/cc/src/winapi.rs) | 102
-rw-r--r--  third_party/rust/cc/src/windows/windows_sys.rs | 223
-rw-r--r--  third_party/rust/cc/tests/cc_env.rs | 118
-rw-r--r--  third_party/rust/cc/tests/cflags.rs | 15
-rw-r--r--  third_party/rust/cc/tests/cxxflags.rs | 15
-rw-r--r--  third_party/rust/cc/tests/support/mod.rs | 172
-rw-r--r--  third_party/rust/cc/tests/test.rs | 461
-rw-r--r--  third_party/rust/document-features/.cargo-checksum.json | 1
-rw-r--r--  third_party/rust/document-features/CHANGELOG.md | 44
-rw-r--r--  third_party/rust/document-features/Cargo.toml | 40
-rw-r--r--  third_party/rust/document-features/LICENSE-APACHE | 73
-rw-r--r--  third_party/rust/document-features/LICENSE-MIT | 19
-rw-r--r--  third_party/rust/document-features/README.md | 43
-rw-r--r--  third_party/rust/document-features/lib.rs | 877
-rw-r--r--  third_party/rust/document-features/rustfmt.toml | 1
-rw-r--r--  third_party/rust/document-features/tests/self-doc.rs | 37
-rw-r--r--  third_party/rust/glean-core/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/glean-core/Cargo.toml | 4
-rw-r--r--  third_party/rust/glean-core/src/core/mod.rs | 6
-rw-r--r--  third_party/rust/glean-core/src/glean.udl | 10
-rw-r--r--  third_party/rust/glean-core/src/internal_pings.rs | 4
-rw-r--r--  third_party/rust/glean-core/src/lib.rs | 38
-rw-r--r--  third_party/rust/glean-core/src/lib_unit_tests.rs | 4
-rw-r--r--  third_party/rust/glean-core/src/metrics/boolean.rs | 12
-rw-r--r--  third_party/rust/glean-core/src/metrics/counter.rs | 12
-rw-r--r--  third_party/rust/glean-core/src/metrics/custom_distribution.rs | 38
-rw-r--r--  third_party/rust/glean-core/src/metrics/datetime.rs | 10
-rw-r--r--  third_party/rust/glean-core/src/metrics/denominator.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/event.rs | 7
-rw-r--r--  third_party/rust/glean-core/src/metrics/experiment.rs | 9
-rw-r--r--  third_party/rust/glean-core/src/metrics/labeled.rs | 2
-rw-r--r--  third_party/rust/glean-core/src/metrics/memory_distribution.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/mod.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/numerator.rs | 8
-rw-r--r--  third_party/rust/glean-core/src/metrics/object.rs | 135
-rw-r--r--  third_party/rust/glean-core/src/metrics/ping.rs | 28
-rw-r--r--  third_party/rust/glean-core/src/metrics/quantity.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/rate.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/string.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/string_list.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/text.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/timespan.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/timing_distribution.rs | 43
-rw-r--r--  third_party/rust/glean-core/src/metrics/url.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/metrics/uuid.rs | 11
-rw-r--r--  third_party/rust/glean-core/src/ping/mod.rs | 44
-rw-r--r--  third_party/rust/glean-core/src/traits/custom_distribution.rs | 16
-rw-r--r--  third_party/rust/glean-core/src/traits/mod.rs | 6
-rw-r--r--  third_party/rust/glean-core/src/traits/object.rs | 53
-rw-r--r--  third_party/rust/glean-core/src/traits/timing_distribution.rs | 25
-rw-r--r--  third_party/rust/glean-core/src/upload/directory.rs | 78
-rw-r--r--  third_party/rust/glean-core/src/upload/mod.rs | 343
-rw-r--r--  third_party/rust/glean-core/src/upload/request.rs | 37
-rw-r--r--  third_party/rust/glean-core/tests/custom_distribution.rs | 20
-rw-r--r--  third_party/rust/glean-core/tests/event.rs | 2
-rw-r--r--  third_party/rust/glean-core/tests/object.rs | 104
-rw-r--r--  third_party/rust/glean-core/tests/ping.rs | 14
-rw-r--r--  third_party/rust/glean-core/tests/ping_maker.rs | 26
-rw-r--r--  third_party/rust/glean-core/tests/timing_distribution.rs | 6
-rw-r--r--  third_party/rust/glean/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/glean/Cargo.toml | 34
-rw-r--r--  third_party/rust/glean/src/common_test.rs | 1
-rw-r--r--  third_party/rust/glean/src/configuration.rs | 2
-rw-r--r--  third_party/rust/glean/src/lib.rs | 2
-rw-r--r--  third_party/rust/glean/src/net/http_uploader.rs | 11
-rw-r--r--  third_party/rust/glean/src/net/mod.rs | 25
-rw-r--r--  third_party/rust/glean/src/private/mod.rs | 2
-rw-r--r--  third_party/rust/glean/src/private/object.rs | 192
-rw-r--r--  third_party/rust/glean/src/private/ping.rs | 2
-rw-r--r--  third_party/rust/glean/src/test.rs | 215
-rw-r--r--  third_party/rust/glean/tests/init_fails.rs | 2
-rw-r--r--  third_party/rust/glean/tests/never_init.rs | 2
-rw-r--r--  third_party/rust/glean/tests/no_time_to_init.rs | 2
-rw-r--r--  third_party/rust/glean/tests/schema.rs | 13
-rw-r--r--  third_party/rust/glean/tests/simple.rs | 2
-rw-r--r--  third_party/rust/glean/tests/upload_timing.rs | 10
-rw-r--r--  third_party/rust/glslopt/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/glslopt/Cargo.toml | 21
-rw-r--r--  third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h | 49
-rw-r--r--  third_party/rust/glslopt/glsl-optimizer/include/c99_math.h | 211
-rw-r--r--  third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp | 2
-rw-r--r--  third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h | 3
-rw-r--r--  third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h | 3
-rw-r--r--  third_party/rust/litrs/.cargo-checksum.json | 1
-rw-r--r--  third_party/rust/litrs/CHANGELOG.md | 103
-rw-r--r--  third_party/rust/litrs/Cargo.toml | 51
-rw-r--r--  third_party/rust/litrs/LICENSE-APACHE | 176
-rw-r--r--  third_party/rust/litrs/LICENSE-MIT | 25
-rw-r--r--  third_party/rust/litrs/README.md | 88
-rw-r--r--  third_party/rust/litrs/src/bool/mod.rs | 55
-rw-r--r--  third_party/rust/litrs/src/bool/tests.rs | 48
-rw-r--r--  third_party/rust/litrs/src/byte/mod.rs | 107
-rw-r--r--  third_party/rust/litrs/src/byte/tests.rs | 188
-rw-r--r--  third_party/rust/litrs/src/bytestr/mod.rs | 126
-rw-r--r--  third_party/rust/litrs/src/bytestr/tests.rs | 224
-rw-r--r--  third_party/rust/litrs/src/char/mod.rs | 105
-rw-r--r--  third_party/rust/litrs/src/char/tests.rs | 227
-rw-r--r--  third_party/rust/litrs/src/err.rs | 371
-rw-r--r--  third_party/rust/litrs/src/escape.rs | 262
-rw-r--r--  third_party/rust/litrs/src/float/mod.rs | 257
-rw-r--r--  third_party/rust/litrs/src/float/tests.rs | 253
-rw-r--r--  third_party/rust/litrs/src/impls.rs | 401
-rw-r--r--  third_party/rust/litrs/src/integer/mod.rs | 356
-rw-r--r--  third_party/rust/litrs/src/integer/tests.rs | 357
-rw-r--r--  third_party/rust/litrs/src/lib.rs | 370
-rw-r--r--  third_party/rust/litrs/src/parse.rs | 125
-rw-r--r--  third_party/rust/litrs/src/string/mod.rs | 125
-rw-r--r--  third_party/rust/litrs/src/string/tests.rs | 278
-rw-r--r--  third_party/rust/litrs/src/test_util.rs | 128
-rw-r--r--  third_party/rust/litrs/src/tests.rs | 349
-rw-r--r--  third_party/rust/naga/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/naga/Cargo.toml | 4
-rw-r--r--  third_party/rust/naga/src/back/glsl/features.rs | 53
-rw-r--r--  third_party/rust/naga/src/back/glsl/mod.rs | 194
-rw-r--r--  third_party/rust/naga/src/back/hlsl/conv.rs | 12
-rw-r--r--  third_party/rust/naga/src/back/hlsl/help.rs | 150
-rw-r--r--  third_party/rust/naga/src/back/hlsl/keywords.rs | 2
-rw-r--r--  third_party/rust/naga/src/back/hlsl/mod.rs | 2
-rw-r--r--  third_party/rust/naga/src/back/hlsl/storage.rs | 84
-rw-r--r--  third_party/rust/naga/src/back/hlsl/writer.rs | 183
-rw-r--r--  third_party/rust/naga/src/back/msl/keywords.rs | 2
-rw-r--r--  third_party/rust/naga/src/back/msl/mod.rs | 15
-rw-r--r--  third_party/rust/naga/src/back/msl/writer.rs | 200
-rw-r--r--  third_party/rust/naga/src/back/spv/block.rs | 242
-rw-r--r--  third_party/rust/naga/src/back/spv/writer.rs | 3
-rw-r--r--  third_party/rust/naga/src/back/wgsl/writer.rs | 25
-rw-r--r--  third_party/rust/naga/src/front/glsl/functions.rs | 2
-rw-r--r--  third_party/rust/naga/src/front/glsl/parser/functions.rs | 2
-rw-r--r--  third_party/rust/naga/src/front/spv/function.rs | 464
-rw-r--r--  third_party/rust/naga/src/front/spv/mod.rs | 68
-rw-r--r--  third_party/rust/naga/src/front/wgsl/error.rs | 2
-rw-r--r--  third_party/rust/naga/src/front/wgsl/lower/mod.rs | 2
-rw-r--r--  third_party/rust/naga/src/front/wgsl/parse/conv.rs | 8
-rw-r--r--  third_party/rust/naga/src/front/wgsl/parse/number.rs | 16
-rw-r--r--  third_party/rust/naga/src/front/wgsl/tests.rs | 1
-rw-r--r--  third_party/rust/naga/src/keywords/wgsl.rs | 2
-rw-r--r--  third_party/rust/naga/src/lib.rs | 27
-rw-r--r--  third_party/rust/naga/src/proc/constant_evaluator.rs | 242
-rw-r--r--  third_party/rust/naga/src/proc/mod.rs | 19
-rw-r--r--  third_party/rust/naga/src/valid/expression.rs | 65
-rw-r--r--  third_party/rust/naga/src/valid/mod.rs | 4
-rw-r--r--  third_party/rust/naga/src/valid/type.rs | 44
-rw-r--r--  third_party/rust/neqo-common/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/neqo-common/Cargo.toml | 52
-rw-r--r--  third_party/rust/neqo-common/build.rs | 6
-rw-r--r--  third_party/rust/neqo-common/src/codec.rs | 6
-rw-r--r--  third_party/rust/neqo-common/src/datagram.rs | 6
-rw-r--r--  third_party/rust/neqo-common/src/event.rs | 2
-rw-r--r--  third_party/rust/neqo-common/src/hrtime.rs | 5
-rw-r--r--  third_party/rust/neqo-common/src/lib.rs | 5
-rw-r--r--  third_party/rust/neqo-common/src/log.rs | 24
-rw-r--r--  third_party/rust/neqo-common/src/qlog.rs | 3
-rw-r--r--  third_party/rust/neqo-common/src/timer.rs | 62
-rw-r--r--  third_party/rust/neqo-common/src/tos.rs | 37
-rw-r--r--  third_party/rust/neqo-common/src/udp.rs | 222
-rw-r--r--  third_party/rust/neqo-common/tests/log.rs | 3
-rw-r--r--  third_party/rust/neqo-crypto/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/neqo-crypto/Cargo.toml | 40
-rw-r--r--  third_party/rust/neqo-crypto/build.rs | 22
-rw-r--r--  third_party/rust/neqo-crypto/src/aead.rs | 1
-rw-r--r--  third_party/rust/neqo-crypto/src/agent.rs | 12
-rw-r--r--  third_party/rust/neqo-crypto/src/agentio.rs | 9
-rw-r--r--  third_party/rust/neqo-crypto/src/cert.rs | 18
-rw-r--r--  third_party/rust/neqo-crypto/src/ech.rs | 10
-rw-r--r--  third_party/rust/neqo-crypto/src/ext.rs | 4
-rw-r--r--  third_party/rust/neqo-crypto/src/hkdf.rs | 23
-rw-r--r--  third_party/rust/neqo-crypto/src/hp.rs | 8
-rw-r--r--  third_party/rust/neqo-crypto/src/lib.rs | 132
-rw-r--r--  third_party/rust/neqo-crypto/src/once.rs | 44
-rw-r--r--  third_party/rust/neqo-crypto/src/p11.rs | 114
-rw-r--r--  third_party/rust/neqo-crypto/src/replay.rs | 1
-rw-r--r--  third_party/rust/neqo-crypto/src/selfencrypt.rs | 2
-rw-r--r--  third_party/rust/neqo-crypto/src/time.rs | 74
-rw-r--r--  third_party/rust/neqo-crypto/tests/aead.rs | 7
-rw-r--r--  third_party/rust/neqo-crypto/tests/agent.rs | 9
-rw-r--r--  third_party/rust/neqo-crypto/tests/ext.rs | 7
-rw-r--r--  third_party/rust/neqo-crypto/tests/handshake.rs | 9
-rw-r--r--  third_party/rust/neqo-crypto/tests/hkdf.rs | 7
-rw-r--r--  third_party/rust/neqo-crypto/tests/hp.rs | 7
-rw-r--r--  third_party/rust/neqo-crypto/tests/init.rs | 7
-rw-r--r--  third_party/rust/neqo-crypto/tests/selfencrypt.rs | 8
-rw-r--r--  third_party/rust/neqo-http3/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/neqo-http3/Cargo.toml | 45
-rw-r--r--  third_party/rust/neqo-http3/src/client_events.rs | 6
-rw-r--r--  third_party/rust/neqo-http3/src/connection.rs | 3
-rw-r--r--  third_party/rust/neqo-http3/src/connection_client.rs | 33
-rw-r--r--  third_party/rust/neqo-http3/src/control_stream_local.rs | 5
-rw-r--r--  third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs | 2
-rw-r--r--  third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs | 6
-rw-r--r--  third_party/rust/neqo-http3/src/features/extended_connect/webtransport_session.rs | 20
-rw-r--r--  third_party/rust/neqo-http3/src/features/extended_connect/webtransport_streams.rs | 10
-rw-r--r--  third_party/rust/neqo-http3/src/frames/hframe.rs | 7
-rw-r--r--  third_party/rust/neqo-http3/src/frames/reader.rs | 2
-rw-r--r--  third_party/rust/neqo-http3/src/frames/wtframe.rs | 2
-rw-r--r--  third_party/rust/neqo-http3/src/headers_checks.rs | 2
-rw-r--r--  third_party/rust/neqo-http3/src/lib.rs | 29
-rw-r--r--  third_party/rust/neqo-http3/src/priority.rs | 8
-rw-r--r--  third_party/rust/neqo-http3/src/push_controller.rs | 2
-rw-r--r--  third_party/rust/neqo-http3/src/qlog.rs | 7
-rw-r--r--  third_party/rust/neqo-http3/src/recv_message.rs | 8
-rw-r--r--  third_party/rust/neqo-http3/src/send_message.rs | 18
-rw-r--r--  third_party/rust/neqo-http3/src/server.rs | 6
-rw-r--r--  third_party/rust/neqo-http3/src/server_events.rs | 1
-rw-r--r--  third_party/rust/neqo-http3/tests/priority.rs | 6
-rw-r--r--  third_party/rust/neqo-http3/tests/send_message.rs | 31
-rw-r--r--  third_party/rust/neqo-http3/tests/webtransport.rs | 6
-rw-r--r--  third_party/rust/neqo-qpack/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/neqo-qpack/Cargo.toml | 30
-rw-r--r--  third_party/rust/neqo-qpack/src/decoder.rs | 4
-rw-r--r--  third_party/rust/neqo-qpack/src/encoder.rs | 5
-rw-r--r--  third_party/rust/neqo-qpack/src/huffman.rs | 6
-rw-r--r--  third_party/rust/neqo-qpack/src/huffman_decode_helper.rs | 9
-rw-r--r--  third_party/rust/neqo-qpack/src/lib.rs | 6
-rw-r--r--  third_party/rust/neqo-qpack/src/qpack_send_buf.rs | 2
-rw-r--r--  third_party/rust/neqo-qpack/src/reader.rs | 19
-rw-r--r--  third_party/rust/neqo-qpack/src/table.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/neqo-transport/Cargo.toml | 59
-rw-r--r--  third_party/rust/neqo-transport/benches/range_tracker.rs | 50
-rw-r--r--  third_party/rust/neqo-transport/benches/rx_stream_orderer.rs | 6
-rw-r--r--  third_party/rust/neqo-transport/benches/transfer.rs | 70
-rw-r--r--  third_party/rust/neqo-transport/src/ackrate.rs | 3
-rw-r--r--  third_party/rust/neqo-transport/src/addr_valid.rs | 46
-rw-r--r--  third_party/rust/neqo-transport/src/cc/classic_cc.rs | 6
-rw-r--r--  third_party/rust/neqo-transport/src/cc/cubic.rs | 3
-rw-r--r--  third_party/rust/neqo-transport/src/cc/mod.rs | 1
-rw-r--r--  third_party/rust/neqo-transport/src/cc/new_reno.rs | 1
-rw-r--r--  third_party/rust/neqo-transport/src/cc/tests/cubic.rs | 1
-rw-r--r--  third_party/rust/neqo-transport/src/cc/tests/mod.rs | 1
-rw-r--r--  third_party/rust/neqo-transport/src/cc/tests/new_reno.rs | 1
-rw-r--r--  third_party/rust/neqo-transport/src/cid.rs | 98
-rw-r--r--  third_party/rust/neqo-transport/src/connection/dump.rs | 4
-rw-r--r--  third_party/rust/neqo-transport/src/connection/mod.rs | 210
-rw-r--r--  third_party/rust/neqo-transport/src/connection/params.rs | 39
-rw-r--r--  third_party/rust/neqo-transport/src/connection/state.rs | 19
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/ackrate.rs | 4
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/cc.rs | 4
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/close.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/datagram.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/fuzzing.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/handshake.rs | 70
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/idle.rs | 34
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/keys.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/migration.rs | 108
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/mod.rs | 13
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/priority.rs | 4
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/resumption.rs | 8
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/stream.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/vn.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/connection/tests/zerortt.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/crypto.rs | 34
-rw-r--r--  third_party/rust/neqo-transport/src/events.rs | 9
-rw-r--r--  third_party/rust/neqo-transport/src/fc.rs | 3
-rw-r--r--  third_party/rust/neqo-transport/src/frame.rs | 14
-rw-r--r--  third_party/rust/neqo-transport/src/lib.rs | 8
-rw-r--r--  third_party/rust/neqo-transport/src/pace.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/packet/mod.rs | 41
-rw-r--r--  third_party/rust/neqo-transport/src/packet/retry.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/path.rs | 12
-rw-r--r--  third_party/rust/neqo-transport/src/qlog.rs | 10
-rw-r--r--  third_party/rust/neqo-transport/src/quic_datagrams.rs | 4
-rw-r--r--  third_party/rust/neqo-transport/src/recovery.rs | 15
-rw-r--r--  third_party/rust/neqo-transport/src/recv_stream.rs | 82
-rw-r--r--  third_party/rust/neqo-transport/src/rtt.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/send_stream.rs | 904
-rw-r--r--  third_party/rust/neqo-transport/src/sender.rs | 2
-rw-r--r--  third_party/rust/neqo-transport/src/server.rs | 59
-rw-r--r--  third_party/rust/neqo-transport/src/stats.rs | 1
-rw-r--r--  third_party/rust/neqo-transport/src/stream_id.rs | 14
-rw-r--r--  third_party/rust/neqo-transport/src/streams.rs | 49
-rw-r--r--  third_party/rust/neqo-transport/src/tparams.rs | 80
-rw-r--r--  third_party/rust/neqo-transport/src/tracking.rs | 166
-rw-r--r--  third_party/rust/neqo-transport/src/version.rs | 19
-rw-r--r--  third_party/rust/neqo-transport/tests/common/mod.rs | 6
-rw-r--r--  third_party/rust/neqo-transport/tests/conn_vectors.rs | 4
-rw-r--r--  third_party/rust/neqo-transport/tests/connection.rs | 8
-rw-r--r--  third_party/rust/neqo-transport/tests/network.rs | 117
-rw-r--r--  third_party/rust/neqo-transport/tests/retry.rs | 5
-rw-r--r--  third_party/rust/neqo-transport/tests/server.rs | 7
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/connection.rs | 315
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/delay.rs | 102
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/drop.rs | 75
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/mod.rs | 232
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/net.rs | 111
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/rng.rs | 81
-rw-r--r--  third_party/rust/neqo-transport/tests/sim/taildrop.rs | 188
-rw-r--r--  third_party/rust/qlog/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/qlog/Cargo.toml | 8
-rw-r--r--  third_party/rust/qlog/src/events/h3.rs | 3
-rw-r--r--  third_party/rust/serde/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/serde/Cargo.toml | 4
-rw-r--r--  third_party/rust/serde/README.md | 6
-rw-r--r--  third_party/rust/serde/src/de/mod.rs | 61
-rw-r--r--  third_party/rust/serde/src/de/value.rs | 4
-rw-r--r--  third_party/rust/serde/src/lib.rs | 15
-rw-r--r--  third_party/rust/serde_derive/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/serde_derive/Cargo.toml | 14
-rw-r--r--  third_party/rust/serde_derive/README.md | 6
-rw-r--r--  third_party/rust/serde_derive/src/lib.rs | 2
-rw-r--r--  third_party/rust/smallvec/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/smallvec/Cargo.toml | 2
-rw-r--r--  third_party/rust/smallvec/benches/bench.rs | 4
-rw-r--r--  third_party/rust/smallvec/src/lib.rs | 66
-rw-r--r--  third_party/rust/smallvec/src/tests.rs | 21
-rw-r--r--  third_party/rust/thiserror-impl/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/thiserror-impl/Cargo.toml | 2
-rw-r--r--  third_party/rust/thiserror-impl/src/attr.rs | 34
-rw-r--r--  third_party/rust/thiserror-impl/src/fmt.rs | 3
-rw-r--r--  third_party/rust/thiserror/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/thiserror/Cargo.toml | 4
-rw-r--r--  third_party/rust/thiserror/src/lib.rs | 2
-rw-r--r--  third_party/rust/thiserror/tests/test_display.rs | 57
-rw-r--r--  third_party/rust/thiserror/tests/ui/no-display.stderr | 3
-rw-r--r--  third_party/rust/thiserror/tests/ui/source-enum-not-error.stderr | 8
-rw-r--r--  third_party/rust/thiserror/tests/ui/source-enum-unnamed-field-not-error.stderr | 8
-rw-r--r--  third_party/rust/thiserror/tests/ui/source-struct-not-error.stderr | 9
-rw-r--r--  third_party/rust/thiserror/tests/ui/source-struct-unnamed-field-not-error.stderr | 9
-rw-r--r--  third_party/rust/thiserror/tests/ui/transparent-enum-not-error.stderr | 5
-rw-r--r--  third_party/rust/thiserror/tests/ui/transparent-enum-unnamed-field-not-error.stderr | 5
-rw-r--r--  third_party/rust/thiserror/tests/ui/transparent-struct-not-error.stderr | 5
-rw-r--r--  third_party/rust/thiserror/tests/ui/transparent-struct-unnamed-field-not-error.stderr | 5
-rw-r--r--  third_party/rust/unicode-bidi/.appveyor.yml | 19
-rw-r--r--  third_party/rust/unicode-bidi/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/unicode-bidi/.github/workflows/main.yml | 49
-rw-r--r--  third_party/rust/unicode-bidi/.rustfmt.toml | 1
-rw-r--r--  third_party/rust/unicode-bidi/Cargo.lock | 175
-rw-r--r--  third_party/rust/unicode-bidi/Cargo.toml | 10
-rw-r--r--  third_party/rust/unicode-bidi/src/char_data/mod.rs | 5
-rw-r--r--  third_party/rust/unicode-bidi/src/char_data/tables.rs | 4
-rw-r--r--  third_party/rust/unicode-bidi/src/deprecated.rs | 9
-rw-r--r--  third_party/rust/unicode-bidi/src/explicit.rs | 129
-rw-r--r--  third_party/rust/unicode-bidi/src/implicit.rs | 93
-rw-r--r--  third_party/rust/unicode-bidi/src/level.rs | 15
-rw-r--r--  third_party/rust/unicode-bidi/src/lib.rs | 129
-rw-r--r--  third_party/rust/unicode-bidi/src/prepare.rs | 266
-rw-r--r--  third_party/rust/unicode-bidi/src/utf16.rs | 36
-rw-r--r--  third_party/rust/wgpu-core/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/wgpu-core/Cargo.toml | 1
-rw-r--r--  third_party/rust/wgpu-core/src/binding_model.rs | 26
-rw-r--r--  third_party/rust/wgpu-core/src/command/bundle.rs | 294
-rw-r--r--  third_party/rust/wgpu-core/src/command/clear.rs | 45
-rw-r--r--  third_party/rust/wgpu-core/src/command/compute.rs | 35
-rw-r--r--  third_party/rust/wgpu-core/src/command/draw.rs | 120
-rw-r--r--  third_party/rust/wgpu-core/src/command/mod.rs | 5
-rw-r--r--  third_party/rust/wgpu-core/src/command/query.rs | 9
-rw-r--r--  third_party/rust/wgpu-core/src/command/render.rs | 54
-rw-r--r--  third_party/rust/wgpu-core/src/device/global.rs | 245
-rw-r--r--  third_party/rust/wgpu-core/src/device/life.rs | 129
-rw-r--r--  third_party/rust/wgpu-core/src/device/queue.rs | 125
-rw-r--r--  third_party/rust/wgpu-core/src/device/resource.rs | 204
-rw-r--r--  third_party/rust/wgpu-core/src/id.rs | 2
-rw-r--r--  third_party/rust/wgpu-core/src/identity.rs | 63
-rw-r--r--  third_party/rust/wgpu-core/src/instance.rs | 22
-rw-r--r--  third_party/rust/wgpu-core/src/lib.rs | 30
-rw-r--r--  third_party/rust/wgpu-core/src/pipeline.rs | 23
-rw-r--r--  third_party/rust/wgpu-core/src/present.rs | 43
-rw-r--r--  third_party/rust/wgpu-core/src/registry.rs | 9
-rw-r--r--  third_party/rust/wgpu-core/src/resource.rs | 38
-rw-r--r--  third_party/rust/wgpu-core/src/track/buffer.rs | 65
-rw-r--r--  third_party/rust/wgpu-core/src/track/metadata.rs | 11
-rw-r--r--  third_party/rust/wgpu-core/src/track/mod.rs | 218
-rw-r--r--  third_party/rust/wgpu-core/src/track/stateless.rs | 43
-rw-r--r--  third_party/rust/wgpu-core/src/track/texture.rs | 65
-rw-r--r--  third_party/rust/wgpu-core/src/validation.rs | 115
-rw-r--r--  third_party/rust/wgpu-hal/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/wgpu-hal/Cargo.toml | 14
-rw-r--r--  third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs | 18
-rw-r--r--  third_party/rust/wgpu-hal/src/dx12/adapter.rs | 25
-rw-r--r--  third_party/rust/wgpu-hal/src/dx12/command.rs | 7
-rw-r--r--  third_party/rust/wgpu-hal/src/dx12/device.rs | 6
-rw-r--r--  third_party/rust/wgpu-hal/src/dx12/mod.rs | 3
-rw-r--r--  third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs | 4
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/adapter.rs | 73
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/command.rs | 7
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/device.rs | 5
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/egl.rs | 69
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/mod.rs | 53
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/queue.rs | 8
-rw-r--r--  third_party/rust/wgpu-hal/src/gles/wgl.rs | 3
-rw-r--r--  third_party/rust/wgpu-hal/src/lib.rs | 97
-rw-r--r--  third_party/rust/wgpu-hal/src/metal/adapter.rs | 16
-rw-r--r--  third_party/rust/wgpu-hal/src/metal/mod.rs | 1
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/adapter.rs | 18
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/instance.rs | 183
-rw-r--r--  third_party/rust/wgpu-hal/src/vulkan/mod.rs | 34
-rw-r--r--  third_party/rust/wgpu-types/.cargo-checksum.json | 2
-rw-r--r--  third_party/rust/wgpu-types/Cargo.toml | 2
-rw-r--r--  third_party/rust/wgpu-types/src/lib.rs | 409
415 files changed, 18342 insertions, 7974 deletions
diff --git a/third_party/rust/aa-stroke/.cargo-checksum.json b/third_party/rust/aa-stroke/.cargo-checksum.json
index 40f73c9547..7fd07daa0a 100644
--- a/third_party/rust/aa-stroke/.cargo-checksum.json
+++ b/third_party/rust/aa-stroke/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{".github/workflows/rust.yml":"6a9f1b122ea02367a2f1ff1fc7b9a728284ceb47fad12e1610cde9d760f4efc3","Cargo.toml":"f507cac11c3c26af28420d68ec3748a5453322d51ef1379a340fdd3b1c9b187a","README.md":"60b34cfa653114d5054009696df2ed2ea1d4926a6bc312d0cac4b84845c2beff","examples/simple.rs":"c196e79568fe4be31a08374aa451c70c9377db5428aef924a985e069c12ed91e","src/bezierflattener.rs":"61687da22490cb1bd901d0b5eb1de3a98802b46c03719ded4163c7a4997f0ad9","src/c_bindings.rs":"06225ddd132ae959eda1b445f4e375cead4d8e135c5cba81e828815fe6a5e88b","src/lib.rs":"fc7990e62434f3143b5162aba85ea828ceab51447c5fad5e26e8c6b06ec77050","src/tri_rasterize.rs":"fb6f595ab9340d8ea6429b41638c378bbd772c8e4d8f7793e225624c12cd3a21"},"package":null} \ No newline at end of file
+{"files":{".github/workflows/rust.yml":"6a9f1b122ea02367a2f1ff1fc7b9a728284ceb47fad12e1610cde9d760f4efc3","Cargo.toml":"f507cac11c3c26af28420d68ec3748a5453322d51ef1379a340fdd3b1c9b187a","README.md":"60b34cfa653114d5054009696df2ed2ea1d4926a6bc312d0cac4b84845c2beff","examples/simple.rs":"c196e79568fe4be31a08374aa451c70c9377db5428aef924a985e069c12ed91e","src/bezierflattener.rs":"c7183a850d51525db4389d5c0badb76e1d8c4110697bfa51ef746fda6a858bb9","src/c_bindings.rs":"06225ddd132ae959eda1b445f4e375cead4d8e135c5cba81e828815fe6a5e88b","src/lib.rs":"3009746efe5f6753cd999258077a4baea30a740190e7a8ccaec0d78f4719fdfb","src/tri_rasterize.rs":"fb6f595ab9340d8ea6429b41638c378bbd772c8e4d8f7793e225624c12cd3a21"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/aa-stroke/src/bezierflattener.rs b/third_party/rust/aa-stroke/src/bezierflattener.rs
index 1a615941b6..ab2f96e4a8 100644
--- a/third_party/rust/aa-stroke/src/bezierflattener.rs
+++ b/third_party/rust/aa-stroke/src/bezierflattener.rs
@@ -16,8 +16,8 @@ pub type HRESULT = i32;
pub const S_OK: i32 = 0;
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct GpPointR {
- pub x: f64,
- pub y: f64
+ pub x: f32,
+ pub y: f32
}
impl Sub for GpPointR {
@@ -48,32 +48,32 @@ impl SubAssign for GpPointR {
}
}
-impl MulAssign<f64> for GpPointR {
- fn mul_assign(&mut self, rhs: f64) {
+impl MulAssign<f32> for GpPointR {
+ fn mul_assign(&mut self, rhs: f32) {
*self = *self * rhs;
}
}
-impl Mul<f64> for GpPointR {
+impl Mul<f32> for GpPointR {
type Output = Self;
- fn mul(self, rhs: f64) -> Self::Output {
+ fn mul(self, rhs: f32) -> Self::Output {
GpPointR { x: self.x * rhs, y: self.y * rhs }
}
}
-impl Div<f64> for GpPointR {
+impl Div<f32> for GpPointR {
type Output = Self;
- fn div(self, rhs: f64) -> Self::Output {
+ fn div(self, rhs: f32) -> Self::Output {
GpPointR { x: self.x / rhs, y: self.y / rhs }
}
}
impl Mul for GpPointR {
- type Output = f64;
+ type Output = f32;
fn mul(self, rhs: Self) -> Self::Output {
self.x * rhs.x + self.y * rhs.y
@@ -81,17 +81,17 @@ impl Mul for GpPointR {
}
impl GpPointR {
- pub fn ApproxNorm(&self) -> f64 {
+ pub fn ApproxNorm(&self) -> f32 {
self.x.abs().max(self.y.abs())
}
- pub fn Norm(&self) -> f64 {
+ pub fn Norm(&self) -> f32 {
self.x.hypot(self.y)
}
}
// Relative to this is relative to the tolerance squared. In other words, a vector
// whose length is less than .01*tolerance will be considered 0
-const SQ_LENGTH_FUZZ: f64 = 1.0e-4;
+const SQ_LENGTH_FUZZ: f32 = 1.0e-4;
// Some of these constants need further thinking
@@ -103,7 +103,7 @@ const SQ_LENGTH_FUZZ: f64 = 1.0e-4;
const FUZZ_DOUBLE: f64 = 1.0e-12; // Double-precision relative 0
const MIN_TOLERANCE: f64 = 1.0e-6;
const DEFAULT_FLATTENING_TOLERANCE: f64 = 0.25;*/
-const TWICE_MIN_BEZIER_STEP_SIZE: f64 = 1.0e-3; // The step size in the Bezier flattener should
+const TWICE_MIN_BEZIER_STEP_SIZE: f32 = 1.0e-3; // The step size in the Bezier flattener should
// never go below half this amount.
//+-----------------------------------------------------------------------------
//
@@ -318,7 +318,7 @@ pub trait CFlatteningSink {
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
- t: f64,
+ t: f32,
// Parameter we're at
fAborted: &mut bool,
lastPoint: bool
@@ -339,16 +339,16 @@ pub struct CBezierFlattener<'a>
bezier: CBezier,
// Flattening defining data
m_pSink: &'a mut dyn CFlatteningSink, // The recipient of the flattening data
- m_rTolerance: f64, // Prescribed tolerance
+ m_rTolerance: f32, // Prescribed tolerance
m_fWithTangents: bool, // Generate tangent vectors if true
- m_rQuarterTolerance: f64,// Prescribed tolerance/4 (for doubling the step)
- m_rFuzz: f64, // Computational zero
+ m_rQuarterTolerance: f32,// Prescribed tolerance/4 (for doubling the step)
+ m_rFuzz: f32, // Computational zero
// Flattening working data
m_ptE: [GpPointR; 4], // The moving basis of the curve definition
m_cSteps: i32, // The number of steps left to the end of the curve
- m_rParameter: f64, // Parameter value
- m_rStepSize: f64, // Steps size in parameter domain
+ m_rParameter: f32, // Parameter value
+ m_rStepSize: f32, // Steps size in parameter domain
}
impl<'a> CBezierFlattener<'a> {
/*fn new(
@@ -449,7 +449,7 @@ impl<'a> CBezierFlattener<'a> {
pub fn new(bezier: &CBezier,
pSink: &'a mut dyn CFlatteningSink,
// The reciptient of the flattened data
- rTolerance: f64) // Flattening tolerance
+ rTolerance: f32) // Flattening tolerance
-> Self
{
let mut result = CBezierFlattener {
diff --git a/third_party/rust/aa-stroke/src/lib.rs b/third_party/rust/aa-stroke/src/lib.rs
index 38c47312ec..92fcf3bf47 100644
--- a/third_party/rust/aa-stroke/src/lib.rs
+++ b/third_party/rust/aa-stroke/src/lib.rs
@@ -230,8 +230,8 @@ fn arc_segment_tri(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vec
let h = (4. / 3.) * dot(perp(a), mid2) / dot(a, mid2);
- let last_point = GpPointR { x: (xc + r_cos_a) as f64, y: (yc + r_sin_a) as f64 };
- let initial_normal = GpPointR { x: a.x as f64, y: a.y as f64 };
+ let last_point = GpPointR { x: (xc + r_cos_a), y: (yc + r_sin_a) };
+ let initial_normal = GpPointR { x: a.x, y: a.y };
struct Target<'a, 'b> { last_point: GpPointR, last_normal: GpPointR, xc: f32, yc: f32, path: &'a mut PathBuilder<'b> }
@@ -253,24 +253,24 @@ fn arc_segment_tri(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vec
let width = 0.5;
self.path.ramp(
- (pt.x - normal.x * width) as f32,
- (pt.y - normal.y * width) as f32,
- (pt.x + normal.x * width) as f32,
- (pt.y + normal.y * width) as f32,
- (self.last_point.x + self.last_normal.x * width) as f32,
- (self.last_point.y + self.last_normal.y * width) as f32,
- (self.last_point.x - self.last_normal.x * width) as f32,
- (self.last_point.y - self.last_normal.y * width) as f32, );
+ pt.x - normal.x * width,
+ pt.y - normal.y * width,
+ pt.x + normal.x * width,
+ pt.y + normal.y * width,
+ self.last_point.x + self.last_normal.x * width,
+ self.last_point.y + self.last_normal.y * width,
+ self.last_point.x - self.last_normal.x * width,
+ self.last_point.y - self.last_normal.y * width, );
self.path.push_tri(
- (self.last_point.x - self.last_normal.x * 0.5) as f32,
- (self.last_point.y - self.last_normal.y * 0.5) as f32,
- (pt.x - normal.x * 0.5) as f32,
- (pt.y - normal.y * 0.5) as f32,
+ self.last_point.x - self.last_normal.x * 0.5,
+ self.last_point.y - self.last_normal.y * 0.5,
+ pt.x - normal.x * 0.5,
+ pt.y - normal.y * 0.5,
self.xc, self.yc);
self.last_normal = normal;
} else {
- self.path.push_tri(self.last_point.x as f32, self.last_point.y as f32, pt.x as f32, pt.y as f32, self.xc, self.yc);
+ self.path.push_tri(self.last_point.x, self.last_point.y, pt.x, pt.y, self.xc, self.yc);
}
self.last_point = pt.clone();
return S_OK;
@@ -279,19 +279,19 @@ fn arc_segment_tri(path: &mut PathBuilder, xc: f32, yc: f32, radius: f32, a: Vec
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
- _t: f64,
+ _t: f32,
// Parameter we're at
_aborted: &mut bool,
_last_point: bool) -> HRESULT {
- self.path.push_tri(self.last_point.x as f32, self.last_point.y as f32, pt.x as f32, pt.y as f32, self.xc, self.yc);
+ self.path.push_tri(self.last_point.x, self.last_point.y, pt.x, pt.y, self.xc, self.yc);
self.last_point = pt.clone();
return S_OK;
}
}
- let bezier = CBezier::new([GpPointR { x: (xc + r_cos_a) as f64, y: (yc + r_sin_a) as f64, },
- GpPointR { x: (xc + r_cos_a - h * r_sin_a) as f64, y: (yc + r_sin_a + h * r_cos_a) as f64, },
- GpPointR { x: (xc + r_cos_b + h * r_sin_b) as f64, y: (yc + r_sin_b - h * r_cos_b) as f64, },
- GpPointR { x: (xc + r_cos_b) as f64, y: (yc + r_sin_b) as f64, }]);
+ let bezier = CBezier::new([GpPointR { x: (xc + r_cos_a), y: (yc + r_sin_a), },
+ GpPointR { x: (xc + r_cos_a - h * r_sin_a), y: (yc + r_sin_a + h * r_cos_a), },
+ GpPointR { x: (xc + r_cos_b + h * r_sin_b), y: (yc + r_sin_b - h * r_cos_b), },
+ GpPointR { x: (xc + r_cos_b), y: (yc + r_sin_b), }]);
if bezier.is_degenerate() {
return;
}
@@ -810,23 +810,23 @@ impl<'z> Stroker<'z> {
fn AcceptPoint(&mut self,
pt: &GpPointR,
// The point
- _t: f64,
+ _t: f32,
// Parameter we're at
_aborted: &mut bool,
last_point: bool) -> HRESULT {
if last_point && self.end {
- self.stroker.line_to_capped(Point::new(pt.x as f32, pt.y as f32));
+ self.stroker.line_to_capped(Point::new(pt.x, pt.y));
} else {
- self.stroker.line_to(Point::new(pt.x as f32, pt.y as f32));
+ self.stroker.line_to(Point::new(pt.x, pt.y));
}
return S_OK;
}
}
let cur_pt = self.cur_pt.unwrap_or(cx1);
- let bezier = CBezier::new([GpPointR { x: cur_pt.x as f64, y: cur_pt.y as f64, },
- GpPointR { x: cx1.x as f64, y: cx1.y as f64, },
- GpPointR { x: cx2.x as f64, y: cx2.y as f64, },
- GpPointR { x: pt.x as f64, y: pt.y as f64, }]);
+ let bezier = CBezier::new([GpPointR { x: cur_pt.x, y: cur_pt.y, },
+ GpPointR { x: cx1.x, y: cx1.y, },
+ GpPointR { x: cx2.x, y: cx2.y, },
+ GpPointR { x: pt.x, y: pt.y, }]);
let mut t = Target{ stroker: self, end };
let mut f = CBezierFlattener::new(&bezier, &mut t, 0.25);
f.Flatten(false);
diff --git a/third_party/rust/ahash/.cargo-checksum.json b/third_party/rust/ahash/.cargo-checksum.json
index 8c7a101059..d2fa45cb86 100644
--- a/third_party/rust/ahash/.cargo-checksum.json
+++ b/third_party/rust/ahash/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"31399b9babab52fabfe71f52ffe38fef0d3d894fdd5829f84a4e69b031f463c9","FAQ.md":"9eb41898523ee209a0a937f9bcb78afe45ad55ca0556f8a4d4063558098f6d1e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"72185284f100e149998fe5301f70489e617cc4415b51cc77e967c63c6e970a67","build.rs":"123b322034273b5aa1b4934d84c277fef279afd533ecfb44831a3715e9887fcd","rustfmt.toml":"e090969e99df9360705680cc0097cfaddae10c22dc2e01470592cf3b9787fd36","src/aes_hash.rs":"04483498a0d86e3ab099e22f734ea6fcac384b92f349bd310456856bf6a9a5e2","src/convert.rs":"f0e78840046493d0679a9ec077c8164cf57cf30d5e852b11bfadfdd996d29bd1","src/fallback_hash.rs":"ec00691bd555c69f7446afe893b6631cb84207cb7b512260dec8ef488e1905f3","src/hash_map.rs":"ed0c79c41c2218ad9591a585670a2b9b983807c9725880b780138a44c126cbfd","src/hash_quality_test.rs":"6c1fea90a38dc7b2ac5b2fa6e44a565e0c3385c72df6e87231401f920912d053","src/hash_set.rs":"dc3d33e290aad62457ab1f5e64d3e33eb79e28c9468bfc8686339f0bbd8b19aa","src/lib.rs":"9fec7d1d412e414231c9b929081b1daa7c3b788a9f91eedd79a55efdf5d0d291","src/operations.rs":"10772e65b8b7106f195428c5eb8dbf6cbd49dd5a2165ac750e54af5995210f88","src/random_state.rs":"b7981967ec5ecbff04d166b5e05c59c386062b4de3b36816f3c98ef284372f63","src/specialize.rs":"38d3b56ef4f264d564f48dbcb8ac137928babf90635090c9771c1a62140d1f30","tests/bench.rs":"0851dffebaffd7a437f6f9946ed5e03a957e9a6eb0da7911451af58778c411ec","tests/map_tests.rs":"e0f155f964dd965740b072ee1da110a8c6ef34491c95219f7c89064112c7840f","tests/nopanic.rs":"3363675c4c1a197b86604a0aebbe958fb5ec7c01a414fbfd70e9eb8a29707400"},"package":"fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47"} \ No newline at end of file
+{"files":{"Cargo.toml":"a519809f46c9aad28d6c48748f330fe31e05029d234b6d1743522ec403441744","FAQ.md":"9eb41898523ee209a0a937f9bcb78afe45ad55ca0556f8a4d4063558098f6d1e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"72185284f100e149998fe5301f70489e617cc4415b51cc77e967c63c6e970a67","build.rs":"f80cb1cdb731a63d16513f1f0b0871d9c077d2c6bf2312af40a202f9c0fefe49","rustfmt.toml":"e090969e99df9360705680cc0097cfaddae10c22dc2e01470592cf3b9787fd36","src/aes_hash.rs":"0b11ce066931396222d2bed7eff678fdd7c8351819485efb721f62a30551866b","src/convert.rs":"f0e78840046493d0679a9ec077c8164cf57cf30d5e852b11bfadfdd996d29bd1","src/fallback_hash.rs":"ec00691bd555c69f7446afe893b6631cb84207cb7b512260dec8ef488e1905f3","src/hash_map.rs":"ed0c79c41c2218ad9591a585670a2b9b983807c9725880b780138a44c126cbfd","src/hash_quality_test.rs":"61695e5cac46ea25021a9d04199fb00c513e0c0c9c0f67aca0c647b9d2f7dd5a","src/hash_set.rs":"dc3d33e290aad62457ab1f5e64d3e33eb79e28c9468bfc8686339f0bbd8b19aa","src/lib.rs":"9fec7d1d412e414231c9b929081b1daa7c3b788a9f91eedd79a55efdf5d0d291","src/operations.rs":"10772e65b8b7106f195428c5eb8dbf6cbd49dd5a2165ac750e54af5995210f88","src/random_state.rs":"ce9689147659efa975887debe1481daddca09386ea8e1d5b4ee90ebeda6c8745","src/specialize.rs":"38d3b56ef4f264d564f48dbcb8ac137928babf90635090c9771c1a62140d1f30","tests/bench.rs":"0851dffebaffd7a437f6f9946ed5e03a957e9a6eb0da7911451af58778c411ec","tests/map_tests.rs":"e0f155f964dd965740b072ee1da110a8c6ef34491c95219f7c89064112c7840f","tests/nopanic.rs":"3363675c4c1a197b86604a0aebbe958fb5ec7c01a414fbfd70e9eb8a29707400"},"package":"891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"} \ No newline at end of file
diff --git a/third_party/rust/ahash/Cargo.toml b/third_party/rust/ahash/Cargo.toml
index b412f79166..5830a500d9 100644
--- a/third_party/rust/ahash/Cargo.toml
+++ b/third_party/rust/ahash/Cargo.toml
@@ -3,43 +3,63 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "ahash"
-version = "0.7.6"
+version = "0.7.8"
authors = ["Tom Kaitchuck <Tom.Kaitchuck@gmail.com>"]
build = "./build.rs"
-exclude = ["/smhasher", "/benchmark_tools"]
+exclude = [
+ "/smhasher",
+ "/benchmark_tools",
+]
description = "A non-cryptographic hash function using AES-NI for high performance"
documentation = "https://docs.rs/ahash"
readme = "README.md"
-keywords = ["hash", "hasher", "hashmap", "aes", "no-std"]
-categories = ["algorithms", "data-structures", "no-std"]
+keywords = [
+ "hash",
+ "hasher",
+ "hashmap",
+ "aes",
+ "no-std",
+]
+categories = [
+ "algorithms",
+ "data-structures",
+ "no-std",
+]
license = "MIT OR Apache-2.0"
repository = "https://github.com/tkaitchuck/ahash"
+
[package.metadata.docs.rs]
features = ["std"]
-rustc-args = ["-C", "target-feature=+aes"]
-rustdoc-args = ["-C", "target-feature=+aes"]
+rustc-args = [
+ "-C",
+ "target-feature=+aes",
+]
+rustdoc-args = [
+ "-C",
+ "target-feature=+aes",
+]
+
[profile.bench]
opt-level = 3
lto = "fat"
codegen-units = 1
-debug = false
+debug = 0
debug-assertions = false
[profile.release]
opt-level = 3
lto = "fat"
codegen-units = 1
-debug = false
+debug = 0
debug-assertions = false
[profile.test]
@@ -63,6 +83,7 @@ harness = false
name = "map"
path = "tests/map_tests.rs"
harness = false
+
[dev-dependencies.criterion]
version = "0.3.2"
@@ -86,13 +107,19 @@ version = "4.0"
[dev-dependencies.serde_json]
version = "1.0.59"
+
[build-dependencies.version_check]
version = "0.9"
[features]
+atomic-polyfill = [
+ "dep:atomic-polyfill",
+ "once_cell/atomic-polyfill",
+]
compile-time-rng = ["const-random"]
default = ["std"]
std = []
+
[target."cfg(any(target_os = \"linux\", target_os = \"android\", target_os = \"windows\", target_os = \"macos\", target_os = \"ios\", target_os = \"freebsd\", target_os = \"openbsd\", target_os = \"netbsd\", target_os = \"dragonfly\", target_os = \"solaris\", target_os = \"illumos\", target_os = \"fuchsia\", target_os = \"redox\", target_os = \"cloudabi\", target_os = \"haiku\", target_os = \"vxworks\", target_os = \"emscripten\", target_os = \"wasi\"))".dependencies.const-random]
version = "0.1.12"
optional = true
@@ -103,10 +130,16 @@ version = "0.2.3"
[target."cfg(any(target_os = \"linux\", target_os = \"android\", target_os = \"windows\", target_os = \"macos\", target_os = \"ios\", target_os = \"freebsd\", target_os = \"openbsd\", target_os = \"netbsd\", target_os = \"dragonfly\", target_os = \"solaris\", target_os = \"illumos\", target_os = \"fuchsia\", target_os = \"redox\", target_os = \"cloudabi\", target_os = \"haiku\", target_os = \"vxworks\", target_os = \"emscripten\", target_os = \"wasi\"))".dependencies.serde]
version = "1.0.117"
optional = true
+
[target."cfg(not(all(target_arch = \"arm\", target_os = \"none\")))".dependencies.once_cell]
-version = "1.8"
+version = "1.13.1"
features = ["alloc"]
default-features = false
+
+[target."cfg(not(any(target_os = \"linux\", target_os = \"android\", target_os = \"windows\", target_os = \"macos\", target_os = \"ios\", target_os = \"freebsd\", target_os = \"openbsd\", target_os = \"netbsd\", target_os = \"dragonfly\", target_os = \"solaris\", target_os = \"illumos\", target_os = \"fuchsia\", target_os = \"redox\", target_os = \"cloudabi\", target_os = \"haiku\", target_os = \"vxworks\", target_os = \"emscripten\", target_os = \"wasi\")))".dependencies.atomic-polyfill]
+version = "1.0.1"
+optional = true
+
[target."cfg(not(any(target_os = \"linux\", target_os = \"android\", target_os = \"windows\", target_os = \"macos\", target_os = \"ios\", target_os = \"freebsd\", target_os = \"openbsd\", target_os = \"netbsd\", target_os = \"dragonfly\", target_os = \"solaris\", target_os = \"illumos\", target_os = \"fuchsia\", target_os = \"redox\", target_os = \"cloudabi\", target_os = \"haiku\", target_os = \"vxworks\", target_os = \"emscripten\", target_os = \"wasi\")))".dependencies.const-random]
version = "0.1.12"
optional = true
diff --git a/third_party/rust/ahash/build.rs b/third_party/rust/ahash/build.rs
index 8be4964e52..6aba02526b 100644
--- a/third_party/rust/ahash/build.rs
+++ b/third_party/rust/ahash/build.rs
@@ -7,7 +7,9 @@ fn main() {
if let Some(channel) = version_check::Channel::read() {
if channel.supports_features() {
println!("cargo:rustc-cfg=feature=\"specialize\"");
- println!("cargo:rustc-cfg=feature=\"stdsimd\"");
+ if version_check::Version::read().map_or(false, |v| v.at_most("1.77.9")) {
+ println!("cargo:rustc-cfg=feature=\"stdsimd\"");
+ }
}
}
let os = env::var("CARGO_CFG_TARGET_OS").expect("CARGO_CFG_TARGET_OS was not set");
diff --git a/third_party/rust/ahash/src/aes_hash.rs b/third_party/rust/ahash/src/aes_hash.rs
index 1c98582cee..7e619b520b 100644
--- a/third_party/rust/ahash/src/aes_hash.rs
+++ b/third_party/rust/ahash/src/aes_hash.rs
@@ -170,10 +170,10 @@ impl Hasher for AHasher {
let tail = data.read_last_u128x4();
let mut current: [u128; 4] = [self.key; 4];
current[0] = aesenc(current[0], tail[0]);
- current[1] = aesenc(current[1], tail[1]);
+ current[1] = aesdec(current[1], tail[1]);
current[2] = aesenc(current[2], tail[2]);
- current[3] = aesenc(current[3], tail[3]);
- let mut sum: [u128; 2] = [self.key, self.key];
+ current[3] = aesdec(current[3], tail[3]);
+ let mut sum: [u128; 2] = [self.key, !self.key];
sum[0] = add_by_64s(sum[0].convert(), tail[0].convert()).convert();
sum[1] = add_by_64s(sum[1].convert(), tail[1].convert()).convert();
sum[0] = shuffle_and_add(sum[0], tail[2]);
@@ -190,8 +190,9 @@ impl Hasher for AHasher {
sum[1] = shuffle_and_add(sum[1], blocks[3]);
data = rest;
}
- self.hash_in_2(aesenc(current[0], current[1]), aesenc(current[2], current[3]));
- self.hash_in(add_by_64s(sum[0].convert(), sum[1].convert()).convert());
+ self.hash_in_2(current[0], current[1]);
+ self.hash_in_2(current[2], current[3]);
+ self.hash_in_2(sum[0], sum[1]);
} else {
//len 33-64
let (head, _) = data.read_u128x2();
@@ -215,7 +216,7 @@ impl Hasher for AHasher {
fn finish(&self) -> u64 {
let combined = aesdec(self.sum, self.enc);
let result: [u64; 2] = aesenc(aesenc(combined, self.key), combined).convert();
- result[0]
+ result[1]
}
}
diff --git a/third_party/rust/ahash/src/hash_quality_test.rs b/third_party/rust/ahash/src/hash_quality_test.rs
index 4cd3156afe..17228d4716 100644
--- a/third_party/rust/ahash/src/hash_quality_test.rs
+++ b/third_party/rust/ahash/src/hash_quality_test.rs
@@ -1,5 +1,5 @@
use core::hash::{Hash, Hasher};
-use std::collections::HashMap;
+use std::collections::{HashMap};
fn assert_sufficiently_different(a: u64, b: u64, tolerance: i32) {
let (same_byte_count, same_nibble_count) = count_same_bytes_and_nibbles(a, b);
@@ -326,6 +326,28 @@ fn test_length_extension<T: Hasher>(hasher: impl Fn(u128, u128) -> T) {
}
}
+fn test_sparse<T: Hasher>(hasher: impl Fn() -> T) {
+ let mut buf = [0u8; 256];
+ let mut hashes = HashMap::new();
+ for idx_1 in 0..256 {
+ for idx_2 in idx_1+1..256 {
+ for value_1 in [1, 2, 4, 8, 16, 32, 64, 128] {
+ for value_2 in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 15, 16, 17, 18, 20, 24, 31, 32, 33, 48, 64, 96, 127, 128, 129, 192, 254, 255] {
+ buf[idx_1] = value_1;
+ buf[idx_2] = value_2;
+ let hash_value = hash_with(&buf, &mut hasher());
+ let keys = hashes.entry(hash_value).or_insert(Vec::new());
+ keys.push((idx_1, value_1, idx_2, value_2));
+ buf[idx_1] = 0;
+ buf[idx_2] = 0;
+ }
+ }
+ }
+ }
+ hashes.retain(|_key, value| value.len() != 1);
+ assert_eq!(0, hashes.len(), "Collision with: {:?}", hashes);
+}
+
#[cfg(test)]
mod fallback_tests {
use crate::fallback_hash::*;
@@ -392,6 +414,12 @@ mod fallback_tests {
fn fallback_length_extension() {
test_length_extension(|a, b| AHasher::new_with_keys(a, b));
}
+
+ #[test]
+ fn test_no_sparse_collisions() {
+ test_sparse(|| AHasher::new_with_keys(0, 0));
+ test_sparse(|| AHasher::new_with_keys(1, 2));
+ }
}
///Basic sanity tests of the cypto properties of aHash.
@@ -480,4 +508,10 @@ mod aes_tests {
fn aes_length_extension() {
test_length_extension(|a, b| AHasher::test_with_keys(a, b));
}
+
+ #[test]
+ fn aes_no_sparse_collisions() {
+ test_sparse(|| AHasher::test_with_keys(0, 0));
+ test_sparse(|| AHasher::test_with_keys(1, 2));
+ }
}
diff --git a/third_party/rust/ahash/src/random_state.rs b/third_party/rust/ahash/src/random_state.rs
index c3628bf145..9ac2f3ec43 100644
--- a/third_party/rust/ahash/src/random_state.rs
+++ b/third_party/rust/ahash/src/random_state.rs
@@ -29,8 +29,13 @@ extern crate alloc;
#[cfg(feature = "std")]
extern crate std as alloc;
+#[cfg(feature = "atomic-polyfill")]
+use atomic_polyfill as atomic;
+#[cfg(not(feature = "atomic-polyfill"))]
+use core::sync::atomic;
+
use alloc::boxed::Box;
-use core::sync::atomic::{AtomicUsize, Ordering};
+use atomic::{AtomicUsize, Ordering};
#[cfg(not(all(target_arch = "arm", target_os = "none")))]
use once_cell::race::OnceBox;
diff --git a/third_party/rust/cc/.cargo-checksum.json b/third_party/rust/cc/.cargo-checksum.json
index 4dc2fe2390..f7d42ebd81 100644
--- a/third_party/rust/cc/.cargo-checksum.json
+++ b/third_party/rust/cc/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.lock":"23c26d62ba5114f5ac6e7ffa3ea233cea77e5cb7f98d9f056f40fe2c49971f67","Cargo.toml":"fd4b39488866b6717476fadc460ff91c89511628080769516eec452c0def8bc7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"58af5106352aafa62175a90f8a5f25fa114028bf909220dc0735d79745999ec1","src/bin/gcc-shim.rs":"b77907875029494b6288841c3aed2e4939ed40708c7f597fca5c9e2570490ca6","src/com.rs":"29d0dee08a656ab1a4cc3e5fe24542e0fab5c1373cbc9b05059f7572cf9b8313","src/lib.rs":"e0cc228db97675d6a0d86b219a20e9e48925a1ccbfd9e9fd038ccf6ef129957e","src/registry.rs":"98ae2b71781acc49297e5544fa0cf059f735636f8f1338edef8dbf7232443945","src/setup_config.rs":"72deaf1927c0b713fd5c2b2d5b8f0ea3a303a00fda1579427895cac26a94122d","src/vs_instances.rs":"2d3f8278a803b0e7052f4eeb1979b29f963dd0143f4458e2cb5f33c4e5f0963b","src/winapi.rs":"e128e95b2d39ae7a02f54a7e25d33c488c14759b9f1a50a449e10545856950c3","src/windows_registry.rs":"c0340379c1f540cf96f45bbd4cf8fc28db555826f30ac937b75b87e4377b716b","tests/cc_env.rs":"e02b3b0824ad039b47e4462c5ef6dbe6c824c28e7953af94a0f28f7b5158042e","tests/cflags.rs":"57f06eb5ce1557e5b4a032d0c4673e18fbe6f8d26c1deb153126e368b96b41b3","tests/cxxflags.rs":"c2c6c6d8a0d7146616fa1caed26876ee7bc9fcfffd525eb4743593cade5f3371","tests/support/mod.rs":"a3c8d116973bb16066bf6ec4de5143183f97de7aad085d85f8118a2eaac3e1e0","tests/test.rs":"61fb35ae6dd5cf506ada000bdd82c92e9f8eac9cc053b63e83d3f897436fbf8f"},"package":"a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d"} \ No newline at end of file
+{"files":{"Cargo.toml":"1288f536f4ddf6bcdc664a91a070aad2ebd7c6edc32ce24e8d6bc04c2cd64d49","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"f1ddbede208a5b78333a25dac0a7598e678e9b601a7d99a791069bddaf180dfe","src/command_helpers.rs":"3ef95bdcd79a43406fdab275d8a8f45ba787876399b54df34068955ec0109e69","src/lib.rs":"91efa8f9242266752658edd66ee607ce30635f4c30710508a99eb62e7b3c54da","src/parallel/async_executor.rs":"4ce24435fff6b6555b43fee042c16bd65d4150d0346567f246b9190d85b45983","src/parallel/job_token.rs":"0676c3177b5be9d7ede483bf4bd45c5ca0f5511073e4d1c9f181a0bc83db05dc","src/parallel/mod.rs":"aaffed5ad3dc0d28641533ab0d6f522bf34a059d4b1a239dc4d217cb5d58e232","src/parallel/stderr.rs":"a2d18ba3f2e04deb9047ece9ab7ca5452d9a76b515afbe20a76307e31597f34b","src/tool.rs":"172cfcbecd7c6a363ea841a48a10a75b0a01e83b83c0691107c601598b68dedf","src/windows/com.rs":"be1564756c9f3ef1398eafeed7b54ba610caba28e8f6258d28a997737ebf9535","src/windows/find_tools.rs":"9234fe7ab27b0259c6fa9fb47826e7d1a3d1d2c7c4042ef7153ab90ccb9a3412","src/windows/mod.rs":"42f1ad7fee35a17686b003e6aa520d3d1940d47d2f531d626e9ae0c48ba49005","src/windows/registry.rs":"c521b72c825e8095843e73482ffa810ed066ad8bb9f86e6db0c5c143c171aba1","src/windows/setup_config.rs":"754439cbab492afd44c9755abcbec1a41c9b2c358131cee2df13c0e996dbbec8","src/windows/vs_instances.rs":"76e3cee74b5fd38ddaf533bba11fe401667c50dda5f9d064099840893eaa7587","src/windows/winapi.rs":"250d51c1826d1a2329e9889dd9f058cfce253dbf2a678b076147c6cdb5db046c","src/windows/windows_sys.rs":"f6b90b87f23e446284bde86749b53858c0d37b8a43515ed8d0e90b1ac8cf7771"},"package":"a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723"} \ No newline at end of file
diff --git a/third_party/rust/cc/Cargo.lock b/third_party/rust/cc/Cargo.lock
deleted file mode 100644
index 2d065bc6a8..0000000000
--- a/third_party/rust/cc/Cargo.lock
+++ /dev/null
@@ -1,110 +0,0 @@
-# This file is automatically @generated by Cargo.
-# It is not intended for manual editing.
-version = 3
-
-[[package]]
-name = "bitflags"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
-
-[[package]]
-name = "cc"
-version = "1.0.78"
-dependencies = [
- "jobserver",
- "tempfile",
-]
-
-[[package]]
-name = "cfg-if"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
-
-[[package]]
-name = "fastrand"
-version = "1.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
-dependencies = [
- "instant",
-]
-
-[[package]]
-name = "instant"
-version = "0.1.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
-dependencies = [
- "cfg-if",
-]
-
-[[package]]
-name = "jobserver"
-version = "0.1.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b"
-dependencies = [
- "libc",
-]
-
-[[package]]
-name = "libc"
-version = "0.2.138"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "db6d7e329c562c5dfab7a46a2afabc8b987ab9a4834c9d1ca04dc54c1546cef8"
-
-[[package]]
-name = "redox_syscall"
-version = "0.2.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a"
-dependencies = [
- "bitflags",
-]
-
-[[package]]
-name = "remove_dir_all"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
-dependencies = [
- "winapi",
-]
-
-[[package]]
-name = "tempfile"
-version = "3.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4"
-dependencies = [
- "cfg-if",
- "fastrand",
- "libc",
- "redox_syscall",
- "remove_dir_all",
- "winapi",
-]
-
-[[package]]
-name = "winapi"
-version = "0.3.9"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
-dependencies = [
- "winapi-i686-pc-windows-gnu",
- "winapi-x86_64-pc-windows-gnu",
-]
-
-[[package]]
-name = "winapi-i686-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
-
-[[package]]
-name = "winapi-x86_64-pc-windows-gnu"
-version = "0.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/third_party/rust/cc/Cargo.toml b/third_party/rust/cc/Cargo.toml
index c4ec0bf79d..5d974e076f 100644
--- a/third_party/rust/cc/Cargo.toml
+++ b/third_party/rust/cc/Cargo.toml
@@ -11,10 +11,15 @@
[package]
edition = "2018"
+rust-version = "1.53"
name = "cc"
-version = "1.0.78"
+version = "1.0.89"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
-exclude = ["/.github"]
+exclude = [
+ "/.github",
+ "tests",
+ "src/bin",
+]
description = """
A build-time dependency for Cargo build scripts to assist in invoking the native
C compiler to compile native C code into a static archive to be linked into Rust
@@ -29,11 +34,20 @@ license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/cc-rs"
[dependencies.jobserver]
-version = "0.1.16"
+version = "0.1.20"
optional = true
+default-features = false
[dev-dependencies.tempfile]
version = "3"
[features]
-parallel = ["jobserver"]
+parallel = [
+ "libc",
+ "jobserver",
+]
+
+[target."cfg(unix)".dependencies.libc]
+version = "0.2.62"
+optional = true
+default-features = false
diff --git a/third_party/rust/cc/README.md b/third_party/rust/cc/README.md
index 863540d2d9..33d4bb40f8 100644
--- a/third_party/rust/cc/README.md
+++ b/third_party/rust/cc/README.md
@@ -1,209 +1,13 @@
# cc-rs
-A library to compile C/C++/assembly into a Rust library/application.
-
-[Documentation](https://docs.rs/cc)
-
-A simple library meant to be used as a build dependency with Cargo packages in
-order to build a set of C/C++ files into a static archive. This crate calls out
-to the most relevant compiler for a platform, for example using `cl` on MSVC.
-
-## Using cc-rs
-
-First, you'll want to both add a build script for your crate (`build.rs`) and
-also add this crate to your `Cargo.toml` via:
-
-```toml
-[build-dependencies]
-cc = "1.0"
-```
-
-Next up, you'll want to write a build script like so:
-
-```rust,no_run
-// build.rs
-
-fn main() {
- cc::Build::new()
- .file("foo.c")
- .file("bar.c")
- .compile("foo");
-}
-```
-
-And that's it! Running `cargo build` should take care of the rest and your Rust
-application will now have the C files `foo.c` and `bar.c` compiled into a file
-named `libfoo.a`. If the C files contain
-
-```c
-void foo_function(void) { ... }
-```
-
-and
-
-```c
-int32_t bar_function(int32_t x) { ... }
-```
-
-you can call them from Rust by declaring them in
-your Rust code like so:
-
-```rust,no_run
-extern {
- fn foo_function();
- fn bar_function(x: i32) -> i32;
-}
-
-pub fn call() {
- unsafe {
- foo_function();
- bar_function(42);
- }
-}
-
-fn main() {
- // ...
-}
-```
-
-See [the Rustonomicon](https://doc.rust-lang.org/nomicon/ffi.html) for more details.
-
-## External configuration via environment variables
-
-To control the programs and flags used for building, the builder can set a
-number of different environment variables.
-
-* `CFLAGS` - a series of space separated flags passed to compilers. Note that
- individual flags cannot currently contain spaces, so doing
- something like: `-L=foo\ bar` is not possible.
-* `CC` - the actual C compiler used. Note that this is used as an exact
- executable name, so (for example) no extra flags can be passed inside
- this variable, and the builder must ensure that there aren't any
- trailing spaces. This compiler must understand the `-c` flag. For
- certain `TARGET`s, it also is assumed to know about other flags (most
- common is `-fPIC`).
-* `AR` - the `ar` (archiver) executable to use to build the static library.
-* `CRATE_CC_NO_DEFAULTS` - the default compiler flags may cause conflicts in some cross compiling scenarios. Setting this variable will disable the generation of default compiler flags.
-* `CXX...` - see [C++ Support](#c-support).
-
-Each of these variables can also be supplied with certain prefixes and suffixes,
-in the following prioritized order:
-
-1. `<var>_<target>` - for example, `CC_x86_64-unknown-linux-gnu`
-2. `<var>_<target_with_underscores>` - for example, `CC_x86_64_unknown_linux_gnu`
-3. `<build-kind>_<var>` - for example, `HOST_CC` or `TARGET_CFLAGS`
-4. `<var>` - a plain `CC`, `AR` as above.
-
-If none of these variables exist, cc-rs uses built-in defaults
-
-In addition to the above optional environment variables, `cc-rs` has some
-functions with hard requirements on some variables supplied by [cargo's
-build-script driver][cargo] that it has the `TARGET`, `OUT_DIR`, `OPT_LEVEL`,
-and `HOST` variables.
-
-[cargo]: https://doc.rust-lang.org/cargo/reference/build-scripts.html#inputs-to-the-build-script
-
-## Optional features
-
-### Parallel
-
-Currently cc-rs supports parallel compilation (think `make -jN`) but this
-feature is turned off by default. To enable cc-rs to compile C/C++ in parallel,
-you can change your dependency to:
-
-```toml
-[build-dependencies]
-cc = { version = "1.0", features = ["parallel"] }
-```
-
-By default cc-rs will limit parallelism to `$NUM_JOBS`, or if not present it
-will limit it to the number of cpus on the machine. If you are using cargo,
-use `-jN` option of `build`, `test` and `run` commands as `$NUM_JOBS`
-is supplied by cargo.
-
-## Compile-time Requirements
-
-To work properly this crate needs access to a C compiler when the build script
-is being run. This crate does not ship a C compiler with it. The compiler
-required varies per platform, but there are three broad categories:
-
-* Unix platforms require `cc` to be the C compiler. This can be found by
- installing cc/clang on Linux distributions and Xcode on macOS, for example.
-* Windows platforms targeting MSVC (e.g. your target triple ends in `-msvc`)
- require `cl.exe` to be available and in `PATH`. This is typically found in
- standard Visual Studio installations and the `PATH` can be set up by running
- the appropriate developer tools shell.
-* Windows platforms targeting MinGW (e.g. your target triple ends in `-gnu`)
- require `cc` to be available in `PATH`. We recommend the
- [MinGW-w64](https://www.mingw-w64.org/) distribution, which is using the
- [Win-builds](http://win-builds.org/) installation system.
- You may also acquire it via
- [MSYS2](https://www.msys2.org/), as explained [here][msys2-help]. Make sure
- to install the appropriate architecture corresponding to your installation of
- rustc. GCC from older [MinGW](http://www.mingw.org/) project is compatible
- only with 32-bit rust compiler.
-
-[msys2-help]: https://github.com/rust-lang/rust#building-on-windows
-
-## C++ support
-
-`cc-rs` supports C++ libraries compilation by using the `cpp` method on
-`Build`:
-
-```rust,no_run
-fn main() {
- cc::Build::new()
- .cpp(true) // Switch to C++ library compilation.
- .file("foo.cpp")
- .compile("libfoo.a");
-}
-```
-
-For C++ libraries, the `CXX` and `CXXFLAGS` environment variables are used instead of `CC` and `CFLAGS`.
-
-The C++ standard library may be linked to the crate target. By default it's `libc++` for macOS, FreeBSD, and OpenBSD, `libc++_shared` for Android, nothing for MSVC, and `libstdc++` for anything else. It can be changed in one of two ways:
-
-1. by using the `cpp_link_stdlib` method on `Build`:
- ```rust,no-run
- fn main() {
- cc::Build::new()
- .cpp(true)
- .file("foo.cpp")
- .cpp_link_stdlib("stdc++") // use libstdc++
- .compile("libfoo.a");
- }
- ```
-2. by setting the `CXXSTDLIB` environment variable.
-
-In particular, for Android you may want to [use `c++_static` if you have at most one shared library](https://developer.android.com/ndk/guides/cpp-support).
-
-Remember that C++ does name mangling so `extern "C"` might be required to enable Rust linker to find your functions.
-
-## CUDA C++ support
-
-`cc-rs` also supports compiling CUDA C++ libraries by using the `cuda` method
-on `Build` (currently for GNU/Clang toolchains only):
-
-```rust,no_run
-fn main() {
- cc::Build::new()
- // Switch to CUDA C++ library compilation using NVCC.
- .cuda(true)
- .cudart("static")
- // Generate code for Maxwell (GTX 970, 980, 980 Ti, Titan X).
- .flag("-gencode").flag("arch=compute_52,code=sm_52")
- // Generate code for Maxwell (Jetson TX1).
- .flag("-gencode").flag("arch=compute_53,code=sm_53")
- // Generate code for Pascal (GTX 1070, 1080, 1080 Ti, Titan Xp).
- .flag("-gencode").flag("arch=compute_61,code=sm_61")
- // Generate code for Pascal (Tesla P100).
- .flag("-gencode").flag("arch=compute_60,code=sm_60")
- // Generate code for Pascal (Jetson TX2).
- .flag("-gencode").flag("arch=compute_62,code=sm_62")
- .file("bar.cu")
- .compile("libbar.a");
-}
-```
+A library for [Cargo build scripts](https://doc.rust-lang.org/cargo/reference/build-scripts.html)
+to compile a set of C/C++/assembly/CUDA files into a static archive for Cargo
+to link into the crate being built. This crate does not compile code itself;
+it calls out to the default compiler for the platform. This crate
+automatically detects situations such as cross compilation, honors
+various environment variables, and builds code appropriately.
+
+Refer to the [documentation](https://docs.rs/cc) for detailed usage instructions.
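As a minimal sketch of that workflow (the source path and library name are placeholders), a build script looks like:

```rust
// build.rs -- minimal illustrative usage; "src/hello.c" is a placeholder.
fn main() {
    cc::Build::new()
        .file("src/hello.c")
        .compile("hello"); // emits libhello.a into OUT_DIR plus cargo link metadata
}
```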
## License
diff --git a/third_party/rust/cc/src/bin/gcc-shim.rs b/third_party/rust/cc/src/bin/gcc-shim.rs
deleted file mode 100644
index 1731df82ea..0000000000
--- a/third_party/rust/cc/src/bin/gcc-shim.rs
+++ /dev/null
@@ -1,48 +0,0 @@
-#![cfg_attr(test, allow(dead_code))]
-
-use std::env;
-use std::fs::File;
-use std::io::prelude::*;
-use std::path::PathBuf;
-
-fn main() {
- let mut args = env::args();
- let program = args.next().expect("Unexpected empty args");
-
- let out_dir = PathBuf::from(
- env::var_os("GCCTEST_OUT_DIR").expect(&format!("{}: GCCTEST_OUT_DIR not found", program)),
- );
-
- // Find the first nonexistent candidate file to which the program's args can be written.
- for i in 0.. {
- let candidate = &out_dir.join(format!("out{}", i));
-
- // If the file exists, commands have already run. Try again.
- if candidate.exists() {
- continue;
- }
-
- // Create a file and record the args passed to the command.
- let mut f = File::create(candidate).expect(&format!(
- "{}: can't create candidate: {}",
- program,
- candidate.to_string_lossy()
- ));
- for arg in args {
- writeln!(f, "{}", arg).expect(&format!(
- "{}: can't write to candidate: {}",
- program,
- candidate.to_string_lossy()
- ));
- }
- break;
- }
-
- // Create a file used by some tests.
- let path = &out_dir.join("libfoo.a");
- File::create(path).expect(&format!(
- "{}: can't create libfoo.a: {}",
- program,
- path.to_string_lossy()
- ));
-}
diff --git a/third_party/rust/cc/src/command_helpers.rs b/third_party/rust/cc/src/command_helpers.rs
new file mode 100644
index 0000000000..919d276c84
--- /dev/null
+++ b/third_party/rust/cc/src/command_helpers.rs
@@ -0,0 +1,433 @@
+//! Miscellaneous helpers for running commands
+
+use std::{
+ collections::hash_map,
+ ffi::OsString,
+ fmt::Display,
+ fs,
+ hash::Hasher,
+ io::{self, Read, Write},
+ path::Path,
+ process::{Child, ChildStderr, Command, Stdio},
+ sync::{
+ atomic::{AtomicBool, Ordering},
+ Arc,
+ },
+};
+
+use crate::{Error, ErrorKind, Object};
+
+#[derive(Clone, Debug)]
+pub(crate) struct CargoOutput {
+ pub(crate) metadata: bool,
+ pub(crate) warnings: bool,
+ pub(crate) debug: bool,
+ checked_dbg_var: Arc<AtomicBool>,
+}
+
+impl CargoOutput {
+ pub(crate) fn new() -> Self {
+ Self {
+ metadata: true,
+ warnings: true,
+ debug: std::env::var_os("CC_ENABLE_DEBUG_OUTPUT").is_some(),
+ checked_dbg_var: Arc::new(AtomicBool::new(false)),
+ }
+ }
+
+ pub(crate) fn print_metadata(&self, s: &dyn Display) {
+ if self.metadata {
+ println!("{}", s);
+ }
+ }
+
+ pub(crate) fn print_warning(&self, arg: &dyn Display) {
+ if self.warnings {
+ println!("cargo:warning={}", arg);
+ }
+ }
+
+ pub(crate) fn print_debug(&self, arg: &dyn Display) {
+ if self.metadata && !self.checked_dbg_var.load(Ordering::Relaxed) {
+ self.checked_dbg_var.store(true, Ordering::Relaxed);
+ println!("cargo:rerun-if-env-changed=CC_ENABLE_DEBUG_OUTPUT");
+ }
+ if self.debug {
+ println!("{}", arg);
+ }
+ }
+
+ fn stdio_for_warnings(&self) -> Stdio {
+ if self.warnings {
+ Stdio::piped()
+ } else {
+ Stdio::null()
+ }
+ }
+}
+
+pub(crate) struct StderrForwarder {
+ inner: Option<(ChildStderr, Vec<u8>)>,
+ #[cfg(feature = "parallel")]
+ is_non_blocking: bool,
+ #[cfg(feature = "parallel")]
+ bytes_available_failed: bool,
+}
+
+const MIN_BUFFER_CAPACITY: usize = 100;
+
+impl StderrForwarder {
+ pub(crate) fn new(child: &mut Child) -> Self {
+ Self {
+ inner: child
+ .stderr
+ .take()
+ .map(|stderr| (stderr, Vec::with_capacity(MIN_BUFFER_CAPACITY))),
+ #[cfg(feature = "parallel")]
+ is_non_blocking: false,
+ #[cfg(feature = "parallel")]
+ bytes_available_failed: false,
+ }
+ }
+
+ #[allow(clippy::uninit_vec)]
+ fn forward_available(&mut self) -> bool {
+ if let Some((stderr, buffer)) = self.inner.as_mut() {
+ loop {
+ let old_data_end = buffer.len();
+
+ // For non-blocking we check to see if there is data available, so we should try to
+ // read at least that much. For blocking, always read at least the minimum amount.
+ #[cfg(not(feature = "parallel"))]
+ let to_reserve = MIN_BUFFER_CAPACITY;
+ #[cfg(feature = "parallel")]
+ let to_reserve = if self.is_non_blocking && !self.bytes_available_failed {
+ match crate::parallel::stderr::bytes_available(stderr) {
+ #[cfg(windows)]
+ Ok(0) => return false,
+ #[cfg(unix)]
+ Ok(0) => {
+ // On Unix, depending on the implementation, we may sometimes get 0 in a
+ // loop (either there is data available or the pipe is broken), so
+ // continue with the non-blocking read anyway.
+ MIN_BUFFER_CAPACITY
+ }
+ #[cfg(windows)]
+ Err(_) => {
+ // On Windows, if we get an error then the pipe is broken, so flush
+ // the buffer and bail.
+ if !buffer.is_empty() {
+ write_warning(&buffer[..]);
+ }
+ self.inner = None;
+ return true;
+ }
+ #[cfg(unix)]
+ Err(_) => {
+ // On Unix, depending on the implementation, we may get spurious
+ // errors so make a note not to use bytes_available again and try
+ // the non-blocking read anyway.
+ self.bytes_available_failed = true;
+ MIN_BUFFER_CAPACITY
+ }
+ Ok(bytes_available) => MIN_BUFFER_CAPACITY.max(bytes_available),
+ }
+ } else {
+ MIN_BUFFER_CAPACITY
+ };
+ buffer.reserve(to_reserve);
+
+ // SAFETY: 1) the length is set to the capacity, so we are never using memory beyond
+ // the underlying buffer and 2) we always call `truncate` below to set the len back
+ // to the initialized data.
+ unsafe {
+ buffer.set_len(buffer.capacity());
+ }
+ match stderr.read(&mut buffer[old_data_end..]) {
+ Err(err) if err.kind() == std::io::ErrorKind::WouldBlock => {
+ // No data currently, yield back.
+ buffer.truncate(old_data_end);
+ return false;
+ }
+ Err(err) if err.kind() == std::io::ErrorKind::Interrupted => {
+ // Interrupted, try again.
+ buffer.truncate(old_data_end);
+ }
+ Ok(0) | Err(_) => {
+ // End of stream: flush remaining data and bail.
+ if old_data_end > 0 {
+ write_warning(&buffer[..old_data_end]);
+ }
+ self.inner = None;
+ return true;
+ }
+ Ok(bytes_read) => {
+ buffer.truncate(old_data_end + bytes_read);
+ let mut consumed = 0;
+ for line in buffer.split_inclusive(|&b| b == b'\n') {
+ // Only forward complete lines, leave the rest in the buffer.
+ if let Some((b'\n', line)) = line.split_last() {
+ consumed += line.len() + 1;
+ write_warning(line);
+ }
+ }
+ buffer.drain(..consumed);
+ }
+ }
+ }
+ } else {
+ true
+ }
+ }
+
+ #[cfg(feature = "parallel")]
+ pub(crate) fn set_non_blocking(&mut self) -> Result<(), Error> {
+ assert!(!self.is_non_blocking);
+
+ #[cfg(unix)]
+ if let Some((stderr, _)) = self.inner.as_ref() {
+ crate::parallel::stderr::set_non_blocking(stderr)?;
+ }
+
+ self.is_non_blocking = true;
+ Ok(())
+ }
+
+ #[cfg(feature = "parallel")]
+ fn forward_all(&mut self) {
+ while !self.forward_available() {}
+ }
+
+ #[cfg(not(feature = "parallel"))]
+ fn forward_all(&mut self) {
+ let forward_result = self.forward_available();
+ assert!(forward_result, "Should have consumed all data");
+ }
+}
+
+fn write_warning(line: &[u8]) {
+ let stdout = io::stdout();
+ let mut stdout = stdout.lock();
+ stdout.write_all(b"cargo:warning=").unwrap();
+ stdout.write_all(line).unwrap();
+ stdout.write_all(b"\n").unwrap();
+}
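`write_warning` leans on Cargo's build-script line protocol: any stdout line of the form `cargo:warning=...` is shown to the user as a warning. A standalone sketch of that protocol from a build script (the message is illustrative):

```rust
// build.rs -- illustrative: surface one warning through Cargo's line protocol.
fn main() {
    println!("cargo:warning=forwarded compiler output would appear like this");
}
```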
+
+fn wait_on_child(
+ cmd: &Command,
+ program: &str,
+ child: &mut Child,
+ cargo_output: &CargoOutput,
+) -> Result<(), Error> {
+ StderrForwarder::new(child).forward_all();
+
+ let status = match child.wait() {
+ Ok(s) => s,
+ Err(e) => {
+ return Err(Error::new(
+ ErrorKind::ToolExecError,
+ format!(
+ "Failed to wait on spawned child process, command {:?} with args {:?}: {}.",
+ cmd, program, e
+ ),
+ ));
+ }
+ };
+
+ cargo_output.print_debug(&status);
+
+ if status.success() {
+ Ok(())
+ } else {
+ Err(Error::new(
+ ErrorKind::ToolExecError,
+ format!(
+ "Command {:?} with args {:?} did not execute successfully (status code {}).",
+ cmd, program, status
+ ),
+ ))
+ }
+}
+
+/// Find the destination object path for each file in the input source files,
+/// and store them in the output Object.
+pub(crate) fn objects_from_files(files: &[Arc<Path>], dst: &Path) -> Result<Vec<Object>, Error> {
+ let mut objects = Vec::with_capacity(files.len());
+ for file in files {
+ let basename = file
+ .file_name()
+ .ok_or_else(|| {
+ Error::new(
+ ErrorKind::InvalidArgument,
+ "No file_name for object file path!",
+ )
+ })?
+ .to_string_lossy();
+ let dirname = file
+ .parent()
+ .ok_or_else(|| {
+ Error::new(
+ ErrorKind::InvalidArgument,
+ "No parent for object file path!",
+ )
+ })?
+ .to_string_lossy();
+
+ // Hash the dirname. This should prevent conflicts if we have multiple
+ // object files with the same filename in different subfolders.
+ let mut hasher = hash_map::DefaultHasher::new();
+ hasher.write(dirname.to_string().as_bytes());
+ let obj = dst
+ .join(format!("{:016x}-{}", hasher.finish(), basename))
+ .with_extension("o");
+
+ match obj.parent() {
+ Some(s) => fs::create_dir_all(s)?,
+ None => {
+ return Err(Error::new(
+ ErrorKind::InvalidArgument,
+ "dst is an invalid path with no parent",
+ ));
+ }
+ };
+
+ objects.push(Object::new(file.to_path_buf(), obj));
+ }
+
+ Ok(objects)
+}
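The dirname hash above is what keeps two sources with the same basename (say `a/common.c` and `b/common.c`) from colliding in the output directory. A self-contained sketch of the same naming scheme, independent of the crate's internals (paths are illustrative):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::path::Path;

// Prefix the basename with a hash of the directory name, mirroring the
// scheme used by objects_from_files.
fn object_name(src: &Path) -> String {
    let dirname = src.parent().unwrap().to_string_lossy();
    let basename = src.file_stem().unwrap().to_string_lossy();
    let mut hasher = DefaultHasher::new();
    hasher.write(dirname.as_bytes());
    format!("{:016x}-{}.o", hasher.finish(), basename)
}

fn main() {
    // Same basename, different directories => distinct object names.
    println!("{}", object_name(Path::new("a/common.c")));
    println!("{}", object_name(Path::new("b/common.c")));
}
```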
+
+pub(crate) fn run(
+ cmd: &mut Command,
+ program: &str,
+ cargo_output: &CargoOutput,
+) -> Result<(), Error> {
+ let mut child = spawn(cmd, program, cargo_output)?;
+ wait_on_child(cmd, program, &mut child, cargo_output)
+}
+
+pub(crate) fn run_output(
+ cmd: &mut Command,
+ program: &str,
+ cargo_output: &CargoOutput,
+) -> Result<Vec<u8>, Error> {
+ cmd.stdout(Stdio::piped());
+
+ let mut child = spawn(cmd, program, cargo_output)?;
+
+ let mut stdout = vec![];
+ child
+ .stdout
+ .take()
+ .unwrap()
+ .read_to_end(&mut stdout)
+ .unwrap();
+
+ wait_on_child(cmd, program, &mut child, cargo_output)?;
+
+ Ok(stdout)
+}
+
+pub(crate) fn spawn(
+ cmd: &mut Command,
+ program: &str,
+ cargo_output: &CargoOutput,
+) -> Result<Child, Error> {
+ struct ResetStderr<'cmd>(&'cmd mut Command);
+
+ impl Drop for ResetStderr<'_> {
+ fn drop(&mut self) {
+ // Reset stderr to default to release pipe_writer so that print thread will
+ // not block forever.
+ self.0.stderr(Stdio::inherit());
+ }
+ }
+
+ cargo_output.print_debug(&format_args!("running: {:?}", cmd));
+
+ let cmd = ResetStderr(cmd);
+ let child = cmd.0.stderr(cargo_output.stdio_for_warnings()).spawn();
+ match child {
+ Ok(child) => Ok(child),
+ Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
+ let extra = if cfg!(windows) {
+ " (see https://github.com/rust-lang/cc-rs#compile-time-requirements \
+for help)"
+ } else {
+ ""
+ };
+ Err(Error::new(
+ ErrorKind::ToolNotFound,
+ format!("Failed to find tool. Is `{}` installed?{}", program, extra),
+ ))
+ }
+ Err(e) => Err(Error::new(
+ ErrorKind::ToolExecError,
+ format!(
+ "Command {:?} with args {:?} failed to start: {:?}",
+ cmd.0, program, e
+ ),
+ )),
+ }
+}
+
+pub(crate) fn command_add_output_file(
+ cmd: &mut Command,
+ dst: &Path,
+ cuda: bool,
+ msvc: bool,
+ clang: bool,
+ gnu: bool,
+ is_asm: bool,
+ is_arm: bool,
+) {
+ if msvc && !clang && !gnu && !cuda && !(is_asm && is_arm) {
+ let mut s = OsString::from("-Fo");
+ s.push(dst);
+ cmd.arg(s);
+ } else {
+ cmd.arg("-o").arg(dst);
+ }
+}
+
+#[cfg(feature = "parallel")]
+pub(crate) fn try_wait_on_child(
+ cmd: &Command,
+ program: &str,
+ child: &mut Child,
+ stdout: &mut dyn io::Write,
+ stderr_forwarder: &mut StderrForwarder,
+) -> Result<Option<()>, Error> {
+ stderr_forwarder.forward_available();
+
+ match child.try_wait() {
+ Ok(Some(status)) => {
+ stderr_forwarder.forward_all();
+
+ let _ = writeln!(stdout, "{}", status);
+
+ if status.success() {
+ Ok(Some(()))
+ } else {
+ Err(Error::new(
+ ErrorKind::ToolExecError,
+ format!(
+ "Command {:?} with args {:?} did not execute successfully (status code {}).",
+ cmd, program, status
+ ),
+ ))
+ }
+ }
+ Ok(None) => Ok(None),
+ Err(e) => {
+ stderr_forwarder.forward_all();
+ Err(Error::new(
+ ErrorKind::ToolExecError,
+ format!(
+ "Failed to wait on spawned child process, command {:?} with args {:?}: {}.",
+ cmd, program, e
+ ),
+ ))
+ }
+ }
+}
diff --git a/third_party/rust/cc/src/lib.rs b/third_party/rust/cc/src/lib.rs
index 1ebd2cc7a5..1b193dbd0a 100644
--- a/third_party/rust/cc/src/lib.rs
+++ b/third_party/rust/cc/src/lib.rs
@@ -1,88 +1,251 @@
-//! A library for build scripts to compile custom C code
+//! A library for [Cargo build scripts](https://doc.rust-lang.org/cargo/reference/build-scripts.html)
+//! to compile a set of C/C++/assembly/CUDA files into a static archive for Cargo
+//! to link into the crate being built. This crate does not compile code itself;
+//! it calls out to the default compiler for the platform. This crate
+//! automatically detects situations such as cross compilation, honors
+//! [various environment variables](#external-configuration-via-environment-variables), and builds code appropriately.
//!
-//! This library is intended to be used as a `build-dependencies` entry in
-//! `Cargo.toml`:
+//! # Example
+//!
+//! First, you'll want to both add a build script for your crate (`build.rs`) and
+//! also add this crate to your `Cargo.toml` via:
//!
//! ```toml
//! [build-dependencies]
//! cc = "1.0"
//! ```
//!
-//! The purpose of this crate is to provide the utility functions necessary to
-//! compile C code into a static archive which is then linked into a Rust crate.
-//! Configuration is available through the `Build` struct.
+//! Next up, you'll want to write a build script like so:
+//!
+//! ```rust,no_run
+//! // build.rs
+//!
+//! fn main() {
+//! cc::Build::new()
+//! .file("foo.c")
+//! .file("bar.c")
+//! .compile("foo");
+//! }
+//! ```
+//!
+//! And that's it! Running `cargo build` should take care of the rest and your Rust
+//! application will now have the C files `foo.c` and `bar.c` compiled into a file
+//! named `libfoo.a`. If the C files contain
+//!
+//! ```c
+//! void foo_function(void) { ... }
+//! ```
+//!
+//! and
+//!
+//! ```c
+//! int32_t bar_function(int32_t x) { ... }
+//! ```
+//!
+//! you can call them from Rust by declaring them in
+//! your Rust code like so:
+//!
+//! ```rust,no_run
+//! extern "C" {
+//! fn foo_function();
+//! fn bar_function(x: i32) -> i32;
+//! }
//!
-//! This crate will automatically detect situations such as cross compilation or
-//! other environment variables set by Cargo and will build code appropriately.
+//! pub fn call() {
+//! unsafe {
+//! foo_function();
+//! bar_function(42);
+//! }
+//! }
+//!
+//! fn main() {
+//! call();
+//! }
+//! ```
//!
-//! The crate is not limited to C code, it can accept any source code that can
-//! be passed to a C or C++ compiler. As such, assembly files with extensions
-//! `.s` (gcc/clang) and `.asm` (MSVC) can also be compiled.
+//! See [the Rustonomicon](https://doc.rust-lang.org/nomicon/ffi.html) for more details.
//!
-//! [`Build`]: struct.Build.html
+//! # External configuration via environment variables
//!
-//! # Parallelism
+//! To control the programs and flags used for building, the builder can set a
+//! number of different environment variables.
//!
-//! To parallelize computation, enable the `parallel` feature for the crate.
+//! * `CFLAGS` - a series of space separated flags passed to compilers. Note that
+//! individual flags cannot currently contain spaces, so doing
+//! something like: `-L=foo\ bar` is not possible.
+//! * `CC` - the actual C compiler used. Note that this is used as an exact
+//! executable name, so (for example) no extra flags can be passed inside
+//! this variable, and the builder must ensure that there aren't any
+//! trailing spaces. This compiler must understand the `-c` flag. For
+//! certain `TARGET`s, it also is assumed to know about other flags (most
+//! common is `-fPIC`).
+//! * `AR` - the `ar` (archiver) executable to use to build the static library.
+//! * `CRATE_CC_NO_DEFAULTS` - the default compiler flags may cause conflicts in
+//! some cross compiling scenarios. Setting this variable
+//! will disable the generation of default compiler
+//! flags.
+//! * `CC_ENABLE_DEBUG_OUTPUT` - if set, compiler command invocations and exit codes will
+//! be logged to stdout. This is useful for debugging build script issues, but can be
+//! overly verbose for normal use.
+//! * `CXX...` - see [C++ Support](#c-support).
+//!
+//! Furthermore, projects using this crate may specify custom environment variables
+//! to be inspected, for example via the `Build::try_flags_from_environment`
+//! function. Consult the project’s own documentation or its use of the `cc` crate
+//! for any additional variables it may use.
+//!
+//! Each of these variables can also be supplied with certain prefixes and suffixes,
+//! in the following prioritized order:
+//!
+//! 1. `<var>_<target>` - for example, `CC_x86_64-unknown-linux-gnu`
+//! 2. `<var>_<target_with_underscores>` - for example, `CC_x86_64_unknown_linux_gnu`
+//! 3. `<build-kind>_<var>` - for example, `HOST_CC` or `TARGET_CFLAGS`
+//! 4. `<var>` - a plain `CC`, `AR` as above.
+//!
+//! If none of these variables exist, cc-rs uses built-in defaults.
+//!
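As an illustration of that lookup order, the helper below (a sketch for exposition only, not part of the `cc` API) lists the names that would be consulted for `CC` on a given target:

```rust
// Illustrative sketch: candidate environment variable names for `var`,
// in the priority order documented above.
fn candidate_env_vars(var: &str, target: &str, host: &str) -> Vec<String> {
    let kind = if target == host { "HOST" } else { "TARGET" };
    vec![
        format!("{}_{}", var, target),                   // 1. e.g. CC_x86_64-unknown-linux-gnu
        format!("{}_{}", var, target.replace('-', "_")), // 2. e.g. CC_x86_64_unknown_linux_gnu
        format!("{}_{}", kind, var),                     // 3. e.g. TARGET_CC or HOST_CC
        var.to_string(),                                 // 4. plain CC
    ]
}

fn main() {
    for name in candidate_env_vars("CC", "x86_64-unknown-linux-gnu", "aarch64-apple-darwin") {
        println!("{}", name);
    }
}
```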
+//! In addition to the above optional environment variables, `cc-rs` has some
+//! functions with hard requirements on variables supplied by [cargo's
+//! build-script driver][cargo], namely the `TARGET`, `OUT_DIR`, `OPT_LEVEL`,
+//! and `HOST` variables.
+//!
+//! [cargo]: https://doc.rust-lang.org/cargo/reference/build-scripts.html#inputs-to-the-build-script
+//!
+//! # Optional features
+//!
+//! ## Parallel
+//!
+//! Currently cc-rs supports parallel compilation (think `make -jN`) but this
+//! feature is turned off by default. To enable cc-rs to compile C/C++ in parallel,
+//! you can change your dependency to:
//!
//! ```toml
//! [build-dependencies]
//! cc = { version = "1.0", features = ["parallel"] }
//! ```
-//! To specify the max number of concurrent compilation jobs, set the `NUM_JOBS`
-//! environment variable to the desired amount.
//!
-//! Cargo will also set this environment variable when executed with the `-jN` flag.
+//! By default cc-rs will limit parallelism to `$NUM_JOBS`, or, if that variable
+//! is not present, to the number of CPUs on the machine. If you are using cargo,
+//! pass the `-jN` option to the `build`, `test`, and `run` commands, as `$NUM_JOBS`
+//! is supplied by cargo.
+//!
+//! # Compile-time Requirements
+//!
+//! To work properly this crate needs access to a C compiler when the build script
+//! is being run. This crate does not ship a C compiler with it. The compiler
+//! required varies per platform, but there are three broad categories:
//!
-//! If `NUM_JOBS` is not set, the `RAYON_NUM_THREADS` environment variable can
-//! also specify the build parallelism.
+//! * Unix platforms require `cc` to be the C compiler. This can be found by
+//! installing cc/clang on Linux distributions and Xcode on macOS, for example.
+//! * Windows platforms targeting MSVC (e.g. your target triple ends in `-msvc`)
+//! require Visual Studio to be installed. `cc-rs` attempts to locate it, and
+//! if it fails, `cl.exe` is expected to be available in `PATH`. This can be
+//! set up by running the appropriate developer tools shell.
+//! * Windows platforms targeting MinGW (e.g. your target triple ends in `-gnu`)
+//! require `cc` to be available in `PATH`. We recommend the
+//!   [MinGW-w64](https://www.mingw-w64.org/) distribution, which uses the
+//!   [Win-builds](http://win-builds.org/) installation system.
+//!   You may also acquire it via
+//!   [MSYS2](https://www.msys2.org/), as explained [here][msys2-help]. Make sure
+//!   to install the architecture corresponding to your installation of
+//!   rustc. GCC from the older [MinGW](http://www.mingw.org/) project is compatible
+//!   only with the 32-bit Rust compiler.
//!
-//! # Examples
+//! [msys2-help]: https://github.com/rust-lang/rust#building-on-windows
//!
-//! Use the `Build` struct to compile `src/foo.c`:
+//! # C++ support
//!
-//! ```no_run
+//! `cc-rs` supports C++ libraries compilation by using the `cpp` method on
+//! `Build`:
+//!
+//! ```rust,no_run
+//! fn main() {
+//! cc::Build::new()
+//! .cpp(true) // Switch to C++ library compilation.
+//! .file("foo.cpp")
+//! .compile("foo");
+//! }
+//! ```
+//!
+//! For C++ libraries, the `CXX` and `CXXFLAGS` environment variables are used instead of `CC` and `CFLAGS`.
+//!
+//! The C++ standard library may be linked to the crate target. By default it's `libc++` for macOS, FreeBSD, and OpenBSD, `libc++_shared` for Android, nothing for MSVC, and `libstdc++` for anything else. It can be changed in one of two ways:
+//!
+//! 1. by using the `cpp_link_stdlib` method on `Build`:
+//! ```rust,no_run
//! fn main() {
//! cc::Build::new()
-//! .file("src/foo.c")
-//! .define("FOO", Some("bar"))
-//! .include("src")
+//! .cpp(true)
+//! .file("foo.cpp")
+//! .cpp_link_stdlib("stdc++") // use libstdc++
//! .compile("foo");
//! }
//! ```
+//! 2. by setting the `CXXSTDLIB` environment variable.
+//!
+//! In particular, for Android you may want to [use `c++_static` if you have at most one shared library](https://developer.android.com/ndk/guides/cpp-support).
+//!
+//! Remember that C++ does name mangling, so `extern "C"` might be required to allow the Rust linker to find your functions.
+//!
+//! # CUDA C++ support
+//!
+//! `cc-rs` also supports compiling CUDA C++ libraries by using the `cuda` method
+//! on `Build`:
+//!
+//! ```rust,no_run
+//! fn main() {
+//! cc::Build::new()
+//! // Switch to CUDA C++ library compilation using NVCC.
+//! .cuda(true)
+//! .cudart("static")
+//! // Generate code for Maxwell (GTX 970, 980, 980 Ti, Titan X).
+//! .flag("-gencode").flag("arch=compute_52,code=sm_52")
+//! // Generate code for Maxwell (Jetson TX1).
+//! .flag("-gencode").flag("arch=compute_53,code=sm_53")
+//! // Generate code for Pascal (GTX 1070, 1080, 1080 Ti, Titan Xp).
+//! .flag("-gencode").flag("arch=compute_61,code=sm_61")
+//! // Generate code for Pascal (Tesla P100).
+//! .flag("-gencode").flag("arch=compute_60,code=sm_60")
+//! // Generate code for Pascal (Jetson TX2).
+//! .flag("-gencode").flag("arch=compute_62,code=sm_62")
+//! // Generate code in parallel
+//! .flag("-t0")
+//! .file("bar.cu")
+//! .compile("bar");
+//! }
+//! ```
#![doc(html_root_url = "https://docs.rs/cc/1.0")]
#![cfg_attr(test, deny(warnings))]
#![allow(deprecated)]
#![deny(missing_docs)]
-use std::collections::{hash_map, HashMap};
+use std::borrow::Cow;
+use std::collections::HashMap;
use std::env;
use std::ffi::{OsStr, OsString};
use std::fmt::{self, Display, Formatter};
use std::fs;
-use std::hash::Hasher;
-use std::io::{self, BufRead, BufReader, Read, Write};
+use std::io::{self, Write};
use std::path::{Component, Path, PathBuf};
-use std::process::{Child, Command, Stdio};
+#[cfg(feature = "parallel")]
+use std::process::Child;
+use std::process::Command;
use std::sync::{Arc, Mutex};
-use std::thread::{self, JoinHandle};
-
-// These modules are all glue to support reading the MSVC version from
-// the registry and from COM interfaces
-#[cfg(windows)]
-mod registry;
-#[cfg(windows)]
-#[macro_use]
-mod winapi;
-#[cfg(windows)]
-mod com;
-#[cfg(windows)]
-mod setup_config;
-#[cfg(windows)]
-mod vs_instances;
-
-pub mod windows_registry;
+
+#[cfg(feature = "parallel")]
+mod parallel;
+mod windows;
+// Regardless of whether this should be in this crate's public API,
+// it has been since 2015, so don't break it.
+pub use windows::find_tools as windows_registry;
+
+mod command_helpers;
+use command_helpers::*;
+
+mod tool;
+pub use tool::Tool;
+use tool::ToolFamily;
/// A builder for compilation of a native library.
///
@@ -91,32 +254,34 @@ pub mod windows_registry;
/// documentation on each method itself.
#[derive(Clone, Debug)]
pub struct Build {
- include_directories: Vec<PathBuf>,
- definitions: Vec<(String, Option<String>)>,
- objects: Vec<PathBuf>,
- flags: Vec<String>,
- flags_supported: Vec<String>,
+ include_directories: Vec<Arc<Path>>,
+ definitions: Vec<(Arc<str>, Option<Arc<str>>)>,
+ objects: Vec<Arc<Path>>,
+ flags: Vec<Arc<str>>,
+ flags_supported: Vec<Arc<str>>,
known_flag_support_status: Arc<Mutex<HashMap<String, bool>>>,
- ar_flags: Vec<String>,
- asm_flags: Vec<String>,
+ ar_flags: Vec<Arc<str>>,
+ asm_flags: Vec<Arc<str>>,
no_default_flags: bool,
- files: Vec<PathBuf>,
+ files: Vec<Arc<Path>>,
cpp: bool,
- cpp_link_stdlib: Option<Option<String>>,
- cpp_set_stdlib: Option<String>,
+ cpp_link_stdlib: Option<Option<Arc<str>>>,
+ cpp_set_stdlib: Option<Arc<str>>,
cuda: bool,
- cudart: Option<String>,
- target: Option<String>,
- host: Option<String>,
- out_dir: Option<PathBuf>,
- opt_level: Option<String>,
+ cudart: Option<Arc<str>>,
+ std: Option<Arc<str>>,
+ target: Option<Arc<str>>,
+ host: Option<Arc<str>>,
+ out_dir: Option<Arc<Path>>,
+ opt_level: Option<Arc<str>>,
debug: Option<bool>,
force_frame_pointer: Option<bool>,
- env: Vec<(OsString, OsString)>,
- compiler: Option<PathBuf>,
- archiver: Option<PathBuf>,
- cargo_metadata: bool,
- link_lib_modifiers: Vec<String>,
+ env: Vec<(Arc<OsStr>, Arc<OsStr>)>,
+ compiler: Option<Arc<Path>>,
+ archiver: Option<Arc<Path>>,
+ ranlib: Option<Arc<Path>>,
+ cargo_output: CargoOutput,
+ link_lib_modifiers: Vec<Arc<str>>,
pic: Option<bool>,
use_plt: Option<bool>,
static_crt: Option<bool>,
@@ -125,9 +290,11 @@ pub struct Build {
warnings_into_errors: bool,
warnings: Option<bool>,
extra_warnings: Option<bool>,
- env_cache: Arc<Mutex<HashMap<String, Option<String>>>>,
+ env_cache: Arc<Mutex<HashMap<String, Option<Arc<str>>>>>,
apple_sdk_root_cache: Arc<Mutex<HashMap<String, OsString>>>,
+ apple_versions_cache: Arc<Mutex<HashMap<String, String>>>,
emit_rerun_if_env_changed: bool,
+ cached_compiler_family: Arc<Mutex<HashMap<Box<Path>, ToolFamily>>>,
}
/// Represents the types of errors that may occur while using cc-rs.
@@ -145,6 +312,9 @@ enum ErrorKind {
ToolNotFound,
/// One of the function arguments failed validation.
InvalidArgument,
+ #[cfg(feature = "parallel")]
+    /// jobserver helper thread failure
+ JobserverHelpThreadError,
}
/// Represents an internal error that occurred, with an explanation.
@@ -153,21 +323,21 @@ pub struct Error {
/// Describes the kind of error that occurred.
kind: ErrorKind,
/// More explanation of error that occurred.
- message: String,
+ message: Cow<'static, str>,
}
impl Error {
- fn new(kind: ErrorKind, message: &str) -> Error {
+ fn new(kind: ErrorKind, message: impl Into<Cow<'static, str>>) -> Error {
Error {
- kind: kind,
- message: message.to_owned(),
+ kind,
+ message: message.into(),
}
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
- Error::new(ErrorKind::IOError, &format!("{}", e))
+ Error::new(ErrorKind::IOError, format!("{}", e))
}
}
@@ -179,97 +349,6 @@ impl Display for Error {
impl std::error::Error for Error {}
-/// Configuration used to represent an invocation of a C compiler.
-///
-/// This can be used to figure out what compiler is in use, what the arguments
-/// to it are, and what the environment variables look like for the compiler.
-/// This can be used to further configure other build systems (e.g. forward
-/// along CC and/or CFLAGS) or the `to_command` method can be used to run the
-/// compiler itself.
-#[derive(Clone, Debug)]
-pub struct Tool {
- path: PathBuf,
- cc_wrapper_path: Option<PathBuf>,
- cc_wrapper_args: Vec<OsString>,
- args: Vec<OsString>,
- env: Vec<(OsString, OsString)>,
- family: ToolFamily,
- cuda: bool,
- removed_args: Vec<OsString>,
-}
-
-/// Represents the family of tools this tool belongs to.
-///
-/// Each family of tools differs in how and what arguments they accept.
-///
-/// Detection of a family is done on best-effort basis and may not accurately reflect the tool.
-#[derive(Copy, Clone, Debug, PartialEq)]
-enum ToolFamily {
- /// Tool is GNU Compiler Collection-like.
- Gnu,
- /// Tool is Clang-like. It differs from the GCC in a sense that it accepts superset of flags
- /// and its cross-compilation approach is different.
- Clang,
- /// Tool is the MSVC cl.exe.
- Msvc { clang_cl: bool },
-}
-
-impl ToolFamily {
- /// What the flag to request debug info for this family of tools look like
- fn add_debug_flags(&self, cmd: &mut Tool, dwarf_version: Option<u32>) {
- match *self {
- ToolFamily::Msvc { .. } => {
- cmd.push_cc_arg("-Z7".into());
- }
- ToolFamily::Gnu | ToolFamily::Clang => {
- cmd.push_cc_arg(
- dwarf_version
- .map_or_else(|| "-g".into(), |v| format!("-gdwarf-{}", v))
- .into(),
- );
- }
- }
- }
-
- /// What the flag to force frame pointers.
- fn add_force_frame_pointer(&self, cmd: &mut Tool) {
- match *self {
- ToolFamily::Gnu | ToolFamily::Clang => {
- cmd.push_cc_arg("-fno-omit-frame-pointer".into());
- }
- _ => (),
- }
- }
-
- /// What the flags to enable all warnings
- fn warnings_flags(&self) -> &'static str {
- match *self {
- ToolFamily::Msvc { .. } => "-W4",
- ToolFamily::Gnu | ToolFamily::Clang => "-Wall",
- }
- }
-
- /// What the flags to enable extra warnings
- fn extra_warnings_flags(&self) -> Option<&'static str> {
- match *self {
- ToolFamily::Msvc { .. } => None,
- ToolFamily::Gnu | ToolFamily::Clang => Some("-Wextra"),
- }
- }
-
- /// What the flag to turn warning into errors
- fn warnings_to_errors_flag(&self) -> &'static str {
- match *self {
- ToolFamily::Msvc { .. } => "-WX",
- ToolFamily::Gnu | ToolFamily::Clang => "-Werror",
- }
- }
-
- fn verbose_stderr(&self) -> bool {
- *self == ToolFamily::Clang
- }
-}
-
/// Represents an object.
///
/// This is a source file -> object file pair.
@@ -282,7 +361,7 @@ struct Object {
impl Object {
/// Create a new source file -> object file pair.
fn new(src: PathBuf, dst: PathBuf) -> Object {
- Object { src: src, dst: dst }
+ Object { src, dst }
}
}
@@ -311,6 +390,7 @@ impl Build {
cpp_set_stdlib: None,
cuda: false,
cudart: None,
+ std: None,
target: None,
host: None,
out_dir: None,
@@ -320,7 +400,8 @@ impl Build {
env: Vec::new(),
compiler: None,
archiver: None,
- cargo_metadata: true,
+ ranlib: None,
+ cargo_output: CargoOutput::new(),
link_lib_modifiers: Vec::new(),
pic: None,
use_plt: None,
@@ -330,7 +411,9 @@ impl Build {
warnings_into_errors: false,
env_cache: Arc::new(Mutex::new(HashMap::new())),
apple_sdk_root_cache: Arc::new(Mutex::new(HashMap::new())),
+ apple_versions_cache: Arc::new(Mutex::new(HashMap::new())),
emit_rerun_if_env_changed: true,
+ cached_compiler_family: Arc::default(),
}
}
@@ -350,7 +433,7 @@ impl Build {
/// .compile("foo");
/// ```
pub fn include<P: AsRef<Path>>(&mut self, dir: P) -> &mut Build {
- self.include_directories.push(dir.as_ref().to_path_buf());
+ self.include_directories.push(dir.as_ref().into());
self
}
@@ -396,13 +479,13 @@ impl Build {
/// ```
pub fn define<'a, V: Into<Option<&'a str>>>(&mut self, var: &str, val: V) -> &mut Build {
self.definitions
- .push((var.to_string(), val.into().map(|s| s.to_string())));
+ .push((var.into(), val.into().map(Into::into)));
self
}
/// Add an arbitrary object file to link in
pub fn object<P: AsRef<Path>>(&mut self, obj: P) -> &mut Build {
- self.objects.push(obj.as_ref().to_path_buf());
+ self.objects.push(obj.as_ref().into());
self
}
@@ -417,7 +500,25 @@ impl Build {
/// .compile("foo");
/// ```
pub fn flag(&mut self, flag: &str) -> &mut Build {
- self.flags.push(flag.to_string());
+ self.flags.push(flag.into());
+ self
+ }
+
+ /// Removes a compiler flag that was added by [`Build::flag`].
+ ///
+ /// Will not remove flags added by other means (default flags,
+ /// flags from env, and so on).
+ ///
+ /// # Example
+ /// ```
+ /// cc::Build::new()
+ /// .file("src/foo.c")
+ /// .flag("unwanted_flag")
+ /// .remove_flag("unwanted_flag");
+ /// ```
+
+ pub fn remove_flag(&mut self, flag: &str) -> &mut Build {
+ self.flags.retain(|other_flag| &**other_flag != flag);
self
}
@@ -433,7 +534,7 @@ impl Build {
/// .compile("foo");
/// ```
pub fn ar_flag(&mut self, flag: &str) -> &mut Build {
- self.ar_flags.push(flag.to_string());
+ self.ar_flags.push(flag.into());
self
}
@@ -452,7 +553,7 @@ impl Build {
/// .compile("foo");
/// ```
pub fn asm_flag(&mut self, flag: &str) -> &mut Build {
- self.asm_flags.push(flag.to_string());
+ self.asm_flags.push(flag.into());
self
}
@@ -499,6 +600,7 @@ impl Build {
let host = self.get_host()?;
let mut cfg = Build::new();
cfg.flag(flag)
+ .cargo_metadata(self.cargo_output.metadata)
.target(&target)
.opt_level(0)
.host(&host)
@@ -515,30 +617,34 @@ impl Build {
if compiler.family.verbose_stderr() {
compiler.remove_arg("-v".into());
}
+ if compiler.family == ToolFamily::Clang {
+ // Avoid reporting that the arg is unsupported just because the
+ // compiler complains that it wasn't used.
+ compiler.push_cc_arg("-Wno-unused-command-line-argument".into());
+ }
let mut cmd = compiler.to_command();
let is_arm = target.contains("aarch64") || target.contains("arm");
let clang = compiler.family == ToolFamily::Clang;
+ let gnu = compiler.family == ToolFamily::Gnu;
command_add_output_file(
&mut cmd,
&obj,
self.cuda,
target.contains("msvc"),
clang,
+ gnu,
false,
is_arm,
);
- // We need to explicitly tell msvc not to link and create an exe
- // in the root directory of the crate
- if target.contains("msvc") && !self.cuda {
- cmd.arg("-c");
- }
+ // Checking for compiler flags does not require linking
+ cmd.arg("-c");
cmd.arg(&src);
let output = cmd.output()?;
- let is_supported = output.stderr.is_empty();
+ let is_supported = output.status.success() && output.stderr.is_empty();
known_status.insert(flag.to_owned(), is_supported);
Ok(is_supported)
@@ -556,10 +662,39 @@ impl Build {
/// .compile("foo");
/// ```
pub fn flag_if_supported(&mut self, flag: &str) -> &mut Build {
- self.flags_supported.push(flag.to_string());
+ self.flags_supported.push(flag.into());
self
}
+ /// Add flags from the specified environment variable.
+ ///
+ /// Normally the `cc` crate will consult with the standard set of environment
+ /// variables (such as `CFLAGS` and `CXXFLAGS`) to construct the compiler invocation. Use of
+ /// this method provides additional levers for the end user to use when configuring the build
+ /// process.
+ ///
+ /// Just like the standard variables, this method will search for an environment variable with
+    /// the appropriate target prefixes, where applicable.
+ ///
+ /// # Examples
+ ///
+ /// This method is particularly beneficial in introducing the ability to specify crate-specific
+ /// flags.
+ ///
+ /// ```no_run
+ /// cc::Build::new()
+ /// .file("src/foo.c")
+ /// .try_flags_from_environment(concat!(env!("CARGO_PKG_NAME"), "_CFLAGS"))
+ /// .expect("the environment variable must be specified and UTF-8")
+ /// .compile("foo");
+ /// ```
+ ///
+ pub fn try_flags_from_environment(&mut self, environ_key: &str) -> Result<&mut Build, Error> {
+ let flags = self.envflags(environ_key)?;
+ self.flags.extend(flags.into_iter().map(Into::into));
+ Ok(self)
+ }
+
/// Set the `-shared` flag.
///
/// When enabled, the compiler will produce a shared object which can
@@ -610,7 +745,7 @@ impl Build {
/// Add a file which will be compiled
pub fn file<P: AsRef<Path>>(&mut self, p: P) -> &mut Build {
- self.files.push(p.as_ref().to_path_buf());
+ self.files.push(p.as_ref().into());
self
}
@@ -626,10 +761,21 @@ impl Build {
self
}
+ /// Get the files which will be compiled
+ pub fn get_files(&self) -> impl Iterator<Item = &Path> {
+ self.files.iter().map(AsRef::as_ref)
+ }
+
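A small usage sketch for this accessor (file names are placeholders):

```rust
// Illustrative: inspect the sources a Build has accumulated before compiling.
fn main() {
    let mut build = cc::Build::new();
    build.file("src/foo.c").file("src/bar.c");
    for path in build.get_files() {
        println!("will compile: {}", path.display());
    }
}
```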
/// Set C++ support.
///
/// The other `cpp_*` options will only become active if this is set to
/// `true`.
+ ///
+ /// The name of the C++ standard library to link is decided by:
+ /// 1. If [`cpp_link_stdlib`](Build::cpp_link_stdlib) is set, use its value.
+ /// 2. Else if the `CXXSTDLIB` environment variable is set, use its value.
+ /// 3. Else the default is `libc++` for OS X and BSDs, `libc++_shared` for Android,
+ /// `None` for MSVC and `libstdc++` for anything else.
pub fn cpp(&mut self, cpp: bool) -> &mut Build {
self.cpp = cpp;
self
@@ -637,17 +783,19 @@ impl Build {
/// Set CUDA C++ support.
///
- /// Enabling CUDA will pass the detected C/C++ toolchain as an argument to
- /// the CUDA compiler, NVCC. NVCC itself accepts some limited GNU-like args;
- /// any other arguments for the C/C++ toolchain will be redirected using
- /// "-Xcompiler" flags.
+ /// Enabling CUDA will invoke the CUDA compiler, NVCC. While NVCC accepts
+ /// the most common compiler flags, e.g. `-std=c++17`, some project-specific
+    /// flags might have to be prefixed with the "-Xcompiler" flag, for example as
+ /// `.flag("-Xcompiler").flag("-fpermissive")`. See the documentation for
+ /// `nvcc`, the CUDA compiler driver, at <https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/>
+ /// for more information.
///
/// If enabled, this also implicitly enables C++ support.
pub fn cuda(&mut self, cuda: bool) -> &mut Build {
self.cuda = cuda;
if cuda {
self.cpp = true;
- self.cudart = Some("static".to_string());
+ self.cudart = Some("static".into());
}
self
}
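A hedged sketch of forwarding a host-compiler flag through NVCC as described above (`kernel.cu` and `-fpermissive` are placeholders, and a CUDA toolchain is assumed to be installed):

```rust
// build.rs -- illustrative CUDA build that forwards one flag to the host compiler.
fn main() {
    cc::Build::new()
        .cuda(true)          // implies .cpp(true) and a static cudart
        .file("kernel.cu")
        .flag("-Xcompiler")  // the next flag is passed through to the host C++ compiler
        .flag("-fpermissive")
        .compile("kernel");
}
```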
@@ -660,11 +808,42 @@ impl Build {
/// at all, if the default is right for the project.
pub fn cudart(&mut self, cudart: &str) -> &mut Build {
if self.cuda {
- self.cudart = Some(cudart.to_string());
+ self.cudart = Some(cudart.into());
}
self
}
+ /// Specify the C or C++ language standard version.
+ ///
+ /// These values are common to modern versions of GCC, Clang and MSVC:
+ /// - `c11` for ISO/IEC 9899:2011
+ /// - `c17` for ISO/IEC 9899:2018
+ /// - `c++14` for ISO/IEC 14882:2014
+ /// - `c++17` for ISO/IEC 14882:2017
+ /// - `c++20` for ISO/IEC 14882:2020
+ ///
+ /// Other values have less broad support, e.g. MSVC does not support `c++11`
+ /// (`c++14` is the minimum), `c89` (omit the flag instead) or `c99`.
+ ///
+ /// For compiling C++ code, you should also set `.cpp(true)`.
+ ///
+ /// The default is that no standard flag is passed to the compiler, so the
+ /// language version will be the compiler's default.
+ ///
+ /// # Example
+ ///
+ /// ```no_run
+ /// cc::Build::new()
+ /// .file("src/modern.cpp")
+ /// .cpp(true)
+ /// .std("c++17")
+ /// .compile("modern");
+ /// ```
+ pub fn std(&mut self, std: &str) -> &mut Build {
+ self.std = Some(std.into());
+ self
+ }
+
/// Set warnings into errors flag.
///
/// Disabled by default.
@@ -736,8 +915,6 @@ impl Build {
/// Set the standard library to link against when compiling with C++
/// support.
///
- /// See [`get_cpp_link_stdlib`](cc::Build::get_cpp_link_stdlib) documentation
- /// for the default value.
/// If the `CXXSTDLIB` environment variable is set, its value will
/// override the default value, but not the value explicitly set by calling
/// this function.
@@ -826,7 +1003,7 @@ impl Build {
/// .compile("foo");
/// ```
pub fn target(&mut self, target: &str) -> &mut Build {
- self.target = Some(target.to_string());
+ self.target = Some(target.into());
self
}
@@ -844,7 +1021,7 @@ impl Build {
/// .compile("foo");
/// ```
pub fn host(&mut self, host: &str) -> &mut Build {
- self.host = Some(host.to_string());
+ self.host = Some(host.into());
self
}
@@ -853,7 +1030,7 @@ impl Build {
/// This option is automatically scraped from the `OPT_LEVEL` environment
/// variable by build scripts, so it's not required to call this function.
pub fn opt_level(&mut self, opt_level: u32) -> &mut Build {
- self.opt_level = Some(opt_level.to_string());
+ self.opt_level = Some(opt_level.to_string().into());
self
}
@@ -862,7 +1039,7 @@ impl Build {
/// This option is automatically scraped from the `OPT_LEVEL` environment
/// variable by build scripts, so it's not required to call this function.
pub fn opt_level_str(&mut self, opt_level: &str) -> &mut Build {
- self.opt_level = Some(opt_level.to_string());
+ self.opt_level = Some(opt_level.into());
self
}
@@ -893,7 +1070,7 @@ impl Build {
/// This option is automatically scraped from the `OUT_DIR` environment
/// variable by build scripts, so it's not required to call this function.
pub fn out_dir<P: AsRef<Path>>(&mut self, out_dir: P) -> &mut Build {
- self.out_dir = Some(out_dir.as_ref().to_owned());
+ self.out_dir = Some(out_dir.as_ref().into());
self
}
@@ -903,7 +1080,7 @@ impl Build {
/// number of environment variables, so it's not required to call this
/// function.
pub fn compiler<P: AsRef<Path>>(&mut self, compiler: P) -> &mut Build {
- self.compiler = Some(compiler.as_ref().to_owned());
+ self.compiler = Some(compiler.as_ref().into());
self
}
@@ -913,9 +1090,20 @@ impl Build {
/// number of environment variables, so it's not required to call this
/// function.
pub fn archiver<P: AsRef<Path>>(&mut self, archiver: P) -> &mut Build {
- self.archiver = Some(archiver.as_ref().to_owned());
+ self.archiver = Some(archiver.as_ref().into());
self
}
+
+ /// Configures the tool used to index archives.
+ ///
+ /// This option is automatically determined from the target platform or a
+ /// number of environment variables, so it's not required to call this
+ /// function.
+ pub fn ranlib<P: AsRef<Path>>(&mut self, ranlib: P) -> &mut Build {
+ self.ranlib = Some(ranlib.as_ref().into());
+ self
+ }
+
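For instance, a build script could point both the archiver and the index tool at an LLVM toolchain; the tool names are illustrative and assumed to be on `PATH`:

```rust
// build.rs -- illustrative override of the archive tools.
fn main() {
    cc::Build::new()
        .file("src/foo.c")
        .archiver("llvm-ar")    // creates the static archive
        .ranlib("llvm-ranlib")  // indexes the archive
        .compile("foo");
}
```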
/// Define whether metadata should be emitted for cargo allowing it to
/// automatically link the binary. Defaults to `true`.
///
@@ -928,17 +1116,37 @@ impl Build {
/// - If `emit_rerun_if_env_changed` is not `false`, `rerun-if-env-changed=`*env*
///
pub fn cargo_metadata(&mut self, cargo_metadata: bool) -> &mut Build {
- self.cargo_metadata = cargo_metadata;
+ self.cargo_output.metadata = cargo_metadata;
+ self
+ }
+
+ /// Define whether compile warnings should be emitted for cargo. Defaults to
+ /// `true`.
+ ///
+ /// If disabled, compiler messages will not be printed.
+ /// Issues unrelated to the compilation will always produce cargo warnings regardless of this setting.
+ pub fn cargo_warnings(&mut self, cargo_warnings: bool) -> &mut Build {
+ self.cargo_output.warnings = cargo_warnings;
+ self
+ }
+
+ /// Define whether debug information should be emitted for cargo. Defaults to whether
+ /// or not the environment variable `CC_ENABLE_DEBUG_OUTPUT` is set.
+ ///
+ /// If enabled, the compiler will emit debug information when generating object files,
+ /// such as the command invoked and the exit status.
+ pub fn cargo_debug(&mut self, cargo_debug: bool) -> &mut Build {
+ self.cargo_output.debug = cargo_debug;
self
}
/// Adds a native library modifier that will be added to the
/// `rustc-link-lib=static:MODIFIERS=LIBRARY_NAME` metadata line
/// emitted for cargo if `cargo_metadata` is enabled.
- /// See https://doc.rust-lang.org/rustc/command-line-arguments.html#-l-link-the-generated-crate-to-a-native-library
+ /// See <https://doc.rust-lang.org/rustc/command-line-arguments.html#-l-link-the-generated-crate-to-a-native-library>
/// for the list of modifiers accepted by rustc.
pub fn link_lib_modifier(&mut self, link_lib_modifier: &str) -> &mut Build {
- self.link_lib_modifiers.push(link_lib_modifier.to_string());
+ self.link_lib_modifiers.push(link_lib_modifier.into());
self
}
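For example, a build script can ask rustc to link the produced archive with whole-archive semantics; `+whole-archive` is one of the modifiers rustc accepts, and the source path is a placeholder:

```rust
// build.rs -- illustrative use of a link-lib modifier.
fn main() {
    cc::Build::new()
        .file("src/foo.c")
        .link_lib_modifier("+whole-archive")
        .compile("foo");
}
```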
@@ -991,14 +1199,13 @@ impl Build {
A: AsRef<OsStr>,
B: AsRef<OsStr>,
{
- self.env
- .push((a.as_ref().to_owned(), b.as_ref().to_owned()));
+ self.env.push((a.as_ref().into(), b.as_ref().into()));
self
}
/// Run the compiler, generating the file `output`
///
- /// This will return a result instead of panicing; see compile() for the complete description.
+ /// This will return a result instead of panicking; see compile() for the complete description.
pub fn try_compile(&self, output: &str) -> Result<(), Error> {
let mut output_components = Path::new(output).components();
match (output_components.next(), output_components.next()) {
@@ -1016,52 +1223,14 @@ impl Build {
} else {
let mut gnu = String::with_capacity(5 + output.len());
gnu.push_str("lib");
- gnu.push_str(&output);
+ gnu.push_str(output);
gnu.push_str(".a");
(output, gnu)
};
let dst = self.get_out_dir()?;
- let mut objects = Vec::new();
- for file in self.files.iter() {
- let obj = if file.has_root() {
- // If `file` is an absolute path, prefix the `basename`
- // with the `dirname`'s hash to ensure name uniqueness.
- let basename = file
- .file_name()
- .ok_or_else(|| Error::new(ErrorKind::InvalidArgument, "file_name() failure"))?
- .to_string_lossy();
- let dirname = file
- .parent()
- .ok_or_else(|| Error::new(ErrorKind::InvalidArgument, "parent() failure"))?
- .to_string_lossy();
- let mut hasher = hash_map::DefaultHasher::new();
- hasher.write(dirname.to_string().as_bytes());
- dst.join(format!("{:016x}-{}", hasher.finish(), basename))
- .with_extension("o")
- } else {
- dst.join(file).with_extension("o")
- };
- let obj = if !obj.starts_with(&dst) {
- dst.join(obj.file_name().ok_or_else(|| {
- Error::new(ErrorKind::IOError, "Getting object file details failed.")
- })?)
- } else {
- obj
- };
+ let objects = objects_from_files(&self.files, &dst)?;
- match obj.parent() {
- Some(s) => fs::create_dir_all(s)?,
- None => {
- return Err(Error::new(
- ErrorKind::IOError,
- "Getting object file details failed.",
- ));
- }
- };
-
- objects.push(Object::new(file.to_path_buf(), obj));
- }
self.compile_objects(&objects)?;
self.assemble(lib_name, &dst.join(gnu_lib_name), &objects)?;
@@ -1070,8 +1239,8 @@ impl Build {
let atlmfc_lib = compiler
.env()
.iter()
- .find(|&&(ref var, _)| var.as_os_str() == OsStr::new("LIB"))
- .and_then(|&(_, ref lib_paths)| {
+ .find(|&(var, _)| var.as_os_str() == OsStr::new("LIB"))
+ .and_then(|(_, lib_paths)| {
env::split_paths(lib_paths).find(|path| {
let sub = Path::new("atlmfc/lib");
path.ends_with(sub) || path.parent().map_or(false, |p| p.ends_with(sub))
@@ -1079,7 +1248,7 @@ impl Build {
});
if let Some(atlmfc_lib) = atlmfc_lib {
- self.print(&format!(
+ self.cargo_output.print_metadata(&format_args!(
"cargo:rustc-link-search=native={}",
atlmfc_lib.display()
));
@@ -1087,26 +1256,34 @@ impl Build {
}
if self.link_lib_modifiers.is_empty() {
- self.print(&format!("cargo:rustc-link-lib=static={}", lib_name));
+ self.cargo_output
+ .print_metadata(&format_args!("cargo:rustc-link-lib=static={}", lib_name));
} else {
let m = self.link_lib_modifiers.join(",");
- self.print(&format!("cargo:rustc-link-lib=static:{}={}", m, lib_name));
+ self.cargo_output.print_metadata(&format_args!(
+ "cargo:rustc-link-lib=static:{}={}",
+ m, lib_name
+ ));
}
- self.print(&format!("cargo:rustc-link-search=native={}", dst.display()));
+ self.cargo_output.print_metadata(&format_args!(
+ "cargo:rustc-link-search=native={}",
+ dst.display()
+ ));
// Add specific C++ libraries, if enabled.
if self.cpp {
if let Some(stdlib) = self.get_cpp_link_stdlib()? {
- self.print(&format!("cargo:rustc-link-lib={}", stdlib));
+ self.cargo_output
+ .print_metadata(&format_args!("cargo:rustc-link-lib={}", stdlib));
}
}
let cudart = match &self.cudart {
- Some(opt) => opt.as_str(), // {none|shared|static}
+ Some(opt) => &*opt, // {none|shared|static}
None => "none",
};
if cudart != "none" {
- if let Some(nvcc) = which(&self.get_compiler().path) {
+ if let Some(nvcc) = which(&self.get_compiler().path, None) {
// Try to figure out the -L search path. If it fails,
// it's on user to specify one by passing it through
// RUSTFLAGS environment variable.
@@ -1135,10 +1312,10 @@ impl Build {
}
}
if libtst && libdir.is_dir() {
- println!(
+ self.cargo_output.print_metadata(&format_args!(
"cargo:rustc-link-search=native={}",
libdir.to_str().unwrap()
- );
+ ));
}
// And now the -l flag.
@@ -1147,7 +1324,8 @@ impl Build {
"static" => "cudart_static",
bad => panic!("unsupported cudart option: {}", bad),
};
- println!("cargo:rustc-link-lib={}", lib);
+ self.cargo_output
+ .print_metadata(&format_args!("cargo:rustc-link-lib={}", lib));
}
}
@@ -1197,18 +1375,48 @@ impl Build {
}
}
+ /// Run the compiler, generating intermediate files, but without linking
+ /// them into an archive file.
+ ///
+ /// This will return a list of compiled object files, in the same order
+ /// as they were passed in as `file`/`files` methods.
+ pub fn compile_intermediates(&self) -> Vec<PathBuf> {
+ match self.try_compile_intermediates() {
+ Ok(v) => v,
+ Err(e) => fail(&e.message),
+ }
+ }
+
+ /// Run the compiler, generating intermediate files, but without linking
+ /// them into an archive file.
+ ///
+ /// This will return a result instead of panicking; see `compile_intermediates()` for the complete description.
+ pub fn try_compile_intermediates(&self) -> Result<Vec<PathBuf>, Error> {
+ let dst = self.get_out_dir()?;
+ let objects = objects_from_files(&self.files, &dst)?;
+
+ self.compile_objects(&objects)?;
+
+ Ok(objects.into_iter().map(|v| v.dst).collect())
+ }
+
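A usage sketch for the intermediate-object API (paths are placeholders); the returned objects can then be fed into a custom link or archive step:

```rust
// build.rs -- illustrative: compile objects without bundling them into an archive.
fn main() {
    let objects = cc::Build::new()
        .file("src/foo.c")
        .file("src/bar.c")
        .compile_intermediates();
    for obj in &objects {
        println!("compiled object: {}", obj.display());
    }
}
```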
#[cfg(feature = "parallel")]
- fn compile_objects<'me>(&'me self, objs: &[Object]) -> Result<(), Error> {
- use std::sync::atomic::{AtomicBool, Ordering::SeqCst};
- use std::sync::Once;
-
- // Limit our parallelism globally with a jobserver. Start off by
- // releasing our own token for this process so we can have a bit of an
- // easier to write loop below. If this fails, though, then we're likely
- // on Windows with the main implicit token, so we just have a bit extra
- // parallelism for a bit and don't reacquire later.
- let server = jobserver();
- let reacquire = server.release_raw().is_ok();
+ fn compile_objects(&self, objs: &[Object]) -> Result<(), Error> {
+ use std::cell::Cell;
+
+ use parallel::async_executor::{block_on, YieldOnce};
+
+ if objs.len() <= 1 {
+ for obj in objs {
+ let (mut cmd, name) = self.create_compile_object_cmd(obj)?;
+ run(&mut cmd, &name, &self.cargo_output)?;
+ }
+
+ return Ok(());
+ }
+
+ // Limit our parallelism globally with a jobserver.
+ let tokens = parallel::job_token::ActiveJobTokenServer::new()?;
// When compiling objects in parallel we do a few dirty tricks to speed
// things up:
@@ -1222,153 +1430,154 @@ impl Build {
// Note that this jobserver is cached globally so we only use one per
// process and only worry about creating it once.
//
- // * Next we use a raw `thread::spawn` per thread to actually compile
- // objects in parallel. We only actually spawn a thread after we've
- // acquired a token to perform some work
- //
- // * Finally though we want to keep the dependencies of this crate
- // pretty light, so we avoid using a safe abstraction like `rayon` and
- // instead rely on some bits of `unsafe` code. We know that this stack
- // frame persists while everything is compiling so we use all the
- // stack-allocated objects without cloning/reallocating. We use a
- // transmute to `State` with a `'static` lifetime to persist
- // everything we need across the boundary, and the join-on-drop
- // semantics of `JoinOnDrop` should ensure that our stack frame is
- // alive while threads are alive.
+ // * Next we spawn processes to actually compile objects in
+ // parallel after we've acquired a token to perform some work
//
// With all that in mind we compile all objects in a loop here, after we
// acquire the appropriate tokens. Once all objects have been compiled
- // we join on all the threads and propagate the results of compilation.
- //
- // Note that as a slight optimization we try to break out as soon as
- // possible as soon as any compilation fails to ensure that errors get
- // out to the user as fast as possible.
- let error = AtomicBool::new(false);
- let mut threads = Vec::new();
- for obj in objs {
- if error.load(SeqCst) {
- break;
- }
- let token = server.acquire()?;
- let state = State {
- build: self,
- obj,
- error: &error,
- };
- let state = unsafe { std::mem::transmute::<State, State<'static>>(state) };
- let thread = thread::spawn(|| {
- let state: State<'me> = state; // erase the `'static` lifetime
- let result = state.build.compile_object(state.obj);
- if result.is_err() {
- state.error.store(true, SeqCst);
+ // we wait on all the processes and propagate the results of compilation.
+
+ let pendings = Cell::new(Vec::<(
+ Command,
+ String,
+ KillOnDrop,
+ parallel::job_token::JobToken,
+ )>::new());
+ let is_disconnected = Cell::new(false);
+ let has_made_progress = Cell::new(false);
+
+ let wait_future = async {
+ let mut error = None;
+ // Buffer the stdout
+ let mut stdout = io::BufWriter::with_capacity(128, io::stdout());
+
+ loop {
+ // If the other end of the pipe is already disconnected, then we're not gonna get any new jobs,
+ // so it doesn't make sense to reuse the tokens; in fact,
+ // releasing them as soon as possible (once we know that the other end is disconnected) is beneficial.
+ // Imagine that the last file built takes an hour to finish; in this scenario,
+ // by not releasing the tokens before that last file is done we would effectively block other processes from
+ // starting sooner - even though we only need one token for that last file, not N others that were acquired.
+
+ let mut pendings_is_empty = false;
+
+ cell_update(&pendings, |mut pendings| {
+ // Try waiting on them.
+ parallel::retain_unordered_mut(
+ &mut pendings,
+ |(cmd, program, child, _token)| {
+ match try_wait_on_child(
+ cmd,
+ program,
+ &mut child.0,
+ &mut stdout,
+ &mut child.1,
+ ) {
+ Ok(Some(())) => {
+ // Task done, remove the entry
+ has_made_progress.set(true);
+ false
+ }
+ Ok(None) => true, // Task still not finished, keep the entry
+ Err(err) => {
+ // Task failed, remove the entry.
+ // Since we can only return one error, log the error to make
+ // sure users always see all the compilation failures.
+ has_made_progress.set(true);
+
+ if self.cargo_output.warnings {
+ let _ = writeln!(stdout, "cargo:warning={}", err);
+ }
+ error = Some(err);
+
+ false
+ }
+ }
+ },
+ );
+ pendings_is_empty = pendings.is_empty();
+ pendings
+ });
+
+ if pendings_is_empty && is_disconnected.get() {
+ break if let Some(err) = error {
+ Err(err)
+ } else {
+ Ok(())
+ };
}
- drop(token); // make sure our jobserver token is released after the compile
- return result;
- });
- threads.push(JoinOnDrop(Some(thread)));
- }
- for mut thread in threads {
- if let Some(thread) = thread.0.take() {
- thread.join().expect("thread should not panic")?;
+ YieldOnce::default().await;
}
- }
-
- // Reacquire our process's token before we proceed, which we released
- // before entering the loop above.
- if reacquire {
- server.acquire_raw()?;
- }
-
- return Ok(());
-
- /// Shared state from the parent thread to the child thread. This
- /// package of pointers is temporarily transmuted to a `'static`
- /// lifetime to cross the thread boundary and then once the thread is
- /// running we erase the `'static` to go back to an anonymous lifetime.
- struct State<'a> {
- build: &'a Build,
- obj: &'a Object,
- error: &'a AtomicBool,
- }
-
- /// Returns a suitable `jobserver::Client` used to coordinate
- /// parallelism between build scripts.
- fn jobserver() -> &'static jobserver::Client {
- static INIT: Once = Once::new();
- static mut JOBSERVER: Option<jobserver::Client> = None;
-
- fn _assert_sync<T: Sync>() {}
- _assert_sync::<jobserver::Client>();
-
- unsafe {
- INIT.call_once(|| {
- let server = default_jobserver();
- JOBSERVER = Some(server);
+ };
+ let spawn_future = async {
+ for obj in objs {
+ let (mut cmd, program) = self.create_compile_object_cmd(obj)?;
+ let token = tokens.acquire().await?;
+ let mut child = spawn(&mut cmd, &program, &self.cargo_output)?;
+ let mut stderr_forwarder = StderrForwarder::new(&mut child);
+ stderr_forwarder.set_non_blocking()?;
+
+ cell_update(&pendings, |mut pendings| {
+ pendings.push((cmd, program, KillOnDrop(child, stderr_forwarder), token));
+ pendings
});
- JOBSERVER.as_ref().unwrap()
- }
- }
- unsafe fn default_jobserver() -> jobserver::Client {
- // Try to use the environmental jobserver which Cargo typically
- // initializes for us...
- if let Some(client) = jobserver::Client::from_env() {
- return client;
+ has_made_progress.set(true);
}
+ is_disconnected.set(true);
- // ... but if that fails for whatever reason select something
- // reasonable and crate a new jobserver. Use `NUM_JOBS` if set (it's
- // configured by Cargo) and otherwise just fall back to a
- // semi-reasonable number. Note that we could use `num_cpus` here
- // but it's an extra dependency that will almost never be used, so
- // it's generally not too worth it.
- let mut parallelism = 4;
- if let Ok(amt) = env::var("NUM_JOBS") {
- if let Ok(amt) = amt.parse() {
- parallelism = amt;
- }
- }
+ Ok::<_, Error>(())
+ };
- // If we create our own jobserver then be sure to reserve one token
- // for ourselves.
- let client = jobserver::Client::new(parallelism).expect("failed to create jobserver");
- client.acquire_raw().expect("failed to acquire initial");
- return client;
- }
+ return block_on(wait_future, spawn_future, &has_made_progress);
- struct JoinOnDrop(Option<thread::JoinHandle<Result<(), Error>>>);
+ struct KillOnDrop(Child, StderrForwarder);
- impl Drop for JoinOnDrop {
+ impl Drop for KillOnDrop {
fn drop(&mut self) {
- if let Some(thread) = self.0.take() {
- drop(thread.join());
- }
+ let child = &mut self.0;
+
+ child.kill().ok();
}
}
+
+ fn cell_update<T, F>(cell: &Cell<T>, f: F)
+ where
+ T: Default,
+ F: FnOnce(T) -> T,
+ {
+ let old = cell.take();
+ let new = f(old);
+ cell.set(new);
+ }
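Editorial aside: the `cell_update` helper above, as a standalone runnable sketch. Since the executor driving `wait_future` and `spawn_future` is (presumably) single-threaded and cooperative, a plain `Cell` suffices; taking the value out, transforming it, and setting it back avoids holding any borrow across the closure:

    use std::cell::Cell;

    fn cell_update<T: Default, F: FnOnce(T) -> T>(cell: &Cell<T>, f: F) {
        let old = cell.take(); // leaves T::default() in the cell
        cell.set(f(old));      // store the transformed value back
    }

    fn main() {
        let pending: Cell<Vec<u32>> = Cell::new(vec![1, 2]);
        cell_update(&pending, |mut v| {
            v.push(3);
            v
        });
        assert_eq!(pending.take(), vec![1, 2, 3]);
    }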
}
#[cfg(not(feature = "parallel"))]
fn compile_objects(&self, objs: &[Object]) -> Result<(), Error> {
for obj in objs {
- self.compile_object(obj)?;
+ let (mut cmd, name) = self.create_compile_object_cmd(obj)?;
+ run(&mut cmd, &name, &self.cargo_output)?;
}
+
Ok(())
}
- fn compile_object(&self, obj: &Object) -> Result<(), Error> {
+ fn create_compile_object_cmd(&self, obj: &Object) -> Result<(Command, String), Error> {
let asm_ext = AsmFileExt::from_path(&obj.src);
let is_asm = asm_ext.is_some();
let target = self.get_target()?;
let msvc = target.contains("msvc");
let compiler = self.try_get_compiler()?;
let clang = compiler.family == ToolFamily::Clang;
+ let gnu = compiler.family == ToolFamily::Gnu;
- let (mut cmd, name) = if msvc && asm_ext == Some(AsmFileExt::DotAsm) {
+ let is_assembler_msvc = msvc && asm_ext == Some(AsmFileExt::DotAsm);
+ let (mut cmd, name) = if is_assembler_msvc {
self.msvc_macro_assembler()?
} else {
let mut cmd = compiler.to_command();
- for &(ref a, ref b) in self.env.iter() {
+ for (a, b) in self.env.iter() {
cmd.env(a, b);
}
(
@@ -1382,18 +1591,20 @@ impl Build {
)
};
let is_arm = target.contains("aarch64") || target.contains("arm");
- command_add_output_file(&mut cmd, &obj.dst, self.cuda, msvc, clang, is_asm, is_arm);
+ command_add_output_file(
+ &mut cmd, &obj.dst, self.cuda, msvc, clang, gnu, is_asm, is_arm,
+ );
// armasm and armasm64 don't require the -c option
- if !msvc || !is_asm || !is_arm {
+ if !is_assembler_msvc || !is_arm {
cmd.arg("-c");
}
if self.cuda && self.cuda_file_count() > 1 {
cmd.arg("--device-c");
}
if is_asm {
- cmd.args(&self.asm_flags);
+ cmd.args(self.asm_flags.iter().map(std::ops::Deref::deref));
}
- if compiler.family == (ToolFamily::Msvc { clang_cl: true }) && !is_asm {
+ if compiler.family == (ToolFamily::Msvc { clang_cl: true }) && !is_assembler_msvc {
// #513: For `clang-cl`, separate flags/options from the input file.
// When cross-compiling macOS -> Windows, this avoids interpreting
// common `/Users/...` paths as the `/U` flag and triggering
@@ -1405,15 +1616,14 @@ impl Build {
self.fix_env_for_apple_os(&mut cmd)?;
}
- run(&mut cmd, &name)?;
- Ok(())
+ Ok((cmd, name))
}
- /// This will return a result instead of panicing; see expand() for the complete description.
+ /// This will return a result instead of panicking; see expand() for the complete description.
pub fn try_expand(&self) -> Result<Vec<u8>, Error> {
let compiler = self.try_get_compiler()?;
let mut cmd = compiler.to_command();
- for &(ref a, ref b) in self.env.iter() {
+ for (a, b) in self.env.iter() {
cmd.env(a, b);
}
cmd.arg("-E");
@@ -1423,10 +1633,23 @@ impl Build {
"Expand may only be called for a single file"
);
- for file in self.files.iter() {
- cmd.arg(file);
+ let is_asm = self
+ .files
+ .iter()
+ .map(std::ops::Deref::deref)
+ .find_map(AsmFileExt::from_path)
+ .is_some();
+
+ if compiler.family == (ToolFamily::Msvc { clang_cl: true }) && !is_asm {
+ // #513: For `clang-cl`, separate flags/options from the input file.
+ // When cross-compiling macOS -> Windows, this avoids interpreting
+ // common `/Users/...` paths as the `/U` flag and triggering
+ // `-Wslash-u-filename` warning.
+ cmd.arg("--");
}
+ cmd.args(self.files.iter().map(std::ops::Deref::deref));
+
let name = compiler
.path
.file_name()
@@ -1434,7 +1657,7 @@ impl Build {
.to_string_lossy()
.into_owned();
- Ok(run_output(&mut cmd, &name)?)
+ Ok(run_output(&mut cmd, &name, &self.cargo_output)?)
}
/// Run the compiler, returning the macro-expanded version of the input files.
@@ -1483,13 +1706,13 @@ impl Build {
/// Get the compiler that's in use for this configuration.
///
- /// This will return a result instead of panicing; see get_compiler() for the complete description.
+ /// This will return a result instead of panicking; see
+ /// [`get_compiler()`](Self::get_compiler) for the complete description.
pub fn try_get_compiler(&self) -> Result<Tool, Error> {
let opt_level = self.get_opt_level()?;
let target = self.get_target()?;
let mut cmd = self.get_base_compiler()?;
- let envflags = self.envflags(if self.cpp { "CXXFLAGS" } else { "CFLAGS" });
// Disable default flag generation via `no_default_flags` or environment variable
let no_defaults = self.no_default_flags || self.getenv("CRATE_CC_NO_DEFAULTS").is_some();
@@ -1500,13 +1723,23 @@ impl Build {
println!("Info: default compiler flags are disabled");
}
- for arg in envflags {
- cmd.push_cc_arg(arg.into());
+ if let Some(ref std) = self.std {
+ let separator = match cmd.family {
+ ToolFamily::Msvc { .. } => ':',
+ ToolFamily::Gnu | ToolFamily::Clang => '=',
+ };
+ cmd.push_cc_arg(format!("-std{}{}", separator, std).into());
+ }
+
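For illustration only (not part of the patch), and assuming the crate's public `std()` setter is what populates `self.std`, a caller asking for a specific language standard ends up with `-std=c11` under GNU/Clang and `-std:c11` under MSVC:

    cc::Build::new()
        .file("src/shim.c") // hypothetical source file
        .std("c11")
        .compile("shim");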
+ if let Ok(flags) = self.envflags(if self.cpp { "CXXFLAGS" } else { "CFLAGS" }) {
+ for arg in flags {
+ cmd.push_cc_arg(arg.into());
+ }
}
for directory in self.include_directories.iter() {
cmd.args.push("-I".into());
- cmd.args.push(directory.into());
+ cmd.args.push(directory.as_os_str().into());
}
// If warnings and/or extra_warnings haven't been explicitly set,
@@ -1514,34 +1747,28 @@ impl Build {
// CFLAGS/CXXFLAGS, since those variables presumably already contain
// the desired set of warnings flags.
- if self
- .warnings
- .unwrap_or(if self.has_flags() { false } else { true })
- {
+ if self.warnings.unwrap_or(!self.has_flags()) {
let wflags = cmd.family.warnings_flags().into();
cmd.push_cc_arg(wflags);
}
- if self
- .extra_warnings
- .unwrap_or(if self.has_flags() { false } else { true })
- {
+ if self.extra_warnings.unwrap_or(!self.has_flags()) {
if let Some(wflags) = cmd.family.extra_warnings_flags() {
cmd.push_cc_arg(wflags.into());
}
}
for flag in self.flags.iter() {
- cmd.args.push(flag.into());
+ cmd.args.push((**flag).into());
}
for flag in self.flags_supported.iter() {
if self.is_flag_supported(flag).unwrap_or(false) {
- cmd.push_cc_arg(flag.into());
+ cmd.push_cc_arg((**flag).into());
}
}
- for &(ref key, ref value) in self.definitions.iter() {
+ for (key, value) in self.definitions.iter() {
if let Some(ref value) = *value {
cmd.args.push(format!("-D{}={}", key, value).into());
} else {
@@ -1573,9 +1800,8 @@ impl Build {
Some(true) => "-MT",
Some(false) => "-MD",
None => {
- let features = self
- .getenv("CARGO_CFG_TARGET_FEATURE")
- .unwrap_or(String::new());
+ let features = self.getenv("CARGO_CFG_TARGET_FEATURE");
+ let features = features.as_deref().unwrap_or_default();
if features.contains("crt-static") {
"-MT"
} else {
@@ -1602,6 +1828,13 @@ impl Build {
cmd.push_opt_unless_duplicate(format!("-O{}", opt_level).into());
}
+ if cmd.family == ToolFamily::Clang && target.contains("windows") {
+ // Disambiguate mingw and msvc on Windows. The problem is that,
+ // depending on its origin, clang can default to a mismatching
+ // run-time.
+ cmd.push_cc_arg(format!("--target={}", target).into());
+ }
+
if cmd.family == ToolFamily::Clang && target.contains("android") {
// For compatibility with code that doesn't use pre-defined `__ANDROID__` macro.
// If compiler used via ndk-build or cmake (officially supported build methods)
@@ -1611,7 +1844,10 @@ impl Build {
cmd.push_opt_unless_duplicate("-DANDROID".into());
}
- if !target.contains("apple-ios") && !target.contains("apple-watchos") {
+ if !target.contains("apple-ios")
+ && !target.contains("apple-watchos")
+ && !target.contains("apple-tvos")
+ {
cmd.push_cc_arg("-ffunction-sections".into());
cmd.push_cc_arg("-fdata-sections".into());
}
@@ -1645,55 +1881,50 @@ impl Build {
family.add_force_frame_pointer(cmd);
}
+ if !cmd.is_like_msvc() {
+ if target.contains("i686") || target.contains("i586") {
+ cmd.args.push("-m32".into());
+ } else if target == "x86_64-unknown-linux-gnux32" {
+ cmd.args.push("-mx32".into());
+ } else if target.contains("x86_64") || target.contains("powerpc64") {
+ cmd.args.push("-m64".into());
+ }
+ }
+
// Target flags
+ if target.contains("-apple-") {
+ self.apple_flags(cmd, target)?;
+ } else {
+ self.target_flags(cmd, target);
+ }
+
+ if self.static_flag.unwrap_or(false) {
+ cmd.args.push("-static".into());
+ }
+ if self.shared_flag.unwrap_or(false) {
+ cmd.args.push("-shared".into());
+ }
+
+ if self.cpp {
+ match (self.cpp_set_stdlib.as_ref(), cmd.family) {
+ (None, _) => {}
+ (Some(stdlib), ToolFamily::Gnu) | (Some(stdlib), ToolFamily::Clang) => {
+ cmd.push_cc_arg(format!("-stdlib=lib{}", stdlib).into());
+ }
+ _ => {
+ self.cargo_output.print_warning(&format_args!("cpp_set_stdlib is specified, but the {:?} compiler does not support this option, ignored", cmd.family));
+ }
+ }
+ }
+
+ Ok(())
+ }
+
+ fn target_flags(&self, cmd: &mut Tool, target: &str) {
match cmd.family {
ToolFamily::Clang => {
- if !(target.contains("android")
- && android_clang_compiler_uses_target_arg_internally(&cmd.path))
- {
- if target.contains("darwin") {
- if let Some(arch) =
- map_darwin_target_from_rust_to_compiler_architecture(target)
- {
- cmd.args
- .push(format!("--target={}-apple-darwin", arch).into());
- }
- } else if target.contains("macabi") {
- if let Some(arch) =
- map_darwin_target_from_rust_to_compiler_architecture(target)
- {
- cmd.args
- .push(format!("--target={}-apple-ios-macabi", arch).into());
- }
- } else if target.contains("ios-sim") {
- if let Some(arch) =
- map_darwin_target_from_rust_to_compiler_architecture(target)
- {
- let deployment_target = env::var("IPHONEOS_DEPLOYMENT_TARGET")
- .unwrap_or_else(|_| "7.0".into());
- cmd.args.push(
- format!(
- "--target={}-apple-ios{}-simulator",
- arch, deployment_target
- )
- .into(),
- );
- }
- } else if target.contains("watchos-sim") {
- if let Some(arch) =
- map_darwin_target_from_rust_to_compiler_architecture(target)
- {
- let deployment_target = env::var("WATCHOS_DEPLOYMENT_TARGET")
- .unwrap_or_else(|_| "5.0".into());
- cmd.args.push(
- format!(
- "--target={}-apple-watchos{}-simulator",
- arch, deployment_target
- )
- .into(),
- );
- }
- } else if target.starts_with("riscv64gc-") {
+ if !(target.contains("android") && cmd.has_internal_target_arg) {
+ if target.starts_with("riscv64gc-") {
cmd.args.push(
format!("--target={}", target.replace("riscv64gc", "riscv64")).into(),
);
@@ -1709,6 +1940,30 @@ impl Build {
} else if target.contains("aarch64") {
cmd.args.push("--target=aarch64-unknown-windows-gnu".into())
}
+ } else if target.ends_with("-freebsd") {
+ // FreeBSD only supports C++11 and above when compiling against libc++
+ // (available from FreeBSD 10 onwards). Under FreeBSD, clang uses libc++ by
+ // default on FreeBSD 10 and newer unless `--target` is manually passed to
+ // the compiler, in which case its default behavior differs:
+ // * If --target=xxx-unknown-freebsdX(.Y) is specified and X is greater than
+ // or equal to 10, clang++ uses libc++
+ // * If --target=xxx-unknown-freebsd is specified (without a version),
+ // clang++ cannot assume libc++ is available and reverts to a default of
+ // libstdc++ (this behavior was changed in llvm 14).
+ //
+ // This breaks C++11 (or greater) builds if targeting FreeBSD with the
+ // generic xxx-unknown-freebsd triple on clang 13 or below *without*
+ // explicitly specifying that libc++ should be used.
+ // When cross-compiling, we can't infer from the rust/cargo target triple
+ // which major version of FreeBSD we are targeting, so we need to make sure
+ // that libc++ is used (unless the user has explicitly specified otherwise).
+ // There's no compelling reason to use a different approach when compiling
+ // natively.
+ if self.cpp && self.cpp_set_stdlib.is_none() {
+ cmd.push_cc_arg("-stdlib=libc++".into());
+ }
+
+ cmd.push_cc_arg(format!("--target={}", target).into());
} else {
cmd.push_cc_arg(format!("--target={}", target).into());
}
@@ -1732,6 +1987,8 @@ impl Build {
} else {
if target.contains("i586") {
cmd.push_cc_arg("-arch:IA32".into());
+ } else if target.contains("arm64ec") {
+ cmd.push_cc_arg("-arm64EC".into());
}
}
@@ -1750,30 +2007,13 @@ impl Build {
}
}
ToolFamily::Gnu => {
- if target.contains("i686") || target.contains("i586") {
- cmd.args.push("-m32".into());
- } else if target == "x86_64-unknown-linux-gnux32" {
- cmd.args.push("-mx32".into());
- } else if target.contains("x86_64") || target.contains("powerpc64") {
- cmd.args.push("-m64".into());
- }
-
- if target.contains("darwin") {
- if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
- {
- cmd.args.push("-arch".into());
- cmd.args.push(arch.into());
- }
- }
-
if target.contains("-kmc-solid_") {
cmd.args.push("-finput-charset=utf-8".into());
}
if self.static_flag.is_none() {
- let features = self
- .getenv("CARGO_CFG_TARGET_FEATURE")
- .unwrap_or(String::new());
+ let features = self.getenv("CARGO_CFG_TARGET_FEATURE");
+ let features = features.as_deref().unwrap_or_default();
if features.contains("crt-static") {
cmd.args.push("-static".into());
}
@@ -1927,64 +2167,38 @@ impl Build {
let mut parts = target.split('-');
if let Some(arch) = parts.next() {
let arch = &arch[5..];
- if target.contains("linux") && arch.starts_with("64") {
- cmd.args.push(("-march=rv64gc").into());
- cmd.args.push("-mabi=lp64d".into());
- } else if target.contains("freebsd") && arch.starts_with("64") {
- cmd.args.push(("-march=rv64gc").into());
- cmd.args.push("-mabi=lp64d".into());
- } else if target.contains("openbsd") && arch.starts_with("64") {
- cmd.args.push(("-march=rv64gc").into());
- cmd.args.push("-mabi=lp64d".into());
- } else if target.contains("linux") && arch.starts_with("32") {
- cmd.args.push(("-march=rv32gc").into());
- cmd.args.push("-mabi=ilp32d".into());
- } else if arch.starts_with("64") {
- cmd.args.push(("-march=rv".to_owned() + arch).into());
- cmd.args.push("-mabi=lp64".into());
+ if arch.starts_with("64") {
+ if target.contains("linux")
+ | target.contains("freebsd")
+ | target.contains("netbsd")
+ | target.contains("linux")
+ {
+ cmd.args.push(("-march=rv64gc").into());
+ cmd.args.push("-mabi=lp64d".into());
+ } else {
+ cmd.args.push(("-march=rv".to_owned() + arch).into());
+ cmd.args.push("-mabi=lp64".into());
+ }
+ } else if arch.starts_with("32") {
+ if target.contains("linux") {
+ cmd.args.push(("-march=rv32gc").into());
+ cmd.args.push("-mabi=ilp32d".into());
+ } else {
+ cmd.args.push(("-march=rv".to_owned() + arch).into());
+ cmd.args.push("-mabi=ilp32".into());
+ }
} else {
- cmd.args.push(("-march=rv".to_owned() + arch).into());
- cmd.args.push("-mabi=ilp32".into());
+ cmd.args.push("-mcmodel=medany".into());
}
- cmd.args.push("-mcmodel=medany".into());
}
}
}
}
-
- if target.contains("apple-ios") || target.contains("apple-watchos") {
- self.ios_watchos_flags(cmd)?;
- }
-
- if self.static_flag.unwrap_or(false) {
- cmd.args.push("-static".into());
- }
- if self.shared_flag.unwrap_or(false) {
- cmd.args.push("-shared".into());
- }
-
- if self.cpp {
- match (self.cpp_set_stdlib.as_ref(), cmd.family) {
- (None, _) => {}
- (Some(stdlib), ToolFamily::Gnu) | (Some(stdlib), ToolFamily::Clang) => {
- cmd.push_cc_arg(format!("-stdlib=lib{}", stdlib).into());
- }
- _ => {
- println!(
- "cargo:warning=cpp_set_stdlib is specified, but the {:?} compiler \
- does not support this option, ignored",
- cmd.family
- );
- }
- }
- }
-
- Ok(())
}
fn has_flags(&self) -> bool {
let flags_env_var_name = if self.cpp { "CXXFLAGS" } else { "CFLAGS" };
- let flags_env_var_value = self.get_var(flags_env_var_name);
+ let flags_env_var_value = self.getenv_with_target_prefixes(flags_env_var_name);
if let Ok(_) = flags_env_var_value {
true
} else {
@@ -2006,20 +2220,33 @@ impl Build {
let mut cmd = windows_registry::find(&target, tool).unwrap_or_else(|| self.cmd(tool));
cmd.arg("-nologo"); // undocumented, yet working with armasm[64]
for directory in self.include_directories.iter() {
- cmd.arg("-I").arg(directory);
+ cmd.arg("-I").arg(&**directory);
}
if target.contains("aarch64") || target.contains("arm") {
if self.get_debug() {
cmd.arg("-g");
}
- println!("cargo:warning=The MSVC ARM assemblers do not support -D flags");
+ for (key, value) in self.definitions.iter() {
+ cmd.arg("-PreDefine");
+ if let Some(ref value) = *value {
+ if let Ok(i) = value.parse::<i32>() {
+ cmd.arg(&format!("{} SETA {}", key, i));
+ } else if value.starts_with('"') && value.ends_with('"') {
+ cmd.arg(&format!("{} SETS {}", key, value));
+ } else {
+ cmd.arg(&format!("{} SETS \"{}\"", key, value));
+ }
+ } else {
+ cmd.arg(&format!("{} SETL {}", key, "{TRUE}"));
+ }
+ }
} else {
if self.get_debug() {
cmd.arg("-Zi");
}
- for &(ref key, ref value) in self.definitions.iter() {
+ for (key, value) in self.definitions.iter() {
if let Some(ref value) = *value {
cmd.arg(&format!("-D{}={}", key, value));
} else {
@@ -2031,9 +2258,6 @@ impl Build {
if target.contains("i686") || target.contains("i586") {
cmd.arg("-safeseh");
}
- for flag in self.flags.iter() {
- cmd.arg(flag);
- }
Ok((cmd, tool.to_string()))
}
@@ -2041,15 +2265,15 @@ impl Build {
fn assemble(&self, lib_name: &str, dst: &Path, objs: &[Object]) -> Result<(), Error> {
// Delete the destination if it exists as we want to
// create on the first iteration instead of appending.
- let _ = fs::remove_file(&dst);
+ let _ = fs::remove_file(dst);
// Add objects to the archive in limited-length batches. This helps keep
// the length of the command line within a reasonable length to avoid
// blowing system limits on limiting platforms like Windows.
let objs: Vec<_> = objs
.iter()
- .map(|o| o.dst.clone())
- .chain(self.objects.clone())
+ .map(|o| o.dst.as_path())
+ .chain(self.objects.iter().map(std::ops::Deref::deref))
.collect();
for chunk in objs.chunks(100) {
self.assemble_progressive(dst, chunk)?;
@@ -2062,12 +2286,9 @@ impl Build {
let out_dir = self.get_out_dir()?;
let dlink = out_dir.join(lib_name.to_owned() + "_dlink.o");
let mut nvcc = self.get_compiler().to_command();
- nvcc.arg("--device-link")
- .arg("-o")
- .arg(dlink.clone())
- .arg(dst);
- run(&mut nvcc, "nvcc")?;
- self.assemble_progressive(dst, &[dlink])?;
+ nvcc.arg("--device-link").arg("-o").arg(&dlink).arg(dst);
+ run(&mut nvcc, "nvcc", &self.cargo_output)?;
+ self.assemble_progressive(dst, &[dlink.as_path()])?;
}
let target = self.get_target()?;
@@ -2078,9 +2299,9 @@ impl Build {
let lib_dst = dst.with_file_name(format!("{}.lib", lib_name));
let _ = fs::remove_file(&lib_dst);
- match fs::hard_link(&dst, &lib_dst).or_else(|_| {
+ match fs::hard_link(dst, &lib_dst).or_else(|_| {
// if hard-link fails, just copy (ignoring the number of bytes written)
- fs::copy(&dst, &lib_dst).map(|_| ())
+ fs::copy(dst, &lib_dst).map(|_| ())
}) {
Ok(_) => (),
Err(_) => {
@@ -2094,23 +2315,31 @@ impl Build {
// Non-msvc targets (those using `ar`) need a separate step to add
// the symbol table to archives since our construction command of
// `cq` doesn't add it for us.
- let (mut ar, cmd) = self.get_ar()?;
- run(ar.arg("s").arg(dst), &cmd)?;
+ let (mut ar, cmd, _any_flags) = self.get_ar()?;
+
+ // NOTE: We add `s` even if flags were passed using $ARFLAGS/ar_flag, because `s`
+ // here represents a _mode_, not an arbitrary flag. Further discussion of this choice
+ // can be seen in https://github.com/rust-lang/cc-rs/pull/763.
+ run(ar.arg("s").arg(dst), &cmd, &self.cargo_output)?;
}
Ok(())
}
- fn assemble_progressive(&self, dst: &Path, objs: &[PathBuf]) -> Result<(), Error> {
+ fn assemble_progressive(&self, dst: &Path, objs: &[&Path]) -> Result<(), Error> {
let target = self.get_target()?;
if target.contains("msvc") {
- let (mut cmd, program) = self.get_ar()?;
+ let (mut cmd, program, any_flags) = self.get_ar()?;
+ // NOTE: -out: here is an I/O flag, and so must be included even if $ARFLAGS/ar_flag is
+ // in use. -nologo on the other hand is just a regular flag, and one that we'll skip if
+ // the caller has explicitly dictated the flags they want. See
+ // https://github.com/rust-lang/cc-rs/pull/763 for further discussion.
let mut out = OsString::from("-out:");
out.push(dst);
- cmd.arg(out).arg("-nologo");
- for flag in self.ar_flags.iter() {
- cmd.arg(flag);
+ cmd.arg(out);
+ if !any_flags {
+ cmd.arg("-nologo");
}
// If the library file already exists, add the library name
// as an argument to let lib.exe know we are appending the objs.
@@ -2118,9 +2347,9 @@ impl Build {
cmd.arg(dst);
}
cmd.args(objs);
- run(&mut cmd, &program)?;
+ run(&mut cmd, &program, &self.cargo_output)?;
} else {
- let (mut ar, cmd) = self.get_ar()?;
+ let (mut ar, cmd, _any_flags) = self.get_ar()?;
// Set an environment variable to tell the OSX archiver to ensure
// that all dates listed in the archive are zero, improving
@@ -2145,46 +2374,35 @@ impl Build {
// In any case if this doesn't end up getting read, it shouldn't
// cause that many issues!
ar.env("ZERO_AR_DATE", "1");
- for flag in self.ar_flags.iter() {
- ar.arg(flag);
- }
- run(ar.arg("cq").arg(dst).args(objs), &cmd)?;
+
+ // NOTE: We add cq here regardless of whether $ARFLAGS/ar_flag have been used because
+ // it dictates the _mode_ ar runs in, which the setter of $ARFLAGS/ar_flag can't
+ // dictate. See https://github.com/rust-lang/cc-rs/pull/763 for further discussion.
+ run(ar.arg("cq").arg(dst).args(objs), &cmd, &self.cargo_output)?;
}
Ok(())
}
- fn ios_watchos_flags(&self, cmd: &mut Tool) -> Result<(), Error> {
- enum ArchSpec {
- Device(&'static str),
- Simulator(&'static str),
- Catalyst(&'static str),
- }
-
- enum Os {
- Ios,
- WatchOs,
- }
- impl Display for Os {
- fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
- match self {
- Os::Ios => f.write_str("iOS"),
- Os::WatchOs => f.write_str("WatchOS"),
- }
- }
- }
-
- let target = self.get_target()?;
- let os = if target.contains("-watchos") {
- Os::WatchOs
+ fn apple_flags(&self, cmd: &mut Tool, target: &str) -> Result<(), Error> {
+ let os = if target.contains("-darwin") {
+ AppleOs::MacOs
+ } else if target.contains("-watchos") {
+ AppleOs::WatchOs
+ } else if target.contains("-tvos") {
+ AppleOs::TvOs
} else {
- Os::Ios
+ AppleOs::Ios
+ };
+ let is_mac = match os {
+ AppleOs::MacOs => true,
+ _ => false,
};
- let arch = target.split('-').nth(0).ok_or_else(|| {
+ let arch_str = target.split('-').nth(0).ok_or_else(|| {
Error::new(
ErrorKind::ArchitectureInvalid,
- format!("Unknown architecture for {} target.", os).as_str(),
+ format!("Unknown architecture for {:?} target.", os),
)
})?;
@@ -2193,16 +2411,27 @@ impl Build {
None => false,
};
- let is_sim = match target.split('-').nth(3) {
+ let is_arm_sim = match target.split('-').nth(3) {
Some(v) => v == "sim",
None => false,
};
- let arch = if is_catalyst {
- match arch {
- "arm64e" => ArchSpec::Catalyst("arm64e"),
- "arm64" | "aarch64" => ArchSpec::Catalyst("arm64"),
- "x86_64" => ArchSpec::Catalyst("-m64"),
+ let arch = if is_mac {
+ match arch_str {
+ "i686" => AppleArchSpec::Device("-m32"),
+ "x86_64" | "x86_64h" | "aarch64" => AppleArchSpec::Device("-m64"),
+ _ => {
+ return Err(Error::new(
+ ErrorKind::ArchitectureInvalid,
+ "Unknown architecture for macOS target.",
+ ));
+ }
+ }
+ } else if is_catalyst {
+ match arch_str {
+ "arm64e" => AppleArchSpec::Catalyst("arm64e"),
+ "arm64" | "aarch64" => AppleArchSpec::Catalyst("arm64"),
+ "x86_64" | "x86_64h" => AppleArchSpec::Catalyst("-m64"),
_ => {
return Err(Error::new(
ErrorKind::ArchitectureInvalid,
@@ -2210,105 +2439,192 @@ impl Build {
));
}
}
- } else if is_sim {
- match arch {
- "arm64" | "aarch64" => ArchSpec::Simulator("-arch arm64"),
- "x86_64" => ArchSpec::Simulator("-m64"),
+ } else if is_arm_sim {
+ match arch_str {
+ "arm64" | "aarch64" => AppleArchSpec::Simulator("arm64"),
+ "x86_64" | "x86_64h" => AppleArchSpec::Simulator("-m64"),
_ => {
return Err(Error::new(
ErrorKind::ArchitectureInvalid,
- "Unknown architecture for iOS simulator target.",
+ "Unknown architecture for simulator target.",
));
}
}
} else {
- match arch {
- "arm" | "armv7" | "thumbv7" => ArchSpec::Device("armv7"),
- "armv7k" => ArchSpec::Device("armv7k"),
- "armv7s" | "thumbv7s" => ArchSpec::Device("armv7s"),
- "arm64e" => ArchSpec::Device("arm64e"),
- "arm64" | "aarch64" => ArchSpec::Device("arm64"),
- "arm64_32" => ArchSpec::Device("arm64_32"),
- "i386" | "i686" => ArchSpec::Simulator("-m32"),
- "x86_64" => ArchSpec::Simulator("-m64"),
+ match arch_str {
+ "arm" | "armv7" | "thumbv7" => AppleArchSpec::Device("armv7"),
+ "armv7k" => AppleArchSpec::Device("armv7k"),
+ "armv7s" | "thumbv7s" => AppleArchSpec::Device("armv7s"),
+ "arm64e" => AppleArchSpec::Device("arm64e"),
+ "arm64" | "aarch64" => AppleArchSpec::Device("arm64"),
+ "arm64_32" => AppleArchSpec::Device("arm64_32"),
+ "i386" | "i686" => AppleArchSpec::Simulator("-m32"),
+ "x86_64" | "x86_64h" => AppleArchSpec::Simulator("-m64"),
_ => {
return Err(Error::new(
ErrorKind::ArchitectureInvalid,
- format!("Unknown architecture for {} target.", os).as_str(),
+ format!("Unknown architecture for {:?} target.", os),
));
}
}
};
- let (sdk_prefix, sim_prefix, min_version) = match os {
- Os::Ios => (
- "iphone",
- "ios-",
- std::env::var("IPHONEOS_DEPLOYMENT_TARGET").unwrap_or_else(|_| "7.0".into()),
- ),
- Os::WatchOs => (
- "watch",
- "watch",
- std::env::var("WATCHOS_DEPLOYMENT_TARGET").unwrap_or_else(|_| "2.0".into()),
- ),
- };
+ let sdk_details = apple_os_sdk_parts(os, &arch);
+ let min_version = self.apple_deployment_version(os, Some(arch_str), &sdk_details.sdk);
- let sdk = match arch {
- ArchSpec::Device(arch) => {
- cmd.args.push("-arch".into());
- cmd.args.push(arch.into());
+ match arch {
+ AppleArchSpec::Device(_) if is_mac => {
cmd.args
- .push(format!("-m{}os-version-min={}", sdk_prefix, min_version).into());
- format!("{}os", sdk_prefix)
+ .push(format!("-mmacosx-version-min={}", min_version).into());
}
- ArchSpec::Simulator(arch) => {
+ AppleArchSpec::Device(arch) => {
+ cmd.args.push("-arch".into());
cmd.args.push(arch.into());
- cmd.args
- .push(format!("-m{}simulator-version-min={}", sim_prefix, min_version).into());
- format!("{}simulator", sdk_prefix)
+ cmd.args.push(
+ format!("-m{}os-version-min={}", sdk_details.sdk_prefix, min_version).into(),
+ );
}
- ArchSpec::Catalyst(_) => "macosx".to_owned(),
+ AppleArchSpec::Simulator(arch) => {
+ if arch.starts_with('-') {
+ // -m32 or -m64
+ cmd.args.push(arch.into());
+ } else {
+ cmd.args.push("-arch".into());
+ cmd.args.push(arch.into());
+ }
+ cmd.args.push(
+ format!(
+ "-m{}simulator-version-min={}",
+ sdk_details.sim_prefix, min_version
+ )
+ .into(),
+ );
+ }
+ AppleArchSpec::Catalyst(_) => {}
};
- self.print(&format!("Detecting {} SDK path for {}", os, sdk));
- let sdk_path = if let Some(sdkroot) = env::var_os("SDKROOT") {
- sdkroot
- } else {
- self.apple_sdk_root(sdk.as_str())?
- };
+ // AppleClang sometimes requires sysroot even for darwin
+ if cmd.is_xctoolchain_clang() || !target.ends_with("-darwin") {
+ self.cargo_output.print_metadata(&format_args!(
+ "Detecting {:?} SDK path for {}",
+ os, sdk_details.sdk
+ ));
+ let sdk_path = self.apple_sdk_root(&sdk_details.sdk)?;
- cmd.args.push("-isysroot".into());
- cmd.args.push(sdk_path);
- cmd.args.push("-fembed-bitcode".into());
- /*
- * TODO we probably ultimately want the -fembed-bitcode-marker flag
- * but can't have it now because of an issue in LLVM:
- * https://github.com/rust-lang/cc-rs/issues/301
- * https://github.com/rust-lang/rust/pull/48896#comment-372192660
- */
- /*
- if self.get_opt_level()? == "0" {
- cmd.args.push("-fembed-bitcode-marker".into());
- }
- */
+ cmd.args.push("-isysroot".into());
+ cmd.args.push(sdk_path);
+ }
+
+ match cmd.family {
+ ToolFamily::Gnu => {
+ if target.contains("darwin") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args.push("-arch".into());
+ cmd.args.push(arch.into());
+ }
+ }
+ }
+ ToolFamily::Clang => {
+ if target.contains("darwin") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args
+ .push(format!("--target={}-apple-darwin", arch).into());
+ }
+ } else if target.contains("macabi") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args
+ .push(format!("--target={}-apple-ios-macabi", arch).into());
+ }
+ } else if target.contains("ios-sim") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args.push(
+ format!("--target={}-apple-ios{}-simulator", arch, min_version).into(),
+ );
+ }
+ } else if target.contains("watchos-sim") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args.push(
+ format!("--target={}-apple-watchos{}-simulator", arch, min_version)
+ .into(),
+ );
+ }
+ } else if target.contains("tvos-sim") || target.contains("x86_64-apple-tvos") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args.push(
+ format!("--target={}-apple-tvos{}-simulator", arch, min_version).into(),
+ );
+ }
+ } else if target.contains("aarch64-apple-tvos") {
+ if let Some(arch) = map_darwin_target_from_rust_to_compiler_architecture(target)
+ {
+ cmd.args
+ .push(format!("--target={}-apple-tvos{}", arch, min_version).into());
+ }
+ }
+ }
+ _ => unreachable!("unexpected compiler for apple architectures"),
+ }
+
+ if let AppleArchSpec::Catalyst(_) = arch {
+ // Mac Catalyst uses the macOS SDK, but to compile against and
+ // link to iOS-specific frameworks, we should have the support
+ // library stubs in the include and library search path.
+ let sdk_path = self.apple_sdk_root(&sdk_details.sdk)?;
+ let ios_support = PathBuf::from(sdk_path).join("/System/iOSSupport");
+
+ cmd.args.extend([
+ // Header search path
+ OsString::from("-isystem"),
+ ios_support.join("/usr/include").into(),
+ // Framework header search path
+ OsString::from("-iframework"),
+ ios_support.join("/System/Library/Frameworks").into(),
+ // Library search path
+ {
+ let mut s = OsString::from("-L");
+ s.push(&ios_support.join("/usr/lib"));
+ s
+ },
+ // Framework linker search path
+ {
+ // Technically, we _could_ avoid emitting `-F`, as
+ // `-iframework` implies it, but let's keep it in for
+ // clarity.
+ let mut s = OsString::from("-F");
+ s.push(&ios_support.join("/System/Library/Frameworks"));
+ s
+ },
+ ]);
+ }
Ok(())
}
fn cmd<P: AsRef<OsStr>>(&self, prog: P) -> Command {
let mut cmd = Command::new(prog);
- for &(ref a, ref b) in self.env.iter() {
+ for (a, b) in self.env.iter() {
cmd.env(a, b);
}
cmd
}
fn get_base_compiler(&self) -> Result<Tool, Error> {
- if let Some(ref c) = self.compiler {
- return Ok(Tool::new(c.clone()));
+ if let Some(c) = &self.compiler {
+ return Ok(Tool::new(
+ (**c).to_owned(),
+ &self.cached_compiler_family,
+ &self.cargo_output,
+ ));
}
let host = self.get_host()?;
let target = self.get_target()?;
+ let target = &*target;
let (env, msvc, gnu, traditional, clang) = if self.cpp {
("CXX", "cl.exe", "g++", "c++", "clang++")
} else {
@@ -2325,7 +2641,7 @@ impl Build {
traditional
};
- let cl_exe = windows_registry::find_tool(&target, "cl.exe");
+ let cl_exe = windows_registry::find_tool(target, "cl.exe");
let tool_opt: Option<Tool> = self
.env_tool(env)
@@ -2340,7 +2656,12 @@ impl Build {
// semi-buggy build scripts which are shared in
// makefiles/configure scripts (where spaces are far more
// lenient)
- let mut t = Tool::with_clang_driver(PathBuf::from(tool.trim()), driver_mode);
+ let mut t = Tool::with_clang_driver(
+ tool,
+ driver_mode,
+ &self.cached_compiler_family,
+ &self.cargo_output,
+ );
if let Some(cc_wrapper) = wrapper {
t.cc_wrapper_path = Some(PathBuf::from(cc_wrapper));
}
@@ -2354,12 +2675,20 @@ impl Build {
let tool = if self.cpp { "em++" } else { "emcc" };
// Windows uses bat files so we have to be a bit more specific
if cfg!(windows) {
- let mut t = Tool::new(PathBuf::from("cmd"));
+ let mut t = Tool::new(
+ PathBuf::from("cmd"),
+ &self.cached_compiler_family,
+ &self.cargo_output,
+ );
t.args.push("/c".into());
t.args.push(format!("{}.bat", tool).into());
Some(t)
} else {
- Some(Tool::new(PathBuf::from(tool)))
+ Some(Tool::new(
+ PathBuf::from(tool),
+ &self.cached_compiler_family,
+ &self.cargo_output,
+ ))
}
} else {
None
@@ -2377,12 +2706,13 @@ impl Build {
let cc = if target.contains("llvm") { clang } else { gnu };
format!("{}.exe", cc)
}
- } else if target.contains("apple-ios") {
- clang.to_string()
- } else if target.contains("apple-watchos") {
+ } else if target.contains("apple-ios")
+ | target.contains("apple-watchos")
+ | target.contains("apple-tvos")
+ {
clang.to_string()
} else if target.contains("android") {
- autodetect_android_compiler(&target, &host, gnu, clang)
+ autodetect_android_compiler(target, &host, gnu, clang)
} else if target.contains("cloudabi") {
format!("{}-{}", target, traditional)
} else if target == "wasm32-wasi"
@@ -2400,8 +2730,8 @@ impl Build {
format!("arm-kmc-eabi-{}", gnu)
} else if target.starts_with("aarch64-kmc-solid_") {
format!("aarch64-kmc-elf-{}", gnu)
- } else if self.get_host()? != target {
- let prefix = self.prefix_for_target(&target);
+ } else if &*self.get_host()? != target {
+ let prefix = self.prefix_for_target(target);
match prefix {
Some(prefix) => {
let cc = if target.contains("llvm") { clang } else { gnu };
@@ -2413,7 +2743,11 @@ impl Build {
default.to_string()
};
- let mut t = Tool::new(PathBuf::from(compiler));
+ let mut t = Tool::new(
+ PathBuf::from(compiler),
+ &self.cached_compiler_family,
+ &self.cargo_output,
+ );
if let Some(cc_wrapper) = Self::rustc_wrapper_fallback() {
t.cc_wrapper_path = Some(PathBuf::from(cc_wrapper));
}
@@ -2426,11 +2760,17 @@ impl Build {
tool.args.is_empty(),
"CUDA compilation currently assumes empty pre-existing args"
);
- let nvcc = match self.get_var("NVCC") {
- Err(_) => "nvcc".into(),
- Ok(nvcc) => nvcc,
+ let nvcc = match self.getenv_with_target_prefixes("NVCC") {
+ Err(_) => PathBuf::from("nvcc"),
+ Ok(nvcc) => PathBuf::from(&*nvcc),
};
- let mut nvcc_tool = Tool::with_features(PathBuf::from(nvcc), None, self.cuda);
+ let mut nvcc_tool = Tool::with_features(
+ nvcc,
+ None,
+ self.cuda,
+ &self.cached_compiler_family,
+ &self.cargo_output,
+ );
nvcc_tool
.args
.push(format!("-ccbin={}", tool.path.display()).into());
@@ -2455,16 +2795,17 @@ impl Build {
{
if let Some(path) = tool.path.file_name() {
let file_name = path.to_str().unwrap().to_owned();
- let (target, clang) = file_name.split_at(file_name.rfind("-").unwrap());
+ let (target, clang) = file_name.split_at(file_name.rfind('-').unwrap());
- tool.path.set_file_name(clang.trim_start_matches("-"));
+ tool.has_internal_target_arg = true;
+ tool.path.set_file_name(clang.trim_start_matches('-'));
tool.path.set_extension("exe");
tool.args.push(format!("--target={}", target).into());
// Additionally, shell scripts for target i686-linux-android versions 16 to 24
// pass the `mstackrealign` option so we do that here as well.
if target.contains("i686-linux-android") {
- let (_, version) = target.split_at(target.rfind("d").unwrap() + 1);
+ let (_, version) = target.split_at(target.rfind('d').unwrap() + 1);
if let Ok(version) = version.parse::<u32>() {
if version > 15 && version < 25 {
tool.args.push("-mstackrealign".into());
@@ -2489,41 +2830,18 @@ impl Build {
&& tool.env.len() == 0
&& target.contains("msvc")
{
- for &(ref k, ref v) in cl_exe.env.iter() {
+ for (k, v) in cl_exe.env.iter() {
tool.env.push((k.to_owned(), v.to_owned()));
}
}
}
- Ok(tool)
- }
-
- fn get_var(&self, var_base: &str) -> Result<String, Error> {
- let target = self.get_target()?;
- let host = self.get_host()?;
- let kind = if host == target { "HOST" } else { "TARGET" };
- let target_u = target.replace("-", "_");
- let res = self
- .getenv(&format!("{}_{}", var_base, target))
- .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u)))
- .or_else(|| self.getenv(&format!("{}_{}", kind, var_base)))
- .or_else(|| self.getenv(var_base));
-
- match res {
- Some(res) => Ok(res),
- None => Err(Error::new(
- ErrorKind::EnvVarNotFound,
- &format!("Could not find environment variable {}.", var_base),
- )),
+ if target.contains("msvc") && tool.family == ToolFamily::Gnu {
+ self.cargo_output
+ .print_warning(&"GNU compiler is not supported for this target");
}
- }
- fn envflags(&self, name: &str) -> Vec<String> {
- self.get_var(name)
- .unwrap_or(String::new())
- .split_ascii_whitespace()
- .map(|slice| slice.to_string())
- .collect()
+ Ok(tool)
}
/// Returns a fallback `cc_compiler_wrapper` by introspecting `RUSTC_WRAPPER`
@@ -2545,8 +2863,8 @@ impl Build {
}
/// Returns compiler path, optional modifier name from whitelist, and arguments vec
- fn env_tool(&self, name: &str) -> Option<(String, Option<String>, Vec<String>)> {
- let tool = match self.get_var(name) {
+ fn env_tool(&self, name: &str) -> Option<(PathBuf, Option<String>, Vec<String>)> {
+ let tool = match self.getenv_with_target_prefixes(name) {
Ok(tool) => tool,
Err(_) => return None,
};
@@ -2554,8 +2872,12 @@ impl Build {
// If this is an exact path on the filesystem we don't want to do any
// interpretation at all, just pass it on through. This'll hopefully get
// us to support spaces-in-paths.
- if Path::new(&tool).exists() {
- return Some((tool, None, Vec::new()));
+ if Path::new(&*tool).exists() {
+ return Some((
+ PathBuf::from(&*tool),
+ Self::rustc_wrapper_fallback(),
+ Vec::new(),
+ ));
}
// Ok now we want to handle a couple of scenarios. We'll assume from
@@ -2594,7 +2916,7 @@ impl Build {
if known_wrappers.contains(&file_stem) {
if let Some(compiler) = parts.next() {
return Some((
- compiler.to_string(),
+ compiler.into(),
Some(maybe_wrapper.to_string()),
parts.map(|s| s.to_string()).collect(),
));
@@ -2602,36 +2924,37 @@ impl Build {
}
Some((
- maybe_wrapper.to_string(),
+ maybe_wrapper.into(),
Self::rustc_wrapper_fallback(),
parts.map(|s| s.to_string()).collect(),
))
}
/// Returns the C++ standard library:
- /// 1. If [cpp_link_stdlib](cc::Build::cpp_link_stdlib) is set, uses its value.
+ /// 1. If [`cpp_link_stdlib`](cc::Build::cpp_link_stdlib) is set, uses its value.
/// 2. Else if the `CXXSTDLIB` environment variable is set, uses its value.
/// 3. Else the default is `libc++` for OS X and BSDs, `libc++_shared` for Android,
/// `None` for MSVC and `libstdc++` for anything else.
fn get_cpp_link_stdlib(&self) -> Result<Option<String>, Error> {
- match self.cpp_link_stdlib.clone() {
- Some(s) => Ok(s),
+ match &self.cpp_link_stdlib {
+ Some(s) => Ok(s.as_ref().map(|s| (*s).to_string())),
None => {
- if let Ok(stdlib) = self.get_var("CXXSTDLIB") {
+ if let Ok(stdlib) = self.getenv_with_target_prefixes("CXXSTDLIB") {
if stdlib.is_empty() {
Ok(None)
} else {
- Ok(Some(stdlib))
+ Ok(Some(stdlib.to_string()))
}
} else {
let target = self.get_target()?;
if target.contains("msvc") {
Ok(None)
- } else if target.contains("apple") {
- Ok(Some("c++".to_string()))
- } else if target.contains("freebsd") {
- Ok(Some("c++".to_string()))
- } else if target.contains("openbsd") {
+ } else if target.contains("apple")
+ | target.contains("freebsd")
+ | target.contains("openbsd")
+ | target.contains("aix")
+ | target.contains("linux-ohos")
+ {
Ok(Some("c++".to_string()))
} else if target.contains("android") {
Ok(Some("c++_shared".to_string()))
@@ -2643,101 +2966,243 @@ impl Build {
}
}
- fn get_ar(&self) -> Result<(Command, String), Error> {
- if let Some(ref p) = self.archiver {
- let name = p.file_name().and_then(|s| s.to_str()).unwrap_or("ar");
- return Ok((self.cmd(p), name.to_string()));
+ fn get_ar(&self) -> Result<(Command, String, bool), Error> {
+ self.try_get_archiver_and_flags()
+ }
+
+ /// Get the archiver (ar) that's in use for this configuration.
+ ///
+ /// You can use [`Command::get_program`] to get just the path to the command.
+ ///
+ /// This method will take into account all configuration such as debug
+ /// information, optimization level, include directories, defines, etc.
+ /// Additionally, the archiver binary in use follows the standard
+ /// conventions for this path, e.g. looking at the explicitly set archiver,
+ /// environment variables (a number of which are inspected here), and then
+ /// falling back to the default configuration.
+ ///
+ /// # Panics
+ ///
+ /// Panics if an error occurred while determining the architecture.
+ pub fn get_archiver(&self) -> Command {
+ match self.try_get_archiver() {
+ Ok(tool) => tool,
+ Err(e) => fail(&e.message),
}
- if let Ok(p) = self.get_var("AR") {
- return Ok((self.cmd(&p), p));
+ }
+
+ /// Get the archiver that's in use for this configuration.
+ ///
+ /// This will return a result instead of panicking;
+ /// see [`Self::get_archiver`] for the complete description.
+ pub fn try_get_archiver(&self) -> Result<Command, Error> {
+ Ok(self.try_get_archiver_and_flags()?.0)
+ }
+
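A short usage sketch (editorial, with a placeholder archive name) for the archiver accessor introduced here, e.g. to inspect an archive produced earlier in a build script:

    let mut ar = cc::Build::new().get_archiver();
    let status = ar
        .arg("t")          // list archive members
        .arg("libshim.a")  // placeholder path
        .status()
        .expect("failed to spawn archiver");
    assert!(status.success());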
+ fn try_get_archiver_and_flags(&self) -> Result<(Command, String, bool), Error> {
+ let (mut cmd, name) = self.get_base_archiver()?;
+ let mut any_flags = false;
+ if let Ok(flags) = self.envflags("ARFLAGS") {
+ any_flags = any_flags | !flags.is_empty();
+ cmd.args(flags);
}
- let target = self.get_target()?;
- let default_ar = "ar".to_string();
- let program = if target.contains("android") {
- format!("{}-ar", target.replace("armv7", "arm"))
- } else if target.contains("emscripten") {
- // Windows use bat files so we have to be a bit more specific
- if cfg!(windows) {
- let mut cmd = self.cmd("cmd");
- cmd.arg("/c").arg("emar.bat");
- return Ok((cmd, "emar.bat".to_string()));
- }
+ for flag in &self.ar_flags {
+ any_flags = true;
+ cmd.arg(&**flag);
+ }
+ Ok((cmd, name, any_flags))
+ }
- "emar".to_string()
- } else if target.contains("msvc") {
- let compiler = self.get_base_compiler()?;
- let mut lib = String::new();
- if compiler.family == (ToolFamily::Msvc { clang_cl: true }) {
- // See if there is 'llvm-lib' next to 'clang-cl'
- // Another possibility could be to see if there is 'clang'
- // next to 'clang-cl' and use 'search_programs()' to locate
- // 'llvm-lib'. This is because 'clang-cl' doesn't support
- // the -print-search-dirs option.
- if let Some(mut cmd) = which(&compiler.path) {
- cmd.pop();
- cmd.push("llvm-lib.exe");
- if let Some(llvm_lib) = which(&cmd) {
- lib = llvm_lib.to_str().unwrap().to_owned();
+ fn get_base_archiver(&self) -> Result<(Command, String), Error> {
+ if let Some(ref a) = self.archiver {
+ return Ok((self.cmd(&**a), a.to_string_lossy().into_owned()));
+ }
+
+ self.get_base_archiver_variant("AR", "ar")
+ }
+
+ /// Get the ranlib that's in use for this configuration.
+ ///
+ /// You can use [`Command::get_program`] to get just the path to the command.
+ ///
+ /// This method will take into account all configuration such as debug
+ /// information, optimization level, include directories, defines, etc.
+ /// Additionally, the ranlib binary in use follows the standard
+ /// conventions for this path, e.g. looking at the explicitly set ranlib,
+ /// environment variables (a number of which are inspected here), and then
+ /// falling back to the default configuration.
+ ///
+ /// # Panics
+ ///
+ /// Panics if an error occurred while determining the architecture.
+ pub fn get_ranlib(&self) -> Command {
+ match self.try_get_ranlib() {
+ Ok(tool) => tool,
+ Err(e) => fail(&e.message),
+ }
+ }
+
+ /// Get the ranlib that's in use for this configuration.
+ ///
+ /// This will return a result instead of panicking;
+ /// see [`Self::get_ranlib`] for the complete description.
+ pub fn try_get_ranlib(&self) -> Result<Command, Error> {
+ let mut cmd = self.get_base_ranlib()?;
+ if let Ok(flags) = self.envflags("RANLIBFLAGS") {
+ cmd.args(flags);
+ }
+ Ok(cmd)
+ }
+
+ fn get_base_ranlib(&self) -> Result<Command, Error> {
+ if let Some(ref r) = self.ranlib {
+ return Ok(self.cmd(&**r));
+ }
+
+ Ok(self.get_base_archiver_variant("RANLIB", "ranlib")?.0)
+ }
+
+ fn get_base_archiver_variant(&self, env: &str, tool: &str) -> Result<(Command, String), Error> {
+ let target = self.get_target()?;
+ let mut name = String::new();
+ let tool_opt: Option<Command> = self
+ .env_tool(env)
+ .map(|(tool, _wrapper, args)| {
+ let mut cmd = self.cmd(tool);
+ cmd.args(args);
+ cmd
+ })
+ .or_else(|| {
+ if target.contains("emscripten") {
+ // Windows uses bat files so we have to be a bit more specific
+ if cfg!(windows) {
+ let mut cmd = self.cmd("cmd");
+ name = format!("em{}.bat", tool);
+ cmd.arg("/c").arg(&name);
+ Some(cmd)
+ } else {
+ name = format!("em{}", tool);
+ Some(self.cmd(&name))
}
+ } else if target.starts_with("wasm32") {
+ // Formally speaking one should be able to use this approach,
+ // parsing -print-search-dirs output, to cover all clang targets,
+ // including Android SDKs and other cross-compilation scenarios...
+ // And even extend it to gcc targets by searching for "ar" instead
+ // of "llvm-ar"...
+ let compiler = self.get_base_compiler().ok()?;
+ if compiler.family == ToolFamily::Clang {
+ name = format!("llvm-{}", tool);
+ search_programs(&mut self.cmd(&compiler.path), &name, &self.cargo_output)
+ .map(|name| self.cmd(name))
+ } else {
+ None
+ }
+ } else {
+ None
}
- }
- if lib.is_empty() {
- lib = match windows_registry::find(&target, "lib.exe") {
- Some(t) => return Ok((t, "lib.exe".to_string())),
- None => "lib.exe".to_string(),
- }
- }
- lib
- } else if target.contains("illumos") {
- // The default 'ar' on illumos uses a non-standard flags,
- // but the OS comes bundled with a GNU-compatible variant.
- //
- // Use the GNU-variant to match other Unix systems.
- "gar".to_string()
- } else if self.get_host()? != target {
- match self.prefix_for_target(&target) {
- Some(p) => {
- // GCC uses $target-gcc-ar, whereas binutils uses $target-ar -- try both.
- // Prefer -ar if it exists, as builds of `-gcc-ar` have been observed to be
- // outright broken (such as when targetting freebsd with `--disable-lto`
- // toolchain where the archiver attempts to load the LTO plugin anyway but
- // fails to find one).
- let mut ar = default_ar;
- for &infix in &["", "-gcc"] {
- let target_ar = format!("{}{}-ar", p, infix);
- if Command::new(&target_ar).output().is_ok() {
- ar = target_ar;
- break;
+ });
+
+ let default = tool.to_string();
+ let tool = match tool_opt {
+ Some(t) => t,
+ None => {
+ if target.contains("android") {
+ name = format!("llvm-{}", tool);
+ match Command::new(&name).arg("--version").status() {
+ Ok(status) if status.success() => (),
+ _ => name = format!("{}-{}", target.replace("armv7", "arm"), tool),
+ }
+ self.cmd(&name)
+ } else if target.contains("msvc") {
+ // NOTE: There isn't really a ranlib on msvc, so arguably we should return
+ // `None` somehow here. But in general, callers will already have to be aware
+ // of not running ranlib on Windows anyway, so it feels okay to return lib.exe
+ // here.
+
+ let compiler = self.get_base_compiler()?;
+ let mut lib = String::new();
+ if compiler.family == (ToolFamily::Msvc { clang_cl: true }) {
+ // See if there is 'llvm-lib' next to 'clang-cl'
+ // Another possibility could be to see if there is 'clang'
+ // next to 'clang-cl' and use 'search_programs()' to locate
+ // 'llvm-lib'. This is because 'clang-cl' doesn't support
+ // the -print-search-dirs option.
+ if let Some(mut cmd) = which(&compiler.path, None) {
+ cmd.pop();
+ cmd.push("llvm-lib.exe");
+ if let Some(llvm_lib) = which(&cmd, None) {
+ lib = llvm_lib.to_str().unwrap().to_owned();
+ }
+ }
+ }
+
+ if lib.is_empty() {
+ name = String::from("lib.exe");
+ let mut cmd = match windows_registry::find(&target, "lib.exe") {
+ Some(t) => t,
+ None => self.cmd("lib.exe"),
+ };
+ if target.contains("arm64ec") {
+ cmd.arg("/machine:arm64ec");
}
+ cmd
+ } else {
+ name = lib;
+ self.cmd(&name)
}
- ar
+ } else if target.contains("illumos") {
+ // The default 'ar' on illumos uses non-standard flags,
+ // but the OS comes bundled with a GNU-compatible variant.
+ //
+ // Use the GNU-variant to match other Unix systems.
+ name = format!("g{}", tool);
+ self.cmd(&name)
+ } else if self.get_host()? != target {
+ match self.prefix_for_target(&target) {
+ Some(p) => {
+ // GCC uses $target-gcc-ar, whereas binutils uses $target-ar -- try both.
+ // Prefer -ar if it exists, as builds of `-gcc-ar` have been observed to be
+ // outright broken (such as when targeting freebsd with `--disable-lto`
+ // toolchain where the archiver attempts to load the LTO plugin anyway but
+ // fails to find one).
+ //
+ // The same applies to ranlib.
+ let mut chosen = default;
+ for &infix in &["", "-gcc"] {
+ let target_p = format!("{}{}-{}", p, infix, tool);
+ if Command::new(&target_p).output().is_ok() {
+ chosen = target_p;
+ break;
+ }
+ }
+ name = chosen;
+ self.cmd(&name)
+ }
+ None => {
+ name = default;
+ self.cmd(&name)
+ }
+ }
+ } else {
+ name = default;
+ self.cmd(&name)
}
- None => default_ar,
}
- } else {
- default_ar
};
- Ok((self.cmd(&program), program))
+
+ Ok((tool, name))
}
fn prefix_for_target(&self, target: &str) -> Option<String> {
- // Put aside RUSTC_LINKER's prefix to be used as last resort
- let rustc_linker = self.getenv("RUSTC_LINKER").unwrap_or("".to_string());
- // let linker_prefix = rustc_linker.strip_suffix("-gcc"); // >=1.45.0
- let linker_prefix = if rustc_linker.len() > 4 {
- let (prefix, suffix) = rustc_linker.split_at(rustc_linker.len() - 4);
- if suffix == "-gcc" {
- Some(prefix)
- } else {
- None
- }
- } else {
- None
- };
+ // Put aside RUSTC_LINKER's prefix to be used as a second choice, after CROSS_COMPILE
+ let linker_prefix = self
+ .getenv("RUSTC_LINKER")
+ .and_then(|var| var.strip_suffix("-gcc").map(str::to_string));
// CROSS_COMPILE is of the form: "arm-linux-gnueabi-"
let cc_env = self.getenv("CROSS_COMPILE");
let cross_compile = cc_env.as_ref().map(|s| s.trim_end_matches('-').to_owned());
- cross_compile.or(match &target[..] {
+ cross_compile.or(linker_prefix).or(match &target[..] {
// Note: there is no `aarch64-pc-windows-gnu` target, only `-gnullvm`
"aarch64-pc-windows-gnullvm" => Some("aarch64-w64-mingw32"),
"aarch64-uwp-windows-gnu" => Some("aarch64-w64-mingw32"),
@@ -2774,6 +3239,7 @@ impl Build {
]), // explicit None if not found, so caller knows to fall back
"i686-unknown-linux-musl" => Some("musl"),
"i686-unknown-netbsd" => Some("i486--netbsdelf"),
+ "loongarch64-unknown-linux-gnu" => Some("loongarch64-linux-gnu"),
"mips-unknown-linux-gnu" => Some("mips-linux-gnu"),
"mips-unknown-linux-musl" => Some("mips-linux-musl"),
"mipsel-unknown-linux-gnu" => Some("mipsel-linux-gnu"),
@@ -2794,6 +3260,7 @@ impl Build {
"riscv64-unknown-elf",
"riscv-none-embed",
]),
+ "riscv32imac-esp-espidf" => Some("riscv32-esp-elf"),
"riscv32imac-unknown-none-elf" => self.find_working_gnu_prefix(&[
"riscv32-unknown-elf",
"riscv64-unknown-elf",
@@ -2804,6 +3271,7 @@ impl Build {
"riscv64-unknown-elf",
"riscv-none-embed",
]),
+ "riscv32imc-esp-espidf" => Some("riscv32-esp-elf"),
"riscv32imc-unknown-none-elf" => self.find_working_gnu_prefix(&[
"riscv32-unknown-elf",
"riscv64-unknown-elf",
@@ -2823,6 +3291,7 @@ impl Build {
"riscv32gc-unknown-linux-gnu" => Some("riscv32-linux-gnu"),
"riscv64gc-unknown-linux-musl" => Some("riscv64-linux-musl"),
"riscv32gc-unknown-linux-musl" => Some("riscv32-linux-musl"),
+ "riscv64gc-unknown-netbsd" => Some("riscv64--netbsd"),
"s390x-unknown-linux-gnu" => Some("s390x-linux-gnu"),
"sparc-unknown-linux-gnu" => Some("sparc-linux-gnu"),
"sparc64-unknown-linux-gnu" => Some("sparc64-linux-gnu"),
@@ -2834,6 +3303,7 @@ impl Build {
"armebv7r-none-eabihf" => Some("arm-none-eabi"),
"armv7r-none-eabi" => Some("arm-none-eabi"),
"armv7r-none-eabihf" => Some("arm-none-eabi"),
+ "armv8r-none-eabihf" => Some("arm-none-eabi"),
"thumbv6m-none-eabi" => Some("arm-none-eabi"),
"thumbv7em-none-eabi" => Some("arm-none-eabi"),
"thumbv7em-none-eabihf" => Some("arm-none-eabi"),
@@ -2850,7 +3320,7 @@ impl Build {
]), // explicit None if not found, so caller knows to fall back
"x86_64-unknown-linux-musl" => Some("musl"),
"x86_64-unknown-netbsd" => Some("x86_64--netbsd"),
- _ => linker_prefix,
+ _ => None,
}
.map(|x| x.to_owned()))
}
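A sketch of the precedence implemented above, with a single table entry standing in for the full per-target list: an explicit CROSS_COMPILE wins, then a prefix recovered from a "<prefix>-gcc" RUSTC_LINKER, then the built-in table.

use std::env;

fn resolve_prefix(target: &str) -> Option<String> {
    // CROSS_COMPILE is of the form "arm-linux-gnueabi-"; drop the trailing dash.
    let cross_compile = env::var("CROSS_COMPILE")
        .ok()
        .map(|s| s.trim_end_matches('-').to_owned());
    // RUSTC_LINKER such as "aarch64-linux-gnu-gcc" yields "aarch64-linux-gnu".
    let linker_prefix = env::var("RUSTC_LINKER")
        .ok()
        .and_then(|l| l.strip_suffix("-gcc").map(str::to_string));
    cross_compile.or(linker_prefix).or_else(|| match target {
        // One illustrative entry stands in for the table above.
        "aarch64-unknown-linux-gnu" => Some("aarch64-linux-gnu".to_owned()),
        _ => None,
    })
}

fn main() {
    println!("{:?}", resolve_prefix("aarch64-unknown-linux-gnu"));
}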
@@ -2887,30 +3357,30 @@ impl Build {
prefixes.first().map(|prefix| *prefix))
}
- fn get_target(&self) -> Result<String, Error> {
- match self.target.clone() {
- Some(t) => Ok(t),
- None => Ok(self.getenv_unwrap("TARGET")?),
+ fn get_target(&self) -> Result<Arc<str>, Error> {
+ match &self.target {
+ Some(t) => Ok(t.clone()),
+ None => self.getenv_unwrap("TARGET"),
}
}
- fn get_host(&self) -> Result<String, Error> {
- match self.host.clone() {
- Some(h) => Ok(h),
- None => Ok(self.getenv_unwrap("HOST")?),
+ fn get_host(&self) -> Result<Arc<str>, Error> {
+ match &self.host {
+ Some(h) => Ok(h.clone()),
+ None => self.getenv_unwrap("HOST"),
}
}
- fn get_opt_level(&self) -> Result<String, Error> {
- match self.opt_level.as_ref().cloned() {
- Some(ol) => Ok(ol),
- None => Ok(self.getenv_unwrap("OPT_LEVEL")?),
+ fn get_opt_level(&self) -> Result<Arc<str>, Error> {
+ match &self.opt_level {
+ Some(ol) => Ok(ol.clone()),
+ None => self.getenv_unwrap("OPT_LEVEL"),
}
}
fn get_debug(&self) -> bool {
self.debug.unwrap_or_else(|| match self.getenv("DEBUG") {
- Some(s) => s != "false",
+ Some(s) => &*s != "false",
None => false,
})
}
@@ -2938,19 +3408,22 @@ impl Build {
self.force_frame_pointer.unwrap_or_else(|| self.get_debug())
}
- fn get_out_dir(&self) -> Result<PathBuf, Error> {
- match self.out_dir.clone() {
- Some(p) => Ok(p),
- None => Ok(env::var_os("OUT_DIR").map(PathBuf::from).ok_or_else(|| {
- Error::new(
- ErrorKind::EnvVarNotFound,
- "Environment variable OUT_DIR not defined.",
- )
- })?),
+ fn get_out_dir(&self) -> Result<Cow<'_, Path>, Error> {
+ match &self.out_dir {
+ Some(p) => Ok(Cow::Borrowed(&**p)),
+ None => env::var_os("OUT_DIR")
+ .map(PathBuf::from)
+ .map(Cow::Owned)
+ .ok_or_else(|| {
+ Error::new(
+ ErrorKind::EnvVarNotFound,
+ "Environment variable OUT_DIR not defined.",
+ )
+ }),
}
}
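The `Cow<'_, Path>` return type above avoids copying an explicitly configured directory and only allocates when falling back to the environment. A minimal sketch of the same borrow-or-own pattern:

use std::{borrow::Cow, env, path::{Path, PathBuf}};

fn out_dir(configured: Option<&Path>) -> Option<Cow<'_, Path>> {
    match configured {
        // Borrowed: no allocation for an explicitly configured directory.
        Some(p) => Some(Cow::Borrowed(p)),
        // Owned: the PathBuf built from OUT_DIR must be owned by the Cow.
        None => env::var_os("OUT_DIR").map(PathBuf::from).map(Cow::Owned),
    }
}

fn main() {
    let fixed = PathBuf::from("/tmp/build");
    assert!(matches!(out_dir(Some(&fixed)), Some(Cow::Borrowed(_))));
}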
- fn getenv(&self, v: &str) -> Option<String> {
+ fn getenv(&self, v: &str) -> Option<Arc<str>> {
// Returns true for environment variables cargo sets for build scripts:
// https://doc.rust-lang.org/cargo/reference/environment-variables.html#environment-variables-cargo-sets-for-build-scripts
//
@@ -2970,47 +3443,58 @@ impl Build {
return val.clone();
}
if self.emit_rerun_if_env_changed && !provided_by_cargo(v) {
- self.print(&format!("cargo:rerun-if-env-changed={}", v));
+ self.cargo_output
+ .print_metadata(&format_args!("cargo:rerun-if-env-changed={}", v));
}
- let r = env::var(v).ok();
- self.print(&format!("{} = {:?}", v, r));
+ let r = env::var(v).ok().map(Arc::from);
+ self.cargo_output
+ .print_metadata(&format_args!("{} = {:?}", v, r));
cache.insert(v.to_string(), r.clone());
r
}
- fn getenv_unwrap(&self, v: &str) -> Result<String, Error> {
+ fn getenv_unwrap(&self, v: &str) -> Result<Arc<str>, Error> {
match self.getenv(v) {
Some(s) => Ok(s),
None => Err(Error::new(
ErrorKind::EnvVarNotFound,
- &format!("Environment variable {} not defined.", v.to_string()),
+ format!("Environment variable {} not defined.", v),
)),
}
}
- fn print(&self, s: &str) {
- if self.cargo_metadata {
- println!("{}", s);
+ fn getenv_with_target_prefixes(&self, var_base: &str) -> Result<Arc<str>, Error> {
+ let target = self.get_target()?;
+ let host = self.get_host()?;
+ let kind = if host == target { "HOST" } else { "TARGET" };
+ let target_u = target.replace('-', "_");
+ let res = self
+ .getenv(&format!("{}_{}", var_base, target))
+ .or_else(|| self.getenv(&format!("{}_{}", var_base, target_u)))
+ .or_else(|| self.getenv(&format!("{}_{}", kind, var_base)))
+ .or_else(|| self.getenv(var_base));
+
+ match res {
+ Some(res) => Ok(res),
+ None => Err(Error::new(
+ ErrorKind::EnvVarNotFound,
+ format!("Could not find environment variable {}.", var_base),
+ )),
}
}
+ fn envflags(&self, name: &str) -> Result<Vec<String>, Error> {
+ Ok(self
+ .getenv_with_target_prefixes(name)?
+ .split_ascii_whitespace()
+ .map(|slice| slice.to_string())
+ .collect())
+ }
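The lookup order above is easiest to see spelled out. A self-contained sketch for a hypothetical CFLAGS lookup; the variable names in main() are only examples:

use std::env;

// CFLAGS_<target>, then CFLAGS_<target with '-' replaced by '_'>,
// then TARGET_CFLAGS (or HOST_CFLAGS when host == target), then plain CFLAGS.
fn flags_for(var_base: &str, target: &str, host: &str) -> Option<Vec<String>> {
    let kind = if host == target { "HOST" } else { "TARGET" };
    let candidates = [
        format!("{}_{}", var_base, target),
        format!("{}_{}", var_base, target.replace('-', "_")),
        format!("{}_{}", kind, var_base),
        var_base.to_string(),
    ];
    candidates
        .iter()
        .find_map(|name| env::var(name).ok())
        // Whitespace-split exactly like `envflags` above.
        .map(|v| v.split_ascii_whitespace().map(str::to_string).collect())
}

fn main() {
    // With CFLAGS_x86_64_unknown_linux_gnu="-O2 -g" set, this yields ["-O2", "-g"].
    let t = "x86_64-unknown-linux-gnu";
    println!("{:?}", flags_for("CFLAGS", t, t));
}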
+
fn fix_env_for_apple_os(&self, cmd: &mut Command) -> Result<(), Error> {
let target = self.get_target()?;
let host = self.get_host()?;
if host.contains("apple-darwin") && target.contains("apple-darwin") {
- // If, for example, `cargo` runs during the build of an XCode project, then `SDKROOT` environment variable
- // would represent the current target, and this is the problem for us, if we want to compile something
- // for the host, when host != target.
- // We can not just remove `SDKROOT`, because, again, for example, XCode add to PATH
- // /Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin
- // and `cc` from this path can not find system include files, like `pthread.h`, if `SDKROOT`
- // is not set
- if let Ok(sdkroot) = env::var("SDKROOT") {
- if !sdkroot.contains("MacOSX") {
- let macos_sdk = self.apple_sdk_root("macosx")?;
- cmd.env("SDKROOT", macos_sdk);
- }
- }
// Additionally, `IPHONEOS_DEPLOYMENT_TARGET` must not be set when using the Xcode linker at
// "/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/ld",
// although this is apparently ignored when using the linker at "/usr/bin/ld".
@@ -3020,6 +3504,10 @@ impl Build {
}
fn apple_sdk_root(&self, sdk: &str) -> Result<OsString, Error> {
+ if let Some(sdkroot) = env::var_os("SDKROOT") {
+ return Ok(sdkroot);
+ }
+
let mut cache = self
.apple_sdk_root_cache
.lock()
@@ -3034,6 +3522,7 @@ impl Build {
.arg("--sdk")
.arg(sdk),
"xcrun",
+ &self.cargo_output,
)?;
let sdk_path = match String::from_utf8(sdk_path) {
@@ -3050,362 +3539,199 @@ impl Build {
Ok(ret)
}
- fn cuda_file_count(&self) -> usize {
- self.files
- .iter()
- .filter(|file| file.extension() == Some(OsStr::new("cu")))
- .count()
- }
-}
+ fn apple_deployment_version(&self, os: AppleOs, arch_str: Option<&str>, sdk: &str) -> String {
+ let default_deployment_from_sdk = || {
+ let mut cache = self
+ .apple_versions_cache
+ .lock()
+ .expect("apple_versions_cache lock failed");
-impl Default for Build {
- fn default() -> Build {
- Build::new()
- }
-}
+ if let Some(ret) = cache.get(sdk) {
+ return Some(ret.clone());
+ }
-impl Tool {
- fn new(path: PathBuf) -> Self {
- Tool::with_features(path, None, false)
- }
+ let version = run_output(
+ self.cmd("xcrun")
+ .arg("--show-sdk-platform-version")
+ .arg("--sdk")
+ .arg(sdk),
+ "xcrun",
+ &self.cargo_output,
+ )
+ .ok()?;
- fn with_clang_driver(path: PathBuf, clang_driver: Option<&str>) -> Self {
- Self::with_features(path, clang_driver, false)
- }
+ let version = std::str::from_utf8(&version).ok()?.trim().to_owned();
- #[cfg(windows)]
- /// Explicitly set the `ToolFamily`, skipping name-based detection.
- fn with_family(path: PathBuf, family: ToolFamily) -> Self {
- Self {
- path: path,
- cc_wrapper_path: None,
- cc_wrapper_args: Vec::new(),
- args: Vec::new(),
- env: Vec::new(),
- family: family,
- cuda: false,
- removed_args: Vec::new(),
- }
- }
+ cache.insert(sdk.into(), version.clone());
+ Some(version)
+ };
- fn with_features(path: PathBuf, clang_driver: Option<&str>, cuda: bool) -> Self {
- // Try to detect family of the tool from its name, falling back to Gnu.
- let family = if let Some(fname) = path.file_name().and_then(|p| p.to_str()) {
- if fname.contains("clang-cl") {
- ToolFamily::Msvc { clang_cl: true }
- } else if fname.ends_with("cl") || fname == "cl.exe" {
- ToolFamily::Msvc { clang_cl: false }
- } else if fname.contains("clang") {
- match clang_driver {
- Some("cl") => ToolFamily::Msvc { clang_cl: true },
- _ => ToolFamily::Clang,
- }
+ let deployment_from_env = |name: &str| {
+            // note this isn't hit in production codepaths, it's mostly just for tests which don't
+ // set the real env
+ if let Some((_, v)) = self.env.iter().find(|(k, _)| &**k == OsStr::new(name)) {
+ Some(v.to_str().unwrap().to_string())
} else {
- ToolFamily::Gnu
+ env::var(name).ok()
}
- } else {
- ToolFamily::Gnu
};
- Tool {
- path: path,
- cc_wrapper_path: None,
- cc_wrapper_args: Vec::new(),
- args: Vec::new(),
- env: Vec::new(),
- family: family,
- cuda: cuda,
- removed_args: Vec::new(),
- }
- }
-
- /// Add an argument to be stripped from the final command arguments.
- fn remove_arg(&mut self, flag: OsString) {
- self.removed_args.push(flag);
- }
-
- /// Add a flag, and optionally prepend the NVCC wrapper flag "-Xcompiler".
- ///
- /// Currently this is only used for compiling CUDA sources, since NVCC only
- /// accepts a limited set of GNU-like flags, and the rest must be prefixed
- /// with a "-Xcompiler" flag to get passed to the underlying C++ compiler.
- fn push_cc_arg(&mut self, flag: OsString) {
- if self.cuda {
- self.args.push("-Xcompiler".into());
- }
- self.args.push(flag);
- }
-
- fn is_duplicate_opt_arg(&self, flag: &OsString) -> bool {
- let flag = flag.to_str().unwrap();
- let mut chars = flag.chars();
-
- // Only duplicate check compiler flags
- if self.is_like_msvc() {
- if chars.next() != Some('/') {
- return false;
- }
- } else if self.is_like_gnu() || self.is_like_clang() {
- if chars.next() != Some('-') {
- return false;
+ // Determines if the acquired deployment target is too low to support modern C++ on some Apple platform.
+ //
+ // A long time ago they used libstdc++, but since macOS 10.9 and iOS 7 libc++ has been the library the SDKs provide to link against.
+        // If a `cc` config wants to use C++, we round up to these versions as the baseline.
+ let maybe_cpp_version_baseline = |deployment_target_ver: String| -> Option<String> {
+ if !self.cpp {
+ return Some(deployment_target_ver);
}
- }
- // Check for existing optimization flags (-O, /O)
- if chars.next() == Some('O') {
- return self
- .args()
- .iter()
- .any(|ref a| a.to_str().unwrap_or("").chars().nth(1) == Some('O'));
- }
-
- // TODO Check for existing -m..., -m...=..., /arch:... flags
- return false;
- }
-
- /// Don't push optimization arg if it conflicts with existing args
- fn push_opt_unless_duplicate(&mut self, flag: OsString) {
- if self.is_duplicate_opt_arg(&flag) {
- println!("Info: Ignoring duplicate arg {:?}", &flag);
- } else {
- self.push_cc_arg(flag);
- }
- }
-
- /// Converts this compiler into a `Command` that's ready to be run.
- ///
- /// This is useful for when the compiler needs to be executed and the
- /// command returned will already have the initial arguments and environment
- /// variables configured.
- pub fn to_command(&self) -> Command {
- let mut cmd = match self.cc_wrapper_path {
- Some(ref cc_wrapper_path) => {
- let mut cmd = Command::new(&cc_wrapper_path);
- cmd.arg(&self.path);
- cmd
+ let mut deployment_target = deployment_target_ver
+ .split('.')
+ .map(|v| v.parse::<u32>().expect("integer version"));
+
+ match os {
+ AppleOs::MacOs => {
+ let major = deployment_target.next().unwrap_or(0);
+ let minor = deployment_target.next().unwrap_or(0);
+
+ // If below 10.9, we ignore it and let the SDK's target definitions handle it.
+ if major == 10 && minor < 9 {
+ self.cargo_output.print_warning(&format_args!(
+ "macOS deployment target ({}) too low, it will be increased",
+ deployment_target_ver
+ ));
+ return None;
+ }
+ }
+ AppleOs::Ios => {
+ let major = deployment_target.next().unwrap_or(0);
+
+                        // If below 7, we ignore it and let the SDK's target definitions handle it.
+ if major < 7 {
+ self.cargo_output.print_warning(&format_args!(
+ "iOS deployment target ({}) too low, it will be increased",
+ deployment_target_ver
+ ));
+ return None;
+ }
+ }
+ // watchOS, tvOS, and others are all new enough that libc++ is their baseline.
+ _ => {}
}
- None => Command::new(&self.path),
- };
- cmd.args(&self.cc_wrapper_args);
- let value = self
- .args
- .iter()
- .filter(|a| !self.removed_args.contains(a))
- .collect::<Vec<_>>();
- cmd.args(&value);
-
- for &(ref k, ref v) in self.env.iter() {
- cmd.env(k, v);
- }
- cmd
- }
+ // If the deployment target met or exceeded the C++ baseline
+ Some(deployment_target_ver)
+ };
- /// Returns the path for this compiler.
- ///
- /// Note that this may not be a path to a file on the filesystem, e.g. "cc",
- /// but rather something which will be resolved when a process is spawned.
- pub fn path(&self) -> &Path {
- &self.path
- }
+ // The hardcoded minimums here are subject to change in a future compiler release,
+ // and only exist as last resort fallbacks. Don't consider them stable.
+        // `cc` doesn't use rustc's `--print deployment-target` because the compiler's defaults
+        // don't align well with Apple's SDKs and other third-party libraries that generally require higher
+        // deployment targets. rustc isn't interested in those by default though, so it's fine to be different here.
+ //
+ // If no explicit target is passed, `cc` defaults to the current Xcode SDK's `DefaultDeploymentTarget` for better
+ // compatibility. This is also the crate's historical behavior and what has become a relied-on value.
+ //
+ // The ordering of env -> XCode SDK -> old rustc defaults is intentional for performance when using
+ // an explicit target.
+ match os {
+ AppleOs::MacOs => deployment_from_env("MACOSX_DEPLOYMENT_TARGET")
+ .and_then(maybe_cpp_version_baseline)
+ .or_else(default_deployment_from_sdk)
+ .unwrap_or_else(|| {
+ if arch_str == Some("aarch64") {
+ "11.0".into()
+ } else {
+ let default = "10.7";
+ maybe_cpp_version_baseline(default.into()).unwrap_or_else(|| default.into())
+ }
+ }),
- /// Returns the default set of arguments to the compiler needed to produce
- /// executables for the target this compiler generates.
- pub fn args(&self) -> &[OsString] {
- &self.args
- }
+ AppleOs::Ios => deployment_from_env("IPHONEOS_DEPLOYMENT_TARGET")
+ .and_then(maybe_cpp_version_baseline)
+ .or_else(default_deployment_from_sdk)
+ .unwrap_or_else(|| "7.0".into()),
- /// Returns the set of environment variables needed for this compiler to
- /// operate.
- ///
- /// This is typically only used for MSVC compilers currently.
- pub fn env(&self) -> &[(OsString, OsString)] {
- &self.env
- }
+ AppleOs::WatchOs => deployment_from_env("WATCHOS_DEPLOYMENT_TARGET")
+ .or_else(default_deployment_from_sdk)
+ .unwrap_or_else(|| "5.0".into()),
- /// Returns the compiler command in format of CC environment variable.
- /// Or empty string if CC env was not present
- ///
- /// This is typically used by configure script
- pub fn cc_env(&self) -> OsString {
- match self.cc_wrapper_path {
- Some(ref cc_wrapper_path) => {
- let mut cc_env = cc_wrapper_path.as_os_str().to_owned();
- cc_env.push(" ");
- cc_env.push(self.path.to_path_buf().into_os_string());
- for arg in self.cc_wrapper_args.iter() {
- cc_env.push(" ");
- cc_env.push(arg);
- }
- cc_env
- }
- None => OsString::from(""),
+ AppleOs::TvOs => deployment_from_env("TVOS_DEPLOYMENT_TARGET")
+ .or_else(default_deployment_from_sdk)
+ .unwrap_or_else(|| "9.0".into()),
}
}
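A simplified model of the macOS branch above, for orientation only: an explicit MACOSX_DEPLOYMENT_TARGET wins unless C++ was requested and the value predates the libc++ baseline, then the SDK default, then the hardcoded fallback. The real code also emits warnings and covers iOS, watchOS and tvOS.

fn macos_deployment_target(
    env_value: Option<&str>,
    sdk_default: Option<&str>,
    cpp: bool,
    aarch64: bool,
) -> String {
    // The macOS part of the `maybe_cpp_version_baseline` check above.
    let too_old_for_libcxx = |v: &str| {
        let mut parts = v.split('.').map(|p| p.parse::<u32>().unwrap_or(0));
        let (major, minor) = (parts.next().unwrap_or(0), parts.next().unwrap_or(0));
        major == 10 && minor < 9
    };
    env_value
        .filter(|v| !(cpp && too_old_for_libcxx(*v)))
        .or(sdk_default)
        .map(str::to_string)
        .unwrap_or_else(|| if aarch64 { "11.0".into() } else { "10.7".into() })
}

fn main() {
    // An explicit 10.7 is too old for libc++, so the SDK default wins here.
    assert_eq!(macos_deployment_target(Some("10.7"), Some("13.1"), true, false), "13.1");
    assert_eq!(macos_deployment_target(None, None, false, true), "11.0");
}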
- /// Returns the compiler flags in format of CFLAGS environment variable.
- /// Important here - this will not be CFLAGS from env, its internal gcc's flags to use as CFLAGS
- /// This is typically used by configure script
- pub fn cflags_env(&self) -> OsString {
- let mut flags = OsString::new();
- for (i, arg) in self.args.iter().enumerate() {
- if i > 0 {
- flags.push(" ");
- }
- flags.push(arg);
- }
- flags
+ fn cuda_file_count(&self) -> usize {
+ self.files
+ .iter()
+ .filter(|file| file.extension() == Some(OsStr::new("cu")))
+ .count()
}
+}
- /// Whether the tool is GNU Compiler Collection-like.
- pub fn is_like_gnu(&self) -> bool {
- self.family == ToolFamily::Gnu
+impl Default for Build {
+ fn default() -> Build {
+ Build::new()
}
+}
- /// Whether the tool is Clang-like.
- pub fn is_like_clang(&self) -> bool {
- self.family == ToolFamily::Clang
- }
+fn fail(s: &str) -> ! {
+ eprintln!("\n\nerror occurred: {}\n\n", s);
+ std::process::exit(1);
+}
- /// Whether the tool is MSVC-like.
- pub fn is_like_msvc(&self) -> bool {
- match self.family {
- ToolFamily::Msvc { .. } => true,
- _ => false,
+#[derive(Clone, Copy, PartialEq)]
+enum AppleOs {
+ MacOs,
+ Ios,
+ WatchOs,
+ TvOs,
+}
+impl std::fmt::Debug for AppleOs {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ AppleOs::MacOs => f.write_str("macOS"),
+ AppleOs::Ios => f.write_str("iOS"),
+ AppleOs::WatchOs => f.write_str("WatchOS"),
+ AppleOs::TvOs => f.write_str("AppleTVOS"),
}
}
}
-fn run(cmd: &mut Command, program: &str) -> Result<(), Error> {
- let (mut child, print) = spawn(cmd, program)?;
- let status = match child.wait() {
- Ok(s) => s,
- Err(_) => {
- return Err(Error::new(
- ErrorKind::ToolExecError,
- &format!(
- "Failed to wait on spawned child process, command {:?} with args {:?}.",
- cmd, program
- ),
- ));
- }
- };
- print.join().unwrap();
- println!("{}", status);
-
- if status.success() {
- Ok(())
- } else {
- Err(Error::new(
- ErrorKind::ToolExecError,
- &format!(
- "Command {:?} with args {:?} did not execute successfully (status code {}).",
- cmd, program, status
- ),
- ))
- }
+struct AppleSdkTargetParts {
+ sdk_prefix: &'static str,
+ sim_prefix: &'static str,
+ sdk: Cow<'static, str>,
}
-fn run_output(cmd: &mut Command, program: &str) -> Result<Vec<u8>, Error> {
- cmd.stdout(Stdio::piped());
- let (mut child, print) = spawn(cmd, program)?;
- let mut stdout = vec![];
- child
- .stdout
- .take()
- .unwrap()
- .read_to_end(&mut stdout)
- .unwrap();
- let status = match child.wait() {
- Ok(s) => s,
- Err(_) => {
- return Err(Error::new(
- ErrorKind::ToolExecError,
- &format!(
- "Failed to wait on spawned child process, command {:?} with args {:?}.",
- cmd, program
- ),
- ));
- }
+fn apple_os_sdk_parts(os: AppleOs, arch: &AppleArchSpec) -> AppleSdkTargetParts {
+ let (sdk_prefix, sim_prefix) = match os {
+ AppleOs::MacOs => ("macosx", ""),
+ AppleOs::Ios => ("iphone", "ios-"),
+ AppleOs::WatchOs => ("watch", "watch"),
+ AppleOs::TvOs => ("appletv", "appletv"),
+ };
+ let sdk = match arch {
+ AppleArchSpec::Device(_) if os == AppleOs::MacOs => Cow::Borrowed("macosx"),
+ AppleArchSpec::Device(_) => format!("{}os", sdk_prefix).into(),
+ AppleArchSpec::Simulator(_) => format!("{}simulator", sdk_prefix).into(),
+ AppleArchSpec::Catalyst(_) => Cow::Borrowed("macosx"),
};
- print.join().unwrap();
- println!("{}", status);
-
- if status.success() {
- Ok(stdout)
- } else {
- Err(Error::new(
- ErrorKind::ToolExecError,
- &format!(
- "Command {:?} with args {:?} did not execute successfully (status code {}).",
- cmd, program, status
- ),
- ))
- }
-}
-fn spawn(cmd: &mut Command, program: &str) -> Result<(Child, JoinHandle<()>), Error> {
- println!("running: {:?}", cmd);
-
- // Capture the standard error coming from these programs, and write it out
- // with cargo:warning= prefixes. Note that this is a bit wonky to avoid
- // requiring the output to be UTF-8, we instead just ship bytes from one
- // location to another.
- match cmd.stderr(Stdio::piped()).spawn() {
- Ok(mut child) => {
- let stderr = BufReader::new(child.stderr.take().unwrap());
- let print = thread::spawn(move || {
- for line in stderr.split(b'\n').filter_map(|l| l.ok()) {
- print!("cargo:warning=");
- std::io::stdout().write_all(&line).unwrap();
- println!("");
- }
- });
- Ok((child, print))
- }
- Err(ref e) if e.kind() == io::ErrorKind::NotFound => {
- let extra = if cfg!(windows) {
- " (see https://github.com/rust-lang/cc-rs#compile-time-requirements \
- for help)"
- } else {
- ""
- };
- Err(Error::new(
- ErrorKind::ToolNotFound,
- &format!("Failed to find tool. Is `{}` installed?{}", program, extra),
- ))
- }
- Err(ref e) => Err(Error::new(
- ErrorKind::ToolExecError,
- &format!(
- "Command {:?} with args {:?} failed to start: {:?}",
- cmd, program, e
- ),
- )),
+ AppleSdkTargetParts {
+ sdk_prefix,
+ sim_prefix,
+ sdk,
}
}
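For reference, the concrete SDK names this mapping resolves to (macOS and Mac Catalyst both land on the "macosx" SDK), written out as a small standalone table:

fn sdk_name(os: &str, simulator: bool) -> &'static str {
    match (os, simulator) {
        ("macos", _) => "macosx",
        ("ios", false) => "iphoneos",
        ("ios", true) => "iphonesimulator",
        ("watchos", false) => "watchos",
        ("watchos", true) => "watchsimulator",
        ("tvos", false) => "appletvos",
        ("tvos", true) => "appletvsimulator",
        _ => "macosx",
    }
}

fn main() {
    assert_eq!(sdk_name("ios", true), "iphonesimulator");
    assert_eq!(sdk_name("tvos", false), "appletvos");
}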
-fn fail(s: &str) -> ! {
- eprintln!("\n\nerror occurred: {}\n\n", s);
- std::process::exit(1);
-}
-
-fn command_add_output_file(
- cmd: &mut Command,
- dst: &Path,
- cuda: bool,
- msvc: bool,
- clang: bool,
- is_asm: bool,
- is_arm: bool,
-) {
- if msvc && !clang && !cuda && !(is_asm && is_arm) {
- let mut s = OsString::from("-Fo");
- s.push(&dst);
- cmd.arg(s);
- } else {
- cmd.arg("-o").arg(&dst);
- }
+#[allow(dead_code)]
+enum AppleArchSpec {
+ Device(&'static str),
+ Simulator(&'static str),
+ #[allow(dead_code)]
+ Catalyst(&'static str),
}
// Use by default minimum available API level
@@ -3429,13 +3755,12 @@ static NEW_STANDALONE_ANDROID_COMPILERS: [&str; 4] = [
fn android_clang_compiler_uses_target_arg_internally(clang_path: &Path) -> bool {
if let Some(filename) = clang_path.file_name() {
if let Some(filename_str) = filename.to_str() {
- filename_str.contains("android")
- } else {
- false
+ if let Some(idx) = filename_str.rfind('-') {
+ return filename_str.split_at(idx).0.contains("android");
+ }
}
- } else {
- false
}
+ false
}
#[test]
@@ -3449,6 +3774,9 @@ fn test_android_clang_compiler_uses_target_arg_internally() {
));
}
assert!(!android_clang_compiler_uses_target_arg_internally(
+ &PathBuf::from("clang-i686-linux-android")
+ ));
+ assert!(!android_clang_compiler_uses_target_arg_internally(
&PathBuf::from("clang")
));
assert!(!android_clang_compiler_uses_target_arg_internally(
@@ -3505,7 +3833,9 @@ fn autodetect_android_compiler(target: &str, host: &str, gnu: &str, clang: &str)
// Rust and clang/cc don't agree on how to name the target.
fn map_darwin_target_from_rust_to_compiler_architecture(target: &str) -> Option<&'static str> {
- if target.contains("x86_64") {
+ if target.contains("x86_64h") {
+ Some("x86_64h")
+ } else if target.contains("x86_64") {
Some("x86_64")
} else if target.contains("arm64e") {
Some("arm64e")
@@ -3522,7 +3852,7 @@ fn map_darwin_target_from_rust_to_compiler_architecture(target: &str) -> Option<
}
}
-fn which(tool: &Path) -> Option<PathBuf> {
+fn which(tool: &Path, path_entries: Option<OsString>) -> Option<PathBuf> {
fn check_exe(exe: &mut PathBuf) -> bool {
let exe_ext = std::env::consts::EXE_EXTENSION;
exe.exists() || (!exe_ext.is_empty() && exe.set_extension(exe_ext) && exe.exists())
@@ -3535,13 +3865,37 @@ fn which(tool: &Path) -> Option<PathBuf> {
}
// Loop through PATH entries searching for the |tool|.
- let path_entries = env::var_os("PATH")?;
+ let path_entries = path_entries.or(env::var_os("PATH"))?;
env::split_paths(&path_entries).find_map(|path_entry| {
let mut exe = path_entry.join(tool);
- return if check_exe(&mut exe) { Some(exe) } else { None };
+ if check_exe(&mut exe) {
+ Some(exe)
+ } else {
+ None
+ }
})
}
+// search for |prog| on 'programs' path in '|cc| -print-search-dirs' output
+fn search_programs(cc: &mut Command, prog: &str, cargo_output: &CargoOutput) -> Option<PathBuf> {
+ let search_dirs = run_output(
+ cc.arg("-print-search-dirs"),
+ "cc",
+ // this doesn't concern the compilation so we always want to show warnings.
+ cargo_output,
+ )
+ .ok()?;
+ // clang driver appears to be forcing UTF-8 output even on Windows,
+ // hence from_utf8 is assumed to be usable in all cases.
+ let search_dirs = std::str::from_utf8(&search_dirs).ok()?;
+ for dirs in search_dirs.split(|c| c == '\r' || c == '\n') {
+ if let Some(path) = dirs.strip_prefix("programs: =") {
+ return which(Path::new(prog), Some(OsString::from(path)));
+ }
+ }
+ None
+}
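The parsing step in `search_programs` is small enough to demonstrate on a canned string; only the sample output below is made up:

fn programs_path(search_dirs_output: &str) -> Option<&str> {
    // The relevant line of `cc -print-search-dirs` looks like "programs: =<dir>:<dir>".
    search_dirs_output
        .split(|c| c == '\r' || c == '\n')
        .find_map(|line| line.strip_prefix("programs: ="))
}

fn main() {
    let sample = "programs: =/usr/lib/llvm/bin:/usr/bin\nlibraries: =/usr/lib\n";
    assert_eq!(programs_path(sample), Some("/usr/lib/llvm/bin:/usr/bin"));
}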
+
#[derive(Clone, Copy, PartialEq)]
enum AsmFileExt {
/// `.asm` files. On MSVC targets, we assume these should be passed to MASM
diff --git a/third_party/rust/cc/src/parallel/async_executor.rs b/third_party/rust/cc/src/parallel/async_executor.rs
new file mode 100644
index 0000000000..9ebd1ad562
--- /dev/null
+++ b/third_party/rust/cc/src/parallel/async_executor.rs
@@ -0,0 +1,118 @@
+use std::{
+ cell::Cell,
+ future::Future,
+ pin::Pin,
+ ptr,
+ task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
+ thread,
+ time::Duration,
+};
+
+use crate::Error;
+
+const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
+ // Cloning just returns a new no-op raw waker
+ |_| NOOP_RAW_WAKER,
+ // `wake` does nothing
+ |_| {},
+ // `wake_by_ref` does nothing
+ |_| {},
+ // Dropping does nothing as we don't allocate anything
+ |_| {},
+);
+const NOOP_RAW_WAKER: RawWaker = RawWaker::new(ptr::null(), &NOOP_WAKER_VTABLE);
+
+#[derive(Default)]
+pub(crate) struct YieldOnce(bool);
+
+impl Future for YieldOnce {
+ type Output = ();
+
+ fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
+ let flag = &mut std::pin::Pin::into_inner(self).0;
+ if !*flag {
+ *flag = true;
+ Poll::Pending
+ } else {
+ Poll::Ready(())
+ }
+ }
+}
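The behaviour of `YieldOnce` can be checked by polling it by hand. A self-contained sketch that re-declares the future and a no-op waker (mirroring the definitions above) purely so it compiles on its own:

use std::{
    future::Future,
    pin::Pin,
    ptr,
    task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};

const VTABLE: RawWakerVTable = RawWakerVTable::new(|_| RAW, |_| {}, |_| {}, |_| {});
const RAW: RawWaker = RawWaker::new(ptr::null(), &VTABLE);

#[derive(Default)]
struct YieldOnce(bool);

impl Future for YieldOnce {
    type Output = ();
    fn poll(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<()> {
        let flag = &mut Pin::into_inner(self).0;
        if !*flag {
            *flag = true;
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}

fn main() {
    let waker = unsafe { Waker::from_raw(RAW) };
    let mut cx = Context::from_waker(&waker);
    let mut fut = YieldOnce::default();
    // Pinning on the stack is fine here because `fut` is never moved afterwards.
    let mut fut = unsafe { Pin::new_unchecked(&mut fut) };
    // The first poll yields (Pending), the second completes.
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Pending);
    assert_eq!(fut.as_mut().poll(&mut cx), Poll::Ready(()));
}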
+
+/// Execute the futures and return when they are all done.
+///
+/// Here we use our own homebrew async executor since cc is used in the build
+/// script of many popular projects; pulling in additional dependencies would
+/// significantly slow down its compilation.
+pub(crate) fn block_on<Fut1, Fut2>(
+ mut fut1: Fut1,
+ mut fut2: Fut2,
+ has_made_progress: &Cell<bool>,
+) -> Result<(), Error>
+where
+ Fut1: Future<Output = Result<(), Error>>,
+ Fut2: Future<Output = Result<(), Error>>,
+{
+ // Shadows the future so that it can never be moved and is guaranteed
+ // to be pinned.
+ //
+ // The same trick used in `pin!` macro.
+ //
+ // TODO: Once MSRV is bumped to 1.68, replace this with `std::pin::pin!`
+ let mut fut1 = Some(unsafe { Pin::new_unchecked(&mut fut1) });
+ let mut fut2 = Some(unsafe { Pin::new_unchecked(&mut fut2) });
+
+    // TODO: Once `Waker::noop` is stabilised and our MSRV is bumped to the version
+    // in which it is stabilised, replace this with `Waker::noop`.
+ let waker = unsafe { Waker::from_raw(NOOP_RAW_WAKER) };
+ let mut context = Context::from_waker(&waker);
+
+ let mut backoff_cnt = 0;
+
+ loop {
+ has_made_progress.set(false);
+
+ if let Some(fut) = fut2.as_mut() {
+ if let Poll::Ready(res) = fut.as_mut().poll(&mut context) {
+ fut2 = None;
+ res?;
+ }
+ }
+
+ if let Some(fut) = fut1.as_mut() {
+ if let Poll::Ready(res) = fut.as_mut().poll(&mut context) {
+ fut1 = None;
+ res?;
+ }
+ }
+
+ if fut1.is_none() && fut2.is_none() {
+ return Ok(());
+ }
+
+ if !has_made_progress.get() {
+ if backoff_cnt > 3 {
+                // We have yielded at least three times without making
+ // any progress, so we will sleep for a while.
+ let duration = Duration::from_millis(100 * (backoff_cnt - 3).min(10));
+ thread::sleep(duration);
+ } else {
+ // Given that we spawned a lot of compilation tasks, it is unlikely
+                // that the OS cannot find another ready task to execute.
+ //
+ // If all of them are done, then we will yield them and spawn more,
+ // or simply return.
+ //
+ // Thus this will not be turned into a busy-wait loop and it will not
+                // waste CPU resources.
+ thread::yield_now();
+ }
+ }
+
+ backoff_cnt = if has_made_progress.get() {
+ 0
+ } else {
+ backoff_cnt + 1
+ };
+ }
+}
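A hedged usage sketch (crate-internal, next to `block_on` and `crate::Error`): two futures that report progress through the shared `Cell`, so the executor never has to enter its back-off sleep. The async bodies here are placeholders, not the real compilation futures.

use std::cell::Cell;

fn run_two_jobs() -> Result<(), crate::Error> {
    let has_made_progress = Cell::new(false);
    block_on(
        async {
            // ... await one compiler process here, then:
            has_made_progress.set(true);
            Ok(())
        },
        async {
            // ... await another process here, then:
            has_made_progress.set(true);
            Ok(())
        },
        &has_made_progress,
    )
}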
diff --git a/third_party/rust/cc/src/parallel/job_token.rs b/third_party/rust/cc/src/parallel/job_token.rs
new file mode 100644
index 0000000000..4fec982f85
--- /dev/null
+++ b/third_party/rust/cc/src/parallel/job_token.rs
@@ -0,0 +1,255 @@
+use std::{marker::PhantomData, mem::MaybeUninit, sync::Once};
+
+use crate::Error;
+
+pub(crate) struct JobToken(PhantomData<()>);
+
+impl JobToken {
+ fn new() -> Self {
+ Self(PhantomData)
+ }
+}
+
+impl Drop for JobToken {
+ fn drop(&mut self) {
+ match JobTokenServer::new() {
+ JobTokenServer::Inherited(jobserver) => jobserver.release_token_raw(),
+ JobTokenServer::InProcess(jobserver) => jobserver.release_token_raw(),
+ }
+ }
+}
+
+enum JobTokenServer {
+ Inherited(inherited_jobserver::JobServer),
+ InProcess(inprocess_jobserver::JobServer),
+}
+
+impl JobTokenServer {
+ /// This function returns a static reference to the jobserver because
+ /// - creating a jobserver from env is a bit fd-unsafe (e.g. the fd might
+ /// be closed by other jobserver users in the process) and better do it
+    ///   be closed by other jobserver users in the process), so it is better
+    ///   to do it at the start of the program.
+ /// present), we will create a global in-process only jobserver
+ /// that has to be static so that it will be shared by all cc
+ /// compilation.
+ fn new() -> &'static Self {
+ static INIT: Once = Once::new();
+ static mut JOBSERVER: MaybeUninit<JobTokenServer> = MaybeUninit::uninit();
+
+ unsafe {
+ INIT.call_once(|| {
+ let server = inherited_jobserver::JobServer::from_env()
+ .map(Self::Inherited)
+ .unwrap_or_else(|| Self::InProcess(inprocess_jobserver::JobServer::new()));
+ JOBSERVER = MaybeUninit::new(server);
+ });
+ // TODO: Poor man's assume_init_ref, as that'd require a MSRV of 1.55.
+ &*JOBSERVER.as_ptr()
+ }
+ }
+}
+
+pub(crate) enum ActiveJobTokenServer {
+ Inherited(inherited_jobserver::ActiveJobServer<'static>),
+ InProcess(&'static inprocess_jobserver::JobServer),
+}
+
+impl ActiveJobTokenServer {
+ pub(crate) fn new() -> Result<Self, Error> {
+ match JobTokenServer::new() {
+ JobTokenServer::Inherited(inherited_jobserver) => {
+ inherited_jobserver.enter_active().map(Self::Inherited)
+ }
+ JobTokenServer::InProcess(inprocess_jobserver) => {
+ Ok(Self::InProcess(inprocess_jobserver))
+ }
+ }
+ }
+
+ pub(crate) async fn acquire(&self) -> Result<JobToken, Error> {
+ match &self {
+ Self::Inherited(jobserver) => jobserver.acquire().await,
+ Self::InProcess(jobserver) => Ok(jobserver.acquire().await),
+ }
+ }
+}
+
+mod inherited_jobserver {
+ use super::JobToken;
+
+ use crate::{parallel::async_executor::YieldOnce, Error, ErrorKind};
+
+ use std::{
+ io, mem,
+ sync::{mpsc, Mutex, MutexGuard, PoisonError},
+ };
+
+ pub(super) struct JobServer {
+ /// Implicit token for this process which is obtained and will be
+ /// released in parent. Since JobTokens only give back what they got,
+ /// there should be at most one global implicit token in the wild.
+ ///
+ /// Since Rust does not execute any `Drop` for global variables,
+ /// we can't just put it back to jobserver and then re-acquire it at
+ /// the end of the process.
+ ///
+ /// Use `Mutex` to avoid race between acquire and release.
+ /// If an `AtomicBool` is used, then it's possible for:
+ /// - `release_token_raw`: Tries to set `global_implicit_token` to true, but it is already
+ /// set to `true`, continue to release it to jobserver
+ /// - `acquire` takes the global implicit token, set `global_implicit_token` to false
+ /// - `release_token_raw` now writes the token back into the jobserver, while
+ /// `global_implicit_token` is `false`
+ ///
+ /// If the program exits here, then cc effectively increases parallelism by one, which is
+ /// incorrect, hence we use a `Mutex` here.
+ global_implicit_token: Mutex<bool>,
+ inner: jobserver::Client,
+ }
+
+ impl JobServer {
+ pub(super) unsafe fn from_env() -> Option<Self> {
+ jobserver::Client::from_env().map(|inner| Self {
+ inner,
+ global_implicit_token: Mutex::new(true),
+ })
+ }
+
+ fn get_global_implicit_token(&self) -> MutexGuard<'_, bool> {
+ self.global_implicit_token
+ .lock()
+ .unwrap_or_else(PoisonError::into_inner)
+ }
+
+ /// All tokens except for the global implicit token will be put back into the jobserver
+ /// immediately and they cannot be cached, since Rust does not call `Drop::drop` on
+ /// global variables.
+ pub(super) fn release_token_raw(&self) {
+ let mut global_implicit_token = self.get_global_implicit_token();
+
+ if *global_implicit_token {
+ // There's already a global implicit token, so this token must
+ // be released back into jobserver.
+ //
+ // `release_raw` should not block
+ let _ = self.inner.release_raw();
+ } else {
+ *global_implicit_token = true;
+ }
+ }
+
+ pub(super) fn enter_active(&self) -> Result<ActiveJobServer<'_>, Error> {
+ ActiveJobServer::new(self)
+ }
+ }
+
+ pub(crate) struct ActiveJobServer<'a> {
+ jobserver: &'a JobServer,
+ helper_thread: jobserver::HelperThread,
+        /// When rx is dropped, all the tokens stored within it will be dropped.
+ rx: mpsc::Receiver<io::Result<jobserver::Acquired>>,
+ }
+
+ impl<'a> ActiveJobServer<'a> {
+ fn new(jobserver: &'a JobServer) -> Result<Self, Error> {
+ let (tx, rx) = mpsc::channel();
+
+ Ok(Self {
+ rx,
+ helper_thread: jobserver.inner.clone().into_helper_thread(move |res| {
+ let _ = tx.send(res);
+ })?,
+ jobserver,
+ })
+ }
+
+ pub(super) async fn acquire(&self) -> Result<JobToken, Error> {
+ let mut has_requested_token = false;
+
+ loop {
+ // Fast path
+ if mem::replace(&mut *self.jobserver.get_global_implicit_token(), false) {
+ break Ok(JobToken::new());
+ }
+
+ // Cold path, no global implicit token, obtain one
+ match self.rx.try_recv() {
+ Ok(res) => {
+ let acquired = res?;
+ acquired.drop_without_releasing();
+ break Ok(JobToken::new());
+ }
+ Err(mpsc::TryRecvError::Disconnected) => {
+ break Err(Error::new(
+ ErrorKind::JobserverHelpThreadError,
+ "jobserver help thread has returned before ActiveJobServer is dropped",
+ ))
+ }
+ Err(mpsc::TryRecvError::Empty) => {
+ if !has_requested_token {
+ self.helper_thread.request_token();
+ has_requested_token = true;
+ }
+ YieldOnce::default().await
+ }
+ }
+ }
+ }
+ }
+}
+
+mod inprocess_jobserver {
+ use super::JobToken;
+
+ use crate::parallel::async_executor::YieldOnce;
+
+ use std::{
+ env::var,
+ sync::atomic::{
+ AtomicU32,
+ Ordering::{AcqRel, Acquire},
+ },
+ };
+
+ pub(crate) struct JobServer(AtomicU32);
+
+ impl JobServer {
+ pub(super) fn new() -> Self {
+ // Use `NUM_JOBS` if set (it's configured by Cargo) and otherwise
+ // just fall back to a semi-reasonable number.
+ //
+ // Note that we could use `num_cpus` here but it's an extra
+ // dependency that will almost never be used, so
+ // it's generally not too worth it.
+ let mut parallelism = 4;
+ // TODO: Use std::thread::available_parallelism as an upper bound
+ // when MSRV is bumped.
+ if let Ok(amt) = var("NUM_JOBS") {
+ if let Ok(amt) = amt.parse() {
+ parallelism = amt;
+ }
+ }
+
+ Self(AtomicU32::new(parallelism))
+ }
+
+ pub(super) async fn acquire(&self) -> JobToken {
+ loop {
+ let res = self
+ .0
+ .fetch_update(AcqRel, Acquire, |tokens| tokens.checked_sub(1));
+
+ if res.is_ok() {
+ break JobToken::new();
+ }
+
+ YieldOnce::default().await
+ }
+ }
+
+ pub(super) fn release_token_raw(&self) {
+ self.0.fetch_add(1, AcqRel);
+ }
+ }
+}
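Stripped of the async machinery, the in-process jobserver is just an atomic counter whose decrement refuses to go below zero. A standalone sketch of that core:

use std::sync::atomic::{
    AtomicU32,
    Ordering::{AcqRel, Acquire},
};

struct TokenPool(AtomicU32);

impl TokenPool {
    fn new(parallelism: u32) -> Self {
        Self(AtomicU32::new(parallelism))
    }
    fn try_acquire(&self) -> bool {
        // checked_sub returns None at zero, so fetch_update fails instead of wrapping.
        self.0
            .fetch_update(AcqRel, Acquire, |tokens| tokens.checked_sub(1))
            .is_ok()
    }
    fn release(&self) {
        self.0.fetch_add(1, AcqRel);
    }
}

fn main() {
    let pool = TokenPool::new(2);
    assert!(pool.try_acquire());
    assert!(pool.try_acquire());
    assert!(!pool.try_acquire()); // exhausted; the real code yields and retries
    pool.release();
    assert!(pool.try_acquire());
}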
diff --git a/third_party/rust/cc/src/parallel/mod.rs b/third_party/rust/cc/src/parallel/mod.rs
new file mode 100644
index 0000000000..d69146dc59
--- /dev/null
+++ b/third_party/rust/cc/src/parallel/mod.rs
@@ -0,0 +1,20 @@
+pub(crate) mod async_executor;
+pub(crate) mod job_token;
+pub(crate) mod stderr;
+
+/// Remove all elements in `vec` for which `f(element)` returns `false`.
+///
+/// TODO: Remove this once the MSRV is bumped to v1.61
+pub(crate) fn retain_unordered_mut<T, F>(vec: &mut Vec<T>, mut f: F)
+where
+ F: FnMut(&mut T) -> bool,
+{
+ let mut i = 0;
+ while i < vec.len() {
+ if f(&mut vec[i]) {
+ i += 1;
+ } else {
+ vec.swap_remove(i);
+ }
+ }
+}
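Usage is the same as `Vec::retain`, except that the surviving order may change because removal uses `swap_remove`. A short sketch assuming the helper above is in scope:

fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];
    // Keep even numbers only.
    retain_unordered_mut(&mut v, |x| *x % 2 == 0);
    v.sort_unstable(); // order is not preserved, so sort before comparing
    assert_eq!(v, [2, 4, 6]);
}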
diff --git a/third_party/rust/cc/src/parallel/stderr.rs b/third_party/rust/cc/src/parallel/stderr.rs
new file mode 100644
index 0000000000..47fa085dba
--- /dev/null
+++ b/third_party/rust/cc/src/parallel/stderr.rs
@@ -0,0 +1,90 @@
+/// Helper functions for [ChildStderr].
+use std::{convert::TryInto, process::ChildStderr};
+
+use crate::{Error, ErrorKind};
+
+#[cfg(all(not(unix), not(windows)))]
+compile_error!("Only unix and windows support non-blocking pipes! For other OSes, disable the parallel feature.");
+
+#[cfg(unix)]
+fn get_flags(fd: std::os::unix::io::RawFd) -> Result<i32, Error> {
+ let flags = unsafe { libc::fcntl(fd, libc::F_GETFL, 0) };
+ if flags == -1 {
+ Err(Error::new(
+ ErrorKind::IOError,
+ format!(
+ "Failed to get flags for pipe {}: {}",
+ fd,
+ std::io::Error::last_os_error()
+ ),
+ ))
+ } else {
+ Ok(flags)
+ }
+}
+
+#[cfg(unix)]
+fn set_flags(fd: std::os::unix::io::RawFd, flags: std::os::raw::c_int) -> Result<(), Error> {
+ if unsafe { libc::fcntl(fd, libc::F_SETFL, flags) } == -1 {
+ Err(Error::new(
+ ErrorKind::IOError,
+ format!(
+ "Failed to set flags for pipe {}: {}",
+ fd,
+ std::io::Error::last_os_error()
+ ),
+ ))
+ } else {
+ Ok(())
+ }
+}
+
+#[cfg(unix)]
+pub fn set_non_blocking(pipe: &impl std::os::unix::io::AsRawFd) -> Result<(), Error> {
+ // On Unix, switch the pipe to non-blocking mode.
+ // On Windows, we have a different way to be non-blocking.
+ let fd = pipe.as_raw_fd();
+
+ let flags = get_flags(fd)?;
+ set_flags(fd, flags | libc::O_NONBLOCK)
+}
+
+pub fn bytes_available(stderr: &mut ChildStderr) -> Result<usize, Error> {
+ let mut bytes_available = 0;
+ #[cfg(windows)]
+ {
+ use crate::windows::windows_sys::PeekNamedPipe;
+ use std::os::windows::io::AsRawHandle;
+ use std::ptr::null_mut;
+ if unsafe {
+ PeekNamedPipe(
+ stderr.as_raw_handle(),
+ null_mut(),
+ 0,
+ null_mut(),
+ &mut bytes_available,
+ null_mut(),
+ )
+ } == 0
+ {
+ return Err(Error::new(
+ ErrorKind::IOError,
+ format!(
+ "PeekNamedPipe failed with {}",
+ std::io::Error::last_os_error()
+ ),
+ ));
+ }
+ }
+ #[cfg(unix)]
+ {
+ use std::os::unix::io::AsRawFd;
+ if unsafe { libc::ioctl(stderr.as_raw_fd(), libc::FIONREAD, &mut bytes_available) } != 0 {
+ return Err(Error::new(
+ ErrorKind::IOError,
+ format!("ioctl failed with {}", std::io::Error::last_os_error()),
+ ));
+ }
+ }
+ Ok(bytes_available.try_into().unwrap())
+}
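How the two helpers are meant to be combined (a Unix-flavoured, crate-internal sketch assuming `set_non_blocking` and `bytes_available` are in scope): reading exactly the number of bytes the kernel reports as buffered can never block the build loop.

use std::io::Read;
use std::process::Child;

fn peek_stderr(child: &mut Child) -> Result<Vec<u8>, crate::Error> {
    let stderr = child.stderr.as_mut().expect("stderr must be piped");
    set_non_blocking(stderr)?;
    let n = bytes_available(stderr)?;
    let mut chunk = vec![0u8; n];
    if n > 0 {
        // Exactly `n` bytes are buffered, so this read cannot block.
        stderr.read_exact(&mut chunk).unwrap();
    }
    Ok(chunk)
}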
diff --git a/third_party/rust/cc/src/tool.rs b/third_party/rust/cc/src/tool.rs
new file mode 100644
index 0000000000..a193a90ff7
--- /dev/null
+++ b/third_party/rust/cc/src/tool.rs
@@ -0,0 +1,399 @@
+use std::{
+ collections::HashMap,
+ ffi::OsString,
+ path::{Path, PathBuf},
+ process::Command,
+ sync::Mutex,
+};
+
+use crate::command_helpers::{run_output, CargoOutput};
+
+/// Configuration used to represent an invocation of a C compiler.
+///
+/// This can be used to figure out what compiler is in use, what the arguments
+/// to it are, and what the environment variables look like for the compiler.
+/// This can be used to further configure other build systems (e.g. forward
+/// along CC and/or CFLAGS) or the `to_command` method can be used to run the
+/// compiler itself.
+#[derive(Clone, Debug)]
+#[allow(missing_docs)]
+pub struct Tool {
+ pub(crate) path: PathBuf,
+ pub(crate) cc_wrapper_path: Option<PathBuf>,
+ pub(crate) cc_wrapper_args: Vec<OsString>,
+ pub(crate) args: Vec<OsString>,
+ pub(crate) env: Vec<(OsString, OsString)>,
+ pub(crate) family: ToolFamily,
+ pub(crate) cuda: bool,
+ pub(crate) removed_args: Vec<OsString>,
+ pub(crate) has_internal_target_arg: bool,
+}
+
+impl Tool {
+ pub(crate) fn new(
+ path: PathBuf,
+ cached_compiler_family: &Mutex<HashMap<Box<Path>, ToolFamily>>,
+ cargo_output: &CargoOutput,
+ ) -> Self {
+ Self::with_features(path, None, false, cached_compiler_family, cargo_output)
+ }
+
+ pub(crate) fn with_clang_driver(
+ path: PathBuf,
+ clang_driver: Option<&str>,
+ cached_compiler_family: &Mutex<HashMap<Box<Path>, ToolFamily>>,
+ cargo_output: &CargoOutput,
+ ) -> Self {
+ Self::with_features(
+ path,
+ clang_driver,
+ false,
+ cached_compiler_family,
+ cargo_output,
+ )
+ }
+
+ /// Explicitly set the `ToolFamily`, skipping name-based detection.
+ pub(crate) fn with_family(path: PathBuf, family: ToolFamily) -> Self {
+ Self {
+ path,
+ cc_wrapper_path: None,
+ cc_wrapper_args: Vec::new(),
+ args: Vec::new(),
+ env: Vec::new(),
+ family,
+ cuda: false,
+ removed_args: Vec::new(),
+ has_internal_target_arg: false,
+ }
+ }
+
+ pub(crate) fn with_features(
+ path: PathBuf,
+ clang_driver: Option<&str>,
+ cuda: bool,
+ cached_compiler_family: &Mutex<HashMap<Box<Path>, ToolFamily>>,
+ cargo_output: &CargoOutput,
+ ) -> Self {
+ fn detect_family_inner(path: &Path, cargo_output: &CargoOutput) -> ToolFamily {
+ let mut cmd = Command::new(path);
+ cmd.arg("--version");
+
+ let stdout = match run_output(
+ &mut cmd,
+ &path.to_string_lossy(),
+ // tool detection issues should always be shown as warnings
+ cargo_output,
+ )
+ .ok()
+ .and_then(|o| String::from_utf8(o).ok())
+ {
+ Some(s) => s,
+ None => {
+ // --version failed. fallback to gnu
+ cargo_output.print_warning(&format_args!("Failed to run: {:?}", cmd));
+ return ToolFamily::Gnu;
+ }
+ };
+ if stdout.contains("clang") {
+ ToolFamily::Clang
+ } else if stdout.contains("GCC") {
+ ToolFamily::Gnu
+ } else {
+                // Neither clang nor GCC appeared in the --version output; assume Gnu.
+ cargo_output.print_warning(&format_args!(
+ "Compiler version doesn't include clang or GCC: {:?}",
+ cmd
+ ));
+ ToolFamily::Gnu
+ }
+ }
+ let detect_family = |path: &Path| -> ToolFamily {
+ if let Some(family) = cached_compiler_family.lock().unwrap().get(path) {
+ return *family;
+ }
+
+ let family = detect_family_inner(path, cargo_output);
+ cached_compiler_family
+ .lock()
+ .unwrap()
+ .insert(path.into(), family);
+ family
+ };
+
+ // Try to detect family of the tool from its name, falling back to Gnu.
+ let family = if let Some(fname) = path.file_name().and_then(|p| p.to_str()) {
+ if fname.contains("clang-cl") {
+ ToolFamily::Msvc { clang_cl: true }
+ } else if fname.ends_with("cl") || fname == "cl.exe" {
+ ToolFamily::Msvc { clang_cl: false }
+ } else if fname.contains("clang") {
+ match clang_driver {
+ Some("cl") => ToolFamily::Msvc { clang_cl: true },
+ _ => ToolFamily::Clang,
+ }
+ } else {
+ detect_family(&path)
+ }
+ } else {
+ detect_family(&path)
+ };
+
+ Tool {
+ path,
+ cc_wrapper_path: None,
+ cc_wrapper_args: Vec::new(),
+ args: Vec::new(),
+ env: Vec::new(),
+ family,
+ cuda,
+ removed_args: Vec::new(),
+ has_internal_target_arg: false,
+ }
+ }
+
+ /// Add an argument to be stripped from the final command arguments.
+ pub(crate) fn remove_arg(&mut self, flag: OsString) {
+ self.removed_args.push(flag);
+ }
+
+ /// Push an "exotic" flag to the end of the compiler's arguments list.
+ ///
+ /// Nvidia compiler accepts only the most common compiler flags like `-D`,
+ /// `-I`, `-c`, etc. Options meant specifically for the underlying
+ /// host C++ compiler have to be prefixed with `-Xcompiler`.
+ /// [Another possible future application for this function is passing
+ /// clang-specific flags to clang-cl, which otherwise accepts only
+ /// MSVC-specific options.]
+ pub(crate) fn push_cc_arg(&mut self, flag: OsString) {
+ if self.cuda {
+ self.args.push("-Xcompiler".into());
+ }
+ self.args.push(flag);
+ }
+
+ /// Checks if an argument or flag has already been specified or conflicts.
+ ///
+ /// Currently only checks optimization flags.
+ pub(crate) fn is_duplicate_opt_arg(&self, flag: &OsString) -> bool {
+ let flag = flag.to_str().unwrap();
+ let mut chars = flag.chars();
+
+ // Only duplicate check compiler flags
+ if self.is_like_msvc() {
+ if chars.next() != Some('/') {
+ return false;
+ }
+ } else if self.is_like_gnu() || self.is_like_clang() {
+ if chars.next() != Some('-') {
+ return false;
+ }
+ }
+
+ // Check for existing optimization flags (-O, /O)
+ if chars.next() == Some('O') {
+ return self
+ .args()
+ .iter()
+ .any(|a| a.to_str().unwrap_or("").chars().nth(1) == Some('O'));
+ }
+
+ // TODO Check for existing -m..., -m...=..., /arch:... flags
+ false
+ }
+
+ /// Don't push optimization arg if it conflicts with existing args.
+ pub(crate) fn push_opt_unless_duplicate(&mut self, flag: OsString) {
+ if self.is_duplicate_opt_arg(&flag) {
+ println!("Info: Ignoring duplicate arg {:?}", &flag);
+ } else {
+ self.push_cc_arg(flag);
+ }
+ }
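A crate-internal sketch of the dedup behaviour above (Tool::with_family and push_opt_unless_duplicate are crate-private): a second optimization level is dropped rather than appended, because the check only inspects `-O`/`/O` style flags.

use std::ffi::OsString;

fn opt_flag_dedup_example() {
    let mut tool = Tool::with_family("cc".into(), ToolFamily::Gnu);
    tool.push_opt_unless_duplicate("-O2".into());
    tool.push_opt_unless_duplicate("-O3".into()); // ignored as a duplicate
    assert_eq!(tool.args(), &[OsString::from("-O2")]);
}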
+
+ /// Converts this compiler into a `Command` that's ready to be run.
+ ///
+ /// This is useful for when the compiler needs to be executed and the
+ /// command returned will already have the initial arguments and environment
+ /// variables configured.
+ pub fn to_command(&self) -> Command {
+ let mut cmd = match self.cc_wrapper_path {
+ Some(ref cc_wrapper_path) => {
+ let mut cmd = Command::new(cc_wrapper_path);
+ cmd.arg(&self.path);
+ cmd
+ }
+ None => Command::new(&self.path),
+ };
+ cmd.args(&self.cc_wrapper_args);
+
+ let value = self
+ .args
+ .iter()
+ .filter(|a| !self.removed_args.contains(a))
+ .collect::<Vec<_>>();
+ cmd.args(&value);
+
+ for (k, v) in self.env.iter() {
+ cmd.env(k, v);
+ }
+ cmd
+ }
+
+ /// Returns the path for this compiler.
+ ///
+ /// Note that this may not be a path to a file on the filesystem, e.g. "cc",
+ /// but rather something which will be resolved when a process is spawned.
+ pub fn path(&self) -> &Path {
+ &self.path
+ }
+
+ /// Returns the default set of arguments to the compiler needed to produce
+ /// executables for the target this compiler generates.
+ pub fn args(&self) -> &[OsString] {
+ &self.args
+ }
+
+ /// Returns the set of environment variables needed for this compiler to
+ /// operate.
+ ///
+ /// This is typically only used for MSVC compilers currently.
+ pub fn env(&self) -> &[(OsString, OsString)] {
+ &self.env
+ }
+
+    /// Returns the compiler command in the format of the CC environment variable,
+    /// or an empty string if the CC env var was not present.
+    ///
+    /// This is typically used by configure scripts.
+ pub fn cc_env(&self) -> OsString {
+ match self.cc_wrapper_path {
+ Some(ref cc_wrapper_path) => {
+ let mut cc_env = cc_wrapper_path.as_os_str().to_owned();
+ cc_env.push(" ");
+ cc_env.push(self.path.to_path_buf().into_os_string());
+ for arg in self.cc_wrapper_args.iter() {
+ cc_env.push(" ");
+ cc_env.push(arg);
+ }
+ cc_env
+ }
+ None => OsString::from(""),
+ }
+ }
+
+    /// Returns the compiler flags in the format of the CFLAGS environment variable.
+    /// Important here - this will not be CFLAGS from the env; rather, it is the tool's internal set of flags to use as CFLAGS.
+    /// This is typically used by configure scripts.
+ pub fn cflags_env(&self) -> OsString {
+ let mut flags = OsString::new();
+ for (i, arg) in self.args.iter().enumerate() {
+ if i > 0 {
+ flags.push(" ");
+ }
+ flags.push(arg);
+ }
+ flags
+ }
+
+ /// Whether the tool is GNU Compiler Collection-like.
+ pub fn is_like_gnu(&self) -> bool {
+ self.family == ToolFamily::Gnu
+ }
+
+ /// Whether the tool is Clang-like.
+ pub fn is_like_clang(&self) -> bool {
+ self.family == ToolFamily::Clang
+ }
+
+ /// Whether the tool is AppleClang under .xctoolchain
+ #[cfg(target_vendor = "apple")]
+ pub(crate) fn is_xctoolchain_clang(&self) -> bool {
+ let path = self.path.to_string_lossy();
+ path.contains(".xctoolchain/")
+ }
+ #[cfg(not(target_vendor = "apple"))]
+ pub(crate) fn is_xctoolchain_clang(&self) -> bool {
+ false
+ }
+
+ /// Whether the tool is MSVC-like.
+ pub fn is_like_msvc(&self) -> bool {
+ match self.family {
+ ToolFamily::Msvc { .. } => true,
+ _ => false,
+ }
+ }
+}
+
+/// Represents the family of tools this tool belongs to.
+///
+/// Each family of tools differs in how and what arguments they accept.
+///
+/// Detection of a family is done on a best-effort basis and may not accurately reflect the tool.
+#[derive(Copy, Clone, Debug, PartialEq)]
+pub enum ToolFamily {
+ /// Tool is GNU Compiler Collection-like.
+ Gnu,
+ /// Tool is Clang-like. It differs from the GCC in a sense that it accepts superset of flags
+ /// and its cross-compilation approach is different.
+ Clang,
+ /// Tool is the MSVC cl.exe.
+ Msvc { clang_cl: bool },
+}
+
+impl ToolFamily {
+    /// What the flag to request debug info for this family of tools looks like
+ pub(crate) fn add_debug_flags(&self, cmd: &mut Tool, dwarf_version: Option<u32>) {
+ match *self {
+ ToolFamily::Msvc { .. } => {
+ cmd.push_cc_arg("-Z7".into());
+ }
+ ToolFamily::Gnu | ToolFamily::Clang => {
+ cmd.push_cc_arg(
+ dwarf_version
+ .map_or_else(|| "-g".into(), |v| format!("-gdwarf-{}", v))
+ .into(),
+ );
+ }
+ }
+ }
+
+ /// What the flag to force frame pointers.
+ pub(crate) fn add_force_frame_pointer(&self, cmd: &mut Tool) {
+ match *self {
+ ToolFamily::Gnu | ToolFamily::Clang => {
+ cmd.push_cc_arg("-fno-omit-frame-pointer".into());
+ }
+ _ => (),
+ }
+ }
+
+ /// What the flags to enable all warnings
+ pub(crate) fn warnings_flags(&self) -> &'static str {
+ match *self {
+ ToolFamily::Msvc { .. } => "-W4",
+ ToolFamily::Gnu | ToolFamily::Clang => "-Wall",
+ }
+ }
+
+ /// What the flags to enable extra warnings
+ pub(crate) fn extra_warnings_flags(&self) -> Option<&'static str> {
+ match *self {
+ ToolFamily::Msvc { .. } => None,
+ ToolFamily::Gnu | ToolFamily::Clang => Some("-Wextra"),
+ }
+ }
+
+    /// What the flag to turn warnings into errors
+ pub(crate) fn warnings_to_errors_flag(&self) -> &'static str {
+ match *self {
+ ToolFamily::Msvc { .. } => "-WX",
+ ToolFamily::Gnu | ToolFamily::Clang => "-Werror",
+ }
+ }
+
+ pub(crate) fn verbose_stderr(&self) -> bool {
+ *self == ToolFamily::Clang
+ }
+}
diff --git a/third_party/rust/cc/src/com.rs b/third_party/rust/cc/src/windows/com.rs
index 843247e588..e81bb1d3c3 100644
--- a/third_party/rust/cc/src/com.rs
+++ b/third_party/rust/cc/src/windows/com.rs
@@ -7,27 +7,31 @@
#![allow(unused)]
-use crate::winapi::CoInitializeEx;
-use crate::winapi::IUnknown;
-use crate::winapi::Interface;
-use crate::winapi::BSTR;
-use crate::winapi::COINIT_MULTITHREADED;
-use crate::winapi::{SysFreeString, SysStringLen};
-use crate::winapi::{HRESULT, S_FALSE, S_OK};
-use std::ffi::{OsStr, OsString};
-use std::mem::forget;
-use std::ops::Deref;
-use std::os::windows::ffi::{OsStrExt, OsStringExt};
-use std::ptr::null_mut;
-use std::slice::from_raw_parts;
+use crate::windows::{
+ winapi::{IUnknown, Interface},
+ windows_sys::{
+ CoInitializeEx, SysFreeString, SysStringLen, BSTR, COINIT_MULTITHREADED, HRESULT, S_FALSE,
+ S_OK,
+ },
+};
+use std::{
+ convert::TryInto,
+ ffi::{OsStr, OsString},
+ mem::ManuallyDrop,
+ ops::Deref,
+ os::windows::ffi::{OsStrExt, OsStringExt},
+ ptr::{null, null_mut},
+ slice::from_raw_parts,
+};
pub fn initialize() -> Result<(), HRESULT> {
- let err = unsafe { CoInitializeEx(null_mut(), COINIT_MULTITHREADED) };
+ let err = unsafe { CoInitializeEx(null(), COINIT_MULTITHREADED.try_into().unwrap()) };
if err != S_OK && err != S_FALSE {
// S_FALSE just means COM is already initialized
- return Err(err);
+ Err(err)
+ } else {
+ Ok(())
}
- Ok(())
}
pub struct ComPtr<T>(*mut T)
@@ -55,15 +59,13 @@ where
/// Extracts the raw pointer.
/// You are now responsible for releasing it yourself.
pub fn into_raw(self) -> *mut T {
- let p = self.0;
- forget(self);
- p
+ ManuallyDrop::new(self).0
}
/// For internal use only.
fn as_unknown(&self) -> &IUnknown {
unsafe { &*(self.0 as *mut IUnknown) }
}
- /// Performs QueryInterface fun.
+ /// Performs `QueryInterface` fun.
pub fn cast<U>(&self) -> Result<ComPtr<U>, i32>
where
U: Interface,
diff --git a/third_party/rust/cc/src/windows_registry.rs b/third_party/rust/cc/src/windows/find_tools.rs
index 276688b03f..4f0ed87411 100644
--- a/third_party/rust/cc/src/windows_registry.rs
+++ b/third_party/rust/cc/src/windows/find_tools.rs
@@ -8,18 +8,34 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-//! A helper module to probe the Windows Registry when looking for
-//! windows-specific tools.
+//! A helper module for locating windows-specific tools:
+//! 1. On Windows host, probe the Windows Registry if needed;
+//! 2. On non-Windows host, check specified environment variables.
+
+#![allow(clippy::upper_case_acronyms)]
use std::process::Command;
use crate::Tool;
-#[cfg(windows)]
use crate::ToolFamily;
-#[cfg(windows)]
const MSVC_FAMILY: ToolFamily = ToolFamily::Msvc { clang_cl: false };
+#[derive(Copy, Clone)]
+struct TargetArch<'a>(pub &'a str);
+
+impl PartialEq<&str> for TargetArch<'_> {
+ fn eq(&self, other: &&str) -> bool {
+ self.0 == *other
+ }
+}
+
+impl<'a> From<TargetArch<'a>> for &'a str {
+ fn from(target: TargetArch<'a>) -> Self {
+ target.0
+ }
+}
+
/// Attempts to find a tool within an MSVC installation using the Windows
/// registry as a point to search from.
///
@@ -39,13 +55,6 @@ pub fn find(target: &str, tool: &str) -> Option<Command> {
/// Similar to the `find` function above, this function will attempt the same
/// operation (finding a MSVC tool in a local install) but instead returns a
/// `Tool` which may be introspected.
-#[cfg(not(windows))]
-pub fn find_tool(_target: &str, _tool: &str) -> Option<Tool> {
- None
-}
-
-/// Documented above.
-#[cfg(windows)]
pub fn find_tool(target: &str, tool: &str) -> Option<Tool> {
// This logic is all tailored for MSVC, if we're not that then bail out
// early.
@@ -53,13 +62,17 @@ pub fn find_tool(target: &str, tool: &str) -> Option<Tool> {
return None;
}
+ // Split the target to get the arch.
+ let target = TargetArch(target.split_once('-')?.0);
+
// Looks like msbuild isn't located in the same location as other tools like
- // cl.exe and lib.exe. To handle this we probe for it manually with
- // dedicated registry keys.
+ // cl.exe and lib.exe.
if tool.contains("msbuild") {
return impl_::find_msbuild(target);
}
+ // Looks like devenv isn't located in the same location as other tools like
+ // cl.exe and lib.exe.
if tool.contains("devenv") {
return impl_::find_devenv(target);
}
@@ -71,15 +84,16 @@ pub fn find_tool(target: &str, tool: &str) -> Option<Tool> {
// environment variables like `LIB`, `INCLUDE`, and `PATH` to ensure that
// the tool is actually usable.
- return impl_::find_msvc_environment(tool, target)
+ impl_::find_msvc_environment(tool, target)
.or_else(|| impl_::find_msvc_15plus(tool, target))
.or_else(|| impl_::find_msvc_14(tool, target))
.or_else(|| impl_::find_msvc_12(tool, target))
- .or_else(|| impl_::find_msvc_11(tool, target));
+ .or_else(|| impl_::find_msvc_11(tool, target))
}
/// A version of Visual Studio
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+#[non_exhaustive]
pub enum VsVers {
/// Visual Studio 12 (2013)
Vs12,
@@ -91,30 +105,14 @@ pub enum VsVers {
Vs16,
/// Visual Studio 17 (2022)
Vs17,
-
- /// Hidden variant that should not be matched on. Callers that want to
- /// handle an enumeration of `VsVers` instances should always have a default
- /// case meaning that it's a VS version they don't understand.
- #[doc(hidden)]
- #[allow(bad_style)]
- __Nonexhaustive_do_not_match_this_or_your_code_will_break,
}
/// Find the most recent installed version of Visual Studio
///
/// This is used by the cmake crate to figure out the correct
/// generator.
-#[cfg(not(windows))]
pub fn find_vs_version() -> Result<VsVers, String> {
- Err(format!("not windows"))
-}
-
-/// Documented above
-#[cfg(windows)]
-pub fn find_vs_version() -> Result<VsVers, String> {
- use std::env;
-
- match env::var("VisualStudioVersion") {
+ match std::env::var("VisualStudioVersion") {
Ok(version) => match &version[..] {
"17.0" => Ok(VsVers::Vs17),
"16.0" => Ok(VsVers::Vs16),
@@ -158,12 +156,17 @@ pub fn find_vs_version() -> Result<VsVers, String> {
}
}
+/// Windows Implementation.
#[cfg(windows)]
mod impl_ {
- use crate::com;
- use crate::registry::{RegistryKey, LOCAL_MACHINE};
- use crate::setup_config::SetupConfiguration;
- use crate::vs_instances::{VsInstances, VswhereInstance};
+ use crate::windows::com;
+ use crate::windows::registry::{RegistryKey, LOCAL_MACHINE};
+ use crate::windows::setup_config::SetupConfiguration;
+ use crate::windows::vs_instances::{VsInstances, VswhereInstance};
+ use crate::windows::windows_sys::{
+ FreeLibrary, GetMachineTypeAttributes, GetProcAddress, LoadLibraryA, UserEnabled, HMODULE,
+ IMAGE_FILE_MACHINE_AMD64, MACHINE_ATTRIBUTES, S_OK,
+ };
use std::convert::TryFrom;
use std::env;
use std::ffi::OsString;
@@ -174,8 +177,10 @@ mod impl_ {
use std::path::{Path, PathBuf};
use std::process::Command;
use std::str::FromStr;
+ use std::sync::atomic::{AtomicBool, Ordering};
+ use std::sync::Once;
- use super::MSVC_FAMILY;
+ use super::{TargetArch, MSVC_FAMILY};
use crate::Tool;
struct MsvcTool {
@@ -185,10 +190,75 @@ mod impl_ {
include: Vec<PathBuf>,
}
+ struct LibraryHandle(HMODULE);
+
+ impl LibraryHandle {
+ fn new(name: &[u8]) -> Option<Self> {
+ let handle = unsafe { LoadLibraryA(name.as_ptr() as _) };
+ (!handle.is_null()).then(|| Self(handle))
+ }
+
+ /// Get a function pointer to a function in the library.
+ /// SAFETY: The caller must ensure that the function signature matches the actual function.
+ /// The easiest way to do this is to add an entry to windows_sys_no_link.list and use the
+ /// generated function for `func_signature`.
+ unsafe fn get_proc_address<F>(&self, name: &[u8]) -> Option<F> {
+ let symbol = unsafe { GetProcAddress(self.0, name.as_ptr() as _) };
+ symbol.map(|symbol| unsafe { mem::transmute_copy(&symbol) })
+ }
+ }
+
+ impl Drop for LibraryHandle {
+ fn drop(&mut self) {
+ unsafe { FreeLibrary(self.0) };
+ }
+ }
+
+ type GetMachineTypeAttributesFuncType =
+ unsafe extern "system" fn(u16, *mut MACHINE_ATTRIBUTES) -> i32;
+ const _: () = {
+ // Ensure that our hand-written signature matches the actual function signature.
+ // We can't use `GetMachineTypeAttributes` outside of a const scope otherwise we'll end up statically linking to
+ // it, which will fail to load on older versions of Windows.
+ let _: GetMachineTypeAttributesFuncType = GetMachineTypeAttributes;
+ };
+
+ fn is_amd64_emulation_supported_inner() -> Option<bool> {
+ // GetMachineTypeAttributes is only available on Win11 22000+, so dynamically load it.
+ let kernel32 = LibraryHandle::new(b"kernel32.dll\0")?;
+ // SAFETY: GetMachineTypeAttributesFuncType is checked to match the real function signature.
+ let get_machine_type_attributes = unsafe {
+ kernel32
+ .get_proc_address::<GetMachineTypeAttributesFuncType>(b"GetMachineTypeAttributes\0")
+ }?;
+ let mut attributes = Default::default();
+ if unsafe { get_machine_type_attributes(IMAGE_FILE_MACHINE_AMD64, &mut attributes) } == S_OK
+ {
+ Some((attributes & UserEnabled) != 0)
+ } else {
+ Some(false)
+ }
+ }
+
+ fn is_amd64_emulation_supported() -> bool {
+ // TODO: Replace with a OnceLock once MSRV is 1.70.
+ static LOAD_VALUE: Once = Once::new();
+ static IS_SUPPORTED: AtomicBool = AtomicBool::new(false);
+
+ // Using Relaxed ordering since the Once is providing synchronization.
+ LOAD_VALUE.call_once(|| {
+ IS_SUPPORTED.store(
+ is_amd64_emulation_supported_inner().unwrap_or(false),
+ Ordering::Relaxed,
+ );
+ });
+ IS_SUPPORTED.load(Ordering::Relaxed)
+ }
+
impl MsvcTool {
fn new(tool: PathBuf) -> MsvcTool {
MsvcTool {
- tool: tool,
+ tool,
libs: Vec::new(),
path: Vec::new(),
include: Vec::new(),
@@ -202,7 +272,7 @@ mod impl_ {
path,
include,
} = self;
- let mut tool = Tool::with_family(tool.into(), MSVC_FAMILY);
+ let mut tool = Tool::with_family(tool, MSVC_FAMILY);
add_env(&mut tool, "LIB", libs);
add_env(&mut tool, "PATH", path);
add_env(&mut tool, "INCLUDE", include);
@@ -212,15 +282,14 @@ mod impl_ {
/// Checks to see if the `VSCMD_ARG_TGT_ARCH` environment variable matches the
/// given target's arch. Returns `None` if the variable does not exist.
- #[cfg(windows)]
- fn is_vscmd_target(target: &str) -> Option<bool> {
+ fn is_vscmd_target(target: TargetArch<'_>) -> Option<bool> {
let vscmd_arch = env::var("VSCMD_ARG_TGT_ARCH").ok()?;
// Convert the Rust target arch to its VS arch equivalent.
- let arch = match target.split("-").next() {
- Some("x86_64") => "x64",
- Some("aarch64") => "arm64",
- Some("i686") | Some("i586") => "x86",
- Some("thumbv7a") => "arm",
+ let arch = match target.into() {
+ "x86_64" => "x64",
+ "aarch64" | "arm64ec" => "arm64",
+ "i686" | "i586" => "x86",
+ "thumbv7a" => "arm",
// An unrecognized arch.
_ => return Some(false),
};
@@ -228,7 +297,7 @@ mod impl_ {
}
/// Attempt to find the tool using environment variables set by vcvars.
- pub fn find_msvc_environment(tool: &str, target: &str) -> Option<Tool> {
+ pub(super) fn find_msvc_environment(tool: &str, target: TargetArch<'_>) -> Option<Tool> {
// Early return if the environment doesn't contain a VC install.
if env::var_os("VCINSTALLDIR").is_none() {
return None;
@@ -248,16 +317,19 @@ mod impl_ {
.map(|p| p.join(tool))
.find(|p| p.exists())
})
- .map(|path| Tool::with_family(path.into(), MSVC_FAMILY))
+ .map(|path| Tool::with_family(path, MSVC_FAMILY))
}
}
- fn find_msbuild_vs17(target: &str) -> Option<Tool> {
+ fn find_msbuild_vs17(target: TargetArch<'_>) -> Option<Tool> {
find_tool_in_vs16plus_path(r"MSBuild\Current\Bin\MSBuild.exe", target, "17")
}
#[allow(bare_trait_objects)]
- fn vs16plus_instances(target: &str, version: &'static str) -> Box<Iterator<Item = PathBuf>> {
+ fn vs16plus_instances(
+ target: TargetArch<'_>,
+ version: &'static str,
+ ) -> Box<Iterator<Item = PathBuf>> {
let instances = if let Some(instances) = vs15plus_instances(target) {
instances
} else {
@@ -275,7 +347,11 @@ mod impl_ {
}))
}
- fn find_tool_in_vs16plus_path(tool: &str, target: &str, version: &'static str) -> Option<Tool> {
+ fn find_tool_in_vs16plus_path(
+ tool: &str,
+ target: TargetArch<'_>,
+ version: &'static str,
+ ) -> Option<Tool> {
vs16plus_instances(target, version)
.filter_map(|path| {
let path = path.join(tool);
@@ -283,10 +359,10 @@ mod impl_ {
return None;
}
let mut tool = Tool::with_family(path, MSVC_FAMILY);
- if target.contains("x86_64") {
+ if target == "x86_64" {
tool.env.push(("Platform".into(), "X64".into()));
}
- if target.contains("aarch64") {
+ if target == "aarch64" || target == "arm64ec" {
tool.env.push(("Platform".into(), "ARM64".into()));
}
Some(tool)
@@ -294,7 +370,7 @@ mod impl_ {
.next()
}
- fn find_msbuild_vs16(target: &str) -> Option<Tool> {
+ fn find_msbuild_vs16(target: TargetArch<'_>) -> Option<Tool> {
find_tool_in_vs16plus_path(r"MSBuild\Current\Bin\MSBuild.exe", target, "16")
}
@@ -310,7 +386,7 @@ mod impl_ {
//
// However, on ARM64 this method doesn't work because VS Installer fails to register COM component on ARM64.
// Hence, as the last resort we try to use vswhere.exe to list available instances.
- fn vs15plus_instances(target: &str) -> Option<VsInstances> {
+ fn vs15plus_instances(target: TargetArch<'_>) -> Option<VsInstances> {
vs15plus_instances_using_com().or_else(|| vs15plus_instances_using_vswhere(target))
}
@@ -323,7 +399,7 @@ mod impl_ {
Some(VsInstances::ComBased(enum_setup_instances))
}
- fn vs15plus_instances_using_vswhere(target: &str) -> Option<VsInstances> {
+ fn vs15plus_instances_using_vswhere(target: TargetArch<'_>) -> Option<VsInstances> {
let program_files_path: PathBuf = env::var("ProgramFiles(x86)")
.or_else(|_| env::var("ProgramFiles"))
.ok()?
@@ -336,11 +412,10 @@ mod impl_ {
return None;
}
- let arch = target.split('-').next().unwrap();
- let tools_arch = match arch {
+ let tools_arch = match target.into() {
"i586" | "i686" | "x86_64" => Some("x86.x64"),
"arm" | "thumbv7a" => Some("ARM"),
- "aarch64" => Some("ARM64"),
+ "aarch64" | "arm64ec" => Some("ARM64"),
_ => None,
};
@@ -374,7 +449,7 @@ mod impl_ {
.collect()
}
- pub fn find_msvc_15plus(tool: &str, target: &str) -> Option<Tool> {
+ pub(super) fn find_msvc_15plus(tool: &str, target: TargetArch<'_>) -> Option<Tool> {
let iter = vs15plus_instances(target)?;
iter.into_iter()
.filter_map(|instance| {
@@ -394,13 +469,13 @@ mod impl_ {
// we keep the registry method as a fallback option.
//
// [more reliable]: https://github.com/rust-lang/cc-rs/pull/331
- fn find_tool_in_vs15_path(tool: &str, target: &str) -> Option<Tool> {
+ fn find_tool_in_vs15_path(tool: &str, target: TargetArch<'_>) -> Option<Tool> {
let mut path = match vs15plus_instances(target) {
Some(instances) => instances
.into_iter()
.filter_map(|instance| instance.installation_path())
.map(|path| path.join(tool))
- .find(|ref path| path.is_file()),
+ .find(|path| path.is_file()),
None => None,
};
@@ -416,10 +491,9 @@ mod impl_ {
path.map(|path| {
let mut tool = Tool::with_family(path, MSVC_FAMILY);
- if target.contains("x86_64") {
+ if target == "x86_64" {
tool.env.push(("Platform".into(), "X64".into()));
- }
- if target.contains("aarch64") {
+ } else if target == "aarch64" {
tool.env.push(("Platform".into(), "ARM64".into()));
}
tool
@@ -428,10 +502,10 @@ mod impl_ {
fn tool_from_vs15plus_instance(
tool: &str,
- target: &str,
+ target: TargetArch<'_>,
instance_path: &PathBuf,
) -> Option<Tool> {
- let (root_path, bin_path, host_dylib_path, lib_path, include_path) =
+ let (root_path, bin_path, host_dylib_path, lib_path, alt_lib_path, include_path) =
vs15plus_vc_paths(target, instance_path)?;
let tool_path = bin_path.join(tool);
if !tool_path.exists() {
@@ -441,6 +515,9 @@ mod impl_ {
let mut tool = MsvcTool::new(tool_path);
tool.path.push(bin_path.clone());
tool.path.push(host_dylib_path);
+ if let Some(alt_lib_path) = alt_lib_path {
+ tool.libs.push(alt_lib_path);
+ }
tool.libs.push(lib_path);
tool.include.push(include_path);
@@ -455,45 +532,97 @@ mod impl_ {
}
fn vs15plus_vc_paths(
- target: &str,
- instance_path: &PathBuf,
- ) -> Option<(PathBuf, PathBuf, PathBuf, PathBuf, PathBuf)> {
- let version_path =
- instance_path.join(r"VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt");
- let mut version_file = File::open(version_path).ok()?;
- let mut version = String::new();
- version_file.read_to_string(&mut version).ok()?;
- let version = version.trim();
- let host = match host_arch() {
- X86 => "X86",
- X86_64 => "X64",
- // There is no natively hosted compiler on ARM64.
- // Instead, use the x86 toolchain under emulation (there is no x64 emulation).
- AARCH64 => "X86",
+ target: TargetArch<'_>,
+ instance_path: &Path,
+ ) -> Option<(PathBuf, PathBuf, PathBuf, PathBuf, Option<PathBuf>, PathBuf)> {
+ let version = vs15plus_vc_read_version(instance_path)?;
+
+ let hosts = match host_arch() {
+ X86 => &["X86"],
+ X86_64 => &["X64"],
+ // Starting with VS 17.4, there is a natively hosted compiler on ARM64:
+ // https://devblogs.microsoft.com/visualstudio/arm64-visual-studio-is-officially-here/
+ // On older versions of VS, we use x64 if running under emulation is supported,
+ // otherwise use x86.
+ AARCH64 => {
+ if is_amd64_emulation_supported() {
+ &["ARM64", "X64", "X86"][..]
+ } else {
+ &["ARM64", "X86"]
+ }
+ }
_ => return None,
};
let target = lib_subdir(target)?;
// The directory layout here is MSVC/bin/Host$host/$target/
let path = instance_path.join(r"VC\Tools\MSVC").join(version);
+ // We use the first available host architecture that can build for the target
+ let (host_path, host) = hosts.iter().find_map(|&x| {
+ let candidate = path.join("bin").join(format!("Host{}", x));
+ if candidate.join(target).exists() {
+ Some((candidate, x))
+ } else {
+ None
+ }
+ })?;
// This is the path to the toolchain for a particular target, running
// on a given host
- let bin_path = path
- .join("bin")
- .join(&format!("Host{}", host))
- .join(&target);
+ let bin_path = host_path.join(target);
// But! we also need PATH to contain the target directory for the host
// architecture, because it contains dlls like mspdb140.dll compiled for
// the host architecture.
- let host_dylib_path = path
- .join("bin")
- .join(&format!("Host{}", host))
- .join(&host.to_lowercase());
- let lib_path = path.join("lib").join(&target);
+ let host_dylib_path = host_path.join(host.to_lowercase());
+ let lib_path = path.join("lib").join(target);
+ let alt_lib_path = (target == "arm64ec").then(|| path.join("lib").join("arm64ec"));
let include_path = path.join("include");
- Some((path, bin_path, host_dylib_path, lib_path, include_path))
+ Some((
+ path,
+ bin_path,
+ host_dylib_path,
+ lib_path,
+ alt_lib_path,
+ include_path,
+ ))
}
- fn atl_paths(target: &str, path: &Path) -> Option<(PathBuf, PathBuf)> {
+ fn vs15plus_vc_read_version(dir: &Path) -> Option<String> {
+ // Try to open the default version file.
+ let mut version_path: PathBuf =
+ dir.join(r"VC\Auxiliary\Build\Microsoft.VCToolsVersion.default.txt");
+ let mut version_file = if let Ok(f) = File::open(&version_path) {
+ f
+ } else {
+ // If the default doesn't exist, search for other version files.
+ // These are in the form Microsoft.VCToolsVersion.v143.default.txt
+ // where `143` is any three decimal digit version number.
+ // This sorts versions by lexical order and selects the highest version.
+ let mut version_file = String::new();
+ version_path.pop();
+ for file in version_path.read_dir().ok()? {
+ let name = file.ok()?.file_name();
+ let name = name.to_str()?;
+ if name.starts_with("Microsoft.VCToolsVersion.v")
+ && name.ends_with(".default.txt")
+ && name > &version_file
+ {
+ version_file.replace_range(.., name);
+ }
+ }
+ if version_file.is_empty() {
+ return None;
+ }
+ version_path.push(version_file);
+ File::open(version_path).ok()?
+ };
+
+ // Get the version string from the file we found.
+ let mut version = String::new();
+ version_file.read_to_string(&mut version).ok()?;
+ version.truncate(version.trim_end().len());
+ Some(version)
+ }
+
+ fn atl_paths(target: TargetArch<'_>, path: &Path) -> Option<(PathBuf, PathBuf)> {
let atl_path = path.join("atlmfc");
let sub = lib_subdir(target)?;
if atl_path.exists() {
@@ -505,14 +634,14 @@ mod impl_ {
// For MSVC 14 we need to find the Universal CRT as well as either
// the Windows 10 SDK or Windows 8.1 SDK.
- pub fn find_msvc_14(tool: &str, target: &str) -> Option<Tool> {
+ pub(super) fn find_msvc_14(tool: &str, target: TargetArch<'_>) -> Option<Tool> {
let vcdir = get_vc_dir("14.0")?;
let mut tool = get_tool(tool, &vcdir, target)?;
add_sdks(&mut tool, target)?;
Some(tool.into_tool())
}
- fn add_sdks(tool: &mut MsvcTool, target: &str) -> Option<()> {
+ fn add_sdks(tool: &mut MsvcTool, target: TargetArch<'_>) -> Option<()> {
let sub = lib_subdir(target)?;
let (ucrt, ucrt_version) = get_ucrt_dir()?;
@@ -555,7 +684,7 @@ mod impl_ {
}
// For MSVC 12 we need to find the Windows 8.1 SDK.
- pub fn find_msvc_12(tool: &str, target: &str) -> Option<Tool> {
+ pub(super) fn find_msvc_12(tool: &str, target: TargetArch<'_>) -> Option<Tool> {
let vcdir = get_vc_dir("12.0")?;
let mut tool = get_tool(tool, &vcdir, target)?;
let sub = lib_subdir(target)?;
@@ -571,7 +700,7 @@ mod impl_ {
}
// For MSVC 11 we need to find the Windows 8 SDK.
- pub fn find_msvc_11(tool: &str, target: &str) -> Option<Tool> {
+ pub(super) fn find_msvc_11(tool: &str, target: TargetArch<'_>) -> Option<Tool> {
let vcdir = get_vc_dir("11.0")?;
let mut tool = get_tool(tool, &vcdir, target)?;
let sub = lib_subdir(target)?;
@@ -596,7 +725,7 @@ mod impl_ {
// Given a possible MSVC installation directory, we look for the linker and
// then add the MSVC library path.
- fn get_tool(tool: &str, path: &Path, target: &str) -> Option<MsvcTool> {
+ fn get_tool(tool: &str, path: &Path, target: TargetArch<'_>) -> Option<MsvcTool> {
bin_subdir(target)
.into_iter()
.map(|(sub, host)| {
@@ -605,7 +734,7 @@ mod impl_ {
path.join("bin").join(host),
)
})
- .filter(|&(ref path, _)| path.is_file())
+ .filter(|(path, _)| path.is_file())
.map(|(path, host)| {
let mut tool = MsvcTool::new(path);
tool.path.push(host);
@@ -734,9 +863,8 @@ mod impl_ {
// linkers that can target the architecture we desire. The 64-bit host
// linker is preferred, and hence first, due to 64-bit allowing it more
// address space to work with and potentially being faster.
- fn bin_subdir(target: &str) -> Vec<(&'static str, &'static str)> {
- let arch = target.split('-').next().unwrap();
- match (arch, host_arch()) {
+ fn bin_subdir(target: TargetArch<'_>) -> Vec<(&'static str, &'static str)> {
+ match (target.into(), host_arch()) {
("i586", X86) | ("i686", X86) => vec![("", "")],
("i586", X86_64) | ("i686", X86_64) => vec![("amd64_x86", "amd64"), ("", "")],
("x86_64", X86) => vec![("x86_amd64", "")],
@@ -747,21 +875,19 @@ mod impl_ {
}
}
- fn lib_subdir(target: &str) -> Option<&'static str> {
- let arch = target.split('-').next().unwrap();
- match arch {
+ fn lib_subdir(target: TargetArch<'_>) -> Option<&'static str> {
+ match target.into() {
"i586" | "i686" => Some("x86"),
"x86_64" => Some("x64"),
"arm" | "thumbv7a" => Some("arm"),
- "aarch64" => Some("arm64"),
+ "aarch64" | "arm64ec" => Some("arm64"),
_ => None,
}
}
// MSVC's x86 libraries are not in a subfolder
- fn vc_lib_subdir(target: &str) -> Option<&'static str> {
- let arch = target.split('-').next().unwrap();
- match arch {
+ fn vc_lib_subdir(target: TargetArch<'_>) -> Option<&'static str> {
+ match target.into() {
"i586" | "i686" => Some(""),
"x86_64" => Some("amd64"),
"arm" | "thumbv7a" => Some("arm"),
@@ -813,7 +939,7 @@ mod impl_ {
for subkey in key.iter().filter_map(|k| k.ok()) {
let val = subkey
.to_str()
- .and_then(|s| s.trim_left_matches("v").replace(".", "").parse().ok());
+ .and_then(|s| s.trim_left_matches("v").replace('.', "").parse().ok());
let val = match val {
Some(s) => s,
None => continue,
@@ -828,22 +954,22 @@ mod impl_ {
max_key
}
- pub fn has_msbuild_version(version: &str) -> bool {
+ pub(super) fn has_msbuild_version(version: &str) -> bool {
match version {
"17.0" => {
- find_msbuild_vs17("x86_64-pc-windows-msvc").is_some()
- || find_msbuild_vs17("i686-pc-windows-msvc").is_some()
- || find_msbuild_vs17("aarch64-pc-windows-msvc").is_some()
+ find_msbuild_vs17(TargetArch("x86_64")).is_some()
+ || find_msbuild_vs17(TargetArch("i686")).is_some()
+ || find_msbuild_vs17(TargetArch("aarch64")).is_some()
}
"16.0" => {
- find_msbuild_vs16("x86_64-pc-windows-msvc").is_some()
- || find_msbuild_vs16("i686-pc-windows-msvc").is_some()
- || find_msbuild_vs16("aarch64-pc-windows-msvc").is_some()
+ find_msbuild_vs16(TargetArch("x86_64")).is_some()
+ || find_msbuild_vs16(TargetArch("i686")).is_some()
+ || find_msbuild_vs16(TargetArch("aarch64")).is_some()
}
"15.0" => {
- find_msbuild_vs15("x86_64-pc-windows-msvc").is_some()
- || find_msbuild_vs15("i686-pc-windows-msvc").is_some()
- || find_msbuild_vs15("aarch64-pc-windows-msvc").is_some()
+ find_msbuild_vs15(TargetArch("x86_64")).is_some()
+ || find_msbuild_vs15(TargetArch("i686")).is_some()
+ || find_msbuild_vs15(TargetArch("aarch64")).is_some()
}
"12.0" | "14.0" => LOCAL_MACHINE
.open(&OsString::from(format!(
@@ -855,18 +981,20 @@ mod impl_ {
}
}
- pub fn find_devenv(target: &str) -> Option<Tool> {
- find_devenv_vs15(&target)
+ pub(super) fn find_devenv(target: TargetArch<'_>) -> Option<Tool> {
+ find_devenv_vs15(target)
}
- fn find_devenv_vs15(target: &str) -> Option<Tool> {
+ fn find_devenv_vs15(target: TargetArch<'_>) -> Option<Tool> {
find_tool_in_vs15_path(r"Common7\IDE\devenv.exe", target)
}
// see http://stackoverflow.com/questions/328017/path-to-msbuild
- pub fn find_msbuild(target: &str) -> Option<Tool> {
+ pub(super) fn find_msbuild(target: TargetArch<'_>) -> Option<Tool> {
// VS 15 (2017) changed how to locate msbuild
- if let Some(r) = find_msbuild_vs16(target) {
+ if let Some(r) = find_msbuild_vs17(target) {
+ Some(r)
+ } else if let Some(r) = find_msbuild_vs16(target) {
return Some(r);
} else if let Some(r) = find_msbuild_vs15(target) {
return Some(r);
@@ -875,11 +1003,11 @@ mod impl_ {
}
}
- fn find_msbuild_vs15(target: &str) -> Option<Tool> {
+ fn find_msbuild_vs15(target: TargetArch<'_>) -> Option<Tool> {
find_tool_in_vs15_path(r"MSBuild\15.0\Bin\MSBuild.exe", target)
}
- fn find_old_msbuild(target: &str) -> Option<Tool> {
+ fn find_old_msbuild(target: TargetArch<'_>) -> Option<Tool> {
let key = r"SOFTWARE\Microsoft\MSBuild\ToolsVersions";
LOCAL_MACHINE
.open(key.as_ref())
@@ -891,10 +1019,82 @@ mod impl_ {
let mut path = PathBuf::from(path);
path.push("MSBuild.exe");
let mut tool = Tool::with_family(path, MSVC_FAMILY);
- if target.contains("x86_64") {
+ if target == "x86_64" {
tool.env.push(("Platform".into(), "X64".into()));
}
tool
})
}
}
+
+/// Non-Windows Implementation.
+#[cfg(not(windows))]
+mod impl_ {
+ use std::{env, ffi::OsString};
+
+ use super::{TargetArch, MSVC_FAMILY};
+ use crate::Tool;
+
+ /// Finding the msbuild.exe tool on a non-Windows system is not currently supported.
+ /// It might be possible to locate it via an environment variable such as `MSBUILD_BIN`.
+ pub(super) fn find_msbuild(_target: TargetArch<'_>) -> Option<Tool> {
+ None
+ }
+
+ // Finding the devenv.exe tool on a non-Windows system is not currently supported.
+ // It might be possible to locate it via an environment variable such as `DEVENV_BIN`.
+ pub(super) fn find_devenv(_target: TargetArch<'_>) -> Option<Tool> {
+ None
+ }
+
+ /// Attempt to find the tool using environment variables set by vcvars.
+ pub(super) fn find_msvc_environment(tool: &str, _target: TargetArch<'_>) -> Option<Tool> {
+ // Early return if the environment doesn't contain a VC install.
+ let vc_install_dir = env::var_os("VCINSTALLDIR")?;
+ let vs_install_dir = env::var_os("VSINSTALLDIR")?;
+
+ let get_tool = |install_dir: OsString| {
+ env::split_paths(&install_dir)
+ .map(|p| p.join(tool))
+ .find(|p| p.exists())
+ .map(|path| Tool::with_family(path.into(), MSVC_FAMILY))
+ };
+
+ // Look for the tool in the VC install directory.
+ get_tool(vc_install_dir)
+ // Then look in the VS install directory.
+ .or_else(|| get_tool(vs_install_dir))
+ // Finally, fall back to the `PATH` environment variable.
+ .or_else(|| env::var_os("PATH").and_then(|path| get_tool(path)))
+ }
+
+ pub(super) fn find_msvc_15plus(_tool: &str, _target: TargetArch<'_>) -> Option<Tool> {
+ None
+ }
+
+ // For MSVC 14 we need to find the Universal CRT as well as either
+ // the Windows 10 SDK or Windows 8.1 SDK.
+ pub(super) fn find_msvc_14(_tool: &str, _target: TargetArch<'_>) -> Option<Tool> {
+ None
+ }
+
+ // For MSVC 12 we need to find the Windows 8.1 SDK.
+ pub(super) fn find_msvc_12(_tool: &str, _target: TargetArch<'_>) -> Option<Tool> {
+ None
+ }
+
+ // For MSVC 11 we need to find the Windows 8 SDK.
+ pub(super) fn find_msvc_11(_tool: &str, _target: TargetArch<'_>) -> Option<Tool> {
+ None
+ }
+
+ pub(super) fn has_msbuild_version(version: &str) -> bool {
+ match version {
+ "17.0" => false,
+ "16.0" => false,
+ "15.0" => false,
+ "12.0" | "14.0" => false,
+ _ => false,
+ }
+ }
+}
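
The refactored `find_tools.rs` above keeps the cross-platform public surface (`find`, `find_tool`, `find_vs_version`, `VsVers`) and moves the platform split into the two `impl_` modules. Because `VsVers` is now `#[non_exhaustive]`, downstream matches need a wildcard arm. A minimal build-script sketch of how a caller might use this, assuming the crate still re-exports the module under its historical `cc::windows_registry` name (that re-export is not shown in this hunk):

```rust
// Hedged sketch, not part of this diff. Assumes `windows::find_tools` is
// re-exported as `cc::windows_registry`, as in earlier cc releases.
fn main() {
    // `VsVers` is `#[non_exhaustive]`, so the wildcard arm is mandatory.
    match cc::windows_registry::find_vs_version() {
        Ok(cc::windows_registry::VsVers::Vs17) => println!("found VS 2022"),
        Ok(cc::windows_registry::VsVers::Vs16) => println!("found VS 2019"),
        Ok(_) => println!("found some other Visual Studio version"),
        Err(msg) => println!("no Visual Studio detected: {msg}"),
    }

    // Full target triples are still accepted; only the arch prefix is used now.
    if let Some(cl) = cc::windows_registry::find_tool("x86_64-pc-windows-msvc", "cl.exe") {
        println!("cl.exe found at {}", cl.path().display());
    }
}
```
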
diff --git a/third_party/rust/cc/src/windows/mod.rs b/third_party/rust/cc/src/windows/mod.rs
new file mode 100644
index 0000000000..9b6f297e1a
--- /dev/null
+++ b/third_party/rust/cc/src/windows/mod.rs
@@ -0,0 +1,20 @@
+//! These modules are all glue to support reading the MSVC version from
+//! the registry and from COM interfaces.
+
+// This is used in the crate's public API, so don't use #[cfg(windows)]
+pub mod find_tools;
+
+#[cfg(windows)]
+pub(crate) mod windows_sys;
+
+#[cfg(windows)]
+mod registry;
+#[cfg(windows)]
+#[macro_use]
+mod winapi;
+#[cfg(windows)]
+mod com;
+#[cfg(windows)]
+mod setup_config;
+#[cfg(windows)]
+mod vs_instances;
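
The new `windows/mod.rs` keeps `find_tools` unconditionally compiled because it is part of the crate's public API, while the registry/COM glue stays behind `#[cfg(windows)]`. A minimal, illustrative sketch of that gating pattern (not the crate's actual code):

```rust
// Public entry point exists on every platform; the real work is cfg-gated and
// non-Windows builds get a stub that simply reports "not found".
pub fn find_tool(target: &str, tool: &str) -> Option<std::path::PathBuf> {
    imp::find(target, tool)
}

#[cfg(windows)]
mod imp {
    use std::path::PathBuf;
    pub fn find(_target: &str, tool: &str) -> Option<PathBuf> {
        // A real implementation would consult the registry / COM here.
        Some(PathBuf::from(r"C:\illustrative\msvc").join(tool))
    }
}

#[cfg(not(windows))]
mod imp {
    use std::path::PathBuf;
    pub fn find(_target: &str, _tool: &str) -> Option<PathBuf> {
        None
    }
}
```
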
diff --git a/third_party/rust/cc/src/registry.rs b/third_party/rust/cc/src/windows/registry.rs
index cae32219c7..83983032de 100644
--- a/third_party/rust/cc/src/registry.rs
+++ b/third_party/rust/cc/src/windows/registry.rs
@@ -8,63 +8,23 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::ffi::{OsStr, OsString};
-use std::io;
-use std::ops::RangeFrom;
-use std::os::raw;
-use std::os::windows::prelude::*;
+use crate::windows::windows_sys::{
+ RegCloseKey, RegEnumKeyExW, RegOpenKeyExW, RegQueryValueExW, ERROR_NO_MORE_ITEMS,
+ ERROR_SUCCESS, HKEY, HKEY_LOCAL_MACHINE, KEY_READ, KEY_WOW64_32KEY, REG_SZ,
+};
+use std::{
+ ffi::{OsStr, OsString},
+ io,
+ ops::RangeFrom,
+ os::windows::prelude::*,
+ ptr::null_mut,
+};
/// Must never be `HKEY_PERFORMANCE_DATA`.
pub(crate) struct RegistryKey(Repr);
-type HKEY = *mut u8;
+#[allow(clippy::upper_case_acronyms)]
type DWORD = u32;
-type LPDWORD = *mut DWORD;
-type LPCWSTR = *const u16;
-type LPWSTR = *mut u16;
-type LONG = raw::c_long;
-type PHKEY = *mut HKEY;
-type PFILETIME = *mut u8;
-type LPBYTE = *mut u8;
-type REGSAM = u32;
-
-const ERROR_SUCCESS: DWORD = 0;
-const ERROR_NO_MORE_ITEMS: DWORD = 259;
-// Sign-extend into 64 bits if needed.
-const HKEY_LOCAL_MACHINE: HKEY = 0x80000002u32 as i32 as isize as HKEY;
-const REG_SZ: DWORD = 1;
-const KEY_READ: DWORD = 0x20019;
-const KEY_WOW64_32KEY: DWORD = 0x200;
-
-#[link(name = "advapi32")]
-extern "system" {
- fn RegOpenKeyExW(
- key: HKEY,
- lpSubKey: LPCWSTR,
- ulOptions: DWORD,
- samDesired: REGSAM,
- phkResult: PHKEY,
- ) -> LONG;
- fn RegEnumKeyExW(
- key: HKEY,
- dwIndex: DWORD,
- lpName: LPWSTR,
- lpcName: LPDWORD,
- lpReserved: LPDWORD,
- lpClass: LPWSTR,
- lpcClass: LPDWORD,
- lpftLastWriteTime: PFILETIME,
- ) -> LONG;
- fn RegQueryValueExW(
- hKey: HKEY,
- lpValueName: LPCWSTR,
- lpReserved: LPDWORD,
- lpType: LPDWORD,
- lpData: LPBYTE,
- lpcbData: LPDWORD,
- ) -> LONG;
- fn RegCloseKey(hKey: HKEY) -> LONG;
-}
struct OwnedKey(HKEY);
@@ -97,7 +57,7 @@ impl RegistryKey {
/// Open a sub-key of `self`.
pub fn open(&self, key: &OsStr) -> io::Result<RegistryKey> {
let key = key.encode_wide().chain(Some(0)).collect::<Vec<_>>();
- let mut ret = 0 as *mut _;
+ let mut ret = null_mut();
let err = unsafe {
RegOpenKeyExW(
self.raw(),
@@ -107,7 +67,7 @@ impl RegistryKey {
&mut ret,
)
};
- if err == ERROR_SUCCESS as LONG {
+ if err == ERROR_SUCCESS {
Ok(RegistryKey(Repr::Owned(OwnedKey(ret))))
} else {
Err(io::Error::from_raw_os_error(err as i32))
@@ -130,12 +90,12 @@ impl RegistryKey {
let err = RegQueryValueExW(
self.raw(),
name.as_ptr(),
- 0 as *mut _,
+ null_mut(),
&mut kind,
- 0 as *mut _,
+ null_mut(),
&mut len,
);
- if err != ERROR_SUCCESS as LONG {
+ if err != ERROR_SUCCESS {
return Err(io::Error::from_raw_os_error(err as i32));
}
if kind != REG_SZ {
@@ -156,8 +116,8 @@ impl RegistryKey {
let err = RegQueryValueExW(
self.raw(),
name.as_ptr(),
- 0 as *mut _,
- 0 as *mut _,
+ null_mut(),
+ null_mut(),
v.as_mut_ptr() as *mut _,
&mut len,
);
@@ -165,7 +125,7 @@ impl RegistryKey {
// grew between the first and second call to `RegQueryValueExW`),
// both because it's extremely unlikely, and this is a bit more
// defensive against weird types of registry keys.
- if err != ERROR_SUCCESS as LONG {
+ if err != ERROR_SUCCESS {
return Err(io::Error::from_raw_os_error(err as i32));
}
// The length is allowed to change, but should still be even, as
@@ -188,7 +148,7 @@ impl RegistryKey {
if !v.is_empty() && v[v.len() - 1] == 0 {
v.pop();
}
- return Ok(OsString::from_wide(&v));
+ Ok(OsString::from_wide(&v))
}
}
}
@@ -213,14 +173,14 @@ impl<'a> Iterator for Iter<'a> {
i,
v.as_mut_ptr(),
&mut len,
- 0 as *mut _,
- 0 as *mut _,
- 0 as *mut _,
- 0 as *mut _,
+ null_mut(),
+ null_mut(),
+ null_mut(),
+ null_mut(),
);
- if ret == ERROR_NO_MORE_ITEMS as LONG {
+ if ret == ERROR_NO_MORE_ITEMS {
None
- } else if ret != ERROR_SUCCESS as LONG {
+ } else if ret != ERROR_SUCCESS {
Some(Err(io::Error::from_raw_os_error(ret as i32)))
} else {
v.set_len(len as usize);
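
The rewritten `registry.rs` drops the hand-rolled `advapi32` declarations in favour of the generated `windows_sys` bindings and replaces the `0 as *mut _` casts with `null_mut()`. The wide-string handling is unchanged; a small illustrative sketch (Windows-only, mirroring what `RegistryKey::open` does before calling `RegOpenKeyExW`):

```rust
// Illustrative sketch only: encode a registry key name as NUL-terminated
// UTF-16, which is what the Win32 registry APIs expect.
#[cfg(windows)]
fn to_wide_nul(s: &std::ffi::OsStr) -> Vec<u16> {
    use std::os::windows::ffi::OsStrExt;
    s.encode_wide().chain(Some(0)).collect()
}

#[cfg(windows)]
fn demo() {
    let key = to_wide_nul(std::ffi::OsStr::new(r"SOFTWARE\Microsoft\MSBuild\ToolsVersions"));
    assert_eq!(key.last(), Some(&0));
    // `key.as_ptr()` would be passed as `lpSubKey`; unused out-parameters are
    // now passed as `std::ptr::null_mut()` instead of `0 as *mut _`.
}
```
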
diff --git a/third_party/rust/cc/src/setup_config.rs b/third_party/rust/cc/src/windows/setup_config.rs
index 030051ca69..5739ecf7d6 100644
--- a/third_party/rust/cc/src/setup_config.rs
+++ b/third_party/rust/cc/src/windows/setup_config.rs
@@ -8,19 +8,19 @@
#![allow(bad_style)]
#![allow(unused)]
-use crate::winapi::Interface;
-use crate::winapi::BSTR;
-use crate::winapi::LPCOLESTR;
-use crate::winapi::LPSAFEARRAY;
-use crate::winapi::S_FALSE;
-use crate::winapi::{CoCreateInstance, CLSCTX_ALL};
-use crate::winapi::{IUnknown, IUnknownVtbl};
-use crate::winapi::{HRESULT, LCID, LPCWSTR, PULONGLONG};
-use crate::winapi::{LPFILETIME, ULONG};
-use std::ffi::OsString;
-use std::ptr::null_mut;
+use crate::windows::{
+ com::{BStr, ComPtr},
+ winapi::{
+ IUnknown, IUnknownVtbl, Interface, LCID, LPCOLESTR, LPCWSTR, LPFILETIME, LPSAFEARRAY,
+ PULONGLONG, ULONG,
+ },
+ windows_sys::{CoCreateInstance, BSTR, CLSCTX_ALL, HRESULT, S_FALSE},
+};
-use crate::com::{BStr, ComPtr};
+use std::{
+ ffi::OsString,
+ ptr::{null, null_mut},
+};
// Bindings to the Setup.Configuration stuff
pub type InstanceState = u32;
@@ -212,7 +212,7 @@ impl SetupInstance {
SetupInstance(ComPtr::from_raw(obj))
}
pub fn instance_id(&self) -> Result<OsString, i32> {
- let mut s = null_mut();
+ let mut s = null();
let err = unsafe { self.0.GetInstanceId(&mut s) };
let bstr = unsafe { BStr::from_raw(s) };
if err < 0 {
@@ -221,7 +221,7 @@ impl SetupInstance {
Ok(bstr.to_osstring())
}
pub fn installation_name(&self) -> Result<OsString, i32> {
- let mut s = null_mut();
+ let mut s = null();
let err = unsafe { self.0.GetInstallationName(&mut s) };
let bstr = unsafe { BStr::from_raw(s) };
if err < 0 {
@@ -230,7 +230,7 @@ impl SetupInstance {
Ok(bstr.to_osstring())
}
pub fn installation_path(&self) -> Result<OsString, i32> {
- let mut s = null_mut();
+ let mut s = null();
let err = unsafe { self.0.GetInstallationPath(&mut s) };
let bstr = unsafe { BStr::from_raw(s) };
if err < 0 {
@@ -239,7 +239,7 @@ impl SetupInstance {
Ok(bstr.to_osstring())
}
pub fn installation_version(&self) -> Result<OsString, i32> {
- let mut s = null_mut();
+ let mut s = null();
let err = unsafe { self.0.GetInstallationVersion(&mut s) };
let bstr = unsafe { BStr::from_raw(s) };
if err < 0 {
@@ -248,7 +248,7 @@ impl SetupInstance {
Ok(bstr.to_osstring())
}
pub fn product_path(&self) -> Result<OsString, i32> {
- let mut s = null_mut();
+ let mut s = null();
let this = self.0.cast::<ISetupInstance2>()?;
let err = unsafe { this.GetProductPath(&mut s) };
let bstr = unsafe { BStr::from_raw(s) };
diff --git a/third_party/rust/cc/src/vs_instances.rs b/third_party/rust/cc/src/windows/vs_instances.rs
index 31d3dd1470..e863dadabb 100644
--- a/third_party/rust/cc/src/vs_instances.rs
+++ b/third_party/rust/cc/src/windows/vs_instances.rs
@@ -4,7 +4,7 @@ use std::convert::TryFrom;
use std::io::BufRead;
use std::path::PathBuf;
-use crate::setup_config::{EnumSetupInstances, SetupInstance};
+use crate::windows::setup_config::{EnumSetupInstances, SetupInstance};
pub enum VsInstance {
Com(SetupInstance),
diff --git a/third_party/rust/cc/src/winapi.rs b/third_party/rust/cc/src/windows/winapi.rs
index 8e04ce9cbd..09965daa89 100644
--- a/third_party/rust/cc/src/winapi.rs
+++ b/third_party/rust/cc/src/windows/winapi.rs
@@ -5,26 +5,19 @@
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
-#![allow(bad_style)]
+#![allow(bad_style, clippy::upper_case_acronyms)]
use std::os::raw;
pub type wchar_t = u16;
-pub type UINT = raw::c_uint;
-pub type LPUNKNOWN = *mut IUnknown;
+pub use crate::windows::windows_sys::{FILETIME, GUID, HRESULT, SAFEARRAY};
+
pub type REFIID = *const IID;
pub type IID = GUID;
-pub type REFCLSID = *const IID;
-pub type PVOID = *mut raw::c_void;
-pub type USHORT = raw::c_ushort;
pub type ULONG = raw::c_ulong;
-pub type LONG = raw::c_long;
pub type DWORD = u32;
-pub type LPVOID = *mut raw::c_void;
-pub type HRESULT = raw::c_long;
pub type LPFILETIME = *mut FILETIME;
-pub type BSTR = *mut OLECHAR;
pub type OLECHAR = WCHAR;
pub type WCHAR = wchar_t;
pub type LPCOLESTR = *const OLECHAR;
@@ -33,75 +26,10 @@ pub type LPCWSTR = *const WCHAR;
pub type PULONGLONG = *mut ULONGLONG;
pub type ULONGLONG = u64;
-pub const S_OK: HRESULT = 0;
-pub const S_FALSE: HRESULT = 1;
-pub const COINIT_MULTITHREADED: u32 = 0x0;
-
-pub type CLSCTX = u32;
-
-pub const CLSCTX_INPROC_SERVER: CLSCTX = 0x1;
-pub const CLSCTX_INPROC_HANDLER: CLSCTX = 0x2;
-pub const CLSCTX_LOCAL_SERVER: CLSCTX = 0x4;
-pub const CLSCTX_REMOTE_SERVER: CLSCTX = 0x10;
-
-pub const CLSCTX_ALL: CLSCTX =
- CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER;
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct GUID {
- pub Data1: raw::c_ulong,
- pub Data2: raw::c_ushort,
- pub Data3: raw::c_ushort,
- pub Data4: [raw::c_uchar; 8],
-}
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct FILETIME {
- pub dwLowDateTime: DWORD,
- pub dwHighDateTime: DWORD,
-}
-
pub trait Interface {
fn uuidof() -> GUID;
}
-#[link(name = "ole32")]
-#[link(name = "oleaut32")]
-extern "C" {}
-
-extern "system" {
- pub fn CoInitializeEx(pvReserved: LPVOID, dwCoInit: DWORD) -> HRESULT;
- pub fn CoCreateInstance(
- rclsid: REFCLSID,
- pUnkOuter: LPUNKNOWN,
- dwClsContext: DWORD,
- riid: REFIID,
- ppv: *mut LPVOID,
- ) -> HRESULT;
- pub fn SysFreeString(bstrString: BSTR);
- pub fn SysStringLen(pbstr: BSTR) -> UINT;
-}
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct SAFEARRAYBOUND {
- pub cElements: ULONG,
- pub lLbound: LONG,
-}
-
-#[repr(C)]
-#[derive(Copy, Clone)]
-pub struct SAFEARRAY {
- pub cDims: USHORT,
- pub fFeatures: USHORT,
- pub cbElements: ULONG,
- pub cLocks: ULONG,
- pub pvData: PVOID,
- pub rgsabound: [SAFEARRAYBOUND; 1],
-}
-
pub type LPSAFEARRAY = *mut SAFEARRAY;
macro_rules! DEFINE_GUID {
@@ -109,11 +37,11 @@ macro_rules! DEFINE_GUID {
$name:ident, $l:expr, $w1:expr, $w2:expr,
$b1:expr, $b2:expr, $b3:expr, $b4:expr, $b5:expr, $b6:expr, $b7:expr, $b8:expr
) => {
- pub const $name: $crate::winapi::GUID = $crate::winapi::GUID {
- Data1: $l,
- Data2: $w1,
- Data3: $w2,
- Data4: [$b1, $b2, $b3, $b4, $b5, $b6, $b7, $b8],
+ pub const $name: $crate::windows::winapi::GUID = $crate::windows::winapi::GUID {
+ data1: $l,
+ data2: $w1,
+ data3: $w2,
+ data4: [$b1, $b2, $b3, $b4, $b5, $b6, $b7, $b8],
};
};
}
@@ -193,14 +121,14 @@ macro_rules! RIDL {
$l:expr, $w1:expr, $w2:expr,
$b1:expr, $b2:expr, $b3:expr, $b4:expr, $b5:expr, $b6:expr, $b7:expr, $b8:expr
) => (
- impl $crate::winapi::Interface for $interface {
+ impl $crate::windows::winapi::Interface for $interface {
#[inline]
- fn uuidof() -> $crate::winapi::GUID {
- $crate::winapi::GUID {
- Data1: $l,
- Data2: $w1,
- Data3: $w2,
- Data4: [$b1, $b2, $b3, $b4, $b5, $b6, $b7, $b8],
+ fn uuidof() -> $crate::windows::winapi::GUID {
+ $crate::windows::winapi::GUID {
+ data1: $l,
+ data2: $w1,
+ data3: $w2,
+ data4: [$b1, $b2, $b3, $b4, $b5, $b6, $b7, $b8],
}
}
}
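
The trimmed-down `winapi.rs` now reuses `GUID`, `FILETIME`, `HRESULT` and `SAFEARRAY` from the generated bindings, so the `DEFINE_GUID!` and `RIDL!` macros switch to the lowercase `data1..data4` field names. A rough sketch of what the `RIDL!` expansion boils down to, with an illustrative interface name and assuming the generated `GUID` type is in scope:

```rust
pub trait Interface {
    fn uuidof() -> GUID;
}

// Illustrative COM interface, not one the crate defines.
pub struct ISomething;

impl Interface for ISomething {
    fn uuidof() -> GUID {
        // IID spelled with the lowercase field names of the generated GUID type.
        GUID {
            data1: 0x1234_5678,
            data2: 0x9abc,
            data3: 0xdef0,
            data4: [1, 2, 3, 4, 5, 6, 7, 8],
        }
    }
}
```
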
diff --git a/third_party/rust/cc/src/windows/windows_sys.rs b/third_party/rust/cc/src/windows/windows_sys.rs
new file mode 100644
index 0000000000..8b98ce97f8
--- /dev/null
+++ b/third_party/rust/cc/src/windows/windows_sys.rs
@@ -0,0 +1,223 @@
+// This file is autogenerated.
+//
+// To add bindings, edit windows_sys.lst then run:
+//
+// ```
+// cd generate-windows-sys/
+// cargo run
+// ```
+// Bindings generated by `windows-bindgen` 0.53.0
+
+#![allow(
+ non_snake_case,
+ non_upper_case_globals,
+ non_camel_case_types,
+ dead_code,
+ clippy::all
+)]
+#[link(name = "advapi32")]
+extern "system" {
+ pub fn RegCloseKey(hkey: HKEY) -> WIN32_ERROR;
+}
+#[link(name = "advapi32")]
+extern "system" {
+ pub fn RegEnumKeyExW(
+ hkey: HKEY,
+ dwindex: u32,
+ lpname: PWSTR,
+ lpcchname: *mut u32,
+ lpreserved: *const u32,
+ lpclass: PWSTR,
+ lpcchclass: *mut u32,
+ lpftlastwritetime: *mut FILETIME,
+ ) -> WIN32_ERROR;
+}
+#[link(name = "advapi32")]
+extern "system" {
+ pub fn RegOpenKeyExW(
+ hkey: HKEY,
+ lpsubkey: PCWSTR,
+ uloptions: u32,
+ samdesired: REG_SAM_FLAGS,
+ phkresult: *mut HKEY,
+ ) -> WIN32_ERROR;
+}
+#[link(name = "advapi32")]
+extern "system" {
+ pub fn RegQueryValueExW(
+ hkey: HKEY,
+ lpvaluename: PCWSTR,
+ lpreserved: *const u32,
+ lptype: *mut REG_VALUE_TYPE,
+ lpdata: *mut u8,
+ lpcbdata: *mut u32,
+ ) -> WIN32_ERROR;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn FreeLibrary(hlibmodule: HMODULE) -> BOOL;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn GetMachineTypeAttributes(
+ machine: u16,
+ machinetypeattributes: *mut MACHINE_ATTRIBUTES,
+ ) -> HRESULT;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn GetProcAddress(hmodule: HMODULE, lpprocname: PCSTR) -> FARPROC;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn LoadLibraryA(lplibfilename: PCSTR) -> HMODULE;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn OpenSemaphoreA(dwdesiredaccess: u32, binherithandle: BOOL, lpname: PCSTR) -> HANDLE;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn PeekNamedPipe(
+ hnamedpipe: HANDLE,
+ lpbuffer: *mut ::core::ffi::c_void,
+ nbuffersize: u32,
+ lpbytesread: *mut u32,
+ lptotalbytesavail: *mut u32,
+ lpbytesleftthismessage: *mut u32,
+ ) -> BOOL;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn ReleaseSemaphore(
+ hsemaphore: HANDLE,
+ lreleasecount: i32,
+ lppreviouscount: *mut i32,
+ ) -> BOOL;
+}
+#[link(name = "kernel32")]
+extern "system" {
+ pub fn WaitForSingleObject(hhandle: HANDLE, dwmilliseconds: u32) -> WAIT_EVENT;
+}
+#[link(name = "ole32")]
+extern "system" {
+ pub fn CoCreateInstance(
+ rclsid: *const GUID,
+ punkouter: *mut ::core::ffi::c_void,
+ dwclscontext: CLSCTX,
+ riid: *const GUID,
+ ppv: *mut *mut ::core::ffi::c_void,
+ ) -> HRESULT;
+}
+#[link(name = "ole32")]
+extern "system" {
+ pub fn CoInitializeEx(pvreserved: *const ::core::ffi::c_void, dwcoinit: u32) -> HRESULT;
+}
+#[link(name = "oleaut32")]
+extern "system" {
+ pub fn SysFreeString(bstrstring: BSTR);
+}
+#[link(name = "oleaut32")]
+extern "system" {
+ pub fn SysStringLen(pbstr: BSTR) -> u32;
+}
+pub type ADVANCED_FEATURE_FLAGS = u16;
+pub type BOOL = i32;
+pub type BSTR = *const u16;
+pub type CLSCTX = u32;
+pub const CLSCTX_ALL: CLSCTX = 23u32;
+pub type COINIT = i32;
+pub const COINIT_MULTITHREADED: COINIT = 0i32;
+pub const ERROR_NO_MORE_ITEMS: WIN32_ERROR = 259u32;
+pub const ERROR_SUCCESS: WIN32_ERROR = 0u32;
+pub const FALSE: BOOL = 0i32;
+pub type FARPROC = ::core::option::Option<unsafe extern "system" fn() -> isize>;
+#[repr(C)]
+pub struct FILETIME {
+ pub dwLowDateTime: u32,
+ pub dwHighDateTime: u32,
+}
+impl ::core::marker::Copy for FILETIME {}
+impl ::core::clone::Clone for FILETIME {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+#[repr(C)]
+pub struct GUID {
+ pub data1: u32,
+ pub data2: u16,
+ pub data3: u16,
+ pub data4: [u8; 8],
+}
+impl ::core::marker::Copy for GUID {}
+impl ::core::clone::Clone for GUID {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+impl GUID {
+ pub const fn from_u128(uuid: u128) -> Self {
+ Self {
+ data1: (uuid >> 96) as u32,
+ data2: (uuid >> 80 & 0xffff) as u16,
+ data3: (uuid >> 64 & 0xffff) as u16,
+ data4: (uuid as u64).to_be_bytes(),
+ }
+ }
+}
+pub type HANDLE = *mut ::core::ffi::c_void;
+pub type HKEY = *mut ::core::ffi::c_void;
+pub const HKEY_LOCAL_MACHINE: HKEY = -2147483646i32 as _;
+pub type HMODULE = *mut ::core::ffi::c_void;
+pub type HRESULT = i32;
+pub type IMAGE_FILE_MACHINE = u16;
+pub const IMAGE_FILE_MACHINE_AMD64: IMAGE_FILE_MACHINE = 34404u16;
+pub const KEY_READ: REG_SAM_FLAGS = 131097u32;
+pub const KEY_WOW64_32KEY: REG_SAM_FLAGS = 512u32;
+pub type MACHINE_ATTRIBUTES = i32;
+pub type PCSTR = *const u8;
+pub type PCWSTR = *const u16;
+pub type PWSTR = *mut u16;
+pub type REG_SAM_FLAGS = u32;
+pub const REG_SZ: REG_VALUE_TYPE = 1u32;
+pub type REG_VALUE_TYPE = u32;
+#[repr(C)]
+pub struct SAFEARRAY {
+ pub cDims: u16,
+ pub fFeatures: ADVANCED_FEATURE_FLAGS,
+ pub cbElements: u32,
+ pub cLocks: u32,
+ pub pvData: *mut ::core::ffi::c_void,
+ pub rgsabound: [SAFEARRAYBOUND; 1],
+}
+impl ::core::marker::Copy for SAFEARRAY {}
+impl ::core::clone::Clone for SAFEARRAY {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+#[repr(C)]
+pub struct SAFEARRAYBOUND {
+ pub cElements: u32,
+ pub lLbound: i32,
+}
+impl ::core::marker::Copy for SAFEARRAYBOUND {}
+impl ::core::clone::Clone for SAFEARRAYBOUND {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+pub const SEMAPHORE_MODIFY_STATE: SYNCHRONIZATION_ACCESS_RIGHTS = 2u32;
+pub type SYNCHRONIZATION_ACCESS_RIGHTS = u32;
+pub const S_FALSE: HRESULT = 0x1_u32 as _;
+pub const S_OK: HRESULT = 0x0_u32 as _;
+pub type THREAD_ACCESS_RIGHTS = u32;
+pub const THREAD_SYNCHRONIZE: THREAD_ACCESS_RIGHTS = 1048576u32;
+pub const UserEnabled: MACHINE_ATTRIBUTES = 1i32;
+pub const WAIT_ABANDONED: WAIT_EVENT = 128u32;
+pub type WAIT_EVENT = u32;
+pub const WAIT_FAILED: WAIT_EVENT = 4294967295u32;
+pub const WAIT_OBJECT_0: WAIT_EVENT = 0u32;
+pub const WAIT_TIMEOUT: WAIT_EVENT = 258u32;
+pub type WIN32_ERROR = u32;
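
`windows_sys.rs` is generated by `windows-bindgen` and, besides the raw function declarations, ships a `const fn GUID::from_u128` helper that splits a single 128-bit literal into the classic four-field GUID layout. A hedged sketch of what that splitting means in practice; the GUID value is purely illustrative and the snippet assumes the `GUID` type above is in scope:

```rust
// Illustrative value only, not a GUID the crate defines.
const EXAMPLE: GUID = GUID::from_u128(0x177f0c4a_1cd3_4de7_a32c_71dbbb9fa36d);

fn demo() {
    assert_eq!(EXAMPLE.data1, 0x177f0c4a);
    assert_eq!(EXAMPLE.data2, 0x1cd3);
    assert_eq!(EXAMPLE.data3, 0x4de7);
    // The low 64 bits become the trailing 8 bytes, in big-endian order.
    assert_eq!(EXAMPLE.data4, [0xa3, 0x2c, 0x71, 0xdb, 0xbb, 0x9f, 0xa3, 0x6d]);
}
```
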
diff --git a/third_party/rust/cc/tests/cc_env.rs b/third_party/rust/cc/tests/cc_env.rs
deleted file mode 100644
index 43eb689f0f..0000000000
--- a/third_party/rust/cc/tests/cc_env.rs
+++ /dev/null
@@ -1,118 +0,0 @@
-use std::env;
-use std::ffi::OsString;
-use std::path::Path;
-
-mod support;
-use crate::support::Test;
-
-#[test]
-fn main() {
- ccache();
- distcc();
- ccache_spaces();
- ccache_env_flags();
- leading_spaces();
- extra_flags();
- path_to_ccache();
- more_spaces();
-}
-
-fn ccache() {
- let test = Test::gnu();
-
- env::set_var("CC", "ccache cc");
- let compiler = test.gcc().file("foo.c").get_compiler();
-
- assert_eq!(compiler.path(), Path::new("cc"));
-}
-
-fn ccache_spaces() {
- let test = Test::gnu();
- test.shim("ccache");
-
- env::set_var("CC", "ccache cc");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("cc"));
-}
-
-fn distcc() {
- let test = Test::gnu();
- test.shim("distcc");
-
- env::set_var("CC", "distcc cc");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("cc"));
-}
-
-fn ccache_env_flags() {
- let test = Test::gnu();
- test.shim("ccache");
-
- env::set_var("CC", "ccache lol-this-is-not-a-compiler");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("lol-this-is-not-a-compiler"));
- assert_eq!(
- compiler.cc_env(),
- OsString::from("ccache lol-this-is-not-a-compiler")
- );
- assert!(
- compiler
- .cflags_env()
- .into_string()
- .unwrap()
- .contains("ccache")
- == false
- );
- assert!(
- compiler
- .cflags_env()
- .into_string()
- .unwrap()
- .contains(" lol-this-is-not-a-compiler")
- == false
- );
-
- env::set_var("CC", "");
-}
-
-fn leading_spaces() {
- let test = Test::gnu();
- test.shim("ccache");
-
- env::set_var("CC", " test ");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("test"));
-
- env::set_var("CC", "");
-}
-
-fn extra_flags() {
- let test = Test::gnu();
- test.shim("ccache");
-
- env::set_var("CC", "ccache cc -m32");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("cc"));
-}
-
-fn path_to_ccache() {
- let test = Test::gnu();
- test.shim("ccache");
-
- env::set_var("CC", "/path/to/ccache.exe cc -m32");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("cc"));
- assert_eq!(
- compiler.cc_env(),
- OsString::from("/path/to/ccache.exe cc -m32"),
- );
-}
-
-fn more_spaces() {
- let test = Test::gnu();
- test.shim("ccache");
-
- env::set_var("CC", "cc -m32");
- let compiler = test.gcc().file("foo.c").get_compiler();
- assert_eq!(compiler.path(), Path::new("cc"));
-}
diff --git a/third_party/rust/cc/tests/cflags.rs b/third_party/rust/cc/tests/cflags.rs
deleted file mode 100644
index caec6ea4ed..0000000000
--- a/third_party/rust/cc/tests/cflags.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-mod support;
-
-use crate::support::Test;
-use std::env;
-
-/// This test is in its own module because it modifies the environment and would affect other tests
-/// when run in parallel with them.
-#[test]
-fn gnu_no_warnings_if_cflags() {
- env::set_var("CFLAGS", "-arbitrary");
- let test = Test::gnu();
- test.gcc().file("foo.c").compile("foo");
-
- test.cmd(0).must_not_have("-Wall").must_not_have("-Wextra");
-}
diff --git a/third_party/rust/cc/tests/cxxflags.rs b/third_party/rust/cc/tests/cxxflags.rs
deleted file mode 100644
index c524c7da4e..0000000000
--- a/third_party/rust/cc/tests/cxxflags.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-mod support;
-
-use crate::support::Test;
-use std::env;
-
-/// This test is in its own module because it modifies the environment and would affect other tests
-/// when run in parallel with them.
-#[test]
-fn gnu_no_warnings_if_cxxflags() {
- env::set_var("CXXFLAGS", "-arbitrary");
- let test = Test::gnu();
- test.gcc().file("foo.cpp").cpp(true).compile("foo");
-
- test.cmd(0).must_not_have("-Wall").must_not_have("-Wextra");
-}
diff --git a/third_party/rust/cc/tests/support/mod.rs b/third_party/rust/cc/tests/support/mod.rs
deleted file mode 100644
index f3c04405a3..0000000000
--- a/third_party/rust/cc/tests/support/mod.rs
+++ /dev/null
@@ -1,172 +0,0 @@
-#![allow(dead_code)]
-
-use std::env;
-use std::ffi::{OsStr, OsString};
-use std::fs::{self, File};
-use std::io;
-use std::io::prelude::*;
-use std::path::{Path, PathBuf};
-
-use cc;
-use tempfile::{Builder, TempDir};
-
-pub struct Test {
- pub td: TempDir,
- pub gcc: PathBuf,
- pub msvc: bool,
-}
-
-pub struct Execution {
- args: Vec<String>,
-}
-
-impl Test {
- pub fn new() -> Test {
- // This is ugly: `sccache` needs to introspect the compiler it is
- // executing, as it adjusts its behavior depending on the
- // language/compiler. This crate's test driver uses mock compilers that
- // are obviously not supported by sccache, so the tests fail if
- // RUSTC_WRAPPER is set. rust doesn't build test dependencies with
- // the `test` feature enabled, so we can't conditionally disable the
- // usage of `sccache` if running in a test environment, at least not
- // without setting an environment variable here and testing for it
- // there. Explicitly deasserting RUSTC_WRAPPER here seems to be the
- // lesser of the two evils.
- env::remove_var("RUSTC_WRAPPER");
-
- let mut gcc = PathBuf::from(env::current_exe().unwrap());
- gcc.pop();
- if gcc.ends_with("deps") {
- gcc.pop();
- }
- let td = Builder::new().prefix("gcc-test").tempdir_in(&gcc).unwrap();
- gcc.push(format!("gcc-shim{}", env::consts::EXE_SUFFIX));
- Test {
- td: td,
- gcc: gcc,
- msvc: false,
- }
- }
-
- pub fn gnu() -> Test {
- let t = Test::new();
- t.shim("cc").shim("c++").shim("ar");
- t
- }
-
- pub fn msvc() -> Test {
- let mut t = Test::new();
- t.shim("cl").shim("lib.exe");
- t.msvc = true;
- t
- }
-
- pub fn shim(&self, name: &str) -> &Test {
- let name = if name.ends_with(env::consts::EXE_SUFFIX) {
- name.to_string()
- } else {
- format!("{}{}", name, env::consts::EXE_SUFFIX)
- };
- link_or_copy(&self.gcc, self.td.path().join(name)).unwrap();
- self
- }
-
- pub fn gcc(&self) -> cc::Build {
- let mut cfg = cc::Build::new();
- let target = if self.msvc {
- "x86_64-pc-windows-msvc"
- } else {
- "x86_64-unknown-linux-gnu"
- };
-
- cfg.target(target)
- .host(target)
- .opt_level(2)
- .debug(false)
- .out_dir(self.td.path())
- .__set_env("PATH", self.path())
- .__set_env("GCCTEST_OUT_DIR", self.td.path());
- if self.msvc {
- cfg.compiler(self.td.path().join("cl"));
- cfg.archiver(self.td.path().join("lib.exe"));
- }
- cfg
- }
-
- fn path(&self) -> OsString {
- let mut path = env::split_paths(&env::var_os("PATH").unwrap()).collect::<Vec<_>>();
- path.insert(0, self.td.path().to_owned());
- env::join_paths(path).unwrap()
- }
-
- pub fn cmd(&self, i: u32) -> Execution {
- let mut s = String::new();
- File::open(self.td.path().join(format!("out{}", i)))
- .unwrap()
- .read_to_string(&mut s)
- .unwrap();
- Execution {
- args: s.lines().map(|s| s.to_string()).collect(),
- }
- }
-}
-
-impl Execution {
- pub fn must_have<P: AsRef<OsStr>>(&self, p: P) -> &Execution {
- if !self.has(p.as_ref()) {
- panic!("didn't find {:?} in {:?}", p.as_ref(), self.args);
- } else {
- self
- }
- }
-
- pub fn must_not_have<P: AsRef<OsStr>>(&self, p: P) -> &Execution {
- if self.has(p.as_ref()) {
- panic!("found {:?}", p.as_ref());
- } else {
- self
- }
- }
-
- pub fn has(&self, p: &OsStr) -> bool {
- self.args.iter().any(|arg| OsStr::new(arg) == p)
- }
-
- pub fn must_have_in_order(&self, before: &str, after: &str) -> &Execution {
- let before_position = self
- .args
- .iter()
- .rposition(|x| OsStr::new(x) == OsStr::new(before));
- let after_position = self
- .args
- .iter()
- .rposition(|x| OsStr::new(x) == OsStr::new(after));
- match (before_position, after_position) {
- (Some(b), Some(a)) if b < a => {}
- (b, a) => panic!(
- "{:?} (last position: {:?}) did not appear before {:?} (last position: {:?})",
- before, b, after, a
- ),
- };
- self
- }
-}
-
-/// Hard link an executable or copy it if that fails.
-///
-/// We first try to hard link an executable to save space. If that fails (as on Windows with
-/// different mount points, issue #60), we copy.
-#[cfg(not(target_os = "macos"))]
-fn link_or_copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
- let from = from.as_ref();
- let to = to.as_ref();
- fs::hard_link(from, to).or_else(|_| fs::copy(from, to).map(|_| ()))
-}
-
-/// Copy an executable.
-///
-/// On macOS, hard linking the executable leads to strange failures (issue #419), so we just copy.
-#[cfg(target_os = "macos")]
-fn link_or_copy<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
- fs::copy(from, to).map(|_| ())
-}
diff --git a/third_party/rust/cc/tests/test.rs b/third_party/rust/cc/tests/test.rs
deleted file mode 100644
index 161abd8ab7..0000000000
--- a/third_party/rust/cc/tests/test.rs
+++ /dev/null
@@ -1,461 +0,0 @@
-use crate::support::Test;
-
-mod support;
-
-// Some tests check that a flag is *not* present. These tests might fail if the flag is set in the
-// CFLAGS or CXXFLAGS environment variables. This function clears the CFLAGS and CXXFLAGS
-// variables to make sure that the tests can run correctly.
-fn reset_env() {
- std::env::set_var("CFLAGS", "");
- std::env::set_var("CXXFLAGS", "");
-}
-
-#[test]
-fn gnu_smoke() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc().file("foo.c").compile("foo");
-
- test.cmd(0)
- .must_have("-O2")
- .must_have("foo.c")
- .must_not_have("-gdwarf-4")
- .must_have("-c")
- .must_have("-ffunction-sections")
- .must_have("-fdata-sections");
- test.cmd(1).must_have(test.td.path().join("foo.o"));
-}
-
-#[test]
-fn gnu_opt_level_1() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc().opt_level(1).file("foo.c").compile("foo");
-
- test.cmd(0).must_have("-O1").must_not_have("-O2");
-}
-
-#[test]
-fn gnu_opt_level_s() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc().opt_level_str("s").file("foo.c").compile("foo");
-
- test.cmd(0)
- .must_have("-Os")
- .must_not_have("-O1")
- .must_not_have("-O2")
- .must_not_have("-O3")
- .must_not_have("-Oz");
-}
-
-#[test]
-fn gnu_debug() {
- let test = Test::gnu();
- test.gcc().debug(true).file("foo.c").compile("foo");
- test.cmd(0).must_have("-gdwarf-4");
-
- let test = Test::gnu();
- test.gcc()
- .target("x86_64-apple-darwin")
- .debug(true)
- .file("foo.c")
- .compile("foo");
- test.cmd(0).must_have("-gdwarf-2");
-}
-
-#[test]
-fn gnu_debug_fp_auto() {
- let test = Test::gnu();
- test.gcc().debug(true).file("foo.c").compile("foo");
- test.cmd(0).must_have("-gdwarf-4");
- test.cmd(0).must_have("-fno-omit-frame-pointer");
-}
-
-#[test]
-fn gnu_debug_fp() {
- let test = Test::gnu();
- test.gcc().debug(true).file("foo.c").compile("foo");
- test.cmd(0).must_have("-gdwarf-4");
- test.cmd(0).must_have("-fno-omit-frame-pointer");
-}
-
-#[test]
-fn gnu_debug_nofp() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .debug(true)
- .force_frame_pointer(false)
- .file("foo.c")
- .compile("foo");
- test.cmd(0).must_have("-gdwarf-4");
- test.cmd(0).must_not_have("-fno-omit-frame-pointer");
-
- let test = Test::gnu();
- test.gcc()
- .force_frame_pointer(false)
- .debug(true)
- .file("foo.c")
- .compile("foo");
- test.cmd(0).must_have("-gdwarf-4");
- test.cmd(0).must_not_have("-fno-omit-frame-pointer");
-}
-
-#[test]
-fn gnu_warnings_into_errors() {
- let test = Test::gnu();
- test.gcc()
- .warnings_into_errors(true)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-Werror");
-}
-
-#[test]
-fn gnu_warnings() {
- let test = Test::gnu();
- test.gcc()
- .warnings(true)
- .flag("-Wno-missing-field-initializers")
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-Wall").must_have("-Wextra");
-}
-
-#[test]
-fn gnu_extra_warnings0() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .warnings(true)
- .extra_warnings(false)
- .flag("-Wno-missing-field-initializers")
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-Wall").must_not_have("-Wextra");
-}
-
-#[test]
-fn gnu_extra_warnings1() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .warnings(false)
- .extra_warnings(true)
- .flag("-Wno-missing-field-initializers")
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_not_have("-Wall").must_have("-Wextra");
-}
-
-#[test]
-fn gnu_warnings_overridable() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .warnings(true)
- .flag("-Wno-missing-field-initializers")
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0)
- .must_have_in_order("-Wall", "-Wno-missing-field-initializers");
-}
-
-#[test]
-fn gnu_x86_64() {
- for vendor in &["unknown-linux-gnu", "apple-darwin"] {
- let target = format!("x86_64-{}", vendor);
- let test = Test::gnu();
- test.gcc()
- .target(&target)
- .host(&target)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-fPIC").must_have("-m64");
- }
-}
-
-#[test]
-fn gnu_x86_64_no_pic() {
- reset_env();
-
- for vendor in &["unknown-linux-gnu", "apple-darwin"] {
- let target = format!("x86_64-{}", vendor);
- let test = Test::gnu();
- test.gcc()
- .pic(false)
- .target(&target)
- .host(&target)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_not_have("-fPIC");
- }
-}
-
-#[test]
-fn gnu_i686() {
- for vendor in &["unknown-linux-gnu", "apple-darwin"] {
- let target = format!("i686-{}", vendor);
- let test = Test::gnu();
- test.gcc()
- .target(&target)
- .host(&target)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-m32");
- }
-}
-
-#[test]
-fn gnu_i686_pic() {
- for vendor in &["unknown-linux-gnu", "apple-darwin"] {
- let target = format!("i686-{}", vendor);
- let test = Test::gnu();
- test.gcc()
- .pic(true)
- .target(&target)
- .host(&target)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-fPIC");
- }
-}
-
-#[test]
-fn gnu_x86_64_no_plt() {
- let target = "x86_64-unknown-linux-gnu";
- let test = Test::gnu();
- test.gcc()
- .pic(true)
- .use_plt(false)
- .target(&target)
- .host(&target)
- .file("foo.c")
- .compile("foo");
- test.cmd(0).must_have("-fno-plt");
-}
-
-#[test]
-fn gnu_set_stdlib() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .cpp_set_stdlib(Some("foo"))
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_not_have("-stdlib=foo");
-}
-
-#[test]
-fn gnu_include() {
- let test = Test::gnu();
- test.gcc().include("foo/bar").file("foo.c").compile("foo");
-
- test.cmd(0).must_have("-I").must_have("foo/bar");
-}
-
-#[test]
-fn gnu_define() {
- let test = Test::gnu();
- test.gcc()
- .define("FOO", "bar")
- .define("BAR", None)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR");
-}
-
-#[test]
-fn gnu_compile_assembly() {
- let test = Test::gnu();
- test.gcc().file("foo.S").compile("foo");
- test.cmd(0).must_have("foo.S");
-}
-
-#[test]
-fn gnu_shared() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .file("foo.c")
- .shared_flag(true)
- .static_flag(false)
- .compile("foo");
-
- test.cmd(0).must_have("-shared").must_not_have("-static");
-}
-
-#[test]
-fn gnu_flag_if_supported() {
- reset_env();
-
- if cfg!(windows) {
- return;
- }
- let test = Test::gnu();
- test.gcc()
- .file("foo.c")
- .flag("-v")
- .flag_if_supported("-Wall")
- .flag_if_supported("-Wflag-does-not-exist")
- .flag_if_supported("-std=c++11")
- .compile("foo");
-
- test.cmd(0)
- .must_have("-v")
- .must_have("-Wall")
- .must_not_have("-Wflag-does-not-exist")
- .must_not_have("-std=c++11");
-}
-
-#[test]
-fn gnu_flag_if_supported_cpp() {
- if cfg!(windows) {
- return;
- }
- let test = Test::gnu();
- test.gcc()
- .cpp(true)
- .file("foo.cpp")
- .flag_if_supported("-std=c++11")
- .compile("foo");
-
- test.cmd(0).must_have("-std=c++11");
-}
-
-#[test]
-fn gnu_static() {
- reset_env();
-
- let test = Test::gnu();
- test.gcc()
- .file("foo.c")
- .shared_flag(false)
- .static_flag(true)
- .compile("foo");
-
- test.cmd(0).must_have("-static").must_not_have("-shared");
-}
-
-#[test]
-fn gnu_no_dash_dash() {
- let test = Test::gnu();
- test.gcc().file("foo.c").compile("foo");
-
- test.cmd(0).must_not_have("--");
-}
-
-#[test]
-fn msvc_smoke() {
- reset_env();
-
- let test = Test::msvc();
- test.gcc().file("foo.c").compile("foo");
-
- test.cmd(0)
- .must_have("-O2")
- .must_have("foo.c")
- .must_not_have("-Z7")
- .must_have("-c")
- .must_have("-MD");
- test.cmd(1).must_have(test.td.path().join("foo.o"));
-}
-
-#[test]
-fn msvc_opt_level_0() {
- reset_env();
-
- let test = Test::msvc();
- test.gcc().opt_level(0).file("foo.c").compile("foo");
-
- test.cmd(0).must_not_have("-O2");
-}
-
-#[test]
-fn msvc_debug() {
- let test = Test::msvc();
- test.gcc().debug(true).file("foo.c").compile("foo");
- test.cmd(0).must_have("-Z7");
-}
-
-#[test]
-fn msvc_include() {
- let test = Test::msvc();
- test.gcc().include("foo/bar").file("foo.c").compile("foo");
-
- test.cmd(0).must_have("-I").must_have("foo/bar");
-}
-
-#[test]
-fn msvc_define() {
- let test = Test::msvc();
- test.gcc()
- .define("FOO", "bar")
- .define("BAR", None)
- .file("foo.c")
- .compile("foo");
-
- test.cmd(0).must_have("-DFOO=bar").must_have("-DBAR");
-}
-
-#[test]
-fn msvc_static_crt() {
- let test = Test::msvc();
- test.gcc().static_crt(true).file("foo.c").compile("foo");
-
- test.cmd(0).must_have("-MT");
-}
-
-#[test]
-fn msvc_no_static_crt() {
- let test = Test::msvc();
- test.gcc().static_crt(false).file("foo.c").compile("foo");
-
- test.cmd(0).must_have("-MD");
-}
-
-#[test]
-fn msvc_no_dash_dash() {
- let test = Test::msvc();
- test.gcc().file("foo.c").compile("foo");
-
- test.cmd(0).must_not_have("--");
-}
-
-// Disable this test with the parallel feature because the execution
-// order is not deterministic.
-#[cfg(not(feature = "parallel"))]
-#[test]
-fn asm_flags() {
- let test = Test::gnu();
- test.gcc()
- .file("foo.c")
- .file("x86_64.asm")
- .file("x86_64.S")
- .asm_flag("--abc")
- .compile("foo");
- test.cmd(0).must_not_have("--abc");
- test.cmd(1).must_have("--abc");
- test.cmd(2).must_have("--abc");
-}
diff --git a/third_party/rust/document-features/.cargo-checksum.json b/third_party/rust/document-features/.cargo-checksum.json
new file mode 100644
index 0000000000..0b9d0b431c
--- /dev/null
+++ b/third_party/rust/document-features/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"c1ccf4587ca168b3baa54580469c5dcc776decac0d996d3bb31d2341b47efa11","Cargo.toml":"390d32c2b791a6745c075c474e6d57c65d5f77f0e7190ff8a8c5342fbb40722a","LICENSE-APACHE":"074e6e32c86a4c0ef8b3ed25b721ca23aca83df277cd88106ef7177c354615ff","LICENSE-MIT":"aa893340d14b9844625be6a50ac644169a01b52f0211cbf81b09e1874c8cd81d","README.md":"89a83c4acc6891e5651772fc78a1d6362070774eaa6c5b5d4bfbe9e57a957be9","lib.rs":"2f4ede9d0619d85449891d9055605188db681d57b405e40e529831266e014ee5","rustfmt.toml":"f74204a6f92aa7422a16ecb2ffe2d5bae0f123b778d08b5db1a398a3c9ca4306","tests/self-doc.rs":"24bbda93f3b323c0b7c543c1df3bf45522b8026283103211805f070de66abadc"},"package":"ef5282ad69563b5fc40319526ba27e0e7363d552a896f0297d54f767717f9b95"} \ No newline at end of file
diff --git a/third_party/rust/document-features/CHANGELOG.md b/third_party/rust/document-features/CHANGELOG.md
new file mode 100644
index 0000000000..f94ee19ead
--- /dev/null
+++ b/third_party/rust/document-features/CHANGELOG.md
@@ -0,0 +1,44 @@
+# Changelog
+
+
+## 0.2.8 - 2023-12-29
+
+* Remove `\n` between features (#17)
+* Don't throw an error when there are no features in Cargo.toml (#20)
+
+## 0.2.7 - 2022-12-21
+
+* Fix parsing of Cargo.toml with multi-line array of array (#16)
+
+## 0.2.6 - 2022-09-24
+
+* Fix parsing of escaped string literal in the macro arguments
+
+## 0.2.5 - 2022-09-17
+
+* Allow customization of the output with the `feature_label=` parameter
+
+## 0.2.4 - 2022-09-14
+
+* Fix dependencies or features written with quotes
+
+## 0.2.3 - 2022-08-15
+
+* Fix parsing of table with `#` within strings (#10)
+
+## 0.2.2 - 2022-07-25
+
+* Fix parsing of dependencies or feature spanning multiple lines (#9)
+
+## 0.2.1 - 2022-02-12
+
+* Fix indentation of multi-lines feature comments (#5)
+
+## 0.2.0 - 2022-02-11
+
+* Added ability to document optional features. (This is a breaking change in the
+ sense that previously ignored comments may now result in errors)
+
+## 0.1.0 - 2022-02-01
+
+Initial release
diff --git a/third_party/rust/document-features/Cargo.toml b/third_party/rust/document-features/Cargo.toml
new file mode 100644
index 0000000000..bae5ff2a3b
--- /dev/null
+++ b/third_party/rust/document-features/Cargo.toml
@@ -0,0 +1,40 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "document-features"
+version = "0.2.8"
+authors = ["Slint Developers <info@slint-ui.com>"]
+description = "Extract documentation for the feature flags from comments in Cargo.toml"
+homepage = "https://slint-ui.com"
+readme = "README.md"
+keywords = [
+ "documentation",
+ "features",
+ "rustdoc",
+ "macro",
+]
+categories = ["development-tools"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/slint-ui/document-features"
+
+[lib]
+path = "lib.rs"
+proc-macro = true
+
+[dependencies.litrs]
+version = "0.4.1"
+default-features = false
+
+[features]
+default = []
+self-test = []
diff --git a/third_party/rust/document-features/LICENSE-APACHE b/third_party/rust/document-features/LICENSE-APACHE
new file mode 100644
index 0000000000..137069b823
--- /dev/null
+++ b/third_party/rust/document-features/LICENSE-APACHE
@@ -0,0 +1,73 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
+
+ (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/document-features/LICENSE-MIT b/third_party/rust/document-features/LICENSE-MIT
new file mode 100644
index 0000000000..13a82b270c
--- /dev/null
+++ b/third_party/rust/document-features/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2020 Olivier Goffart <ogoffart@sixtyfps.io>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/rust/document-features/README.md b/third_party/rust/document-features/README.md
new file mode 100644
index 0000000000..81fccc0f44
--- /dev/null
+++ b/third_party/rust/document-features/README.md
@@ -0,0 +1,43 @@
+# Document your crate's feature flags
+
+[![Crates.io](https://img.shields.io/crates/v/document-features)](https://crates.io/crates/document-features)
+[![Documentation](https://docs.rs/document-features/badge.svg)](https://docs.rs/document-features/)
+
+This crate provides a macro that extracts documentation comments from Cargo.toml.
+
+To use this crate, add `#![doc = document_features::document_features!()]` in your crate documentation.
+The `document_features!()` macro reads your `Cargo.toml` file, extracts feature comments and generates
+a markdown string for your documentation.
+
+Use `## ` and `#! ` comments in your Cargo.toml to document features, for example:
+
+```toml
+[dependencies]
+document-features = "0.2"
+## ...
+
+[features]
+## The foo feature enables the `foo` functions
+foo = []
+## The bar feature enables the [`bar`] module
+bar = []
+
+#! ### Experimental features
+#! The following features are experimental
+
+## Activate the fusion reactor
+fusion = []
+```
+
+These comments keep the feature definition and documentation next to each other, and they are then
+rendered into your crate documentation.
+
+Check out the [documentation](https://docs.rs/document-features/) for more details.
+
+## Contributions
+
+Contributions are welcome. We accept pull requests and bug reports.
+
+## License
+
+MIT OR Apache-2.0
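As additional context for the README above, the macro is invoked from the consumer crate's own top-level documentation. The following is a minimal sketch of that Rust side (a hypothetical consumer crate, mirroring the usage the README describes, not part of the vendored files):

```rust
//! Normal crate documentation goes here.
//!
//! ## Feature flags
#![doc = document_features::document_features!()]

// At documentation time the macro expands into a markdown list built from the
// `## `/`#! ` comments in this crate's Cargo.toml; the rest of the crate follows.
pub mod bar {}
```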
diff --git a/third_party/rust/document-features/lib.rs b/third_party/rust/document-features/lib.rs
new file mode 100644
index 0000000000..b30ebe7f42
--- /dev/null
+++ b/third_party/rust/document-features/lib.rs
@@ -0,0 +1,877 @@
+// Copyright © SixtyFPS GmbH <info@sixtyfps.io>
+// SPDX-License-Identifier: MIT OR Apache-2.0
+
+/*!
+Document your crate's feature flags.
+
+This crate provides a macro that extracts "documentation" comments from Cargo.toml.
+
+To use this crate, add `#![doc = document_features::document_features!()]` in your crate documentation.
+The `document_features!()` macro reads your `Cargo.toml` file, extracts feature comments and generates
+a markdown string for your documentation.
+
+Basic example:
+
+```rust
+//! Normal crate documentation goes here.
+//!
+//! ## Feature flags
+#![doc = document_features::document_features!()]
+
+// rest of the crate goes here.
+```
+
+## Documentation format:
+
+The documentation of your crate features goes into `Cargo.toml`, where they are defined.
+
+The `document_features!()` macro analyzes the contents of `Cargo.toml`.
+Similar to Rust's documentation comments `///` and `//!`, the macro understands
+comments that start with `## ` and `#! `. Note the required trailing space.
+Lines starting with `###` will not be understood as doc comments.
+
+`## ` comments are meant to be *above* the feature they document.
+There can be several `## ` comments, but they must always be followed by a
+feature name or an optional dependency.
+There should not be `#! ` comments between the comment and the feature they document.
+
+`#! ` comments are not associated with a particular feature, and will be printed
+where they occur. Use them to group features, for example.
+
+## Examples:
+
+*/
+// Note: because rustdoc escapes the first `#` of a line starting with `#`,
+// these doc comments have one extra `#`.
+#![doc = self_test!(/**
+[package]
+name = "..."
+## ...
+
+[features]
+default = ["foo"]
+##! This comment goes on top
+
+### The foo feature enables the `foo` functions
+foo = []
+
+### The bar feature enables the bar module
+bar = []
+
+##! ### Experimental features
+##! The following features are experimental
+
+### Enable the fusion reactor
+###
+### ⚠️ Can lead to explosions
+fusion = []
+
+[dependencies]
+document-features = "0.2"
+
+##! ### Optional dependencies
+
+### Enable this feature to implement the trait for the types from the genial crate
+genial = { version = "0.2", optional = true }
+
+### This awesome dependency is specified in its own table
+[dependencies.awesome]
+version = "1.3.5"
+optional = true
+*/
+=>
+ /**
+This comment goes on top
+* **`foo`** *(enabled by default)* — The foo feature enables the `foo` functions
+* **`bar`** — The bar feature enables the bar module
+
+#### Experimental features
+The following features are experimental
+* **`fusion`** — Enable the fusion reactor
+
+ ⚠️ Can lead to explosions
+
+#### Optional dependencies
+* **`genial`** — Enable this feature to implement the trait for the types from the genial crate
+* **`awesome`** — This awesome dependency is specified in its own table
+*/
+)]
+/*!
+
+## Customization
+
+You can customize the formatting of the features in the generated documentation by setting
+the key **`feature_label=`** to a given format string. This format string must be either
+a [string literal](https://doc.rust-lang.org/reference/tokens.html#string-literals) or
+a [raw string literal](https://doc.rust-lang.org/reference/tokens.html#raw-string-literals).
+Every occurrence of `{feature}` inside the format string will be substituted with the name of the feature.
+
+For instance, to emulate the HTML formatting used by `rustdoc` one can use the following:
+
+```rust
+#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
+```
+
+The default formatting is equivalent to:
+
+```rust
+#![doc = document_features::document_features!(feature_label = "**`{feature}`**")]
+```
+
+## Compatibility
+
+The minimum Rust version required to use this crate is Rust 1.54, because of the
+feature that allows macros in doc comments. You can make this crate optional and use
+`#[cfg_attr()]` statements to enable it only when building the documentation.
+You need two levels of `cfg_attr` because Rust < 1.54 doesn't parse the attribute
+otherwise.
+
+```rust,ignore
+#![cfg_attr(
+ feature = "document-features",
+ cfg_attr(doc, doc = ::document_features::document_features!())
+)]
+```
+
+In your Cargo.toml, enable this feature while generating the documentation on docs.rs:
+
+```toml
+[dependencies]
+document-features = { version = "0.2", optional = true }
+
+[package.metadata.docs.rs]
+features = ["document-features"]
+## Alternative: enable all features so they are all documented
+## all-features = true
+```
+ */
+
+#[cfg(not(feature = "default"))]
+compile_error!(
+ "The feature `default` must be enabled to ensure \
+    forward compatibility with future versions of this crate"
+);
+
+extern crate proc_macro;
+
+use proc_macro::{TokenStream, TokenTree};
+use std::borrow::Cow;
+use std::collections::HashSet;
+use std::convert::TryFrom;
+use std::fmt::Write;
+use std::path::Path;
+use std::str::FromStr;
+
+fn error(e: &str) -> TokenStream {
+ TokenStream::from_str(&format!("::core::compile_error!{{\"{}\"}}", e.escape_default())).unwrap()
+}
+
+fn compile_error(msg: &str, tt: Option<TokenTree>) -> TokenStream {
+ let span = tt.as_ref().map_or_else(proc_macro::Span::call_site, TokenTree::span);
+ use proc_macro::{Delimiter, Group, Ident, Literal, Punct, Spacing};
+ use std::iter::FromIterator;
+ TokenStream::from_iter(vec![
+ TokenTree::Ident(Ident::new("compile_error", span)),
+ TokenTree::Punct({
+ let mut punct = Punct::new('!', Spacing::Alone);
+ punct.set_span(span);
+ punct
+ }),
+ TokenTree::Group({
+ let mut group = Group::new(Delimiter::Brace, {
+ TokenStream::from_iter([TokenTree::Literal({
+ let mut string = Literal::string(msg);
+ string.set_span(span);
+ string
+ })])
+ });
+ group.set_span(span);
+ group
+ }),
+ ])
+}
+
+#[derive(Default)]
+struct Args {
+ feature_label: Option<String>,
+}
+
+fn parse_args(input: TokenStream) -> Result<Args, TokenStream> {
+ let mut token_trees = input.into_iter().fuse();
+
+ // parse the key, ensuring that it is the identifier `feature_label`
+ match token_trees.next() {
+ None => return Ok(Args::default()),
+ Some(TokenTree::Ident(ident)) if ident.to_string() == "feature_label" => (),
+ tt => return Err(compile_error("expected `feature_label`", tt)),
+ }
+
+ // parse a single equal sign `=`
+ match token_trees.next() {
+ Some(TokenTree::Punct(p)) if p.as_char() == '=' => (),
+ tt => return Err(compile_error("expected `=`", tt)),
+ }
+
+ // parse the value, ensuring that it is a string literal containing the substring `"{feature}"`
+ let feature_label;
+ if let Some(tt) = token_trees.next() {
+ match litrs::StringLit::<String>::try_from(&tt) {
+ Ok(string_lit) if string_lit.value().contains("{feature}") => {
+ feature_label = string_lit.value().to_string()
+ }
+ _ => {
+ return Err(compile_error(
+ "expected a string literal containing the substring \"{feature}\"",
+ Some(tt),
+ ))
+ }
+ }
+ } else {
+ return Err(compile_error(
+ "expected a string literal containing the substring \"{feature}\"",
+ None,
+ ));
+ }
+
+ // ensure there is nothing left after the format string
+ if let tt @ Some(_) = token_trees.next() {
+ return Err(compile_error("unexpected token after the format string", tt));
+ }
+
+ Ok(Args { feature_label: Some(feature_label) })
+}
+
+/// Produce a literal string containing documentation extracted from Cargo.toml
+///
+/// See the [crate] documentation for details
+#[proc_macro]
+pub fn document_features(tokens: TokenStream) -> TokenStream {
+ parse_args(tokens)
+ .and_then(|args| document_features_impl(&args))
+ .unwrap_or_else(std::convert::identity)
+}
+
+fn document_features_impl(args: &Args) -> Result<TokenStream, TokenStream> {
+ let path = std::env::var("CARGO_MANIFEST_DIR").unwrap();
+ let mut cargo_toml = std::fs::read_to_string(Path::new(&path).join("Cargo.toml"))
+ .map_err(|e| error(&format!("Can't open Cargo.toml: {:?}", e)))?;
+
+ if !cargo_toml.contains("\n##") && !cargo_toml.contains("\n#!") {
+ // On crates.io, Cargo.toml is usually "normalized" and stripped of all comments.
+ // The original Cargo.toml has been renamed Cargo.toml.orig
+ if let Ok(orig) = std::fs::read_to_string(Path::new(&path).join("Cargo.toml.orig")) {
+ if orig.contains("##") || orig.contains("#!") {
+ cargo_toml = orig;
+ }
+ }
+ }
+
+ let result = process_toml(&cargo_toml, args).map_err(|e| error(&e))?;
+ Ok(std::iter::once(proc_macro::TokenTree::from(proc_macro::Literal::string(&result))).collect())
+}
+
+fn process_toml(cargo_toml: &str, args: &Args) -> Result<String, String> {
+ // Get all lines between the "[features]" and the next block
+ let mut lines = cargo_toml
+ .lines()
+ .map(str::trim)
+ // and skip empty lines and comments that are not docs comments
+ .filter(|l| {
+ !l.is_empty() && (!l.starts_with('#') || l.starts_with("##") || l.starts_with("#!"))
+ });
+ let mut top_comment = String::new();
+ let mut current_comment = String::new();
+ let mut features = vec![];
+ let mut default_features = HashSet::new();
+ let mut current_table = "";
+ while let Some(line) = lines.next() {
+ if let Some(x) = line.strip_prefix("#!") {
+ if !x.is_empty() && !x.starts_with(' ') {
+ continue; // it's not a doc comment
+ }
+ if !current_comment.is_empty() {
+ return Err("Cannot mix ## and #! comments between features.".into());
+ }
+ if top_comment.is_empty() && !features.is_empty() {
+ top_comment = "\n".into();
+ }
+ writeln!(top_comment, "{}", x).unwrap();
+ } else if let Some(x) = line.strip_prefix("##") {
+ if !x.is_empty() && !x.starts_with(' ') {
+ continue; // it's not a doc comment
+ }
+ writeln!(current_comment, " {}", x).unwrap();
+ } else if let Some(table) = line.strip_prefix('[') {
+ current_table = table
+ .split_once(']')
+ .map(|(t, _)| t.trim())
+ .ok_or_else(|| format!("Parse error while parsing line: {}", line))?;
+ if !current_comment.is_empty() {
+ let dep = current_table
+ .rsplit_once('.')
+ .and_then(|(table, dep)| table.trim().ends_with("dependencies").then(|| dep))
+ .ok_or_else(|| format!("Not a feature: `{}`", line))?;
+ features.push((
+ dep.trim(),
+ std::mem::take(&mut top_comment),
+ std::mem::take(&mut current_comment),
+ ));
+ }
+ } else if let Some((dep, rest)) = line.split_once('=') {
+ let dep = dep.trim().trim_matches('"');
+ let rest = get_balanced(rest, &mut lines)
+ .map_err(|e| format!("Parse error while parsing value {}: {}", dep, e))?;
+ if current_table == "features" && dep == "default" {
+ let defaults = rest
+ .trim()
+ .strip_prefix('[')
+ .and_then(|r| r.strip_suffix(']'))
+ .ok_or_else(|| format!("Parse error while parsing dependency {}", dep))?
+ .split(',')
+ .map(|d| d.trim().trim_matches(|c| c == '"' || c == '\'').trim().to_string())
+ .filter(|d| !d.is_empty());
+ default_features.extend(defaults);
+ }
+ if !current_comment.is_empty() {
+ if current_table.ends_with("dependencies") {
+ if !rest
+ .split_once("optional")
+ .and_then(|(_, r)| r.trim().strip_prefix('='))
+ .map_or(false, |r| r.trim().starts_with("true"))
+ {
+ return Err(format!("Dependency {} is not an optional dependency", dep));
+ }
+ } else if current_table != "features" {
+ return Err(format!(
+ r#"Comment cannot be associated with a feature: "{}""#,
+ current_comment.trim()
+ ));
+ }
+ features.push((
+ dep,
+ std::mem::take(&mut top_comment),
+ std::mem::take(&mut current_comment),
+ ));
+ }
+ }
+ }
+ if !current_comment.is_empty() {
+ return Err("Found comment not associated with a feature".into());
+ }
+ if features.is_empty() {
+ return Ok("*No documented features in Cargo.toml*".into());
+ }
+ let mut result = String::new();
+ for (f, top, comment) in features {
+ let default = if default_features.contains(f) { " *(enabled by default)*" } else { "" };
+ if !comment.trim().is_empty() {
+ if let Some(feature_label) = &args.feature_label {
+ writeln!(
+ result,
+ "{}* {}{} —{}",
+ top,
+ feature_label.replace("{feature}", f),
+ default,
+ comment.trim_end(),
+ )
+ .unwrap();
+ } else {
+ writeln!(result, "{}* **`{}`**{} —{}", top, f, default, comment.trim_end())
+ .unwrap();
+ }
+ } else if let Some(feature_label) = &args.feature_label {
+ writeln!(result, "{}* {}{}", top, feature_label.replace("{feature}", f), default,)
+ .unwrap();
+ } else {
+ writeln!(result, "{}* **`{}`**{}", top, f, default).unwrap();
+ }
+ }
+ result += &top_comment;
+ Ok(result)
+}
+
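+/// Collect a possibly multi-line TOML value starting at `first_line`: track quoting
+/// and `[`/`{` nesting, strip unquoted `#` comments, and keep pulling lines from
+/// `lines` until the brackets balance, returning an error on unbalanced input.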
+fn get_balanced<'a>(
+ first_line: &'a str,
+ lines: &mut impl Iterator<Item = &'a str>,
+) -> Result<Cow<'a, str>, String> {
+ let mut line = first_line;
+ let mut result = Cow::from("");
+
+ let mut in_quote = false;
+ let mut level = 0;
+ loop {
+ let mut last_slash = false;
+ for (idx, b) in line.as_bytes().iter().enumerate() {
+ if last_slash {
+ last_slash = false
+ } else if in_quote {
+ match b {
+ b'\\' => last_slash = true,
+ b'"' | b'\'' => in_quote = false,
+ _ => (),
+ }
+ } else {
+ match b {
+ b'\\' => last_slash = true,
+ b'"' => in_quote = true,
+ b'{' | b'[' => level += 1,
+ b'}' | b']' if level == 0 => return Err("unbalanced source".into()),
+ b'}' | b']' => level -= 1,
+ b'#' => {
+ line = &line[..idx];
+ break;
+ }
+ _ => (),
+ }
+ }
+ }
+ if result.len() == 0 {
+ result = Cow::from(line);
+ } else {
+ *result.to_mut() += line;
+ }
+ if level == 0 {
+ return Ok(result);
+ }
+ line = if let Some(l) = lines.next() {
+ l
+ } else {
+ return Err("unbalanced source".into());
+ };
+ }
+}
+
+#[test]
+fn test_get_balanced() {
+ assert_eq!(
+ get_balanced(
+ "{",
+ &mut IntoIterator::into_iter(["a", "{ abc[], #ignore", " def }", "}", "xxx"])
+ ),
+ Ok("{a{ abc[], def }}".into())
+ );
+ assert_eq!(
+ get_balanced("{ foo = \"{#\" } #ignore", &mut IntoIterator::into_iter(["xxx"])),
+ Ok("{ foo = \"{#\" } ".into())
+ );
+ assert_eq!(
+ get_balanced("]", &mut IntoIterator::into_iter(["["])),
+ Err("unbalanced source".into())
+ );
+}
+
+#[cfg(feature = "self-test")]
+#[proc_macro]
+#[doc(hidden)]
+/// Helper macro for the tests. Do not use
+pub fn self_test_helper(input: TokenStream) -> TokenStream {
+ process_toml((&input).to_string().trim_matches(|c| c == '"' || c == '#'), &Args::default())
+ .map_or_else(
+ |e| error(&e),
+ |r| {
+ std::iter::once(proc_macro::TokenTree::from(proc_macro::Literal::string(&r)))
+ .collect()
+ },
+ )
+}
+
+#[cfg(feature = "self-test")]
+macro_rules! self_test {
+ (#[doc = $toml:literal] => #[doc = $md:literal]) => {
+ concat!(
+ "\n`````rust\n\
+ fn normalize_md(md : &str) -> String {
+ md.lines().skip_while(|l| l.is_empty()).map(|l| l.trim())
+ .collect::<Vec<_>>().join(\"\\n\")
+ }
+ assert_eq!(normalize_md(document_features::self_test_helper!(",
+ stringify!($toml),
+ ")), normalize_md(",
+ stringify!($md),
+ "));\n`````\n\n"
+ )
+ };
+}
+
+#[cfg(not(feature = "self-test"))]
+macro_rules! self_test {
+ (#[doc = $toml:literal] => #[doc = $md:literal]) => {
+ concat!(
+ "This contents in Cargo.toml:\n`````toml",
+ $toml,
+ "\n`````\n Generates the following:\n\
+ <table><tr><th>Preview</th></tr><tr><td>\n\n",
+ $md,
+ "\n</td></tr></table>\n\n&nbsp;\n",
+ )
+ };
+}
+
+// The following struct is inserted only during generation of the documentation in order to exploit doc-tests.
+// These doc-tests are used to check that invalid arguments to the `document_features!` macro cause a compile time error.
+// For a more principled way of testing compilation errors, maybe investigate <https://docs.rs/trybuild>.
+//
+/// ```rust
+/// #![doc = document_features::document_features!()]
+/// #![doc = document_features::document_features!(feature_label = "**`{feature}`**")]
+/// #![doc = document_features::document_features!(feature_label = r"**`{feature}`**")]
+/// #![doc = document_features::document_features!(feature_label = r#"**`{feature}`**"#)]
+/// #![doc = document_features::document_features!(feature_label = "<span class=\"stab portability\"><code>{feature}</code></span>")]
+/// #![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
+/// ```
+/// ```compile_fail
+/// #![doc = document_features::document_features!(feature_label > "<span>{feature}</span>")]
+/// ```
+/// ```compile_fail
+/// #![doc = document_features::document_features!(label = "<span>{feature}</span>")]
+/// ```
+/// ```compile_fail
+/// #![doc = document_features::document_features!(feature_label = "{feat}")]
+/// ```
+/// ```compile_fail
+/// #![doc = document_features::document_features!(feature_label = 3.14)]
+/// ```
+/// ```compile_fail
+/// #![doc = document_features::document_features!(feature_label = )]
+/// ```
+/// ```compile_fail
+/// #![doc = document_features::document_features!(feature_label = "**`{feature}`**" extra)]
+/// ```
+#[cfg(doc)]
+struct FeatureLabelCompilationTest;
+
+#[cfg(test)]
+mod tests {
+ use super::{process_toml, Args};
+
+ #[track_caller]
+ fn test_error(toml: &str, expected: &str) {
+ let err = process_toml(toml, &Args::default()).unwrap_err();
+ assert!(err.contains(expected), "{:?} does not contain {:?}", err, expected)
+ }
+
+ #[test]
+ fn only_get_balanced_in_correct_table() {
+ process_toml(
+ r#"
+
+[package.metadata.release]
+pre-release-replacements = [
+ {test=\"\#\# \"},
+]
+[abcd]
+[features]#xyz
+#! abc
+#
+###
+#! def
+#!
+## 123
+## 456
+feat1 = ["plop"]
+#! ghi
+no_doc = []
+##
+feat2 = ["momo"]
+#! klm
+default = ["feat1", "something_else"]
+#! end
+ "#,
+ &Args::default(),
+ )
+ .unwrap();
+ }
+
+ #[test]
+ fn no_features() {
+ let r = process_toml(
+ r#"
+[features]
+[dependencies]
+foo = 4;
+"#,
+ &Args::default(),
+ )
+ .unwrap();
+ assert_eq!(r, "*No documented features in Cargo.toml*");
+ }
+
+ #[test]
+ fn no_features2() {
+ let r = process_toml(
+ r#"
+[packages]
+[dependencies]
+"#,
+ &Args::default(),
+ )
+ .unwrap();
+ assert_eq!(r, "*No documented features in Cargo.toml*");
+ }
+
+ #[test]
+ fn parse_error3() {
+ test_error(
+ r#"
+[features]
+ff = []
+[abcd
+efgh
+[dependencies]
+"#,
+ "Parse error while parsing line: [abcd",
+ );
+ }
+
+ #[test]
+ fn parse_error4() {
+ test_error(
+ r#"
+[features]
+## dd
+## ff
+#! ee
+## ff
+"#,
+ "Cannot mix",
+ );
+ }
+
+ #[test]
+ fn parse_error5() {
+ test_error(
+ r#"
+[features]
+## dd
+"#,
+ "not associated with a feature",
+ );
+ }
+
+ #[test]
+ fn parse_error6() {
+ test_error(
+ r#"
+[features]
+# ff
+foo = []
+default = [
+#ffff
+# ff
+"#,
+ "Parse error while parsing value default",
+ );
+ }
+
+ #[test]
+ fn parse_error7() {
+ test_error(
+ r#"
+[features]
+# f
+foo = [ x = { ]
+bar = []
+"#,
+ "Parse error while parsing value foo",
+ );
+ }
+
+ #[test]
+ fn not_a_feature1() {
+ test_error(
+ r#"
+## hallo
+[features]
+"#,
+ "Not a feature: `[features]`",
+ );
+ }
+
+ #[test]
+ fn not_a_feature2() {
+ test_error(
+ r#"
+[package]
+## hallo
+foo = []
+"#,
+ "Comment cannot be associated with a feature: \"hallo\"",
+ );
+ }
+
+ #[test]
+ fn non_optional_dep1() {
+ test_error(
+ r#"
+[dev-dependencies]
+## Not optional
+foo = { version = "1.2", optional = false }
+"#,
+ "Dependency foo is not an optional dependency",
+ );
+ }
+
+ #[test]
+ fn non_optional_dep2() {
+ test_error(
+ r#"
+[dev-dependencies]
+## Not optional
+foo = { version = "1.2" }
+"#,
+ "Dependency foo is not an optional dependency",
+ );
+ }
+
+ #[test]
+ fn basic() {
+ let toml = r#"
+[abcd]
+[features]#xyz
+#! abc
+#
+###
+#! def
+#!
+## 123
+## 456
+feat1 = ["plop"]
+#! ghi
+no_doc = []
+##
+feat2 = ["momo"]
+#! klm
+default = ["feat1", "something_else"]
+#! end
+ "#;
+ let parsed = process_toml(toml, &Args::default()).unwrap();
+ assert_eq!(
+ parsed,
+ " abc\n def\n\n* **`feat1`** *(enabled by default)* — 123\n 456\n\n ghi\n* **`feat2`**\n\n klm\n end\n"
+ );
+ let parsed = process_toml(
+ toml,
+ &Args {
+ feature_label: Some(
+ "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
+ ),
+ },
+ )
+ .unwrap();
+ assert_eq!(
+ parsed,
+ " abc\n def\n\n* <span class=\"stab portability\"><code>feat1</code></span> *(enabled by default)* — 123\n 456\n\n ghi\n* <span class=\"stab portability\"><code>feat2</code></span>\n\n klm\n end\n"
+ );
+ }
+
+ #[test]
+ fn dependencies() {
+ let toml = r#"
+#! top
+[dev-dependencies] #yo
+## dep1
+dep1 = { version="1.2", optional=true}
+#! yo
+dep2 = "1.3"
+## dep3
+[target.'cfg(unix)'.build-dependencies.dep3]
+version = "42"
+optional = true
+ "#;
+ let parsed = process_toml(toml, &Args::default()).unwrap();
+ assert_eq!(parsed, " top\n* **`dep1`** — dep1\n\n yo\n* **`dep3`** — dep3\n");
+ let parsed = process_toml(
+ toml,
+ &Args {
+ feature_label: Some(
+ "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
+ ),
+ },
+ )
+ .unwrap();
+ assert_eq!(parsed, " top\n* <span class=\"stab portability\"><code>dep1</code></span> — dep1\n\n yo\n* <span class=\"stab portability\"><code>dep3</code></span> — dep3\n");
+ }
+
+ #[test]
+ fn multi_lines() {
+ let toml = r#"
+[package.metadata.foo]
+ixyz = [
+ ["array"],
+ [
+ "of",
+ "arrays"
+ ]
+]
+[dev-dependencies]
+## dep1
+dep1 = {
+ version="1.2-}",
+ optional=true
+}
+[features]
+default = [
+ "goo",
+ "\"]",
+ "bar",
+]
+## foo
+foo = [
+ "bar"
+]
+## bar
+bar = [
+
+]
+ "#;
+ let parsed = process_toml(toml, &Args::default()).unwrap();
+ assert_eq!(
+ parsed,
+ "* **`dep1`** — dep1\n* **`foo`** — foo\n* **`bar`** *(enabled by default)* — bar\n"
+ );
+ let parsed = process_toml(
+ toml,
+ &Args {
+ feature_label: Some(
+ "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
+ ),
+ },
+ )
+ .unwrap();
+ assert_eq!(
+ parsed,
+ "* <span class=\"stab portability\"><code>dep1</code></span> — dep1\n* <span class=\"stab portability\"><code>foo</code></span> — foo\n* <span class=\"stab portability\"><code>bar</code></span> *(enabled by default)* — bar\n"
+ );
+ }
+
+ #[test]
+ fn dots_in_feature() {
+ let toml = r#"
+[features]
+## This is a test
+"teßt." = []
+default = ["teßt."]
+[dependencies]
+## A dep
+"dep" = { version = "123", optional = true }
+ "#;
+ let parsed = process_toml(toml, &Args::default()).unwrap();
+ assert_eq!(
+ parsed,
+ "* **`teßt.`** *(enabled by default)* — This is a test\n* **`dep`** — A dep\n"
+ );
+ let parsed = process_toml(
+ toml,
+ &Args {
+ feature_label: Some(
+ "<span class=\"stab portability\"><code>{feature}</code></span>".into(),
+ ),
+ },
+ )
+ .unwrap();
+ assert_eq!(
+ parsed,
+ "* <span class=\"stab portability\"><code>teßt.</code></span> *(enabled by default)* — This is a test\n* <span class=\"stab portability\"><code>dep</code></span> — A dep\n"
+ );
+ }
+}
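To make the mapping performed by `process_toml` concrete, here is a minimal input/output pair written in the style of the unit tests above. It is illustrative only and is not part of the vendored file; the feature name `fast` and its comment are invented for the example.

```rust
// A minimal sketch; assumes the same `use super::{process_toml, Args};` context
// as the crate's own test module.
#[test]
fn minimal_roundtrip() {
    // One documented feature that is enabled by default: the `## ` comment becomes
    // the bullet text, and membership in `default` adds the "(enabled by default)" tag.
    let toml = r#"
[features]
## Enables the fast path
fast = []
default = ["fast"]
"#;
    let parsed = process_toml(toml, &Args::default()).unwrap();
    assert_eq!(
        parsed,
        "* **`fast`** *(enabled by default)* — Enables the fast path\n"
    );
}
```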
diff --git a/third_party/rust/document-features/rustfmt.toml b/third_party/rust/document-features/rustfmt.toml
new file mode 100644
index 0000000000..2a35f0230c
--- /dev/null
+++ b/third_party/rust/document-features/rustfmt.toml
@@ -0,0 +1 @@
+use_small_heuristics = "Max"
diff --git a/third_party/rust/document-features/tests/self-doc.rs b/third_party/rust/document-features/tests/self-doc.rs
new file mode 100644
index 0000000000..4e27113145
--- /dev/null
+++ b/third_party/rust/document-features/tests/self-doc.rs
@@ -0,0 +1,37 @@
+#[test]
+fn ensure_it_compiles() {
+ document_features::document_features!();
+ document_features::document_features!(feature_label = "**`{feature}`**");
+ document_features::document_features!(feature_label = r"**`{feature}`**");
+ document_features::document_features!(feature_label = r#"**`{feature}`**"#);
+ document_features::document_features!(
+ feature_label = "<span class=\"stab portability\"><code>{feature}</code></span>"
+ );
+ document_features::document_features!(
+ feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#
+ );
+ document_features::document_features!(
+ feature_label = r##"<span class="stab portability"><code>{feature}</code></span>"##
+ );
+}
+
+#[test]
+fn self_doc() {
+ let actual = document_features::document_features!();
+ let expected = "* **`self-test`** — Internal feature used only for the tests, don't enable\n";
+ assert_eq!(actual, expected);
+}
+
+#[test]
+fn self_doc_with_custom_label() {
+ let actual = document_features::document_features!(
+ feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#
+ );
+ let expected =
+ "* <span class=\"stab portability\"><code>self-test</code></span> — Internal feature used only for the tests, don't enable\n";
+ assert_eq!(actual, expected);
+ let actual2 = document_features::document_features!(
+ feature_label = "<span class=\"stab\u{0020}portability\"><code>{feature}</code></span>"
+ );
+ assert_eq!(actual2, expected);
+}
diff --git a/third_party/rust/glean-core/.cargo-checksum.json b/third_party/rust/glean-core/.cargo-checksum.json
index 859a8ecdae..54674fc768 100644
--- a/third_party/rust/glean-core/.cargo-checksum.json
+++ b/third_party/rust/glean-core/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"85eba7a464630582734247336bb970481f894c7b6e418d6eb5c868693af35a14","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"026495898699b54608eb4ec16074ffafc57920d80ccb59961c501a1ea28c9985","build.rs":"4857bea99c6b8c08db8818efa9d3738716f52d3acb68159323957ae52892a3eb","src/common_metric_data.rs":"72051c4349885d4a94fc41bb4edda88d31551f2f9ebcdb4e868a98161bc76233","src/core/mod.rs":"4749e71ba495d5aed48cb4d311e0beb7e9d53c10b18ab33938e8fd9c3cfa5666","src/core_metrics.rs":"a877e42e0f8b932adb52a5681ad76fd977808cb48c7eeb29b1e4bbe804f1ea96","src/coverage.rs":"49613fd310bd24d779472720975fbe6c97ec370a95eb55f10afa43f67539c942","src/database/mod.rs":"3917bad7773696a43ab58e7860d5a8f1d63dca7c27920343aa8786acc5a586cd","src/debug.rs":"90158cc5d488ba67b60d06647e54e59a1d7bdeb906087e4fe4cfab4373c1cc6c","src/dispatcher/global.rs":"f69cd81a90a37c306d4e0ce8177ea5a3ae2ffda5b431ae46b9a22c9e38891271","src/dispatcher/mod.rs":"440a331a7beeaa6e9824c2fd4306c09ce2a115a358d5beb830dba4d69aec3563","src/error.rs":"b93c7d3e243b21bb9eafc95f17860aba1a942b2f2b0a7f43307690f05fece516","src/error_recording.rs":"d7858647808d81173558e12de88f3fbe4e589969e3bd246bfb388f90f8ff3814","src/event_database/mod.rs":"9d4d3e4b075dc585c00317304401b2c9115f39db8fdbd9f1c93d3fc6fd350fd5","src/fd_logger.rs":"0c9def6fa53db1a2ab93c85795f8a7df57797bcfd3978146923e151752e291a6","src/glean.udl":"13150d3344874d8d801aaa76880ba191ab99e337af5ac5c3cbb55bc70e5daa8a","src/glean_metrics.rs":"9414fb1453d19f6832df33e4c6ef7383d62203e47026bf5bc9552b083101ddd1","src/histogram/exponential.rs":"58bb1770bae45770d92995515d328eb50a7e78726224f779446ae7d1632a6a3e","src/histogram/functional.rs":"1a63a305b48bcef7bc38136b40d916df4bb8f098dc602514ada54a9b091f6951","src/histogram/linear.rs":"4342a1733175d7f97b2b41adb18100537c206100c9fccb5bd13bd782c9cb3c9a","src/histogram/mod.rs":"eeb7aff80806ab76cdce101dc08887b5552f8b4bdf64683f64f767e0f06a889d","src/internal_metrics.rs":"263779535963a804c8c7fa6f8e284ac8ec7f415ceeadbb6a8f913a1e7073ae18","src/internal_pings.rs":"f7a3a3aef3661ae0066ba1d2890a5e8e6871a7a8016b5f7b4da077663bc0c0d0","src/lib.rs":"778a3778ce89db007b175e08b3a4bfd1104585709ba6d5c1e3972d388f3a6f8e","src/lib_unit_tests.rs":"f053c04aab63ddb86303bca7ceea5bbf104bc7de7c89213e7cadf0157db57645","src/metrics/boolean.rs":"0591043a88e81c77b694cc8124b9e4996e81a46df1402e1bdf364d2885d8215e","src/metrics/counter.rs":"57544e9fa971086b89a4844ab9cbc55ee5b79c9d391c7d9d9ac74a7bc844b076","src/metrics/custom_distribution.rs":"9ca60176837e216da3655b2a3ba08a296444aaa7d966ddd498478fdfb2b21049","src/metrics/datetime.rs":"ca12a426fbc564847d64e6ddcaa3f298065ba4bca0372c638058c20909f626d5","src/metrics/denominator.rs":"7bacb81ea2b81da8775f61544022d35d91f159e1aa494944eaf9536cacc18b4d","src/metrics/event.rs":"62d450e4db227b6c12b9179933e4b3a9df92abc4aa7da14c9274dc82e549e210","src/metrics/experiment.rs":"48aaf06b32a691500dbc51be95955071947d7760e3b81d30ac8d8d0161c1df3f","src/metrics/labeled.rs":"c6e2200694e3cd623e8369420d3b35759b117ef9e8188fd33afaa81685fdce28","src/metrics/memory_distribution.rs":"e9456afc7759980d06d1e2e3620a3117995d27d16bc3c5a05ea8c023475ae47a","src/metrics/memory_unit.rs":"d7a678e5242febd021283b30c0099a9e62729944816a3f17d2d91e2808bc0570","src/metrics/metrics_enabled_config.rs":"87fed12219c756ecf1e5c8cd6a21f26999b6bbcf3ffc1b5467b0a58ca5ad35d8","src/metrics/mod.rs":"759ba845d4598e3fea4877d2687da958b15480ec453562c48dac7872ab300ee8","src/metrics/numerator.rs":"442236e1d63b31299f3f073bead683101de995845a638834201c6f30fc03ea90","src/metrics/pin
g.rs":"5a7b483450fdc145ee8e73f7efef27a9e2b8b551ef5682fc08fcb445a92b5c0d","src/metrics/quantity.rs":"915ab10cf1e1666fc2c707bc51232843e914d93eea2a249eb15df28b6a74cd2b","src/metrics/rate.rs":"38a6986387ec3927dbd81d8a881e257674e91bb97ccd43eb6285f691d5e06531","src/metrics/recorded_experiment.rs":"33958abee79d8b55dec4cb5d20742640423713010f76314075cefde18b5c118a","src/metrics/string.rs":"f7ffc07c23bedc1b8579121d931468b0713834fc037f7945267257fdbdf9a5d0","src/metrics/string_list.rs":"4eeb320cb24dec60c5551c6c9630dbbb292725a5d4c10df8f6c6e2a111eea25e","src/metrics/text.rs":"d727bcc14d2c4f946a517ac0110f585bfbe4898f813bdbad68756505529300f6","src/metrics/time_unit.rs":"b7578010c6270a45b30342b59189a862b2ede9dd24e9afae3e90fa6b970b3d24","src/metrics/timespan.rs":"ae1a2966f0a5446327ad2b9ca0738992976350ad334a5e11fd4ee611a690394e","src/metrics/timing_distribution.rs":"57d4b3f19e7382f80cc596406d3d1c191f61bc3c81f92bf137a13c2fa174c822","src/metrics/url.rs":"9bec842b7a811f01c1f0a0deb54b2dac2f8488882e7e9251cd91d18e6b500939","src/metrics/uuid.rs":"f824019fe3e8d8945490b7eb0ac1c6b7b2ffc991543f4b6332d12827955cf420","src/ping/mod.rs":"e805bfa51007c30c5a549be8d8f7ccbe502db4a8ad5727abaf298a9348f584c0","src/scheduler.rs":"129863e31205404a3d1708627a62583324c347d143f976216f769893ec541ea0","src/storage/mod.rs":"04dc1a94be1d59097cd87b14386952a6ec8b9115bc06397ae389a323f6f55dcc","src/system.rs":"e3d1b54e1d39cafe6f4dc7ff5021b08c879733f909951b0e1332b3efa9ed97bd","src/traits/boolean.rs":"be0e130f8043215705becc956d45b126c340568f1b24a396c0af9b4334a41ced","src/traits/counter.rs":"c686d26e131d854cd7a7df83c900ca7c17a03c663a30cf58ab48c7259476ce85","src/traits/custom_distribution.rs":"159a5e26fb2326f5bcdc46979aa9c6481c5f0e93ecf957f668bb3f6988d8b00f","src/traits/datetime.rs":"636ac1456b1b042e38cf5ae6193c5b232ea0b80df62f583a2097891baef9641b","src/traits/event.rs":"3f48aa336854141784d121f7fa9e283f6ff708a9214f9c0aade3a68cc38dda99","src/traits/labeled.rs":"c633c68e70a44e73f8aff88aaab1029c0faded3cad08d822590ed8838f24b4fd","src/traits/memory_distribution.rs":"55bb8f45e948319fbba9d28a50d8742da134b066a42e480887db7c7e435f4096","src/traits/mod.rs":"d0aa19a7cd97326fd2e026635406a5a9403953ced4954443a2bcbca32d868554","src/traits/numerator.rs":"6e4f236bdc448f1bde7a8c249dcd086204c2c69990d3f444e746290929226ed3","src/traits/ping.rs":"8831c106c03afeb458b0b028fa1ce61f056ebf8e82bc0a171a1bff255d920748","src/traits/quantity.rs":"6ffe25c913bef4315573d747308c182de740b2a4e02ba22cd21d0c33ba521f31","src/traits/rate.rs":"f000790440e0f389f0b160526a9a9a266e58d1405915ae56ac550f482858222c","src/traits/string.rs":"0c3c88382ff2e8eba89c7cfe129c4b84e31140af717819533c14919541ad790c","src/traits/string_list.rs":"14e56b62c2c2be1dd8013f12001f235b084abd2a0d5aa2f7932843877af49ac0","src/traits/text.rs":"8af7d3a0c87cfd8c6d33d6ad47532b431055bbdd395f9110da5630222c23cf93","src/traits/timespan.rs":"52be325a9c061916f34c5b638a07a93b4a14aa89fe365783103d2e06b998f547","src/traits/timing_distribution.rs":"76a7b8640128769763a275e831ed1f1e9ba9bfaab5ff10de638d5be3c57b5421","src/traits/url.rs":"c27f7add23214ff051078b65b88120b620560d2841a1056c7214d5237e86b9e4","src/traits/uuid.rs":"81322e71c7e847bacaf827a2cd58f6193bdc208355524207f7f38db039da6aa8","src/upload/directory.rs":"5fa2c64a2bc561883ec01ed274179d91ff86d40d4d9837661be1b85cd067177c","src/upload/mod.rs":"d281050d7ab29980d48a44240256c21e82858dd795b60549fb46cfd5ce7a0214","src/upload/policy.rs":"c250957a37783e74af8002cd80ba06ef9780a389fb0f61b8b665b79688f0a360","src/upload/request.rs":"a16fbe823228e73a2acc6f4324d6c635be22dfefb19ae71146245d9236baa87a
","src/upload/result.rs":"7efbbe50e8d36beb3f23e7bfd172d22e1c003472d2dd8055b06f6050c36437c5","src/util.rs":"ee7500434d9758a320dd410f18d7e18da956591e19d2555db87eef9623e4b916","tests/boolean.rs":"76d6014ff108cb6514d9bceb1b2b14749a55b09921f4595a5e30f1bd3546e9f0","tests/common/mod.rs":"c1d980a9cff0b64f452ebbe43f24d70aa685b80b48db08fc4338a60466b07a5e","tests/counter.rs":"3663a3f5ec5c0bd2b758a9920cd20cc619a12566b445e4421ec7c98232bf5a32","tests/custom_distribution.rs":"53530972243670ef58c85a906b70931d14e81ae2e1f69092c71a27e561edff6c","tests/datetime.rs":"ec3c9760e70bb2cbc61ab23281c891bc1ec493c5c545466c29fd13e4f05c2c96","tests/event.rs":"bf5b8d3ee9d12d12e91b71dd46a813a2cf17f0544f6d2e3b14f6f931ce276fa1","tests/labeled.rs":"e9ea6dba17059d68114efce0c23373be9ceed922bf5e638a2158a6422c75a1c1","tests/memory_distribution.rs":"a5a7aa955e60823ea29a6f4bc96c61e41f1e41f08958aa4854668cf8fe04cde6","tests/ping.rs":"a0fbaed178459c6e3ed3363d966d4a9119cbbcf94574f336f2e7c37eb4b59324","tests/ping_maker.rs":"40dfa0f45af6a35364c068bc53f540b27c95483204104377e0f619a9b10bc711","tests/quantity.rs":"55e7dca346fd1d27f0974b78ca3fb12427cb5da2ee637afc08a54f360f947361","tests/rate.rs":"1de571b9f0ee9a9006cbc8a31f91352d3ff1190b50840f0f668b470a7cd2a3a5","tests/storage.rs":"f0c8312bd789d7bda502cd45f35fef6b8591652bd194d07da4d81935ebe69b48","tests/string.rs":"7ece988a4b8efe6932ccb90bfe2f3c8aaea983777e99d7de6028bf6a29459ee6","tests/string_list.rs":"77188a2b90663c3f8dac5da89a6cb6b1d16a9f8c66ccd032d02966dfd14a3486","tests/text.rs":"1d43f6b90a43124311cacf0a6ee16f9e1e9263bcd11fee8b996d6efd81633638","tests/timespan.rs":"d50d75c7d75da3a878d67331cb0df8ae5e6a099ffab474361f71a408e02528d7","tests/timing_distribution.rs":"dbe2c8efa5dfb8037765b50433568c04ba111953822584da1c4931837fdfc060","tests/uuid.rs":"052ad26a6927c56272219340211cf4a059d200f14287b482fe8621d7bce3cc54","uniffi.toml":"6ddc98b686b0925a81abd9d1c769e5c98ac29771b210a1c535931a46dec9a8e3"},"package":"6831cadd28b625bc296732d71dc7c978f208ba27911cad072785f87f23b1e634"} \ No newline at end of file
+{"files":{"Cargo.toml":"2dde200f0e0e4e523634f8c2c8c1c2ca75af83163ac7b0ba8f62f3096fd0c97d","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"026495898699b54608eb4ec16074ffafc57920d80ccb59961c501a1ea28c9985","build.rs":"4857bea99c6b8c08db8818efa9d3738716f52d3acb68159323957ae52892a3eb","src/common_metric_data.rs":"72051c4349885d4a94fc41bb4edda88d31551f2f9ebcdb4e868a98161bc76233","src/core/mod.rs":"8f5e98a108ec5d1849402af1de90b5f53ba839240743c2c5283a49a4045e1293","src/core_metrics.rs":"a877e42e0f8b932adb52a5681ad76fd977808cb48c7eeb29b1e4bbe804f1ea96","src/coverage.rs":"49613fd310bd24d779472720975fbe6c97ec370a95eb55f10afa43f67539c942","src/database/mod.rs":"3917bad7773696a43ab58e7860d5a8f1d63dca7c27920343aa8786acc5a586cd","src/debug.rs":"90158cc5d488ba67b60d06647e54e59a1d7bdeb906087e4fe4cfab4373c1cc6c","src/dispatcher/global.rs":"f69cd81a90a37c306d4e0ce8177ea5a3ae2ffda5b431ae46b9a22c9e38891271","src/dispatcher/mod.rs":"440a331a7beeaa6e9824c2fd4306c09ce2a115a358d5beb830dba4d69aec3563","src/error.rs":"b93c7d3e243b21bb9eafc95f17860aba1a942b2f2b0a7f43307690f05fece516","src/error_recording.rs":"d7858647808d81173558e12de88f3fbe4e589969e3bd246bfb388f90f8ff3814","src/event_database/mod.rs":"9d4d3e4b075dc585c00317304401b2c9115f39db8fdbd9f1c93d3fc6fd350fd5","src/fd_logger.rs":"0c9def6fa53db1a2ab93c85795f8a7df57797bcfd3978146923e151752e291a6","src/glean.udl":"24d9e431f95d79dc4254feff68f19a4ea4e6e76c33b110e10c5e5dbd5bc64ff2","src/glean_metrics.rs":"9414fb1453d19f6832df33e4c6ef7383d62203e47026bf5bc9552b083101ddd1","src/histogram/exponential.rs":"58bb1770bae45770d92995515d328eb50a7e78726224f779446ae7d1632a6a3e","src/histogram/functional.rs":"1a63a305b48bcef7bc38136b40d916df4bb8f098dc602514ada54a9b091f6951","src/histogram/linear.rs":"4342a1733175d7f97b2b41adb18100537c206100c9fccb5bd13bd782c9cb3c9a","src/histogram/mod.rs":"eeb7aff80806ab76cdce101dc08887b5552f8b4bdf64683f64f767e0f06a889d","src/internal_metrics.rs":"263779535963a804c8c7fa6f8e284ac8ec7f415ceeadbb6a8f913a1e7073ae18","src/internal_pings.rs":"7267166a8e357053526c545cf62bb502a7b6f07aed1de48d43041228d8835366","src/lib.rs":"367ea21f9d3f1c808b258011821d8505cd47d29eff8e8e6d938623e6e9997b73","src/lib_unit_tests.rs":"46897c6bb4003c5e00152d7b55c00d3176b5bffb28d8669a3fb0d10e5233e3a5","src/metrics/boolean.rs":"2b9ef57e3582c9bd8b2cca8ab94c962a4871ecc00e837b913c9b0349ba9dff08","src/metrics/counter.rs":"b4a52a8167fb0edd6354f952525e59f3eadb4261de3483374f03c94449d30b92","src/metrics/custom_distribution.rs":"e1f2edfefb67da4bf369bab3d3047f4ff6539a1fea0eee81c78d96626e5b4bb0","src/metrics/datetime.rs":"e4405762fc71718299fa1b208e3d5fda654bd1b82fe908c884c284e3530de2ec","src/metrics/denominator.rs":"95e8442f90bad97f80fc74b146782f215344b52c5f3825ae0a8baffdc001a714","src/metrics/event.rs":"7281d8b63f34758a47abd7ae3956f44701d1fd48433ccba7a4302526a9912255","src/metrics/experiment.rs":"5f9278cca4e133eb8df33bbfe36d1fe0ef3eade8c09f1b46db3c4d0790515412","src/metrics/labeled.rs":"8d6e76a07064d132cd617c7901f2bc11ff6ba31e3483ba3b96354a4a3736b58d","src/metrics/memory_distribution.rs":"7f6ca51acb470df277ff14427c0e7bb07d921c0a0087d0cc56aebe038d198ccc","src/metrics/memory_unit.rs":"d7a678e5242febd021283b30c0099a9e62729944816a3f17d2d91e2808bc0570","src/metrics/metrics_enabled_config.rs":"87fed12219c756ecf1e5c8cd6a21f26999b6bbcf3ffc1b5467b0a58ca5ad35d8","src/metrics/mod.rs":"8f8958b8cedfe01df6c97ec26b63f14fd7516f9de7ba62984062db96b5708720","src/metrics/numerator.rs":"937dfd583b797ac798a525cedca95c5a36262356760a89670d8113983c263154","src/metrics/obj
ect.rs":"89ce5190ed681b26b74a06a4ecaf9f96c36f96be1276f1fdb40f4406648e08c1","src/metrics/ping.rs":"4ccdf0ae2ac6f3e5a352334797d2805f1a3d932e92f08447285dd9bec4e7d724","src/metrics/quantity.rs":"aa13a8f8cf8e5e0281668fbbafc2998411df2a499479423558fd91b9bd7f8702","src/metrics/rate.rs":"603cc45c149c7a27c93b6a80146bf43f8ce70d9655f905bb5be6bc2c15bcb22b","src/metrics/recorded_experiment.rs":"33958abee79d8b55dec4cb5d20742640423713010f76314075cefde18b5c118a","src/metrics/string.rs":"2418632c492463970c3eca533d5318f519698bb361d73dd8781db108d7d1fbd8","src/metrics/string_list.rs":"ed53a095184c3e8224d0511809b5d7601ba3166505a39b0570f24ebeb0a5b97c","src/metrics/text.rs":"5c994a282b16b9dde6d6dc4922475457a72c82f64248778811b84db70ed4c116","src/metrics/time_unit.rs":"b7578010c6270a45b30342b59189a862b2ede9dd24e9afae3e90fa6b970b3d24","src/metrics/timespan.rs":"b0fda3a45597c8306a0d1928dcf0837538859e66ebd9db113ebb6efbea721d4c","src/metrics/timing_distribution.rs":"5da04272dd8b44502ffd0b60b12c84239a7fe359a51754b6c0cd96388a4e8a3c","src/metrics/url.rs":"f6b27a60d13a1268f0115c5d292c9b16b6bc370055961368cb2648283b7140a0","src/metrics/uuid.rs":"cacffd95ab30ed327ec2fa5feaf1359e667706746401f1e2c1195ad9553c4b54","src/ping/mod.rs":"fcadd52d2d536c9ace01f8a3812c3fb3c39b8094915db1b3656839fb87f771b5","src/scheduler.rs":"129863e31205404a3d1708627a62583324c347d143f976216f769893ec541ea0","src/storage/mod.rs":"04dc1a94be1d59097cd87b14386952a6ec8b9115bc06397ae389a323f6f55dcc","src/system.rs":"e3d1b54e1d39cafe6f4dc7ff5021b08c879733f909951b0e1332b3efa9ed97bd","src/traits/boolean.rs":"be0e130f8043215705becc956d45b126c340568f1b24a396c0af9b4334a41ced","src/traits/counter.rs":"c686d26e131d854cd7a7df83c900ca7c17a03c663a30cf58ab48c7259476ce85","src/traits/custom_distribution.rs":"0bd1d425e4c059cca6af2dfb13c78e5e4c6c07fb46c7e31489ad0c5959854833","src/traits/datetime.rs":"636ac1456b1b042e38cf5ae6193c5b232ea0b80df62f583a2097891baef9641b","src/traits/event.rs":"3f48aa336854141784d121f7fa9e283f6ff708a9214f9c0aade3a68cc38dda99","src/traits/labeled.rs":"c633c68e70a44e73f8aff88aaab1029c0faded3cad08d822590ed8838f24b4fd","src/traits/memory_distribution.rs":"55bb8f45e948319fbba9d28a50d8742da134b066a42e480887db7c7e435f4096","src/traits/mod.rs":"d14b69d0946848c1f92cc8977cbc3fc9338ff1b53b7acc31ea0fe2f1122beecb","src/traits/numerator.rs":"6e4f236bdc448f1bde7a8c249dcd086204c2c69990d3f444e746290929226ed3","src/traits/object.rs":"c03bad670ec7affbc578247f9e1904e898c1870b9bf25750c5094113f995623f","src/traits/ping.rs":"8831c106c03afeb458b0b028fa1ce61f056ebf8e82bc0a171a1bff255d920748","src/traits/quantity.rs":"6ffe25c913bef4315573d747308c182de740b2a4e02ba22cd21d0c33ba521f31","src/traits/rate.rs":"f000790440e0f389f0b160526a9a9a266e58d1405915ae56ac550f482858222c","src/traits/string.rs":"0c3c88382ff2e8eba89c7cfe129c4b84e31140af717819533c14919541ad790c","src/traits/string_list.rs":"14e56b62c2c2be1dd8013f12001f235b084abd2a0d5aa2f7932843877af49ac0","src/traits/text.rs":"8af7d3a0c87cfd8c6d33d6ad47532b431055bbdd395f9110da5630222c23cf93","src/traits/timespan.rs":"52be325a9c061916f34c5b638a07a93b4a14aa89fe365783103d2e06b998f547","src/traits/timing_distribution.rs":"00ebdef647a7a208c01d13ba7b3996750e36de98d1f63859b609c80c8df25b6f","src/traits/url.rs":"c27f7add23214ff051078b65b88120b620560d2841a1056c7214d5237e86b9e4","src/traits/uuid.rs":"81322e71c7e847bacaf827a2cd58f6193bdc208355524207f7f38db039da6aa8","src/upload/directory.rs":"6359220db9d85ee0f3931ca518f95ffb2020c1c03bd632f17ed5c16ddd00343b","src/upload/mod.rs":"a388563d5e2940c5c28b48fc7b67ca507512efccae95fd1c2f04b15ec21aa08c
","src/upload/policy.rs":"c250957a37783e74af8002cd80ba06ef9780a389fb0f61b8b665b79688f0a360","src/upload/request.rs":"0b7e215f61499a681d1cebc9cf4a0efbaae2f543a5d44e5db40cbe61ed90549e","src/upload/result.rs":"7efbbe50e8d36beb3f23e7bfd172d22e1c003472d2dd8055b06f6050c36437c5","src/util.rs":"ee7500434d9758a320dd410f18d7e18da956591e19d2555db87eef9623e4b916","tests/boolean.rs":"76d6014ff108cb6514d9bceb1b2b14749a55b09921f4595a5e30f1bd3546e9f0","tests/common/mod.rs":"c1d980a9cff0b64f452ebbe43f24d70aa685b80b48db08fc4338a60466b07a5e","tests/counter.rs":"3663a3f5ec5c0bd2b758a9920cd20cc619a12566b445e4421ec7c98232bf5a32","tests/custom_distribution.rs":"41c593a0b4561e21f29d1a5b948de964a866253c58ca76ffefebe370fca150e0","tests/datetime.rs":"ec3c9760e70bb2cbc61ab23281c891bc1ec493c5c545466c29fd13e4f05c2c96","tests/event.rs":"67291cbcc4d1cba56ada6ba733fb1dc4c6327680059e8d7637add2ae45cd344b","tests/labeled.rs":"e9ea6dba17059d68114efce0c23373be9ceed922bf5e638a2158a6422c75a1c1","tests/memory_distribution.rs":"a5a7aa955e60823ea29a6f4bc96c61e41f1e41f08958aa4854668cf8fe04cde6","tests/object.rs":"8c35676e04f6ccf54a28764700915e753fc0355bfa5d7804d72caba66fd564cd","tests/ping.rs":"eb9f6be1aba21acc5dc670622bf622976718a706df1cc2095efa56a8e8b3fe1a","tests/ping_maker.rs":"b267ecf7c714ff27512424b743da0ea4f05a87755c1b96355bfca3e173e3f62e","tests/quantity.rs":"55e7dca346fd1d27f0974b78ca3fb12427cb5da2ee637afc08a54f360f947361","tests/rate.rs":"1de571b9f0ee9a9006cbc8a31f91352d3ff1190b50840f0f668b470a7cd2a3a5","tests/storage.rs":"f0c8312bd789d7bda502cd45f35fef6b8591652bd194d07da4d81935ebe69b48","tests/string.rs":"7ece988a4b8efe6932ccb90bfe2f3c8aaea983777e99d7de6028bf6a29459ee6","tests/string_list.rs":"77188a2b90663c3f8dac5da89a6cb6b1d16a9f8c66ccd032d02966dfd14a3486","tests/text.rs":"1d43f6b90a43124311cacf0a6ee16f9e1e9263bcd11fee8b996d6efd81633638","tests/timespan.rs":"d50d75c7d75da3a878d67331cb0df8ae5e6a099ffab474361f71a408e02528d7","tests/timing_distribution.rs":"20860a7baccdcee6aed40c9cc8202b94f3b2e61164fbaf8f2af96b0f404a895a","tests/uuid.rs":"052ad26a6927c56272219340211cf4a059d200f14287b482fe8621d7bce3cc54","uniffi.toml":"6ddc98b686b0925a81abd9d1c769e5c98ac29771b210a1c535931a46dec9a8e3"},"package":"ed9acc46fd38c5c995a0537e76364496addace660839dc279079e5957e3c1093"} \ No newline at end of file
diff --git a/third_party/rust/glean-core/Cargo.toml b/third_party/rust/glean-core/Cargo.toml
index 44c159051d..9d33444fbd 100644
--- a/third_party/rust/glean-core/Cargo.toml
+++ b/third_party/rust/glean-core/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.66"
name = "glean-core"
-version = "57.0.0"
+version = "58.1.0"
authors = [
"Jan-Erik Rediger <jrediger@mozilla.com>",
"The Glean Team <glean-team@mozilla.com>",
@@ -35,7 +35,7 @@ license = "MPL-2.0"
repository = "https://github.com/mozilla/glean"
[package.metadata.glean]
-glean-parser = "11.0.1"
+glean-parser = "13.0.0"
[dependencies.bincode]
version = "1.2.1"
diff --git a/third_party/rust/glean-core/src/core/mod.rs b/third_party/rust/glean-core/src/core/mod.rs
index 5a8dd56cde..30f9a34f11 100644
--- a/third_party/rust/glean-core/src/core/mod.rs
+++ b/third_party/rust/glean-core/src/core/mod.rs
@@ -118,11 +118,11 @@ where
/// trim_data_to_registered_pings: false,
/// log_level: None,
/// rate_limit: None,
-/// enable_event_timestamps: false,
+/// enable_event_timestamps: true,
/// experimentation_id: None,
/// };
/// let mut glean = Glean::new(cfg).unwrap();
-/// let ping = PingType::new("sample", true, false, true, vec![]);
+/// let ping = PingType::new("sample", true, false, true, true, vec![]);
/// glean.register_ping_type(&ping);
///
/// let call_counter: CounterMetric = CounterMetric::new(CommonMetricData {
@@ -318,7 +318,7 @@ impl Glean {
trim_data_to_registered_pings: false,
log_level: None,
rate_limit: None,
- enable_event_timestamps: false,
+ enable_event_timestamps: true,
experimentation_id: None,
};
diff --git a/third_party/rust/glean-core/src/glean.udl b/third_party/rust/glean-core/src/glean.udl
index e531f64a26..e68a57ea4c 100644
--- a/third_party/rust/glean-core/src/glean.udl
+++ b/third_party/rust/glean-core/src/glean.udl
@@ -201,6 +201,10 @@ dictionary PingRequest {
sequence<u8> body;
// A map with all the headers to be sent with the request.
record<DOMString, string> headers;
+ // Whether the body has {client|ping}_info sections.
+ boolean body_has_info_sections;
+ // The ping's name. Likely also somewhere in `path`.
+ string ping_name;
};
// An enum representing the possible upload tasks to be performed by an uploader.
@@ -287,7 +291,7 @@ enum ErrorType {
};
interface PingType {
- constructor(string name, boolean include_client_id, boolean send_if_empty, boolean precise_timestamps, sequence<string> reason_codes);
+ constructor(string name, boolean include_client_id, boolean send_if_empty, boolean precise_timestamps, boolean include_info_sections, sequence<string> reason_codes);
void submit(optional string? reason = null);
};
@@ -480,6 +484,8 @@ interface TimingDistributionMetric {
void accumulate_samples(sequence<i64> samples);
+ void accumulate_single_sample(i64 sample);
+
DistributionData? test_get_value(optional string? ping_name = null);
i32 test_get_num_recorded_errors(ErrorType error);
@@ -523,6 +529,8 @@ interface CustomDistributionMetric {
void accumulate_samples(sequence<i64> samples);
+ void accumulate_single_sample(i64 sample);
+
DistributionData? test_get_value(optional string? ping_name = null);
i32 test_get_num_recorded_errors(ErrorType error);
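Note on the UDL changes above: the `PingType` constructor grows a fifth boolean, `include_info_sections`, and both distribution metrics gain an `accumulate_single_sample` method. A minimal sketch of a caller of the updated Rust constructor, assuming the glean-core crate as patched here (this mirrors the doc example in core/mod.rs):

    use glean_core::metrics::PingType;

    // Argument order follows the UDL constructor signature.
    let ping = PingType::new(
        "sample", // name
        true,     // include_client_id
        false,    // send_if_empty
        true,     // precise_timestamps
        true,     // include_info_sections (new in this version)
        vec![],   // reason_codes
    );
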
diff --git a/third_party/rust/glean-core/src/internal_pings.rs b/third_party/rust/glean-core/src/internal_pings.rs
index 076a1f7485..07c3849006 100644
--- a/third_party/rust/glean-core/src/internal_pings.rs
+++ b/third_party/rust/glean-core/src/internal_pings.rs
@@ -26,6 +26,7 @@ impl InternalPings {
true,
true,
true,
+ true,
vec![
"active".to_string(),
"dirty_startup".to_string(),
@@ -37,6 +38,7 @@ impl InternalPings {
true,
false,
true,
+ true,
vec![
"overdue".to_string(),
"reschedule".to_string(),
@@ -50,6 +52,7 @@ impl InternalPings {
true,
false,
true,
+ true,
vec![
"startup".to_string(),
"inactive".to_string(),
@@ -61,6 +64,7 @@ impl InternalPings {
true,
true,
true,
+ true,
vec!["at_init".to_string(), "set_upload_enabled".to_string()],
),
}
diff --git a/third_party/rust/glean-core/src/lib.rs b/third_party/rust/glean-core/src/lib.rs
index a54e57a95b..b7f9d73beb 100644
--- a/third_party/rust/glean-core/src/lib.rs
+++ b/third_party/rust/glean-core/src/lib.rs
@@ -91,12 +91,12 @@ pub(crate) const DELETION_REQUEST_PINGS_DIRECTORY: &str = "deletion_request";
static INITIALIZE_CALLED: AtomicBool = AtomicBool::new(false);
/// Keep track of the debug features before Glean is initialized.
-static PRE_INIT_DEBUG_VIEW_TAG: OnceCell<Mutex<String>> = OnceCell::new();
+static PRE_INIT_DEBUG_VIEW_TAG: Mutex<String> = Mutex::new(String::new());
static PRE_INIT_LOG_PINGS: AtomicBool = AtomicBool::new(false);
-static PRE_INIT_SOURCE_TAGS: OnceCell<Mutex<Vec<String>>> = OnceCell::new();
+static PRE_INIT_SOURCE_TAGS: Mutex<Vec<String>> = Mutex::new(Vec::new());
/// Keep track of pings registered before Glean is initialized.
-static PRE_INIT_PING_REGISTRATION: OnceCell<Mutex<Vec<metrics::PingType>>> = OnceCell::new();
+static PRE_INIT_PING_REGISTRATION: Mutex<Vec<metrics::PingType>> = Mutex::new(Vec::new());
/// Global singleton of the handles of the glean.init threads.
/// For joining. For tests.
@@ -396,11 +396,9 @@ fn initialize_inner(
core::with_glean_mut(|glean| {
// The debug view tag might have been set before initialize,
// get the cached value and set it.
- if let Some(tag) = PRE_INIT_DEBUG_VIEW_TAG.get() {
- let lock = tag.try_lock();
- if let Ok(ref debug_tag) = lock {
- glean.set_debug_view_tag(debug_tag);
- }
+ let debug_tag = PRE_INIT_DEBUG_VIEW_TAG.lock().unwrap();
+ if debug_tag.len() > 0 {
+ glean.set_debug_view_tag(&debug_tag);
}
// The log pings debug option might have been set before initialize,
@@ -412,11 +410,9 @@ fn initialize_inner(
// The source tags might have been set before initialize,
// get the cached value and set them.
- if let Some(tags) = PRE_INIT_SOURCE_TAGS.get() {
- let lock = tags.try_lock();
- if let Ok(ref source_tags) = lock {
- glean.set_source_tags(source_tags.to_vec());
- }
+ let source_tags = PRE_INIT_SOURCE_TAGS.lock().unwrap();
+ if source_tags.len() > 0 {
+ glean.set_source_tags(source_tags.to_vec());
}
// Get the current value of the dirty flag so we know whether to
@@ -428,13 +424,9 @@ fn initialize_inner(
// Perform registration of pings that were attempted to be
// registered before init.
- if let Some(tags) = PRE_INIT_PING_REGISTRATION.get() {
- let lock = tags.try_lock();
- if let Ok(pings) = lock {
- for ping in &*pings {
- glean.register_ping_type(ping);
- }
- }
+ let pings = PRE_INIT_PING_REGISTRATION.lock().unwrap();
+ for ping in pings.iter() {
+ glean.register_ping_type(ping);
}
// If this is the first time ever the Glean SDK runs, make sure to set
@@ -861,7 +853,7 @@ pub(crate) fn register_ping_type(ping: &PingType) {
// if ping registration is attempted before Glean initializes.
// This state is kept across Glean resets, which should only ever happen in test mode.
// It's a set and keeping them around forever should not have much of an impact.
- let m = PRE_INIT_PING_REGISTRATION.get_or_init(Default::default);
+ let m = &PRE_INIT_PING_REGISTRATION;
let mut lock = m.lock().unwrap();
lock.push(ping.clone());
}
@@ -956,7 +948,7 @@ pub fn glean_set_debug_view_tag(tag: String) -> bool {
true
} else {
// Glean has not been initialized yet. Cache the provided tag value.
- let m = PRE_INIT_DEBUG_VIEW_TAG.get_or_init(Default::default);
+ let m = &PRE_INIT_DEBUG_VIEW_TAG;
let mut lock = m.lock().unwrap();
*lock = tag;
// When setting the debug view tag before initialization,
@@ -984,7 +976,7 @@ pub fn glean_set_source_tags(tags: Vec<String>) -> bool {
true
} else {
// Glean has not been initialized yet. Cache the provided source tags.
- let m = PRE_INIT_SOURCE_TAGS.get_or_init(Default::default);
+ let m = &PRE_INIT_SOURCE_TAGS;
let mut lock = m.lock().unwrap();
*lock = tags;
// When setting the source tags before initialization,
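Note on the lib.rs changes above: the pre-initialization caches move from `OnceCell<Mutex<T>>` to plain `static Mutex<T>` statics, which is possible because `Mutex::new(String::new())` and `Mutex::new(Vec::new())` are const expressions on recent Rust versions, so the `get_or_init` indirection disappears. A self-contained sketch of the pattern (stand-in statics, not the actual Glean ones):

    use std::sync::Mutex;

    // Before: OnceCell<Mutex<Vec<String>>> initialized lazily via get_or_init.
    // After: the Mutex can be constructed in a const context and used directly.
    static PRE_INIT_TAGS: Mutex<Vec<String>> = Mutex::new(Vec::new());

    fn cache_tag(tag: String) {
        // No get_or_init step; just lock and mutate.
        PRE_INIT_TAGS.lock().unwrap().push(tag);
    }

    fn drain_tags() -> Vec<String> {
        std::mem::take(&mut *PRE_INIT_TAGS.lock().unwrap())
    }

    fn main() {
        cache_tag("automation".into());
        assert_eq!(drain_tags(), vec!["automation".to_string()]);
    }
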
diff --git a/third_party/rust/glean-core/src/lib_unit_tests.rs b/third_party/rust/glean-core/src/lib_unit_tests.rs
index 0fc85b4602..cb1e4129d8 100644
--- a/third_party/rust/glean-core/src/lib_unit_tests.rs
+++ b/third_party/rust/glean-core/src/lib_unit_tests.rs
@@ -197,7 +197,7 @@ fn experimentation_id_is_set_correctly() {
trim_data_to_registered_pings: false,
log_level: None,
rate_limit: None,
- enable_event_timestamps: false,
+ enable_event_timestamps: true,
experimentation_id: Some(experimentation_id.to_string()),
})
.unwrap();
@@ -423,6 +423,7 @@ fn correct_order() {
Jwe("eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkEyNTZHQ00ifQ.OKOawDo13gRp2ojaHV7LFpZcgV7T6DVZKTyKOMTYUmKoTCVJRgckCL9kiMT03JGeipsEdY3mx_etLbbWSrFr05kLzcSr4qKAq7YN7e9jwQRb23nfa6c9d-StnImGyFDbSv04uVuxIp5Zms1gNxKKK2Da14B8S4rzVRltdYwam_lDp5XnZAYpQdb76FdIKLaVmqgfwX7XWRxv2322i-vDxRfqNzo_tETKzpVLzfiwQyeyPGLBIO56YJ7eObdv0je81860ppamavo35UgoRdbYaBcoh9QcfylQr66oc6vFWXRcZ_ZT2LawVCWTIy3brGPi6UklfCpIMfIjf7iGdXKHzg.48V1_ALb6US04U3b.5eym8TW_c8SuK0ltJ3rpYIzOeDQz7TALvtu6UG9oMo4vpzs9tX_EFShS8iB7j6jiSdiwkIr3ajwQzaBtQD_A.XFBoMYUZodetZdvTiFvSkQ".into()),
Rate(0, 0),
Text(long_string),
+ Object("{}".into()),
];
for metric in all_metrics {
@@ -451,6 +452,7 @@ fn correct_order() {
Rate(..) => assert_eq!(14, disc),
Url(..) => assert_eq!(15, disc),
Text(..) => assert_eq!(16, disc),
+ Object(..) => assert_eq!(17, disc),
}
}
}
diff --git a/third_party/rust/glean-core/src/metrics/boolean.rs b/third_party/rust/glean-core/src/metrics/boolean.rs
index 71ed2372c2..ade4a22bfc 100644
--- a/third_party/rust/glean-core/src/metrics/boolean.rs
+++ b/third_party/rust/glean-core/src/metrics/boolean.rs
@@ -74,7 +74,6 @@ impl BooleanMetric {
///
/// # Arguments
///
- /// * `glean` - the Glean instance this metric belongs to.
/// * `value` - the value to set.
pub fn set(&self, value: bool) {
let metric = self.clone();
@@ -106,6 +105,15 @@ impl BooleanMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<bool> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -118,8 +126,6 @@ impl BooleanMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. inner to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/counter.rs b/third_party/rust/glean-core/src/metrics/counter.rs
index 8f0a01cc3e..7e262c7d68 100644
--- a/third_party/rust/glean-core/src/metrics/counter.rs
+++ b/third_party/rust/glean-core/src/metrics/counter.rs
@@ -105,7 +105,6 @@ impl CounterMetric {
///
/// # Arguments
///
- /// * `glean` - The Glean instance this metric belongs to.
/// * `amount` - The amount to increase by. Should be positive.
///
/// ## Notes
@@ -143,6 +142,15 @@ impl CounterMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<i32> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -155,8 +163,6 @@ impl CounterMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. inner to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/custom_distribution.rs b/third_party/rust/glean-core/src/metrics/custom_distribution.rs
index 929e4863ec..c7f3fbc56f 100644
--- a/third_party/rust/glean-core/src/metrics/custom_distribution.rs
+++ b/third_party/rust/glean-core/src/metrics/custom_distribution.rs
@@ -84,14 +84,33 @@ impl CustomDistributionMetric {
/// for each of them.
pub fn accumulate_samples(&self, samples: Vec<i64>) {
let metric = self.clone();
- crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, samples))
+ crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &samples))
+ }
+
+ /// Accumulates precisely one signed sample and appends it to the metric.
+ ///
+ /// Signed is required so that the platform-specific code can provide us with a
+ /// 64 bit signed integer if no `u64` comparable type is available. This
+ /// will take care of filtering and reporting errors.
+ ///
+ /// # Arguments
+ ///
+ /// - `sample` - The singular sample to be recorded by the metric.
+ ///
+ /// ## Notes
+ ///
+ /// Discards any negative value of `sample` and reports an
+ /// [`ErrorType::InvalidValue`](crate::ErrorType::InvalidValue).
+ pub fn accumulate_single_sample(&self, sample: i64) {
+ let metric = self.clone();
+ crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &[sample]))
}
/// Accumulates the provided sample in the metric synchronously.
///
/// See [`accumulate_samples`](Self::accumulate_samples) for details.
#[doc(hidden)]
- pub fn accumulate_samples_sync(&self, glean: &Glean, samples: Vec<i64>) {
+ pub fn accumulate_samples_sync(&self, glean: &Glean, samples: &[i64]) {
if !self.should_record(glean) {
return;
}
@@ -132,7 +151,7 @@ impl CustomDistributionMetric {
self.bucket_count as usize,
)
};
- accumulate(&samples, hist, Metric::CustomDistributionLinear)
+ accumulate(samples, hist, Metric::CustomDistributionLinear)
}
HistogramType::Exponential => {
let hist = if let Some(Metric::CustomDistributionExponential(hist)) = old_value
@@ -145,7 +164,7 @@ impl CustomDistributionMetric {
self.bucket_count as usize,
)
};
- accumulate(&samples, hist, Metric::CustomDistributionExponential)
+ accumulate(samples, hist, Metric::CustomDistributionExponential)
}
};
@@ -194,6 +213,15 @@ impl CustomDistributionMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -206,8 +234,6 @@ impl CustomDistributionMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. inner to the first value in `send_in_pings`.
///
/// # Returns
///
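Note on the custom_distribution.rs changes above: `accumulate_single_sample` forwards to the same synchronous path as `accumulate_samples`, only with a one-element slice, so the two calls below record identically. A hedged usage sketch, assuming `dist` is an already-constructed `glean_core::metrics::CustomDistributionMetric`:

    // Both calls end up in accumulate_samples_sync(glean, &[i64]).
    dist.accumulate_samples(vec![42]);  // existing API
    dist.accumulate_single_sample(42);  // new API: no Vec allocation needed
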
diff --git a/third_party/rust/glean-core/src/metrics/datetime.rs b/third_party/rust/glean-core/src/metrics/datetime.rs
index 3ef846a32c..e04f7fc051 100644
--- a/third_party/rust/glean-core/src/metrics/datetime.rs
+++ b/third_party/rust/glean-core/src/metrics/datetime.rs
@@ -262,8 +262,8 @@ impl DatetimeMetric {
///
/// # Arguments
///
- /// * `glean` - the Glean instance this metric belongs to.
- /// * `storage_name` - the storage name to look into.
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
@@ -284,8 +284,8 @@ impl DatetimeMetric {
///
/// # Arguments
///
- /// * `glean` - the Glean instance this metric belongs to.
- /// * `storage_name` - the storage name to look into.
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
@@ -311,8 +311,6 @@ impl DatetimeMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. inner to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/denominator.rs b/third_party/rust/glean-core/src/metrics/denominator.rs
index fb80874924..3083d6e78a 100644
--- a/third_party/rust/glean-core/src/metrics/denominator.rs
+++ b/third_party/rust/glean-core/src/metrics/denominator.rs
@@ -91,6 +91,15 @@ impl DenominatorMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<i32> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -124,8 +133,6 @@ impl DenominatorMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - the optional name of the ping to retrieve the metric
- /// for. inner to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/event.rs b/third_party/rust/glean-core/src/metrics/event.rs
index 5ad6e6d50c..c7aefd9cd6 100644
--- a/third_party/rust/glean-core/src/metrics/event.rs
+++ b/third_party/rust/glean-core/src/metrics/event.rs
@@ -185,6 +185,11 @@ impl EventMetric {
/// Get the vector of currently stored events for this event metric.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<Vec<RecordedEvent>> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -197,8 +202,6 @@ impl EventMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. inner to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/experiment.rs b/third_party/rust/glean-core/src/metrics/experiment.rs
index 23e6c41ce2..5695bf942e 100644
--- a/third_party/rust/glean-core/src/metrics/experiment.rs
+++ b/third_party/rust/glean-core/src/metrics/experiment.rs
@@ -195,6 +195,15 @@ impl ExperimentMetric {
/// the RecordedExperiment.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, glean: &Glean) -> Option<RecordedExperiment> {
match StorageManager.snapshot_metric_for_test(
glean.storage(),
diff --git a/third_party/rust/glean-core/src/metrics/labeled.rs b/third_party/rust/glean-core/src/metrics/labeled.rs
index fa3e6a6a75..f9f6a28880 100644
--- a/third_party/rust/glean-core/src/metrics/labeled.rs
+++ b/third_party/rust/glean-core/src/metrics/labeled.rs
@@ -205,8 +205,6 @@ where
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/memory_distribution.rs b/third_party/rust/glean-core/src/metrics/memory_distribution.rs
index ac9eda1a90..7b5e5ee192 100644
--- a/third_party/rust/glean-core/src/metrics/memory_distribution.rs
+++ b/third_party/rust/glean-core/src/metrics/memory_distribution.rs
@@ -254,6 +254,15 @@ impl MemoryDistributionMetric {
/// Gets the currently stored value.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -266,8 +275,6 @@ impl MemoryDistributionMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/mod.rs b/third_party/rust/glean-core/src/metrics/mod.rs
index 43253b9aa7..92001efd2a 100644
--- a/third_party/rust/glean-core/src/metrics/mod.rs
+++ b/third_party/rust/glean-core/src/metrics/mod.rs
@@ -9,7 +9,8 @@ use std::sync::atomic::Ordering;
use chrono::{DateTime, FixedOffset};
use serde::{Deserialize, Serialize};
-use serde_json::{json, Value as JsonValue};
+use serde_json::json;
+pub use serde_json::Value as JsonValue;
mod boolean;
mod counter;
@@ -23,6 +24,7 @@ mod memory_distribution;
mod memory_unit;
mod metrics_enabled_config;
mod numerator;
+mod object;
mod ping;
mod quantity;
mod rate;
@@ -54,6 +56,7 @@ pub use self::labeled::{LabeledBoolean, LabeledCounter, LabeledMetric, LabeledSt
pub use self::memory_distribution::MemoryDistributionMetric;
pub use self::memory_unit::MemoryUnit;
pub use self::numerator::NumeratorMetric;
+pub use self::object::ObjectMetric;
pub use self::ping::PingType;
pub use self::quantity::QuantityMetric;
pub use self::rate::{Rate, RateMetric};
@@ -141,6 +144,8 @@ pub enum Metric {
Url(String),
/// A Text metric. See [`TextMetric`] for more information.
Text(String),
+ /// An Object metric. See [`ObjectMetric`] for more information.
+ Object(String),
}
/// A [`MetricType`] describes common behavior across all metrics.
@@ -251,6 +256,7 @@ impl Metric {
Metric::MemoryDistribution(_) => "memory_distribution",
Metric::Jwe(_) => "jwe",
Metric::Text(_) => "text",
+ Metric::Object(_) => "object",
}
}
@@ -280,6 +286,9 @@ impl Metric {
Metric::MemoryDistribution(hist) => json!(memory_distribution::snapshot(hist)),
Metric::Jwe(s) => json!(s),
Metric::Text(s) => json!(s),
+ Metric::Object(s) => {
+ serde_json::from_str(s).expect("object storage should have been json")
+ }
}
}
}
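Note on the metrics/mod.rs changes above: the new `Metric::Object` variant stores its payload as a JSON-encoded `String` and re-parses it in `as_json`, relying on the invariant that only valid JSON is ever written to storage. A self-contained sketch of that round-trip using `serde_json` alone (the functions here are stand-ins, not the real `Metric` methods):

    use serde_json::{json, Value as JsonValue};

    // Stand-in for the storage invariant: objects are persisted as JSON text.
    fn store_object(value: &JsonValue) -> String {
        serde_json::to_string(value).expect("serializing a JsonValue cannot fail")
    }

    // Stand-in for the Metric::Object(s) arm of as_json().
    fn object_as_json(stored: &str) -> JsonValue {
        serde_json::from_str(stored).expect("object storage should have been json")
    }

    fn main() {
        let stored = store_object(&json!({"width": 1920, "height": 1080}));
        assert_eq!(object_as_json(&stored)["width"], 1920);
    }
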
diff --git a/third_party/rust/glean-core/src/metrics/numerator.rs b/third_party/rust/glean-core/src/metrics/numerator.rs
index 3c340cab1d..de29338a5c 100644
--- a/third_party/rust/glean-core/src/metrics/numerator.rs
+++ b/third_party/rust/glean-core/src/metrics/numerator.rs
@@ -55,12 +55,16 @@ impl NumeratorMetric {
///
/// Gets the currently stored value as a pair of integers.
///
+ /// This doesn't clear the stored value.
+ ///
/// # Arguments
///
/// * `ping_name` - the optional name of the ping to retrieve the metric
/// for. Defaults to the first value in `send_in_pings`.
///
- /// This doesn't clear the stored value.
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<Rate> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -82,8 +86,6 @@ impl NumeratorMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - the optional name of the ping to retrieve the metric
- /// for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/object.rs b/third_party/rust/glean-core/src/metrics/object.rs
new file mode 100644
index 0000000000..6071e2b33a
--- /dev/null
+++ b/third_party/rust/glean-core/src/metrics/object.rs
@@ -0,0 +1,135 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::sync::Arc;
+
+use crate::common_metric_data::CommonMetricDataInternal;
+use crate::error_recording::{record_error, test_get_num_recorded_errors, ErrorType};
+use crate::metrics::JsonValue;
+use crate::metrics::Metric;
+use crate::metrics::MetricType;
+use crate::storage::StorageManager;
+use crate::CommonMetricData;
+use crate::Glean;
+
+/// An object metric.
+///
+/// Record structured data.
+/// The value must adhere to a predefined structure and is serialized into JSON.
+#[derive(Clone, Debug)]
+pub struct ObjectMetric {
+ meta: Arc<CommonMetricDataInternal>,
+}
+
+impl MetricType for ObjectMetric {
+ fn meta(&self) -> &CommonMetricDataInternal {
+ &self.meta
+ }
+}
+
+// IMPORTANT:
+//
+// When changing this implementation, make sure all the operations are
+// also declared in the related trait in `../traits/`.
+impl ObjectMetric {
+ /// Creates a new object metric.
+ pub fn new(meta: CommonMetricData) -> Self {
+ Self {
+ meta: Arc::new(meta.into()),
+ }
+ }
+
+ /// Sets to the specified structure.
+ ///
+ /// # Arguments
+ ///
+ /// * `glean` - the Glean instance this metric belongs to.
+ /// * `value` - the value to set.
+ #[doc(hidden)]
+ pub fn set_sync(&self, glean: &Glean, value: JsonValue) {
+ let value = Metric::Object(serde_json::to_string(&value).unwrap());
+ glean.storage().record(glean, &self.meta, &value)
+ }
+
+ /// Sets to the specified structure.
+ ///
+ /// No additional verification is done.
+ /// The shape needs to be externally verified.
+ ///
+ /// # Arguments
+ ///
+ /// * `value` - the value to set.
+ pub fn set(&self, value: JsonValue) {
+ let metric = self.clone();
+ crate::launch_with_glean(move |glean| metric.set_sync(glean, value))
+ }
+
+ /// Record an `InvalidValue` error for this metric.
+ ///
+ /// Only to be used by the RLB.
+ // TODO(bug 1691073): This can probably go once we have a more generic mechanism to record
+ // errors
+ pub fn record_schema_error(&self) {
+ let metric = self.clone();
+ crate::launch_with_glean(move |glean| {
+ let msg = "Value did not match predefined schema";
+ record_error(glean, &metric.meta, ErrorType::InvalidValue, msg, None);
+ });
+ }
+
+ /// Get current value
+ #[doc(hidden)]
+ pub fn get_value<'a, S: Into<Option<&'a str>>>(
+ &self,
+ glean: &Glean,
+ ping_name: S,
+ ) -> Option<String> {
+ let queried_ping_name = ping_name
+ .into()
+ .unwrap_or_else(|| &self.meta().inner.send_in_pings[0]);
+
+ match StorageManager.snapshot_metric_for_test(
+ glean.storage(),
+ queried_ping_name,
+ &self.meta.identifier(glean),
+ self.meta.inner.lifetime,
+ ) {
+ Some(Metric::Object(o)) => Some(o),
+ _ => None,
+ }
+ }
+
+ /// **Test-only API (exported for FFI purposes).**
+ ///
+ /// Gets the currently stored value as JSON.
+ ///
+ /// This doesn't clear the stored value.
+ pub fn test_get_value(&self, ping_name: Option<String>) -> Option<JsonValue> {
+ crate::block_on_dispatcher();
+ let value = crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()));
+ // We only store valid JSON
+ value.map(|val| serde_json::from_str(&val).unwrap())
+ }
+
+ /// **Exported for test purposes.**
+ ///
+ /// Gets the number of recorded errors for the given metric and error type.
+ ///
+ /// # Arguments
+ ///
+ /// * `error` - The type of error
+ /// * `ping_name` - represents the optional name of the ping to retrieve the
+    ///   metric for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The number of errors reported.
+ pub fn test_get_num_recorded_errors(&self, error: ErrorType) -> i32 {
+ crate::block_on_dispatcher();
+
+ crate::core::with_glean(|glean| {
+ test_get_num_recorded_errors(glean, self.meta(), error).unwrap_or(0)
+ })
+ }
+}
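Note on the new object.rs above: a usage sketch for `ObjectMetric` — construct it from `CommonMetricData`, set a `serde_json::Value`, and read it back with the test API. The `CommonMetricData` field names and the `Lifetime` import follow the usual glean-core definitions and are shown here as assumptions; the read-back also requires an initialized Glean instance.

    use glean_core::metrics::{JsonValue, ObjectMetric};
    use glean_core::{CommonMetricData, Lifetime};
    use serde_json::json;

    let metric = ObjectMetric::new(CommonMetricData {
        name: "balloons".into(),
        category: "party".into(),
        send_in_pings: vec!["store1".into()],
        lifetime: Lifetime::Ping,
        ..Default::default()
    });

    // The shape is not validated here; callers (e.g. the RLB) verify it
    // externally and call record_schema_error() on mismatch.
    let value: JsonValue = json!([{ "colour": "red", "diameter": 5 }]);
    metric.set(value);

    // Test-only read-back (needs an initialized Glean and a flushed dispatcher);
    // returns None if nothing was stored.
    assert!(metric.test_get_value(None).is_some());
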
diff --git a/third_party/rust/glean-core/src/metrics/ping.rs b/third_party/rust/glean-core/src/metrics/ping.rs
index dc37d76a45..e60284b1e2 100644
--- a/third_party/rust/glean-core/src/metrics/ping.rs
+++ b/third_party/rust/glean-core/src/metrics/ping.rs
@@ -6,6 +6,7 @@ use std::fmt;
use std::sync::Arc;
use crate::ping::PingMaker;
+use crate::upload::PingPayload;
use crate::Glean;
use uuid::Uuid;
@@ -26,6 +27,8 @@ struct InnerPing {
pub send_if_empty: bool,
/// Whether to use millisecond-precise start/end times.
pub precise_timestamps: bool,
+ /// Whether to include the {client|ping}_info sections on assembly.
+ pub include_info_sections: bool,
/// The "reason" codes that this ping can send
pub reason_codes: Vec<String>,
}
@@ -37,6 +40,7 @@ impl fmt::Debug for PingType {
.field("include_client_id", &self.0.include_client_id)
.field("send_if_empty", &self.0.send_if_empty)
.field("precise_timestamps", &self.0.precise_timestamps)
+ .field("include_info_sections", &self.0.include_info_sections)
.field("reason_codes", &self.0.reason_codes)
.finish()
}
@@ -61,6 +65,7 @@ impl PingType {
include_client_id: bool,
send_if_empty: bool,
precise_timestamps: bool,
+ include_info_sections: bool,
reason_codes: Vec<String>,
) -> Self {
let this = Self(Arc::new(InnerPing {
@@ -68,6 +73,7 @@ impl PingType {
include_client_id,
send_if_empty,
precise_timestamps,
+ include_info_sections,
reason_codes,
}));
@@ -94,6 +100,10 @@ impl PingType {
self.0.precise_timestamps
}
+ pub(crate) fn include_info_sections(&self) -> bool {
+ self.0.include_info_sections
+ }
+
/// Submits the ping for eventual uploading.
///
/// The ping content is assembled as soon as possible, but upload is not
@@ -186,13 +196,17 @@ impl PingType {
// so both scenarios should be impossible.
let content =
::serde_json::to_string(&ping.content).expect("ping serialization failed");
- glean.upload_manager.enqueue_ping(
- glean,
- ping.doc_id,
- ping.url_path,
- &content,
- Some(ping.headers),
- );
+ // TODO: Shouldn't we consolidate on a single collected Ping representation?
+ let ping = PingPayload {
+ document_id: ping.doc_id.to_string(),
+ upload_path: ping.url_path.to_string(),
+ json_body: content,
+ headers: Some(ping.headers),
+ body_has_info_sections: self.0.include_info_sections,
+ ping_name: self.0.name.to_string(),
+ };
+
+ glean.upload_manager.enqueue_ping(glean, ping);
return true;
}
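Note on the metrics/ping.rs changes above: submission now hands the upload manager a structured `PingPayload` instead of loose arguments, with the two new fields carried straight from the ping type. A hedged sketch of the shape being built (all values illustrative; in ping.rs they come from the collected ping):

    use glean_core::upload::PingPayload;

    let payload = PingPayload {
        document_id: "doc-id".into(),
        upload_path: "/submit/app-id/prototype/1/doc-id".into(),
        json_body: "{}".into(),
        headers: None,
        body_has_info_sections: false, // from PingType::include_info_sections()
        ping_name: "prototype".into(), // from PingType::name()
    };
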
diff --git a/third_party/rust/glean-core/src/metrics/quantity.rs b/third_party/rust/glean-core/src/metrics/quantity.rs
index c59d3a4a21..92216625d6 100644
--- a/third_party/rust/glean-core/src/metrics/quantity.rs
+++ b/third_party/rust/glean-core/src/metrics/quantity.rs
@@ -98,6 +98,15 @@ impl QuantityMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<i64> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -110,8 +119,6 @@ impl QuantityMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/rate.rs b/third_party/rust/glean-core/src/metrics/rate.rs
index ba7f085b55..843d35002e 100644
--- a/third_party/rust/glean-core/src/metrics/rate.rs
+++ b/third_party/rust/glean-core/src/metrics/rate.rs
@@ -141,6 +141,15 @@ impl RateMetric {
/// Gets the currently stored value as a pair of integers.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<Rate> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -175,8 +184,6 @@ impl RateMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/string.rs b/third_party/rust/glean-core/src/metrics/string.rs
index 5ed7b2c7f1..4aa30a8d7e 100644
--- a/third_party/rust/glean-core/src/metrics/string.rs
+++ b/third_party/rust/glean-core/src/metrics/string.rs
@@ -112,6 +112,15 @@ impl StringMetric {
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<String> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -124,8 +133,6 @@ impl StringMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/string_list.rs b/third_party/rust/glean-core/src/metrics/string_list.rs
index 75b2df7f80..cd4e71b885 100644
--- a/third_party/rust/glean-core/src/metrics/string_list.rs
+++ b/third_party/rust/glean-core/src/metrics/string_list.rs
@@ -171,6 +171,15 @@ impl StringListMetric {
/// Gets the currently-stored values.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<Vec<String>> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -183,8 +192,6 @@ impl StringListMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/text.rs b/third_party/rust/glean-core/src/metrics/text.rs
index 06ad5c0d78..baa8e88d75 100644
--- a/third_party/rust/glean-core/src/metrics/text.rs
+++ b/third_party/rust/glean-core/src/metrics/text.rs
@@ -116,6 +116,15 @@ impl TextMetric {
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<String> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -128,8 +137,6 @@ impl TextMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/timespan.rs b/third_party/rust/glean-core/src/metrics/timespan.rs
index b4d3bd5902..ee63fb52f8 100644
--- a/third_party/rust/glean-core/src/metrics/timespan.rs
+++ b/third_party/rust/glean-core/src/metrics/timespan.rs
@@ -253,6 +253,15 @@ impl TimespanMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<i64> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| {
@@ -292,8 +301,6 @@ impl TimespanMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/timing_distribution.rs b/third_party/rust/glean-core/src/metrics/timing_distribution.rs
index e339ef8882..3293be9518 100644
--- a/third_party/rust/glean-core/src/metrics/timing_distribution.rs
+++ b/third_party/rust/glean-core/src/metrics/timing_distribution.rs
@@ -293,7 +293,35 @@ impl TimingDistributionMetric {
/// are longer than `MAX_SAMPLE_TIME`.
pub fn accumulate_samples(&self, samples: Vec<i64>) {
let metric = self.clone();
- crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, samples))
+ crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &samples))
+ }
+
+ /// Accumulates precisely one signed sample and appends it to the metric.
+ ///
+ /// Precludes the need for a collection in the most common use case.
+ ///
+ /// Sign is required so that the platform-specific code can provide us with
+ /// a 64 bit signed integer if no `u64` comparable type is available. This
+ /// will take care of filtering and reporting errors for any provided negative
+ /// sample.
+ ///
+ /// Please note that this assumes that the provided sample is already in
+ /// the "unit" declared by the instance of the metric type (e.g. if the
+ /// instance this method was called on is using [`crate::TimeUnit::Second`], then
+ /// `sample` is assumed to be in that unit).
+ ///
+ /// # Arguments
+ ///
+ /// * `sample` - The singular sample to be recorded by the metric.
+ ///
+ /// ## Notes
+ ///
+ /// Discards any negative value and reports an [`ErrorType::InvalidValue`].
+ /// Reports an [`ErrorType::InvalidOverflow`] error if the sample is longer than
+ /// `MAX_SAMPLE_TIME`.
+ pub fn accumulate_single_sample(&self, sample: i64) {
+ let metric = self.clone();
+ crate::launch_with_glean(move |glean| metric.accumulate_samples_sync(glean, &[sample]))
}
/// **Test-only API (exported for testing purposes).**
@@ -301,7 +329,7 @@ impl TimingDistributionMetric {
///
/// Use [`accumulate_samples`](Self::accumulate_samples)
#[doc(hidden)]
- pub fn accumulate_samples_sync(&self, glean: &Glean, samples: Vec<i64>) {
+ pub fn accumulate_samples_sync(&self, glean: &Glean, samples: &[i64]) {
if !self.should_record(glean) {
return;
}
@@ -464,6 +492,15 @@ impl TimingDistributionMetric {
/// Gets the currently stored value as an integer.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<DistributionData> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -476,8 +513,6 @@ impl TimingDistributionMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
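Note on the timing_distribution.rs changes above: as with custom distributions, a single-sample entry point avoids allocating a `Vec` for the common one-sample case; the sample is assumed to already be in the metric's declared time unit. A hedged usage sketch, assuming `timing` is an already-constructed `glean_core::metrics::TimingDistributionMetric`:

    // Both calls end up in accumulate_samples_sync(glean, &[i64]).
    timing.accumulate_samples(vec![16]);  // existing API
    timing.accumulate_single_sample(16);  // new API, equivalent for one sample
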
diff --git a/third_party/rust/glean-core/src/metrics/url.rs b/third_party/rust/glean-core/src/metrics/url.rs
index c9eb824a3e..48b3f9e7ae 100644
--- a/third_party/rust/glean-core/src/metrics/url.rs
+++ b/third_party/rust/glean-core/src/metrics/url.rs
@@ -131,6 +131,15 @@ impl UrlMetric {
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<String> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| self.get_value(glean, ping_name.as_deref()))
@@ -143,8 +152,6 @@ impl UrlMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/metrics/uuid.rs b/third_party/rust/glean-core/src/metrics/uuid.rs
index e78d15ad3b..77d0f82320 100644
--- a/third_party/rust/glean-core/src/metrics/uuid.rs
+++ b/third_party/rust/glean-core/src/metrics/uuid.rs
@@ -128,6 +128,15 @@ impl UuidMetric {
/// Gets the currently stored value as a string.
///
/// This doesn't clear the stored value.
+ ///
+ /// # Arguments
+ ///
+ /// * `ping_name` - the optional name of the ping to retrieve the metric
+ /// for. Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// The stored value or `None` if nothing stored.
pub fn test_get_value(&self, ping_name: Option<String>) -> Option<String> {
crate::block_on_dispatcher();
crate::core::with_glean(|glean| {
@@ -143,8 +152,6 @@ impl UuidMetric {
/// # Arguments
///
/// * `error` - The type of error
- /// * `ping_name` - represents the optional name of the ping to retrieve the
- /// metric for. Defaults to the first value in `send_in_pings`.
///
/// # Returns
///
diff --git a/third_party/rust/glean-core/src/ping/mod.rs b/third_party/rust/glean-core/src/ping/mod.rs
index c22a890aa2..d1a67ae360 100644
--- a/third_party/rust/glean-core/src/ping/mod.rs
+++ b/third_party/rust/glean-core/src/ping/mod.rs
@@ -14,7 +14,7 @@ use serde_json::{json, Value as JsonValue};
use crate::common_metric_data::{CommonMetricData, Lifetime};
use crate::metrics::{CounterMetric, DatetimeMetric, Metric, MetricType, PingType, TimeUnit};
use crate::storage::{StorageManager, INTERNAL_STORAGE};
-use crate::upload::HeaderMap;
+use crate::upload::{HeaderMap, PingMetadata};
use crate::util::{get_iso_time_string, local_now_with_offset};
use crate::{Glean, Result, DELETION_REQUEST_PINGS_DIRECTORY, PENDING_PINGS_DIRECTORY};
@@ -30,6 +30,8 @@ pub struct Ping<'a> {
pub content: JsonValue,
/// The headers to upload with the payload.
pub headers: HeaderMap,
+ /// Whether the content contains {client|ping}_info sections.
+ pub includes_info_sections: bool,
}
/// Collect a ping's data, assemble it into its full payload and store it on disk.
@@ -237,9 +239,9 @@ impl PingMaker {
.snapshot_as_json(glean, ping.name(), true);
// Due to the way the experimentation identifier could link datasets that are intentionally unlinked,
- // it will not be included in pings that specifically exclude the Glean client-id and those pings that
- // should not be sent if empty.
- if (!ping.include_client_id() || !ping.send_if_empty())
+ // it will not be included in pings that specifically exclude the Glean client-id, those pings that
+ // should not be sent if empty, or pings that exclude the {client|ping}_info sections wholesale.
+ if (!ping.include_client_id() || !ping.send_if_empty() || !ping.include_info_sections())
&& glean.test_get_experimentation_id().is_some()
&& metrics_data.is_some()
{
@@ -285,13 +287,18 @@ impl PingMaker {
TimeUnit::Minute
};
- let ping_info = self.get_ping_info(glean, ping.name(), reason, precision);
- let client_info = self.get_client_info(glean, ping.include_client_id());
+ let mut json = if ping.include_info_sections() {
+ let ping_info = self.get_ping_info(glean, ping.name(), reason, precision);
+ let client_info = self.get_client_info(glean, ping.include_client_id());
+
+ json!({
+ "ping_info": ping_info,
+ "client_info": client_info
+ })
+ } else {
+ json!({})
+ };
- let mut json = json!({
- "ping_info": ping_info,
- "client_info": client_info
- });
let json_obj = json.as_object_mut()?;
if let Some(metrics_data) = metrics_data {
json_obj.insert("metrics".to_string(), metrics_data);
@@ -306,6 +313,7 @@ impl PingMaker {
doc_id,
url_path,
headers: self.get_headers(glean),
+ includes_info_sections: ping.include_info_sections(),
})
}
@@ -355,11 +363,17 @@ impl PingMaker {
file.write_all(ping.url_path.as_bytes())?;
file.write_all(b"\n")?;
file.write_all(::serde_json::to_string(&ping.content)?.as_bytes())?;
- if !ping.headers.is_empty() {
- file.write_all(b"\n{\"headers\":")?;
- file.write_all(::serde_json::to_string(&ping.headers)?.as_bytes())?;
- file.write_all(b"}")?;
- }
+ file.write_all(b"\n")?;
+ let metadata = PingMetadata {
+ // We don't actually need to clone the headers except to match PingMetadata's ownership.
+ // But since we're going to write a file to disk in a sec,
+ // and HeaderMaps tend to have only like two things in them, tops,
+ // the cost is bearable.
+ headers: Some(ping.headers.clone()),
+ body_has_info_sections: Some(ping.includes_info_sections),
+ ping_name: Some(ping.name.to_string()),
+ };
+ file.write_all(::serde_json::to_string(&metadata)?.as_bytes())?;
}
if let Err(e) = std::fs::rename(&temp_ping_path, &ping_path) {
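Note on the ping/mod.rs changes above: the pending ping file now always gets three lines — the upload path, the JSON body, and a metadata line serializing the full `PingMetadata` (headers plus the two new fields) instead of a bare `{"headers": ...}` object written only when headers exist. A self-contained sketch of what that third line looks like, using stand-in types mirroring `upload::PingMetadata` and `HeaderMap` (the header name is illustrative):

    use std::collections::HashMap;
    use serde::Serialize;

    type HeaderMap = HashMap<String, String>;

    #[derive(Serialize)]
    struct PingMetadata {
        headers: Option<HeaderMap>,
        body_has_info_sections: Option<bool>,
        ping_name: Option<String>,
    }

    fn main() {
        let metadata = PingMetadata {
            headers: Some(HeaderMap::from([("X-Debug-ID".into(), "my-tag".into())])),
            body_has_info_sections: Some(true),
            ping_name: Some("baseline".into()),
        };
        // Written as the third line of the pending ping file,
        // after the upload path line and the JSON body line.
        println!("{}", serde_json::to_string(&metadata).unwrap());
    }
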
diff --git a/third_party/rust/glean-core/src/traits/custom_distribution.rs b/third_party/rust/glean-core/src/traits/custom_distribution.rs
index c0c80c028b..43dfdb7da8 100644
--- a/third_party/rust/glean-core/src/traits/custom_distribution.rs
+++ b/third_party/rust/glean-core/src/traits/custom_distribution.rs
@@ -28,6 +28,22 @@ pub trait CustomDistribution {
/// them.
fn accumulate_samples_signed(&self, samples: Vec<i64>);
+ /// Accumulates precisely one signed sample in the metric.
+ ///
+ /// This is required so that the platform-specific code can provide us with a
+ /// 64 bit signed integer if no `u64` comparable type is available. This
+ /// will take care of filtering and reporting errors.
+ ///
+ /// # Arguments
+ ///
+ /// - `sample` - The singular sample to be recorded by the metric.
+ ///
+ /// ## Notes
+ ///
+ /// Discards any negative value of `sample` and reports an
+ /// [`ErrorType::InvalidValue`](crate::ErrorType::InvalidValue).
+ fn accumulate_single_sample_signed(&self, sample: i64);
+
/// **Exported for test purposes.**
///
/// Gets the currently stored histogram.
diff --git a/third_party/rust/glean-core/src/traits/mod.rs b/third_party/rust/glean-core/src/traits/mod.rs
index c4bcf7cdd6..4115609fdd 100644
--- a/third_party/rust/glean-core/src/traits/mod.rs
+++ b/third_party/rust/glean-core/src/traits/mod.rs
@@ -7,6 +7,10 @@
//! Individual metric types implement this trait to expose the specific metrics API.
//! It can be used by wrapping implementations to guarantee API conformance.
+/// Re-export for use in generated code.
+#[doc(hidden)]
+pub extern crate serde as __serde;
+
mod boolean;
mod counter;
mod custom_distribution;
@@ -15,6 +19,7 @@ mod event;
mod labeled;
mod memory_distribution;
mod numerator;
+mod object;
mod ping;
mod quantity;
mod rate;
@@ -37,6 +42,7 @@ pub use self::event::NoExtraKeys;
pub use self::labeled::Labeled;
pub use self::memory_distribution::MemoryDistribution;
pub use self::numerator::Numerator;
+pub use self::object::{ObjectError, ObjectSerialize};
pub use self::ping::Ping;
pub use self::quantity::Quantity;
pub use self::rate::Rate;
diff --git a/third_party/rust/glean-core/src/traits/object.rs b/third_party/rust/glean-core/src/traits/object.rs
new file mode 100644
index 0000000000..c579efeac7
--- /dev/null
+++ b/third_party/rust/glean-core/src/traits/object.rs
@@ -0,0 +1,53 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::fmt::Display;
+
+use serde::{Deserialize, Serialize};
+use serde_json::Value as JsonValue;
+
+/// This type represents all possible errors that can occur when serializing or deserializing an object from/to JSON.
+#[derive(Debug)]
+pub struct ObjectError(serde_json::Error);
+
+impl Display for ObjectError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ Display::fmt(&self.0, f)
+ }
+}
+
+impl std::error::Error for ObjectError {}
+
+/// An object that can be serialized into JSON.
+///
+/// Objects are defined by their structure in the metrics definition.
+///
+/// This is essentially a wrapper around serde's `Serialize`/`Deserialize`,
+/// but in a way we can name it for our JSON (de)serialization.
+pub trait ObjectSerialize {
+ /// Deserialize the object from its JSON representation.
+ ///
+ /// Returns an error if deserialization fails.
+ /// This should not happen for glean_parser-generated and later serialized objects.
+ fn from_str(obj: &str) -> Result<Self, ObjectError>
+ where
+ Self: Sized;
+
+ /// Serialize this object into a JSON string.
+ fn into_serialized_object(self) -> Result<JsonValue, ObjectError>;
+}
+
+impl<V> ObjectSerialize for V
+where
+ V: Serialize,
+ V: for<'de> Deserialize<'de>,
+{
+ fn from_str(obj: &str) -> Result<Self, ObjectError> {
+ serde_json::from_str(obj).map_err(ObjectError)
+ }
+
+ fn into_serialized_object(self) -> Result<JsonValue, ObjectError> {
+ serde_json::to_value(self).map_err(ObjectError)
+ }
+}
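Note on the new traits/object.rs above: the blanket impl means any type deriving both `Serialize` and `Deserialize` automatically satisfies `ObjectSerialize`; glean_parser-generated object types are expected to rely on exactly this. A hedged sketch with a hand-written struct standing in for a generated one:

    use glean_core::traits::ObjectSerialize;
    use serde::{Deserialize, Serialize};

    // Stand-in for a glean_parser-generated object type.
    #[derive(Serialize, Deserialize)]
    struct BalloonsObjectItem {
        colour: Option<String>,
        diameter: Option<i64>,
    }

    fn main() {
        // Round-trip through the blanket ObjectSerialize impl.
        let item = BalloonsObjectItem::from_str(r#"{"colour":"red","diameter":5}"#)
            .expect("valid JSON matching the structure");
        let json = item.into_serialized_object().expect("serialization succeeds");
        assert_eq!(json["colour"], "red");
    }
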
diff --git a/third_party/rust/glean-core/src/traits/timing_distribution.rs b/third_party/rust/glean-core/src/traits/timing_distribution.rs
index 03083753c6..ba618d2b4b 100644
--- a/third_party/rust/glean-core/src/traits/timing_distribution.rs
+++ b/third_party/rust/glean-core/src/traits/timing_distribution.rs
@@ -66,6 +66,31 @@ pub trait TimingDistribution {
/// are longer than `MAX_SAMPLE_TIME`.
fn accumulate_samples(&self, samples: Vec<i64>);
+ /// Accumulates precisely one signed sample in the metric.
+ ///
+ /// Precludes the need for a collection in the most common use case.
+ ///
+ /// Sign is required so that the platform-specific code can provide us with
+ /// a 64 bit signed integer if no `u64` comparable type is available. This
+ /// will take care of filtering and reporting errors for any provided negative
+ /// sample.
+ ///
+ /// Please note that this assumes that the provided sample is already in
+ /// the "unit" declared by the instance of the metric type (e.g. if the
+ /// instance this method was called on is using [`crate::TimeUnit::Second`], then
+ /// `sample` is assumed to be in that unit).
+ ///
+ /// # Arguments
+ ///
+ /// * `sample` - The singular sample to be recorded by the metric.
+ ///
+ /// ## Notes
+ ///
+ /// Discards any negative value and reports an [`ErrorType::InvalidValue`].
+ /// Reports an [`ErrorType::InvalidOverflow`] error if the sample is longer than
+ /// `MAX_SAMPLE_TIME`.
+ fn accumulate_single_sample(&self, sample: i64);
+
/// Accumulates the provided samples in the metric.
///
/// # Arguments
diff --git a/third_party/rust/glean-core/src/upload/directory.rs b/third_party/rust/glean-core/src/upload/directory.rs
index a78bbf0bdb..706550fe6c 100644
--- a/third_party/rust/glean-core/src/upload/directory.rs
+++ b/third_party/rust/glean-core/src/upload/directory.rs
@@ -9,15 +9,28 @@ use std::fs::{self, File};
use std::io::{BufRead, BufReader};
use std::path::{Path, PathBuf};
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};
use uuid::Uuid;
use super::request::HeaderMap;
use crate::{DELETION_REQUEST_PINGS_DIRECTORY, PENDING_PINGS_DIRECTORY};
/// A representation of the data extracted from a ping file,
-/// this will contain the document_id, path, JSON encoded body of a ping and the persisted headers.
-pub type PingPayload = (String, String, String, Option<HeaderMap>);
+#[derive(Clone, Debug, Default)]
+pub struct PingPayload {
+ /// The ping's doc_id.
+ pub document_id: String,
+ /// The path to upload the ping to.
+ pub upload_path: String,
+ /// The ping body as JSON-encoded string.
+ pub json_body: String,
+ /// HTTP headers to include in the upload request.
+ pub headers: Option<HeaderMap>,
+ /// Whether the ping body contains {client|ping}_info
+ pub body_has_info_sections: bool,
+ /// The ping's name. (Also likely in the upload_path.)
+ pub ping_name: String,
+}
/// A struct to hold the result of scanning all pings directories.
#[derive(Clone, Debug, Default)]
@@ -62,20 +75,28 @@ fn get_file_name_as_str(path: &Path) -> Option<&str> {
}
}
+/// A ping's metadata, as (optionally) represented on disk.
+///
+/// Anything that isn't the upload path or the ping body.
+#[derive(Default, Deserialize, Serialize)]
+pub struct PingMetadata {
+ /// HTTP headers to include when uploading the ping.
+ pub headers: Option<HeaderMap>,
+ /// Whether the body has {client|ping}_info sections.
+ pub body_has_info_sections: Option<bool>,
+ /// The name of the ping.
+ pub ping_name: Option<String>,
+}
+
/// Processes a ping's metadata.
///
/// The metadata is an optional third line in the ping file,
/// currently it contains only additional headers to be added to each ping request.
/// Therefore, we will process the contents of this line
/// and return a HeaderMap of the persisted headers.
-fn process_metadata(path: &str, metadata: &str) -> Option<HeaderMap> {
- #[derive(Deserialize)]
- struct PingMetadata {
- pub headers: HeaderMap,
- }
-
+fn process_metadata(path: &str, metadata: &str) -> Option<PingMetadata> {
if let Ok(metadata) = serde_json::from_str::<PingMetadata>(metadata) {
- return Some(metadata.headers);
+ return Some(metadata);
} else {
log::warn!("Error while parsing ping metadata: {}", path);
}
@@ -171,8 +192,23 @@ impl PingDirectoryManager {
if let (Some(Ok(path)), Some(Ok(body)), Ok(metadata)) =
(lines.next(), lines.next(), lines.next().transpose())
{
- let headers = metadata.and_then(|m| process_metadata(&path, &m));
- return Some((document_id.into(), path, body, headers));
+ let PingMetadata {
+ headers,
+ body_has_info_sections,
+ ping_name,
+ } = metadata
+ .and_then(|m| process_metadata(&path, &m))
+ .unwrap_or_default();
+ let ping_name =
+ ping_name.unwrap_or_else(|| path.split('/').nth(3).unwrap_or("").into());
+ return Some(PingPayload {
+ document_id: document_id.into(),
+ upload_path: path,
+ json_body: body,
+ headers,
+ body_has_info_sections: body_has_info_sections.unwrap_or(true),
+ ping_name,
+ });
} else {
log::warn!(
"Error processing ping file: {}. Ping file is not formatted as expected.",
@@ -303,7 +339,7 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, true, true, vec![]);
+ let ping_type = PingType::new("test", true, true, true, true, vec![]);
glean.register_ping_type(&ping_type);
// Submit the ping to populate the pending_pings directory
@@ -320,7 +356,8 @@ mod test {
// Verify request was returned for the "test" ping
let ping = &data.pending_pings[0].1;
- let request_ping_type = ping.1.split('/').nth(3).unwrap();
+ let request_ping_type = ping.upload_path.split('/').nth(3).unwrap();
+ assert_eq!(request_ping_type, ping.ping_name);
assert_eq!(request_ping_type, "test");
}
@@ -329,7 +366,7 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, true, true, vec![]);
+ let ping_type = PingType::new("test", true, true, true, true, vec![]);
glean.register_ping_type(&ping_type);
// Submit the ping to populate the pending_pings directory
@@ -352,7 +389,8 @@ mod test {
// Verify request was returned for the "test" ping
let ping = &data.pending_pings[0].1;
- let request_ping_type = ping.1.split('/').nth(3).unwrap();
+ let request_ping_type = ping.upload_path.split('/').nth(3).unwrap();
+ assert_eq!(request_ping_type, ping.ping_name);
assert_eq!(request_ping_type, "test");
// Verify that file was indeed deleted
@@ -364,7 +402,7 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, true, true, vec![]);
+ let ping_type = PingType::new("test", true, true, true, true, vec![]);
glean.register_ping_type(&ping_type);
// Submit the ping to populate the pending_pings directory
@@ -387,7 +425,8 @@ mod test {
// Verify request was returned for the "test" ping
let ping = &data.pending_pings[0].1;
- let request_ping_type = ping.1.split('/').nth(3).unwrap();
+ let request_ping_type = ping.upload_path.split('/').nth(3).unwrap();
+ assert_eq!(request_ping_type, ping.ping_name);
assert_eq!(request_ping_type, "test");
// Verify that file was indeed deleted
@@ -414,7 +453,8 @@ mod test {
// Verify request was returned for the "deletion-request" ping
let ping = &data.deletion_request_pings[0].1;
- let request_ping_type = ping.1.split('/').nth(3).unwrap();
+ let request_ping_type = ping.upload_path.split('/').nth(3).unwrap();
+ assert_eq!(request_ping_type, ping.ping_name);
assert_eq!(request_ping_type, "deletion-request");
}
}
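
A minimal sketch (not part of the diff) of what the optional metadata line of a ping file can look like after this change, and of the path-based fallback `process_file` uses for `ping_name` when that line predates this version. The `PingMetadata` struct is redeclared locally (with `HeaderMap` assumed to be a plain name-to-value map) so the snippet stands alone; it only needs `serde` and `serde_json`, and the header value shown is illustrative.

use std::collections::HashMap;
use serde::Deserialize;

// Local stand-in for glean-core's PingMetadata, same field names as above.
#[derive(Default, Deserialize)]
struct PingMetadata {
    headers: Option<HashMap<String, String>>,
    body_has_info_sections: Option<bool>,
    ping_name: Option<String>,
}

fn main() {
    // Newer ping files carry all three fields; older ones may have only `headers`,
    // or no metadata line at all, in which case the defaults apply.
    let line = r#"{"headers":{"X-Debug-ID":"test-tag"},"body_has_info_sections":false,"ping_name":"baseline"}"#;
    let meta: PingMetadata = serde_json::from_str(line).unwrap_or_default();
    assert_eq!(meta.ping_name.as_deref(), Some("baseline"));
    assert_eq!(meta.body_has_info_sections, Some(false));

    // Fallback when `ping_name` is missing: take the fourth segment of the upload
    // path, formatted as /submit/<app_id>/<ping_name>/<schema_version>/<doc_id>.
    let path = "/submit/org-mozilla-example/baseline/1/0155d8b8-9c17-4a0d-a53e-123456789abc";
    let from_path = path.split('/').nth(3).unwrap_or("");
    assert_eq!(from_path, "baseline");
}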
diff --git a/third_party/rust/glean-core/src/upload/mod.rs b/third_party/rust/glean-core/src/upload/mod.rs
index d764dcd29e..e51a9d9508 100644
--- a/third_party/rust/glean-core/src/upload/mod.rs
+++ b/third_party/rust/glean-core/src/upload/mod.rs
@@ -30,6 +30,7 @@ use directory::{PingDirectoryManager, PingPayloadsByDirectory};
use policy::Policy;
use request::create_date_header_value;
+pub use directory::{PingMetadata, PingPayload};
pub use request::{HeaderMap, PingRequest};
pub use result::{UploadResult, UploadTaskAction};
@@ -322,21 +323,24 @@ impl PingUploadManager {
///
/// Returns the `PingRequest` or `None` if unable to build,
/// in which case it will delete the ping file and record an error.
- fn build_ping_request(
- &self,
- glean: &Glean,
- document_id: &str,
- path: &str,
- body: &str,
- headers: Option<HeaderMap>,
- ) -> Option<PingRequest> {
+ fn build_ping_request(&self, glean: &Glean, ping: PingPayload) -> Option<PingRequest> {
+ let PingPayload {
+ document_id,
+ upload_path: path,
+ json_body: body,
+ headers,
+ body_has_info_sections,
+ ping_name,
+ } = ping;
let mut request = PingRequest::builder(
&self.language_binding_name,
self.policy.max_ping_body_size(),
)
- .document_id(document_id)
+ .document_id(&document_id)
.path(path)
- .body(body);
+ .body(body)
+ .body_has_info_sections(body_has_info_sections)
+ .ping_name(ping_name);
if let Some(headers) = headers {
request = request.headers(headers);
@@ -346,7 +350,7 @@ impl PingUploadManager {
Ok(request) => Some(request),
Err(e) => {
log::warn!("Error trying to build ping request: {}", e);
- self.directory_manager.delete_file(document_id);
+ self.directory_manager.delete_file(&document_id);
// Record the error.
// Currently the only possible error is PingBodyOverflow.
@@ -362,23 +366,21 @@ impl PingUploadManager {
}
/// Enqueue a ping for upload.
- pub fn enqueue_ping(
- &self,
- glean: &Glean,
- document_id: &str,
- path: &str,
- body: &str,
- headers: Option<HeaderMap>,
- ) {
+ pub fn enqueue_ping(&self, glean: &Glean, ping: PingPayload) {
let mut queue = self
.queue
.write()
.expect("Can't write to pending pings queue.");
+ let PingPayload {
+ ref document_id,
+ upload_path: ref path,
+ ..
+ } = ping;
// Checks if a ping with this `document_id` is already enqueued.
if queue
.iter()
- .any(|request| request.document_id == document_id)
+ .any(|request| request.document_id.as_str() == document_id)
{
log::warn!(
"Attempted to enqueue a duplicate ping {} at {}.",
@@ -404,7 +406,7 @@ impl PingUploadManager {
}
log::trace!("Enqueuing ping {} at {}", document_id, path);
- if let Some(request) = self.build_ping_request(glean, document_id, path, body, headers) {
+ if let Some(request) = self.build_ping_request(glean, ping) {
queue.push_back(request)
}
}
@@ -455,7 +457,7 @@ impl PingUploadManager {
// Thus, we reverse the order of the pending pings vector,
// so that we iterate in descending order (newest -> oldest).
cached_pings.pending_pings.reverse();
- cached_pings.pending_pings.retain(|(file_size, (document_id, _, _, _))| {
+ cached_pings.pending_pings.retain(|(file_size, PingPayload {document_id, ..})| {
pending_pings_count += 1;
pending_pings_directory_size += file_size;
@@ -493,14 +495,14 @@ impl PingUploadManager {
// Enqueue the remaining pending pings and
// enqueue all deletion-request pings.
- let deletion_request_pings = cached_pings.deletion_request_pings.drain(..);
- for (_, (document_id, path, body, headers)) in deletion_request_pings {
- self.enqueue_ping(glean, &document_id, &path, &body, headers);
- }
- let pending_pings = cached_pings.pending_pings.drain(..);
- for (_, (document_id, path, body, headers)) in pending_pings {
- self.enqueue_ping(glean, &document_id, &path, &body, headers);
- }
+ cached_pings
+ .deletion_request_pings
+ .drain(..)
+ .for_each(|(_, ping)| self.enqueue_ping(glean, ping));
+ cached_pings
+ .pending_pings
+ .drain(..)
+ .for_each(|(_, ping)| self.enqueue_ping(glean, ping));
}
}
@@ -532,10 +534,8 @@ impl PingUploadManager {
/// * `glean` - The Glean object holding the database.
/// * `document_id` - The UUID of the ping in question.
pub fn enqueue_ping_from_file(&self, glean: &Glean, document_id: &str) {
- if let Some((doc_id, path, body, headers)) =
- self.directory_manager.process_file(document_id)
- {
- self.enqueue_ping(glean, &doc_id, &path, &body, headers)
+ if let Some(ping) = self.directory_manager.process_file(document_id) {
+ self.enqueue_ping(glean, ping);
}
}
@@ -883,7 +883,17 @@ mod test {
let upload_manager = PingUploadManager::no_policy(dir.path());
// Enqueue a ping
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
// Try and get the next request.
// Verify request was returned
@@ -900,7 +910,17 @@ mod test {
// Enqueue a ping multiple times
let n = 10;
for _ in 0..n {
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
}
// Verify a request is returned for each submitted ping
@@ -928,7 +948,17 @@ mod test {
// Enqueue the max number of pings allowed per uploading window
for _ in 0..max_pings_per_interval {
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
}
// Verify a request is returned for each submitted ping
@@ -938,7 +968,17 @@ mod test {
}
// Enqueue just one more ping
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
// Verify that we are indeed told to wait because we are at capacity
match upload_manager.get_upload_task(&glean, false) {
@@ -961,7 +1001,17 @@ mod test {
// Enqueue a ping multiple times
for _ in 0..10 {
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
}
// Clear the queue
@@ -979,7 +1029,14 @@ mod test {
let (mut glean, _t) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit the ping multiple times
@@ -1011,7 +1068,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit the ping multiple times
@@ -1041,7 +1105,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit a ping
@@ -1071,7 +1142,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit a ping
@@ -1101,7 +1179,14 @@ mod test {
let (mut glean, _t) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit a ping
@@ -1133,7 +1218,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit a ping
@@ -1174,7 +1266,17 @@ mod test {
let path2 = format!("/submit/app_id/test-ping/1/{}", doc2);
// Enqueue a ping
- upload_manager.enqueue_ping(&glean, &doc1, &path1, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: doc1.clone(),
+ upload_path: path1,
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "test-ping".into(),
+ },
+ );
// Try and get the first request.
let req = match upload_manager.get_upload_task(&glean, false) {
@@ -1184,7 +1286,17 @@ mod test {
assert_eq!(doc1, req.document_id);
// Schedule the next one while the first one is "in progress"
- upload_manager.enqueue_ping(&glean, &doc2, &path2, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: doc2.clone(),
+ upload_path: path2,
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "test-ping".into(),
+ },
+ );
// Mark as processed
upload_manager.process_ping_upload_response(
@@ -1221,7 +1333,14 @@ mod test {
glean.set_debug_view_tag("valid-tag");
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit a ping
@@ -1248,8 +1367,28 @@ mod test {
let path = format!("/submit/app_id/test-ping/1/{}", doc_id);
// Try to enqueue a ping with the same doc_id twice
- upload_manager.enqueue_ping(&glean, &doc_id, &path, "", None);
- upload_manager.enqueue_ping(&glean, &doc_id, &path, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: doc_id.clone(),
+ upload_path: path.clone(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "test-ping".into(),
+ },
+ );
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: doc_id,
+ upload_path: path,
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "test-ping".into(),
+ },
+ );
// Get a task once
let task = upload_manager.get_upload_task(&glean, false);
@@ -1267,7 +1406,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit the ping multiple times
@@ -1317,7 +1463,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// Submit the ping multiple times
@@ -1331,7 +1484,10 @@ mod test {
// The pending pings array is sorted by date in ascending order,
// the newest element is the last one.
let (_, newest_ping) = &pending_pings.last().unwrap();
- let (newest_ping_id, _, _, _) = &newest_ping;
+ let PingPayload {
+ document_id: newest_ping_id,
+ ..
+ } = &newest_ping;
// Create a new upload manager pointing to the same data_path as the glean instance.
let mut upload_manager = PingUploadManager::no_policy(dir.path());
@@ -1385,7 +1541,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
// How many pings we allow at maximum
@@ -1406,7 +1569,7 @@ mod test {
.iter()
.rev()
.take(count_quota)
- .map(|(_, ping)| ping.0.clone())
+ .map(|(_, ping)| ping.document_id.clone())
.collect::<Vec<_>>();
// Create a new upload manager pointing to the same data_path as the glean instance.
@@ -1457,7 +1620,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
let expected_number_of_pings = 3;
@@ -1477,7 +1647,7 @@ mod test {
.iter()
.rev()
.take(expected_number_of_pings)
- .map(|(_, ping)| ping.0.clone())
+ .map(|(_, ping)| ping.document_id.clone())
.collect::<Vec<_>>();
// Create a new upload manager pointing to the same data_path as the glean instance.
@@ -1531,7 +1701,14 @@ mod test {
let (mut glean, dir) = new_glean(None);
// Register a ping for testing
- let ping_type = PingType::new("test", true, /* send_if_empty */ true, true, vec![]);
+ let ping_type = PingType::new(
+ "test",
+ true,
+ /* send_if_empty */ true,
+ true,
+ true,
+ vec![],
+ );
glean.register_ping_type(&ping_type);
let expected_number_of_pings = 2;
@@ -1551,7 +1728,7 @@ mod test {
.iter()
.rev()
.take(expected_number_of_pings)
- .map(|(_, ping)| ping.0.clone())
+ .map(|(_, ping)| ping.document_id.clone())
.collect::<Vec<_>>();
// Create a new upload manager pointing to the same data_path as the glean instance.
@@ -1622,8 +1799,28 @@ mod test {
upload_manager.set_rate_limiter(secs_per_interval, max_pings_per_interval);
// Enqueue two pings
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
- upload_manager.enqueue_ping(&glean, &Uuid::new_v4().to_string(), PATH, "", None);
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
+ upload_manager.enqueue_ping(
+ &glean,
+ PingPayload {
+ document_id: Uuid::new_v4().to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ },
+ );
// Get the first ping, it should be returned normally.
match upload_manager.get_upload_task(&glean, false) {
@@ -1679,12 +1876,28 @@ mod test {
let upload_manager = PingUploadManager::no_policy(dir.path());
// Enqueue a ping and start processing it
- let identifier = &Uuid::new_v4().to_string();
- upload_manager.enqueue_ping(&glean, identifier, PATH, "", None);
+ let identifier = &Uuid::new_v4();
+ let ping = PingPayload {
+ document_id: identifier.to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ };
+ upload_manager.enqueue_ping(&glean, ping);
assert!(upload_manager.get_upload_task(&glean, false).is_upload());
// Attempt to re-enqueue the same ping
- upload_manager.enqueue_ping(&glean, identifier, PATH, "", None);
+ let ping = PingPayload {
+ document_id: identifier.to_string(),
+ upload_path: PATH.into(),
+ json_body: "".into(),
+ headers: None,
+ body_has_info_sections: true,
+ ping_name: "ping-name".into(),
+ };
+ upload_manager.enqueue_ping(&glean, ping);
// No new pings should have been enqueued so the upload task is Done.
assert_eq!(
@@ -1695,7 +1908,7 @@ mod test {
// Process the upload response
upload_manager.process_ping_upload_response(
&glean,
- identifier,
+ &identifier.to_string(),
UploadResult::http_status(200),
);
}
diff --git a/third_party/rust/glean-core/src/upload/request.rs b/third_party/rust/glean-core/src/upload/request.rs
index 0fd5ec5713..b4ac6eba97 100644
--- a/third_party/rust/glean-core/src/upload/request.rs
+++ b/third_party/rust/glean-core/src/upload/request.rs
@@ -62,6 +62,8 @@ pub struct Builder {
body: Option<Vec<u8>>,
headers: HeaderMap,
body_max_size: usize,
+ body_has_info_sections: Option<bool>,
+ ping_name: Option<String>,
}
impl Builder {
@@ -87,6 +89,8 @@ impl Builder {
body: None,
headers,
body_max_size,
+ body_has_info_sections: None,
+ ping_name: None,
}
}
@@ -138,6 +142,18 @@ impl Builder {
self
}
+ /// Sets whether the request body has {client|ping}_info sections.
+ pub fn body_has_info_sections(mut self, body_has_info_sections: bool) -> Self {
+ self.body_has_info_sections = Some(body_has_info_sections);
+ self
+ }
+
+ /// Sets the ping's name aka doctype.
+ pub fn ping_name<S: Into<String>>(mut self, ping_name: S) -> Self {
+ self.ping_name = Some(ping_name.into());
+ self
+ }
+
/// Sets a header for this request.
pub fn header<S: Into<String>>(mut self, key: S, value: S) -> Self {
self.headers.insert(key.into(), value.into());
@@ -174,6 +190,12 @@ impl Builder {
.expect("path must be set before attempting to build PingRequest"),
body,
headers: self.headers,
+ body_has_info_sections: self.body_has_info_sections.expect(
+ "body_has_info_sections must be set before attempting to build PingRequest",
+ ),
+ ping_name: self
+ .ping_name
+ .expect("ping_name must be set before attempting to build PingRequest"),
})
}
}
@@ -192,6 +214,10 @@ pub struct PingRequest {
pub body: Vec<u8>,
/// A map with all the headers to be sent with the request.
pub headers: HeaderMap,
+ /// Whether the body has {client|ping}_info sections.
+ pub body_has_info_sections: bool,
+ /// The ping's name. Likely also somewhere in `path`.
+ pub ping_name: String,
}
impl PingRequest {
@@ -208,12 +234,7 @@ impl PingRequest {
/// Verifies if current request is for a deletion-request ping.
pub fn is_deletion_request(&self) -> bool {
- // The path format should be `/submit/<app_id>/<ping_name>/<schema_version/<doc_id>`
- self.path
- .split('/')
- .nth(3)
- .map(|url| url == "deletion-request")
- .unwrap_or(false)
+ self.ping_name == "deletion-request"
}
/// Decompresses and pretty-format the ping payload
@@ -257,11 +278,15 @@ mod test {
.document_id("woop")
.path("/random/path/doesnt/matter")
.body("{}")
+ .body_has_info_sections(false)
+ .ping_name("whatevs")
.build()
.unwrap();
assert_eq!(request.document_id, "woop");
assert_eq!(request.path, "/random/path/doesnt/matter");
+ assert!(!request.body_has_info_sections);
+ assert_eq!(request.ping_name, "whatevs");
// Make sure all the expected headers were added.
assert!(request.headers.contains_key("X-Telemetry-Agent"));
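
A sketch of the builder chain after this change; it only compiles inside glean-core where `PingRequest::builder` is visible, and the language-binding name and body size limit are illustrative. The two new setters are mandatory: `build()` expects them just like `document_id` and `path`.

// Sketch only, mirroring the updated unit test above.
let request = PingRequest::builder(/* language_binding_name */ "Rust", /* body_max_size */ 1024 * 1024)
    .document_id("woop")
    .path("/submit/app_id/whatevs/1/woop")
    .body("{}")
    .body_has_info_sections(false) // new: required before build()
    .ping_name("whatevs")          // new: required before build()
    .build()
    .unwrap();

// Deletion-request detection now compares the stored name instead of re-parsing `path`.
assert!(!request.is_deletion_request());
assert_eq!(request.ping_name, "whatevs");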
diff --git a/third_party/rust/glean-core/tests/custom_distribution.rs b/third_party/rust/glean-core/tests/custom_distribution.rs
index 43c69fb26d..4cdfa5e99b 100644
--- a/third_party/rust/glean-core/tests/custom_distribution.rs
+++ b/third_party/rust/glean-core/tests/custom_distribution.rs
@@ -40,7 +40,7 @@ mod linear {
HistogramType::Linear,
);
- metric.accumulate_samples_sync(&glean, vec![50]);
+ metric.accumulate_samples_sync(&glean, &[50]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -84,7 +84,7 @@ mod linear {
HistogramType::Linear,
);
- metric.accumulate_samples_sync(&glean, vec![50]);
+ metric.accumulate_samples_sync(&glean, &[50]);
for store_name in store_names {
let snapshot = StorageManager
@@ -126,7 +126,7 @@ mod linear {
// Accumulate the samples. We intentionally do not report
// negative values to not trigger error reporting.
- metric.accumulate_samples_sync(&glean, [1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -168,7 +168,7 @@ mod linear {
);
// Accumulate the samples.
- metric.accumulate_samples_sync(&glean, [-1, 1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[-1, 1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -209,7 +209,7 @@ mod linear {
HistogramType::Linear,
);
- metric.accumulate_samples_sync(&glean, vec![50]);
+ metric.accumulate_samples_sync(&glean, &[50]);
let snapshot = metric.get_value(&glean, "store1");
assert!(snapshot.is_some());
@@ -242,7 +242,7 @@ mod exponential {
HistogramType::Exponential,
);
- metric.accumulate_samples_sync(&glean, vec![50]);
+ metric.accumulate_samples_sync(&glean, &[50]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -286,7 +286,7 @@ mod exponential {
HistogramType::Exponential,
);
- metric.accumulate_samples_sync(&glean, vec![50]);
+ metric.accumulate_samples_sync(&glean, &[50]);
for store_name in store_names {
let snapshot = StorageManager
@@ -328,7 +328,7 @@ mod exponential {
// Accumulate the samples. We intentionally do not report
// negative values to not trigger error reporting.
- metric.accumulate_samples_sync(&glean, [1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -370,7 +370,7 @@ mod exponential {
);
// Accumulate the samples.
- metric.accumulate_samples_sync(&glean, [-1, 1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[-1, 1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -411,7 +411,7 @@ mod exponential {
HistogramType::Exponential,
);
- metric.accumulate_samples_sync(&glean, vec![50]);
+ metric.accumulate_samples_sync(&glean, &[50]);
let snapshot = metric.get_value(&glean, "store1");
assert!(snapshot.is_some());
diff --git a/third_party/rust/glean-core/tests/event.rs b/third_party/rust/glean-core/tests/event.rs
index ed8f7d807f..c83e225ca2 100644
--- a/third_party/rust/glean-core/tests/event.rs
+++ b/third_party/rust/glean-core/tests/event.rs
@@ -166,6 +166,7 @@ fn test_sending_of_event_ping_when_it_fills_up() {
true,
false,
true,
+ true,
vec!["max_capacity".to_string()],
));
}
@@ -450,6 +451,7 @@ fn event_storage_trimming() {
true,
false,
true,
+ true,
vec![],
));
diff --git a/third_party/rust/glean-core/tests/object.rs b/third_party/rust/glean-core/tests/object.rs
new file mode 100644
index 0000000000..1e734e99d2
--- /dev/null
+++ b/third_party/rust/glean-core/tests/object.rs
@@ -0,0 +1,104 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+mod common;
+use crate::common::*;
+
+use serde_json::json;
+
+use glean_core::metrics::*;
+use glean_core::storage::StorageManager;
+use glean_core::{CommonMetricData, Lifetime};
+
+#[test]
+fn object_serializer_should_correctly_serialize_objects() {
+ let (mut tempdir, _) = tempdir();
+
+ {
+ // We give tempdir to the `new_glean` function...
+ let (glean, dir) = new_glean(Some(tempdir));
+ // And then we get it back once that function returns.
+ tempdir = dir;
+
+ let metric = ObjectMetric::new(CommonMetricData {
+ name: "object_metric".into(),
+ category: "telemetry".into(),
+ send_in_pings: vec!["store1".into()],
+ disabled: false,
+ lifetime: Lifetime::User,
+ ..Default::default()
+ });
+
+ let obj = serde_json::from_str("{ \"value\": 1 }").unwrap();
+ metric.set_sync(&glean, obj);
+
+ let snapshot = StorageManager
+ .snapshot_as_json(glean.storage(), "store1", true)
+ .unwrap();
+ assert_eq!(
+ json!({"object": {"telemetry.object_metric": { "value": 1 }}}),
+ snapshot
+ );
+ }
+
+ // Make a new Glean instance here, which should force reloading of the data from disk
+ // so we can ensure it persisted, because it has User lifetime
+ {
+ let (glean, _t) = new_glean(Some(tempdir));
+ let snapshot = StorageManager
+ .snapshot_as_json(glean.storage(), "store1", true)
+ .unwrap();
+ assert_eq!(
+ json!({"object": {"telemetry.object_metric": { "value": 1 }}}),
+ snapshot
+ );
+ }
+}
+
+#[test]
+fn set_value_properly_sets_the_value_in_all_stores() {
+ let (glean, _t) = new_glean(None);
+ let store_names: Vec<String> = vec!["store1".into(), "store2".into()];
+
+ let metric = ObjectMetric::new(CommonMetricData {
+ name: "object_metric".into(),
+ category: "telemetry".into(),
+ send_in_pings: store_names.clone(),
+ disabled: false,
+ lifetime: Lifetime::Ping,
+ ..Default::default()
+ });
+
+ let obj = serde_json::from_str("{ \"value\": 1 }").unwrap();
+ metric.set_sync(&glean, obj);
+
+ for store_name in store_names {
+ let snapshot = StorageManager
+ .snapshot_as_json(glean.storage(), &store_name, true)
+ .unwrap();
+
+ assert_eq!(
+ json!({"object": {"telemetry.object_metric": { "value": 1 }}}),
+ snapshot
+ );
+ }
+}
+
+#[test]
+fn getting_data_json_encoded() {
+ let (glean, _t) = new_glean(None);
+
+ let object: ObjectMetric = ObjectMetric::new(CommonMetricData {
+ name: "transformation".into(),
+ category: "local".into(),
+ send_in_pings: vec!["store1".into()],
+ ..Default::default()
+ });
+
+ let obj_str = "{\"value\":1}";
+ let obj = serde_json::from_str(obj_str).unwrap();
+ object.set_sync(&glean, obj);
+
+ assert_eq!(obj_str, object.get_value(&glean, Some("store1")).unwrap());
+}
diff --git a/third_party/rust/glean-core/tests/ping.rs b/third_party/rust/glean-core/tests/ping.rs
index 0ee3736168..17944b4c24 100644
--- a/third_party/rust/glean-core/tests/ping.rs
+++ b/third_party/rust/glean-core/tests/ping.rs
@@ -15,7 +15,7 @@ use glean_core::Lifetime;
fn write_ping_to_disk() {
let (mut glean, _temp) = new_glean(None);
- let ping = PingType::new("metrics", true, false, true, vec![]);
+ let ping = PingType::new("metrics", true, false, true, true, vec![]);
glean.register_ping_type(&ping);
// We need to store a metric as an empty ping is not stored.
@@ -36,7 +36,7 @@ fn write_ping_to_disk() {
fn disabling_upload_clears_pending_pings() {
let (mut glean, _t) = new_glean(None);
- let ping = PingType::new("metrics", true, false, true, vec![]);
+ let ping = PingType::new("metrics", true, false, true, true, vec![]);
glean.register_ping_type(&ping);
// We need to store a metric as an empty ping is not stored.
@@ -105,9 +105,9 @@ fn deletion_request_only_when_toggled_from_on_to_off() {
fn empty_pings_with_flag_are_sent() {
let (mut glean, _t) = new_glean(None);
- let ping1 = PingType::new("custom-ping1", true, true, true, vec![]);
+ let ping1 = PingType::new("custom-ping1", true, true, true, true, vec![]);
glean.register_ping_type(&ping1);
- let ping2 = PingType::new("custom-ping2", true, false, true, vec![]);
+ let ping2 = PingType::new("custom-ping2", true, false, true, true, vec![]);
glean.register_ping_type(&ping2);
// No data is stored in either of the custom pings
@@ -139,10 +139,10 @@ fn test_pings_submitted_metric() {
None,
);
- let metrics_ping = PingType::new("metrics", true, false, true, vec![]);
+ let metrics_ping = PingType::new("metrics", true, false, true, true, vec![]);
glean.register_ping_type(&metrics_ping);
- let baseline_ping = PingType::new("baseline", true, false, true, vec![]);
+ let baseline_ping = PingType::new("baseline", true, false, true, true, vec![]);
glean.register_ping_type(&baseline_ping);
// We need to store a metric as an empty ping is not stored.
@@ -218,7 +218,7 @@ fn test_pings_submitted_metric() {
fn events_ping_with_metric_but_no_events_is_not_sent() {
let (mut glean, _t) = new_glean(None);
- let events_ping = PingType::new("events", true, true, true, vec![]);
+ let events_ping = PingType::new("events", true, true, true, true, vec![]);
glean.register_ping_type(&events_ping);
let counter = CounterMetric::new(CommonMetricData {
name: "counter".into(),
diff --git a/third_party/rust/glean-core/tests/ping_maker.rs b/third_party/rust/glean-core/tests/ping_maker.rs
index 29b6bccaca..bc3aac6311 100644
--- a/third_party/rust/glean-core/tests/ping_maker.rs
+++ b/third_party/rust/glean-core/tests/ping_maker.rs
@@ -13,7 +13,7 @@ fn set_up_basic_ping() -> (Glean, PingMaker, PingType, tempfile::TempDir) {
let (tempdir, _) = tempdir();
let (mut glean, t) = new_glean(Some(tempdir));
let ping_maker = PingMaker::new();
- let ping_type = PingType::new("store1", true, false, true, vec![]);
+ let ping_type = PingType::new("store1", true, false, true, true, vec![]);
glean.register_ping_type(&ping_type);
// Record something, so the ping will have data
@@ -89,12 +89,12 @@ fn test_metrics_must_report_experimentation_id() {
trim_data_to_registered_pings: false,
log_level: None,
rate_limit: None,
- enable_event_timestamps: false,
+ enable_event_timestamps: true,
experimentation_id: Some("test-experimentation-id".to_string()),
})
.unwrap();
let ping_maker = PingMaker::new();
- let ping_type = PingType::new("store1", true, false, true, vec![]);
+ let ping_type = PingType::new("store1", true, false, true, true, vec![]);
glean.register_ping_type(&ping_type);
// Record something, so the ping will have data
@@ -141,13 +141,13 @@ fn experimentation_id_is_removed_if_send_if_empty_is_false() {
trim_data_to_registered_pings: false,
log_level: None,
rate_limit: None,
- enable_event_timestamps: false,
+ enable_event_timestamps: true,
experimentation_id: Some("test-experimentation-id".to_string()),
})
.unwrap();
let ping_maker = PingMaker::new();
- let unknown_ping_type = PingType::new("unknown", true, false, true, vec![]);
+ let unknown_ping_type = PingType::new("unknown", true, false, true, true, vec![]);
glean.register_ping_type(&unknown_ping_type);
assert!(ping_maker
@@ -163,7 +163,7 @@ fn collect_must_report_none_when_no_data_is_stored() {
let (mut glean, ping_maker, ping_type, _t) = set_up_basic_ping();
- let unknown_ping_type = PingType::new("unknown", true, false, true, vec![]);
+ let unknown_ping_type = PingType::new("unknown", true, false, true, true, vec![]);
glean.register_ping_type(&ping_type);
assert!(ping_maker
@@ -187,7 +187,7 @@ fn seq_number_must_be_sequential() {
for i in 0..=1 {
for ping_name in ["store1", "store2"].iter() {
- let ping_type = PingType::new(*ping_name, true, false, true, vec![]);
+ let ping_type = PingType::new(*ping_name, true, false, true, true, vec![]);
let ping = ping_maker
.collect(&glean, &ping_type, None, "", "")
.unwrap();
@@ -200,7 +200,7 @@ fn seq_number_must_be_sequential() {
// Test that ping sequence numbers increase independently.
{
- let ping_type = PingType::new("store1", true, false, true, vec![]);
+ let ping_type = PingType::new("store1", true, false, true, true, vec![]);
// 3rd ping of store1
let ping = ping_maker
@@ -218,7 +218,7 @@ fn seq_number_must_be_sequential() {
}
{
- let ping_type = PingType::new("store2", true, false, true, vec![]);
+ let ping_type = PingType::new("store2", true, false, true, true, vec![]);
// 3rd ping of store2
let ping = ping_maker
@@ -229,7 +229,7 @@ fn seq_number_must_be_sequential() {
}
{
- let ping_type = PingType::new("store1", true, false, true, vec![]);
+ let ping_type = PingType::new("store1", true, false, true, true, vec![]);
// 5th ping of store1
let ping = ping_maker
@@ -244,7 +244,7 @@ fn seq_number_must_be_sequential() {
fn clear_pending_pings() {
let (mut glean, _t) = new_glean(None);
let ping_maker = PingMaker::new();
- let ping_type = PingType::new("store1", true, false, true, vec![]);
+ let ping_type = PingType::new("store1", true, false, true, true, vec![]);
glean.register_ping_type(&ping_type);
// Record something, so the ping will have data
@@ -272,7 +272,7 @@ fn no_pings_submitted_if_upload_disabled() {
// Regression test, bug 1603571
let (mut glean, _t) = new_glean(None);
- let ping_type = PingType::new("store1", true, true, true, vec![]);
+ let ping_type = PingType::new("store1", true, true, true, true, vec![]);
glean.register_ping_type(&ping_type);
assert!(ping_type.submit_sync(&glean, None));
@@ -290,7 +290,7 @@ fn no_pings_submitted_if_upload_disabled() {
fn metadata_is_correctly_added_when_necessary() {
let (mut glean, _t) = new_glean(None);
glean.set_debug_view_tag("valid-tag");
- let ping_type = PingType::new("store1", true, true, true, vec![]);
+ let ping_type = PingType::new("store1", true, true, true, true, vec![]);
glean.register_ping_type(&ping_type);
assert!(ping_type.submit_sync(&glean, None));
diff --git a/third_party/rust/glean-core/tests/timing_distribution.rs b/third_party/rust/glean-core/tests/timing_distribution.rs
index 96f7fae5af..59ce5fbadb 100644
--- a/third_party/rust/glean-core/tests/timing_distribution.rs
+++ b/third_party/rust/glean-core/tests/timing_distribution.rs
@@ -169,7 +169,7 @@ fn the_accumulate_samples_api_correctly_stores_timing_values() {
// Accumulate the samples. We intentionally do not report
// negative values to not trigger error reporting.
- metric.accumulate_samples_sync(&glean, [1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -211,7 +211,7 @@ fn the_accumulate_samples_api_correctly_handles_negative_values() {
);
// Accumulate the samples.
- metric.accumulate_samples_sync(&glean, [-1, 1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[-1, 1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
@@ -255,7 +255,7 @@ fn the_accumulate_samples_api_correctly_handles_overflowing_values() {
const MAX_SAMPLE_TIME: u64 = 1000 * 1000 * 1000 * 60 * 10;
let overflowing_val = MAX_SAMPLE_TIME as i64 + 1;
// Accumulate the samples.
- metric.accumulate_samples_sync(&glean, [overflowing_val, 1, 2, 3].to_vec());
+ metric.accumulate_samples_sync(&glean, &[overflowing_val, 1, 2, 3]);
let snapshot = metric
.get_value(&glean, "store1")
diff --git a/third_party/rust/glean/.cargo-checksum.json b/third_party/rust/glean/.cargo-checksum.json
index 74c885aad7..f624e73c99 100644
--- a/third_party/rust/glean/.cargo-checksum.json
+++ b/third_party/rust/glean/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"13aebaf3dc74c32fa745410731828cf3b8d27025f5d3371b31178531bf49b009","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"5627cc81e6187ab6c2b4dff061af16d559edcab64ba786bac39daa69c703c595","src/common_test.rs":"de47b53dcca37985c0a2b8c02daecbf32309aa54f5a4dd9290719c2c1fd0fa55","src/configuration.rs":"186b9d92e48f2f34f1ea51023ad83c9d7abec97bcb0b862641bcb79a93c97d9e","src/core_metrics.rs":"fef8fb4e5fa57c179836c6eb2cf59278fe3b8b036dbe57b0ff02971b4acd822f","src/lib.rs":"c0229403026edc22ddf53ea60ea92cdd2eb820c06f63a6057b3ef10050800279","src/net/http_uploader.rs":"43812a70d19a38e8d7a093c8076c2b6345372c3c861b0f3511428762700a65e0","src/net/mod.rs":"612a9f13ade0b202c8762bccc7b5dc288101cb3820e47be2755331911a221c55","src/private/event.rs":"d7c70c02648584c19c73af89e5180d3c6153c911f2c6830f7d1599b18d6150eb","src/private/mod.rs":"eb8fe4e588bb32a54617324db39319920c627e6fc23c23cf4da5c17c63e0afed","src/private/ping.rs":"da7545b8fc3b8cbaa8f598dc18fa4ba7c499573c360e8de6e1714d38dd4a665d","src/system.rs":"6eae5b41c15eba9cad6dbd116abe3519ee3e1fe034e79bdd692b029829a8c384","src/test.rs":"3ff6cf151f416e7a8d0f1b54ff28360ad7ebd3dcf47df02c08a8039702d636d9","tests/common/mod.rs":"08fb9483d9b6ed9fe873b4395245166ae8a15263be750c7a8e298c41d9604745","tests/init_fails.rs":"96bbb67ec64976f505fc05eb2c2e101f06c11bd2eaf6a0c6383b1a3e05a1cf4b","tests/never_init.rs":"2cf13a67b34cda8447f489cf3755eba0595978d242d29025670f9bb7c1025937","tests/no_time_to_init.rs":"5fd92258e3fb0302c7f8ab1dee374ece7343cc727206d9da134442bca746b88d","tests/overflowing_preinit.rs":"7ad4b2274dd9240b53430859a4eb1d2597cf508a5a678333f3d3abbadd2ed4a7","tests/persist_ping_lifetime.rs":"81415dc1d74743f02269f0d0dfa524003147056853f080276972e64a0b761d3c","tests/persist_ping_lifetime_nopanic.rs":"18379d3ffbf4a2c8c684c04ff7a0660b86dfbbb447db2d24dfed6073cb7ddf8f","tests/schema.rs":"e611cc168b648b5f515c54873c71b3e5e86431afa2aaff94581eeeb4f1d4935d","tests/simple.rs":"3a762995c06c0adc5adb94ad9c37e41dbbcf08a96b87bc20a449240fa0d93b0c","tests/test-shutdown-blocking.sh":"9b16a01c190c7062474dd92182298a3d9a27928c8fa990340fdd798e6cdb7ab2","tests/test-thread-crashing.sh":"ff1bc8e5d7e4ba3a10d0d38bef222db8bfba469e7d30e45b1053d177a4084f09","tests/upload_timing.rs":"4100ddba97cad28e1abfeb1492dfff61d151a67360392393d6f90d891b3e86be"},"package":"ae5847ad58b7f925c984de7f4dffcad67d7d0befa59a5a888cf93741b5ef1e6a"} \ No newline at end of file
+{"files":{"Cargo.toml":"29b8551de6fff2f0fa3a821eb933f71a2a326b3ce3d37c25bcef3001f9146dfb","LICENSE":"1f256ecad192880510e84ad60474eab7589218784b9a50bc7ceee34c2b91f1d5","README.md":"5627cc81e6187ab6c2b4dff061af16d559edcab64ba786bac39daa69c703c595","src/common_test.rs":"454df3d99eef045270e813946f921f56c39b16c18a5fadedc32829c3d44129cf","src/configuration.rs":"82b3a7933d913e1e2a4f328a76621db2d2e618d209d9785086d64c5c78c2a2d6","src/core_metrics.rs":"fef8fb4e5fa57c179836c6eb2cf59278fe3b8b036dbe57b0ff02971b4acd822f","src/lib.rs":"aa9c81fc6dc19ca1cb4bede25d554377a5d717fb3b246967edb1be12a395ce61","src/net/http_uploader.rs":"01ad5bd91384411a12c74434cd1c5cd585078cb34faba4615c70bdb669a9bccb","src/net/mod.rs":"f47b96bb878f1a6c771cedbaeaeefb270bc87fb1d1bbbed1b282dddca16216ed","src/private/event.rs":"d7c70c02648584c19c73af89e5180d3c6153c911f2c6830f7d1599b18d6150eb","src/private/mod.rs":"3565eb569d2b96f938f130abe0fc3ee3f55e7e03fd6501e309d3ef6af72ef6ee","src/private/object.rs":"3f70363a196aea46cc163af025a53e48c117c6208babc4bce772bb4c337cced8","src/private/ping.rs":"a6262a3453c77cbf30766c19b535a1bf66a37b2a316e8f87baee03025255c33e","src/system.rs":"6eae5b41c15eba9cad6dbd116abe3519ee3e1fe034e79bdd692b029829a8c384","src/test.rs":"6388b9e8bf96e0fb56ad71b7a5b5630d209ae62f1a65c62e878cbc1757ddd585","tests/common/mod.rs":"08fb9483d9b6ed9fe873b4395245166ae8a15263be750c7a8e298c41d9604745","tests/init_fails.rs":"906bbf0faa613976623e0cf782bd86545b49d76afaab182af7634690b747ebf7","tests/never_init.rs":"19bad996e22f7d6958cc1a650528530aa7d1aeb4a8ab42229a90bbc0315c8ed1","tests/no_time_to_init.rs":"06c81148c27d383cb708c0c80a2e806024c9955337d7adfba8c53aaeade9be67","tests/overflowing_preinit.rs":"7ad4b2274dd9240b53430859a4eb1d2597cf508a5a678333f3d3abbadd2ed4a7","tests/persist_ping_lifetime.rs":"81415dc1d74743f02269f0d0dfa524003147056853f080276972e64a0b761d3c","tests/persist_ping_lifetime_nopanic.rs":"18379d3ffbf4a2c8c684c04ff7a0660b86dfbbb447db2d24dfed6073cb7ddf8f","tests/schema.rs":"9d24028cab4dc60fe3c4d7a0bafbff0815cbc0249fa3e23625d42c3b4fa71734","tests/simple.rs":"1b8b227249ae9d3cc281db07ed779bc75252c7849b1c48b4ac3d765228d65b20","tests/test-shutdown-blocking.sh":"9b16a01c190c7062474dd92182298a3d9a27928c8fa990340fdd798e6cdb7ab2","tests/test-thread-crashing.sh":"ff1bc8e5d7e4ba3a10d0d38bef222db8bfba469e7d30e45b1053d177a4084f09","tests/upload_timing.rs":"3024b7999a0c23f2c3d7e59725b5455522e4e9fdf63e3265b93fea4cec18725f"},"package":"f58388f10d013e2d12bb58e6e76983ede120789956fe827913a3d2560c66d44d"} \ No newline at end of file
diff --git a/third_party/rust/glean/Cargo.toml b/third_party/rust/glean/Cargo.toml
index 1a702be403..bc25a08940 100644
--- a/third_party/rust/glean/Cargo.toml
+++ b/third_party/rust/glean/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.66"
name = "glean"
-version = "57.0.0"
+version = "58.1.0"
authors = [
"Jan-Erik Rediger <jrediger@mozilla.com>",
"The Glean Team <glean-team@mozilla.com>",
@@ -34,15 +34,8 @@ keywords = [
license = "MPL-2.0"
repository = "https://github.com/mozilla/glean"
-[dependencies.chrono]
-version = "0.4.10"
-features = ["serde"]
-
-[dependencies.crossbeam-channel]
-version = "0.5"
-
[dependencies.glean-core]
-version = "57.0.0"
+version = "58.1.0"
[dependencies.inherent]
version = "1"
@@ -53,26 +46,12 @@ version = "0.4.8"
[dependencies.once_cell]
version = "1.18.0"
-[dependencies.serde]
-version = "1.0.104"
-features = ["derive"]
-
-[dependencies.serde_json]
-version = "1.0.44"
-
-[dependencies.thiserror]
-version = "1.0.4"
-
-[dependencies.time]
-version = "0.1.40"
-
-[dependencies.uuid]
-version = "1.0"
-features = ["v4"]
-
[dependencies.whatsys]
version = "0.3.0"
+[dev-dependencies.crossbeam-channel]
+version = "0.5"
+
[dev-dependencies.env_logger]
version = "0.10.0"
features = ["humantime"]
@@ -87,6 +66,9 @@ version = "0.5.0"
[dev-dependencies.libc]
version = "0.2"
+[dev-dependencies.serde_json]
+version = "1.0.44"
+
[dev-dependencies.tempfile]
version = "3.1.0"
diff --git a/third_party/rust/glean/src/common_test.rs b/third_party/rust/glean/src/common_test.rs
index fdb7cfadbf..e3c80da5f2 100644
--- a/third_party/rust/glean/src/common_test.rs
+++ b/third_party/rust/glean/src/common_test.rs
@@ -42,6 +42,7 @@ pub(crate) fn new_glean(
Some(c) => c,
None => ConfigurationBuilder::new(true, tmpname, GLOBAL_APPLICATION_ID)
.with_server_endpoint("invalid-test-host")
+ .with_event_timestamps(false)
.build(),
};
diff --git a/third_party/rust/glean/src/configuration.rs b/third_party/rust/glean/src/configuration.rs
index 42360e96e0..ca0a39c3f1 100644
--- a/third_party/rust/glean/src/configuration.rs
+++ b/third_party/rust/glean/src/configuration.rs
@@ -113,7 +113,7 @@ impl Builder {
trim_data_to_registered_pings: false,
log_level: None,
rate_limit: None,
- enable_event_timestamps: false,
+ enable_event_timestamps: true,
experimentation_id: None,
}
}
diff --git a/third_party/rust/glean/src/lib.rs b/third_party/rust/glean/src/lib.rs
index 538b8c590d..5c5b945a95 100644
--- a/third_party/rust/glean/src/lib.rs
+++ b/third_party/rust/glean/src/lib.rs
@@ -23,7 +23,7 @@
//! let cfg = ConfigurationBuilder::new(true, "/tmp/data", "org.mozilla.glean_core.example").build();
//! glean::initialize(cfg, ClientInfoMetrics::unknown());
//!
-//! let prototype_ping = PingType::new("prototype", true, true, true, vec!());
+//! let prototype_ping = PingType::new("prototype", true, true, true, true, vec!());
//!
//! prototype_ping.submit(None);
//! ```
diff --git a/third_party/rust/glean/src/net/http_uploader.rs b/third_party/rust/glean/src/net/http_uploader.rs
index 4646fe61b4..4ca1687acf 100644
--- a/third_party/rust/glean/src/net/http_uploader.rs
+++ b/third_party/rust/glean/src/net/http_uploader.rs
@@ -2,7 +2,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this
// file, You can obtain one at https://mozilla.org/MPL/2.0/.
-use crate::net::{PingUploader, UploadResult};
+use crate::net::{PingUploadRequest, PingUploader, UploadResult};
/// A simple mechanism to upload pings over HTTPS.
#[derive(Debug)]
@@ -13,12 +13,9 @@ impl PingUploader for HttpUploader {
///
/// # Arguments
///
- /// * `url` - the URL path to upload the data to.
- /// * `body` - the serialized text data to send.
- /// * `headers` - a vector of tuples containing the headers to send with
- /// the request, i.e. (Name, Value).
- fn upload(&self, url: String, _body: Vec<u8>, _headers: Vec<(String, String)>) -> UploadResult {
- log::debug!("TODO bug 1675468: submitting to {:?}", url);
+ /// * `upload_request` - the requested upload.
+ fn upload(&self, upload_request: PingUploadRequest) -> UploadResult {
+ log::debug!("TODO bug 1675468: submitting to {:?}", upload_request.url);
UploadResult::http_status(200)
}
}
diff --git a/third_party/rust/glean/src/net/mod.rs b/third_party/rust/glean/src/net/mod.rs
index 5571d30e67..5546078e63 100644
--- a/third_party/rust/glean/src/net/mod.rs
+++ b/third_party/rust/glean/src/net/mod.rs
@@ -19,6 +19,20 @@ use thread_state::{AtomicState, State};
mod http_uploader;
+/// Everything you need to request a ping to be uploaded.
+pub struct PingUploadRequest {
+ /// The URL the Glean SDK expects you to use to upload the ping.
+ pub url: String,
+ /// The body, already content-encoded, for upload.
+ pub body: Vec<u8>,
+ /// The HTTP headers, including any Content-Encoding.
+ pub headers: Vec<(String, String)>,
+ /// Whether the body has {client|ping}_info sections in it.
+ pub body_has_info_sections: bool,
+ /// The name (aka doctype) of the ping.
+ pub ping_name: String,
+}
+
/// A description of a component used to upload pings.
pub trait PingUploader: std::fmt::Debug + Send + Sync {
/// Uploads a ping to a server.
@@ -29,7 +43,7 @@ pub trait PingUploader: std::fmt::Debug + Send + Sync {
/// * `body` - the serialized text data to send.
/// * `headers` - a vector of tuples containing the headers to send with
/// the request, i.e. (Name, Value).
- fn upload(&self, url: String, body: Vec<u8>, headers: Vec<(String, String)>) -> UploadResult;
+ fn upload(&self, upload_request: PingUploadRequest) -> UploadResult;
}
/// The logic for uploading pings: this leaves the actual upload mechanism as
@@ -105,7 +119,14 @@ impl UploadManager {
let upload_url = format!("{}{}", inner.server_endpoint, request.path);
let headers: Vec<(String, String)> =
request.headers.into_iter().collect();
- let result = inner.uploader.upload(upload_url, request.body, headers);
+ let upload_request = PingUploadRequest {
+ url: upload_url,
+ body: request.body,
+ headers,
+ body_has_info_sections: request.body_has_info_sections,
+ ping_name: request.ping_name,
+ };
+ let result = inner.uploader.upload(upload_request);
// Process the upload response.
match glean_core::glean_process_ping_upload_response(doc_id, result) {
UploadTaskAction::Next => (),
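
For consumers of the `glean` crate, a minimal sketch of a custom uploader written against the new single-argument trait (adapted from the `FakeUploader` in the test changes further down; `eprintln!` stands in for a real transport, and the re-export paths are assumed from this diff):

use glean::net::{PingUploadRequest, PingUploader, UploadResult};

#[derive(Debug)]
struct LoggingUploader;

impl PingUploader for LoggingUploader {
    fn upload(&self, upload_request: PingUploadRequest) -> UploadResult {
        // The request now carries the ping name and the info-sections flag
        // alongside the url, body and headers.
        eprintln!(
            "uploading '{}' ({} bytes, info sections: {}) to {}",
            upload_request.ping_name,
            upload_request.body.len(),
            upload_request.body_has_info_sections,
            upload_request.url
        );
        UploadResult::http_status(200)
    }
}

// Hooked up the same way as before, e.g.
// ConfigurationBuilder::new(...).with_uploader(LoggingUploader).build().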
diff --git a/third_party/rust/glean/src/private/mod.rs b/third_party/rust/glean/src/private/mod.rs
index 8a5c304193..575707cf59 100644
--- a/third_party/rust/glean/src/private/mod.rs
+++ b/third_party/rust/glean/src/private/mod.rs
@@ -5,6 +5,7 @@
//! The different metric types supported by the Glean SDK to handle data.
mod event;
+mod object;
mod ping;
pub use event::EventMetric;
@@ -26,6 +27,7 @@ pub use glean_core::UrlMetric;
pub use glean_core::UuidMetric;
pub use glean_core::{AllowLabeled, LabeledMetric};
pub use glean_core::{Datetime, DatetimeMetric};
+pub use object::ObjectMetric;
pub use ping::PingType;
// Re-export types that are used by the glean_parser-generated code.
diff --git a/third_party/rust/glean/src/private/object.rs b/third_party/rust/glean/src/private/object.rs
new file mode 100644
index 0000000000..f7403ec889
--- /dev/null
+++ b/third_party/rust/glean/src/private/object.rs
@@ -0,0 +1,192 @@
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at https://mozilla.org/MPL/2.0/.
+
+use std::marker::PhantomData;
+
+use glean_core::metrics::JsonValue;
+use glean_core::traits;
+
+use crate::ErrorType;
+
+// We need to wrap the glean-core type: otherwise if we try to implement
+// the trait for the metric in `glean_core::metrics` we hit error[E0117]:
+// only traits defined in the current crate can be implemented for arbitrary
+// types.
+
+/// Developer-facing API for recording object metrics.
+///
+/// Instances of this class type are automatically generated by the parsers
+/// at build time, allowing developers to record values that were previously
+/// registered in the metrics.yaml file.
+#[derive(Clone)]
+pub struct ObjectMetric<K> {
+ pub(crate) inner: glean_core::metrics::ObjectMetric,
+ object_type: PhantomData<K>,
+}
+
+impl<K: traits::ObjectSerialize> ObjectMetric<K> {
+ /// The public constructor used by automatically generated metrics.
+ pub fn new(meta: glean_core::CommonMetricData) -> Self {
+ let inner = glean_core::metrics::ObjectMetric::new(meta);
+ Self {
+ inner,
+ object_type: PhantomData,
+ }
+ }
+
+ /// Sets to the specified structure.
+ ///
+ /// # Arguments
+ ///
+ /// * `object` - the object to set.
+ pub fn set(&self, object: K) {
+ let obj = object
+ .into_serialized_object()
+ .expect("failed to serialize object. This should be impossible.");
+ self.inner.set(obj);
+ }
+
+ /// Sets to the specified structure.
+ ///
+ /// Parses the passed JSON string.
+ /// If it can't be parsed into a valid object it records an invalid value error.
+ ///
+ /// # Arguments
+ ///
+ /// * `object` - JSON representation of the object to set.
+ pub fn set_string(&self, object: String) {
+ let data = match K::from_str(&object) {
+ Ok(data) => data,
+ Err(_) => {
+ self.inner.record_schema_error();
+ return;
+ }
+ };
+ self.set(data)
+ }
+
+ /// **Test-only API (exported for FFI purposes).**
+ ///
+ /// Gets the currently stored value as JSON-encoded string.
+ ///
+ /// This doesn't clear the stored value.
+ pub fn test_get_value<'a, S: Into<Option<&'a str>>>(&self, ping_name: S) -> Option<JsonValue> {
+ let ping_name = ping_name.into().map(|s| s.to_string());
+ self.inner.test_get_value(ping_name)
+ }
+
+ /// **Exported for test purposes.**
+ ///
+ /// Gets the number of recorded errors for the given metric and error type.
+ ///
+ /// # Arguments
+ ///
+ /// * `error` - The type of error
+ ///
+ /// # Returns
+ ///
+ /// The number of errors reported.
+ pub fn test_get_num_recorded_errors(&self, error: ErrorType) -> i32 {
+ self.inner.test_get_num_recorded_errors(error)
+ }
+}
+
+#[cfg(test)]
+mod test {
+ use super::*;
+ use crate::common_test::{lock_test, new_glean};
+ use crate::CommonMetricData;
+
+ use serde_json::json;
+
+ #[test]
+ fn simple_array() {
+ let _lock = lock_test();
+ let _t = new_glean(None, true);
+
+ type SimpleArray = Vec<i64>;
+
+ let metric: ObjectMetric<SimpleArray> = ObjectMetric::new(CommonMetricData {
+ name: "object".into(),
+ category: "test".into(),
+ send_in_pings: vec!["test1".into()],
+ ..Default::default()
+ });
+
+ let arr = SimpleArray::from([1, 2, 3]);
+ metric.set(arr);
+
+ let data = metric.test_get_value(None).expect("no object recorded");
+ let expected = json!([1, 2, 3]);
+ assert_eq!(expected, data);
+ }
+
+ #[test]
+ fn complex_nested_object() {
+ let _lock = lock_test();
+ let _t = new_glean(None, true);
+
+ type BalloonsObject = Vec<BalloonsObjectItem>;
+
+ #[derive(
+ Debug, Hash, Eq, PartialEq, traits::__serde::Deserialize, traits::__serde::Serialize,
+ )]
+ #[serde(crate = "traits::__serde")]
+ #[serde(deny_unknown_fields)]
+ struct BalloonsObjectItem {
+ #[serde(skip_serializing_if = "Option::is_none")]
+ colour: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ diameter: Option<i64>,
+ }
+
+ let metric: ObjectMetric<BalloonsObject> = ObjectMetric::new(CommonMetricData {
+ name: "object".into(),
+ category: "test".into(),
+ send_in_pings: vec!["test1".into()],
+ ..Default::default()
+ });
+
+ let balloons = BalloonsObject::from([
+ BalloonsObjectItem {
+ colour: Some("red".to_string()),
+ diameter: Some(5),
+ },
+ BalloonsObjectItem {
+ colour: Some("green".to_string()),
+ diameter: None,
+ },
+ ]);
+ metric.set(balloons);
+
+ let data = metric.test_get_value(None).expect("no object recorded");
+ let expected = json!([
+ { "colour": "red", "diameter": 5 },
+ { "colour": "green" },
+ ]);
+ assert_eq!(expected, data);
+ }
+
+ #[test]
+ fn set_string_api() {
+ let _lock = lock_test();
+ let _t = new_glean(None, true);
+
+ type SimpleArray = Vec<i64>;
+
+ let metric: ObjectMetric<SimpleArray> = ObjectMetric::new(CommonMetricData {
+ name: "object".into(),
+ category: "test".into(),
+ send_in_pings: vec!["test1".into()],
+ ..Default::default()
+ });
+
+ let arr_str = String::from("[1, 2, 3]");
+ metric.set_string(arr_str);
+
+ let data = metric.test_get_value(None).expect("no object recorded");
+ let expected = json!([1, 2, 3]);
+ assert_eq!(expected, data);
+ }
+}
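
As a companion to the tests above, a small sketch of the error path described in `set_string`'s docs. It assumes the same test harness (`lock_test`/`new_glean`) and that the recorded schema error is surfaced as an `InvalidValue` count, which is an assumption rather than something shown in this diff:

let _lock = lock_test();
let _t = new_glean(None, true);

type SimpleArray = Vec<i64>;

let metric: ObjectMetric<SimpleArray> = ObjectMetric::new(CommonMetricData {
    name: "object".into(),
    category: "test".into(),
    send_in_pings: vec!["test1".into()],
    ..Default::default()
});

// Not valid JSON for a Vec<i64>, so nothing is recorded...
metric.set_string("definitely not JSON".into());
assert_eq!(metric.test_get_value(None), None);
// ...and the failure is expected to show up as an error count instead (assumption).
assert_eq!(1, metric.test_get_num_recorded_errors(ErrorType::InvalidValue));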
diff --git a/third_party/rust/glean/src/private/ping.rs b/third_party/rust/glean/src/private/ping.rs
index c9c68a10a2..6c126992bc 100644
--- a/third_party/rust/glean/src/private/ping.rs
+++ b/third_party/rust/glean/src/private/ping.rs
@@ -33,6 +33,7 @@ impl PingType {
include_client_id: bool,
send_if_empty: bool,
precise_timestamps: bool,
+ include_info_sections: bool,
reason_codes: Vec<String>,
) -> Self {
let inner = glean_core::metrics::PingType::new(
@@ -40,6 +41,7 @@ impl PingType {
include_client_id,
send_if_empty,
precise_timestamps,
+ include_info_sections,
reason_codes,
);
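
Because every call site in this patch just gains another bare `true`, here is a short sketch naming the positional arguments of the updated constructor (names taken from the parameter list above):

use glean::private::PingType;

let custom_ping = PingType::new(
    "prototype",
    /* include_client_id */ true,
    /* send_if_empty */ true,
    /* precise_timestamps */ true,
    /* include_info_sections */ true, // new in this version
    /* reason_codes */ Vec::new(),
);
custom_ping.submit(None);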
diff --git a/third_party/rust/glean/src/test.rs b/third_party/rust/glean/src/test.rs
index cb41b49b66..16d6d05447 100644
--- a/third_party/rust/glean/src/test.rs
+++ b/third_party/rust/glean/src/test.rs
@@ -22,22 +22,16 @@ use crate::common_test::{lock_test, new_glean, GLOBAL_APPLICATION_ID};
fn send_a_ping() {
let _lock = lock_test();
- let (s, r) = crossbeam_channel::bounded::<String>(1);
+ let (s, r) = crossbeam_channel::bounded::<net::PingUploadRequest>(1);
- // Define a fake uploader that reports back the submission URL
- // using a crossbeam channel.
+ // Define a fake uploader that reports back the ping upload request.
#[derive(Debug)]
pub struct FakeUploader {
- sender: crossbeam_channel::Sender<String>,
+ sender: crossbeam_channel::Sender<net::PingUploadRequest>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -55,12 +49,54 @@ fn send_a_ping() {
// Define a new ping and submit it.
const PING_NAME: &str = "test-ping";
- let custom_ping = private::PingType::new(PING_NAME, true, true, true, vec![]);
+ let custom_ping = private::PingType::new(PING_NAME, true, true, true, true, vec![]);
custom_ping.submit(None);
// Wait for the ping to arrive.
- let url = r.recv().unwrap();
- assert!(url.contains(PING_NAME));
+ let upload_request = r.recv().unwrap();
+ assert!(upload_request.body_has_info_sections);
+ assert_eq!(upload_request.ping_name, PING_NAME);
+ assert!(upload_request.url.contains(PING_NAME));
+}
+
+#[test]
+fn send_a_ping_without_info_sections() {
+ let _lock = lock_test();
+
+ let (s, r) = crossbeam_channel::bounded::<net::PingUploadRequest>(1);
+
+ // Define a fake uploader that reports back the ping upload request.
+ #[derive(Debug)]
+ pub struct FakeUploader {
+ sender: crossbeam_channel::Sender<net::PingUploadRequest>,
+ }
+ impl net::PingUploader for FakeUploader {
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request).unwrap();
+ net::UploadResult::http_status(200)
+ }
+ }
+
+ // Create a custom configuration to use a fake uploader.
+ let dir = tempfile::tempdir().unwrap();
+ let tmpname = dir.path().to_path_buf();
+
+ let cfg = ConfigurationBuilder::new(true, tmpname, GLOBAL_APPLICATION_ID)
+ .with_server_endpoint("invalid-test-host")
+ .with_uploader(FakeUploader { sender: s })
+ .build();
+
+ let _t = new_glean(Some(cfg), true);
+
+ // Define a new ping and submit it.
+ const PING_NAME: &str = "noinfo-ping";
+ let custom_ping = private::PingType::new(PING_NAME, true, true, true, false, vec![]);
+ custom_ping.submit(None);
+
+ // Wait for the ping to arrive.
+ let upload_request = r.recv().unwrap();
+ assert!(!upload_request.body_has_info_sections);
+ assert_eq!(upload_request.ping_name, PING_NAME);
}
#[test]
@@ -190,13 +226,8 @@ fn sending_of_foreground_background_pings() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -263,13 +294,8 @@ fn sending_of_startup_baseline_ping() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -315,13 +341,8 @@ fn no_dirty_baseline_on_clean_shutdowns() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -543,12 +564,8 @@ fn ping_collection_must_happen_after_concurrently_scheduled_metrics_recordings()
sender: crossbeam_channel::Sender<(String, JsonValue)>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ let net::PingUploadRequest { body, url, .. } = upload_request;
// Decode the gzipped body.
let mut gzip_decoder = GzDecoder::new(&body[..]);
let mut s = String::with_capacity(body.len());
@@ -577,7 +594,7 @@ fn ping_collection_must_happen_after_concurrently_scheduled_metrics_recordings()
);
let ping_name = "custom_ping_1";
- let ping = private::PingType::new(ping_name, true, false, true, vec![]);
+ let ping = private::PingType::new(ping_name, true, false, true, true, vec![]);
let metric = private::StringMetric::new(CommonMetricData {
name: "string_metric".into(),
category: "telemetry".into(),
@@ -681,13 +698,8 @@ fn sending_deletion_ping_if_disabled_outside_of_run() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -731,13 +743,8 @@ fn no_sending_of_deletion_ping_if_unchanged_outside_of_run() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -779,12 +786,8 @@ fn deletion_request_ping_contains_experimentation_id() {
sender: crossbeam_channel::Sender<JsonValue>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- _url: String,
- body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ let body = upload_request.body;
let mut gzip_decoder = GzDecoder::new(&body[..]);
let mut body_str = String::with_capacity(body.len());
let data: JsonValue = gzip_decoder
@@ -847,12 +850,8 @@ fn test_sending_of_startup_baseline_ping_with_application_lifetime_metric() {
sender: crossbeam_channel::Sender<(String, JsonValue)>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ let net::PingUploadRequest { url, body, .. } = upload_request;
// Decode the gzipped body.
let mut gzip_decoder = GzDecoder::new(&body[..]);
let mut s = String::with_capacity(body.len());
@@ -932,13 +931,8 @@ fn setting_debug_view_tag_before_initialization_should_not_crash() {
sender: crossbeam_channel::Sender<Vec<(String, String)>>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- _url: String,
- _body: Vec<u8>,
- headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(headers).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.headers).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -983,13 +977,8 @@ fn setting_source_tags_before_initialization_should_not_crash() {
sender: crossbeam_channel::Sender<Vec<(String, String)>>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- _url: String,
- _body: Vec<u8>,
- headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(headers).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.headers).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -1033,13 +1022,8 @@ fn setting_source_tags_after_initialization_should_not_crash() {
sender: crossbeam_channel::Sender<Vec<(String, String)>>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- _url: String,
- _body: Vec<u8>,
- headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(headers).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.headers).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -1097,13 +1081,8 @@ fn flipping_upload_enabled_respects_order_of_events() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -1118,7 +1097,7 @@ fn flipping_upload_enabled_respects_order_of_events() {
.build();
// We create a ping and a metric before we initialize Glean
- let sample_ping = PingType::new("sample-ping-1", true, false, true, vec![]);
+ let sample_ping = PingType::new("sample-ping-1", true, false, true, true, vec![]);
let metric = private::StringMetric::new(CommonMetricData {
name: "string_metric".into(),
category: "telemetry".into(),
@@ -1155,19 +1134,14 @@ fn registering_pings_before_init_must_work() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
// Create a custom ping and attempt its registration.
- let sample_ping = PingType::new("pre-register", true, true, true, vec![]);
+ let sample_ping = PingType::new("pre-register", true, true, true, true, vec![]);
// Create a custom configuration to use a fake uploader.
let dir = tempfile::tempdir().unwrap();
@@ -1201,13 +1175,8 @@ fn test_a_ping_before_submission() {
sender: crossbeam_channel::Sender<String>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
- self.sender.send(url).unwrap();
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -1224,7 +1193,7 @@ fn test_a_ping_before_submission() {
let _t = new_glean(Some(cfg), true);
// Create a custom ping and register it.
- let sample_ping = PingType::new("custom1", true, true, true, vec![]);
+ let sample_ping = PingType::new("custom1", true, true, true, true, vec![]);
let metric = CounterMetric::new(CommonMetricData {
name: "counter_metric".into(),
@@ -1308,12 +1277,7 @@ fn signaling_done() {
counter: Arc<Mutex<HashMap<ThreadId, u32>>>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- _url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
+ fn upload(&self, _upload_request: net::PingUploadRequest) -> net::UploadResult {
let mut map = self.counter.lock().unwrap();
*map.entry(thread::current().id()).or_insert(0) += 1;
@@ -1346,7 +1310,7 @@ fn signaling_done() {
// Define a new ping and submit it.
const PING_NAME: &str = "test-ping";
- let custom_ping = private::PingType::new(PING_NAME, true, true, true, vec![]);
+ let custom_ping = private::PingType::new(PING_NAME, true, true, true, true, vec![]);
custom_ping.submit(None);
custom_ping.submit(None);
@@ -1385,17 +1349,12 @@ fn configure_ping_throttling() {
done: Arc<std::sync::atomic::AtomicBool>,
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- url: String,
- _body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
if self.done.load(std::sync::atomic::Ordering::SeqCst) {
// If we've outlived the test, just lie.
return net::UploadResult::http_status(200);
}
- self.sender.send(url).unwrap();
+ self.sender.send(upload_request.url).unwrap();
net::UploadResult::http_status(200)
}
}
@@ -1422,7 +1381,7 @@ fn configure_ping_throttling() {
// Define a new ping.
const PING_NAME: &str = "test-ping";
- let custom_ping = private::PingType::new(PING_NAME, true, true, true, vec![]);
+ let custom_ping = private::PingType::new(PING_NAME, true, true, true, true, vec![]);
// Submit and receive it `pings_per_interval` times.
for _ in 0..pings_per_interval {
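Every fake uploader in this file moves from the old three-argument upload(url, body, headers) to a single PingUploadRequest argument; the hunks above read its url, body, headers, ping_name, and body_has_info_sections fields. A minimal sketch of a test uploader against the new trait shape, assuming the test module's existing net and crossbeam_channel imports (the struct name is illustrative):

    #[derive(Debug)]
    struct CapturingUploader {
        sender: crossbeam_channel::Sender<net::PingUploadRequest>,
    }

    impl net::PingUploader for CapturingUploader {
        fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
            // Forward the whole request so a test can assert on any field
            // (url, body, headers, ping_name, body_has_info_sections).
            self.sender.send(upload_request).unwrap();
            net::UploadResult::http_status(200)
        }
    }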
diff --git a/third_party/rust/glean/tests/init_fails.rs b/third_party/rust/glean/tests/init_fails.rs
index def5acc4b9..a0c23ca277 100644
--- a/third_party/rust/glean/tests/init_fails.rs
+++ b/third_party/rust/glean/tests/init_fails.rs
@@ -43,7 +43,7 @@ mod pings {
#[allow(non_upper_case_globals)]
pub static validation: Lazy<PingType> =
- Lazy::new(|| glean::private::PingType::new("validation", true, true, true, vec![]));
+ Lazy::new(|| glean::private::PingType::new("validation", true, true, true, true, vec![]));
}
/// Test scenario: Glean initialization fails.
diff --git a/third_party/rust/glean/tests/never_init.rs b/third_party/rust/glean/tests/never_init.rs
index 3df472ee31..0d0d3768ff 100644
--- a/third_party/rust/glean/tests/never_init.rs
+++ b/third_party/rust/glean/tests/never_init.rs
@@ -39,7 +39,7 @@ mod pings {
#[allow(non_upper_case_globals)]
pub static validation: Lazy<PingType> =
- Lazy::new(|| glean::private::PingType::new("validation", true, true, true, vec![]));
+ Lazy::new(|| glean::private::PingType::new("validation", true, true, true, true, vec![]));
}
/// Test scenario: Glean is never initialized.
diff --git a/third_party/rust/glean/tests/no_time_to_init.rs b/third_party/rust/glean/tests/no_time_to_init.rs
index 763835f2f3..c312b397af 100644
--- a/third_party/rust/glean/tests/no_time_to_init.rs
+++ b/third_party/rust/glean/tests/no_time_to_init.rs
@@ -41,7 +41,7 @@ mod pings {
#[allow(non_upper_case_globals)]
pub static validation: Lazy<PingType> =
- Lazy::new(|| glean::private::PingType::new("validation", true, true, true, vec![]));
+ Lazy::new(|| glean::private::PingType::new("validation", true, true, true, true, vec![]));
}
/// Test scenario: Glean initialization fails.
diff --git a/third_party/rust/glean/tests/schema.rs b/third_party/rust/glean/tests/schema.rs
index 0a1bf4d2e8..01a2108b3c 100644
--- a/third_party/rust/glean/tests/schema.rs
+++ b/third_party/rust/glean/tests/schema.rs
@@ -10,7 +10,7 @@ use glean_core::TextMetric;
use jsonschema_valid::{self, schemas::Draft};
use serde_json::Value;
-use glean::net::UploadResult;
+use glean::net::{PingUploadRequest, UploadResult};
use glean::private::*;
use glean::{
traits, ClientInfoMetrics, CommonMetricData, ConfigurationBuilder, HistogramType, MemoryUnit,
@@ -60,13 +60,8 @@ fn validate_against_schema() {
sender: crossbeam_channel::Sender<Vec<u8>>,
}
impl glean::net::PingUploader for ValidatingUploader {
- fn upload(
- &self,
- _url: String,
- body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> UploadResult {
- self.sender.send(body).unwrap();
+ fn upload(&self, ping_request: PingUploadRequest) -> UploadResult {
+ self.sender.send(ping_request.body).unwrap();
UploadResult::http_status(200)
}
}
@@ -176,7 +171,7 @@ fn validate_against_schema() {
text_metric.set("loooooong text".repeat(100));
// Define a new ping and submit it.
- let custom_ping = glean::private::PingType::new(PING_NAME, true, true, true, vec![]);
+ let custom_ping = glean::private::PingType::new(PING_NAME, true, true, true, true, vec![]);
custom_ping.submit(None);
// Wait for the ping to arrive.
diff --git a/third_party/rust/glean/tests/simple.rs b/third_party/rust/glean/tests/simple.rs
index 3685d44faa..3baa4df14e 100644
--- a/third_party/rust/glean/tests/simple.rs
+++ b/third_party/rust/glean/tests/simple.rs
@@ -41,7 +41,7 @@ mod pings {
#[allow(non_upper_case_globals)]
pub static validation: Lazy<PingType> =
- Lazy::new(|| glean::private::PingType::new("validation", true, true, true, vec![]));
+ Lazy::new(|| glean::private::PingType::new("validation", true, true, true, true, vec![]));
}
/// Test scenario: A clean run
diff --git a/third_party/rust/glean/tests/upload_timing.rs b/third_party/rust/glean/tests/upload_timing.rs
index 9e77fc3eb5..ba0eee3402 100644
--- a/third_party/rust/glean/tests/upload_timing.rs
+++ b/third_party/rust/glean/tests/upload_timing.rs
@@ -97,7 +97,7 @@ mod pings {
#[allow(non_upper_case_globals)]
pub static validation: Lazy<PingType> =
- Lazy::new(|| glean::private::PingType::new("validation", true, true, true, vec![]));
+ Lazy::new(|| glean::private::PingType::new("validation", true, true, true, true, vec![]));
}
// Define a fake uploader that sleeps.
@@ -108,13 +108,9 @@ struct FakeUploader {
}
impl net::PingUploader for FakeUploader {
- fn upload(
- &self,
- _url: String,
- body: Vec<u8>,
- _headers: Vec<(String, String)>,
- ) -> net::UploadResult {
+ fn upload(&self, upload_request: net::PingUploadRequest) -> net::UploadResult {
let calls = self.calls.fetch_add(1, Ordering::SeqCst);
+ let body = upload_request.body;
let decode = |body: Vec<u8>| {
let mut gzip_decoder = GzDecoder::new(&body[..]);
let mut s = String::with_capacity(body.len());
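Several of the uploaders above repeat the same body-decoding step: gunzip the request body and parse it as JSON. A minimal sketch of that step as a standalone helper, assuming the flate2 and serde_json crates these tests already use (the function name is illustrative):

    use std::io::Read;
    use flate2::read::GzDecoder;

    fn decode_ping_body(body: &[u8]) -> serde_json::Value {
        // The ping body arrives gzip-compressed; decode it before parsing.
        let mut gzip_decoder = GzDecoder::new(body);
        let mut s = String::with_capacity(body.len());
        gzip_decoder
            .read_to_string(&mut s)
            .expect("ping body should be valid gzip");
        serde_json::from_str(&s).expect("ping body should be valid JSON")
    }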
diff --git a/third_party/rust/glslopt/.cargo-checksum.json b/third_party/rust/glslopt/.cargo-checksum.json
index 4e4e911018..a09c03e66a 100644
--- a/third_party/rust/glslopt/.cargo-checksum.json
+++ b/third_party/rust/glslopt/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"5e6232fe2ab7866ef301c1dacaa3135a02e2fd69744c9372eceffbc3c1fd1c32","README.md":"4468e08c64c19977707d792bfab0080e35ff927b64990eab77873f8ba056ba1c","build.rs":"6a64610018701781af182c418a4355c9ac5d99d000be9457f0e38a7dadf7542a","glsl-optimizer/CMakeLists.txt":"42ce94744e82ffa000da8b64d81fc140e293b9f5da7dd4cf6b49e7404a2448d9","glsl-optimizer/README.md":"b18eef11a92d267d88a937b1154f7670ee433c730b102fdf7e2da0b02722b146","glsl-optimizer/contrib/glslopt/Main.cpp":"14ba213210c62e234b8d9b0052105fed28eedd83d535ebe85acc10bda7322dd4","glsl-optimizer/contrib/glslopt/Readme":"65d2a6f1aa1dc61e903e090cdade027abad33e02e7c9c81e07dc80508acadec4","glsl-optimizer/generateParsers.sh":"878a97db5d3b69eb3b4c3a95780763b373cfcc0c02e0b28894f162dbbd1b8848","glsl-optimizer/include/GL/gl.h":"1989b51365b6d7d0c48ff6e8b181ef75e2cdf71bfb1626b1cc4362e2f54854a3","glsl-optimizer/include/GL/glext.h":"2ac3681045a35a2194a81a960cad395c04bef1c8a20ef46b799fb24af3ec5f70","glsl-optimizer/include/KHR/khrplatform.h":"1448141a0c054d7f46edfb63f4fe6c203acf9591974049481c32442fb03fd6ed","glsl-optimizer/include/c11/threads.h":"56e9e592b28df19f0db432125223cb3eb5c0c1f960c22db96a15692e14776337","glsl-optimizer/include/c11/threads_posix.h":"f8ad2b69fa472e332b50572c1b2dcc1c8a0fa783a1199aad245398d3df421b4b","glsl-optimizer/include/c11/threads_win32.h":"95bf19d7fc14d328a016889afd583e4c49c050a93bcfb114bd2e9130a4532488","glsl-optimizer/include/c11_compat.h":"103fedb48f658d36cb416c9c9e5ea4d70dff181aab551fcb1028107d098ffa3e","glsl-optimizer/include/c99_alloca.h":"96ffde34c6cabd17e41df0ea8b79b034ce8f406a60ef58fe8f068af406d8b194","glsl-optimizer/include/c99_compat.h":"aafad02f1ea90a7857636913ea21617a0fcd6197256dcfc6dd97bb3410ba892e","glsl-optimizer/include/c99_math.h":"9730d800899f1e3a605f58e19451cd016385024a05a5300e1ed9c7aeeb1c3463","glsl-optimizer/include/no_extern_c.h":"40069dbb6dd2843658d442f926e609c7799b9c296046a90b62b570774fd618f5","glsl-optimizer/license.txt":"e26a745226f4a46b3ca00ffbe8be18507362189a2863d04b4f563ba176a9a836","glsl-optimizer/src/compiler/builtin_type_macros.h":"5b4fc4d4da7b07f997b6eb569e37db79fa0735286575ef1fab08d419e76776ff","glsl-optimizer/src/compiler/glsl/README":"e7d408b621c1b605857c4cab63902f615edb06b530142b91ac040808df6e22f7","glsl-optimizer/src/compiler/glsl/TODO":"dd3b7a098e6f9c85ca8c99ce6dea49d65bb75d4cea243b917f29e4ad2c974603","glsl-optimizer/src/compiler/glsl/ast.h":"3e68ff374350c49211a9931f7f55a485d8d89fc4b21caaffbf6655009ad95bf8","glsl-optimizer/src/compiler/glsl/ast_array_index.cpp":"92b4d501f33e0544c00d14e4f8837753afd916c2b42e076ccc95c9e8fc37ba94","glsl-optimizer/src/compiler/glsl/ast_expr.cpp":"afd712a7b1beb2b633888f4a0911b0a8e4ae5eb5ab9c1e3f247d518cdaaa56d6","glsl-optimizer/src/compiler/glsl/ast_function.cpp":"74f4fbd490e366b37f4715168bb3465ecd9334d4130942f75dcc8e80e8e7f027","glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp":"d0f798eb09271d41d068b9e7b18220d37f1ed0083300ab51eba30989698fe23d","glsl-optimizer/src/compiler/glsl/ast_type.cpp":"8eb790b24b26dfb72bdc333744b566c26d8464c5d47d20eae659461f5c4899f7","glsl-optimizer/src/compiler/glsl/builtin_functions.cpp":"454189d643c220fcb49116ee5c8a34f7b349aa67564040deb8607f6a41a15e70","glsl-optimizer/src/compiler/glsl/builtin_functions.h":"a37cad7ed09b522c5b8bec7b80115a36846e7ba6e0874a2a858e32f7f202c665","glsl-optimizer/src/compiler/glsl/builtin_int64.h":"619def6f3aebf180da3944ef08f159ab12a58b24767e41d8b985ac37ded54d62","glsl-optimizer/src/compiler/glsl/builtin_types.cpp":"afec060b62d6f3b00bfbf94e9fa5f96341ce096c128d1eef322791e6ed9cea4d","glsl-optimizer/s
rc/compiler/glsl/builtin_variables.cpp":"6563bfb1345cbca4c77e00eef09ad152f3e1dc271d246a08c5ce9e1f4ce4250a","glsl-optimizer/src/compiler/glsl/float64.glsl":"1072fd888be48c2a7a5117cd2d92a65f034965a66375f598bb856bff5d7be766","glsl-optimizer/src/compiler/glsl/generate_ir.cpp":"e5f0175370a0d07f93c48d3f0f1b8233d12c64a7b02de02dcc753ef7b398ef0f","glsl-optimizer/src/compiler/glsl/glcpp/README":"a0332a1b221d047e9cce5181a64d4ac4056046fd878360ec8ae3a7b1e062bcff","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c":"2d179879b1ffe84f58875eee5b0c19b6bae9c973b0c48e6bcd99978f2f501c80","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l":"e4c5744c837200dafd7c15a912d13f650308ea552454d4fa67271bc0a5bde118","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c":"03494f9ce1cb82260506e2559e73a3eeb622c4bd51b65eaa0a2c3351862bd4c8","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h":"264d9a18421cde255ce34a0a62b3d8e73465359f0d167e64aa3973062aae5bdd","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y":"fafb66e3a8f149d19e085f18a4273ba6d4c11af9e9a01d665cc784dddf97b79f","glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c":"37ed294403c2abfd17fd999d1ae8d11b170e5e9c878979fefac74a31195c96b0","glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h":"85ac8b444bcbd0822b66448a1da407b6ae5467b649f5afaf5c58325bd7569468","glsl-optimizer/src/compiler/glsl/glcpp/pp.c":"a52d94f1bcb3fb2747a95709c4a77c25de7eea8354d2b83bb18efd96976a4473","glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c":"d11aeb3acfe966d1b78f1ee49804093f2434214c41391d139ffcb67b69dc9862","glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h":"abbf1f36ec5a92d035bfbb841b9452287d147616e56373cdbee1c0e55af46406","glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp":"272b9fc1383d72b81bfc03fa11fdf82270ed91a294e523f9ce2b4554bd3effa9","glsl-optimizer/src/compiler/glsl/glsl_lexer.ll":"2b57d9f9eb830c3d7961d4533048a158ee6f458c8d05c65bea7b7cfbc36e4458","glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp":"f8095d20629d0af70be930b0612e169edb274551a1d25a3cd1bf9995a11ce2e8","glsl-optimizer/src/compiler/glsl/glsl_optimizer.h":"22e843b4ec53ba5f6cd85ca5f7bad33922dca8061b19fb512d46f1caca8d4757","glsl-optimizer/src/compiler/glsl/glsl_parser.cpp":"126baf368d525aba301854e3d91ba60b5aee32e1102376af71416f32cb95ec48","glsl-optimizer/src/compiler/glsl/glsl_parser.h":"2ea9a50716098a8f7bef782d2a030d757b68da73afb01b4d4940d3e8381d44e8","glsl-optimizer/src/compiler/glsl/glsl_parser.yy":"6b1fd1576b29fce005dff744a6dbd0219e4c695c361d61864e1f3a8d6fa6b764","glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp":"aad64b5b66467da650091430681e8c6a820cf3cadc4db3c160bf2f15875390ae","glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h":"71fd0e92bbdb193dfb067d7bfdb1200d77392be2fbd0cbfc9ca89d1bb4c7e741","glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp":"6660fb83c0ddddbbd64581d46ccfdb9c84bfaa99d13348c289e6442ab00df046","glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h":"24682b8304e0ea3f6318ddb8c859686bd1faee23cd0511d1760977ae975d41bf","glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp":"72a039b0fcab4161788def9e4bedac7ac06a20d8e13146529c6d246bd5202afd","glsl-optimizer/src/compiler/glsl/int64.glsl":"303dbe95dde44b91aee3e38b115b92028400d6a92f9268975d607471984e13eb","glsl-optimizer/src/compiler/glsl/ir.cpp":"2b4741cce90b5d4abff5d719c7324e2693c67294d4d99736cb241554adb281bc","glsl-optimizer/src/compiler/glsl/ir.h":"990b1c74447c4eb4835353ccb0ed9aea644f97fc1129ef1739cd935075d85d2e","glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp":"8cdc1cffe01e42e0566fa2193a75f789628e8025ad1b82
f0ee6f204451b7f9f7","glsl-optimizer/src/compiler/glsl/ir_array_refcount.h":"75f06ec81342b379096ca52e1dc0fd5f19a11ff8e9b58203c20628179d644c12","glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp":"1e2920b1c0ecb08424c745c558f84d0d7e44b74585cf2cc2265dc4dfede3fa2f","glsl-optimizer/src/compiler/glsl/ir_basic_block.h":"81be7da0fc0ee547cd13ec60c1fcd7d3ce3d70d7e5e988f01a3b43a827acdf05","glsl-optimizer/src/compiler/glsl/ir_builder.cpp":"daba29c5a1efdd5a9754f420eb3e2ebdf73485273497f40d4863dadeddb23c0d","glsl-optimizer/src/compiler/glsl/ir_builder.h":"2822e74dd3f6e3df8b300af27d5b11ea2dd99d0e5e7ca809b7bbcce9833c483c","glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp":"8c6df5abf2fe313363f285f171c19ca6c8ee4f3bc2ed79d33c0c88cc8be45c48","glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h":"799852adc3a0e54d04080655e7cebfa0d3bf5b6ffed5d8414f141380665d4db7","glsl-optimizer/src/compiler/glsl/ir_clone.cpp":"d897a4e1f5bbec4a6a2f15044c1be9a4d13899c73be77335b041049a4589aa5d","glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp":"78bd87ddb09db67f6c499067728d72aef4f16aa02721a99a4b769d1e0cfa9010","glsl-optimizer/src/compiler/glsl/ir_equals.cpp":"bca28533a6310b0fc152b56d80872368f1510dc62ed6e8ac199b9ffa7fac02e7","glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp":"7e918d4e1f237eca01396004015865ce345afe32a876c9dbc6728576a1a7eae4","glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h":"f45b66aa9497520e7e08e612d24b308477c34477fbd963ee9320eac664957f16","glsl-optimizer/src/compiler/glsl/ir_expression_operation.h":"cc9f10727dbd26cac506804f51456302c702650f9eeb59054a7e1575d5cf6687","glsl-optimizer/src/compiler/glsl/ir_expression_operation.py":"7b86c96021b9fbe165957f4ecb0b612fefcde1c2cf3c6d75e3cdb22e369216ba","glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h":"9ad3346416392e3efa11e12ecf2feca7453c5253d241eb96c91dfb85d4f2b971","glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h":"a6826daf496a8b9e89885bc2a161ac3445d501b23c6e0ac33e2c01b506b273c8","glsl-optimizer/src/compiler/glsl/ir_function.cpp":"7537365fc0fbe4b37a26b9a2146cc64d3e9a774d60eab63b65002ad165ae8fc7","glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp":"faddbf112187a048d502716a3fb82570a322299ba2a3abd79388382c82040bfc","glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp":"9176973eaf5c0a984701f953bb7a80f37dca43d59b5bce50fc69b3f02f2902d7","glsl-optimizer/src/compiler/glsl/ir_function_inlining.h":"9739493f99c489987d650762fccdd3fb3d432f6481d67f6c799176685bd59632","glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp":"3725861fbe2b98e0617f52d3b14cf6d3b25fb5ec00f5ef5d308b03642f592767","glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h":"e0560210e966c0c31e4ca843e80ea154e64db5a444b8c2df845b6ba5b3a43fc1","glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp":"caf7ce2cd9494aadd3c58bcf77f29de58368dc9e347a362bbf37f8bda9509b80","glsl-optimizer/src/compiler/glsl/ir_optimization.h":"8b3dcfc7f9e96b21a8dd47a0040d90be483a9e67a2cdce3a697188fb758d4630","glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp":"f8e34a983452be0dcb5a695e9c8e895eead24f9e540992a8afe510ae85da4c4c","glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h":"1ad1bd3efd1ace39051c13f904c05fd80425d329444f9a8d47fd6d948faf46e0","glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp":"643f5a68aae3fb37267fd793f1216d1cfdeb2c09338c26b1f30e4c6deaef4de5","glsl-optimizer/src/compiler/glsl/ir_print_visitor.h":"4573eb93268a2654c14b505253dd651e2695d43dc745904d824da18305269b95","glsl-optimizer/src/compiler/
glsl/ir_reader.cpp":"06bfba802c8354e5a8b2334b6d78d6297de18235bedd3f8fbb382c89870b02f2","glsl-optimizer/src/compiler/glsl/ir_reader.h":"63e3f7f1597936a7011d5b520e171b197bf82bee6c1560d822c3edf5aaa6f9e9","glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.cpp":"84b5c5d746555adca85759c2912fe48010232b7c1c0bd2cf03bd04067a85e66f","glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h":"fd8c561b71085d3211fff85ed514fecb299d8ce19a04bc063419a55b6d840525","glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp":"ab9f115ce9e7f312d9c7978340ced0dc4ae6d13a80e08442ba9709d11d50cae5","glsl-optimizer/src/compiler/glsl/ir_uniform.h":"683ae6896b1a08470c090be5f822fc31cd434eab9216e954b9bba24a46975109","glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp":"9c1620c45f2fc071fe5ed828472040b14c5f42effe06aa0e3b8352c95ef78786","glsl-optimizer/src/compiler/glsl/ir_unused_structs.h":"13387b49c23093575276b25b9dfd31fedd8f131c5c4f3128ab04cf03e15b5295","glsl-optimizer/src/compiler/glsl/ir_validate.cpp":"6b232be5999a86ea278f4f15b2832d76843246509118d924243055a3b9b0299f","glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp":"2764a3cad937d53f36db7447c3a5b98b04bf153acf81074d971857fc5bca460d","glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h":"b0668e3eb1501ef65e38fe12830742ecb3d28e6039f30e366c8924efc29b4a39","glsl-optimizer/src/compiler/glsl/ir_visitor.h":"f21b3534c3d66d5fb707d1581fece7e1eb043523afbaedf89918cfb031c6df94","glsl-optimizer/src/compiler/glsl/link_atomics.cpp":"360f0209e11f367ba358223597b0a118bae095bff16337cf03f1fb89c5b80ca6","glsl-optimizer/src/compiler/glsl/link_functions.cpp":"de7895da8aa33a1e3c2c1eb2fdaf267ab5d1fbfdb79ae2e67f95211e946e294c","glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp":"1926cfa73810704eb19b916c1b2cdb9321155e2f98b2a0a57c7c3c6e960540cd","glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp":"1e14e06ca3b2c1089cfba2e8eaf0c1f373d9d6374b6082f320962dd71ae09611","glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h":"fd58c155af645295bb6aec08797889de586f4d919731de2bce57e8dce59bb048","glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp":"09589f49776dce32e6c4044937de7e0c839a9754ad31960148f8f9e010658997","glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp":"bf98e08c12db466acf9623cbeb8fa8e3b4002512722e7a6521287f558a099f37","glsl-optimizer/src/compiler/glsl/link_uniforms.cpp":"84bad5b1377362cecf259b05124239be5220b03ce1c0c61b59bd9a47e4379af2","glsl-optimizer/src/compiler/glsl/link_varyings.cpp":"a5f1a53e7c80d635515fe808ff223d89fef1767abb0f2b7aa28fa6773dca353f","glsl-optimizer/src/compiler/glsl/link_varyings.h":"b9dbe018f038df69763df2e928742ce81bbc6e3aaba26f50621e30a6d9aa6220","glsl-optimizer/src/compiler/glsl/linker.cpp":"40b1ecd5d4f6c7f13d5a87ce390561a51fdf6f3fcd9b2197b9c88b03a773ba94","glsl-optimizer/src/compiler/glsl/linker.h":"ecf94b4ad75ef461c27c557fda4bd25f34c91930822b8e1d729ec84520d4a049","glsl-optimizer/src/compiler/glsl/linker_util.cpp":"1663ad88e2a369305659aeeffaedb5bd752cf76340a2ba5797fc0bf600633cf9","glsl-optimizer/src/compiler/glsl/linker_util.h":"6db788daf9c8e87ae2764b61a8b37ebe419e69c1b82ddee01986e37c978c6993","glsl-optimizer/src/compiler/glsl/list.h":"b1f46ce0e552fe7c45b2a19408a9d97662e23e4b182ab335491c26f8cf25886f","glsl-optimizer/src/compiler/glsl/loop_analysis.cpp":"57ecd573477c68091c7cc99537faa7139a8f395935e3d4f10144cefdefb5a611","glsl-optimizer/src/compiler/glsl/loop_analysis.h":"a85f045a038ee5b5176063e85d7988865862c44ab0580f771b993a042d0b69cc","glsl-optimizer/src/compiler/glsl/loop_unroll.cpp":"bd4292ea2809f5a
669bcb76ceaa1ac365772dcd638c579c3ed10275214901a54","glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp":"8cfbef140d9c4b4d2f57bfa05c9c374d31a121d0f87afce94333f049023b654a","glsl-optimizer/src/compiler/glsl/lower_buffer_access.cpp":"1ae221c3c7a95aeb867207e7a742be635f91b406c157747bfd6ddf10274d97fb","glsl-optimizer/src/compiler/glsl/lower_buffer_access.h":"807886953a576a323591798cbca5e2df24295ea893b28affd8ffb5926cebaa04","glsl-optimizer/src/compiler/glsl/lower_builtins.cpp":"4d81afc32cf58e1481fcb5e42888ab93dbe6820310a20ff7a9982b77b2152d9b","glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp":"608403f0eeeedf21cfcd3014116e0f44e28cbdf6c4c32aac7e613e64e30205e1","glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp":"179905cd47a294122adeb5b0abfed6f2f67782dcde21b544d1ee2c1985154e66","glsl-optimizer/src/compiler/glsl/lower_discard.cpp":"3b361b2db0004d544d64611cb50d5a6e364cf6c5f2e60c449085d7d753dd7fb0","glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp":"f5c29b6a27690bb5c91f196d1a1cf9f6be4f1025292311fe2dac561ce6774dee","glsl-optimizer/src/compiler/glsl/lower_distance.cpp":"a118c85493d5d22b2c059a930c51a5854896d4b1dade76598eaa985e5a3dff8c","glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp":"469e617757fd1728709cce021aac5c8da05ee503bf5366977bdc4ef7a6d83950","glsl-optimizer/src/compiler/glsl/lower_instructions.cpp":"defd043e8576437c1ef63c7d0bf5f828068bbfb5fdbec16457a9c191a1e9242d","glsl-optimizer/src/compiler/glsl/lower_int64.cpp":"d1ed41196880dd53c7b13e2782f9423f8442bf1d46186e8be92b1b66218a83ee","glsl-optimizer/src/compiler/glsl/lower_jumps.cpp":"34de7b493f281589fb0c2c0f6e885d0a0fabbe7a4e97a73de374dd714777a58c","glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp":"dff7a308edc4846c348ed4225c6699a9c75abac68d88f41f85954276552779f4","glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp":"16063ac127bff75a68272070ab11c21c25101edbff62b4c68f4983b4cd941af0","glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp":"3b00773399135aea85746a5a68b96ef000bc6841be1a2c8e6f25c516628b0949","glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp":"a0fc9975d5aa1617e21fc6c353659a9802da9e83779a3eef4ec584f74b4dadc5","glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp":"7550099d4ae123d71541c2fc88bc04fbfe9271ec75d7e210987d1c8cac3cf3ea","glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp":"79a13d161fe505a410ab948d92769395708693ec888153630fa240e5b97e356f","glsl-optimizer/src/compiler/glsl/lower_precision.cpp":"f82a185b879872b977a1787d8061b9a80bc4cf8db1b970db6efba2ad9cc20fa2","glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp":"ea2dccf50a83bc19391bf6b7ab6aa53c0005f427af4066d25140340af9a4beef","glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp":"f69fa53650eeb6f2944fce4d36a6e0a423e6705f3a3bd3389c7fadb83cfc8802","glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp":"b196c9d424c0569f3e85d75c2d125af21566cb113d69036db87c0990703e0fa7","glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp":"4d247f244272adc8250fd888d8d932a140dd5de4d1efc7a58492c3c2b8291527","glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp":"89bdbc6c1669230c644c0857db1ce2781ec61d349ecd08c7914146e1f4750a4a","glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp":"fce930f29ac9405b297d1f749d68f59506b89c70b4ee1b1ab8cf49a34cc71ecf","glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp":"3c67d851a11a55fad1c49a550f3a0cfe50892d33a3f238ce266cd829eba510a8","glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp":"f5ec666b73e1
415cbab32519a53605ed385f3b03e889560373dbce69dda5000e","glsl-optimizer/src/compiler/glsl/lower_vector.cpp":"f7c13f5572ebe09b6a71553133b2cf003cd4b77b9657600672ee3b21bf890725","glsl-optimizer/src/compiler/glsl/lower_vector_derefs.cpp":"b05793da6dd620a531b43df5af8b2ecbc37b9db0c88910f5724ea10bcd057e19","glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp":"fee772ec17eea5e86a529bf9c5fa2ee0d29a5982bb75ebc6d68ed36cd19aa299","glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp":"690e8715182e03fead5cc5a35251fb4f41b357e4c71a1dfbc4bd7be19862b56d","glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp":"58c0e8b270e4bbde54250be03cdb2f36966bcafb785372ad2e2b786835df7f9f","glsl-optimizer/src/compiler/glsl/main.cpp":"ae5e88abbbc8a12f769e1296bad938b9d7398cc6da0d3d0caeceeeb876536850","glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h":"f5054944bfd068810629080d0ea11df78b3f57a8f86df75e13ca50157ad1964d","glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp":"25f45b20e1972ee8c789177a1aeda6e4286c25db2eae3a43ff83029ae64969c0","glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp":"19d3ce0e815438f4df9ab2890e767b03a4f3f191b53bb30c0217cf2ae6a95430","glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp":"0e44e0e126711a3725c1f3a2aa65ff03c381fed08680ffc30101aae60f716c4e","glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp":"a088d04d9b45f9e55e235835648f614c89b7803c03a6d4f6a6d1a6bc1f0228bd","glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp":"8a9440d77ecd6dcf13e683cbb99943aab6311c8fd4b5f6a9189a8d4f270746f4","glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp":"63d3ccd4dd09f19c9cf1a2f51592111bed41284504f29f3c0de4cadebc439a37","glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp":"ffa0f50863995e0d2e31f55a52e82319edc71e520987bebd7f7e561ea331c64b","glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp":"84e8747b948232f01dd56b428b9315f96f9511f605f240119fc446fae28981a9","glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp":"761523e88f5b3ba785170f4d7205e94fa99acb7e74d29efbe40e1c010e1dbdb3","glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp":"fd1ba2da7337d4e5dad17f5c2d73d9cc8880305f423e85d64cf94553588fa401","glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp":"969a598b4df322baf222258a66cd64a326ea20e5b3125be9d8d1771f522c69e0","glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp":"774cae6536d02edf26e996a2a895e1f62d5098f16dc96b44798b4fc731a9a95f","glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp":"3696a5c55f02e20056e085bc2714f73ac992f221b6f3387d655068e86b512046","glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp":"44f0fe05b49329667671f88c96dc86ab3fe1459ff7b87f2b2d88de2d49829f9f","glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp":"fb56a33c90419a01676b57cbd91d0674a54cca40e6defaacc88dd33facebc131","glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp":"ac406eb35e379c357641d6c5749f50c65961455924d3dc884e2b90046fa92c5c","glsl-optimizer/src/compiler/glsl/opt_minmax.cpp":"8abd59d3b14ef60ff14a9c69660e6945f5cf10b97edb4afebe56be3f81d96316","glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp":"8bb6329dc0f299042368fc81934c2df019b45ab9f7aa0415d4e57b8d1ff98c9f","glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp":"222c73e2ac7a938ebb6428cc6c780c908ff6156d8ff935b04fed93a48fc10496","glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp":"2edc79cc13f3177934e0443ad62f5976a1991f01f86ea303a803434849b13a47","glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp":"015d0abddfe507f67c4b96c82988d861d018ededf7bf055e2bc
be9ea92da694e","glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp":"46d28ac983ea244a4315bdc0e8892979ec4d1f9b9a96ac8a8a08006d9bc5e878","glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp":"d80ee43bb97d9f016fb9c5e1e06f5b2afa569811f368ba067be794ec11d085fb","glsl-optimizer/src/compiler/glsl/program.h":"2982447e2abd35371e273ad87951722782a8b21c08294f67c39d987da1e1c55f","glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp":"080943e21baa32494723a2eefb185915d2daae1f46d6df420145c5ad6857e119","glsl-optimizer/src/compiler/glsl/s_expression.cpp":"1ced972bc6ecc8eab4116ea71fb0212ab9ae5bcc0be3b47aa5d9d903566b3af1","glsl-optimizer/src/compiler/glsl/s_expression.h":"65b847e30e22a809b57d0bc70243049c99d9c6318803c5b8d0826aba55dc217e","glsl-optimizer/src/compiler/glsl/serialize.cpp":"be0eb4251348a9d921acb839a5c48c6023a2e9d116d602bb0432787ab623655d","glsl-optimizer/src/compiler/glsl/serialize.h":"57425732eba1233d928e5f07f88b623ce65af46b3bb034bf147f0a4b7f94f9a1","glsl-optimizer/src/compiler/glsl/shader_cache.cpp":"e0c5c433f2df3fccdf1d61281bfcb0ee5633433339b97c697d64db99611cbaaf","glsl-optimizer/src/compiler/glsl/shader_cache.h":"9217164d8d7f54aca0fe5922c7187095a6ae0cb703b196b79805aeef07a7e697","glsl-optimizer/src/compiler/glsl/standalone.cpp":"8e6c416a14d631261917a5fe4cc91880c287b22b2dfd70eb22028289a8fa5364","glsl-optimizer/src/compiler/glsl/standalone.h":"a7c397d1dfdd1e7fb2cfe99db35cd9df93251e642059208533202b7f20497f83","glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp":"970d14b7a9d58e5270321f97bf5d57795558b1c570a56678e04a65b26c60bf4f","glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h":"d921a617ea82b9e49413314492a645c44356de503581b1be3f1b57de236e480d","glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp":"d824bf5b839bd39498dc9e457103cdbe3e5289ddf7564107c27b1505948dd31f","glsl-optimizer/src/compiler/glsl/string_to_uint_map.h":"e2f18e66359c9d620e085de7f4a334a47df9c66e65a5bfe8b734c627bec04104","glsl-optimizer/src/compiler/glsl/test_optpass.h":"b27b8f35f5387e7ce4982bb51c7b63ccf14f91757f3108a5d02ed006925bb8a0","glsl-optimizer/src/compiler/glsl/xxd.py":"376484142f27f45090ea8203ae2621abf73f06175cb0ee8d96f44a3b9327f4bd","glsl-optimizer/src/compiler/glsl_types.cpp":"044bb6754f45419a3151e7a25c39202a82009ae3c6bc54ff7f0bb4258a5deefe","glsl-optimizer/src/compiler/glsl_types.h":"fd899a42f34ddeb8601bc3cd6c5e3aed82fc8aef4042dde1b39b3c01e1dcc219","glsl-optimizer/src/compiler/shader_enums.c":"436bff5216b11b0980bdfada5885fc6ac9afa2037a3027fcd6eea2a8635597ac","glsl-optimizer/src/compiler/shader_enums.h":"13220442a5c02e83540cf2c0ad4f8417b2fbda5f2586dec4e92082544c937cdd","glsl-optimizer/src/compiler/shader_info.h":"4c5453e81197ca83593ee4f365074b23530f2ab21c78e1733b63dec6f344c12a","glsl-optimizer/src/gallium/auxiliary/util/u_half.h":"3c2b37bda3ccb64387e44b723d29cf9046decab1a893bf42d842e9603398bdee","glsl-optimizer/src/gallium/include/pipe/p_compiler.h":"c75620096ce8523dae90599e50aa2ef6468d3b0e368a77795edeb20dd1abfc0c","glsl-optimizer/src/gallium/include/pipe/p_config.h":"a27692fc35f9e55df3224b7529e66b3001e911e94e6bc5f8f569e493e1ee3fb7","glsl-optimizer/src/gallium/include/pipe/p_defines.h":"be26d68c0acc67c5e44788c6299716a9eee415fd81d7d747e3738a829e3b6b38","glsl-optimizer/src/gallium/include/pipe/p_format.h":"5674215fc41d27496f037cf837717daefbf23ebb38d40ace7c0c414bc08182b0","glsl-optimizer/src/gallium/include/pipe/p_state.h":"d600593aba5f5a17072a6c38f6baa81e01c7994b0174250f7e433bb41684b702","glsl-optimizer/src/mapi/glapi/glapi.h":"73632a625c0ddabc401205e8b5a81eb8af8506868efe4b170d7979ec3619e9c5","glsl-o
ptimizer/src/mesa/main/config.h":"5800259373099e5405de2eb52619f9de242552a479902a3a642a333c8cb3c1e7","glsl-optimizer/src/mesa/main/context.c":"2f3208473d99c94f734b1137ba91889d4a1babb9e7534bf1dc85d851ee98274e","glsl-optimizer/src/mesa/main/context.h":"cc7e4194797db9d007f01884e23d786c453b3860821f7f2ddcdf0f1bf3f8ffb1","glsl-optimizer/src/mesa/main/dd.h":"6a964acd06b6c2d88700e69fb75fe3c6b3b3d45bbc41db24f3f897a29695fe0c","glsl-optimizer/src/mesa/main/debug_output.h":"7312422e90b8c0e34028ac27280e438139b5cba525c99deb3ac883cd3d87e452","glsl-optimizer/src/mesa/main/draw.h":"7eaef3a9e27a60ea6f7937109bf3a6190b831162fde0479abb12077ce27c353d","glsl-optimizer/src/mesa/main/enums.h":"87d562a6764f51c014a2274fa7c3aca17c04441537ddd56b2554f13c6fffea92","glsl-optimizer/src/mesa/main/errors.h":"c79444b5df289c90fbb22a33b2d0c23917d9fc4510960088f0b79e53bb56b1b2","glsl-optimizer/src/mesa/main/extensions.h":"a38b2f87cc93c513994281350d69e06c84ff8eded5313ec0a1be33f375e0ebbd","glsl-optimizer/src/mesa/main/extensions_table.c":"17642d1a8c9a0bf2bd61060052d33ff14a005d2b962e6cf91465797a50851e85","glsl-optimizer/src/mesa/main/extensions_table.h":"2c879571c238d2e14461031ac740372fd0f9ac3a34c0d5541bb9b7ed4c0376c8","glsl-optimizer/src/mesa/main/formats.h":"02e2f7ec3e39286cf9f27e2641043e6df8ecb1dfde9e643313210e214af2a929","glsl-optimizer/src/mesa/main/glheader.h":"58217b33eead6aa6b23cd4a291cefeaa6cb84e465f4960daffca97c44d6d1c35","glsl-optimizer/src/mesa/main/glthread.h":"51fb2711f77e7eafcfc52d29d5b844978832b24c930d88accd48d143a6eb9c6f","glsl-optimizer/src/mesa/main/hash.h":"7e7f782034c16a8e693de48e00c31d4a90b0129f4029fd074033d7d16ccbe718","glsl-optimizer/src/mesa/main/macros.h":"73d15ddfd64f2b57b9b2ffeeb993b9c2c0899a80563e9d6ff337b11ccbe6eee5","glsl-optimizer/src/mesa/main/menums.h":"5dfac0e2279d60b0cd0c7b9fc2a5021620d0f6282ed2e738c420214e3af152d3","glsl-optimizer/src/mesa/main/mesa_private.h":"edda678b93438944279a551f663b8858ad84814a9fc88ba9672ef195599c24ae","glsl-optimizer/src/mesa/main/mtypes.h":"6efddefa099e4d2e3fdd97f0055644f47aba21711385edfeabc2d9b0676f2eec","glsl-optimizer/src/mesa/main/shaderobj.h":"9f0dfe96d0c2154201adef942bd36053533ac7b2492fb3786acda5bea514c75e","glsl-optimizer/src/mesa/main/uniforms.h":"4e331e6ad6e9cbded978b4082dbe0a57c1f8f01327446bb6892bfc179976c38b","glsl-optimizer/src/mesa/main/version.h":"9d0a13a758099302dc55cf7d045791834a89b0f9d4cf17b2692259b369a8a9a1","glsl-optimizer/src/mesa/math/m_matrix.h":"a37b19f182e070db3df93b0ede43c22fb8be8c2906504133ee6dbd7db1185d8b","glsl-optimizer/src/mesa/program/dummy_errors.c":"1820e305515b4c5e041f5e1623266a48ec8f076a155310be7d60637101f593e4","glsl-optimizer/src/mesa/program/ir_to_mesa.h":"b47f58d22e3ca2ae42d52501ea769d15c4476834944fa97eeccd3a3439211d00","glsl-optimizer/src/mesa/program/prog_instruction.h":"ab3832152a7e144b59e5a2264b2c29db56d93be31e76bbd958527a56771b40eb","glsl-optimizer/src/mesa/program/prog_parameter.h":"ba18c743284eadbc837c2c364c73e5d372321a7637a76e589d8d39fe8b5de225","glsl-optimizer/src/mesa/program/prog_statevars.h":"fc413698f84bc52d45fdeae0471934ee9904bfb7eac1a2b5f70446e54bcbbdca","glsl-optimizer/src/mesa/program/program.h":"1f01026a4eff440a3f122fd9b519d03546fe7f7d8be60dca834e95a2f8fbbfd2","glsl-optimizer/src/mesa/program/symbol_table.c":"6611cb9f078035bf5ff8c9112093a6c7d99f8af99a3931d0c07f227cc72283ea","glsl-optimizer/src/mesa/program/symbol_table.h":"631dc35ac48d5e87962d45507461920f6575610960ffcc42a08cefeb43300cda","glsl-optimizer/src/mesa/vbo/vbo.h":"6eb1dcd9a08c92f276c5fe08da184ff9d455d1be421913b8ad732a7b65e858fb","glsl-optimizer/src/util/bi
tscan.h":"d4fcb47b57a50d70cb97f99ca3e619bc06282a877768a435e009775ce8d77f36","glsl-optimizer/src/util/bitset.h":"c40f78515c6230fed18345c6751ce33833a49da7a27901c7e6d7340cbdcbc5e7","glsl-optimizer/src/util/blob.c":"8f729846f66efc9c15485cc5fc24c6ec861fc1fecb2f652573f2a237d481b791","glsl-optimizer/src/util/blob.h":"93e1eaac866b9a7cd6fc03b533c18fb2edf0e97f03395eff4f3a605c4fc14d0c","glsl-optimizer/src/util/compiler.h":"79e3bf40a5bab704e6c949f23a1352759607bb57d80e5d8df2ef159755f10b68","glsl-optimizer/src/util/crc32.c":"2f3467a046b3a76784ecb9aa55d527698c8607fd0b12c622f6691aaa77b58505","glsl-optimizer/src/util/crc32.h":"59bd81865e51042b73a86f8fb117c312418df095fed2d828c5c1d1c8b6fc6cd4","glsl-optimizer/src/util/debug.c":"c3d68e9752ccc19e66c669562cd113cf1d0ac83cbb30174789e7fb8d1df58f9c","glsl-optimizer/src/util/debug.h":"50068d745c4199ccbd33d68dd4c8a36d2b5179c7869a21e75906ddd0718ca456","glsl-optimizer/src/util/detect_os.h":"343a8790d17a3710c6dd015ee367f84e3902ff3f2e36faca2bf93f9d725d3574","glsl-optimizer/src/util/disk_cache.c":"f533937e5a4fffe76e2739ef4b6b1e1da097d96d63eb808e68ebbc7027641c23","glsl-optimizer/src/util/disk_cache.h":"e83314fb14134a8e079b15e470a6376ba5a8253701f048c890a62b7e55d64bc8","glsl-optimizer/src/util/fast_urem_by_const.h":"e108fce804616c47d071dfe4a04163eec1126e448ed1aa89abb6b3a6d772bd5b","glsl-optimizer/src/util/fnv1a.h":"ab2596f19c6adf431ae27618f62c5743e24ad23ef83bb359a4c4c218245ab459","glsl-optimizer/src/util/format/u_format.h":"4cdfc0c59cbc99a092e5ec5a396910f2d93b9643e5d8141050b011e66f11e45b","glsl-optimizer/src/util/futex.h":"26f7c9d86e9ffef4c0fa2761f1aaa1918337302e20bd6ca10e61dc3c47356deb","glsl-optimizer/src/util/half_float.c":"11bc2584493d5d9d46e8c8a619a0307cf150bf5ab5d0f96bb764b061dc37a00e","glsl-optimizer/src/util/half_float.h":"7f7c380f126da1400a91758cc0392f24bf967bce1672890b62be26fe9fbd922b","glsl-optimizer/src/util/hash_table.c":"0ca40352e35dedab0a84c64c903f1b16d47e950bb5f43b4d22bb57d499bfea6e","glsl-optimizer/src/util/hash_table.h":"217191bb360592e2232f187473c10287d2cda8ae6fa5c53d0ef74c8c206118b4","glsl-optimizer/src/util/list.h":"9fab03c6a78186bb5f173269f825f6ce976b409d931852e3d93bac632e07989a","glsl-optimizer/src/util/macros.h":"63faf65b51058c483b17f1f77da51d1c53c8beab52678cb6bd01f1228a63b6b0","glsl-optimizer/src/util/mesa-sha1.c":"00c692ec353ebc02c06c57c5a71de0ab7a119f86a4146f452e65ec87e4944417","glsl-optimizer/src/util/mesa-sha1.h":"bff4c29f4bf7cdbcefb30fa0c996a7604a380eba8976467c2a60e7cd328f7e26","glsl-optimizer/src/util/mesa-sha1_test.c":"25da89a59d51469f77b4c468ca23ffdce0a7a1166a70b6cc23026a6800b0143c","glsl-optimizer/src/util/os_memory.h":"64555faf1760ae6954f42c83727c38dfc4c278e9152115779ffaad58b42adacf","glsl-optimizer/src/util/os_memory_aligned.h":"12d86fa94be38c13f7eeebdf313795e1267dd5a7187d2f0072e0e896f41702f6","glsl-optimizer/src/util/os_memory_stdc.h":"07360363b88c927065e10df71bebf6c8a0cc3b9167c9dfce55f2d65f11e6f787","glsl-optimizer/src/util/os_misc.c":"a9936e613ec84803abd59ad47c192c8e3939993c950ac91973fdc4cec1801bb8","glsl-optimizer/src/util/os_misc.h":"cc68eb12e05b5e749c54298cb4a6f4cd20cc5af7db3403e70b3c27b56090c740","glsl-optimizer/src/util/os_time.h":"73e775f7335244ff5964c678c27eedf1aea6abea44c4169d327ea8c7ce4a3a88","glsl-optimizer/src/util/ralloc.c":"4b51189595ef67bcef52c40cbf654d969041dbd15e15d4a893ad494ac060aeca","glsl-optimizer/src/util/ralloc.h":"e573c45875ff1530f0dbee9a93ae55535fdac8d5cc88a79ebc327c688824bde5","glsl-optimizer/src/util/rounding.h":"fe22a2a198057b5442de8034968a68a70909811aa12e1096ced221415f237e2b","glsl-optimizer/src/util/set.c":"8
6f8c9a830bead5a5a79bc970b0ff97809312af07b3beb39ef9d90af04d40a1b","glsl-optimizer/src/util/set.h":"3e39ca161e7ed4ec7c436cc9c7919ed9a55ed1b71edbf2caf6f9bcfd9bc578ed","glsl-optimizer/src/util/sha1/README":"00af7419af05247081858acb2902efd99fcda2ce16e331079f701645bb3729c0","glsl-optimizer/src/util/sha1/sha1.c":"1403bbe0aad42ba3e6be7e09f7cad87a6a8c4ad5b63962f7b92b9f37d8133b04","glsl-optimizer/src/util/sha1/sha1.h":"68d9f240eab2918026ecdf22be36811abbd4f1389f6c36e31258041aeaedd247","glsl-optimizer/src/util/simple_mtx.h":"12c6c3c4b7db9168bc656d5b3c65912075084d2b388c415d5c3d3f5953a9d6c7","glsl-optimizer/src/util/softfloat.c":"a97e51a96fe5e6a052c02aa6bbec683fe73fb88a8c087d9c930503e2120d8a2e","glsl-optimizer/src/util/softfloat.h":"66664b0250e83bf5dd4cc743acd119d076efcea624a0eab3d6b60718e6ee8811","glsl-optimizer/src/util/string_buffer.c":"63a1d1b1e34926c88ea00159cafbcd56568b805c4f64d1e8c97169fe313921fc","glsl-optimizer/src/util/string_buffer.h":"7b88d1b1d9c6cfb8e93331813535c127289437c75f822029e9a3bca8ea6b52ee","glsl-optimizer/src/util/strndup.h":"0273c4fdb7482cd7746881a63d3998648c6d63415ba85af1d1860f0e0dc504c6","glsl-optimizer/src/util/strtod.c":"5cf610d8a37373cf37cfb7aae903525d943b2674b1f32594c70b0eb19a8c9697","glsl-optimizer/src/util/strtod.h":"237396def4e264d35ed4bedea00ef9a4ceab6d7a11a18c770d9747d22c69ed2d","glsl-optimizer/src/util/u_atomic.h":"c02e809526c6c09ba8fe51f50b2490d1b6c8e5c7f3c4031ae958250d098fc3bb","glsl-optimizer/src/util/u_debug.c":"8c060e379b816618f3dd22c9ea523c68b9425c76c36a7dfe5d6d375b337f5f4a","glsl-optimizer/src/util/u_debug.h":"e11e26edd9b9e4e6f8e6a435e69f4d9edda27e9a379f68f4c82ea2525aaaea68","glsl-optimizer/src/util/u_dynarray.h":"853d0fa6ff2261614488be624deb8a2b01e57c2c8eabc28578cbeed4ccc95694","glsl-optimizer/src/util/u_endian.h":"3ccea7e529740318d8a4b05c00db3adc9d1e292a52bdc56a05c9fae99209720f","glsl-optimizer/src/util/u_math.c":"c868a8c0886dc78f1b06b13404ba8b253090449045774dd56893ac9d75795184","glsl-optimizer/src/util/u_math.h":"ae235e8f73fea1b83e68e4141244666280094b3871ce299023b4c2daa1d7555c","glsl-optimizer/src/util/u_memory.h":"c5db17c724c70283ddbe04165722f6988d4e0eb9aa3602ae472feff016649af9","glsl-optimizer/src/util/u_queue.h":"92930ce236c0528a98b695f5cea8c5c6aa9683beaf71a2227bdc5d33d1b21506","glsl-optimizer/src/util/u_string.h":"c5a2f4ef576d1547bda12c4ea219179fefa54414977743ac094abcaf696ef6ca","glsl-optimizer/src/util/u_thread.h":"00b708459b27f9910d18db92c18cc65cfc618ac2b3cd144e45f8640057b10d58","glsl-optimizer/src/util/xxhash.h":"2f2aff2fc6c0c929f52cf6ae7314122124c5be026d41ad1c357608383c4a37ad","src/bindings.rs":"79993db2058bde39f99ef483d02560d33b1cb882f6a552319e8b86eb6f9021e1","src/lib.rs":"04be1554cd829eb40864b06d80b491dd48117a4e3a601c7d482117f7a0391e67","wrapper.hpp":"f3ea34cc496f7d90b9bfcada3250b37b314c3524dac693b2ece9517bc7d274ac"},"package":"74a3f5c04450dfdadb4b08f6e5ee6f5110f674de1acbd6199bfec68392a8cbaf"} \ No newline at end of file
+{"files":{"Cargo.toml":"7706cf6777f1f50fd565d1ba608ae08f8b2c63342a2c6e5b40f171ffc8d6e6b6","README.md":"4468e08c64c19977707d792bfab0080e35ff927b64990eab77873f8ba056ba1c","build.rs":"6a64610018701781af182c418a4355c9ac5d99d000be9457f0e38a7dadf7542a","glsl-optimizer/CMakeLists.txt":"42ce94744e82ffa000da8b64d81fc140e293b9f5da7dd4cf6b49e7404a2448d9","glsl-optimizer/README.md":"b18eef11a92d267d88a937b1154f7670ee433c730b102fdf7e2da0b02722b146","glsl-optimizer/contrib/glslopt/Main.cpp":"14ba213210c62e234b8d9b0052105fed28eedd83d535ebe85acc10bda7322dd4","glsl-optimizer/contrib/glslopt/Readme":"65d2a6f1aa1dc61e903e090cdade027abad33e02e7c9c81e07dc80508acadec4","glsl-optimizer/generateParsers.sh":"878a97db5d3b69eb3b4c3a95780763b373cfcc0c02e0b28894f162dbbd1b8848","glsl-optimizer/include/GL/gl.h":"1989b51365b6d7d0c48ff6e8b181ef75e2cdf71bfb1626b1cc4362e2f54854a3","glsl-optimizer/include/GL/glext.h":"2ac3681045a35a2194a81a960cad395c04bef1c8a20ef46b799fb24af3ec5f70","glsl-optimizer/include/KHR/khrplatform.h":"1448141a0c054d7f46edfb63f4fe6c203acf9591974049481c32442fb03fd6ed","glsl-optimizer/include/c11/threads.h":"56e9e592b28df19f0db432125223cb3eb5c0c1f960c22db96a15692e14776337","glsl-optimizer/include/c11/threads_posix.h":"f8ad2b69fa472e332b50572c1b2dcc1c8a0fa783a1199aad245398d3df421b4b","glsl-optimizer/include/c11/threads_win32.h":"95bf19d7fc14d328a016889afd583e4c49c050a93bcfb114bd2e9130a4532488","glsl-optimizer/include/c11_compat.h":"103fedb48f658d36cb416c9c9e5ea4d70dff181aab551fcb1028107d098ffa3e","glsl-optimizer/include/c99_compat.h":"aafad02f1ea90a7857636913ea21617a0fcd6197256dcfc6dd97bb3410ba892e","glsl-optimizer/include/no_extern_c.h":"40069dbb6dd2843658d442f926e609c7799b9c296046a90b62b570774fd618f5","glsl-optimizer/license.txt":"e26a745226f4a46b3ca00ffbe8be18507362189a2863d04b4f563ba176a9a836","glsl-optimizer/src/compiler/builtin_type_macros.h":"5b4fc4d4da7b07f997b6eb569e37db79fa0735286575ef1fab08d419e76776ff","glsl-optimizer/src/compiler/glsl/README":"e7d408b621c1b605857c4cab63902f615edb06b530142b91ac040808df6e22f7","glsl-optimizer/src/compiler/glsl/TODO":"dd3b7a098e6f9c85ca8c99ce6dea49d65bb75d4cea243b917f29e4ad2c974603","glsl-optimizer/src/compiler/glsl/ast.h":"3e68ff374350c49211a9931f7f55a485d8d89fc4b21caaffbf6655009ad95bf8","glsl-optimizer/src/compiler/glsl/ast_array_index.cpp":"92b4d501f33e0544c00d14e4f8837753afd916c2b42e076ccc95c9e8fc37ba94","glsl-optimizer/src/compiler/glsl/ast_expr.cpp":"afd712a7b1beb2b633888f4a0911b0a8e4ae5eb5ab9c1e3f247d518cdaaa56d6","glsl-optimizer/src/compiler/glsl/ast_function.cpp":"74f4fbd490e366b37f4715168bb3465ecd9334d4130942f75dcc8e80e8e7f027","glsl-optimizer/src/compiler/glsl/ast_to_hir.cpp":"d0f798eb09271d41d068b9e7b18220d37f1ed0083300ab51eba30989698fe23d","glsl-optimizer/src/compiler/glsl/ast_type.cpp":"8eb790b24b26dfb72bdc333744b566c26d8464c5d47d20eae659461f5c4899f7","glsl-optimizer/src/compiler/glsl/builtin_functions.cpp":"454189d643c220fcb49116ee5c8a34f7b349aa67564040deb8607f6a41a15e70","glsl-optimizer/src/compiler/glsl/builtin_functions.h":"a37cad7ed09b522c5b8bec7b80115a36846e7ba6e0874a2a858e32f7f202c665","glsl-optimizer/src/compiler/glsl/builtin_int64.h":"619def6f3aebf180da3944ef08f159ab12a58b24767e41d8b985ac37ded54d62","glsl-optimizer/src/compiler/glsl/builtin_types.cpp":"afec060b62d6f3b00bfbf94e9fa5f96341ce096c128d1eef322791e6ed9cea4d","glsl-optimizer/src/compiler/glsl/builtin_variables.cpp":"6563bfb1345cbca4c77e00eef09ad152f3e1dc271d246a08c5ce9e1f4ce4250a","glsl-optimizer/src/compiler/glsl/float64.glsl":"1072fd888be48c2a7a5117cd2d92a65f034965a66375f598bb85
6bff5d7be766","glsl-optimizer/src/compiler/glsl/generate_ir.cpp":"e5f0175370a0d07f93c48d3f0f1b8233d12c64a7b02de02dcc753ef7b398ef0f","glsl-optimizer/src/compiler/glsl/glcpp/README":"a0332a1b221d047e9cce5181a64d4ac4056046fd878360ec8ae3a7b1e062bcff","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.c":"2d179879b1ffe84f58875eee5b0c19b6bae9c973b0c48e6bcd99978f2f501c80","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-lex.l":"e4c5744c837200dafd7c15a912d13f650308ea552454d4fa67271bc0a5bde118","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.c":"03494f9ce1cb82260506e2559e73a3eeb622c4bd51b65eaa0a2c3351862bd4c8","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.h":"264d9a18421cde255ce34a0a62b3d8e73465359f0d167e64aa3973062aae5bdd","glsl-optimizer/src/compiler/glsl/glcpp/glcpp-parse.y":"fafb66e3a8f149d19e085f18a4273ba6d4c11af9e9a01d665cc784dddf97b79f","glsl-optimizer/src/compiler/glsl/glcpp/glcpp.c":"37ed294403c2abfd17fd999d1ae8d11b170e5e9c878979fefac74a31195c96b0","glsl-optimizer/src/compiler/glsl/glcpp/glcpp.h":"85ac8b444bcbd0822b66448a1da407b6ae5467b649f5afaf5c58325bd7569468","glsl-optimizer/src/compiler/glsl/glcpp/pp.c":"a52d94f1bcb3fb2747a95709c4a77c25de7eea8354d2b83bb18efd96976a4473","glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.c":"d11aeb3acfe966d1b78f1ee49804093f2434214c41391d139ffcb67b69dc9862","glsl-optimizer/src/compiler/glsl/glcpp/pp_standalone_scaffolding.h":"abbf1f36ec5a92d035bfbb841b9452287d147616e56373cdbee1c0e55af46406","glsl-optimizer/src/compiler/glsl/glsl_lexer.cpp":"272b9fc1383d72b81bfc03fa11fdf82270ed91a294e523f9ce2b4554bd3effa9","glsl-optimizer/src/compiler/glsl/glsl_lexer.ll":"2b57d9f9eb830c3d7961d4533048a158ee6f458c8d05c65bea7b7cfbc36e4458","glsl-optimizer/src/compiler/glsl/glsl_optimizer.cpp":"f8095d20629d0af70be930b0612e169edb274551a1d25a3cd1bf9995a11ce2e8","glsl-optimizer/src/compiler/glsl/glsl_optimizer.h":"22e843b4ec53ba5f6cd85ca5f7bad33922dca8061b19fb512d46f1caca8d4757","glsl-optimizer/src/compiler/glsl/glsl_parser.cpp":"126baf368d525aba301854e3d91ba60b5aee32e1102376af71416f32cb95ec48","glsl-optimizer/src/compiler/glsl/glsl_parser.h":"2ea9a50716098a8f7bef782d2a030d757b68da73afb01b4d4940d3e8381d44e8","glsl-optimizer/src/compiler/glsl/glsl_parser.yy":"6b1fd1576b29fce005dff744a6dbd0219e4c695c361d61864e1f3a8d6fa6b764","glsl-optimizer/src/compiler/glsl/glsl_parser_extras.cpp":"aad64b5b66467da650091430681e8c6a820cf3cadc4db3c160bf2f15875390ae","glsl-optimizer/src/compiler/glsl/glsl_parser_extras.h":"71fd0e92bbdb193dfb067d7bfdb1200d77392be2fbd0cbfc9ca89d1bb4c7e741","glsl-optimizer/src/compiler/glsl/glsl_symbol_table.cpp":"6660fb83c0ddddbbd64581d46ccfdb9c84bfaa99d13348c289e6442ab00df046","glsl-optimizer/src/compiler/glsl/glsl_symbol_table.h":"24682b8304e0ea3f6318ddb8c859686bd1faee23cd0511d1760977ae975d41bf","glsl-optimizer/src/compiler/glsl/hir_field_selection.cpp":"72a039b0fcab4161788def9e4bedac7ac06a20d8e13146529c6d246bd5202afd","glsl-optimizer/src/compiler/glsl/int64.glsl":"303dbe95dde44b91aee3e38b115b92028400d6a92f9268975d607471984e13eb","glsl-optimizer/src/compiler/glsl/ir.cpp":"2b4741cce90b5d4abff5d719c7324e2693c67294d4d99736cb241554adb281bc","glsl-optimizer/src/compiler/glsl/ir.h":"990b1c74447c4eb4835353ccb0ed9aea644f97fc1129ef1739cd935075d85d2e","glsl-optimizer/src/compiler/glsl/ir_array_refcount.cpp":"8cdc1cffe01e42e0566fa2193a75f789628e8025ad1b82f0ee6f204451b7f9f7","glsl-optimizer/src/compiler/glsl/ir_array_refcount.h":"75f06ec81342b379096ca52e1dc0fd5f19a11ff8e9b58203c20628179d644c12","glsl-optimizer/src/compiler/glsl/ir_basic_block.cpp":"1e2920b1c0e
cb08424c745c558f84d0d7e44b74585cf2cc2265dc4dfede3fa2f","glsl-optimizer/src/compiler/glsl/ir_basic_block.h":"81be7da0fc0ee547cd13ec60c1fcd7d3ce3d70d7e5e988f01a3b43a827acdf05","glsl-optimizer/src/compiler/glsl/ir_builder.cpp":"daba29c5a1efdd5a9754f420eb3e2ebdf73485273497f40d4863dadeddb23c0d","glsl-optimizer/src/compiler/glsl/ir_builder.h":"2822e74dd3f6e3df8b300af27d5b11ea2dd99d0e5e7ca809b7bbcce9833c483c","glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.cpp":"8c6df5abf2fe313363f285f171c19ca6c8ee4f3bc2ed79d33c0c88cc8be45c48","glsl-optimizer/src/compiler/glsl/ir_builder_print_visitor.h":"799852adc3a0e54d04080655e7cebfa0d3bf5b6ffed5d8414f141380665d4db7","glsl-optimizer/src/compiler/glsl/ir_clone.cpp":"d897a4e1f5bbec4a6a2f15044c1be9a4d13899c73be77335b041049a4589aa5d","glsl-optimizer/src/compiler/glsl/ir_constant_expression.cpp":"78bd87ddb09db67f6c499067728d72aef4f16aa02721a99a4b769d1e0cfa9010","glsl-optimizer/src/compiler/glsl/ir_equals.cpp":"bca28533a6310b0fc152b56d80872368f1510dc62ed6e8ac199b9ffa7fac02e7","glsl-optimizer/src/compiler/glsl/ir_expression_flattening.cpp":"7e918d4e1f237eca01396004015865ce345afe32a876c9dbc6728576a1a7eae4","glsl-optimizer/src/compiler/glsl/ir_expression_flattening.h":"f45b66aa9497520e7e08e612d24b308477c34477fbd963ee9320eac664957f16","glsl-optimizer/src/compiler/glsl/ir_expression_operation.h":"cc9f10727dbd26cac506804f51456302c702650f9eeb59054a7e1575d5cf6687","glsl-optimizer/src/compiler/glsl/ir_expression_operation.py":"7b86c96021b9fbe165957f4ecb0b612fefcde1c2cf3c6d75e3cdb22e369216ba","glsl-optimizer/src/compiler/glsl/ir_expression_operation_constant.h":"9ad3346416392e3efa11e12ecf2feca7453c5253d241eb96c91dfb85d4f2b971","glsl-optimizer/src/compiler/glsl/ir_expression_operation_strings.h":"a6826daf496a8b9e89885bc2a161ac3445d501b23c6e0ac33e2c01b506b273c8","glsl-optimizer/src/compiler/glsl/ir_function.cpp":"7537365fc0fbe4b37a26b9a2146cc64d3e9a774d60eab63b65002ad165ae8fc7","glsl-optimizer/src/compiler/glsl/ir_function_can_inline.cpp":"faddbf112187a048d502716a3fb82570a322299ba2a3abd79388382c82040bfc","glsl-optimizer/src/compiler/glsl/ir_function_detect_recursion.cpp":"9176973eaf5c0a984701f953bb7a80f37dca43d59b5bce50fc69b3f02f2902d7","glsl-optimizer/src/compiler/glsl/ir_function_inlining.h":"9739493f99c489987d650762fccdd3fb3d432f6481d67f6c799176685bd59632","glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.cpp":"3725861fbe2b98e0617f52d3b14cf6d3b25fb5ec00f5ef5d308b03642f592767","glsl-optimizer/src/compiler/glsl/ir_hierarchical_visitor.h":"e0560210e966c0c31e4ca843e80ea154e64db5a444b8c2df845b6ba5b3a43fc1","glsl-optimizer/src/compiler/glsl/ir_hv_accept.cpp":"caf7ce2cd9494aadd3c58bcf77f29de58368dc9e347a362bbf37f8bda9509b80","glsl-optimizer/src/compiler/glsl/ir_optimization.h":"8b3dcfc7f9e96b21a8dd47a0040d90be483a9e67a2cdce3a697188fb758d4630","glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.cpp":"f8e34a983452be0dcb5a695e9c8e895eead24f9e540992a8afe510ae85da4c4c","glsl-optimizer/src/compiler/glsl/ir_print_glsl_visitor.h":"1ad1bd3efd1ace39051c13f904c05fd80425d329444f9a8d47fd6d948faf46e0","glsl-optimizer/src/compiler/glsl/ir_print_visitor.cpp":"643f5a68aae3fb37267fd793f1216d1cfdeb2c09338c26b1f30e4c6deaef4de5","glsl-optimizer/src/compiler/glsl/ir_print_visitor.h":"4573eb93268a2654c14b505253dd651e2695d43dc745904d824da18305269b95","glsl-optimizer/src/compiler/glsl/ir_reader.cpp":"06bfba802c8354e5a8b2334b6d78d6297de18235bedd3f8fbb382c89870b02f2","glsl-optimizer/src/compiler/glsl/ir_reader.h":"63e3f7f1597936a7011d5b520e171b197bf82bee6c1560d822c3edf5aaa6f9e9","glsl-o
ptimizer/src/compiler/glsl/ir_rvalue_visitor.cpp":"84b5c5d746555adca85759c2912fe48010232b7c1c0bd2cf03bd04067a85e66f","glsl-optimizer/src/compiler/glsl/ir_rvalue_visitor.h":"fd8c561b71085d3211fff85ed514fecb299d8ce19a04bc063419a55b6d840525","glsl-optimizer/src/compiler/glsl/ir_set_program_inouts.cpp":"ab9f115ce9e7f312d9c7978340ced0dc4ae6d13a80e08442ba9709d11d50cae5","glsl-optimizer/src/compiler/glsl/ir_uniform.h":"683ae6896b1a08470c090be5f822fc31cd434eab9216e954b9bba24a46975109","glsl-optimizer/src/compiler/glsl/ir_unused_structs.cpp":"9c1620c45f2fc071fe5ed828472040b14c5f42effe06aa0e3b8352c95ef78786","glsl-optimizer/src/compiler/glsl/ir_unused_structs.h":"13387b49c23093575276b25b9dfd31fedd8f131c5c4f3128ab04cf03e15b5295","glsl-optimizer/src/compiler/glsl/ir_validate.cpp":"6b232be5999a86ea278f4f15b2832d76843246509118d924243055a3b9b0299f","glsl-optimizer/src/compiler/glsl/ir_variable_refcount.cpp":"2764a3cad937d53f36db7447c3a5b98b04bf153acf81074d971857fc5bca460d","glsl-optimizer/src/compiler/glsl/ir_variable_refcount.h":"b0668e3eb1501ef65e38fe12830742ecb3d28e6039f30e366c8924efc29b4a39","glsl-optimizer/src/compiler/glsl/ir_visitor.h":"f21b3534c3d66d5fb707d1581fece7e1eb043523afbaedf89918cfb031c6df94","glsl-optimizer/src/compiler/glsl/link_atomics.cpp":"360f0209e11f367ba358223597b0a118bae095bff16337cf03f1fb89c5b80ca6","glsl-optimizer/src/compiler/glsl/link_functions.cpp":"de7895da8aa33a1e3c2c1eb2fdaf267ab5d1fbfdb79ae2e67f95211e946e294c","glsl-optimizer/src/compiler/glsl/link_interface_blocks.cpp":"1926cfa73810704eb19b916c1b2cdb9321155e2f98b2a0a57c7c3c6e960540cd","glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.cpp":"1e14e06ca3b2c1089cfba2e8eaf0c1f373d9d6374b6082f320962dd71ae09611","glsl-optimizer/src/compiler/glsl/link_uniform_block_active_visitor.h":"fd58c155af645295bb6aec08797889de586f4d919731de2bce57e8dce59bb048","glsl-optimizer/src/compiler/glsl/link_uniform_blocks.cpp":"09589f49776dce32e6c4044937de7e0c839a9754ad31960148f8f9e010658997","glsl-optimizer/src/compiler/glsl/link_uniform_initializers.cpp":"bf98e08c12db466acf9623cbeb8fa8e3b4002512722e7a6521287f558a099f37","glsl-optimizer/src/compiler/glsl/link_uniforms.cpp":"84bad5b1377362cecf259b05124239be5220b03ce1c0c61b59bd9a47e4379af2","glsl-optimizer/src/compiler/glsl/link_varyings.cpp":"a5f1a53e7c80d635515fe808ff223d89fef1767abb0f2b7aa28fa6773dca353f","glsl-optimizer/src/compiler/glsl/link_varyings.h":"b9dbe018f038df69763df2e928742ce81bbc6e3aaba26f50621e30a6d9aa6220","glsl-optimizer/src/compiler/glsl/linker.cpp":"40b1ecd5d4f6c7f13d5a87ce390561a51fdf6f3fcd9b2197b9c88b03a773ba94","glsl-optimizer/src/compiler/glsl/linker.h":"ecf94b4ad75ef461c27c557fda4bd25f34c91930822b8e1d729ec84520d4a049","glsl-optimizer/src/compiler/glsl/linker_util.cpp":"1663ad88e2a369305659aeeffaedb5bd752cf76340a2ba5797fc0bf600633cf9","glsl-optimizer/src/compiler/glsl/linker_util.h":"6db788daf9c8e87ae2764b61a8b37ebe419e69c1b82ddee01986e37c978c6993","glsl-optimizer/src/compiler/glsl/list.h":"b1f46ce0e552fe7c45b2a19408a9d97662e23e4b182ab335491c26f8cf25886f","glsl-optimizer/src/compiler/glsl/loop_analysis.cpp":"57ecd573477c68091c7cc99537faa7139a8f395935e3d4f10144cefdefb5a611","glsl-optimizer/src/compiler/glsl/loop_analysis.h":"a85f045a038ee5b5176063e85d7988865862c44ab0580f771b993a042d0b69cc","glsl-optimizer/src/compiler/glsl/loop_unroll.cpp":"bd4292ea2809f5a669bcb76ceaa1ac365772dcd638c579c3ed10275214901a54","glsl-optimizer/src/compiler/glsl/lower_blend_equation_advanced.cpp":"8cfbef140d9c4b4d2f57bfa05c9c374d31a121d0f87afce94333f049023b654a","glsl-optimizer/src/c
ompiler/glsl/lower_buffer_access.cpp":"1ae221c3c7a95aeb867207e7a742be635f91b406c157747bfd6ddf10274d97fb","glsl-optimizer/src/compiler/glsl/lower_buffer_access.h":"807886953a576a323591798cbca5e2df24295ea893b28affd8ffb5926cebaa04","glsl-optimizer/src/compiler/glsl/lower_builtins.cpp":"4d81afc32cf58e1481fcb5e42888ab93dbe6820310a20ff7a9982b77b2152d9b","glsl-optimizer/src/compiler/glsl/lower_const_arrays_to_uniforms.cpp":"608403f0eeeedf21cfcd3014116e0f44e28cbdf6c4c32aac7e613e64e30205e1","glsl-optimizer/src/compiler/glsl/lower_cs_derived.cpp":"179905cd47a294122adeb5b0abfed6f2f67782dcde21b544d1ee2c1985154e66","glsl-optimizer/src/compiler/glsl/lower_discard.cpp":"3b361b2db0004d544d64611cb50d5a6e364cf6c5f2e60c449085d7d753dd7fb0","glsl-optimizer/src/compiler/glsl/lower_discard_flow.cpp":"f5c29b6a27690bb5c91f196d1a1cf9f6be4f1025292311fe2dac561ce6774dee","glsl-optimizer/src/compiler/glsl/lower_distance.cpp":"a118c85493d5d22b2c059a930c51a5854896d4b1dade76598eaa985e5a3dff8c","glsl-optimizer/src/compiler/glsl/lower_if_to_cond_assign.cpp":"469e617757fd1728709cce021aac5c8da05ee503bf5366977bdc4ef7a6d83950","glsl-optimizer/src/compiler/glsl/lower_instructions.cpp":"6ff5c396abe40d8a2145d571e99e2bbe9143393e15aafc28adc2803a01d821b6","glsl-optimizer/src/compiler/glsl/lower_int64.cpp":"d1ed41196880dd53c7b13e2782f9423f8442bf1d46186e8be92b1b66218a83ee","glsl-optimizer/src/compiler/glsl/lower_jumps.cpp":"34de7b493f281589fb0c2c0f6e885d0a0fabbe7a4e97a73de374dd714777a58c","glsl-optimizer/src/compiler/glsl/lower_mat_op_to_vec.cpp":"dff7a308edc4846c348ed4225c6699a9c75abac68d88f41f85954276552779f4","glsl-optimizer/src/compiler/glsl/lower_named_interface_blocks.cpp":"16063ac127bff75a68272070ab11c21c25101edbff62b4c68f4983b4cd941af0","glsl-optimizer/src/compiler/glsl/lower_offset_array.cpp":"3b00773399135aea85746a5a68b96ef000bc6841be1a2c8e6f25c516628b0949","glsl-optimizer/src/compiler/glsl/lower_output_reads.cpp":"a0fc9975d5aa1617e21fc6c353659a9802da9e83779a3eef4ec584f74b4dadc5","glsl-optimizer/src/compiler/glsl/lower_packed_varyings.cpp":"7550099d4ae123d71541c2fc88bc04fbfe9271ec75d7e210987d1c8cac3cf3ea","glsl-optimizer/src/compiler/glsl/lower_packing_builtins.cpp":"79a13d161fe505a410ab948d92769395708693ec888153630fa240e5b97e356f","glsl-optimizer/src/compiler/glsl/lower_precision.cpp":"f82a185b879872b977a1787d8061b9a80bc4cf8db1b970db6efba2ad9cc20fa2","glsl-optimizer/src/compiler/glsl/lower_shared_reference.cpp":"ea2dccf50a83bc19391bf6b7ab6aa53c0005f427af4066d25140340af9a4beef","glsl-optimizer/src/compiler/glsl/lower_subroutine.cpp":"f69fa53650eeb6f2944fce4d36a6e0a423e6705f3a3bd3389c7fadb83cfc8802","glsl-optimizer/src/compiler/glsl/lower_tess_level.cpp":"b196c9d424c0569f3e85d75c2d125af21566cb113d69036db87c0990703e0fa7","glsl-optimizer/src/compiler/glsl/lower_texture_projection.cpp":"4d247f244272adc8250fd888d8d932a140dd5de4d1efc7a58492c3c2b8291527","glsl-optimizer/src/compiler/glsl/lower_ubo_reference.cpp":"89bdbc6c1669230c644c0857db1ce2781ec61d349ecd08c7914146e1f4750a4a","glsl-optimizer/src/compiler/glsl/lower_variable_index_to_cond_assign.cpp":"fce930f29ac9405b297d1f749d68f59506b89c70b4ee1b1ab8cf49a34cc71ecf","glsl-optimizer/src/compiler/glsl/lower_vec_index_to_cond_assign.cpp":"3c67d851a11a55fad1c49a550f3a0cfe50892d33a3f238ce266cd829eba510a8","glsl-optimizer/src/compiler/glsl/lower_vec_index_to_swizzle.cpp":"f5ec666b73e1415cbab32519a53605ed385f3b03e889560373dbce69dda5000e","glsl-optimizer/src/compiler/glsl/lower_vector.cpp":"f7c13f5572ebe09b6a71553133b2cf003cd4b77b9657600672ee3b21bf890725","glsl-optimizer/src/compiler/glsl/l
ower_vector_derefs.cpp":"b05793da6dd620a531b43df5af8b2ecbc37b9db0c88910f5724ea10bcd057e19","glsl-optimizer/src/compiler/glsl/lower_vector_insert.cpp":"fee772ec17eea5e86a529bf9c5fa2ee0d29a5982bb75ebc6d68ed36cd19aa299","glsl-optimizer/src/compiler/glsl/lower_vertex_id.cpp":"690e8715182e03fead5cc5a35251fb4f41b357e4c71a1dfbc4bd7be19862b56d","glsl-optimizer/src/compiler/glsl/lower_xfb_varying.cpp":"58c0e8b270e4bbde54250be03cdb2f36966bcafb785372ad2e2b786835df7f9f","glsl-optimizer/src/compiler/glsl/main.cpp":"ae5e88abbbc8a12f769e1296bad938b9d7398cc6da0d3d0caeceeeb876536850","glsl-optimizer/src/compiler/glsl/opt_add_neg_to_sub.h":"f5054944bfd068810629080d0ea11df78b3f57a8f86df75e13ca50157ad1964d","glsl-optimizer/src/compiler/glsl/opt_algebraic.cpp":"25f45b20e1972ee8c789177a1aeda6e4286c25db2eae3a43ff83029ae64969c0","glsl-optimizer/src/compiler/glsl/opt_array_splitting.cpp":"19d3ce0e815438f4df9ab2890e767b03a4f3f191b53bb30c0217cf2ae6a95430","glsl-optimizer/src/compiler/glsl/opt_conditional_discard.cpp":"0e44e0e126711a3725c1f3a2aa65ff03c381fed08680ffc30101aae60f716c4e","glsl-optimizer/src/compiler/glsl/opt_constant_folding.cpp":"a088d04d9b45f9e55e235835648f614c89b7803c03a6d4f6a6d1a6bc1f0228bd","glsl-optimizer/src/compiler/glsl/opt_constant_propagation.cpp":"8a9440d77ecd6dcf13e683cbb99943aab6311c8fd4b5f6a9189a8d4f270746f4","glsl-optimizer/src/compiler/glsl/opt_constant_variable.cpp":"63d3ccd4dd09f19c9cf1a2f51592111bed41284504f29f3c0de4cadebc439a37","glsl-optimizer/src/compiler/glsl/opt_copy_propagation_elements.cpp":"ffa0f50863995e0d2e31f55a52e82319edc71e520987bebd7f7e561ea331c64b","glsl-optimizer/src/compiler/glsl/opt_dead_builtin_variables.cpp":"84e8747b948232f01dd56b428b9315f96f9511f605f240119fc446fae28981a9","glsl-optimizer/src/compiler/glsl/opt_dead_builtin_varyings.cpp":"761523e88f5b3ba785170f4d7205e94fa99acb7e74d29efbe40e1c010e1dbdb3","glsl-optimizer/src/compiler/glsl/opt_dead_code.cpp":"fd1ba2da7337d4e5dad17f5c2d73d9cc8880305f423e85d64cf94553588fa401","glsl-optimizer/src/compiler/glsl/opt_dead_code_local.cpp":"969a598b4df322baf222258a66cd64a326ea20e5b3125be9d8d1771f522c69e0","glsl-optimizer/src/compiler/glsl/opt_dead_functions.cpp":"774cae6536d02edf26e996a2a895e1f62d5098f16dc96b44798b4fc731a9a95f","glsl-optimizer/src/compiler/glsl/opt_flatten_nested_if_blocks.cpp":"3696a5c55f02e20056e085bc2714f73ac992f221b6f3387d655068e86b512046","glsl-optimizer/src/compiler/glsl/opt_flip_matrices.cpp":"44f0fe05b49329667671f88c96dc86ab3fe1459ff7b87f2b2d88de2d49829f9f","glsl-optimizer/src/compiler/glsl/opt_function_inlining.cpp":"fb56a33c90419a01676b57cbd91d0674a54cca40e6defaacc88dd33facebc131","glsl-optimizer/src/compiler/glsl/opt_if_simplification.cpp":"ac406eb35e379c357641d6c5749f50c65961455924d3dc884e2b90046fa92c5c","glsl-optimizer/src/compiler/glsl/opt_minmax.cpp":"8abd59d3b14ef60ff14a9c69660e6945f5cf10b97edb4afebe56be3f81d96316","glsl-optimizer/src/compiler/glsl/opt_rebalance_tree.cpp":"8bb6329dc0f299042368fc81934c2df019b45ab9f7aa0415d4e57b8d1ff98c9f","glsl-optimizer/src/compiler/glsl/opt_redundant_jumps.cpp":"222c73e2ac7a938ebb6428cc6c780c908ff6156d8ff935b04fed93a48fc10496","glsl-optimizer/src/compiler/glsl/opt_structure_splitting.cpp":"2edc79cc13f3177934e0443ad62f5976a1991f01f86ea303a803434849b13a47","glsl-optimizer/src/compiler/glsl/opt_swizzle.cpp":"015d0abddfe507f67c4b96c82988d861d018ededf7bf055e2bcbe9ea92da694e","glsl-optimizer/src/compiler/glsl/opt_tree_grafting.cpp":"46d28ac983ea244a4315bdc0e8892979ec4d1f9b9a96ac8a8a08006d9bc5e878","glsl-optimizer/src/compiler/glsl/opt_vectorize.cpp":"d80ee43bb97d9f0
16fb9c5e1e06f5b2afa569811f368ba067be794ec11d085fb","glsl-optimizer/src/compiler/glsl/program.h":"2982447e2abd35371e273ad87951722782a8b21c08294f67c39d987da1e1c55f","glsl-optimizer/src/compiler/glsl/propagate_invariance.cpp":"080943e21baa32494723a2eefb185915d2daae1f46d6df420145c5ad6857e119","glsl-optimizer/src/compiler/glsl/s_expression.cpp":"1ced972bc6ecc8eab4116ea71fb0212ab9ae5bcc0be3b47aa5d9d903566b3af1","glsl-optimizer/src/compiler/glsl/s_expression.h":"65b847e30e22a809b57d0bc70243049c99d9c6318803c5b8d0826aba55dc217e","glsl-optimizer/src/compiler/glsl/serialize.cpp":"be0eb4251348a9d921acb839a5c48c6023a2e9d116d602bb0432787ab623655d","glsl-optimizer/src/compiler/glsl/serialize.h":"57425732eba1233d928e5f07f88b623ce65af46b3bb034bf147f0a4b7f94f9a1","glsl-optimizer/src/compiler/glsl/shader_cache.cpp":"e0c5c433f2df3fccdf1d61281bfcb0ee5633433339b97c697d64db99611cbaaf","glsl-optimizer/src/compiler/glsl/shader_cache.h":"9217164d8d7f54aca0fe5922c7187095a6ae0cb703b196b79805aeef07a7e697","glsl-optimizer/src/compiler/glsl/standalone.cpp":"8e6c416a14d631261917a5fe4cc91880c287b22b2dfd70eb22028289a8fa5364","glsl-optimizer/src/compiler/glsl/standalone.h":"a7c397d1dfdd1e7fb2cfe99db35cd9df93251e642059208533202b7f20497f83","glsl-optimizer/src/compiler/glsl/standalone_scaffolding.cpp":"970d14b7a9d58e5270321f97bf5d57795558b1c570a56678e04a65b26c60bf4f","glsl-optimizer/src/compiler/glsl/standalone_scaffolding.h":"d921a617ea82b9e49413314492a645c44356de503581b1be3f1b57de236e480d","glsl-optimizer/src/compiler/glsl/string_to_uint_map.cpp":"d824bf5b839bd39498dc9e457103cdbe3e5289ddf7564107c27b1505948dd31f","glsl-optimizer/src/compiler/glsl/string_to_uint_map.h":"e2f18e66359c9d620e085de7f4a334a47df9c66e65a5bfe8b734c627bec04104","glsl-optimizer/src/compiler/glsl/test_optpass.h":"b27b8f35f5387e7ce4982bb51c7b63ccf14f91757f3108a5d02ed006925bb8a0","glsl-optimizer/src/compiler/glsl/xxd.py":"376484142f27f45090ea8203ae2621abf73f06175cb0ee8d96f44a3b9327f4bd","glsl-optimizer/src/compiler/glsl_types.cpp":"044bb6754f45419a3151e7a25c39202a82009ae3c6bc54ff7f0bb4258a5deefe","glsl-optimizer/src/compiler/glsl_types.h":"fd899a42f34ddeb8601bc3cd6c5e3aed82fc8aef4042dde1b39b3c01e1dcc219","glsl-optimizer/src/compiler/shader_enums.c":"436bff5216b11b0980bdfada5885fc6ac9afa2037a3027fcd6eea2a8635597ac","glsl-optimizer/src/compiler/shader_enums.h":"13220442a5c02e83540cf2c0ad4f8417b2fbda5f2586dec4e92082544c937cdd","glsl-optimizer/src/compiler/shader_info.h":"4c5453e81197ca83593ee4f365074b23530f2ab21c78e1733b63dec6f344c12a","glsl-optimizer/src/gallium/auxiliary/util/u_half.h":"3c2b37bda3ccb64387e44b723d29cf9046decab1a893bf42d842e9603398bdee","glsl-optimizer/src/gallium/include/pipe/p_compiler.h":"c75620096ce8523dae90599e50aa2ef6468d3b0e368a77795edeb20dd1abfc0c","glsl-optimizer/src/gallium/include/pipe/p_config.h":"a27692fc35f9e55df3224b7529e66b3001e911e94e6bc5f8f569e493e1ee3fb7","glsl-optimizer/src/gallium/include/pipe/p_defines.h":"be26d68c0acc67c5e44788c6299716a9eee415fd81d7d747e3738a829e3b6b38","glsl-optimizer/src/gallium/include/pipe/p_format.h":"5674215fc41d27496f037cf837717daefbf23ebb38d40ace7c0c414bc08182b0","glsl-optimizer/src/gallium/include/pipe/p_state.h":"d600593aba5f5a17072a6c38f6baa81e01c7994b0174250f7e433bb41684b702","glsl-optimizer/src/mapi/glapi/glapi.h":"73632a625c0ddabc401205e8b5a81eb8af8506868efe4b170d7979ec3619e9c5","glsl-optimizer/src/mesa/main/config.h":"5800259373099e5405de2eb52619f9de242552a479902a3a642a333c8cb3c1e7","glsl-optimizer/src/mesa/main/context.c":"2f3208473d99c94f734b1137ba91889d4a1babb9e7534bf1dc85d851ee98274e",
"glsl-optimizer/src/mesa/main/context.h":"cc7e4194797db9d007f01884e23d786c453b3860821f7f2ddcdf0f1bf3f8ffb1","glsl-optimizer/src/mesa/main/dd.h":"6a964acd06b6c2d88700e69fb75fe3c6b3b3d45bbc41db24f3f897a29695fe0c","glsl-optimizer/src/mesa/main/debug_output.h":"7312422e90b8c0e34028ac27280e438139b5cba525c99deb3ac883cd3d87e452","glsl-optimizer/src/mesa/main/draw.h":"7eaef3a9e27a60ea6f7937109bf3a6190b831162fde0479abb12077ce27c353d","glsl-optimizer/src/mesa/main/enums.h":"87d562a6764f51c014a2274fa7c3aca17c04441537ddd56b2554f13c6fffea92","glsl-optimizer/src/mesa/main/errors.h":"c79444b5df289c90fbb22a33b2d0c23917d9fc4510960088f0b79e53bb56b1b2","glsl-optimizer/src/mesa/main/extensions.h":"a38b2f87cc93c513994281350d69e06c84ff8eded5313ec0a1be33f375e0ebbd","glsl-optimizer/src/mesa/main/extensions_table.c":"17642d1a8c9a0bf2bd61060052d33ff14a005d2b962e6cf91465797a50851e85","glsl-optimizer/src/mesa/main/extensions_table.h":"2c879571c238d2e14461031ac740372fd0f9ac3a34c0d5541bb9b7ed4c0376c8","glsl-optimizer/src/mesa/main/formats.h":"02e2f7ec3e39286cf9f27e2641043e6df8ecb1dfde9e643313210e214af2a929","glsl-optimizer/src/mesa/main/glheader.h":"58217b33eead6aa6b23cd4a291cefeaa6cb84e465f4960daffca97c44d6d1c35","glsl-optimizer/src/mesa/main/glthread.h":"51fb2711f77e7eafcfc52d29d5b844978832b24c930d88accd48d143a6eb9c6f","glsl-optimizer/src/mesa/main/hash.h":"7e7f782034c16a8e693de48e00c31d4a90b0129f4029fd074033d7d16ccbe718","glsl-optimizer/src/mesa/main/macros.h":"73d15ddfd64f2b57b9b2ffeeb993b9c2c0899a80563e9d6ff337b11ccbe6eee5","glsl-optimizer/src/mesa/main/menums.h":"5dfac0e2279d60b0cd0c7b9fc2a5021620d0f6282ed2e738c420214e3af152d3","glsl-optimizer/src/mesa/main/mesa_private.h":"edda678b93438944279a551f663b8858ad84814a9fc88ba9672ef195599c24ae","glsl-optimizer/src/mesa/main/mtypes.h":"6efddefa099e4d2e3fdd97f0055644f47aba21711385edfeabc2d9b0676f2eec","glsl-optimizer/src/mesa/main/shaderobj.h":"9f0dfe96d0c2154201adef942bd36053533ac7b2492fb3786acda5bea514c75e","glsl-optimizer/src/mesa/main/uniforms.h":"4e331e6ad6e9cbded978b4082dbe0a57c1f8f01327446bb6892bfc179976c38b","glsl-optimizer/src/mesa/main/version.h":"9d0a13a758099302dc55cf7d045791834a89b0f9d4cf17b2692259b369a8a9a1","glsl-optimizer/src/mesa/math/m_matrix.h":"a37b19f182e070db3df93b0ede43c22fb8be8c2906504133ee6dbd7db1185d8b","glsl-optimizer/src/mesa/program/dummy_errors.c":"1820e305515b4c5e041f5e1623266a48ec8f076a155310be7d60637101f593e4","glsl-optimizer/src/mesa/program/ir_to_mesa.h":"b47f58d22e3ca2ae42d52501ea769d15c4476834944fa97eeccd3a3439211d00","glsl-optimizer/src/mesa/program/prog_instruction.h":"ab3832152a7e144b59e5a2264b2c29db56d93be31e76bbd958527a56771b40eb","glsl-optimizer/src/mesa/program/prog_parameter.h":"ba18c743284eadbc837c2c364c73e5d372321a7637a76e589d8d39fe8b5de225","glsl-optimizer/src/mesa/program/prog_statevars.h":"fc413698f84bc52d45fdeae0471934ee9904bfb7eac1a2b5f70446e54bcbbdca","glsl-optimizer/src/mesa/program/program.h":"1f01026a4eff440a3f122fd9b519d03546fe7f7d8be60dca834e95a2f8fbbfd2","glsl-optimizer/src/mesa/program/symbol_table.c":"6611cb9f078035bf5ff8c9112093a6c7d99f8af99a3931d0c07f227cc72283ea","glsl-optimizer/src/mesa/program/symbol_table.h":"631dc35ac48d5e87962d45507461920f6575610960ffcc42a08cefeb43300cda","glsl-optimizer/src/mesa/vbo/vbo.h":"6eb1dcd9a08c92f276c5fe08da184ff9d455d1be421913b8ad732a7b65e858fb","glsl-optimizer/src/util/bitscan.h":"d4fcb47b57a50d70cb97f99ca3e619bc06282a877768a435e009775ce8d77f36","glsl-optimizer/src/util/bitset.h":"c40f78515c6230fed18345c6751ce33833a49da7a27901c7e6d7340cbdcbc5e7","glsl-optimizer/src/util/blob.
c":"8f729846f66efc9c15485cc5fc24c6ec861fc1fecb2f652573f2a237d481b791","glsl-optimizer/src/util/blob.h":"93e1eaac866b9a7cd6fc03b533c18fb2edf0e97f03395eff4f3a605c4fc14d0c","glsl-optimizer/src/util/compiler.h":"79e3bf40a5bab704e6c949f23a1352759607bb57d80e5d8df2ef159755f10b68","glsl-optimizer/src/util/crc32.c":"2f3467a046b3a76784ecb9aa55d527698c8607fd0b12c622f6691aaa77b58505","glsl-optimizer/src/util/crc32.h":"59bd81865e51042b73a86f8fb117c312418df095fed2d828c5c1d1c8b6fc6cd4","glsl-optimizer/src/util/debug.c":"c3d68e9752ccc19e66c669562cd113cf1d0ac83cbb30174789e7fb8d1df58f9c","glsl-optimizer/src/util/debug.h":"50068d745c4199ccbd33d68dd4c8a36d2b5179c7869a21e75906ddd0718ca456","glsl-optimizer/src/util/detect_os.h":"343a8790d17a3710c6dd015ee367f84e3902ff3f2e36faca2bf93f9d725d3574","glsl-optimizer/src/util/disk_cache.c":"f533937e5a4fffe76e2739ef4b6b1e1da097d96d63eb808e68ebbc7027641c23","glsl-optimizer/src/util/disk_cache.h":"e83314fb14134a8e079b15e470a6376ba5a8253701f048c890a62b7e55d64bc8","glsl-optimizer/src/util/fast_urem_by_const.h":"e108fce804616c47d071dfe4a04163eec1126e448ed1aa89abb6b3a6d772bd5b","glsl-optimizer/src/util/fnv1a.h":"ab2596f19c6adf431ae27618f62c5743e24ad23ef83bb359a4c4c218245ab459","glsl-optimizer/src/util/format/u_format.h":"4cdfc0c59cbc99a092e5ec5a396910f2d93b9643e5d8141050b011e66f11e45b","glsl-optimizer/src/util/futex.h":"26f7c9d86e9ffef4c0fa2761f1aaa1918337302e20bd6ca10e61dc3c47356deb","glsl-optimizer/src/util/half_float.c":"11bc2584493d5d9d46e8c8a619a0307cf150bf5ab5d0f96bb764b061dc37a00e","glsl-optimizer/src/util/half_float.h":"7f7c380f126da1400a91758cc0392f24bf967bce1672890b62be26fe9fbd922b","glsl-optimizer/src/util/hash_table.c":"0ca40352e35dedab0a84c64c903f1b16d47e950bb5f43b4d22bb57d499bfea6e","glsl-optimizer/src/util/hash_table.h":"217191bb360592e2232f187473c10287d2cda8ae6fa5c53d0ef74c8c206118b4","glsl-optimizer/src/util/list.h":"9fab03c6a78186bb5f173269f825f6ce976b409d931852e3d93bac632e07989a","glsl-optimizer/src/util/macros.h":"63faf65b51058c483b17f1f77da51d1c53c8beab52678cb6bd01f1228a63b6b0","glsl-optimizer/src/util/mesa-sha1.c":"00c692ec353ebc02c06c57c5a71de0ab7a119f86a4146f452e65ec87e4944417","glsl-optimizer/src/util/mesa-sha1.h":"bff4c29f4bf7cdbcefb30fa0c996a7604a380eba8976467c2a60e7cd328f7e26","glsl-optimizer/src/util/mesa-sha1_test.c":"25da89a59d51469f77b4c468ca23ffdce0a7a1166a70b6cc23026a6800b0143c","glsl-optimizer/src/util/os_memory.h":"64555faf1760ae6954f42c83727c38dfc4c278e9152115779ffaad58b42adacf","glsl-optimizer/src/util/os_memory_aligned.h":"12d86fa94be38c13f7eeebdf313795e1267dd5a7187d2f0072e0e896f41702f6","glsl-optimizer/src/util/os_memory_stdc.h":"07360363b88c927065e10df71bebf6c8a0cc3b9167c9dfce55f2d65f11e6f787","glsl-optimizer/src/util/os_misc.c":"a9936e613ec84803abd59ad47c192c8e3939993c950ac91973fdc4cec1801bb8","glsl-optimizer/src/util/os_misc.h":"cc68eb12e05b5e749c54298cb4a6f4cd20cc5af7db3403e70b3c27b56090c740","glsl-optimizer/src/util/os_time.h":"73e775f7335244ff5964c678c27eedf1aea6abea44c4169d327ea8c7ce4a3a88","glsl-optimizer/src/util/ralloc.c":"4b51189595ef67bcef52c40cbf654d969041dbd15e15d4a893ad494ac060aeca","glsl-optimizer/src/util/ralloc.h":"e573c45875ff1530f0dbee9a93ae55535fdac8d5cc88a79ebc327c688824bde5","glsl-optimizer/src/util/rounding.h":"0450722353caf83de07e67f335949dbe95fe53b534052d4ee9d28d2781387614","glsl-optimizer/src/util/set.c":"86f8c9a830bead5a5a79bc970b0ff97809312af07b3beb39ef9d90af04d40a1b","glsl-optimizer/src/util/set.h":"3e39ca161e7ed4ec7c436cc9c7919ed9a55ed1b71edbf2caf6f9bcfd9bc578ed","glsl-optimizer/src/util/sha1/README":"00af7
419af05247081858acb2902efd99fcda2ce16e331079f701645bb3729c0","glsl-optimizer/src/util/sha1/sha1.c":"1403bbe0aad42ba3e6be7e09f7cad87a6a8c4ad5b63962f7b92b9f37d8133b04","glsl-optimizer/src/util/sha1/sha1.h":"68d9f240eab2918026ecdf22be36811abbd4f1389f6c36e31258041aeaedd247","glsl-optimizer/src/util/simple_mtx.h":"12c6c3c4b7db9168bc656d5b3c65912075084d2b388c415d5c3d3f5953a9d6c7","glsl-optimizer/src/util/softfloat.c":"a97e51a96fe5e6a052c02aa6bbec683fe73fb88a8c087d9c930503e2120d8a2e","glsl-optimizer/src/util/softfloat.h":"66664b0250e83bf5dd4cc743acd119d076efcea624a0eab3d6b60718e6ee8811","glsl-optimizer/src/util/string_buffer.c":"63a1d1b1e34926c88ea00159cafbcd56568b805c4f64d1e8c97169fe313921fc","glsl-optimizer/src/util/string_buffer.h":"7b88d1b1d9c6cfb8e93331813535c127289437c75f822029e9a3bca8ea6b52ee","glsl-optimizer/src/util/strndup.h":"0273c4fdb7482cd7746881a63d3998648c6d63415ba85af1d1860f0e0dc504c6","glsl-optimizer/src/util/strtod.c":"5cf610d8a37373cf37cfb7aae903525d943b2674b1f32594c70b0eb19a8c9697","glsl-optimizer/src/util/strtod.h":"237396def4e264d35ed4bedea00ef9a4ceab6d7a11a18c770d9747d22c69ed2d","glsl-optimizer/src/util/u_atomic.h":"c02e809526c6c09ba8fe51f50b2490d1b6c8e5c7f3c4031ae958250d098fc3bb","glsl-optimizer/src/util/u_debug.c":"8c060e379b816618f3dd22c9ea523c68b9425c76c36a7dfe5d6d375b337f5f4a","glsl-optimizer/src/util/u_debug.h":"e11e26edd9b9e4e6f8e6a435e69f4d9edda27e9a379f68f4c82ea2525aaaea68","glsl-optimizer/src/util/u_dynarray.h":"853d0fa6ff2261614488be624deb8a2b01e57c2c8eabc28578cbeed4ccc95694","glsl-optimizer/src/util/u_endian.h":"3ccea7e529740318d8a4b05c00db3adc9d1e292a52bdc56a05c9fae99209720f","glsl-optimizer/src/util/u_math.c":"c868a8c0886dc78f1b06b13404ba8b253090449045774dd56893ac9d75795184","glsl-optimizer/src/util/u_math.h":"a04e32e126db016413f9de0a2028a3e71737137463b1289eae576f884b06fcf1","glsl-optimizer/src/util/u_memory.h":"c5db17c724c70283ddbe04165722f6988d4e0eb9aa3602ae472feff016649af9","glsl-optimizer/src/util/u_queue.h":"92930ce236c0528a98b695f5cea8c5c6aa9683beaf71a2227bdc5d33d1b21506","glsl-optimizer/src/util/u_string.h":"c5a2f4ef576d1547bda12c4ea219179fefa54414977743ac094abcaf696ef6ca","glsl-optimizer/src/util/u_thread.h":"00b708459b27f9910d18db92c18cc65cfc618ac2b3cd144e45f8640057b10d58","glsl-optimizer/src/util/xxhash.h":"2f2aff2fc6c0c929f52cf6ae7314122124c5be026d41ad1c357608383c4a37ad","src/bindings.rs":"79993db2058bde39f99ef483d02560d33b1cb882f6a552319e8b86eb6f9021e1","src/lib.rs":"04be1554cd829eb40864b06d80b491dd48117a4e3a601c7d482117f7a0391e67","wrapper.hpp":"f3ea34cc496f7d90b9bfcada3250b37b314c3524dac693b2ece9517bc7d274ac"},"package":"ee5be629003d587bab188f3e2e3b010aa2cde7c41ec967b3a244f388d4d81877"} \ No newline at end of file
diff --git a/third_party/rust/glslopt/Cargo.toml b/third_party/rust/glslopt/Cargo.toml
index 2ecdbb976a..206ad8f149 100644
--- a/third_party/rust/glslopt/Cargo.toml
+++ b/third_party/rust/glslopt/Cargo.toml
@@ -3,21 +3,28 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "glslopt"
-version = "0.1.9"
+version = "0.1.10"
authors = ["Jamie Nicol <jnicol@mozilla.com>"]
description = "Optimizes GLSL shader code"
-keywords = ["opengl", "gl", "gles", "glsl", "shader"]
+readme = "README.md"
+keywords = [
+ "opengl",
+ "gl",
+ "gles",
+ "glsl",
+ "shader",
+]
license = "MIT"
repository = "https://github.com/jamienicol/glslopt-rs"
+
[build-dependencies.cc]
version = "1.0"
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h b/third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h
deleted file mode 100644
index 5a3b8c19ab..0000000000
--- a/third_party/rust/glslopt/glsl-optimizer/include/c99_alloca.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2015 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef _C99_ALLOCA_H_
-#define _C99_ALLOCA_H_
-
-
-#if defined(_MSC_VER)
-
-# include <malloc.h>
-
-# define alloca _alloca
-
-#elif defined(__sun) || defined(__CYGWIN__)
-
-# include <alloca.h>
-
-#else /* !defined(_MSC_VER) */
-
-# include <stdlib.h>
-
-#endif /* !defined(_MSC_VER) */
-
-
-#endif
diff --git a/third_party/rust/glslopt/glsl-optimizer/include/c99_math.h b/third_party/rust/glslopt/glsl-optimizer/include/c99_math.h
deleted file mode 100644
index e906c26aa5..0000000000
--- a/third_party/rust/glslopt/glsl-optimizer/include/c99_math.h
+++ /dev/null
@@ -1,211 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2007-2015 VMware, Inc.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
- * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-/**
- * Wrapper for math.h which makes sure we have definitions of all the c99
- * functions.
- */
-
-
-#ifndef _C99_MATH_H_
-#define _C99_MATH_H_
-
-#include <math.h>
-#include "c99_compat.h"
-
-
-/* This is to ensure that we get M_PI, etc. definitions */
-#if defined(_MSC_VER) && !defined(_USE_MATH_DEFINES)
-#error _USE_MATH_DEFINES define required when building with MSVC
-#endif
-
-
-#if !defined(_MSC_VER) && \
- __STDC_VERSION__ < 199901L && \
- (!defined(_XOPEN_SOURCE) || _XOPEN_SOURCE < 600) && \
- !defined(__cplusplus)
-
-static inline long int
-lrint(double d)
-{
- long int rounded = (long int)(d + 0.5);
-
- if (d - floor(d) == 0.5) {
- if (rounded % 2 != 0)
- rounded += (d > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline long int
-lrintf(float f)
-{
- long int rounded = (long int)(f + 0.5f);
-
- if (f - floorf(f) == 0.5f) {
- if (rounded % 2 != 0)
- rounded += (f > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline long long int
-llrint(double d)
-{
- long long int rounded = (long long int)(d + 0.5);
-
- if (d - floor(d) == 0.5) {
- if (rounded % 2 != 0)
- rounded += (d > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline long long int
-llrintf(float f)
-{
- long long int rounded = (long long int)(f + 0.5f);
-
- if (f - floorf(f) == 0.5f) {
- if (rounded % 2 != 0)
- rounded += (f > 0) ? -1 : 1;
- }
-
- return rounded;
-}
-
-static inline float
-exp2f(float f)
-{
- return powf(2.0f, f);
-}
-
-static inline double
-exp2(double d)
-{
- return pow(2.0, d);
-}
-
-#endif /* C99 */
-
-
-/*
- * signbit() is a macro on Linux. Not available on Windows.
- */
-#ifndef signbit
-#define signbit(x) ((x) < 0.0f)
-#endif
-
-
-#ifndef M_PI
-#define M_PI (3.14159265358979323846)
-#endif
-
-#ifndef M_E
-#define M_E (2.7182818284590452354)
-#endif
-
-#ifndef M_LOG2E
-#define M_LOG2E (1.4426950408889634074)
-#endif
-
-#ifndef FLT_MAX_EXP
-#define FLT_MAX_EXP 128
-#endif
-
-
-#if defined(fpclassify)
-/* ISO C99 says that fpclassify is a macro. Assume that any implementation
- * of fpclassify, whether it's in a C99 compiler or not, will be a macro.
- */
-#elif defined(__cplusplus)
-/* For C++, fpclassify() should be defined in <cmath> */
-#elif defined(_MSC_VER)
-/* Not required on VS2013 and above. Oddly, the fpclassify() function
- * doesn't exist in such a form on MSVC. This is an implementation using
- * slightly different lower-level Windows functions.
- */
-#include <float.h>
-
-static inline enum {FP_NAN, FP_INFINITE, FP_ZERO, FP_SUBNORMAL, FP_NORMAL}
-fpclassify(double x)
-{
- switch(_fpclass(x)) {
- case _FPCLASS_SNAN: /* signaling NaN */
- case _FPCLASS_QNAN: /* quiet NaN */
- return FP_NAN;
- case _FPCLASS_NINF: /* negative infinity */
- case _FPCLASS_PINF: /* positive infinity */
- return FP_INFINITE;
- case _FPCLASS_NN: /* negative normal */
- case _FPCLASS_PN: /* positive normal */
- return FP_NORMAL;
- case _FPCLASS_ND: /* negative denormalized */
- case _FPCLASS_PD: /* positive denormalized */
- return FP_SUBNORMAL;
- case _FPCLASS_NZ: /* negative zero */
- case _FPCLASS_PZ: /* positive zero */
- return FP_ZERO;
- default:
- /* Should never get here; but if we do, this will guarantee
- * that the pattern is not treated like a number.
- */
- return FP_NAN;
- }
-}
-#else
-#error "Need to include or define an fpclassify function"
-#endif
-
-
-/* Since C++11, the following functions are part of the std namespace. Their C
- * counteparts should still exist in the global namespace, however cmath
- * undefines those functions, which in glibc 2.23, are defined as macros rather
- * than functions as in glibc 2.22.
- */
-#if __cplusplus >= 201103L && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 23))
-#include <cmath>
-
-using std::fpclassify;
-using std::isfinite;
-using std::isinf;
-using std::isnan;
-using std::isnormal;
-using std::signbit;
-using std::isgreater;
-using std::isgreaterequal;
-using std::isless;
-using std::islessequal;
-using std::islessgreater;
-using std::isunordered;
-#endif
-
-
-#endif /* #define _C99_MATH_H_ */
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp
index c549d16d2a..3bf6959e7f 100644
--- a/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp
+++ b/third_party/rust/glslopt/glsl-optimizer/src/compiler/glsl/lower_instructions.cpp
@@ -118,7 +118,7 @@
* Converts double trunc, ceil, floor, round to fract
*/
-#include "c99_math.h"
+#include <math.h>
#include "program/prog_instruction.h" /* for swizzle */
#include "compiler/glsl_types.h"
#include "ir.h"
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h b/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h
index e329d43824..f897ec7969 100644
--- a/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/rounding.h
@@ -24,9 +24,8 @@
#ifndef _ROUNDING_H
#define _ROUNDING_H
-#include "c99_math.h"
-
#include <limits.h>
+#include <math.h>
#include <stdint.h>
#if defined(__SSE__) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 1)) || defined(_M_X64)
diff --git a/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h b/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h
index 59266c1692..42d9e348ec 100644
--- a/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h
+++ b/third_party/rust/glslopt/glsl-optimizer/src/util/u_math.h
@@ -38,10 +38,9 @@
#ifndef U_MATH_H
#define U_MATH_H
-
-#include "c99_math.h"
#include <assert.h>
#include <float.h>
+#include <math.h>
#include <stdarg.h>
#include "bitscan.h"
diff --git a/third_party/rust/litrs/.cargo-checksum.json b/third_party/rust/litrs/.cargo-checksum.json
new file mode 100644
index 0000000000..d12ccffda0
--- /dev/null
+++ b/third_party/rust/litrs/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"03cea7c394dd09087f6b2c7ba4b4641b5c2c50b32b7286cabd5be4850f62f170","Cargo.toml":"6ef884164a0139f0591a381ada2c99d850d38e5f3af3451efa12f808f8a799e0","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"7dc1552e88f49132cb358b1b962fc5e79fa42d70bcbb88c526d33e45b8e98036","README.md":"533d31adf3b4258b838cd6a1cdb58139e2cf761c3c38aa4654f66f34335c9073","src/bool/mod.rs":"53c6eedfd94552689e51233fffb8a99ce9321a32db0f08de8b18d48cda9b1877","src/bool/tests.rs":"a0e6d034036aa04aac6b847bb561bdba759d85c78d4cbb7fb93f4422efb83656","src/byte/mod.rs":"ff2a3e6108a9b32ae0d925ec34735d20194d5c6b27af060516a46d21397c75be","src/byte/tests.rs":"ac36dace42cd151ac9d26cc35701bc8b65f8f1ed6ee1cfef4eeb6caa9dd702bc","src/bytestr/mod.rs":"8fd951374f7edc2077465cd4f97001eece46358f2bb0c45fddb2942aac6ee13b","src/bytestr/tests.rs":"194b28f157196260b1c2a612dfb36fb1dace491db2ed2bbb39227771ed6baf60","src/char/mod.rs":"2bb6f25da83670f18ec40f8a38565aa2294a4cdf81c8bbaf081531a32b6c6d0c","src/char/tests.rs":"9de497c8c7d7a139ff81f3d7bf8b5c682316d983bebb58c58d2af97f4cd26c35","src/err.rs":"54d000c4f37258c6886dd5b7069e2f5282e51aec3731feb77935582ae8c18908","src/escape.rs":"a944e95344df54c16bf4cc6a8fb01a81e2eac2aacd4758b938d3339212fce60c","src/float/mod.rs":"defaf83526acdc8f9b34c7d1ac17d866a93409dc392eb608160778d6bb4a1e25","src/float/tests.rs":"5875403f1a72104973ed83d0cf29d766e7b2fa5c23615c85a5f2eeed02b115c9","src/impls.rs":"c5dd37dd3ecd29c40a0ed243b907765a27729a1b1f73fa2c6762105feb6527bc","src/integer/mod.rs":"2b9109ddd34faf76fc9ce9dfb04bcc6aed4834231c74bd8a774bd256cc57c18a","src/integer/tests.rs":"01147ce9b6742bb1614cf863090699c54bf660b9f2c6a5eb529d67ae92230c0d","src/lib.rs":"2e79c8035d0fb77db9414b5569eeef13b6db8cde48ef2a45ffcf5f2492d02a4a","src/parse.rs":"e1fa4a76331d52f711e1b06cdba853a4f815281366f4f4f68b4c0a109f8a1734","src/string/mod.rs":"52a9cda38f7cd5b025bc5ec7edb8106487ba3d141789f5bc239c4561490cdc29","src/string/tests.rs":"1e0150ddd921a74ed5ebf6216708132d7768f3beb11a8c7bbfcf4ba01db40a5b","src/test_util.rs":"3badda83d7f256bb25b840820bc0d3a6523b4ded913555cbea5533b6ccad5654","src/tests.rs":"9f0dc2fe7a0eefb6575acd824767bb7d837a584dc7999ef59a457255a2cd7f3d"},"package":"b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5"} \ No newline at end of file
diff --git a/third_party/rust/litrs/CHANGELOG.md b/third_party/rust/litrs/CHANGELOG.md
new file mode 100644
index 0000000000..e2927c2964
--- /dev/null
+++ b/third_party/rust/litrs/CHANGELOG.md
@@ -0,0 +1,103 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+
+## [Unreleased]
+
+## [0.4.1] - 2023-10-18
+- Fixed incorrectly labeling `27f32` as a float literal in docs.
+- Added hint to integer literal docs about parsing as `u128`.
+
+## [0.4.0] - 2023-03-05
+### Added
+- Add ability to parse literals with arbitrary suffixes (e.g. `"foo"bla` or `23px`)
+- Add `suffix()` method to all literal types except `BoolLit`
+- Add `IntegerBase::value`
+- Add `from_suffix` and `suffix` methods to `FloatType` and `IntegerType`
+- Add `FromStr` and `Display` impls to `FloatType` and `IntegerType`
+
+### Changed
+- **Breaking**: Mark `FloatType` and `IntegerType` as `#[non_exhaustive]`
+- **Breaking**: Fix integer parsing for cases like `27f32`. `Literal::parse`
+ and `IntegerLit::parse` will both identify this as an integer literal.
+- **Breaking**: Fix float parsing by correctly rejecting inputs like `27f32`. A
+ float literal must have a period OR an exponent part, according to the spec.
+ Previously decimal integers were accepted in `FloatLit::parse`.
+- Improved some parts of the docs
+
+### Removed
+- **Breaking**: Remove `OwnedLiteral` and `SharedLiteral`
+
+## [0.3.0] - 2022-12-19
+### Breaking
+- Bump MSRV (minimal supported Rust version) to 1.54
+
+### Added
+- Add `raw_input` and `into_raw_input` to non-bool `*Lit` types
+- Add `impl From<*Lit> for pm::Literal` (for non-bool literals)
+- Add `impl From<BoolLit> for pm::Ident`
+
+### Fixed
+- Fix link to reference and clarify bool literals ([#7](https://github.com/LukasKalbertodt/litrs/pull/7))
+
+### Internals
+- Move lots of parsing code into non-generic functions (this hopefully reduces compile times)
+- To implement `[into_]raw_input` for integer and float literals, their
+ internals were changed a bit so that they store the full input string now.
+
+## [0.2.3] - 2021-06-09
+### Changed
+- Minor internal code change to bring MSRV from 1.52 to 1.42
+
+## [0.2.2] - 2021-06-09
+### Changed
+- Fixed (byte) string literal parsing by:
+ - Correctly handling "string continue" sequences
+ - Correctly converting `\r\n` into `\n`
+
+## [0.2.1] - 2021-06-04
+### Changed
+- Fixed the `expected` value of the error returned from `TryFrom<TokenTree>` impls in some cases
+
+## [0.2.0] - 2021-05-28
+### Changed
+- **Breaking**: rename `Error` to `ParseError`. That describes its purpose more
+ closely and is particularly useful now that other error types exist in the library.
+
+### Removed
+- **Breaking**: remove `proc-macro` feature and instead offer the corresponding
+ `impl`s unconditionally. Since the feature didn't enable/disable a
+ dependency (`proc-macro` is a compiler provided crate) and since apparently
+ it works fine in `no_std` environments, I dropped this feature. I don't
+ currently see a reason why the corresponding impls should be conditional.
+
+### Added
+- `TryFrom<TokenTree> for litrs::Literal` impls
+- `From<*Lit> for litrs::Literal` impls
+- `TryFrom<proc_macro[2]::Literal> for *Lit`
+- `TryFrom<TokenTree> for *Lit`
+- `InvalidToken` error type for all new `TryFrom` impls
+
+
+## [0.1.1] - 2021-05-25
+### Added
+- `From` impls to create a `Literal` from references to proc-macro literal types:
+ - `From<&proc_macro::Literal>`
+ - `From<&proc_macro2::Literal>`
+- Better examples in README and repository
+
+## 0.1.0 - 2021-05-24
+### Added
+- Everything
+
+
+[Unreleased]: https://github.com/LukasKalbertodt/litrs/compare/v0.4.1...HEAD
+[0.4.1]: https://github.com/LukasKalbertodt/litrs/compare/v0.4.0...v0.4.1
+[0.4.0]: https://github.com/LukasKalbertodt/litrs/compare/v0.3.0...v0.4.0
+[0.3.0]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.3...v0.3.0
+[0.2.3]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.2...v0.2.3
+[0.2.2]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.1...v0.2.2
+[0.2.1]: https://github.com/LukasKalbertodt/litrs/compare/v0.2.0...v0.2.1
+[0.2.0]: https://github.com/LukasKalbertodt/litrs/compare/v0.1.1...v0.2.0
+[0.1.1]: https://github.com/LukasKalbertodt/litrs/compare/v0.1.0...v0.1.1
diff --git a/third_party/rust/litrs/Cargo.toml b/third_party/rust/litrs/Cargo.toml
new file mode 100644
index 0000000000..6e65403490
--- /dev/null
+++ b/third_party/rust/litrs/Cargo.toml
@@ -0,0 +1,51 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+rust-version = "1.54"
+name = "litrs"
+version = "0.4.1"
+authors = ["Lukas Kalbertodt <lukas.kalbertodt@gmail.com>"]
+exclude = [".github"]
+description = """
+Parse and inspect Rust literals (i.e. tokens in the Rust programming language
+representing fixed values). Particularly useful for proc macros, but can also
+be used outside of a proc-macro context.
+"""
+documentation = "https://docs.rs/litrs/"
+readme = "README.md"
+keywords = [
+ "literal",
+ "parsing",
+ "proc-macro",
+ "type",
+ "procedural",
+]
+categories = [
+ "development-tools::procedural-macro-helpers",
+ "parser-implementations",
+ "development-tools::build-utils",
+]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/LukasKalbertodt/litrs/"
+
+[dependencies.proc-macro2]
+version = "1"
+optional = true
+
+[dependencies.unicode-xid]
+version = "0.2.4"
+optional = true
+
+[features]
+check_suffix = ["unicode-xid"]
+default = ["proc-macro2"]
diff --git a/third_party/rust/litrs/LICENSE-APACHE b/third_party/rust/litrs/LICENSE-APACHE
new file mode 100644
index 0000000000..1b5ec8b78e
--- /dev/null
+++ b/third_party/rust/litrs/LICENSE-APACHE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
diff --git a/third_party/rust/litrs/LICENSE-MIT b/third_party/rust/litrs/LICENSE-MIT
new file mode 100644
index 0000000000..4fa8658f71
--- /dev/null
+++ b/third_party/rust/litrs/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2020 Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/litrs/README.md b/third_party/rust/litrs/README.md
new file mode 100644
index 0000000000..2307a5f429
--- /dev/null
+++ b/third_party/rust/litrs/README.md
@@ -0,0 +1,88 @@
+# `litrs`: parsing and inspecting Rust literals
+
+[<img alt="CI status of main" src="https://img.shields.io/github/actions/workflow/status/LukasKalbertodt/litrs/ci.yml?branch=main&label=CI&logo=github&logoColor=white&style=for-the-badge" height="23">](https://github.com/LukasKalbertodt/litrs/actions/workflows/ci.yml)
+[<img alt="Crates.io Version" src="https://img.shields.io/crates/v/litrs?logo=rust&style=for-the-badge" height="23">](https://crates.io/crates/litrs)
+[<img alt="docs.rs" src="https://img.shields.io/crates/v/litrs?color=blue&label=docs&style=for-the-badge" height="23">](https://docs.rs/litrs)
+
+`litrs` offers functionality to parse Rust literals, i.e. tokens in the Rust programming language that represent fixed values.
+For example: `27`, `"crab"`, `true`.
+This is particularly useful for proc macros, but can also be used outside of a proc-macro context.
+
+**Why this library?**
+Unfortunately, the `proc_macro` API shipped with the compiler offers no easy way to inspect literals.
+There are mainly two libraries for this purpose:
+[`syn`](https://github.com/dtolnay/syn) and [`literalext`](https://github.com/mystor/literalext).
+The latter is deprecated, and `syn` is often overkill for the task at hand, especially when developing function-like proc macros (e.g. `foo!(..)`).
+This crate is a lightweight alternative.
+When it comes to literals, `litrs` also offers a bit more flexibility and a few more features than `syn`.
+
+I'm interested in community feedback!
+If you consider using this, please speak your mind [in this issue](https://github.com/LukasKalbertodt/litrs/issues/1).
+
+## Example
+
+### In proc macro
+
+```rust
+use std::convert::TryFrom;
+use proc_macro::TokenStream;
+use litrs::Literal;
+
+#[proc_macro]
+pub fn foo(input: TokenStream) -> TokenStream {
+ // Please do proper error handling in your real code!
+ let first_token = input.into_iter().next().expect("no input");
+
+ // `try_from` will return an error if the token is not a literal.
+ match Literal::try_from(first_token) {
+ // Convenient methods to produce decent errors via `compile_error!`.
+ Err(e) => return e.to_compile_error(),
+
+ // You can now inspect your literal!
+ Ok(Literal::Integer(i)) => {
+ println!("Got an integer specified in base {:?}", i.base());
+
+ let value = i.value::<u64>().expect("integer literal too large");
+ println!("Is your integer even? {}", value % 2 == 0);
+ }
+        Ok(_) => {
+            println!("Got a non-integer literal");
+        }
+ }
+
+ TokenStream::new() // dummy output
+}
+```
+
+If you are expecting a specific kind of literal, you can also use that type's `try_from` directly; for example, the following returns an error if the token is not a float literal:
+
+```rust
+FloatLit::try_from(first_token)
+```
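+
+As a rough sketch (assuming the same `first_token` and proc-macro context as in the example above), a full call could look like this:
+
+```rust
+use std::convert::TryFrom;
+use litrs::FloatLit;
+
+// `try_from` yields an `InvalidToken` error for anything that is not a float
+// literal; `to_compile_error` turns it into a `compile_error!(..)` invocation.
+let float = match FloatLit::try_from(first_token) {
+    Ok(lit) => lit,
+    Err(e) => return e.to_compile_error(),
+};
+println!("type suffix of the float literal: {:?}", float.suffix());
+```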
+
+### Parsing from a `&str`
+
+Outside of a proc macro context you might want to parse a string directly.
+
+```rust
+use litrs::{FloatLit, Literal};
+
+let lit = Literal::parse("'🦀'").expect("failed to parse literal");
+let float_lit = FloatLit::parse("2.7e3").expect("failed to parse as float literal");
+```
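+
+`Literal::parse` returns an enum over all literal kinds. A small sketch of inspecting the result (using only APIs from this crate) could look like this:
+
+```rust
+use litrs::Literal;
+
+// Match on the kind of literal and read the concrete value.
+match Literal::parse("'🦀'").expect("failed to parse literal") {
+    Literal::Char(c) => println!("char literal with value {:?}", c.value()),
+    _ => println!("some other kind of literal"),
+}
+```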
+
+See [**the documentation**](https://docs.rs/litrs) or the `examples/` directory for more examples and information.
+
+
+<br />
+
+---
+
+## License
+
+Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
+2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in this project by you, as defined in the Apache-2.0 license,
+shall be dual licensed as above, without any additional terms or conditions.
diff --git a/third_party/rust/litrs/src/bool/mod.rs b/third_party/rust/litrs/src/bool/mod.rs
new file mode 100644
index 0000000000..d7b54a1b9f
--- /dev/null
+++ b/third_party/rust/litrs/src/bool/mod.rs
@@ -0,0 +1,55 @@
+use std::fmt;
+
+use crate::{ParseError, err::{perr, ParseErrorKind::*}};
+
+
+/// A bool literal: `true` or `false`. Also see [the reference][ref].
+///
+/// Note that, strictly speaking, from Rust's point of view "boolean literals" are
+/// not actual literals but [keywords].
+///
+/// [ref]: https://doc.rust-lang.org/reference/expressions/literal-expr.html#boolean-literal-expressions
+/// [keywords]: https://doc.rust-lang.org/reference/keywords.html#strict-keywords
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum BoolLit {
+ False,
+ True,
+}
+
+impl BoolLit {
+ /// Parses the input as a bool literal. Returns an error if the input is
+ /// invalid or represents a different kind of literal.
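+    ///
+    /// For example, `BoolLit::parse("true")` returns `Ok(BoolLit::True)`, while
+    /// inputs with different capitalization or surrounding whitespace, such as
+    /// `"True"` or `"true "`, are rejected.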
+ pub fn parse(s: &str) -> Result<Self, ParseError> {
+ match s {
+ "false" => Ok(Self::False),
+ "true" => Ok(Self::True),
+ _ => Err(perr(None, InvalidLiteral)),
+ }
+ }
+
+ /// Returns the actual Boolean value of this literal.
+ pub fn value(self) -> bool {
+ match self {
+ Self::False => false,
+ Self::True => true,
+ }
+ }
+
+    /// Returns the literal as a string.
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ Self::False => "false",
+ Self::True => "true",
+ }
+ }
+}
+
+impl fmt::Display for BoolLit {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(self.as_str())
+ }
+}
+
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/bool/tests.rs b/third_party/rust/litrs/src/bool/tests.rs
new file mode 100644
index 0000000000..4b829244b8
--- /dev/null
+++ b/third_party/rust/litrs/src/bool/tests.rs
@@ -0,0 +1,48 @@
+use crate::{
+ Literal, BoolLit,
+ test_util::assert_parse_ok_eq,
+};
+
+macro_rules! assert_bool_parse {
+ ($input:literal, $expected:expr) => {
+ assert_parse_ok_eq(
+ $input, Literal::parse($input), Literal::Bool($expected), "Literal::parse");
+ assert_parse_ok_eq($input, BoolLit::parse($input), $expected, "BoolLit::parse");
+ };
+}
+
+
+
+#[test]
+fn parse_ok() {
+ assert_bool_parse!("false", BoolLit::False);
+ assert_bool_parse!("true", BoolLit::True);
+}
+
+#[test]
+fn parse_err() {
+ assert!(Literal::parse("fa").is_err());
+ assert!(Literal::parse("fal").is_err());
+ assert!(Literal::parse("fals").is_err());
+ assert!(Literal::parse(" false").is_err());
+ assert!(Literal::parse("false ").is_err());
+ assert!(Literal::parse("False").is_err());
+
+ assert!(Literal::parse("tr").is_err());
+ assert!(Literal::parse("tru").is_err());
+ assert!(Literal::parse(" true").is_err());
+ assert!(Literal::parse("true ").is_err());
+ assert!(Literal::parse("True").is_err());
+}
+
+#[test]
+fn value() {
+ assert!(!BoolLit::False.value());
+ assert!(BoolLit::True.value());
+}
+
+#[test]
+fn as_str() {
+ assert_eq!(BoolLit::False.as_str(), "false");
+ assert_eq!(BoolLit::True.as_str(), "true");
+}
diff --git a/third_party/rust/litrs/src/byte/mod.rs b/third_party/rust/litrs/src/byte/mod.rs
new file mode 100644
index 0000000000..ffdff5d04a
--- /dev/null
+++ b/third_party/rust/litrs/src/byte/mod.rs
@@ -0,0 +1,107 @@
+use core::fmt;
+
+use crate::{
+ Buffer, ParseError,
+ err::{perr, ParseErrorKind::*},
+ escape::unescape,
+ parse::check_suffix,
+};
+
+
+/// A (single) byte literal, e.g. `b'k'` or `b'!'`.
+///
+/// See [the reference][ref] for more information.
+///
+/// [ref]: https://doc.rust-lang.org/reference/tokens.html#byte-literals
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct ByteLit<B: Buffer> {
+ raw: B,
+ /// Start index of the suffix or `raw.len()` if there is no suffix.
+ start_suffix: usize,
+ value: u8,
+}
+
+impl<B: Buffer> ByteLit<B> {
+ /// Parses the input as a byte literal. Returns an error if the input is
+ /// invalid or represents a different kind of literal.
+ pub fn parse(input: B) -> Result<Self, ParseError> {
+ if input.is_empty() {
+ return Err(perr(None, Empty));
+ }
+ if !input.starts_with("b'") {
+ return Err(perr(None, InvalidByteLiteralStart));
+ }
+
+ let (value, start_suffix) = parse_impl(&input)?;
+ Ok(Self { raw: input, value, start_suffix })
+ }
+
+ /// Returns the byte value that this literal represents.
+ pub fn value(&self) -> u8 {
+ self.value
+ }
+
+ /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
+ pub fn suffix(&self) -> &str {
+ &(*self.raw)[self.start_suffix..]
+ }
+
+ /// Returns the raw input that was passed to `parse`.
+ pub fn raw_input(&self) -> &str {
+ &self.raw
+ }
+
+ /// Returns the raw input that was passed to `parse`, potentially owned.
+ pub fn into_raw_input(self) -> B {
+ self.raw
+ }
+
+}
+
+impl ByteLit<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn to_owned(&self) -> ByteLit<String> {
+ ByteLit {
+ raw: self.raw.to_owned(),
+ start_suffix: self.start_suffix,
+ value: self.value,
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for ByteLit<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(&self.raw)
+ }
+}
+
+/// Precondition: must start with `b'`.
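+///
+/// For example, for `b'a'` the bytes are `b`, `'`, `a`, `'`: index 2 is the first
+/// content byte and index `2 + len` is the closing quote.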
+#[inline(never)]
+pub(crate) fn parse_impl(input: &str) -> Result<(u8, usize), ParseError> {
+ let input_bytes = input.as_bytes();
+ let first = input_bytes.get(2).ok_or(perr(None, UnterminatedByteLiteral))?;
+ let (c, len) = match first {
+ b'\'' if input_bytes.get(3) == Some(&b'\'') => return Err(perr(2, UnescapedSingleQuote)),
+ b'\'' => return Err(perr(None, EmptyByteLiteral)),
+ b'\n' | b'\t' | b'\r' => return Err(perr(2, UnescapedSpecialWhitespace)),
+ b'\\' => unescape::<u8>(&input[2..], 2)?,
+ other if other.is_ascii() => (*other, 1),
+ _ => return Err(perr(2, NonAsciiInByteLiteral)),
+ };
+
+ match input[2 + len..].find('\'') {
+ Some(0) => {}
+ Some(_) => return Err(perr(None, OverlongByteLiteral)),
+ None => return Err(perr(None, UnterminatedByteLiteral)),
+ }
+
+ let start_suffix = 2 + len + 1;
+ let suffix = &input[start_suffix..];
+ check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;
+
+ Ok((c, start_suffix))
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/byte/tests.rs b/third_party/rust/litrs/src/byte/tests.rs
new file mode 100644
index 0000000000..3cf16b5fc2
--- /dev/null
+++ b/third_party/rust/litrs/src/byte/tests.rs
@@ -0,0 +1,188 @@
+use crate::{ByteLit, Literal, test_util::{assert_parse_ok_eq, assert_roundtrip}};
+
+// ===== Utility functions =======================================================================
+
+macro_rules! check {
+ ($lit:literal) => { check!($lit, stringify!($lit), "") };
+ ($lit:literal, $input:expr, $suffix:literal) => {
+ let input = $input;
+ let expected = ByteLit {
+ raw: input,
+ start_suffix: input.len() - $suffix.len(),
+ value: $lit,
+ };
+
+ assert_parse_ok_eq(input, ByteLit::parse(input), expected.clone(), "ByteLit::parse");
+ assert_parse_ok_eq(input, Literal::parse(input), Literal::Byte(expected), "Literal::parse");
+ let lit = ByteLit::parse(input).unwrap();
+ assert_eq!(lit.value(), $lit);
+ assert_eq!(lit.suffix(), $suffix);
+ assert_roundtrip(expected.to_owned(), input);
+ };
+}
+
+
+// ===== Actual tests ============================================================================
+
+#[test]
+fn alphanumeric() {
+ check!(b'a');
+ check!(b'b');
+ check!(b'y');
+ check!(b'z');
+ check!(b'A');
+ check!(b'B');
+ check!(b'Y');
+ check!(b'Z');
+
+ check!(b'0');
+ check!(b'1');
+ check!(b'8');
+ check!(b'9');
+}
+
+#[test]
+fn special_chars() {
+ check!(b' ');
+ check!(b'!');
+ check!(b'"');
+ check!(b'#');
+ check!(b'$');
+ check!(b'%');
+ check!(b'&');
+ check!(b'(');
+ check!(b')');
+ check!(b'*');
+ check!(b'+');
+ check!(b',');
+ check!(b'-');
+ check!(b'.');
+ check!(b'/');
+ check!(b':');
+ check!(b';');
+ check!(b'<');
+ check!(b'=');
+ check!(b'>');
+ check!(b'?');
+ check!(b'@');
+ check!(b'[');
+ check!(b']');
+ check!(b'^');
+ check!(b'_');
+ check!(b'`');
+ check!(b'{');
+ check!(b'|');
+ check!(b'}');
+ check!(b'~');
+}
+
+#[test]
+fn quote_escapes() {
+ check!(b'\'');
+ check!(b'\"');
+}
+
+#[test]
+fn ascii_escapes() {
+ check!(b'\n');
+ check!(b'\r');
+ check!(b'\t');
+ check!(b'\\');
+ check!(b'\0');
+
+ check!(b'\x00');
+ check!(b'\x01');
+ check!(b'\x0c');
+ check!(b'\x0D');
+ check!(b'\x13');
+ check!(b'\x30');
+ check!(b'\x30');
+ check!(b'\x4B');
+ check!(b'\x6b');
+ check!(b'\x7F');
+ check!(b'\x7f');
+}
+
+#[test]
+fn byte_escapes() {
+ check!(b'\x80');
+ check!(b'\x8a');
+ check!(b'\x8C');
+ check!(b'\x99');
+ check!(b'\xa0');
+ check!(b'\xAd');
+ check!(b'\xfe');
+ check!(b'\xFe');
+ check!(b'\xfF');
+ check!(b'\xFF');
+}
+
+#[test]
+fn suffixes() {
+ check!(b'a', r##"b'a'peter"##, "peter");
+ check!(b'#', r##"b'#'peter"##, "peter");
+ check!(b'\n', r##"b'\n'peter"##, "peter");
+ check!(b'\'', r##"b'\''peter"##, "peter");
+ check!(b'\"', r##"b'\"'peter"##, "peter");
+ check!(b'\xFF', r##"b'\xFF'peter"##, "peter");
+}
+
+#[test]
+fn invalid_escapes() {
+ assert_err!(ByteLit, r"b'\a'", UnknownEscape, 2..4);
+ assert_err!(ByteLit, r"b'\y'", UnknownEscape, 2..4);
+ assert_err!(ByteLit, r"b'\", UnterminatedEscape, 2..3);
+ assert_err!(ByteLit, r"b'\x'", UnterminatedEscape, 2..5);
+ assert_err!(ByteLit, r"b'\x1'", InvalidXEscape, 2..6);
+ assert_err!(ByteLit, r"b'\xaj'", InvalidXEscape, 2..6);
+ assert_err!(ByteLit, r"b'\xjb'", InvalidXEscape, 2..6);
+}
+
+#[test]
+fn unicode_escape_not_allowed() {
+ assert_err!(ByteLit, r"b'\u{0}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{00}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{b}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{B}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{7e}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{E4}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{e4}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{fc}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{Fc}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{fC}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{FC}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{b10}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{B10}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{0b10}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{2764}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{1f602}'", UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteLit, r"b'\u{1F602}'", UnicodeEscapeInByteLiteral, 2..4);
+}
+
+#[test]
+fn parse_err() {
+ assert_err!(ByteLit, r"b''", EmptyByteLiteral, None);
+ assert_err!(ByteLit, r"b' ''", UnexpectedChar, 4..5);
+
+ assert_err!(ByteLit, r"b'", UnterminatedByteLiteral, None);
+ assert_err!(ByteLit, r"b'a", UnterminatedByteLiteral, None);
+ assert_err!(ByteLit, r"b'\n", UnterminatedByteLiteral, None);
+ assert_err!(ByteLit, r"b'\x35", UnterminatedByteLiteral, None);
+
+ assert_err!(ByteLit, r"b'ab'", OverlongByteLiteral, None);
+ assert_err!(ByteLit, r"b'a _'", OverlongByteLiteral, None);
+ assert_err!(ByteLit, r"b'\n3'", OverlongByteLiteral, None);
+
+ assert_err!(ByteLit, r"", Empty, None);
+
+ assert_err!(ByteLit, r"b'''", UnescapedSingleQuote, 2);
+ assert_err!(ByteLit, r"b''''", UnescapedSingleQuote, 2);
+
+ assert_err!(ByteLit, "b'\n'", UnescapedSpecialWhitespace, 2);
+ assert_err!(ByteLit, "b'\t'", UnescapedSpecialWhitespace, 2);
+ assert_err!(ByteLit, "b'\r'", UnescapedSpecialWhitespace, 2);
+
+ assert_err!(ByteLit, "b'న'", NonAsciiInByteLiteral, 2);
+ assert_err!(ByteLit, "b'犬'", NonAsciiInByteLiteral, 2);
+ assert_err!(ByteLit, "b'🦊'", NonAsciiInByteLiteral, 2);
+}
diff --git a/third_party/rust/litrs/src/bytestr/mod.rs b/third_party/rust/litrs/src/bytestr/mod.rs
new file mode 100644
index 0000000000..a0e09727f4
--- /dev/null
+++ b/third_party/rust/litrs/src/bytestr/mod.rs
@@ -0,0 +1,126 @@
+use std::{fmt, ops::Range};
+
+use crate::{
+ Buffer, ParseError,
+ err::{perr, ParseErrorKind::*},
+ escape::{scan_raw_string, unescape_string},
+};
+
+
+/// A byte string or raw byte string literal, e.g. `b"hello"` or `br#"abc"def"#`.
+///
+/// See [the reference][ref] for more information.
+///
+/// [ref]: https://doc.rust-lang.org/reference/tokens.html#byte-string-literals
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct ByteStringLit<B: Buffer> {
+ /// The raw input.
+ raw: B,
+
+    /// The string value (with all escapes unescaped), or `None` if there were
+    /// no escapes. In the latter case, the inner part of `raw` is the string value.
+ value: Option<Vec<u8>>,
+
+ /// The number of hash signs in case of a raw string literal, or `None` if
+ /// it's not a raw string literal.
+ num_hashes: Option<u32>,
+
+ /// Start index of the suffix or `raw.len()` if there is no suffix.
+ start_suffix: usize,
+}
+
+impl<B: Buffer> ByteStringLit<B> {
+ /// Parses the input as a (raw) byte string literal. Returns an error if the
+ /// input is invalid or represents a different kind of literal.
+ pub fn parse(input: B) -> Result<Self, ParseError> {
+ if input.is_empty() {
+ return Err(perr(None, Empty));
+ }
+ if !input.starts_with(r#"b""#) && !input.starts_with("br") {
+ return Err(perr(None, InvalidByteStringLiteralStart));
+ }
+
+ let (value, num_hashes, start_suffix) = parse_impl(&input)?;
+ Ok(Self { raw: input, value, num_hashes, start_suffix })
+ }
+
+ /// Returns the string value this literal represents (where all escapes have
+ /// been turned into their respective values).
+ pub fn value(&self) -> &[u8] {
+ self.value.as_deref().unwrap_or(&self.raw.as_bytes()[self.inner_range()])
+ }
+
+ /// Like `value` but returns a potentially owned version of the value.
+ ///
+ /// The return value is either `Cow<'static, [u8]>` if `B = String`, or
+ /// `Cow<'a, [u8]>` if `B = &'a str`.
+ pub fn into_value(self) -> B::ByteCow {
+ let inner_range = self.inner_range();
+ let Self { raw, value, .. } = self;
+ value.map(B::ByteCow::from).unwrap_or_else(|| raw.cut(inner_range).into_byte_cow())
+ }
+
+ /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
+ pub fn suffix(&self) -> &str {
+ &(*self.raw)[self.start_suffix..]
+ }
+
+    /// Returns whether this literal is a raw byte string literal (starting
+    /// with `br`).
+ pub fn is_raw_byte_string(&self) -> bool {
+ self.num_hashes.is_some()
+ }
+
+ /// Returns the raw input that was passed to `parse`.
+ pub fn raw_input(&self) -> &str {
+ &self.raw
+ }
+
+ /// Returns the raw input that was passed to `parse`, potentially owned.
+ pub fn into_raw_input(self) -> B {
+ self.raw
+ }
+
+ /// The range within `self.raw` that excludes the quotes and potential `r#`.
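+    /// For example, `b"abc"` yields `2..5` and `br#"abc"#` yields `4..7`.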
+ fn inner_range(&self) -> Range<usize> {
+ match self.num_hashes {
+ None => 2..self.start_suffix - 1,
+ Some(n) => 2 + n as usize + 1..self.start_suffix - n as usize - 1,
+ }
+ }
+}
+
+impl ByteStringLit<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn into_owned(self) -> ByteStringLit<String> {
+ ByteStringLit {
+ raw: self.raw.to_owned(),
+ value: self.value,
+ num_hashes: self.num_hashes,
+ start_suffix: self.start_suffix,
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for ByteStringLit<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(&self.raw)
+ }
+}
+
+
+/// Precondition: input has to start with either `b"` or `br`.
+#[inline(never)]
+fn parse_impl(input: &str) -> Result<(Option<Vec<u8>>, Option<u32>, usize), ParseError> {
+ if input.starts_with("br") {
+ scan_raw_string::<u8>(&input, 2)
+ .map(|(v, num, start_suffix)| (v.map(String::into_bytes), Some(num), start_suffix))
+ } else {
+ unescape_string::<u8>(&input, 2)
+ .map(|(v, start_suffix)| (v.map(String::into_bytes), None, start_suffix))
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/bytestr/tests.rs b/third_party/rust/litrs/src/bytestr/tests.rs
new file mode 100644
index 0000000000..2afef5a99c
--- /dev/null
+++ b/third_party/rust/litrs/src/bytestr/tests.rs
@@ -0,0 +1,224 @@
+use crate::{Literal, ByteStringLit, test_util::{assert_parse_ok_eq, assert_roundtrip}};
+
+// ===== Utility functions =======================================================================
+
+macro_rules! check {
+ ($lit:literal, $has_escapes:expr, $num_hashes:expr) => {
+ check!($lit, stringify!($lit), $has_escapes, $num_hashes, "")
+ };
+ ($lit:literal, $input:expr, $has_escapes:expr, $num_hashes:expr, $suffix:literal) => {
+ let input = $input;
+ let expected = ByteStringLit {
+ raw: input,
+ value: if $has_escapes { Some($lit.to_vec()) } else { None },
+ num_hashes: $num_hashes,
+ start_suffix: input.len() - $suffix.len(),
+ };
+
+ assert_parse_ok_eq(
+ input, ByteStringLit::parse(input), expected.clone(), "ByteStringLit::parse");
+ assert_parse_ok_eq(
+ input, Literal::parse(input), Literal::ByteString(expected.clone()), "Literal::parse");
+ let lit = ByteStringLit::parse(input).unwrap();
+ assert_eq!(lit.value(), $lit);
+ assert_eq!(lit.suffix(), $suffix);
+ assert_eq!(lit.into_value().as_ref(), $lit);
+ assert_roundtrip(expected.into_owned(), input);
+ };
+}
+
+
+// ===== Actual tests ============================================================================
+
+#[test]
+fn simple() {
+ check!(b"", false, None);
+ check!(b"a", false, None);
+ check!(b"peter", false, None);
+}
+
+#[test]
+fn special_whitespace() {
+ let strings = ["\n", "\t", "foo\tbar", "baz\n"];
+
+ for &s in &strings {
+ let input = format!(r#"b"{}""#, s);
+ let input_raw = format!(r#"br"{}""#, s);
+ for (input, num_hashes) in vec![(input, None), (input_raw, Some(0))] {
+ let expected = ByteStringLit {
+ raw: &*input,
+ value: None,
+ num_hashes,
+ start_suffix: input.len(),
+ };
+ assert_parse_ok_eq(
+ &input, ByteStringLit::parse(&*input), expected.clone(), "ByteStringLit::parse");
+ assert_parse_ok_eq(
+ &input, Literal::parse(&*input), Literal::ByteString(expected), "Literal::parse");
+ assert_eq!(ByteStringLit::parse(&*input).unwrap().value(), s.as_bytes());
+ assert_eq!(ByteStringLit::parse(&*input).unwrap().into_value(), s.as_bytes());
+ }
+ }
+
+ let res = ByteStringLit::parse("br\"\r\"").expect("failed to parse");
+ assert_eq!(res.value(), b"\r");
+}
+
+#[test]
+fn simple_escapes() {
+ check!(b"a\nb", true, None);
+ check!(b"\nb", true, None);
+ check!(b"a\n", true, None);
+ check!(b"\n", true, None);
+
+ check!(b"\x60foo \t bar\rbaz\n banana \0kiwi", true, None);
+ check!(b"foo \\ferris", true, None);
+ check!(b"baz \\ferris\"box", true, None);
+ check!(b"\\foo\\ banana\" baz\"", true, None);
+ check!(b"\"foo \\ferris \" baz\\", true, None);
+
+ check!(b"\x00", true, None);
+ check!(b" \x01", true, None);
+ check!(b"\x0c foo", true, None);
+ check!(b" foo\x0D ", true, None);
+ check!(b"\\x13", true, None);
+ check!(b"\"x30", true, None);
+}
+
+#[test]
+fn string_continue() {
+ check!(b"foo\
+ bar", true, None);
+ check!(b"foo\
+bar", true, None);
+
+ check!(b"foo\
+
+ banana", true, None);
+
+ // Weird whitespace characters
+ let lit = ByteStringLit::parse("b\"foo\\\n\r\t\n \n\tbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), b"foobar");
+
+ // Raw strings do not handle "string continues"
+ check!(br"foo\
+ bar", false, Some(0));
+}
+
+#[test]
+fn crlf_newlines() {
+ let lit = ByteStringLit::parse("b\"foo\r\nbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), b"foo\nbar");
+
+ let lit = ByteStringLit::parse("b\"\r\nbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), b"\nbar");
+
+ let lit = ByteStringLit::parse("b\"foo\r\n\"").expect("failed to parse");
+ assert_eq!(lit.value(), b"foo\n");
+
+ let lit = ByteStringLit::parse("br\"foo\r\nbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), b"foo\nbar");
+
+ let lit = ByteStringLit::parse("br#\"\r\nbar\"#").expect("failed to parse");
+ assert_eq!(lit.value(), b"\nbar");
+
+ let lit = ByteStringLit::parse("br##\"foo\r\n\"##").expect("failed to parse");
+ assert_eq!(lit.value(), b"foo\n");
+}
+
+#[test]
+fn raw_byte_string() {
+ check!(br"", false, Some(0));
+ check!(br"a", false, Some(0));
+ check!(br"peter", false, Some(0));
+ check!(br"Greetings jason!", false, Some(0));
+
+ check!(br#""#, false, Some(1));
+ check!(br#"a"#, false, Some(1));
+ check!(br##"peter"##, false, Some(2));
+ check!(br###"Greetings # Jason!"###, false, Some(3));
+ check!(br########"we ## need #### more ####### hashtags"########, false, Some(8));
+
+ check!(br#"foo " bar"#, false, Some(1));
+ check!(br##"foo " bar"##, false, Some(2));
+ check!(br#"foo """" '"'" bar"#, false, Some(1));
+ check!(br#""foo""#, false, Some(1));
+ check!(br###""foo'"###, false, Some(3));
+ check!(br#""x'#_#s'"#, false, Some(1));
+ check!(br"#", false, Some(0));
+ check!(br"foo#", false, Some(0));
+ check!(br"##bar", false, Some(0));
+ check!(br###""##foo"##bar'"###, false, Some(3));
+
+ check!(br"foo\n\t\r\0\\x60\u{123}doggo", false, Some(0));
+ check!(br#"cat\n\t\r\0\\x60\u{123}doggo"#, false, Some(1));
+}
+
+#[test]
+fn suffixes() {
+ check!(b"hello", r###"b"hello"suffix"###, false, None, "suffix");
+ check!(b"fox", r#"b"fox"peter"#, false, None, "peter");
+ check!(b"a\x0cb\\", r#"b"a\x0cb\\"_jürgen"#, true, None, "_jürgen");
+ check!(br"a\x0cb\\", r###"br#"a\x0cb\\"#_jürgen"###, false, Some(1), "_jürgen");
+}
+
+#[test]
+fn parse_err() {
+ assert_err!(ByteStringLit, r#"b""#, UnterminatedString, None);
+ assert_err!(ByteStringLit, r#"b"cat"#, UnterminatedString, None);
+ assert_err!(ByteStringLit, r#"b"Jurgen"#, UnterminatedString, None);
+ assert_err!(ByteStringLit, r#"b"foo bar baz"#, UnterminatedString, None);
+
+ assert_err!(ByteStringLit, r#"b"fox"peter""#, InvalidSuffix, 6);
+ assert_err!(ByteStringLit, r###"br#"foo "# bar"#"###, UnexpectedChar, 10);
+
+ assert_err!(ByteStringLit, "b\"\r\"", IsolatedCr, 2);
+ assert_err!(ByteStringLit, "b\"fo\rx\"", IsolatedCr, 4);
+
+ assert_err!(ByteStringLit, r##"br####""##, UnterminatedRawString, None);
+ assert_err!(ByteStringLit, r#####"br##"foo"#bar"#####, UnterminatedRawString, None);
+ assert_err!(ByteStringLit, r##"br####"##, InvalidLiteral, None);
+ assert_err!(ByteStringLit, r##"br####x"##, InvalidLiteral, None);
+}
+
+#[test]
+fn non_ascii() {
+ assert_err!(ByteStringLit, r#"b"న""#, NonAsciiInByteLiteral, 2);
+ assert_err!(ByteStringLit, r#"b"foo犬""#, NonAsciiInByteLiteral, 5);
+ assert_err!(ByteStringLit, r#"b"x🦊baz""#, NonAsciiInByteLiteral, 3);
+ assert_err!(ByteStringLit, r#"br"న""#, NonAsciiInByteLiteral, 3);
+ assert_err!(ByteStringLit, r#"br"foo犬""#, NonAsciiInByteLiteral, 6);
+ assert_err!(ByteStringLit, r#"br"x🦊baz""#, NonAsciiInByteLiteral, 4);
+}
+
+#[test]
+fn invalid_escapes() {
+ assert_err!(ByteStringLit, r#"b"\a""#, UnknownEscape, 2..4);
+ assert_err!(ByteStringLit, r#"b"foo\y""#, UnknownEscape, 5..7);
+ assert_err!(ByteStringLit, r#"b"\"#, UnterminatedEscape, 2);
+ assert_err!(ByteStringLit, r#"b"\x""#, UnterminatedEscape, 2..4);
+ assert_err!(ByteStringLit, r#"b"foo\x1""#, UnterminatedEscape, 5..8);
+ assert_err!(ByteStringLit, r#"b" \xaj""#, InvalidXEscape, 3..7);
+ assert_err!(ByteStringLit, r#"b"\xjbbaz""#, InvalidXEscape, 2..6);
+}
+
+#[test]
+fn unicode_escape_not_allowed() {
+ assert_err!(ByteStringLit, r#"b"\u{0}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{00}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{b}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{B}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{7e}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{E4}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{e4}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{fc}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{Fc}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{fC}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{FC}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{b10}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{B10}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{0b10}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{2764}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{1f602}""#, UnicodeEscapeInByteLiteral, 2..4);
+ assert_err!(ByteStringLit, r#"b"\u{1F602}""#, UnicodeEscapeInByteLiteral, 2..4);
+}
diff --git a/third_party/rust/litrs/src/char/mod.rs b/third_party/rust/litrs/src/char/mod.rs
new file mode 100644
index 0000000000..54f6f1137f
--- /dev/null
+++ b/third_party/rust/litrs/src/char/mod.rs
@@ -0,0 +1,105 @@
+use std::fmt;
+
+use crate::{
+ Buffer, ParseError,
+ err::{perr, ParseErrorKind::*},
+ escape::unescape,
+ parse::{first_byte_or_empty, check_suffix},
+};
+
+
+/// A character literal, e.g. `'g'` or `'🦊'`.
+///
+/// See [the reference][ref] for more information.
+///
+/// [ref]: https://doc.rust-lang.org/reference/tokens.html#character-literals
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct CharLit<B: Buffer> {
+ raw: B,
+ /// Start index of the suffix or `raw.len()` if there is no suffix.
+ start_suffix: usize,
+ value: char,
+}
+
+impl<B: Buffer> CharLit<B> {
+ /// Parses the input as a character literal. Returns an error if the input
+ /// is invalid or represents a different kind of literal.
+ pub fn parse(input: B) -> Result<Self, ParseError> {
+ match first_byte_or_empty(&input)? {
+ b'\'' => {
+ let (value, start_suffix) = parse_impl(&input)?;
+ Ok(Self { raw: input, value, start_suffix })
+ },
+ _ => Err(perr(0, DoesNotStartWithQuote)),
+ }
+ }
+
+ /// Returns the character value that this literal represents.
+ pub fn value(&self) -> char {
+ self.value
+ }
+
+ /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
+ pub fn suffix(&self) -> &str {
+ &(*self.raw)[self.start_suffix..]
+ }
+
+ /// Returns the raw input that was passed to `parse`.
+ pub fn raw_input(&self) -> &str {
+ &self.raw
+ }
+
+ /// Returns the raw input that was passed to `parse`, potentially owned.
+ pub fn into_raw_input(self) -> B {
+ self.raw
+ }
+
+}
+
+impl CharLit<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn to_owned(&self) -> CharLit<String> {
+ CharLit {
+ raw: self.raw.to_owned(),
+ start_suffix: self.start_suffix,
+ value: self.value,
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for CharLit<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(&self.raw)
+ }
+}
+
+/// Precondition: first character in input must be `'`.
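+///
+/// For example, for `'犬'` the characters are `'`, `犬`, `'`: `nth(1)` is the
+/// content character and the closing quote starts at byte index `1 + len_utf8()`.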
+#[inline(never)]
+pub(crate) fn parse_impl(input: &str) -> Result<(char, usize), ParseError> {
+ let first = input.chars().nth(1).ok_or(perr(None, UnterminatedCharLiteral))?;
+ let (c, len) = match first {
+ '\'' if input.chars().nth(2) == Some('\'') => return Err(perr(1, UnescapedSingleQuote)),
+ '\'' => return Err(perr(None, EmptyCharLiteral)),
+        '\n' | '\t' | '\r' => return Err(perr(1, UnescapedSpecialWhitespace)),
+
+ '\\' => unescape::<char>(&input[1..], 1)?,
+ other => (other, other.len_utf8()),
+ };
+
+ match input[1 + len..].find('\'') {
+ Some(0) => {}
+ Some(_) => return Err(perr(None, OverlongCharLiteral)),
+ None => return Err(perr(None, UnterminatedCharLiteral)),
+ }
+
+ let start_suffix = 1 + len + 1;
+ let suffix = &input[start_suffix..];
+ check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;
+
+ Ok((c, start_suffix))
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/char/tests.rs b/third_party/rust/litrs/src/char/tests.rs
new file mode 100644
index 0000000000..19219db73b
--- /dev/null
+++ b/third_party/rust/litrs/src/char/tests.rs
@@ -0,0 +1,227 @@
+use crate::{Literal, test_util::{assert_parse_ok_eq, assert_roundtrip}};
+use super::CharLit;
+
+// ===== Utility functions =======================================================================
+
+macro_rules! check {
+ ($lit:literal) => { check!($lit, stringify!($lit), "") };
+ ($lit:literal, $input:expr, $suffix:literal) => {
+ let input = $input;
+ let expected = CharLit {
+ raw: input,
+ start_suffix: input.len() - $suffix.len(),
+ value: $lit,
+ };
+
+ assert_parse_ok_eq(input, CharLit::parse(input), expected.clone(), "CharLit::parse");
+ assert_parse_ok_eq(input, Literal::parse(input), Literal::Char(expected), "Literal::parse");
+ let lit = CharLit::parse(input).unwrap();
+ assert_eq!(lit.value(), $lit);
+ assert_eq!(lit.suffix(), $suffix);
+ assert_roundtrip(expected.to_owned(), input);
+ };
+}
+
+
+// ===== Actual tests ============================================================================
+
+#[test]
+fn alphanumeric() {
+ check!('a');
+ check!('b');
+ check!('y');
+ check!('z');
+ check!('A');
+ check!('B');
+ check!('Y');
+ check!('Z');
+
+ check!('0');
+ check!('1');
+ check!('8');
+ check!('9');
+}
+
+#[test]
+fn special_chars() {
+ check!(' ');
+ check!('!');
+ check!('"');
+ check!('#');
+ check!('$');
+ check!('%');
+ check!('&');
+ check!('(');
+ check!(')');
+ check!('*');
+ check!('+');
+ check!(',');
+ check!('-');
+ check!('.');
+ check!('/');
+ check!(':');
+ check!(';');
+ check!('<');
+ check!('=');
+ check!('>');
+ check!('?');
+ check!('@');
+ check!('[');
+ check!(']');
+ check!('^');
+ check!('_');
+ check!('`');
+ check!('{');
+ check!('|');
+ check!('}');
+ check!('~');
+}
+
+#[test]
+fn unicode() {
+ check!('న');
+ check!('犬');
+ check!('🦊');
+}
+
+#[test]
+fn quote_escapes() {
+ check!('\'');
+ check!('\"');
+}
+
+#[test]
+fn ascii_escapes() {
+ check!('\n');
+ check!('\r');
+ check!('\t');
+ check!('\\');
+ check!('\0');
+
+ check!('\x00');
+ check!('\x01');
+ check!('\x0c');
+ check!('\x0D');
+ check!('\x13');
+ check!('\x30');
+ check!('\x30');
+ check!('\x4B');
+ check!('\x6b');
+ check!('\x7F');
+ check!('\x7f');
+}
+
+#[test]
+fn unicode_escapes() {
+ check!('\u{0}');
+ check!('\u{00}');
+ check!('\u{b}');
+ check!('\u{B}');
+ check!('\u{7e}');
+ check!('\u{E4}');
+ check!('\u{e4}');
+ check!('\u{fc}');
+ check!('\u{Fc}');
+ check!('\u{fC}');
+ check!('\u{FC}');
+ check!('\u{b10}');
+ check!('\u{B10}');
+ check!('\u{0b10}');
+ check!('\u{2764}');
+ check!('\u{1f602}');
+ check!('\u{1F602}');
+
+ check!('\u{0}');
+ check!('\u{0__}');
+ check!('\u{3_b}');
+ check!('\u{1_F_6_0_2}');
+ check!('\u{1_F6_02_____}');
+}
+
+#[test]
+fn suffixes() {
+ check!('a', r##"'a'peter"##, "peter");
+ check!('#', r##"'#'peter"##, "peter");
+ check!('\n', r##"'\n'peter"##, "peter");
+ check!('\'', r##"'\''peter"##, "peter");
+ check!('\"', r##"'\"'peter"##, "peter");
+}
+
+#[test]
+fn invalid_ascii_escapes() {
+ assert_err!(CharLit, r"'\x80'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\x81'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\x8a'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\x8F'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xa0'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xB0'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xc3'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xDf'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xff'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xfF'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xFf'", NonAsciiXEscape, 1..5);
+ assert_err!(CharLit, r"'\xFF'", NonAsciiXEscape, 1..5);
+}
+
+#[test]
+fn invalid_escapes() {
+ assert_err!(CharLit, r"'\a'", UnknownEscape, 1..3);
+ assert_err!(CharLit, r"'\y'", UnknownEscape, 1..3);
+ assert_err!(CharLit, r"'\", UnterminatedEscape, 1);
+ assert_err!(CharLit, r"'\x'", UnterminatedEscape, 1..4);
+ assert_err!(CharLit, r"'\x1'", InvalidXEscape, 1..5);
+ assert_err!(CharLit, r"'\xaj'", InvalidXEscape, 1..5);
+ assert_err!(CharLit, r"'\xjb'", InvalidXEscape, 1..5);
+}
+
+#[test]
+fn invalid_unicode_escapes() {
+ assert_err!(CharLit, r"'\u'", UnicodeEscapeWithoutBrace, 1..3);
+ assert_err!(CharLit, r"'\u '", UnicodeEscapeWithoutBrace, 1..3);
+ assert_err!(CharLit, r"'\u3'", UnicodeEscapeWithoutBrace, 1..3);
+
+ assert_err!(CharLit, r"'\u{'", UnterminatedUnicodeEscape, 1..5);
+ assert_err!(CharLit, r"'\u{12'", UnterminatedUnicodeEscape, 1..7);
+ assert_err!(CharLit, r"'\u{a0b'", UnterminatedUnicodeEscape, 1..8);
+ assert_err!(CharLit, r"'\u{a0_b '", UnterminatedUnicodeEscape, 1..11);
+
+ assert_err!(CharLit, r"'\u{_}'", InvalidStartOfUnicodeEscape, 4);
+ assert_err!(CharLit, r"'\u{_5f}'", InvalidStartOfUnicodeEscape, 4);
+
+ assert_err!(CharLit, r"'\u{x}'", NonHexDigitInUnicodeEscape, 4);
+ assert_err!(CharLit, r"'\u{0x}'", NonHexDigitInUnicodeEscape, 5);
+ assert_err!(CharLit, r"'\u{3bx}'", NonHexDigitInUnicodeEscape, 6);
+ assert_err!(CharLit, r"'\u{3b_x}'", NonHexDigitInUnicodeEscape, 7);
+ assert_err!(CharLit, r"'\u{4x_}'", NonHexDigitInUnicodeEscape, 5);
+
+ assert_err!(CharLit, r"'\u{1234567}'", TooManyDigitInUnicodeEscape, 10);
+ assert_err!(CharLit, r"'\u{1234567}'", TooManyDigitInUnicodeEscape, 10);
+ assert_err!(CharLit, r"'\u{1_23_4_56_7}'", TooManyDigitInUnicodeEscape, 14);
+ assert_err!(CharLit, r"'\u{abcdef123}'", TooManyDigitInUnicodeEscape, 10);
+
+ assert_err!(CharLit, r"'\u{110000}'", InvalidUnicodeEscapeChar, 1..10);
+}
+
+#[test]
+fn parse_err() {
+ assert_err!(CharLit, r"''", EmptyCharLiteral, None);
+ assert_err!(CharLit, r"' ''", UnexpectedChar, 3);
+
+ assert_err!(CharLit, r"'", UnterminatedCharLiteral, None);
+ assert_err!(CharLit, r"'a", UnterminatedCharLiteral, None);
+ assert_err!(CharLit, r"'\n", UnterminatedCharLiteral, None);
+ assert_err!(CharLit, r"'\x35", UnterminatedCharLiteral, None);
+
+ assert_err!(CharLit, r"'ab'", OverlongCharLiteral, None);
+ assert_err!(CharLit, r"'a _'", OverlongCharLiteral, None);
+ assert_err!(CharLit, r"'\n3'", OverlongCharLiteral, None);
+
+ assert_err!(CharLit, r"", Empty, None);
+
+ assert_err!(CharLit, r"'''", UnescapedSingleQuote, 1);
+ assert_err!(CharLit, r"''''", UnescapedSingleQuote, 1);
+
+ assert_err!(CharLit, "'\n'", UnescapedSpecialWhitespace, 1);
+ assert_err!(CharLit, "'\t'", UnescapedSpecialWhitespace, 1);
+ assert_err!(CharLit, "'\r'", UnescapedSpecialWhitespace, 1);
+}
diff --git a/third_party/rust/litrs/src/err.rs b/third_party/rust/litrs/src/err.rs
new file mode 100644
index 0000000000..86d51dc4a8
--- /dev/null
+++ b/third_party/rust/litrs/src/err.rs
@@ -0,0 +1,371 @@
+use std::{fmt, ops::Range};
+
+
+/// An error signaling that a different kind of token was expected. Returned by
+/// the various `TryFrom` impls.
+#[derive(Debug, Clone, Copy)]
+pub struct InvalidToken {
+ pub(crate) expected: TokenKind,
+ pub(crate) actual: TokenKind,
+ pub(crate) span: Span,
+}
+
+impl InvalidToken {
+ /// Returns a token stream representing `compile_error!("msg");` where
+ /// `"msg"` is the output of `self.to_string()`. **Panics if called outside
+ /// of a proc-macro context!**
+ pub fn to_compile_error(&self) -> proc_macro::TokenStream {
+ use proc_macro::{Delimiter, Ident, Group, Punct, Spacing, TokenTree};
+
+ let span = match self.span {
+ Span::One(s) => s,
+ #[cfg(feature = "proc-macro2")]
+ Span::Two(s) => s.unwrap(),
+ };
+ let msg = self.to_string();
+ let tokens = vec![
+ TokenTree::from(Ident::new("compile_error", span)),
+ TokenTree::from(Punct::new('!', Spacing::Alone)),
+ TokenTree::from(Group::new(
+ Delimiter::Parenthesis,
+ TokenTree::from(proc_macro::Literal::string(&msg)).into(),
+ )),
+ ];
+
+
+ tokens.into_iter().map(|mut t| { t.set_span(span); t }).collect()
+ }
+
+ /// Like [`to_compile_error`][Self::to_compile_error], but returns a token
+ /// stream from `proc_macro2` and does not panic outside of a proc-macro
+ /// context.
+ #[cfg(feature = "proc-macro2")]
+ pub fn to_compile_error2(&self) -> proc_macro2::TokenStream {
+ use proc_macro2::{Delimiter, Ident, Group, Punct, Spacing, TokenTree};
+
+ let span = match self.span {
+ Span::One(s) => proc_macro2::Span::from(s),
+ Span::Two(s) => s,
+ };
+ let msg = self.to_string();
+ let tokens = vec![
+ TokenTree::from(Ident::new("compile_error", span)),
+ TokenTree::from(Punct::new('!', Spacing::Alone)),
+ TokenTree::from(Group::new(
+ Delimiter::Parenthesis,
+ TokenTree::from(proc_macro2::Literal::string(&msg)).into(),
+ )),
+ ];
+
+
+ tokens.into_iter().map(|mut t| { t.set_span(span); t }).collect()
+ }
+}
+
+impl std::error::Error for InvalidToken {}
+
+impl fmt::Display for InvalidToken {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fn kind_desc(kind: TokenKind) -> &'static str {
+ match kind {
+ TokenKind::Punct => "a punctuation character",
+ TokenKind::Ident => "an identifier",
+ TokenKind::Group => "a group",
+ TokenKind::Literal => "a literal",
+ TokenKind::BoolLit => "a bool literal (`true` or `false`)",
+                TokenKind::ByteLit => "a byte literal (e.g. `b'r'`)",
+ TokenKind::ByteStringLit => r#"a byte string literal (e.g. `b"fox"`)"#,
+ TokenKind::CharLit => "a character literal (e.g. `'P'`)",
+ TokenKind::FloatLit => "a float literal (e.g. `3.14`)",
+ TokenKind::IntegerLit => "an integer literal (e.g. `27`)",
+ TokenKind::StringLit => r#"a string literal (e.g. "Ferris")"#,
+ }
+ }
+
+ write!(f, "expected {}, but found {}", kind_desc(self.expected), kind_desc(self.actual))
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) enum TokenKind {
+ Punct,
+ Ident,
+ Group,
+ Literal,
+ BoolLit,
+ ByteLit,
+ ByteStringLit,
+ CharLit,
+ FloatLit,
+ IntegerLit,
+ StringLit,
+}
+
+/// Unfortunately, we have to deal with both `proc_macro` and `proc_macro2` spans.
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum Span {
+ One(proc_macro::Span),
+ #[cfg(feature = "proc-macro2")]
+ Two(proc_macro2::Span),
+}
+
+impl From<proc_macro::Span> for Span {
+ fn from(src: proc_macro::Span) -> Self {
+ Self::One(src)
+ }
+}
+
+#[cfg(feature = "proc-macro2")]
+impl From<proc_macro2::Span> for Span {
+ fn from(src: proc_macro2::Span) -> Self {
+ Self::Two(src)
+ }
+}
+
+/// Errors during parsing.
+///
+/// This type is intended primarily for error reporting, not for catching
+/// specific cases. The span and error kind are not guaranteed to be stable
+/// over different versions of this library, meaning that a returned error can
+/// change from one version to the next. There are simply too many fringe cases
+/// that are not easy to classify as a specific error kind. It depends entirely
+/// on the specific parser code how an invalid input is categorized.
+///
+/// Consider these examples:
+/// - `'\` can be seen as
+/// - invalid escape in character literal, or
+/// - unterminated character literal.
+/// - `'''` can be seen as
+/// - empty character literal, or
+/// - unescaped quote character in character literal.
+/// - `0b64` can be seen as
+/// - binary integer literal with invalid digit 6, or
+/// - binary integer literal with invalid digit 4, or
+/// - decimal integer literal with invalid digit b, or
+/// - decimal integer literal 0 with unknown type suffix `b64`.
+///
+/// If you want to see more of these examples, feel free to check out the unit
+/// tests of this library.
+///
+/// While this library does its best to emit sensible and precise errors, and to
+/// keep the returned errors as stable as possible, full stability cannot be
+/// guaranteed.
+#[derive(Debug, Clone)]
+pub struct ParseError {
+ pub(crate) span: Option<Range<usize>>,
+ pub(crate) kind: ParseErrorKind,
+}
+
+impl ParseError {
+ /// Returns a span of this error, if available. **Note**: the returned span
+ /// might change in future versions of this library. See [the documentation
+ /// of this type][ParseError] for more information.
+ pub fn span(&self) -> Option<Range<usize>> {
+ self.span.clone()
+ }
+}
+
+/// This is a free-standing function instead of an associated one to reduce
+/// noise around parsing code. There are lots of places that create errors, and
+/// I want to keep those call sites as short as possible.
+pub(crate) fn perr(span: impl SpanLike, kind: ParseErrorKind) -> ParseError {
+ ParseError {
+ span: span.into_span(),
+ kind,
+ }
+}
+
+pub(crate) trait SpanLike {
+ fn into_span(self) -> Option<Range<usize>>;
+}
+
+impl SpanLike for Option<Range<usize>> {
+ #[inline(always)]
+ fn into_span(self) -> Option<Range<usize>> {
+ self
+ }
+}
+impl SpanLike for Range<usize> {
+ #[inline(always)]
+ fn into_span(self) -> Option<Range<usize>> {
+ Some(self)
+ }
+}
+impl SpanLike for usize {
+ #[inline(always)]
+ fn into_span(self) -> Option<Range<usize>> {
+ Some(self..self + 1)
+ }
+}
+
+
+/// Kinds of errors.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[non_exhaustive]
+pub(crate) enum ParseErrorKind {
+ /// The input was an empty string
+ Empty,
+
+ /// An unexpected char was encountered.
+ UnexpectedChar,
+
+ /// Literal was not recognized.
+ InvalidLiteral,
+
+ /// Input does not start with decimal digit when trying to parse an integer.
+ DoesNotStartWithDigit,
+
+ /// A digit invalid for the specified integer base was found.
+ InvalidDigit,
+
+ /// Integer literal does not contain any valid digits.
+ NoDigits,
+
+ /// Exponent of a float literal does not contain any digits.
+ NoExponentDigits,
+
+ /// An unknown escape code, e.g. `\b`.
+ UnknownEscape,
+
+ /// A started escape sequence where the input ended before the escape was
+ /// finished.
+ UnterminatedEscape,
+
+ /// An `\x` escape where the two digits are not valid hex digits.
+ InvalidXEscape,
+
+ /// A string or character literal using the `\xNN` escape where `NN > 0x7F`.
+ NonAsciiXEscape,
+
+ /// A `\u{...}` escape in a byte or byte string literal.
+ UnicodeEscapeInByteLiteral,
+
+ /// A Unicode escape that does not start with a hex digit.
+ InvalidStartOfUnicodeEscape,
+
+ /// A `\u{...}` escape that lacks the opening brace.
+ UnicodeEscapeWithoutBrace,
+
+ /// In a `\u{...}` escape, a non-hex digit and non-underscore character was
+ /// found.
+ NonHexDigitInUnicodeEscape,
+
+ /// More than 6 digits found in unicode escape.
+ TooManyDigitInUnicodeEscape,
+
+ /// The value from a unicode escape does not represent a valid character.
+ InvalidUnicodeEscapeChar,
+
+ /// A `\u{..` escape that is not terminated (lacks the closing brace).
+ UnterminatedUnicodeEscape,
+
+ /// A character literal that's not terminated.
+ UnterminatedCharLiteral,
+
+ /// A character literal that contains more than one character.
+ OverlongCharLiteral,
+
+ /// An empty character literal, i.e. `''`.
+ EmptyCharLiteral,
+
+ UnterminatedByteLiteral,
+ OverlongByteLiteral,
+ EmptyByteLiteral,
+ NonAsciiInByteLiteral,
+
+ /// A `'` character was not escaped in a character or byte literal, or a `"`
+ /// character was not escaped in a string or byte string literal.
+ UnescapedSingleQuote,
+
+ /// A \n, \t or \r raw character in a char or byte literal.
+ UnescapedSpecialWhitespace,
+
+ /// When parsing a character, byte, string or byte string literal directly
+ /// and the input does not start with the corresponding quote character
+ /// (plus optional raw string prefix).
+ DoesNotStartWithQuote,
+
+ /// Unterminated raw string literal.
+ UnterminatedRawString,
+
+ /// String literal without a `"` at the end.
+ UnterminatedString,
+
+ /// Invalid start for a string literal.
+ InvalidStringLiteralStart,
+
+ /// Invalid start for a byte literal.
+ InvalidByteLiteralStart,
+
+ InvalidByteStringLiteralStart,
+
+    /// A literal `\r` character not followed by a `\n` character in a
+    /// (raw) string or byte string literal.
+ IsolatedCr,
+
+ /// Literal suffix is not a valid identifier.
+ InvalidSuffix,
+
+    /// Returned by `FloatLit::parse` if an integer literal (one with neither a
+    /// fractional nor an exponent part) is passed.
+ UnexpectedIntegerLit,
+
+ /// Integer suffixes cannot start with `e` or `E` as this conflicts with the
+ /// grammar for float literals.
+ IntegerSuffixStartingWithE,
+}
+
+impl std::error::Error for ParseError {}
+
+impl fmt::Display for ParseError {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ use ParseErrorKind::*;
+
+ let description = match self.kind {
+ Empty => "input is empty",
+ UnexpectedChar => "unexpected character",
+ InvalidLiteral => "invalid literal",
+ DoesNotStartWithDigit => "number literal does not start with decimal digit",
+ InvalidDigit => "integer literal contains a digit invalid for its base",
+ NoDigits => "integer literal does not contain any digits",
+ NoExponentDigits => "exponent of floating point literal does not contain any digits",
+ UnknownEscape => "unknown escape",
+ UnterminatedEscape => "unterminated escape: input ended too soon",
+ InvalidXEscape => r"invalid `\x` escape: not followed by two hex digits",
+            NonAsciiXEscape => r"`\x` escape in char/string literal exceeds the ASCII range",
+ UnicodeEscapeInByteLiteral => r"`\u{...}` escape in byte (string) literal not allowed",
+ InvalidStartOfUnicodeEscape => r"invalid start of `\u{...}` escape",
+            UnicodeEscapeWithoutBrace => r"Unicode `\u{...}` escape without opening brace",
+ NonHexDigitInUnicodeEscape => r"non-hex digit found in `\u{...}` escape",
+ TooManyDigitInUnicodeEscape => r"more than six digits in `\u{...}` escape",
+ InvalidUnicodeEscapeChar => r"value specified in `\u{...}` escape is not a valid char",
+ UnterminatedUnicodeEscape => r"unterminated `\u{...}` escape",
+ UnterminatedCharLiteral => "character literal is not terminated",
+ OverlongCharLiteral => "character literal contains more than one character",
+ EmptyCharLiteral => "empty character literal",
+ UnterminatedByteLiteral => "byte literal is not terminated",
+ OverlongByteLiteral => "byte literal contains more than one byte",
+ EmptyByteLiteral => "empty byte literal",
+            NonAsciiInByteLiteral => "non-ASCII character in byte (string) literal",
+ UnescapedSingleQuote => "character literal contains unescaped ' character",
+ UnescapedSpecialWhitespace => r"unescaped newline (\n), tab (\t) or cr (\r) character",
+ DoesNotStartWithQuote => "invalid start for char/byte/string literal",
+ UnterminatedRawString => "unterminated raw (byte) string literal",
+ UnterminatedString => "unterminated (byte) string literal",
+ InvalidStringLiteralStart => "invalid start for string literal",
+ InvalidByteLiteralStart => "invalid start for byte literal",
+ InvalidByteStringLiteralStart => "invalid start for byte string literal",
+ IsolatedCr => r"`\r` not immediately followed by `\n` in string",
+ InvalidSuffix => "literal suffix is not a valid identifier",
+ UnexpectedIntegerLit => "expected float literal, but found integer",
+ IntegerSuffixStartingWithE => "integer literal suffix must not start with 'e' or 'E'",
+ };
+
+ description.fmt(f)?;
+ if let Some(span) = &self.span {
+ write!(f, " (at {}..{})", span.start, span.end)?;
+ }
+
+ Ok(())
+ }
+}
diff --git a/third_party/rust/litrs/src/escape.rs b/third_party/rust/litrs/src/escape.rs
new file mode 100644
index 0000000000..5eb8382bc4
--- /dev/null
+++ b/third_party/rust/litrs/src/escape.rs
@@ -0,0 +1,262 @@
+use crate::{ParseError, err::{perr, ParseErrorKind::*}, parse::{hex_digit_value, check_suffix}};
+
+
+/// Must start with `\`
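+///
+/// Returns the unescaped value together with the length of the escape sequence
+/// consumed from `input`; `offset` is only used to shift error spans. For
+/// example, `\n` yields a newline (length 2) and `\x41` yields `A` (length 4).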
+pub(crate) fn unescape<E: Escapee>(input: &str, offset: usize) -> Result<(E, usize), ParseError> {
+ let first = input.as_bytes().get(1)
+ .ok_or(perr(offset, UnterminatedEscape))?;
+ let out = match first {
+ // Quote escapes
+ b'\'' => (E::from_byte(b'\''), 2),
+ b'"' => (E::from_byte(b'"'), 2),
+
+ // Ascii escapes
+ b'n' => (E::from_byte(b'\n'), 2),
+ b'r' => (E::from_byte(b'\r'), 2),
+ b't' => (E::from_byte(b'\t'), 2),
+ b'\\' => (E::from_byte(b'\\'), 2),
+ b'0' => (E::from_byte(b'\0'), 2),
+ b'x' => {
+ let hex_string = input.get(2..4)
+ .ok_or(perr(offset..offset + input.len(), UnterminatedEscape))?
+ .as_bytes();
+ let first = hex_digit_value(hex_string[0])
+ .ok_or(perr(offset..offset + 4, InvalidXEscape))?;
+ let second = hex_digit_value(hex_string[1])
+ .ok_or(perr(offset..offset + 4, InvalidXEscape))?;
+ let value = second + 16 * first;
+
+ if E::SUPPORTS_UNICODE && value > 0x7F {
+ return Err(perr(offset..offset + 4, NonAsciiXEscape));
+ }
+
+ (E::from_byte(value), 4)
+ },
+
+ // Unicode escape
+ b'u' => {
+ if !E::SUPPORTS_UNICODE {
+ return Err(perr(offset..offset + 2, UnicodeEscapeInByteLiteral));
+ }
+
+ if input.as_bytes().get(2) != Some(&b'{') {
+ return Err(perr(offset..offset + 2, UnicodeEscapeWithoutBrace));
+ }
+
+ let closing_pos = input.bytes().position(|b| b == b'}')
+ .ok_or(perr(offset..offset + input.len(), UnterminatedUnicodeEscape))?;
+
+ let inner = &input[3..closing_pos];
+ if inner.as_bytes().first() == Some(&b'_') {
+ return Err(perr(4, InvalidStartOfUnicodeEscape));
+ }
+
+ let mut v: u32 = 0;
+ let mut digit_count = 0;
+ for (i, b) in inner.bytes().enumerate() {
+                if b == b'_' {
+ continue;
+ }
+
+ let digit = hex_digit_value(b)
+ .ok_or(perr(offset + 3 + i, NonHexDigitInUnicodeEscape))?;
+
+ if digit_count == 6 {
+ return Err(perr(offset + 3 + i, TooManyDigitInUnicodeEscape));
+ }
+ digit_count += 1;
+ v = 16 * v + digit as u32;
+ }
+
+ let c = std::char::from_u32(v)
+ .ok_or(perr(offset..closing_pos + 1, InvalidUnicodeEscapeChar))?;
+
+ (E::from_char(c), closing_pos + 1)
+ }
+
+ _ => return Err(perr(offset..offset + 2, UnknownEscape)),
+ };
+
+ Ok(out)
+}
+
+pub(crate) trait Escapee: Into<char> {
+ const SUPPORTS_UNICODE: bool;
+ fn from_byte(b: u8) -> Self;
+ fn from_char(c: char) -> Self;
+}
+
+impl Escapee for u8 {
+ const SUPPORTS_UNICODE: bool = false;
+ fn from_byte(b: u8) -> Self {
+ b
+ }
+ fn from_char(_: char) -> Self {
+ panic!("bug: `<u8 as Escapee>::from_char` was called");
+ }
+}
+
+impl Escapee for char {
+ const SUPPORTS_UNICODE: bool = true;
+ fn from_byte(b: u8) -> Self {
+ b.into()
+ }
+ fn from_char(c: char) -> Self {
+ c
+ }
+}
+
+/// Checks whether the byte is skipped after a string continue start
+/// (an unescaped backslash followed by `\n`).
+fn is_string_continue_skipable_whitespace(b: u8) -> bool {
+ b == b' ' || b == b'\t' || b == b'\n' || b == b'\r'
+}
+
+/// Unescapes a whole string or byte string.
+#[inline(never)]
+pub(crate) fn unescape_string<E: Escapee>(
+ input: &str,
+ offset: usize,
+) -> Result<(Option<String>, usize), ParseError> {
+ let mut closing_quote_pos = None;
+ let mut i = offset;
+ let mut end_last_escape = offset;
+ let mut value = String::new();
+ while i < input.len() {
+ match input.as_bytes()[i] {
+ // Handle "string continue".
+ b'\\' if input.as_bytes().get(i + 1) == Some(&b'\n') => {
+ value.push_str(&input[end_last_escape..i]);
+
+ // Find the first non-whitespace character.
+ let end_escape = input[i + 2..].bytes()
+ .position(|b| !is_string_continue_skipable_whitespace(b))
+ .ok_or(perr(None, UnterminatedString))?;
+
+ i += 2 + end_escape;
+ end_last_escape = i;
+ }
+ b'\\' => {
+ let (c, len) = unescape::<E>(&input[i..input.len() - 1], i)?;
+ value.push_str(&input[end_last_escape..i]);
+ value.push(c.into());
+ i += len;
+ end_last_escape = i;
+ }
+ b'\r' => {
+ if input.as_bytes().get(i + 1) == Some(&b'\n') {
+ value.push_str(&input[end_last_escape..i]);
+ value.push('\n');
+ i += 2;
+ end_last_escape = i;
+ } else {
+ return Err(perr(i, IsolatedCr))
+ }
+ }
+ b'"' => {
+ closing_quote_pos = Some(i);
+ break;
+ },
+ b if !E::SUPPORTS_UNICODE && !b.is_ascii()
+ => return Err(perr(i, NonAsciiInByteLiteral)),
+ _ => i += 1,
+ }
+ }
+
+ let closing_quote_pos = closing_quote_pos.ok_or(perr(None, UnterminatedString))?;
+
+ let start_suffix = closing_quote_pos + 1;
+ let suffix = &input[start_suffix..];
+ check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;
+
+ // `value` is only empty if there was no escape in the input string
+ // (with the special case of the input being empty). This means the
+ // string value basically equals the input, so we store `None`.
+ let value = if value.is_empty() {
+ None
+ } else {
+ // There was an escape in the string, so we need to push the
+ // remaining unescaped part of the string still.
+ value.push_str(&input[end_last_escape..closing_quote_pos]);
+ Some(value)
+ };
+
+ Ok((value, start_suffix))
+}
+
+/// Reads and checks a raw (byte) string literal, converting `\r\n` sequences to
+/// just `\n` sequences. Returns an optional new string (if the input contained
+/// any `\r\n`), the number of hashes used by the literal, and the start index of
+/// the suffix.
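+///
+/// For example, for the input `br#"a"#` (called with `offset = 2`) this returns
+/// no new string, one hash, and suffix start `7` (the end of the input).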
+#[inline(never)]
+pub(crate) fn scan_raw_string<E: Escapee>(
+ input: &str,
+ offset: usize,
+) -> Result<(Option<String>, u32, usize), ParseError> {
+ // Raw string literal
+ let num_hashes = input[offset..].bytes().position(|b| b != b'#')
+ .ok_or(perr(None, InvalidLiteral))?;
+
+ if input.as_bytes().get(offset + num_hashes) != Some(&b'"') {
+ return Err(perr(None, InvalidLiteral));
+ }
+ let start_inner = offset + num_hashes + 1;
+ let hashes = &input[offset..num_hashes + offset];
+
+ let mut closing_quote_pos = None;
+ let mut i = start_inner;
+ let mut end_last_escape = start_inner;
+ let mut value = String::new();
+ while i < input.len() {
+ let b = input.as_bytes()[i];
+ if b == b'"' && input[i + 1..].starts_with(hashes) {
+ closing_quote_pos = Some(i);
+ break;
+ }
+
+ if b == b'\r' {
+ // Convert `\r\n` into `\n`. This is currently not well documented
+ // in the Rust reference, but is done even for raw strings. That's
+ // because rustc simply converts all line endings when reading
+ // source files.
+ if input.as_bytes().get(i + 1) == Some(&b'\n') {
+ value.push_str(&input[end_last_escape..i]);
+ value.push('\n');
+ i += 2;
+ end_last_escape = i;
+ continue;
+ } else if E::SUPPORTS_UNICODE {
+ // If no \n follows the \r and we are scanning a raw string
+ // (not raw byte string), we error.
+ return Err(perr(i, IsolatedCr))
+ }
+ }
+
+ if !E::SUPPORTS_UNICODE {
+ if !b.is_ascii() {
+ return Err(perr(i, NonAsciiInByteLiteral));
+ }
+ }
+
+ i += 1;
+ }
+
+ let closing_quote_pos = closing_quote_pos.ok_or(perr(None, UnterminatedRawString))?;
+
+ let start_suffix = closing_quote_pos + num_hashes + 1;
+ let suffix = &input[start_suffix..];
+ check_suffix(suffix).map_err(|kind| perr(start_suffix, kind))?;
+
+ // `value` is only empty if there was no \r\n in the input string (with the
+ // special case of the input being empty). This means the string value
+ // equals the input, so we store `None`.
+ let value = if value.is_empty() {
+ None
+ } else {
+        // There was an \r\n in the string, so we still need to push the
+        // remaining unescaped part of the string.
+ value.push_str(&input[end_last_escape..closing_quote_pos]);
+ Some(value)
+ };
+
+ Ok((value, num_hashes as u32, start_suffix))
+}
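+
+// A minimal usage sketch (assuming the public `StringLit::parse`/`value` API
+// described in `lib.rs`): raw strings get no escape processing, so a backslash
+// stays a backslash; only `\r\n` is rewritten to `\n` as implemented above.
+//
+//     use litrs::StringLit;
+//
+//     let lit = StringLit::parse(r###"r#"a\nb"#"###).unwrap();
+//     assert_eq!(lit.value(), r"a\nb"); // a real backslash followed by 'n'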
diff --git a/third_party/rust/litrs/src/float/mod.rs b/third_party/rust/litrs/src/float/mod.rs
new file mode 100644
index 0000000000..0518633a6b
--- /dev/null
+++ b/third_party/rust/litrs/src/float/mod.rs
@@ -0,0 +1,257 @@
+use std::{fmt, str::FromStr};
+
+use crate::{
+ Buffer, ParseError,
+ err::{perr, ParseErrorKind::*},
+ parse::{end_dec_digits, first_byte_or_empty, check_suffix},
+};
+
+
+
+/// A floating point literal, e.g. `3.14`, `8.`, `135e12`, or `1.956e2f64`.
+///
+/// This kind of literal has several forms, but generally consists of a main
+/// number part, an optional exponent and an optional type suffix. See
+/// [the reference][ref] for more information.
+///
+/// A leading minus sign `-` is not part of the literal grammar! `-3.14` consists
+/// of two tokens in the Rust grammar. Further, `27` and `27f32` are not float
+/// literals, but integer literals! Consequently, `FloatLit::parse` will reject them.
+///
+///
+/// [ref]: https://doc.rust-lang.org/reference/tokens.html#floating-point-literals
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct FloatLit<B: Buffer> {
+ /// The whole raw input. The `usize` fields in this struct partition this
+ /// string. Always true: `end_integer_part <= end_fractional_part`.
+ ///
+ /// ```text
+ /// 12_3.4_56e789f32
+ /// ╷ ╷ ╷
+ /// | | └ end_number_part = 13
+ /// | └ end_fractional_part = 9
+ /// └ end_integer_part = 4
+ ///
+ /// 246.
+ /// ╷╷
+ /// |└ end_fractional_part = end_number_part = 4
+ /// └ end_integer_part = 3
+ ///
+ /// 1234e89
+ /// ╷ ╷
+ /// | └ end_number_part = 7
+ /// └ end_integer_part = end_fractional_part = 4
+ /// ```
+ raw: B,
+
+ /// The first index not part of the integer part anymore. Since the integer
+ /// part is at the start, this is also the length of that part.
+ end_integer_part: usize,
+
+ /// The first index after the fractional part.
+ end_fractional_part: usize,
+
+ /// The first index after the whole number part (everything except type suffix).
+ end_number_part: usize,
+}
+
+impl<B: Buffer> FloatLit<B> {
+ /// Parses the input as a floating point literal. Returns an error if the
+ /// input is invalid or represents a different kind of literal. Will also
+ /// reject decimal integer literals like `23` or `17f32`, in accordance
+ /// with the spec.
+ pub fn parse(s: B) -> Result<Self, ParseError> {
+ match first_byte_or_empty(&s)? {
+ b'0'..=b'9' => {
+ // TODO: simplify once RFC 2528 is stabilized
+ let FloatLit {
+ end_integer_part,
+ end_fractional_part,
+ end_number_part,
+ ..
+ } = parse_impl(&s)?;
+
+ Ok(Self { raw: s, end_integer_part, end_fractional_part, end_number_part })
+ },
+ _ => Err(perr(0, DoesNotStartWithDigit)),
+ }
+ }
+
+ /// Returns the number part (including integer part, fractional part and
+ /// exponent), but without the suffix. If you want an actual floating
+ /// point value, you need to parse this string, e.g. with `f32::from_str`
+ /// or an external crate.
+ pub fn number_part(&self) -> &str {
+ &(*self.raw)[..self.end_number_part]
+ }
+
+ /// Returns the non-empty integer part of this literal.
+ pub fn integer_part(&self) -> &str {
+ &(*self.raw)[..self.end_integer_part]
+ }
+
+ /// Returns the optional fractional part of this literal. Does not include
+ /// the period. If a period exists in the input, `Some` is returned, `None`
+ /// otherwise. Note that `Some("")` might be returned, e.g. for `3.`.
+ pub fn fractional_part(&self) -> Option<&str> {
+ if self.end_integer_part == self.end_fractional_part {
+ None
+ } else {
+ Some(&(*self.raw)[self.end_integer_part + 1..self.end_fractional_part])
+ }
+ }
+
+ /// Optional exponent part. Might be empty if there was no exponent part in
+ /// the input. Includes the `e` or `E` at the beginning.
+ pub fn exponent_part(&self) -> &str {
+ &(*self.raw)[self.end_fractional_part..self.end_number_part]
+ }
+
+ /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
+ pub fn suffix(&self) -> &str {
+ &(*self.raw)[self.end_number_part..]
+ }
+
+ /// Returns the raw input that was passed to `parse`.
+ pub fn raw_input(&self) -> &str {
+ &self.raw
+ }
+
+ /// Returns the raw input that was passed to `parse`, potentially owned.
+ pub fn into_raw_input(self) -> B {
+ self.raw
+ }
+}
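+
+// A minimal usage sketch of the accessors above, mirroring the diagram on the
+// struct definition. Note that `FloatLit` never computes a numeric value; to
+// get one, parse `number_part()` yourself (e.g. with `f64::from_str`), as its
+// documentation suggests.
+//
+//     use litrs::FloatLit;
+//
+//     let lit = FloatLit::parse("7.25e2f32").unwrap();
+//     assert_eq!(lit.integer_part(), "7");
+//     assert_eq!(lit.fractional_part(), Some("25"));
+//     assert_eq!(lit.exponent_part(), "e2");
+//     assert_eq!(lit.suffix(), "f32");
+//     assert_eq!(lit.number_part(), "7.25e2");
+//     let value: f64 = lit.number_part().parse().unwrap();
+//     assert_eq!(value, 725.0);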
+
+impl FloatLit<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn to_owned(&self) -> FloatLit<String> {
+ FloatLit {
+ raw: self.raw.to_owned(),
+ end_integer_part: self.end_integer_part,
+ end_fractional_part: self.end_fractional_part,
+ end_number_part: self.end_number_part,
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for FloatLit<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", &*self.raw)
+ }
+}
+
+/// Precondition: first byte of string has to be in `b'0'..=b'9'`.
+#[inline(never)]
+pub(crate) fn parse_impl(input: &str) -> Result<FloatLit<&str>, ParseError> {
+ // Integer part.
+ let end_integer_part = end_dec_digits(input.as_bytes());
+ let rest = &input[end_integer_part..];
+
+
+ // Fractional part.
+ let end_fractional_part = if rest.as_bytes().get(0) == Some(&b'.') {
+ // The fractional part must not start with `_`.
+ if rest.as_bytes().get(1) == Some(&b'_') {
+ return Err(perr(end_integer_part + 1, UnexpectedChar));
+ }
+
+ end_dec_digits(rest[1..].as_bytes()) + 1 + end_integer_part
+ } else {
+ end_integer_part
+ };
+ let rest = &input[end_fractional_part..];
+
+ // If we have a period that is not followed by decimal digits, the
+ // literal must end now.
+ if end_integer_part + 1 == end_fractional_part && !rest.is_empty() {
+ return Err(perr(end_integer_part + 1, UnexpectedChar));
+ }
+
+ // Optional exponent.
+ let end_number_part = if rest.starts_with('e') || rest.starts_with('E') {
+ // Strip single - or + sign at the beginning.
+ let exp_number_start = match rest.as_bytes().get(1) {
+ Some(b'-') | Some(b'+') => 2,
+ _ => 1,
+ };
+
+ // Find end of exponent and make sure there is at least one digit.
+ let end_exponent = end_dec_digits(rest[exp_number_start..].as_bytes()) + exp_number_start;
+ if !rest[exp_number_start..end_exponent].bytes().any(|b| matches!(b, b'0'..=b'9')) {
+ return Err(perr(
+ end_fractional_part..end_fractional_part + end_exponent,
+ NoExponentDigits,
+ ));
+ }
+
+ end_exponent + end_fractional_part
+ } else {
+ end_fractional_part
+ };
+
+ // Make sure the suffix is valid.
+ let suffix = &input[end_number_part..];
+ check_suffix(suffix).map_err(|kind| perr(end_number_part..input.len(), kind))?;
+
+    // A float literal needs either a fractional or exponent part; otherwise it
+    // is an integer literal.
+ if end_integer_part == end_number_part {
+ return Err(perr(None, UnexpectedIntegerLit));
+ }
+
+ Ok(FloatLit {
+ raw: input,
+ end_integer_part,
+ end_fractional_part,
+ end_number_part,
+ })
+}
+
+
+/// All possible float type suffixes.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum FloatType {
+ F32,
+ F64,
+}
+
+impl FloatType {
+ /// Returns the type corresponding to the given suffix (e.g. `"f32"` is
+ /// mapped to `Self::F32`). If the suffix is not a valid float type, `None`
+ /// is returned.
+ pub fn from_suffix(suffix: &str) -> Option<Self> {
+ match suffix {
+ "f32" => Some(FloatType::F32),
+ "f64" => Some(FloatType::F64),
+ _ => None,
+ }
+ }
+
+ /// Returns the suffix for this type, e.g. `"f32"` for `Self::F32`.
+ pub fn suffix(self) -> &'static str {
+ match self {
+ Self::F32 => "f32",
+ Self::F64 => "f64",
+ }
+ }
+}
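+
+// A small sketch (assuming `FloatType` is re-exported at the crate root like
+// the other literal types): it is just the suffix-to-enum mapping.
+//
+//     use litrs::FloatType;
+//
+//     assert_eq!(FloatType::from_suffix("f64"), Some(FloatType::F64));
+//     assert_eq!(FloatType::from_suffix("f16"), None); // not a valid suffix here
+//     assert_eq!(FloatType::F32.suffix(), "f32");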
+
+impl FromStr for FloatType {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ Self::from_suffix(s).ok_or(())
+ }
+}
+
+impl fmt::Display for FloatType {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.suffix().fmt(f)
+ }
+}
+
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/float/tests.rs b/third_party/rust/litrs/src/float/tests.rs
new file mode 100644
index 0000000000..f22443bd19
--- /dev/null
+++ b/third_party/rust/litrs/src/float/tests.rs
@@ -0,0 +1,253 @@
+use crate::{
+ Literal, ParseError,
+ test_util::{assert_parse_ok_eq, assert_roundtrip},
+};
+use super::{FloatLit, FloatType};
+
+
+// ===== Utility functions =======================================================================
+
+/// Helper macro to check parsing a float.
+///
+/// This macro contains quite a bit of logic itself (which can be buggy of
+/// course), so we have a few test functions below to test a bunch of cases
+/// manually.
+macro_rules! check {
+ ($intpart:literal $fracpart:literal $exppart:literal $suffix:tt) => {
+ let input = concat!($intpart, $fracpart, $exppart, check!(@stringify_suffix $suffix));
+ let expected_float = FloatLit {
+ raw: input,
+ end_integer_part: $intpart.len(),
+ end_fractional_part: $intpart.len() + $fracpart.len(),
+ end_number_part: $intpart.len() + $fracpart.len() + $exppart.len(),
+ };
+
+ assert_parse_ok_eq(
+ input, FloatLit::parse(input), expected_float.clone(), "FloatLit::parse");
+ assert_parse_ok_eq(
+ input, Literal::parse(input), Literal::Float(expected_float), "Literal::parse");
+ assert_eq!(FloatLit::parse(input).unwrap().suffix(), check!(@ty $suffix));
+ assert_roundtrip(expected_float.to_owned(), input);
+ };
+ (@ty f32) => { "f32" };
+ (@ty f64) => { "f64" };
+ (@ty -) => { "" };
+ (@stringify_suffix -) => { "" };
+ (@stringify_suffix $suffix:ident) => { stringify!($suffix) };
+}
+
+
+// ===== Actual tests ===========================================================================
+
+#[test]
+fn manual_without_suffix() -> Result<(), ParseError> {
+ let f = FloatLit::parse("3.14")?;
+ assert_eq!(f.number_part(), "3.14");
+ assert_eq!(f.integer_part(), "3");
+ assert_eq!(f.fractional_part(), Some("14"));
+ assert_eq!(f.exponent_part(), "");
+ assert_eq!(f.suffix(), "");
+
+ let f = FloatLit::parse("9.")?;
+ assert_eq!(f.number_part(), "9.");
+ assert_eq!(f.integer_part(), "9");
+ assert_eq!(f.fractional_part(), Some(""));
+ assert_eq!(f.exponent_part(), "");
+ assert_eq!(f.suffix(), "");
+
+ let f = FloatLit::parse("8e1")?;
+ assert_eq!(f.number_part(), "8e1");
+ assert_eq!(f.integer_part(), "8");
+ assert_eq!(f.fractional_part(), None);
+ assert_eq!(f.exponent_part(), "e1");
+ assert_eq!(f.suffix(), "");
+
+ let f = FloatLit::parse("8E3")?;
+ assert_eq!(f.number_part(), "8E3");
+ assert_eq!(f.integer_part(), "8");
+ assert_eq!(f.fractional_part(), None);
+ assert_eq!(f.exponent_part(), "E3");
+ assert_eq!(f.suffix(), "");
+
+ let f = FloatLit::parse("8_7_6.1_23e15")?;
+ assert_eq!(f.number_part(), "8_7_6.1_23e15");
+ assert_eq!(f.integer_part(), "8_7_6");
+ assert_eq!(f.fractional_part(), Some("1_23"));
+ assert_eq!(f.exponent_part(), "e15");
+ assert_eq!(f.suffix(), "");
+
+ let f = FloatLit::parse("8.2e-_04_9")?;
+ assert_eq!(f.number_part(), "8.2e-_04_9");
+ assert_eq!(f.integer_part(), "8");
+ assert_eq!(f.fractional_part(), Some("2"));
+ assert_eq!(f.exponent_part(), "e-_04_9");
+ assert_eq!(f.suffix(), "");
+
+ Ok(())
+}
+
+#[test]
+fn manual_with_suffix() -> Result<(), ParseError> {
+ let f = FloatLit::parse("3.14f32")?;
+ assert_eq!(f.number_part(), "3.14");
+ assert_eq!(f.integer_part(), "3");
+ assert_eq!(f.fractional_part(), Some("14"));
+ assert_eq!(f.exponent_part(), "");
+ assert_eq!(FloatType::from_suffix(f.suffix()), Some(FloatType::F32));
+
+ let f = FloatLit::parse("8e1f64")?;
+ assert_eq!(f.number_part(), "8e1");
+ assert_eq!(f.integer_part(), "8");
+ assert_eq!(f.fractional_part(), None);
+ assert_eq!(f.exponent_part(), "e1");
+ assert_eq!(FloatType::from_suffix(f.suffix()), Some(FloatType::F64));
+
+ let f = FloatLit::parse("8_7_6.1_23e15f32")?;
+ assert_eq!(f.number_part(), "8_7_6.1_23e15");
+ assert_eq!(f.integer_part(), "8_7_6");
+ assert_eq!(f.fractional_part(), Some("1_23"));
+ assert_eq!(f.exponent_part(), "e15");
+ assert_eq!(FloatType::from_suffix(f.suffix()), Some(FloatType::F32));
+
+ let f = FloatLit::parse("8.2e-_04_9f64")?;
+ assert_eq!(f.number_part(), "8.2e-_04_9");
+ assert_eq!(f.integer_part(), "8");
+ assert_eq!(f.fractional_part(), Some("2"));
+ assert_eq!(f.exponent_part(), "e-_04_9");
+ assert_eq!(FloatType::from_suffix(f.suffix()), Some(FloatType::F64));
+
+ Ok(())
+}
+
+#[test]
+fn simple() {
+ check!("3" ".14" "" -);
+ check!("3" ".14" "" f32);
+ check!("3" ".14" "" f64);
+
+ check!("3" "" "e987654321" -);
+ check!("3" "" "e987654321" f64);
+
+ check!("42_888" ".05" "" -);
+ check!("42_888" ".05" "E5___" f32);
+ check!("123456789" "" "e_1" f64);
+ check!("123456789" ".99" "e_1" f64);
+ check!("123456789" ".99" "" f64);
+ check!("123456789" ".99" "" -);
+
+ check!("147" ".3_33" "" -);
+ check!("147" ".3_33__" "E3" f64);
+ check!("147" ".3_33__" "" f32);
+
+ check!("147" ".333" "e-10" -);
+ check!("147" ".333" "e-_7" f32);
+ check!("147" ".333" "e+10" -);
+ check!("147" ".333" "e+_7" f32);
+
+ check!("86" "." "" -);
+ check!("0" "." "" -);
+ check!("0_" "." "" -);
+ check!("0" ".0000001" "" -);
+ check!("0" ".000_0001" "" -);
+
+ check!("0" ".0" "e+0" -);
+ check!("0" "" "E+0" -);
+ check!("34" "" "e+0" -);
+ check!("0" ".9182" "E+0" f32);
+}
+
+#[test]
+fn non_standard_suffixes() {
+ #[track_caller]
+ fn check_suffix(
+ input: &str,
+ integer_part: &str,
+ fractional_part: Option<&str>,
+ exponent_part: &str,
+ suffix: &str,
+ ) {
+ let lit = FloatLit::parse(input)
+ .unwrap_or_else(|e| panic!("expected to parse '{}' but got {}", input, e));
+ assert_eq!(lit.integer_part(), integer_part);
+ assert_eq!(lit.fractional_part(), fractional_part);
+ assert_eq!(lit.exponent_part(), exponent_part);
+ assert_eq!(lit.suffix(), suffix);
+
+ let lit = match Literal::parse(input) {
+ Ok(Literal::Float(f)) => f,
+ other => panic!("Expected float literal, but got {:?} for '{}'", other, input),
+ };
+ assert_eq!(lit.integer_part(), integer_part);
+ assert_eq!(lit.fractional_part(), fractional_part);
+ assert_eq!(lit.exponent_part(), exponent_part);
+ assert_eq!(lit.suffix(), suffix);
+ }
+
+ check_suffix("7.1f23", "7", Some("1"), "", "f23");
+ check_suffix("7.1f320", "7", Some("1"), "", "f320");
+ check_suffix("7.1f64_", "7", Some("1"), "", "f64_");
+ check_suffix("8.1f649", "8", Some("1"), "", "f649");
+ check_suffix("8.1f64f32", "8", Some("1"), "", "f64f32");
+ check_suffix("23e2_banana", "23", None, "e2_", "banana");
+ check_suffix("23.2_banana", "23", Some("2_"), "", "banana");
+ check_suffix("23e2pe55ter", "23", None, "e2", "pe55ter");
+ check_suffix("23e2p_e55ter", "23", None, "e2", "p_e55ter");
+ check_suffix("3.15Jürgen", "3", Some("15"), "", "Jürgen");
+ check_suffix("3e2e5", "3", None, "e2", "e5");
+ check_suffix("3e2e5f", "3", None, "e2", "e5f");
+}
+
+#[test]
+fn parse_err() {
+ assert_err!(FloatLit, "", Empty, None);
+ assert_err_single!(FloatLit::parse("."), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("+"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("-"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("e"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("e8"), DoesNotStartWithDigit, 0);
+ assert_err!(FloatLit, "0e", NoExponentDigits, 1..2);
+ assert_err_single!(FloatLit::parse("f32"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("foo"), DoesNotStartWithDigit, 0);
+
+ assert_err_single!(FloatLit::parse("inf"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("nan"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("NaN"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse("NAN"), DoesNotStartWithDigit, 0);
+
+ assert_err_single!(FloatLit::parse("_2.7"), DoesNotStartWithDigit, 0);
+ assert_err_single!(FloatLit::parse(".5"), DoesNotStartWithDigit, 0);
+ assert_err!(FloatLit, "1e", NoExponentDigits, 1..2);
+ assert_err!(FloatLit, "1.e4", UnexpectedChar, 2);
+ assert_err!(FloatLit, "3._4", UnexpectedChar, 2);
+ assert_err!(FloatLit, "3.f32", UnexpectedChar, 2);
+ assert_err!(FloatLit, "3.e5", UnexpectedChar, 2);
+ assert_err!(FloatLit, "12345._987", UnexpectedChar, 6);
+ assert_err!(FloatLit, "46._", UnexpectedChar, 3);
+ assert_err!(FloatLit, "46.f32", UnexpectedChar, 3);
+ assert_err!(FloatLit, "46.e3", UnexpectedChar, 3);
+ assert_err!(FloatLit, "46._e3", UnexpectedChar, 3);
+ assert_err!(FloatLit, "46.e3f64", UnexpectedChar, 3);
+ assert_err!(FloatLit, "23.4e_", NoExponentDigits, 4..6);
+ assert_err!(FloatLit, "23E___f32", NoExponentDigits, 2..6);
+ assert_err!(FloatLit, "55e3.1", UnexpectedChar, 4..6);
+
+ assert_err!(FloatLit, "3.7+", UnexpectedChar, 3..4);
+ assert_err!(FloatLit, "3.7+2", UnexpectedChar, 3..5);
+ assert_err!(FloatLit, "3.7-", UnexpectedChar, 3..4);
+ assert_err!(FloatLit, "3.7-2", UnexpectedChar, 3..5);
+ assert_err!(FloatLit, "3.7e+", NoExponentDigits, 3..5);
+ assert_err!(FloatLit, "3.7e-", NoExponentDigits, 3..5);
+ assert_err!(FloatLit, "3.7e-+3", NoExponentDigits, 3..5); // suboptimal error
+ assert_err!(FloatLit, "3.7e+-3", NoExponentDigits, 3..5); // suboptimal error
+ assert_err_single!(FloatLit::parse("0x44.5"), InvalidSuffix, 1..6);
+
+ assert_err_single!(FloatLit::parse("3"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("35_389"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("9_8_7f32"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("9_8_7banana"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("7f23"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("7f320"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("7f64_"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("8f649"), UnexpectedIntegerLit, None);
+ assert_err_single!(FloatLit::parse("8f64f32"), UnexpectedIntegerLit, None);
+}
diff --git a/third_party/rust/litrs/src/impls.rs b/third_party/rust/litrs/src/impls.rs
new file mode 100644
index 0000000000..61a314dd84
--- /dev/null
+++ b/third_party/rust/litrs/src/impls.rs
@@ -0,0 +1,401 @@
+use std::convert::TryFrom;
+
+use crate::{Literal, err::{InvalidToken, TokenKind}};
+
+
+/// Helper macro to call a `callback` macro four times for all combinations of
+/// `proc_macro`/`proc_macro2` and `&`/owned.
+macro_rules! helper {
+ ($callback:ident, $($input:tt)*) => {
+ $callback!([proc_macro::] => $($input)*);
+ $callback!([&proc_macro::] => $($input)*);
+ #[cfg(feature = "proc-macro2")]
+ $callback!([proc_macro2::] => $($input)*);
+ #[cfg(feature = "proc-macro2")]
+ $callback!([&proc_macro2::] => $($input)*);
+ };
+}
+
+/// Like `helper!` but without reference types.
+macro_rules! helper_no_refs {
+ ($callback:ident, $($input:tt)*) => {
+ $callback!([proc_macro::] => $($input)*);
+ #[cfg(feature = "proc-macro2")]
+ $callback!([proc_macro2::] => $($input)*);
+ };
+}
+
+
+// ==============================================================================================
+// ===== `From<*Lit> for Literal`
+// ==============================================================================================
+
+macro_rules! impl_specific_lit_to_lit {
+ ($ty:ty, $variant:ident) => {
+ impl<B: crate::Buffer> From<$ty> for Literal<B> {
+ fn from(src: $ty) -> Self {
+ Literal::$variant(src)
+ }
+ }
+ };
+}
+
+impl_specific_lit_to_lit!(crate::BoolLit, Bool);
+impl_specific_lit_to_lit!(crate::IntegerLit<B>, Integer);
+impl_specific_lit_to_lit!(crate::FloatLit<B>, Float);
+impl_specific_lit_to_lit!(crate::CharLit<B>, Char);
+impl_specific_lit_to_lit!(crate::StringLit<B>, String);
+impl_specific_lit_to_lit!(crate::ByteLit<B>, Byte);
+impl_specific_lit_to_lit!(crate::ByteStringLit<B>, ByteString);
+
+
+
+// ==============================================================================================
+// ===== `From<pm::Literal> for Literal`
+// ==============================================================================================
+
+
+macro_rules! impl_tt_to_lit {
+ ([$($prefix:tt)*] => ) => {
+ impl From<$($prefix)* Literal> for Literal<String> {
+ fn from(src: $($prefix)* Literal) -> Self {
+ // We call `expect` in all these impls: this library aims to implement exactly
+ // the Rust grammar, so if we have a valid Rust literal, we should always be
+ // able to parse it.
+ Self::parse(src.to_string())
+ .expect("bug: failed to parse output of `Literal::to_string`")
+ }
+ }
+ }
+}
+
+helper!(impl_tt_to_lit, );
+
+
+// ==============================================================================================
+// ===== `TryFrom<pm::TokenTree> for Literal`
+// ==============================================================================================
+
+macro_rules! impl_tt_to_lit {
+ ([$($prefix:tt)*] => ) => {
+ impl TryFrom<$($prefix)* TokenTree> for Literal<String> {
+ type Error = InvalidToken;
+ fn try_from(tt: $($prefix)* TokenTree) -> Result<Self, Self::Error> {
+ let span = tt.span();
+ let res = match tt {
+ $($prefix)* TokenTree::Group(_) => Err(TokenKind::Group),
+ $($prefix)* TokenTree::Punct(_) => Err(TokenKind::Punct),
+ $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "true"
+ => return Ok(Literal::Bool(crate::BoolLit::True)),
+ $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "false"
+ => return Ok(Literal::Bool(crate::BoolLit::False)),
+ $($prefix)* TokenTree::Ident(_) => Err(TokenKind::Ident),
+ $($prefix)* TokenTree::Literal(ref lit) => Ok(lit),
+ };
+
+ match res {
+ Ok(lit) => Ok(From::from(lit)),
+ Err(actual) => Err(InvalidToken {
+ actual,
+ expected: TokenKind::Literal,
+ span: span.into(),
+ }),
+ }
+ }
+ }
+ }
+}
+
+helper!(impl_tt_to_lit, );
+
+
+// ==============================================================================================
+// ===== `TryFrom<pm::Literal>`, `TryFrom<pm::TokenTree>` for non-bool `*Lit`
+// ==============================================================================================
+
+fn kind_of(lit: &Literal<String>) -> TokenKind {
+ match lit {
+ Literal::String(_) => TokenKind::StringLit,
+ Literal::Bool(_) => TokenKind::BoolLit,
+ Literal::Integer(_) => TokenKind::IntegerLit,
+ Literal::Float(_) => TokenKind::FloatLit,
+ Literal::Char(_) => TokenKind::CharLit,
+ Literal::Byte(_) => TokenKind::ByteLit,
+ Literal::ByteString(_) => TokenKind::ByteStringLit,
+ }
+}
+
+macro_rules! impl_for_specific_lit {
+ ([$($prefix:tt)*] => $ty:ty, $variant:ident, $kind:ident) => {
+ impl TryFrom<$($prefix)* Literal> for $ty {
+ type Error = InvalidToken;
+ fn try_from(src: $($prefix)* Literal) -> Result<Self, Self::Error> {
+ let span = src.span();
+ let lit: Literal<String> = src.into();
+ match lit {
+ Literal::$variant(s) => Ok(s),
+ other => Err(InvalidToken {
+ expected: TokenKind::$kind,
+ actual: kind_of(&other),
+ span: span.into(),
+ }),
+ }
+ }
+ }
+
+ impl TryFrom<$($prefix)* TokenTree> for $ty {
+ type Error = InvalidToken;
+ fn try_from(tt: $($prefix)* TokenTree) -> Result<Self, Self::Error> {
+ let span = tt.span();
+ let res = match tt {
+ $($prefix)* TokenTree::Group(_) => Err(TokenKind::Group),
+ $($prefix)* TokenTree::Punct(_) => Err(TokenKind::Punct),
+ $($prefix)* TokenTree::Ident(_) => Err(TokenKind::Ident),
+ $($prefix)* TokenTree::Literal(ref lit) => Ok(lit),
+ };
+
+ match res {
+ Ok(lit) => <$ty>::try_from(lit),
+ Err(actual) => Err(InvalidToken {
+ actual,
+ expected: TokenKind::$kind,
+ span: span.into(),
+ }),
+ }
+ }
+ }
+ };
+}
+
+helper!(impl_for_specific_lit, crate::IntegerLit<String>, Integer, IntegerLit);
+helper!(impl_for_specific_lit, crate::FloatLit<String>, Float, FloatLit);
+helper!(impl_for_specific_lit, crate::CharLit<String>, Char, CharLit);
+helper!(impl_for_specific_lit, crate::StringLit<String>, String, StringLit);
+helper!(impl_for_specific_lit, crate::ByteLit<String>, Byte, ByteLit);
+helper!(impl_for_specific_lit, crate::ByteStringLit<String>, ByteString, ByteStringLit);
+
+
+// ==============================================================================================
+// ===== `From<*Lit> for pm::Literal`
+// ==============================================================================================
+
+macro_rules! impl_specific_lit_to_pm_lit {
+ ([$($prefix:tt)*] => $ty:ident, $variant:ident, $kind:ident) => {
+ impl<B: crate::Buffer> From<crate::$ty<B>> for $($prefix)* Literal {
+ fn from(l: crate::$ty<B>) -> Self {
+            // This should never fail: an input that is parsed successfully
+ // as one of our literal types should always parse as a
+ // proc_macro literal as well!
+ l.raw_input().parse().unwrap_or_else(|e| {
+ panic!(
+ "failed to parse `{}` as `{}`: {}",
+ l.raw_input(),
+ std::any::type_name::<Self>(),
+ e,
+ )
+ })
+ }
+ }
+ };
+}
+
+helper_no_refs!(impl_specific_lit_to_pm_lit, IntegerLit, Integer, IntegerLit);
+helper_no_refs!(impl_specific_lit_to_pm_lit, FloatLit, Float, FloatLit);
+helper_no_refs!(impl_specific_lit_to_pm_lit, CharLit, Char, CharLit);
+helper_no_refs!(impl_specific_lit_to_pm_lit, StringLit, String, StringLit);
+helper_no_refs!(impl_specific_lit_to_pm_lit, ByteLit, Byte, ByteLit);
+helper_no_refs!(impl_specific_lit_to_pm_lit, ByteStringLit, ByteString, ByteStringLit);
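+
+// A small sketch of the conversions generated above (assuming the optional
+// `proc-macro2` feature is enabled): a parsed literal can be turned back into
+// a token, e.g. for code generation.
+//
+//     let lit = litrs::FloatLit::parse("1.5f32").unwrap();
+//     let token: proc_macro2::Literal = lit.into();
+//     assert_eq!(token.to_string(), "1.5f32");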
+
+
+// ==============================================================================================
+// ===== `TryFrom<pm::TokenTree> for BoolLit`
+// ==============================================================================================
+
+macro_rules! impl_from_tt_for_bool {
+ ([$($prefix:tt)*] => ) => {
+ impl TryFrom<$($prefix)* TokenTree> for crate::BoolLit {
+ type Error = InvalidToken;
+ fn try_from(tt: $($prefix)* TokenTree) -> Result<Self, Self::Error> {
+ let span = tt.span();
+ let actual = match tt {
+ $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "true"
+ => return Ok(crate::BoolLit::True),
+ $($prefix)* TokenTree::Ident(ref ident) if ident.to_string() == "false"
+ => return Ok(crate::BoolLit::False),
+
+ $($prefix)* TokenTree::Group(_) => TokenKind::Group,
+ $($prefix)* TokenTree::Punct(_) => TokenKind::Punct,
+ $($prefix)* TokenTree::Ident(_) => TokenKind::Ident,
+ $($prefix)* TokenTree::Literal(ref lit) => kind_of(&Literal::from(lit)),
+ };
+
+ Err(InvalidToken {
+ actual,
+ expected: TokenKind::BoolLit,
+ span: span.into(),
+ })
+ }
+ }
+ };
+}
+
+helper!(impl_from_tt_for_bool, );
+
+// ==============================================================================================
+// ===== `From<BoolLit> for pm::Ident`
+// ==============================================================================================
+
+macro_rules! impl_bool_lit_to_pm_lit {
+ ([$($prefix:tt)*] => ) => {
+ impl From<crate::BoolLit> for $($prefix)* Ident {
+ fn from(l: crate::BoolLit) -> Self {
+ Self::new(l.as_str(), $($prefix)* Span::call_site())
+ }
+ }
+ };
+}
+
+helper_no_refs!(impl_bool_lit_to_pm_lit, );
+
+
+mod tests {
+ //! # Tests
+ //!
+ //! ```no_run
+ //! extern crate proc_macro;
+ //!
+ //! use std::convert::TryFrom;
+ //! use litrs::Literal;
+ //!
+ //! fn give<T>() -> T {
+ //! panic!()
+ //! }
+ //!
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::BoolLit>());
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::IntegerLit<String>>());
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::FloatLit<String>>());
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::CharLit<String>>());
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::StringLit<String>>());
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::ByteLit<String>>());
+ //! let _ = litrs::Literal::<String>::from(give::<litrs::ByteStringLit<String>>());
+ //!
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::BoolLit>());
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::IntegerLit<&'static str>>());
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::FloatLit<&'static str>>());
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::CharLit<&'static str>>());
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::StringLit<&'static str>>());
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::ByteLit<&'static str>>());
+ //! let _ = litrs::Literal::<&'static str>::from(give::<litrs::ByteStringLit<&'static str>>());
+ //!
+ //!
+ //! let _ = litrs::Literal::from(give::<proc_macro::Literal>());
+ //! let _ = litrs::Literal::from(give::<&proc_macro::Literal>());
+ //!
+ //! let _ = litrs::Literal::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::Literal::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //!
+ //! let _ = litrs::IntegerLit::try_from(give::<proc_macro::Literal>());
+ //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro::Literal>());
+ //!
+ //! let _ = litrs::FloatLit::try_from(give::<proc_macro::Literal>());
+ //! let _ = litrs::FloatLit::try_from(give::<&proc_macro::Literal>());
+ //!
+ //! let _ = litrs::CharLit::try_from(give::<proc_macro::Literal>());
+ //! let _ = litrs::CharLit::try_from(give::<&proc_macro::Literal>());
+ //!
+ //! let _ = litrs::StringLit::try_from(give::<proc_macro::Literal>());
+ //! let _ = litrs::StringLit::try_from(give::<&proc_macro::Literal>());
+ //!
+ //! let _ = litrs::ByteLit::try_from(give::<proc_macro::Literal>());
+ //! let _ = litrs::ByteLit::try_from(give::<&proc_macro::Literal>());
+ //!
+ //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro::Literal>());
+ //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro::Literal>());
+ //!
+ //!
+ //! let _ = litrs::BoolLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::BoolLit::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //! let _ = litrs::IntegerLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //! let _ = litrs::FloatLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::FloatLit::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //! let _ = litrs::CharLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::CharLit::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //! let _ = litrs::StringLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::StringLit::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //! let _ = litrs::ByteLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::ByteLit::try_from(give::<&proc_macro::TokenTree>());
+ //!
+ //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro::TokenTree>());
+ //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro::TokenTree>());
+ //! ```
+}
+
+#[cfg(feature = "proc-macro2")]
+mod tests_proc_macro2 {
+ //! # Tests
+ //!
+ //! ```no_run
+ //! extern crate proc_macro;
+ //!
+ //! use std::convert::TryFrom;
+ //! use litrs::Literal;
+ //!
+ //! fn give<T>() -> T {
+ //! panic!()
+ //! }
+ //!
+ //! let _ = litrs::Literal::from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::Literal::from(give::<&proc_macro2::Literal>());
+ //!
+ //! let _ = litrs::Literal::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::Literal::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //!
+ //! let _ = litrs::IntegerLit::try_from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro2::Literal>());
+ //!
+ //! let _ = litrs::FloatLit::try_from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::FloatLit::try_from(give::<&proc_macro2::Literal>());
+ //!
+ //! let _ = litrs::CharLit::try_from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::CharLit::try_from(give::<&proc_macro2::Literal>());
+ //!
+ //! let _ = litrs::StringLit::try_from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::StringLit::try_from(give::<&proc_macro2::Literal>());
+ //!
+ //! let _ = litrs::ByteLit::try_from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::ByteLit::try_from(give::<&proc_macro2::Literal>());
+ //!
+ //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro2::Literal>());
+ //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro2::Literal>());
+ //!
+ //!
+ //! let _ = litrs::BoolLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::BoolLit::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //! let _ = litrs::IntegerLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::IntegerLit::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //! let _ = litrs::FloatLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::FloatLit::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //! let _ = litrs::CharLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::CharLit::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //! let _ = litrs::StringLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::StringLit::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //! let _ = litrs::ByteLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::ByteLit::try_from(give::<&proc_macro2::TokenTree>());
+ //!
+ //! let _ = litrs::ByteStringLit::try_from(give::<proc_macro2::TokenTree>());
+ //! let _ = litrs::ByteStringLit::try_from(give::<&proc_macro2::TokenTree>());
+ //! ```
+}
diff --git a/third_party/rust/litrs/src/integer/mod.rs b/third_party/rust/litrs/src/integer/mod.rs
new file mode 100644
index 0000000000..cecd79d3fb
--- /dev/null
+++ b/third_party/rust/litrs/src/integer/mod.rs
@@ -0,0 +1,356 @@
+use std::{fmt, str::FromStr};
+
+use crate::{
+ Buffer, ParseError,
+ err::{perr, ParseErrorKind::*},
+ parse::{first_byte_or_empty, hex_digit_value, check_suffix},
+};
+
+
+/// An integer literal, e.g. `27`, `0x7F`, `0b101010u8` or `5_000_000i64`.
+///
+/// An integer literal consists of an optional base prefix (`0b`, `0o`, `0x`),
+/// the main part (digits and underscores), and an optional type suffix
+/// (e.g. `u64` or `i8`). See [the reference][ref] for more information.
+///
+/// Note that integer literals are always positive: the grammar does not contain
+/// the minus sign at all. The minus sign is just the unary negation operator,
+/// not part of the literal. This matters for cases like `- 128i8`:
+/// here, the literal itself would overflow the specified type (`i8` cannot
+/// represent 128). That's why in rustc, the literal overflow check is
+/// performed as a lint after parsing, not during the lexing stage. Similarly,
+/// [`IntegerLit::parse`] does not perform an overflow check.
+///
+/// [ref]: https://doc.rust-lang.org/reference/tokens.html#integer-literals
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[non_exhaustive]
+pub struct IntegerLit<B: Buffer> {
+ /// The raw literal. Grammar: `<prefix?><main part><suffix?>`.
+ raw: B,
+ /// First index of the main number part (after the base prefix).
+ start_main_part: usize,
+ /// First index not part of the main number part.
+ end_main_part: usize,
+ /// Parsed `raw[..start_main_part]`.
+ base: IntegerBase,
+}
+
+impl<B: Buffer> IntegerLit<B> {
+ /// Parses the input as an integer literal. Returns an error if the input is
+ /// invalid or represents a different kind of literal.
+ pub fn parse(input: B) -> Result<Self, ParseError> {
+ match first_byte_or_empty(&input)? {
+ digit @ b'0'..=b'9' => {
+ // TODO: simplify once RFC 2528 is stabilized
+ let IntegerLit {
+ start_main_part,
+ end_main_part,
+ base,
+ ..
+ } = parse_impl(&input, digit)?;
+
+ Ok(Self { raw: input, start_main_part, end_main_part, base })
+ },
+ _ => Err(perr(0, DoesNotStartWithDigit)),
+ }
+ }
+
+ /// Performs the actual string to int conversion to obtain the integer
+ /// value. The optional type suffix of the literal **is ignored by this
+ /// method**. This means `N` does not need to match the type suffix!
+ ///
+ /// Returns `None` if the literal overflows `N`.
+ ///
+    /// Hint: `u128` can represent all possible integer literal values,
+ /// as there are no negative literals (see type docs). Thus you can, for
+ /// example, safely use `lit.value::<u128>().to_string()` to get a decimal
+ /// string. (Technically, Rust integer literals can represent arbitrarily
+ /// large numbers, but those would be rejected at a later stage by the Rust
+ /// compiler).
+ pub fn value<N: FromIntegerLiteral>(&self) -> Option<N> {
+ let base = N::from_small_number(self.base.value());
+
+ let mut acc = N::from_small_number(0);
+ for digit in self.raw_main_part().bytes() {
+ if digit == b'_' {
+ continue;
+ }
+
+ // We don't actually need the base here: we already know this main
+ // part only contains digits valid for the specified base.
+ let digit = hex_digit_value(digit)
+ .unwrap_or_else(|| unreachable!("bug: integer main part contains non-digit"));
+
+ acc = acc.checked_mul(base)?;
+ acc = acc.checked_add(N::from_small_number(digit))?;
+ }
+
+ Some(acc)
+ }
+
+ /// The base of this integer literal.
+ pub fn base(&self) -> IntegerBase {
+ self.base
+ }
+
+ /// The main part containing the digits and potentially `_`. Do not try to
+ /// parse this directly as that would ignore the base!
+ pub fn raw_main_part(&self) -> &str {
+ &(*self.raw)[self.start_main_part..self.end_main_part]
+ }
+
+ /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
+ ///
+ /// If you want the type, try `IntegerType::from_suffix(lit.suffix())`.
+ pub fn suffix(&self) -> &str {
+ &(*self.raw)[self.end_main_part..]
+ }
+
+ /// Returns the raw input that was passed to `parse`.
+ pub fn raw_input(&self) -> &str {
+ &self.raw
+ }
+
+ /// Returns the raw input that was passed to `parse`, potentially owned.
+ pub fn into_raw_input(self) -> B {
+ self.raw
+ }
+}
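+
+// A minimal usage sketch of the accessors above. `value()` ignores the suffix,
+// so choose the target type yourself, or look it up via
+// `IntegerType::from_suffix(lit.suffix())`.
+//
+//     use litrs::{IntegerBase, IntegerLit, IntegerType};
+//
+//     let lit = IntegerLit::parse("0x2Au8").unwrap();
+//     assert_eq!(lit.base(), IntegerBase::Hexadecimal);
+//     assert_eq!(lit.raw_main_part(), "2A");
+//     assert_eq!(lit.suffix(), "u8");
+//     assert_eq!(IntegerType::from_suffix(lit.suffix()), Some(IntegerType::U8));
+//     // `u128` can hold any non-overflowing literal value (see `value` docs).
+//     assert_eq!(lit.value::<u128>(), Some(42));
+//     // Requesting a type that is too small yields `None`.
+//     assert_eq!(IntegerLit::parse("300u8").unwrap().value::<u8>(), None);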
+
+impl IntegerLit<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn to_owned(&self) -> IntegerLit<String> {
+ IntegerLit {
+ raw: self.raw.to_owned(),
+ start_main_part: self.start_main_part,
+ end_main_part: self.end_main_part,
+ base: self.base,
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for IntegerLit<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", &*self.raw)
+ }
+}
+
+/// Integer literal types. *Implementation detail*.
+///
+/// Implemented for all integer literal types. This trait is sealed and cannot
+/// be implemented outside of this crate. The trait's methods are implementation
+/// detail of this library and are not subject to semver.
+pub trait FromIntegerLiteral: self::sealed::Sealed + Copy {
+ /// Creates itself from the given number. `n` is guaranteed to be `<= 16`.
+ #[doc(hidden)]
+ fn from_small_number(n: u8) -> Self;
+
+ #[doc(hidden)]
+ fn checked_add(self, rhs: Self) -> Option<Self>;
+
+ #[doc(hidden)]
+ fn checked_mul(self, rhs: Self) -> Option<Self>;
+
+ #[doc(hidden)]
+ fn ty() -> IntegerType;
+}
+
+macro_rules! impl_from_int_literal {
+ ($( $ty:ty => $variant:ident ,)* ) => {
+ $(
+ impl self::sealed::Sealed for $ty {}
+ impl FromIntegerLiteral for $ty {
+ fn from_small_number(n: u8) -> Self {
+ n as Self
+ }
+ fn checked_add(self, rhs: Self) -> Option<Self> {
+ self.checked_add(rhs)
+ }
+ fn checked_mul(self, rhs: Self) -> Option<Self> {
+ self.checked_mul(rhs)
+ }
+ fn ty() -> IntegerType {
+ IntegerType::$variant
+ }
+ }
+ )*
+ };
+}
+
+impl_from_int_literal!(
+ u8 => U8, u16 => U16, u32 => U32, u64 => U64, u128 => U128, usize => Usize,
+ i8 => I8, i16 => I16, i32 => I32, i64 => I64, i128 => I128, isize => Isize,
+);
+
+mod sealed {
+ pub trait Sealed {}
+}
+
+/// Precondition: first byte of string has to be in `b'0'..=b'9'`.
+#[inline(never)]
+pub(crate) fn parse_impl(input: &str, first: u8) -> Result<IntegerLit<&str>, ParseError> {
+ // Figure out base and strip prefix base, if it exists.
+ let (end_prefix, base) = match (first, input.as_bytes().get(1)) {
+ (b'0', Some(b'b')) => (2, IntegerBase::Binary),
+ (b'0', Some(b'o')) => (2, IntegerBase::Octal),
+ (b'0', Some(b'x')) => (2, IntegerBase::Hexadecimal),
+
+ // Everything else is treated as decimal. Several cases are caught
+ // by this:
+ // - "123"
+ // - "0"
+ // - "0u8"
+ // - "0r" -> this will error later
+ _ => (0, IntegerBase::Decimal),
+ };
+ let without_prefix = &input[end_prefix..];
+
+
+ // Scan input to find the first character that's not a valid digit.
+ let is_valid_digit = match base {
+ IntegerBase::Binary => |b| matches!(b, b'0' | b'1' | b'_'),
+ IntegerBase::Octal => |b| matches!(b, b'0'..=b'7' | b'_'),
+ IntegerBase::Decimal => |b| matches!(b, b'0'..=b'9' | b'_'),
+ IntegerBase::Hexadecimal => |b| matches!(b, b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F' | b'_'),
+ };
+ let end_main = without_prefix.bytes()
+ .position(|b| !is_valid_digit(b))
+ .unwrap_or(without_prefix.len());
+ let (main_part, suffix) = without_prefix.split_at(end_main);
+
+ check_suffix(suffix).map_err(|kind| {
+        // This is just to have a nicer error kind for this special case. If the
+        // suffix is invalid, it is non-empty, so indexing its first byte is fine.
+ let first = suffix.as_bytes()[0];
+ if !is_valid_digit(first) && first.is_ascii_digit() {
+ perr(end_main + end_prefix, InvalidDigit)
+ } else {
+ perr(end_main + end_prefix..input.len(), kind)
+ }
+ })?;
+ if suffix.starts_with('e') || suffix.starts_with('E') {
+ return Err(perr(end_main, IntegerSuffixStartingWithE));
+ }
+
+ // Make sure main number part is not empty.
+ if main_part.bytes().filter(|&b| b != b'_').count() == 0 {
+ return Err(perr(end_prefix..end_prefix + end_main, NoDigits));
+ }
+
+ Ok(IntegerLit {
+ raw: input,
+ start_main_part: end_prefix,
+ end_main_part: end_main + end_prefix,
+ base,
+ })
+}
+
+
+/// The bases in which an integer can be specified.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum IntegerBase {
+ Binary,
+ Octal,
+ Decimal,
+ Hexadecimal,
+}
+
+impl IntegerBase {
+ /// Returns the literal prefix that indicates this base, i.e. `"0b"`,
+ /// `"0o"`, `""` and `"0x"`.
+ pub fn prefix(self) -> &'static str {
+ match self {
+ Self::Binary => "0b",
+ Self::Octal => "0o",
+ Self::Decimal => "",
+ Self::Hexadecimal => "0x",
+ }
+ }
+
+ /// Returns the base value, i.e. 2, 8, 10 or 16.
+ pub fn value(self) -> u8 {
+ match self {
+ Self::Binary => 2,
+ Self::Octal => 8,
+ Self::Decimal => 10,
+ Self::Hexadecimal => 16,
+ }
+ }
+}
+
+/// All possible integer type suffixes.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[non_exhaustive]
+pub enum IntegerType {
+ U8,
+ U16,
+ U32,
+ U64,
+ U128,
+ Usize,
+ I8,
+ I16,
+ I32,
+ I64,
+ I128,
+ Isize,
+}
+
+impl IntegerType {
+ /// Returns the type corresponding to the given suffix (e.g. `"u8"` is
+ /// mapped to `Self::U8`). If the suffix is not a valid integer type,
+ /// `None` is returned.
+ pub fn from_suffix(suffix: &str) -> Option<Self> {
+ match suffix {
+ "u8" => Some(Self::U8),
+ "u16" => Some(Self::U16),
+ "u32" => Some(Self::U32),
+ "u64" => Some(Self::U64),
+ "u128" => Some(Self::U128),
+ "usize" => Some(Self::Usize),
+ "i8" => Some(Self::I8),
+ "i16" => Some(Self::I16),
+ "i32" => Some(Self::I32),
+ "i64" => Some(Self::I64),
+ "i128" => Some(Self::I128),
+ "isize" => Some(Self::Isize),
+ _ => None,
+ }
+ }
+
+ /// Returns the suffix for this type, e.g. `"u8"` for `Self::U8`.
+ pub fn suffix(self) -> &'static str {
+ match self {
+ Self::U8 => "u8",
+ Self::U16 => "u16",
+ Self::U32 => "u32",
+ Self::U64 => "u64",
+ Self::U128 => "u128",
+ Self::Usize => "usize",
+ Self::I8 => "i8",
+ Self::I16 => "i16",
+ Self::I32 => "i32",
+ Self::I64 => "i64",
+ Self::I128 => "i128",
+ Self::Isize => "isize",
+ }
+ }
+}
+
+impl FromStr for IntegerType {
+ type Err = ();
+ fn from_str(s: &str) -> Result<Self, Self::Err> {
+ Self::from_suffix(s).ok_or(())
+ }
+}
+
+impl fmt::Display for IntegerType {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.suffix().fmt(f)
+ }
+}
+
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/integer/tests.rs b/third_party/rust/litrs/src/integer/tests.rs
new file mode 100644
index 0000000000..e6dad3f031
--- /dev/null
+++ b/third_party/rust/litrs/src/integer/tests.rs
@@ -0,0 +1,357 @@
+use std::fmt::{Debug, Display};
+use crate::{
+ FromIntegerLiteral, Literal, IntegerLit, IntegerType as Ty, IntegerBase, IntegerBase::*,
+ test_util::{assert_parse_ok_eq, assert_roundtrip},
+};
+
+
+// ===== Utility functions =======================================================================
+
+#[track_caller]
+fn check<T: FromIntegerLiteral + PartialEq + Debug + Display>(
+ input: &str,
+ value: T,
+ base: IntegerBase,
+ main_part: &str,
+ type_suffix: Option<Ty>,
+) {
+ let expected_integer = IntegerLit {
+ raw: input,
+ start_main_part: base.prefix().len(),
+ end_main_part: base.prefix().len() + main_part.len(),
+ base,
+ };
+ assert_parse_ok_eq(
+ input, IntegerLit::parse(input), expected_integer.clone(), "IntegerLit::parse");
+ assert_parse_ok_eq(
+ input, Literal::parse(input), Literal::Integer(expected_integer), "Literal::parse");
+ assert_roundtrip(expected_integer.to_owned(), input);
+ assert_eq!(Ty::from_suffix(IntegerLit::parse(input).unwrap().suffix()), type_suffix);
+
+ let actual_value = IntegerLit::parse(input)
+ .unwrap()
+ .value::<T>()
+ .unwrap_or_else(|| panic!("unexpected overflow in `IntegerLit::value` for `{}`", input));
+ if actual_value != value {
+ panic!(
+ "Parsing int literal `{}` should give value `{}`, but actually resulted in `{}`",
+ input,
+ value,
+ actual_value,
+ );
+ }
+}
+
+
+// ===== Actual tests ===========================================================================
+
+#[test]
+fn parse_decimal() {
+ check("0", 0u128, Decimal, "0", None);
+ check("1", 1u8, Decimal, "1", None);
+ check("8", 8u16, Decimal, "8", None);
+ check("9", 9u32, Decimal, "9", None);
+ check("10", 10u64, Decimal, "10", None);
+ check("11", 11i8, Decimal, "11", None);
+ check("123456789", 123456789i128, Decimal, "123456789", None);
+
+ check("05", 5i16, Decimal, "05", None);
+ check("00005", 5i32, Decimal, "00005", None);
+ check("0123456789", 123456789i64, Decimal, "0123456789", None);
+
+ check("123_456_789", 123_456_789, Decimal, "123_456_789", None);
+ check("0___4", 4, Decimal, "0___4", None);
+ check("0___4_3", 43, Decimal, "0___4_3", None);
+ check("0___4_3", 43, Decimal, "0___4_3", None);
+ check("123___________", 123, Decimal, "123___________", None);
+
+ check(
+ "340282366920938463463374607431768211455",
+ 340282366920938463463374607431768211455u128,
+ Decimal,
+ "340282366920938463463374607431768211455",
+ None,
+ );
+ check(
+ "340_282_366_920_938_463_463_374_607_431_768_211_455",
+ 340282366920938463463374607431768211455u128,
+ Decimal,
+ "340_282_366_920_938_463_463_374_607_431_768_211_455",
+ None,
+ );
+ check(
+ "3_40_282_3669_20938_463463_3746074_31768211_455___",
+ 340282366920938463463374607431768211455u128,
+ Decimal,
+ "3_40_282_3669_20938_463463_3746074_31768211_455___",
+ None,
+ );
+}
+
+#[test]
+fn parse_binary() {
+ check("0b0", 0b0, Binary, "0", None);
+ check("0b000", 0b000, Binary, "000", None);
+ check("0b1", 0b1, Binary, "1", None);
+ check("0b01", 0b01, Binary, "01", None);
+ check("0b101010", 0b101010, Binary, "101010", None);
+ check("0b10_10_10", 0b10_10_10, Binary, "10_10_10", None);
+ check("0b01101110____", 0b01101110____, Binary, "01101110____", None);
+
+ check("0b10010u8", 0b10010u8, Binary, "10010", Some(Ty::U8));
+ check("0b10010i8", 0b10010u8, Binary, "10010", Some(Ty::I8));
+ check("0b10010u64", 0b10010u64, Binary, "10010", Some(Ty::U64));
+ check("0b10010i64", 0b10010i64, Binary, "10010", Some(Ty::I64));
+ check(
+ "0b1011001_00110000_00101000_10100101u32",
+ 0b1011001_00110000_00101000_10100101u32,
+ Binary,
+ "1011001_00110000_00101000_10100101",
+ Some(Ty::U32),
+ );
+}
+
+#[test]
+fn parse_octal() {
+ check("0o0", 0o0, Octal, "0", None);
+ check("0o1", 0o1, Octal, "1", None);
+ check("0o6", 0o6, Octal, "6", None);
+ check("0o7", 0o7, Octal, "7", None);
+ check("0o17", 0o17, Octal, "17", None);
+ check("0o123", 0o123, Octal, "123", None);
+ check("0o7654321", 0o7654321, Octal, "7654321", None);
+ check("0o7_53_1", 0o7_53_1, Octal, "7_53_1", None);
+ check("0o66_", 0o66_, Octal, "66_", None);
+
+ check("0o755u16", 0o755u16, Octal, "755", Some(Ty::U16));
+ check("0o755i128", 0o755i128, Octal, "755", Some(Ty::I128));
+}
+
+#[test]
+fn parse_hexadecimal() {
+ check("0x0", 0x0, Hexadecimal, "0", None);
+ check("0x1", 0x1, Hexadecimal, "1", None);
+ check("0x9", 0x9, Hexadecimal, "9", None);
+
+ check("0xa", 0xa, Hexadecimal, "a", None);
+ check("0xf", 0xf, Hexadecimal, "f", None);
+ check("0x17", 0x17, Hexadecimal, "17", None);
+ check("0x1b", 0x1b, Hexadecimal, "1b", None);
+ check("0x123", 0x123, Hexadecimal, "123", None);
+ check("0xace", 0xace, Hexadecimal, "ace", None);
+ check("0xfdb971", 0xfdb971, Hexadecimal, "fdb971", None);
+ check("0xa_54_f", 0xa_54_f, Hexadecimal, "a_54_f", None);
+ check("0x6d_", 0x6d_, Hexadecimal, "6d_", None);
+
+ check("0xA", 0xA, Hexadecimal, "A", None);
+ check("0xF", 0xF, Hexadecimal, "F", None);
+ check("0x17", 0x17, Hexadecimal, "17", None);
+ check("0x1B", 0x1B, Hexadecimal, "1B", None);
+ check("0x123", 0x123, Hexadecimal, "123", None);
+ check("0xACE", 0xACE, Hexadecimal, "ACE", None);
+ check("0xFDB971", 0xFDB971, Hexadecimal, "FDB971", None);
+ check("0xA_54_F", 0xA_54_F, Hexadecimal, "A_54_F", None);
+ check("0x6D_", 0x6D_, Hexadecimal, "6D_", None);
+
+ check("0xFdB97a1", 0xFdB97a1, Hexadecimal, "FdB97a1", None);
+ check("0xfdB97A1", 0xfdB97A1, Hexadecimal, "fdB97A1", None);
+
+ check("0x40u16", 0x40u16, Hexadecimal, "40", Some(Ty::U16));
+ check("0xffi128", 0xffi128, Hexadecimal, "ff", Some(Ty::I128));
+}
+
+#[test]
+fn starting_underscore() {
+ check("0b_1", 1, Binary, "_1", None);
+ check("0b_010i16", 0b_010, Binary, "_010", Some(Ty::I16));
+
+ check("0o_5", 5, Octal, "_5", None);
+ check("0o_750u128", 0o_750u128, Octal, "_750", Some(Ty::U128));
+
+ check("0x_c", 0xc, Hexadecimal, "_c", None);
+ check("0x_cf3i8", 0x_cf3, Hexadecimal, "_cf3", Some(Ty::I8));
+}
+
+#[test]
+fn parse_overflowing_just_fine() {
+ check("256u8", 256u16, Decimal, "256", Some(Ty::U8));
+ check("123_456_789u8", 123_456_789u32, Decimal, "123_456_789", Some(Ty::U8));
+ check("123_456_789u16", 123_456_789u32, Decimal, "123_456_789", Some(Ty::U16));
+
+ check("123_123_456_789u8", 123_123_456_789u64, Decimal, "123_123_456_789", Some(Ty::U8));
+ check("123_123_456_789u16", 123_123_456_789u64, Decimal, "123_123_456_789", Some(Ty::U16));
+ check("123_123_456_789u32", 123_123_456_789u64, Decimal, "123_123_456_789", Some(Ty::U32));
+}
+
+#[test]
+fn suffixes() {
+ [
+ ("123i8", Ty::I8),
+ ("123i16", Ty::I16),
+ ("123i32", Ty::I32),
+ ("123i64", Ty::I64),
+ ("123i128", Ty::I128),
+ ("123u8", Ty::U8),
+ ("123u16", Ty::U16),
+ ("123u32", Ty::U32),
+ ("123u64", Ty::U64),
+ ("123u128", Ty::U128),
+ ].iter().for_each(|&(s, ty)| {
+ assert_eq!(Ty::from_suffix(IntegerLit::parse(s).unwrap().suffix()), Some(ty));
+ });
+}
+
+#[test]
+fn overflow_u128() {
+ let inputs = [
+ "340282366920938463463374607431768211456",
+ "0x100000000000000000000000000000000",
+ "0o4000000000000000000000000000000000000000000",
+ "0b1000000000000000000000000000000000000000000000000000000000000000000\
+ 00000000000000000000000000000000000000000000000000000000000000",
+ "340282366920938463463374607431768211456u128",
+ "340282366920938463463374607431768211457",
+ "3_40_282_3669_20938_463463_3746074_31768211_456___",
+ "3_40_282_3669_20938_463463_3746074_31768211_455___1",
+ "3_40_282_3669_20938_463463_3746074_31768211_455___0u128",
+ "3402823669209384634633746074317682114570",
+ ];
+
+ for &input in &inputs {
+ let lit = IntegerLit::parse(input).expect("failed to parse");
+ assert!(lit.value::<u128>().is_none());
+ }
+}
+
+#[test]
+fn overflow_u8() {
+ let inputs = [
+ "256", "0x100", "0o400", "0b100000000",
+ "257", "0x101", "0o401", "0b100000001",
+ "300",
+ "1548",
+ "2548985",
+ "256u128",
+ "256u8",
+ "2_5_6",
+ "256_____1",
+ "256__",
+ ];
+
+ for &input in &inputs {
+ let lit = IntegerLit::parse(input).expect("failed to parse");
+ assert!(lit.value::<u8>().is_none());
+ }
+}
+
+#[test]
+fn parse_err() {
+ assert_err!(IntegerLit, "", Empty, None);
+ assert_err_single!(IntegerLit::parse("a"), DoesNotStartWithDigit, 0);
+ assert_err_single!(IntegerLit::parse(";"), DoesNotStartWithDigit, 0);
+ assert_err_single!(IntegerLit::parse("0;"), UnexpectedChar, 1..2);
+ assert_err!(IntegerLit, "0b", NoDigits, 2..2);
+ assert_err_single!(IntegerLit::parse(" 0"), DoesNotStartWithDigit, 0);
+ assert_err_single!(IntegerLit::parse("0 "), UnexpectedChar, 1);
+ assert_err!(IntegerLit, "0b3", InvalidDigit, 2);
+ assert_err_single!(IntegerLit::parse("_"), DoesNotStartWithDigit, 0);
+ assert_err_single!(IntegerLit::parse("_3"), DoesNotStartWithDigit, 0);
+ assert_err!(IntegerLit, "0x44.5", UnexpectedChar, 4..6);
+ assert_err_single!(IntegerLit::parse("123em"), IntegerSuffixStartingWithE, 3);
+}
+
+#[test]
+fn invalid_digits() {
+ assert_err!(IntegerLit, "0b10201", InvalidDigit, 4);
+ assert_err!(IntegerLit, "0b9", InvalidDigit, 2);
+ assert_err!(IntegerLit, "0b07", InvalidDigit, 3);
+
+ assert_err!(IntegerLit, "0o12380", InvalidDigit, 5);
+ assert_err!(IntegerLit, "0o192", InvalidDigit, 3);
+
+ assert_err_single!(IntegerLit::parse("a_123"), DoesNotStartWithDigit, 0);
+ assert_err_single!(IntegerLit::parse("B_123"), DoesNotStartWithDigit, 0);
+}
+
+#[test]
+fn no_valid_digits() {
+ assert_err!(IntegerLit, "0x_", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0x__", NoDigits, 2..4);
+ assert_err!(IntegerLit, "0x________", NoDigits, 2..10);
+ assert_err!(IntegerLit, "0x_i8", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0x_u8", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0x_isize", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0x_usize", NoDigits, 2..3);
+
+ assert_err!(IntegerLit, "0o_", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0o__", NoDigits, 2..4);
+ assert_err!(IntegerLit, "0o________", NoDigits, 2..10);
+ assert_err!(IntegerLit, "0o_i32", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0o_u32", NoDigits, 2..3);
+
+ assert_err!(IntegerLit, "0b_", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0b__", NoDigits, 2..4);
+ assert_err!(IntegerLit, "0b________", NoDigits, 2..10);
+ assert_err!(IntegerLit, "0b_i128", NoDigits, 2..3);
+ assert_err!(IntegerLit, "0b_u128", NoDigits, 2..3);
+}
+
+#[test]
+fn non_standard_suffixes() {
+ #[track_caller]
+ fn check_suffix<T: FromIntegerLiteral + PartialEq + Debug + Display>(
+ input: &str,
+ value: T,
+ base: IntegerBase,
+ main_part: &str,
+ suffix: &str,
+ ) {
+ check(input, value, base, main_part, None);
+ assert_eq!(IntegerLit::parse(input).unwrap().suffix(), suffix);
+ }
+
+ check_suffix("5u7", 5, Decimal, "5", "u7");
+ check_suffix("5u7", 5, Decimal, "5", "u7");
+ check_suffix("5u9", 5, Decimal, "5", "u9");
+ check_suffix("5u0", 5, Decimal, "5", "u0");
+ check_suffix("33u12", 33, Decimal, "33", "u12");
+ check_suffix("84u17", 84, Decimal, "84", "u17");
+ check_suffix("99u80", 99, Decimal, "99", "u80");
+ check_suffix("1234uu16", 1234, Decimal, "1234", "uu16");
+
+ check_suffix("5i7", 5, Decimal, "5", "i7");
+ check_suffix("5i9", 5, Decimal, "5", "i9");
+ check_suffix("5i0", 5, Decimal, "5", "i0");
+ check_suffix("33i12", 33, Decimal, "33", "i12");
+ check_suffix("84i17", 84, Decimal, "84", "i17");
+ check_suffix("99i80", 99, Decimal, "99", "i80");
+ check_suffix("1234ii16", 1234, Decimal, "1234", "ii16");
+
+ check_suffix("0ui32", 0, Decimal, "0", "ui32");
+ check_suffix("1iu32", 1, Decimal, "1", "iu32");
+ check_suffix("54321a64", 54321, Decimal, "54321", "a64");
+ check_suffix("54321b64", 54321, Decimal, "54321", "b64");
+ check_suffix("54321x64", 54321, Decimal, "54321", "x64");
+ check_suffix("54321o64", 54321, Decimal, "54321", "o64");
+
+ check_suffix("0a", 0, Decimal, "0", "a");
+ check_suffix("0a3", 0, Decimal, "0", "a3");
+ check_suffix("0z", 0, Decimal, "0", "z");
+ check_suffix("0z3", 0, Decimal, "0", "z3");
+ check_suffix("0b0a", 0, Binary, "0", "a");
+ check_suffix("0b0A", 0, Binary, "0", "A");
+ check_suffix("0b01f", 1, Binary, "01", "f");
+ check_suffix("0b01F", 1, Binary, "01", "F");
+ check_suffix("0o7a_", 7, Octal, "7", "a_");
+ check_suffix("0o7A_", 7, Octal, "7", "A_");
+ check_suffix("0o72f_0", 0o72, Octal, "72", "f_0");
+ check_suffix("0o72F_0", 0o72, Octal, "72", "F_0");
+
+ check_suffix("0x8cg", 0x8c, Hexadecimal, "8c", "g");
+ check_suffix("0x8cG", 0x8c, Hexadecimal, "8c", "G");
+ check_suffix("0x8c1h_", 0x8c1, Hexadecimal, "8c1", "h_");
+ check_suffix("0x8c1H_", 0x8c1, Hexadecimal, "8c1", "H_");
+ check_suffix("0x8czu16", 0x8c, Hexadecimal, "8c", "zu16");
+
+ check_suffix("123_foo", 123, Decimal, "123_", "foo");
+}
diff --git a/third_party/rust/litrs/src/lib.rs b/third_party/rust/litrs/src/lib.rs
new file mode 100644
index 0000000000..64ed7813c9
--- /dev/null
+++ b/third_party/rust/litrs/src/lib.rs
@@ -0,0 +1,370 @@
+//! Parsing and inspecting Rust literal tokens.
+//!
+//! This library offers functionality to parse Rust literals, i.e. tokens in the
+//! Rust programming language that represent fixed values. The grammar for
+//! those is defined [here][ref].
+//!
+//! This kind of functionality already exists in the `syn` crate. However, as
+//! you often don't need (or want) the full power of `syn`, `litrs` was
+//! built. This crate also offers a bit more flexibility than `syn`
+//! (regarding literals only, of course).
+//!
+//!
+//! # Quick start
+//!
+//! | **`StringLit::try_from(tt)?.value()`** |
+//! | - |
+//!
+//! ... where `tt` is a `proc_macro::TokenTree` and where [`StringLit`] can be
+//! replaced with [`Literal`] or other types of literals (e.g. [`FloatLit`]).
+//! Calling `value()` returns the value that is represented by the literal.
+//!
+//! **Mini Example**
+//!
+//! ```ignore
+//! use proc_macro::TokenStream;
+//!
+//! #[proc_macro]
+//! pub fn foo(input: TokenStream) -> TokenStream {
+//! let first_token = input.into_iter().next().unwrap(); // Do proper error handling!
+//! let string_value = match litrs::StringLit::try_from(first_token) {
+//! Ok(string_lit) => string_lit.value(),
+//! Err(e) => return e.to_compile_error(),
+//! };
+//!
+//! // `string_value` is the string value with all escapes resolved.
+//! todo!()
+//! }
+//! ```
+//!
+//! # Overview
+//!
+//! The main types of this library are [`Literal`], representing any kind of
+//! literal, and `*Lit`, like [`StringLit`] or [`FloatLit`], representing a
+//! specific kind of literal.
+//!
+//! There are different ways to obtain such a literal type:
+//!
+//! - **`parse`**: parses a `&str` or `String` and returns `Result<_,
+//! ParseError>`. For example: [`Literal::parse`] and
+//! [`IntegerLit::parse`].
+//!
+//! - **`From<proc_macro::Literal> for Literal`**: turns a `Literal` value from
+//! the `proc_macro` crate into a `Literal` from this crate.
+//!
+//! - **`TryFrom<proc_macro::Literal> for *Lit`**: tries to turn a
+//! `proc_macro::Literal` into a specific literal type of this crate. If
+//! the input is a literal of a different kind, `Err(InvalidToken)` is
+//! returned.
+//!
+//! - **`TryFrom<proc_macro::TokenTree>`**: attempts to turn a token tree into a
+//! literal type of this crate. An error is returned if the token tree is
+//! not a literal, or if you are trying to turn it into a specific kind of
+//! literal and the token tree is a different kind of literal.
+//!
+//! All of the `From` and `TryFrom` conversions also work for references to
+//! `proc_macro` types. Additionally, if the crate feature `proc-macro2` is
+//! enabled (which it is by default), all these `From` and `TryFrom` impls also
+//! exist for the corresponding `proc_macro2` types.
+//!
+//! **Note**: `true` and `false` are `Ident`s when passed to your proc macro.
+//! The `TryFrom<TokenTree>` impls check for those two special idents and
+//! return a [`BoolLit`] appropriately. For that reason, there is also no
+//! `TryFrom<proc_macro::Literal>` impl for [`BoolLit`]. The `proc_macro::Literal`
+//! simply cannot represent bool literals.
+//!
+//!
+//! # Examples
+//!
+//! In a proc-macro:
+//!
+//! ```ignore
+//! use std::convert::TryFrom;
+//! use proc_macro::TokenStream;
+//! use litrs::FloatLit;
+//!
+//! #[proc_macro]
+//! pub fn foo(input: TokenStream) -> TokenStream {
+//! let mut input = input.into_iter().collect::<Vec<_>>();
+//! if input.len() != 1 {
+//! // Please do proper error handling in your real code!
+//! panic!("expected exactly one token as input");
+//! }
+//! let token = input.remove(0);
+//!
+//! match FloatLit::try_from(token) {
+//! Ok(float_lit) => { /* do something */ }
+//! Err(e) => return e.to_compile_error(),
+//! }
+//!
+//! // Dummy output
+//! TokenStream::new()
+//! }
+//! ```
+//!
+//! Parsing from string:
+//!
+//! ```
+//! use litrs::{FloatLit, Literal};
+//!
+//! // Parse a specific kind of literal (float in this case):
+//! let float_lit = FloatLit::parse("3.14f32");
+//! assert!(float_lit.is_ok());
+//! assert_eq!(float_lit.unwrap().suffix(), "f32");
+//! assert!(FloatLit::parse("'c'").is_err());
+//!
+//! // Parse any kind of literal. After parsing, you can inspect the literal
+//! // and decide what to do in each case.
+//! let lit = Literal::parse("0xff80").expect("failed to parse literal");
+//! match lit {
+//! Literal::Integer(lit) => { /* ... */ }
+//! Literal::Float(lit) => { /* ... */ }
+//! Literal::Bool(lit) => { /* ... */ }
+//! Literal::Char(lit) => { /* ... */ }
+//! Literal::String(lit) => { /* ... */ }
+//! Literal::Byte(lit) => { /* ... */ }
+//! Literal::ByteString(lit) => { /* ... */ }
+//! }
+//! ```
+//!
+//!
+//!
+//! # Crate features
+//!
+//! - `proc-macro2` (**default**): adds the dependency `proc_macro2`, a bunch of
+//! `From` and `TryFrom` impls, and [`InvalidToken::to_compile_error2`].
+//! - `check_suffix`: if enabled, `parse` functions fully verify that the
+//!   literal suffix is valid. This adds the dependency `unicode-xid`. If disabled,
+//!   only an approximate check (restricted to the ASCII range) is done. If you are
+//! writing a proc macro, you don't need to enable this as the suffix is
+//! already checked by the compiler.
+//!
+//!
+//! [ref]: https://doc.rust-lang.org/reference/tokens.html#literals
+//!
+
+#![deny(missing_debug_implementations)]
+
+extern crate proc_macro;
+
+#[cfg(test)]
+#[macro_use]
+mod test_util;
+
+#[cfg(test)]
+mod tests;
+
+mod bool;
+mod byte;
+mod bytestr;
+mod char;
+mod err;
+mod escape;
+mod float;
+mod impls;
+mod integer;
+mod parse;
+mod string;
+
+
+use std::{borrow::{Borrow, Cow}, fmt, ops::{Deref, Range}};
+
+pub use self::{
+ bool::BoolLit,
+ byte::ByteLit,
+ bytestr::ByteStringLit,
+ char::CharLit,
+ err::{InvalidToken, ParseError},
+ float::{FloatLit, FloatType},
+ integer::{FromIntegerLiteral, IntegerLit, IntegerBase, IntegerType},
+ string::StringLit,
+};
+
+
+// ==============================================================================================
+// ===== `Literal` and type defs
+// ==============================================================================================
+
+/// A literal. This is the main type of this library.
+///
+/// This type is generic over the underlying buffer `B`, which can be `&str` or
+/// `String`.
+///
+/// To create this type, you have to either call [`Literal::parse`] with an
+/// input string or use the `From<_>` impls of this type. The impls are only
+/// available if the corresponding crate features are enabled (they are enabled
+/// by default).
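+///
+/// A minimal usage sketch (using only `parse` and `suffix`, both defined below):
+///
+/// ```
+/// use litrs::Literal;
+///
+/// let lit = Literal::parse("27u32").expect("failed to parse");
+/// match lit {
+///     Literal::Integer(i) => assert_eq!(i.suffix(), "u32"),
+///     _ => unreachable!(),
+/// }
+/// ```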
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum Literal<B: Buffer> {
+ Bool(BoolLit),
+ Integer(IntegerLit<B>),
+ Float(FloatLit<B>),
+ Char(CharLit<B>),
+ String(StringLit<B>),
+ Byte(ByteLit<B>),
+ ByteString(ByteStringLit<B>),
+}
+
+impl<B: Buffer> Literal<B> {
+ /// Parses the given input as a Rust literal.
+ pub fn parse(input: B) -> Result<Self, ParseError> {
+ parse::parse(input)
+ }
+
+ /// Returns the suffix of this literal or `""` if it doesn't have one.
+ ///
+    /// The Rust token grammar actually allows suffixes for all kinds of tokens.
+    /// Most Rust programmers only know the type suffixes for integers and
+    /// floats, e.g. `0u32`. And in normal Rust code, everything else causes an
+    /// error. But it is possible to pass literals with arbitrary suffixes to
+ /// proc macros, for example:
+ ///
+ /// ```ignore
+ /// some_macro!(3.14f33 16px '🦊'good_boy "toph"beifong);
+ /// ```
+ ///
+    /// Boolean literals, which are actually idents rather than literals, cannot
+    /// have suffixes, so this method always returns `""` for them.
+ ///
+ /// There are some edge cases to be aware of:
+ /// - Integer suffixes must not start with `e` or `E` as that conflicts with
+ /// the exponent grammar for floats. `0e1` is a float; `0eel` is also
+ /// parsed as a float and results in an error.
+ /// - Hexadecimal integers eagerly parse digits, so `0x5abcdefgh` has a
+    ///   suffix of `gh`.
+    /// - Suffixes can contain and start with `_`, but for integer and float
+    ///   literals, `_` is eagerly parsed as part of the number, so `1_x` has
+ /// the suffix `x`.
+ /// - The input `55f32` is regarded as integer literal with suffix `f32`.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use litrs::Literal;
+ ///
+ /// assert_eq!(Literal::parse(r##"3.14f33"##).unwrap().suffix(), "f33");
+ /// assert_eq!(Literal::parse(r##"123hackerman"##).unwrap().suffix(), "hackerman");
+ /// assert_eq!(Literal::parse(r##"0x0fuck"##).unwrap().suffix(), "uck");
+ /// assert_eq!(Literal::parse(r##"'🦊'good_boy"##).unwrap().suffix(), "good_boy");
+ /// assert_eq!(Literal::parse(r##""toph"beifong"##).unwrap().suffix(), "beifong");
+ /// ```
+ pub fn suffix(&self) -> &str {
+ match self {
+ Literal::Bool(_) => "",
+ Literal::Integer(l) => l.suffix(),
+ Literal::Float(l) => l.suffix(),
+ Literal::Char(l) => l.suffix(),
+ Literal::String(l) => l.suffix(),
+ Literal::Byte(l) => l.suffix(),
+ Literal::ByteString(l) => l.suffix(),
+ }
+ }
+}
+
+impl Literal<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn into_owned(self) -> Literal<String> {
+ match self {
+ Literal::Bool(l) => Literal::Bool(l.to_owned()),
+ Literal::Integer(l) => Literal::Integer(l.to_owned()),
+ Literal::Float(l) => Literal::Float(l.to_owned()),
+ Literal::Char(l) => Literal::Char(l.to_owned()),
+ Literal::String(l) => Literal::String(l.into_owned()),
+ Literal::Byte(l) => Literal::Byte(l.to_owned()),
+ Literal::ByteString(l) => Literal::ByteString(l.into_owned()),
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for Literal<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Literal::Bool(l) => l.fmt(f),
+ Literal::Integer(l) => l.fmt(f),
+ Literal::Float(l) => l.fmt(f),
+ Literal::Char(l) => l.fmt(f),
+ Literal::String(l) => l.fmt(f),
+ Literal::Byte(l) => l.fmt(f),
+ Literal::ByteString(l) => l.fmt(f),
+ }
+ }
+}
+
+
+// ==============================================================================================
+// ===== Buffer
+// ==============================================================================================
+
+/// A shared or owned string buffer. Implemented for `String` and `&str`. *Implementation detail*.
+///
+/// This trait is an implementation detail of this library; it cannot be
+/// implemented in other crates and is not subject to semantic versioning.
+/// `litrs` only guarantees that this trait is implemented for `String` and
+/// `for<'a> &'a str`.
+pub trait Buffer: sealed::Sealed + Deref<Target = str> {
+ /// This is `Cow<'static, str>` for `String`, and `Cow<'a, str>` for `&'a str`.
+ type Cow: From<String> + AsRef<str> + Borrow<str> + Deref<Target = str>;
+
+ #[doc(hidden)]
+ fn into_cow(self) -> Self::Cow;
+
+ /// This is `Cow<'static, [u8]>` for `String`, and `Cow<'a, [u8]>` for `&'a str`.
+ type ByteCow: From<Vec<u8>> + AsRef<[u8]> + Borrow<[u8]> + Deref<Target = [u8]>;
+
+ #[doc(hidden)]
+ fn into_byte_cow(self) -> Self::ByteCow;
+
+    /// Cuts away some characters at the beginning and some at the end. The
+    /// given range has to be in bounds.
+ #[doc(hidden)]
+ fn cut(self, range: Range<usize>) -> Self;
+}
+
+mod sealed {
+ pub trait Sealed {}
+}
+
+impl<'a> sealed::Sealed for &'a str {}
+impl<'a> Buffer for &'a str {
+ #[doc(hidden)]
+ fn cut(self, range: Range<usize>) -> Self {
+ &self[range]
+ }
+
+ type Cow = Cow<'a, str>;
+ #[doc(hidden)]
+ fn into_cow(self) -> Self::Cow {
+ self.into()
+ }
+ type ByteCow = Cow<'a, [u8]>;
+ #[doc(hidden)]
+ fn into_byte_cow(self) -> Self::ByteCow {
+ self.as_bytes().into()
+ }
+}
+
+impl sealed::Sealed for String {}
+impl Buffer for String {
+ #[doc(hidden)]
+ fn cut(mut self, range: Range<usize>) -> Self {
+ // This is not the most efficient way, but it works. First we cut the
+        // end, then the beginning. Note that `drain` removes the range even if
+        // the returned iterator is not consumed.
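+        // E.g. cutting "hello" to 1..4 first truncates to "hell", then
+        // drains the first byte, leaving "ell".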
+ self.truncate(range.end);
+ self.drain(..range.start);
+ self
+ }
+
+ type Cow = Cow<'static, str>;
+ #[doc(hidden)]
+ fn into_cow(self) -> Self::Cow {
+ self.into()
+ }
+
+ type ByteCow = Cow<'static, [u8]>;
+ #[doc(hidden)]
+ fn into_byte_cow(self) -> Self::ByteCow {
+ self.into_bytes().into()
+ }
+}
diff --git a/third_party/rust/litrs/src/parse.rs b/third_party/rust/litrs/src/parse.rs
new file mode 100644
index 0000000000..efc6b870f6
--- /dev/null
+++ b/third_party/rust/litrs/src/parse.rs
@@ -0,0 +1,125 @@
+use crate::{
+ BoolLit,
+ Buffer,
+ ByteLit,
+ ByteStringLit,
+ CharLit,
+ ParseError,
+ FloatLit,
+ IntegerLit,
+ Literal,
+ StringLit,
+ err::{perr, ParseErrorKind::{*, self}},
+};
+
+
+pub fn parse<B: Buffer>(input: B) -> Result<Literal<B>, ParseError> {
+ let (first, rest) = input.as_bytes().split_first().ok_or(perr(None, Empty))?;
+ let second = input.as_bytes().get(1).copied();
+
+ match first {
+ b'f' if &*input == "false" => Ok(Literal::Bool(BoolLit::False)),
+ b't' if &*input == "true" => Ok(Literal::Bool(BoolLit::True)),
+
+ // A number literal (integer or float).
+ b'0'..=b'9' => {
+ // To figure out whether this is a float or integer, we do some
+ // quick inspection here. Yes, this is technically duplicate
+ // work with what is happening in the integer/float parse
+ // methods, but it makes the code way easier for now and won't
+ // be a huge performance loss.
+ //
+ // The first non-decimal char in a float literal must
+ // be '.', 'e' or 'E'.
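+            //
+            // E.g. `7.5`, `7e3` and `7E3` are dispatched to `FloatLit::parse`,
+            // while `7`, `0x7f` and `123u8` go to `IntegerLit::parse`.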
+ match input.as_bytes().get(1 + end_dec_digits(rest)) {
+ Some(b'.') | Some(b'e') | Some(b'E')
+ => FloatLit::parse(input).map(Literal::Float),
+
+ _ => IntegerLit::parse(input).map(Literal::Integer),
+ }
+ },
+
+ b'\'' => CharLit::parse(input).map(Literal::Char),
+ b'"' | b'r' => StringLit::parse(input).map(Literal::String),
+
+ b'b' if second == Some(b'\'') => ByteLit::parse(input).map(Literal::Byte),
+ b'b' if second == Some(b'r') || second == Some(b'"')
+ => ByteStringLit::parse(input).map(Literal::ByteString),
+
+ _ => Err(perr(None, InvalidLiteral)),
+ }
+}
+
+
+pub(crate) fn first_byte_or_empty(s: &str) -> Result<u8, ParseError> {
+ s.as_bytes().get(0).copied().ok_or(perr(None, Empty))
+}
+
+/// Returns the index of the first byte in `input` that is neither an underscore
+/// nor a decimal digit, or `input.len()` if all bytes are decimal digits or
+/// underscores.
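+///
+/// E.g. `end_dec_digits(b"12_3px")` returns `4` and `end_dec_digits(b"987")`
+/// returns `3`.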
+pub(crate) fn end_dec_digits(input: &[u8]) -> usize {
+ input.iter()
+ .position(|b| !matches!(b, b'_' | b'0'..=b'9'))
+ .unwrap_or(input.len())
+}
+
+pub(crate) fn hex_digit_value(digit: u8) -> Option<u8> {
+ match digit {
+ b'0'..=b'9' => Some(digit - b'0'),
+ b'a'..=b'f' => Some(digit - b'a' + 10),
+ b'A'..=b'F' => Some(digit - b'A' + 10),
+ _ => None,
+ }
+}
+
+/// Makes sure that `s` is a valid literal suffix.
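+///
+/// E.g. `f32`, `px` and `_foo` are accepted, while `_` on its own or a suffix
+/// starting with an ASCII character that is neither a letter nor `_`
+/// (like `1abc`) is rejected.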
+pub(crate) fn check_suffix(s: &str) -> Result<(), ParseErrorKind> {
+ if s.is_empty() {
+ return Ok(());
+ }
+
+ let mut chars = s.chars();
+ let first = chars.next().unwrap();
+ let rest = chars.as_str();
+ if first == '_' && rest.is_empty() {
+ return Err(InvalidSuffix);
+ }
+
+ // This is just an extra check to improve the error message. If the first
+ // character of the "suffix" is already some invalid ASCII
+ // char, "unexpected character" seems like the more fitting error.
+ if first.is_ascii() && !(first.is_ascii_alphabetic() || first == '_') {
+ return Err(UnexpectedChar);
+ }
+
+ // Proper check is optional as it's not really necessary in proc macro
+ // context.
+ #[cfg(feature = "check_suffix")]
+ fn is_valid_suffix(first: char, rest: &str) -> bool {
+ use unicode_xid::UnicodeXID;
+
+ (first == '_' || first.is_xid_start())
+ && rest.chars().all(|c| c.is_xid_continue())
+ }
+
+    // When avoiding the dependency on `unicode_xid`, we just make a best-effort
+    // check to catch the most common errors.
+ #[cfg(not(feature = "check_suffix"))]
+ fn is_valid_suffix(first: char, rest: &str) -> bool {
+ if first.is_ascii() && !(first.is_ascii_alphabetic() || first == '_') {
+ return false;
+ }
+ for c in rest.chars() {
+ if c.is_ascii() && !(c.is_ascii_alphanumeric() || c == '_') {
+ return false;
+ }
+ }
+ true
+ }
+
+ if is_valid_suffix(first, rest) {
+ Ok(())
+ } else {
+ Err(InvalidSuffix)
+ }
+}
diff --git a/third_party/rust/litrs/src/string/mod.rs b/third_party/rust/litrs/src/string/mod.rs
new file mode 100644
index 0000000000..d2034a62a9
--- /dev/null
+++ b/third_party/rust/litrs/src/string/mod.rs
@@ -0,0 +1,125 @@
+use std::{fmt, ops::Range};
+
+use crate::{
+ Buffer, ParseError,
+ err::{perr, ParseErrorKind::*},
+ escape::{scan_raw_string, unescape_string},
+ parse::first_byte_or_empty,
+};
+
+
+/// A string or raw string literal, e.g. `"foo"`, `"Grüße"` or `r#"a🦊c"d🦀f"#`.
+///
+/// See [the reference][ref] for more information.
+///
+/// [ref]: https://doc.rust-lang.org/reference/tokens.html#string-literals
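+///
+/// A minimal usage sketch:
+///
+/// ```
+/// use litrs::StringLit;
+///
+/// let lit = StringLit::parse(r#""Hello \"world\"!""#).unwrap();
+/// assert_eq!(lit.value(), r#"Hello "world"!"#);
+/// assert!(!lit.is_raw_string());
+///
+/// let raw = StringLit::parse(r##"r#"a " b"#"##).unwrap();
+/// assert_eq!(raw.value(), r#"a " b"#);
+/// assert!(raw.is_raw_string());
+/// ```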
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct StringLit<B: Buffer> {
+ /// The raw input.
+ raw: B,
+
+    /// The string value (with all escapes resolved), or `None` if there were
+ /// no escapes. In the latter case, the string value is in `raw`.
+ value: Option<String>,
+
+ /// The number of hash signs in case of a raw string literal, or `None` if
+ /// it's not a raw string literal.
+ num_hashes: Option<u32>,
+
+ /// Start index of the suffix or `raw.len()` if there is no suffix.
+ start_suffix: usize,
+}
+
+impl<B: Buffer> StringLit<B> {
+ /// Parses the input as a (raw) string literal. Returns an error if the
+ /// input is invalid or represents a different kind of literal.
+ pub fn parse(input: B) -> Result<Self, ParseError> {
+ match first_byte_or_empty(&input)? {
+ b'r' | b'"' => {
+ let (value, num_hashes, start_suffix) = parse_impl(&input)?;
+ Ok(Self { raw: input, value, num_hashes, start_suffix })
+ }
+ _ => Err(perr(0, InvalidStringLiteralStart)),
+ }
+ }
+
+ /// Returns the string value this literal represents (where all escapes have
+ /// been turned into their respective values).
+ pub fn value(&self) -> &str {
+ self.value.as_deref().unwrap_or(&self.raw[self.inner_range()])
+ }
+
+ /// Like `value` but returns a potentially owned version of the value.
+ ///
+ /// The return value is either `Cow<'static, str>` if `B = String`, or
+ /// `Cow<'a, str>` if `B = &'a str`.
+ pub fn into_value(self) -> B::Cow {
+ let inner_range = self.inner_range();
+ let Self { raw, value, .. } = self;
+ value.map(B::Cow::from).unwrap_or_else(|| raw.cut(inner_range).into_cow())
+ }
+
+ /// The optional suffix. Returns `""` if the suffix is empty/does not exist.
+ pub fn suffix(&self) -> &str {
+ &(*self.raw)[self.start_suffix..]
+ }
+
+ /// Returns whether this literal is a raw string literal (starting with
+ /// `r`).
+ pub fn is_raw_string(&self) -> bool {
+ self.num_hashes.is_some()
+ }
+
+ /// Returns the raw input that was passed to `parse`.
+ pub fn raw_input(&self) -> &str {
+ &self.raw
+ }
+
+ /// Returns the raw input that was passed to `parse`, potentially owned.
+ pub fn into_raw_input(self) -> B {
+ self.raw
+ }
+
+ /// The range within `self.raw` that excludes the quotes and potential `r#`.
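+    ///
+    /// E.g. for `"foo"` this is `1..4`, and for `r#"abc"#` (no suffix) it is
+    /// `3..6`.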
+ fn inner_range(&self) -> Range<usize> {
+ match self.num_hashes {
+ None => 1..self.start_suffix - 1,
+ Some(n) => 1 + n as usize + 1..self.start_suffix - n as usize - 1,
+ }
+ }
+}
+
+impl StringLit<&str> {
+ /// Makes a copy of the underlying buffer and returns the owned version of
+ /// `Self`.
+ pub fn into_owned(self) -> StringLit<String> {
+ StringLit {
+ raw: self.raw.to_owned(),
+ value: self.value,
+ num_hashes: self.num_hashes,
+ start_suffix: self.start_suffix,
+ }
+ }
+}
+
+impl<B: Buffer> fmt::Display for StringLit<B> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad(&self.raw)
+ }
+}
+
+/// Precondition: input has to start with either `"` or `r`.
+#[inline(never)]
+pub(crate) fn parse_impl(input: &str) -> Result<(Option<String>, Option<u32>, usize), ParseError> {
+ if input.starts_with('r') {
+ scan_raw_string::<char>(&input, 1)
+ .map(|(v, hashes, start_suffix)| (v, Some(hashes), start_suffix))
+ } else {
+ unescape_string::<char>(&input, 1)
+ .map(|(v, start_suffix)| (v, None, start_suffix))
+ }
+}
+
+
+#[cfg(test)]
+mod tests;
diff --git a/third_party/rust/litrs/src/string/tests.rs b/third_party/rust/litrs/src/string/tests.rs
new file mode 100644
index 0000000000..1c0cb63061
--- /dev/null
+++ b/third_party/rust/litrs/src/string/tests.rs
@@ -0,0 +1,278 @@
+use crate::{Literal, StringLit, test_util::{assert_parse_ok_eq, assert_roundtrip}};
+
+// ===== Utility functions =======================================================================
+
+macro_rules! check {
+ ($lit:literal, $has_escapes:expr, $num_hashes:expr) => {
+ check!($lit, stringify!($lit), $has_escapes, $num_hashes, "")
+ };
+ ($lit:literal, $input:expr, $has_escapes:expr, $num_hashes:expr, $suffix:literal) => {
+ let input = $input;
+ let expected = StringLit {
+ raw: input,
+ value: if $has_escapes { Some($lit.to_string()) } else { None },
+ num_hashes: $num_hashes,
+ start_suffix: input.len() - $suffix.len(),
+ };
+
+ assert_parse_ok_eq(input, StringLit::parse(input), expected.clone(), "StringLit::parse");
+ assert_parse_ok_eq(
+ input, Literal::parse(input), Literal::String(expected.clone()), "Literal::parse");
+ let lit = StringLit::parse(input).unwrap();
+ assert_eq!(lit.value(), $lit);
+ assert_eq!(lit.suffix(), $suffix);
+ assert_eq!(lit.into_value(), $lit);
+ assert_roundtrip(expected.into_owned(), input);
+ };
+}
+
+
+// ===== Actual tests ============================================================================
+
+#[test]
+fn simple() {
+ check!("", false, None);
+ check!("a", false, None);
+ check!("peter", false, None);
+ check!("Sei gegrüßt, Bärthelt!", false, None);
+ check!("أنا لا أتحدث العربية", false, None);
+ check!("お前はもう死んでいる", false, None);
+ check!("Пушки - интересные музыкальные инструменты", false, None);
+ check!("lit 👌 😂 af", false, None);
+}
+
+#[test]
+fn special_whitespace() {
+ let strings = ["\n", "\t", "foo\tbar", "🦊\n"];
+
+ for &s in &strings {
+ let input = format!(r#""{}""#, s);
+ let input_raw = format!(r#"r"{}""#, s);
+ for (input, num_hashes) in vec![(input, None), (input_raw, Some(0))] {
+ let expected = StringLit {
+ raw: &*input,
+ value: None,
+ num_hashes,
+ start_suffix: input.len(),
+ };
+ assert_parse_ok_eq(
+ &input, StringLit::parse(&*input), expected.clone(), "StringLit::parse");
+ assert_parse_ok_eq(
+ &input, Literal::parse(&*input), Literal::String(expected), "Literal::parse");
+ assert_eq!(StringLit::parse(&*input).unwrap().value(), s);
+ assert_eq!(StringLit::parse(&*input).unwrap().into_value(), s);
+ }
+ }
+}
+
+#[test]
+fn simple_escapes() {
+ check!("a\nb", true, None);
+ check!("\nb", true, None);
+ check!("a\n", true, None);
+ check!("\n", true, None);
+
+ check!("\x60犬 \t 猫\r馬\n うさぎ \0ネズミ", true, None);
+ check!("నా \\పిల్లి లావుగా ఉంది", true, None);
+ check!("నా \\పిల్లి లావుగా 🐈\"ఉంది", true, None);
+ check!("\\నా\\ పిల్లి లావుగా\" ఉంది\"", true, None);
+ check!("\"నా \\🐈 పిల్లి లావుగా \" ఉంది\\", true, None);
+
+ check!("\x00", true, None);
+ check!(" \x01", true, None);
+ check!("\x0c 🦊", true, None);
+ check!(" 🦊\x0D ", true, None);
+ check!("\\x13", true, None);
+ check!("\"x30", true, None);
+}
+
+#[test]
+fn unicode_escapes() {
+ check!("\u{0}", true, None);
+ check!(" \u{00}", true, None);
+ check!("\u{b} ", true, None);
+ check!(" \u{B} ", true, None);
+ check!("\u{7e}", true, None);
+ check!("నక్క\u{E4}", true, None);
+ check!("\u{e4} నక్క", true, None);
+ check!(" \u{fc}నక్క ", true, None);
+ check!("\u{Fc}", true, None);
+ check!("\u{fC}🦊\nлиса", true, None);
+ check!("лиса\u{FC}", true, None);
+ check!("лиса\u{b10}నక్క🦊", true, None);
+ check!("\"నక్క\u{B10}", true, None);
+ check!("лиса\\\u{0b10}", true, None);
+ check!("ли🦊са\\\"\u{0b10}", true, None);
+ check!("నక్క\\\\u{0b10}", true, None);
+ check!("\u{2764}Füchsin", true, None);
+ check!("Füchse \u{1f602}", true, None);
+ check!("cd\u{1F602}ab", true, None);
+
+ check!("\u{0}🦊", true, None);
+ check!("лиса\u{0__}", true, None);
+ check!("\\🦊\u{3_b}", true, None);
+ check!("🦊\u{1_F_6_0_2}Füchsin", true, None);
+ check!("నక్క\\\u{1_F6_02_____}నక్క", true, None);
+}
+
+#[test]
+fn string_continue() {
+ check!("నక్క\
+ bar", true, None);
+ check!("foo\
+🦊", true, None);
+
+ check!("foo\
+
+ banana", true, None);
+
+ // Weird whitespace characters
+ let lit = StringLit::parse("\"foo\\\n\r\t\n \n\tbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), "foobar");
+ let lit = StringLit::parse("\"foo\\\n\u{85}bar\"").expect("failed to parse");
+ assert_eq!(lit.value(), "foo\u{85}bar");
+ let lit = StringLit::parse("\"foo\\\n\u{a0}bar\"").expect("failed to parse");
+ assert_eq!(lit.value(), "foo\u{a0}bar");
+
+ // Raw strings do not handle "string continues"
+ check!(r"foo\
+ bar", false, Some(0));
+}
+
+#[test]
+fn crlf_newlines() {
+ let lit = StringLit::parse("\"foo\r\nbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), "foo\nbar");
+
+ let lit = StringLit::parse("\"\r\nbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), "\nbar");
+
+ let lit = StringLit::parse("\"лиса\r\n\"").expect("failed to parse");
+ assert_eq!(lit.value(), "лиса\n");
+
+ let lit = StringLit::parse("r\"foo\r\nbar\"").expect("failed to parse");
+ assert_eq!(lit.value(), "foo\nbar");
+
+ let lit = StringLit::parse("r#\"\r\nbar\"#").expect("failed to parse");
+ assert_eq!(lit.value(), "\nbar");
+
+ let lit = StringLit::parse("r##\"лиса\r\n\"##").expect("failed to parse");
+ assert_eq!(lit.value(), "лиса\n");
+}
+
+#[test]
+fn raw_string() {
+ check!(r"", false, Some(0));
+ check!(r"a", false, Some(0));
+ check!(r"peter", false, Some(0));
+ check!(r"Sei gegrüßt, Bärthelt!", false, Some(0));
+ check!(r"أنا لا أتحدث العربية", false, Some(0));
+ check!(r"お前はもう死んでいる", false, Some(0));
+ check!(r"Пушки - интересные музыкальные инструменты", false, Some(0));
+ check!(r"lit 👌 😂 af", false, Some(0));
+
+ check!(r#""#, false, Some(1));
+ check!(r#"a"#, false, Some(1));
+ check!(r##"peter"##, false, Some(2));
+ check!(r###"Sei gegrüßt, Bärthelt!"###, false, Some(3));
+ check!(r########"lit 👌 😂 af"########, false, Some(8));
+
+ check!(r#"foo " bar"#, false, Some(1));
+ check!(r##"foo " bar"##, false, Some(2));
+ check!(r#"foo """" '"'" bar"#, false, Some(1));
+ check!(r#""foo""#, false, Some(1));
+ check!(r###""foo'"###, false, Some(3));
+ check!(r#""x'#_#s'"#, false, Some(1));
+ check!(r"#", false, Some(0));
+ check!(r"foo#", false, Some(0));
+ check!(r"##bar", false, Some(0));
+ check!(r###""##foo"##bar'"###, false, Some(3));
+
+ check!(r"さび\n\t\r\0\\x60\u{123}フェリス", false, Some(0));
+ check!(r#"さび\n\t\r\0\\x60\u{123}フェリス"#, false, Some(1));
+}
+
+#[test]
+fn suffixes() {
+ check!("hello", r###""hello"suffix"###, false, None, "suffix");
+ check!(r"お前はもう死んでいる", r###"r"お前はもう死んでいる"_banana"###, false, Some(0), "_banana");
+ check!("fox", r#""fox"peter"#, false, None, "peter");
+ check!("🦊", r#""🦊"peter"#, false, None, "peter");
+ check!("నక్క\\\\u{0b10}", r###""నక్క\\\\u{0b10}"jü_rgen"###, true, None, "jü_rgen");
+}
+
+#[test]
+fn parse_err() {
+ assert_err!(StringLit, r#"""#, UnterminatedString, None);
+ assert_err!(StringLit, r#""犬"#, UnterminatedString, None);
+ assert_err!(StringLit, r#""Jürgen"#, UnterminatedString, None);
+ assert_err!(StringLit, r#""foo bar baz"#, UnterminatedString, None);
+
+ assert_err!(StringLit, r#""fox"peter""#, InvalidSuffix, 5);
+ assert_err!(StringLit, r###"r#"foo "# bar"#"###, UnexpectedChar, 9);
+
+ assert_err!(StringLit, "\"\r\"", IsolatedCr, 1);
+ assert_err!(StringLit, "\"fo\rx\"", IsolatedCr, 3);
+ assert_err!(StringLit, "r\"\r\"", IsolatedCr, 2);
+ assert_err!(StringLit, "r\"fo\rx\"", IsolatedCr, 4);
+
+ assert_err!(StringLit, r##"r####""##, UnterminatedRawString, None);
+ assert_err!(StringLit, r#####"r##"foo"#bar"#####, UnterminatedRawString, None);
+ assert_err!(StringLit, r##"r####"##, InvalidLiteral, None);
+ assert_err!(StringLit, r##"r####x"##, InvalidLiteral, None);
+}
+
+#[test]
+fn invalid_ascii_escapes() {
+ assert_err!(StringLit, r#""\x80""#, NonAsciiXEscape, 1..5);
+ assert_err!(StringLit, r#""🦊\x81""#, NonAsciiXEscape, 5..9);
+ assert_err!(StringLit, r#"" \x8a""#, NonAsciiXEscape, 2..6);
+ assert_err!(StringLit, r#""\x8Ff""#, NonAsciiXEscape, 1..5);
+ assert_err!(StringLit, r#""\xa0 ""#, NonAsciiXEscape, 1..5);
+ assert_err!(StringLit, r#""నక్క\xB0""#, NonAsciiXEscape, 13..17);
+ assert_err!(StringLit, r#""\xc3నక్క""#, NonAsciiXEscape, 1..5);
+ assert_err!(StringLit, r#""\xDf🦊""#, NonAsciiXEscape, 1..5);
+ assert_err!(StringLit, r#""నక్క\xffనక్క""#, NonAsciiXEscape, 13..17);
+ assert_err!(StringLit, r#""\xfF ""#, NonAsciiXEscape, 1..5);
+ assert_err!(StringLit, r#"" \xFf""#, NonAsciiXEscape, 2..6);
+ assert_err!(StringLit, r#""నక్క \xFF""#, NonAsciiXEscape, 15..19);
+}
+
+#[test]
+fn invalid_escapes() {
+ assert_err!(StringLit, r#""\a""#, UnknownEscape, 1..3);
+ assert_err!(StringLit, r#""foo\y""#, UnknownEscape, 4..6);
+ assert_err!(StringLit, r#""\"#, UnterminatedEscape, 1);
+ assert_err!(StringLit, r#""\x""#, UnterminatedEscape, 1..3);
+ assert_err!(StringLit, r#""🦊\x1""#, UnterminatedEscape, 5..8);
+ assert_err!(StringLit, r#"" \xaj""#, InvalidXEscape, 2..6);
+ assert_err!(StringLit, r#""నక్క\xjb""#, InvalidXEscape, 13..17);
+}
+
+#[test]
+fn invalid_unicode_escapes() {
+ assert_err!(StringLit, r#""\u""#, UnicodeEscapeWithoutBrace, 1..3);
+ assert_err!(StringLit, r#""🦊\u ""#, UnicodeEscapeWithoutBrace, 5..7);
+ assert_err!(StringLit, r#""\u3""#, UnicodeEscapeWithoutBrace, 1..3);
+
+ assert_err!(StringLit, r#""\u{""#, UnterminatedUnicodeEscape, 1..4);
+ assert_err!(StringLit, r#""\u{12""#, UnterminatedUnicodeEscape, 1..6);
+ assert_err!(StringLit, r#""🦊\u{a0b""#, UnterminatedUnicodeEscape, 5..11);
+ assert_err!(StringLit, r#""\u{a0_b ""#, UnterminatedUnicodeEscape, 1..10);
+
+ assert_err!(StringLit, r#""\u{_}నక్క""#, InvalidStartOfUnicodeEscape, 4);
+ assert_err!(StringLit, r#""\u{_5f}""#, InvalidStartOfUnicodeEscape, 4);
+
+ assert_err!(StringLit, r#""fox\u{x}""#, NonHexDigitInUnicodeEscape, 7);
+ assert_err!(StringLit, r#""\u{0x}🦊""#, NonHexDigitInUnicodeEscape, 5);
+ assert_err!(StringLit, r#""నక్క\u{3bx}""#, NonHexDigitInUnicodeEscape, 18);
+ assert_err!(StringLit, r#""\u{3b_x}лиса""#, NonHexDigitInUnicodeEscape, 7);
+ assert_err!(StringLit, r#""\u{4x_}""#, NonHexDigitInUnicodeEscape, 5);
+
+ assert_err!(StringLit, r#""\u{1234567}""#, TooManyDigitInUnicodeEscape, 10);
+ assert_err!(StringLit, r#""నక్క\u{1234567}🦊""#, TooManyDigitInUnicodeEscape, 22);
+ assert_err!(StringLit, r#""నక్క\u{1_23_4_56_7}""#, TooManyDigitInUnicodeEscape, 26);
+ assert_err!(StringLit, r#""\u{abcdef123}лиса""#, TooManyDigitInUnicodeEscape, 10);
+
+ assert_err!(StringLit, r#""\u{110000}fox""#, InvalidUnicodeEscapeChar, 1..10);
+}
diff --git a/third_party/rust/litrs/src/test_util.rs b/third_party/rust/litrs/src/test_util.rs
new file mode 100644
index 0000000000..fd284e984e
--- /dev/null
+++ b/third_party/rust/litrs/src/test_util.rs
@@ -0,0 +1,128 @@
+use crate::*;
+use std::fmt::{Debug, Display};
+
+
+#[track_caller]
+pub(crate) fn assert_parse_ok_eq<T: PartialEq + Debug + Display>(
+ input: &str,
+ result: Result<T, ParseError>,
+ expected: T,
+ parse_method: &str,
+) {
+ match result {
+ Ok(actual) if actual == expected => {
+ if actual.to_string() != input {
+ panic!(
+ "formatting does not yield original input `{}`: {:?}",
+ input,
+ actual,
+ );
+ }
+ }
+ Ok(actual) => {
+ panic!(
+ "unexpected parsing result (with `{}`) for `{}`:\nactual: {:?}\nexpected: {:?}",
+ parse_method,
+ input,
+ actual,
+ expected,
+ );
+ }
+ Err(e) => {
+ panic!(
+ "expected `{}` to be parsed (with `{}`) successfully, but it failed: {:?}",
+ input,
+ parse_method,
+ e,
+ );
+ }
+ }
+}
+
+// This is not ideal, but to perform this check we need `proc-macro2`. So we
+// just don't do anything if that feature is not enabled.
+#[cfg(not(feature = "proc-macro2"))]
+pub(crate) fn assert_roundtrip<T>(_: T, _: &str) {}
+
+#[cfg(feature = "proc-macro2")]
+#[track_caller]
+pub(crate) fn assert_roundtrip<T>(ours: T, input: &str)
+where
+ T: std::convert::TryFrom<proc_macro2::Literal> + fmt::Debug + PartialEq + Clone,
+ proc_macro2::Literal: From<T>,
+ <T as std::convert::TryFrom<proc_macro2::Literal>>::Error: std::fmt::Display,
+{
+ let pm_lit = input.parse::<proc_macro2::Literal>()
+ .expect("failed to parse input as proc_macro2::Literal");
+ let t_name = std::any::type_name::<T>();
+
+ // Unfortunately, `proc_macro2::Literal` does not implement `PartialEq`, so
+ // this is the next best thing.
+ if proc_macro2::Literal::from(ours.clone()).to_string() != pm_lit.to_string() {
+ panic!(
+ "Converting {} to proc_macro2::Literal has unexpected result:\
+ \nconverted: {:?}\nexpected: {:?}",
+ t_name,
+ proc_macro2::Literal::from(ours),
+ pm_lit,
+ );
+ }
+
+ match T::try_from(pm_lit) {
+ Err(e) => {
+ panic!("Trying to convert proc_macro2::Literal to {} results in error: {}", t_name, e);
+ }
+ Ok(res) => {
+ if res != ours {
+ panic!(
+ "Converting proc_macro2::Literal to {} has unexpected result:\
+ \nactual: {:?}\nexpected: {:?}",
+ t_name,
+ res,
+ ours,
+ );
+ }
+ }
+ }
+}
+
+macro_rules! assert_err {
+ ($ty:ident, $input:literal, $kind:ident, $( $span:tt )+ ) => {
+ assert_err_single!($ty::parse($input), $kind, $($span)+);
+ assert_err_single!($crate::Literal::parse($input), $kind, $($span)+);
+ };
+}
+
+macro_rules! assert_err_single {
+ ($expr:expr, $kind:ident, $( $span:tt )+ ) => {
+ let res = $expr;
+ let err = match res {
+ Err(e) => e,
+ Ok(v) => panic!(
+ "Expected `{}` to return an error, but it returned Ok({:?})",
+ stringify!($expr),
+ v,
+ ),
+ };
+ if err.kind != $crate::err::ParseErrorKind::$kind {
+ panic!(
+ "Expected error kind {} for `{}` but got {:?}",
+ stringify!($kind),
+ stringify!($expr),
+ err.kind,
+ )
+ }
+ let expected_span = assert_err_single!(@span $($span)+);
+ if err.span != expected_span {
+ panic!(
+ "Expected error span {:?} for `{}` but got {:?}",
+ expected_span,
+ stringify!($expr),
+ err.span,
+ )
+ }
+ };
+ (@span $start:literal .. $end:literal) => { Some($start .. $end) };
+ (@span $at:literal) => { Some($at.. $at + 1) };
+ (@span None) => { None };
+}
diff --git a/third_party/rust/litrs/src/tests.rs b/third_party/rust/litrs/src/tests.rs
new file mode 100644
index 0000000000..613b429540
--- /dev/null
+++ b/third_party/rust/litrs/src/tests.rs
@@ -0,0 +1,349 @@
+use crate::Literal;
+
+
+#[test]
+fn empty() {
+ assert_err!(Literal, "", Empty, None);
+}
+
+#[test]
+fn invalid_literals() {
+ assert_err_single!(Literal::parse("."), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("+"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("-"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("e"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("e8"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("f32"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("foo"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("inf"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("nan"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("NaN"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("NAN"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("_2.7"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse(".5"), InvalidLiteral, None);
+}
+
+#[test]
+fn misc() {
+ assert_err_single!(Literal::parse("0x44.5"), UnexpectedChar, 4..6);
+ assert_err_single!(Literal::parse("a"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse(";"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("0;"), UnexpectedChar, 1);
+ assert_err_single!(Literal::parse(" 0"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("0 "), UnexpectedChar, 1);
+ assert_err_single!(Literal::parse("_"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("_3"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("a_123"), InvalidLiteral, None);
+ assert_err_single!(Literal::parse("B_123"), InvalidLiteral, None);
+}
+
+macro_rules! assert_no_panic {
+ ($input:expr) => {
+ let arr = $input;
+ let input = std::str::from_utf8(&arr).expect("not unicode");
+ let res = std::panic::catch_unwind(move || {
+ let _ = Literal::parse(input);
+ let _ = crate::BoolLit::parse(input);
+ let _ = crate::IntegerLit::parse(input);
+ let _ = crate::FloatLit::parse(input);
+ let _ = crate::CharLit::parse(input);
+ let _ = crate::StringLit::parse(input);
+ let _ = crate::ByteLit::parse(input);
+ let _ = crate::ByteStringLit::parse(input);
+ });
+
+ if let Err(e) = res {
+ println!("\n!!! panic for: {:?}", input);
+ std::panic::resume_unwind(e);
+ }
+ };
+}
+
+#[test]
+#[ignore]
+fn never_panic_up_to_3() {
+ for a in 0..128 {
+ assert_no_panic!([a]);
+ for b in 0..128 {
+ assert_no_panic!([a, b]);
+ for c in 0..128 {
+ assert_no_panic!([a, b, c]);
+ }
+ }
+ }
+}
+
+// This test takes super long in debug mode, but in release mode it's fine.
+#[test]
+#[ignore]
+fn never_panic_len_4() {
+ for a in 0..128 {
+ for b in 0..128 {
+ for c in 0..128 {
+ for d in 0..128 {
+ assert_no_panic!([a, b, c, d]);
+ }
+ }
+ }
+ }
+}
+
+#[cfg(feature = "proc-macro2")]
+#[test]
+fn proc_macro() {
+ use std::convert::TryFrom;
+ use proc_macro2::{
+ self as pm2, TokenTree, Group, TokenStream, Delimiter, Spacing, Punct, Span, Ident,
+ };
+ use crate::{
+ BoolLit, ByteLit, ByteStringLit, CharLit, FloatLit, IntegerLit, StringLit, err::TokenKind
+ };
+
+
+ macro_rules! assert_invalid_token {
+ ($input:expr, expected: $expected:path, actual: $actual:path $(,)?) => {
+ let err = $input.unwrap_err();
+ if err.expected != $expected {
+ panic!(
+ "err.expected was expected to be {:?}, but is {:?}",
+ $expected,
+ err.expected,
+ );
+ }
+ if err.actual != $actual {
+ panic!("err.actual was expected to be {:?}, but is {:?}", $actual, err.actual);
+ }
+ };
+ }
+
+
+ let pm_u16_lit = pm2::Literal::u16_suffixed(2700);
+ let pm_i16_lit = pm2::Literal::i16_unsuffixed(3912);
+ let pm_f32_lit = pm2::Literal::f32_unsuffixed(3.14);
+ let pm_f64_lit = pm2::Literal::f64_suffixed(99.3);
+ let pm_string_lit = pm2::Literal::string("hello 🦊");
+ let pm_bytestr_lit = pm2::Literal::byte_string(b"hello \nfoxxo");
+ let pm_char_lit = pm2::Literal::character('🦀');
+
+ let u16_lit = Literal::parse("2700u16".to_string()).unwrap();
+ let i16_lit = Literal::parse("3912".to_string()).unwrap();
+ let f32_lit = Literal::parse("3.14".to_string()).unwrap();
+ let f64_lit = Literal::parse("99.3f64".to_string()).unwrap();
+ let string_lit = Literal::parse(r#""hello 🦊""#.to_string()).unwrap();
+ let bytestr_lit = Literal::parse(r#"b"hello \nfoxxo""#.to_string()).unwrap();
+ let char_lit = Literal::parse("'🦀'".to_string()).unwrap();
+
+ assert_eq!(Literal::from(&pm_u16_lit), u16_lit);
+ assert_eq!(Literal::from(&pm_i16_lit), i16_lit);
+ assert_eq!(Literal::from(&pm_f32_lit), f32_lit);
+ assert_eq!(Literal::from(&pm_f64_lit), f64_lit);
+ assert_eq!(Literal::from(&pm_string_lit), string_lit);
+ assert_eq!(Literal::from(&pm_bytestr_lit), bytestr_lit);
+ assert_eq!(Literal::from(&pm_char_lit), char_lit);
+
+
+ let group = TokenTree::from(Group::new(Delimiter::Brace, TokenStream::new()));
+ let punct = TokenTree::from(Punct::new(':', Spacing::Alone));
+ let ident = TokenTree::from(Ident::new("peter", Span::call_site()));
+
+ assert_eq!(
+ Literal::try_from(TokenTree::Literal(pm2::Literal::string("hello 🦊"))).unwrap(),
+ Literal::String(StringLit::parse(r#""hello 🦊""#.to_string()).unwrap()),
+ );
+ assert_invalid_token!(
+ Literal::try_from(punct.clone()),
+ expected: TokenKind::Literal,
+ actual: TokenKind::Punct,
+ );
+ assert_invalid_token!(
+ Literal::try_from(group.clone()),
+ expected: TokenKind::Literal,
+ actual: TokenKind::Group,
+ );
+ assert_invalid_token!(
+ Literal::try_from(ident.clone()),
+ expected: TokenKind::Literal,
+ actual: TokenKind::Ident,
+ );
+
+
+ assert_eq!(Literal::from(IntegerLit::try_from(pm_u16_lit.clone()).unwrap()), u16_lit);
+ assert_eq!(Literal::from(IntegerLit::try_from(pm_i16_lit.clone()).unwrap()), i16_lit);
+ assert_eq!(Literal::from(FloatLit::try_from(pm_f32_lit.clone()).unwrap()), f32_lit);
+ assert_eq!(Literal::from(FloatLit::try_from(pm_f64_lit.clone()).unwrap()), f64_lit);
+ assert_eq!(Literal::from(StringLit::try_from(pm_string_lit.clone()).unwrap()), string_lit);
+ assert_eq!(
+ Literal::from(ByteStringLit::try_from(pm_bytestr_lit.clone()).unwrap()),
+ bytestr_lit,
+ );
+ assert_eq!(Literal::from(CharLit::try_from(pm_char_lit.clone()).unwrap()), char_lit);
+
+ assert_invalid_token!(
+ StringLit::try_from(pm_u16_lit.clone()),
+ expected: TokenKind::StringLit,
+ actual: TokenKind::IntegerLit,
+ );
+ assert_invalid_token!(
+ StringLit::try_from(pm_f32_lit.clone()),
+ expected: TokenKind::StringLit,
+ actual: TokenKind::FloatLit,
+ );
+ assert_invalid_token!(
+ ByteLit::try_from(pm_bytestr_lit.clone()),
+ expected: TokenKind::ByteLit,
+ actual: TokenKind::ByteStringLit,
+ );
+ assert_invalid_token!(
+ ByteLit::try_from(pm_i16_lit.clone()),
+ expected: TokenKind::ByteLit,
+ actual: TokenKind::IntegerLit,
+ );
+ assert_invalid_token!(
+ IntegerLit::try_from(pm_string_lit.clone()),
+ expected: TokenKind::IntegerLit,
+ actual: TokenKind::StringLit,
+ );
+ assert_invalid_token!(
+ IntegerLit::try_from(pm_char_lit.clone()),
+ expected: TokenKind::IntegerLit,
+ actual: TokenKind::CharLit,
+ );
+
+
+ assert_eq!(
+ Literal::from(IntegerLit::try_from(TokenTree::from(pm_u16_lit.clone())).unwrap()),
+ u16_lit,
+ );
+ assert_eq!(
+ Literal::from(IntegerLit::try_from(TokenTree::from(pm_i16_lit.clone())).unwrap()),
+ i16_lit,
+ );
+ assert_eq!(
+ Literal::from(FloatLit::try_from(TokenTree::from(pm_f32_lit.clone())).unwrap()),
+ f32_lit,
+ );
+ assert_eq!(
+ Literal::from(FloatLit::try_from(TokenTree::from(pm_f64_lit.clone())).unwrap()),
+ f64_lit,
+ );
+ assert_eq!(
+ Literal::from(StringLit::try_from(TokenTree::from(pm_string_lit.clone())).unwrap()),
+ string_lit,
+ );
+ assert_eq!(
+ Literal::from(ByteStringLit::try_from(TokenTree::from(pm_bytestr_lit.clone())).unwrap()),
+ bytestr_lit,
+ );
+ assert_eq!(
+ Literal::from(CharLit::try_from(TokenTree::from(pm_char_lit.clone())).unwrap()),
+ char_lit,
+ );
+
+ assert_invalid_token!(
+ StringLit::try_from(TokenTree::from(pm_u16_lit.clone())),
+ expected: TokenKind::StringLit,
+ actual: TokenKind::IntegerLit,
+ );
+ assert_invalid_token!(
+ StringLit::try_from(TokenTree::from(pm_f32_lit.clone())),
+ expected: TokenKind::StringLit,
+ actual: TokenKind::FloatLit,
+ );
+ assert_invalid_token!(
+ BoolLit::try_from(TokenTree::from(pm_bytestr_lit.clone())),
+ expected: TokenKind::BoolLit,
+ actual: TokenKind::ByteStringLit,
+ );
+ assert_invalid_token!(
+ BoolLit::try_from(TokenTree::from(pm_i16_lit.clone())),
+ expected: TokenKind::BoolLit,
+ actual: TokenKind::IntegerLit,
+ );
+ assert_invalid_token!(
+ IntegerLit::try_from(TokenTree::from(pm_string_lit.clone())),
+ expected: TokenKind::IntegerLit,
+ actual: TokenKind::StringLit,
+ );
+ assert_invalid_token!(
+ IntegerLit::try_from(TokenTree::from(pm_char_lit.clone())),
+ expected: TokenKind::IntegerLit,
+ actual: TokenKind::CharLit,
+ );
+
+ assert_invalid_token!(
+ StringLit::try_from(TokenTree::from(group)),
+ expected: TokenKind::StringLit,
+ actual: TokenKind::Group,
+ );
+ assert_invalid_token!(
+ BoolLit::try_from(TokenTree::from(punct)),
+ expected: TokenKind::BoolLit,
+ actual: TokenKind::Punct,
+ );
+ assert_invalid_token!(
+ FloatLit::try_from(TokenTree::from(ident)),
+ expected: TokenKind::FloatLit,
+ actual: TokenKind::Ident,
+ );
+}
+
+#[cfg(feature = "proc-macro2")]
+#[test]
+fn bool_try_from_tt() {
+ use std::convert::TryFrom;
+ use proc_macro2::{Ident, Span, TokenTree};
+ use crate::BoolLit;
+
+
+ let ident = |s: &str| Ident::new(s, Span::call_site());
+
+ assert_eq!(BoolLit::try_from(TokenTree::Ident(ident("true"))).unwrap(), BoolLit::True);
+ assert_eq!(BoolLit::try_from(TokenTree::Ident(ident("false"))).unwrap(), BoolLit::False);
+
+ assert!(BoolLit::try_from(TokenTree::Ident(ident("falsex"))).is_err());
+ assert!(BoolLit::try_from(TokenTree::Ident(ident("_false"))).is_err());
+ assert!(BoolLit::try_from(TokenTree::Ident(ident("False"))).is_err());
+ assert!(BoolLit::try_from(TokenTree::Ident(ident("True"))).is_err());
+ assert!(BoolLit::try_from(TokenTree::Ident(ident("ltrue"))).is_err());
+
+
+ assert_eq!(
+ Literal::try_from(TokenTree::Ident(ident("true"))).unwrap(),
+ Literal::Bool(BoolLit::True),
+ );
+ assert_eq!(
+ Literal::try_from(TokenTree::Ident(ident("false"))).unwrap(),
+ Literal::Bool(BoolLit::False),
+ );
+
+ assert!(Literal::try_from(TokenTree::Ident(ident("falsex"))).is_err());
+ assert!(Literal::try_from(TokenTree::Ident(ident("_false"))).is_err());
+ assert!(Literal::try_from(TokenTree::Ident(ident("False"))).is_err());
+ assert!(Literal::try_from(TokenTree::Ident(ident("True"))).is_err());
+ assert!(Literal::try_from(TokenTree::Ident(ident("ltrue"))).is_err());
+}
+
+#[cfg(feature = "proc-macro2")]
+#[test]
+fn invalid_token_display() {
+ use crate::{InvalidToken, err::TokenKind};
+
+ let span = crate::err::Span::Two(proc_macro2::Span::call_site());
+ assert_eq!(
+ InvalidToken {
+ actual: TokenKind::StringLit,
+ expected: TokenKind::FloatLit,
+ span,
+ }.to_string(),
+ r#"expected a float literal (e.g. `3.14`), but found a string literal (e.g. "Ferris")"#,
+ );
+
+ assert_eq!(
+ InvalidToken {
+ actual: TokenKind::Punct,
+ expected: TokenKind::Literal,
+ span,
+ }.to_string(),
+ r#"expected a literal, but found a punctuation character"#,
+ );
+}
diff --git a/third_party/rust/naga/.cargo-checksum.json b/third_party/rust/naga/.cargo-checksum.json
index 9696ce31b8..8ac93423af 100644
--- a/third_party/rust/naga/.cargo-checksum.json
+++ b/third_party/rust/naga/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74","CHANGELOG.md":"6b2c4d8dfd8c537811c33744703b4c03fa8aa15f5fab8f0e2be76f597cb7e273","Cargo.toml":"dcdd9d3b310431871c1b37fbe3f75b2baa193820376a466d9ab0bbd481ff7e6c","README.md":"daa4717a9952b52604bbc3a55af902b252adeacc779991317d8f301f07faa94b","benches/criterion.rs":"f45e38b26e1323e934d32623572ff5395a53fed06f760eb1e07b22ed07858a38","src/arena.rs":"33ed2ec7b36429b133ed2a7de6fb9735827f69ea8b6c2ce97f64746a24a5bf36","src/back/dot/mod.rs":"a40050a73ac00c8fa43dd0b45a84fca6959d28c8c99ab3046b01f33c02f8c8f4","src/back/glsl/features.rs":"092bb95d34735b8cff198b8a50ec8ec2f8f89a1190cb33dfc59c1d0f9f0064cc","src/back/glsl/keywords.rs":"1546facbaddf696602102f32e47db7afc875f8ca3fbccc2122e0bcc45e022b53","src/back/glsl/mod.rs":"deb3e2cdcc5845e59c2644abfff4bd8a5c5d7d5e6ec70b818107d7abeddda328","src/back/hlsl/conv.rs":"5e40946f2d5ad6589dd2b0570d2c300dd96f92f0f954b829dc54a822de7774e8","src/back/hlsl/help.rs":"8f4ec24f74f3153a58b04f441bef16ecc8d400466d53187b06fb6b60a934a1ec","src/back/hlsl/keywords.rs":"eb4af8d697fb7f3991859d66339b5b2eac27c9fe41b73146ac838b207f462c9c","src/back/hlsl/mod.rs":"c677ebbb649a1c5f85f350d8da7129457d50ff52b1c88c1f0fac4a9d11eb020c","src/back/hlsl/storage.rs":"3170f165ff68d2482f8a8dbfa4bbf4b65d2446a7f208f33eea9eb187bb57eb09","src/back/hlsl/writer.rs":"3e054ef1e1d38ac5606fbfed4fe0400e2c39cfac0525dee9e1756ed2361e2244","src/back/mod.rs":"b941caed50c086f49d25e76228d247ba6c2da6dbeea18d968c02dc68bb97f409","src/back/msl/keywords.rs":"998c0d86a26e5cf031c75f35cde28f2b390fe207a2e7d0eed8516ffdb99c1a8e","src/back/msl/mod.rs":"16d905902e30cf900ec924b66ff496adbbbc54af15c59713f358bfac042a625a","src/back/msl/sampler.rs":"9b01d68669e12ff7123243284b85e1a9d2c4d49140bd74ca32dedc007cbf15af","src/back/msl/writer.rs":"e8af7054f33a812948731bd451be510e80552c61dfaed9f3079b4c3a7b369304","src/back/spv/block.rs":"2881cbc6c0a3e310a777b61c950dd97bbee5776583fe6ef13ee04095a8214768","src/back/spv/helpers.rs":"a4e260130f39c7345decec40dadf1e94419c8f6d236ce7a53b5300aa72952a1b","src/back/spv/image.rs":"e4b982ce430e17881d6370191d849dbe6bb8f6d86f4896815eb1736e43b4e302","src/back/spv/index.rs":"26611dd50df5cfd214900e19415f5374dd301d3b7d3bfedbc5ec0f254328287a","src/back/spv/instructions.rs":"d0ced535fdec49323105a7d6ee40a8ed6b4966ac5f0f40b062f0eb11a531b106","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"31b0229f59b5784b57851fcf6325095add58af6de3afa85d518a4e266c4b99a9","src/back/spv/ray.rs":"a34bf6b26d873f7270caa45841d9ef291aca8d9732ecd086b14d8856038e1e41","src/back/spv/recyclable.rs":"114db0ea12774d6514f995d07295cb9a42631ab75165fc60980c10e9b5ecb832","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/writer.rs":"a76a73c0692162da24ab5508bc3ca70eb5e01367fe54472d100e237dbd594467","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"15ba0e1ab7358b725d1cbc2e0b0b2284c33cc240ae84b20e186519efbb5d96d9","src/block.rs":"c69089e5bbb6de6ba24efb15b21d5d434fcabfbc4d48feae948d2a4da135aae7","src/compact/expressions.rs":"7a4c916282a5b484519ed29ab451c7b595d8dea73c83c5c2cf7efc6fbc648fda","src/compact/functions.rs":"174bd9167ecf6353afb8c36d365ba3f9b483233eb4bacf578e50183c7433aa15","src/compact/handle_set_map.rs":"817c5193352d5fd6a61a5c970daba23224e14a65aea15f8f1c8679c99f834ca2","src/compact/mod.rs":"f1a606e8732f3c5837ab40ba5569eb1687336ef412f7f4b6cc348dd52b8076b3","src/com
pact/statements.rs":"4df33ee9589300e769e75c674bdc30578e93704ec3eb2aabc7132121745b55c8","src/compact/types.rs":"18343f2ca2c123eea2531cffc1d54a7798797caccecaa1f9b8c4fd5dd6ca1a05","src/front/glsl/ast.rs":"a4615f0c52b0dc9fdb07f816b4534c1ca547c2d176008ca86d66f9e6874f227d","src/front/glsl/builtins.rs":"d35501d5b42b61c261da24436b82eafdf96371b1600d148648d90d041f736ae4","src/front/glsl/context.rs":"066203c24ff5bc6154aa671f4492b5e8dfede8b57ef886f093cc95470d66411b","src/front/glsl/error.rs":"cca4a3aa9de2808952ff68c183755df5fdf6a7cb81f170ba747795176c0342fd","src/front/glsl/functions.rs":"60838c34b8295112e5696b52d710acebb93e0a982e05f8eb87d3b80f52eb7793","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"c6e81710ae94a52583ba6f2a80a505d6bcd6ea6552008b80b27539af48838df1","src/front/glsl/offset.rs":"9358602ca4f9ef21d5066d674dae757bf88fdf5c289c4360534354d13bd41dc0","src/front/glsl/parser.rs":"fe5291512db412b33b6c09d5b3dcf7c54ff6ec55b47f0a078dcc11695e78471d","src/front/glsl/parser/declarations.rs":"d637cc52e553910a2e97b70b3366c15aefbe737f413adb11c27efd184c1fbf9d","src/front/glsl/parser/expressions.rs":"520cfc9402d5fbd48e52ef1d36562c6b74794c09ec33ec1ebb10aa48d129b66f","src/front/glsl/parser/functions.rs":"67615684e1c13a1b0e6c0b6028bdf040a14f5d1aea3fde82a5783921244d90d3","src/front/glsl/parser/types.rs":"aeb97e1a5fb03205cd5630c29da59d81a376ce9a83a603b62b037e63ad948e88","src/front/glsl/parser_tests.rs":"bfd4dff2580f4369a57edbcace47d23e2666751ffc3ab55f8d7dfe01f1a66311","src/front/glsl/token.rs":"c25c489b152ee2d445ace3c2046473abe64d558b8d27fa08709110e58718b6ac","src/front/glsl/types.rs":"58c9cf3d570dff8cb68f2931faf5b18e875e510741bf035ec10b9ff6df27c5d8","src/front/glsl/variables.rs":"fb2a09e386b6e98ca9fb8fb744afac1e8b19d1b67c6ede5c474e3ba860d3d4cb","src/front/interpolator.rs":"9b6ca498d5fbd9bc1515510a04e303a00b324121d7285da3c955cfe18eb4224c","src/front/mod.rs":"77acd7fb71d004969d1ee69fc728647f03242762988786c4e15fadf8315600af","src/front/spv/convert.rs":"dccc6671e6a4a7f1139aecdf979faa3689609081af5fa2cbbd6a2e8c4128c004","src/front/spv/error.rs":"6438aac57cfcf5d3858dd7652ccda1967a3123c6374f1cab829092b00549f70f","src/front/spv/function.rs":"3a3f0c07862750f79f8ebc273c5df11efc67272566458410f776bd8fa271a0f8","src/front/spv/image.rs":"5d55cfbf6752732a594114cd09a9a207216e1ee85d8f2c9bc4310217a55ea321","src/front/spv/mod.rs":"af2771e7e6b38b44e11b8ca2dba31dfdc81a3bbde041b2e73eed361b892b9a91","src/front/spv/null.rs":"e1446d99d04c76a9c3bbd24dd9b20c4711ce8a918a9b403be6cccbde1175b3b4","src/front/type_gen.rs":"b4f1df23380e06c9fdad4140810ce96ab041dbb1d371a07045b4e0069aa8ba55","src/front/wgsl/error.rs":"cbc87c24ef97bbec96c04cab0ee75fe64f855f263b9edca90498a7cbd253b801","src/front/wgsl/index.rs":"2b9a4929a46bd822d3ed6f9a150e24d437e5bdca8293eb748aebe80ce7e74153","src/front/wgsl/lower/construction.rs":"92342e27f5bdeb598e178799b74aa610788549c19a49fe0ae8914916bfa3c7be","src/front/wgsl/lower/conversion.rs":"961b19bf8ddd4667c6caf854a1889f3d6477757f4125538c3e9ca7d730975dd7","src/front/wgsl/lower/mod.rs":"174def9462bae5c2aed3aa0eb1b4773c282e9ff0320a7dfdb662aeb4bf51cc22","src/front/wgsl/mod.rs":"02b194a0a29ef7281f71b424564e18ada4a8b1a0d8c26ec40b6be195bd4c4904","src/front/wgsl/parse/ast.rs":"c7eaae40179f0889f2b142d3b31968cbfab6d3cfe02e425912c6da8dadac51df","src/front/wgsl/parse/conv.rs":"01b25edbe80b263a3fa51bc980c075630bb31d4af851441323383eb4f3b83360","src/front/wgsl/parse/lexer.rs":"17db87d0017f8f9a80fa151b8545f04e1b40c4e5feef6197f4a117efa03488bf","src/front/wgsl/parse/mod.rs":"3
b4895a2baf91c719b95f0afb6441ffac2036c2a9ff817e633882fd257afcc38","src/front/wgsl/parse/number.rs":"43b2a03963e61ee047eeac144ab817bf9f9e9a9517b26b68ff40f2f6236de05d","src/front/wgsl/tests.rs":"39d0b44d0f073a7599c88b7c4efd1572886f3af074fa2015454623be313b297f","src/front/wgsl/to_wgsl.rs":"2e2e30d86b07f209b866e530d3a882803bf28b39ce379052561a749f628e8e28","src/keywords/mod.rs":"0138f3931f8af0b0a05174549d0fd2152945b027dc3febefc1bbd676581d2e45","src/keywords/wgsl.rs":"7c3b364b60ca29cb8a68ef781de9ecd28b76b74bed18bf18a35d2ebffaa855ab","src/lib.rs":"ec1ac0883866f5bf60fc8409b97671bbafc21eaca5e13c68894bd3e7fb93e348","src/proc/constant_evaluator.rs":"8f53985da9d8f1ea16938ab3561b4a5ec496c6c5f5df116830b286eaddd3ba14","src/proc/emitter.rs":"39ac886c651e2ad33c06a676a7e4826a0e93de0af660c01e8e4b1f7406742f88","src/proc/index.rs":"f4250f6944c2b631e8140979024e8deb86fa8d5352d8641ba954a388b2c0940e","src/proc/layouter.rs":"b3d061c87424f36981c902716f37ab7b72f2bb2d0c2d7e900c51149318ea1a0a","src/proc/mod.rs":"c780f9bb2464f6c61af34e782d711128501a1c769ef27184c388a92b88bfac38","src/proc/namer.rs":"7328fac41e40890c64c7ee2fa985a4395424f18b08d30f30ca2583fdabd2fd35","src/proc/terminator.rs":"13c59bf00f5b26171d971effc421091f5e00dedddd246c2daa44fe65aeda060a","src/proc/typifier.rs":"99de19270d01c12ec49d14323aa1d9b8774f1ee715804af7235deff70739ba3d","src/span.rs":"6560599f20b8bc2de746ee9fd6b05c32bb630af914fce8845d84fdc72f9a636c","src/valid/analyzer.rs":"8472b98f16a4a4a0fa7079197db25696f77ef3e1602a7cddea1930daebd27917","src/valid/compose.rs":"83e4c09c39f853cf085b83b87e48b3db571da619132960d3ec954ebdfb0a74f2","src/valid/expression.rs":"1cdbd594dbdb33d8473d93c11112cf717e262bb8c35cee10b01db4322b2237d7","src/valid/function.rs":"40754e51906b053becdd8813b189fe709b7693c08babd28b5d3f5c576475b171","src/valid/handles.rs":"0878915e67b16d7c41cf8245d9ab3b3f4a604e7d4e87527ea40e03efcbf1f74a","src/valid/interface.rs":"32ef8e4665106b5c71540833e17ee9cd1dde5a900c9b81f61e0b7b8192c4aaf2","src/valid/mod.rs":"1690984337db07c119abd481d71d8bc9be6323dd39998dc6cf464f586deb3a7a","src/valid/type.rs":"09e18bb9510dbb0cfb4a8ac054afee4c4f56063d614159ab5b956aa1e5850468"},"package":null} \ No newline at end of file
+{"files":{".cargo/config.toml":"d7389d2a0c08ec72b79e83a3c76980903e3f9123625c32e69c798721193e2e74","CHANGELOG.md":"6b2c4d8dfd8c537811c33744703b4c03fa8aa15f5fab8f0e2be76f597cb7e273","Cargo.toml":"eab8de21e33a65dbcddbdfd97fca5b98d5cf684f288ac32cf6177a761e44a2e0","README.md":"daa4717a9952b52604bbc3a55af902b252adeacc779991317d8f301f07faa94b","benches/criterion.rs":"f45e38b26e1323e934d32623572ff5395a53fed06f760eb1e07b22ed07858a38","src/arena.rs":"33ed2ec7b36429b133ed2a7de6fb9735827f69ea8b6c2ce97f64746a24a5bf36","src/back/dot/mod.rs":"a40050a73ac00c8fa43dd0b45a84fca6959d28c8c99ab3046b01f33c02f8c8f4","src/back/glsl/features.rs":"3d12147d201aaed746a94741356458a435a1ff7cf30b66baf44ba0b8dfe4b0ca","src/back/glsl/keywords.rs":"1546facbaddf696602102f32e47db7afc875f8ca3fbccc2122e0bcc45e022b53","src/back/glsl/mod.rs":"9e8b34a09401744a2ad4deae4d4863bd0be1d7d5da6ca72a98ca80fe0e3dfde6","src/back/hlsl/conv.rs":"2d7a8e7753b8fb21659e582612eea82e42e353abd23df719de450074a4da731e","src/back/hlsl/help.rs":"06da97ea0d58e2b94823ca1dae67a8611be6d5d047649b1d83755acb4c110808","src/back/hlsl/keywords.rs":"a7164690a4da866e6bfb18ced20e32cc8c42dd7387e0e84addf0c2674f529cf5","src/back/hlsl/mod.rs":"2f5296c45a2147093cae17250321580e7f01c57f907e529d19521eccd0cd4147","src/back/hlsl/storage.rs":"2c2a0071cafe487a398e396dddc85bdb319b1a5d74c097d529078e247a904359","src/back/hlsl/writer.rs":"36f0410edf9c0a8295e4916ca0e7a4e98cd170fcd5ecf6826df6051bef003b9c","src/back/mod.rs":"b941caed50c086f49d25e76228d247ba6c2da6dbeea18d968c02dc68bb97f409","src/back/msl/keywords.rs":"e6a4ef77363f995de1f8079c0b8591497cbf9520c5d3b2d41c7e1f483e8abd24","src/back/msl/mod.rs":"15fdb90b8cd2b98273b22b9569fc322eb473fd135865eef82cc615d27320d779","src/back/msl/sampler.rs":"9b01d68669e12ff7123243284b85e1a9d2c4d49140bd74ca32dedc007cbf15af","src/back/msl/writer.rs":"27e8604a5d11391b91b328f420e93f7cf475364a783b3dc5ba8bb720f17d9d86","src/back/spv/block.rs":"e2326e10cc8ca64398636c1b27166b406611006ffc2388c20fca4a271d609afe","src/back/spv/helpers.rs":"a4e260130f39c7345decec40dadf1e94419c8f6d236ce7a53b5300aa72952a1b","src/back/spv/image.rs":"e4b982ce430e17881d6370191d849dbe6bb8f6d86f4896815eb1736e43b4e302","src/back/spv/index.rs":"26611dd50df5cfd214900e19415f5374dd301d3b7d3bfedbc5ec0f254328287a","src/back/spv/instructions.rs":"d0ced535fdec49323105a7d6ee40a8ed6b4966ac5f0f40b062f0eb11a531b106","src/back/spv/layout.rs":"e263de53cd2f9a03ad94b82b434ce636609bc1ed435a2d1132951663bfaa8ebd","src/back/spv/mod.rs":"31b0229f59b5784b57851fcf6325095add58af6de3afa85d518a4e266c4b99a9","src/back/spv/ray.rs":"a34bf6b26d873f7270caa45841d9ef291aca8d9732ecd086b14d8856038e1e41","src/back/spv/recyclable.rs":"114db0ea12774d6514f995d07295cb9a42631ab75165fc60980c10e9b5ecb832","src/back/spv/selection.rs":"81e404abfa0a977f7c1f76ccb37a78d13ccadbda229048dad53cc67687cc39db","src/back/spv/writer.rs":"e90f76d7de82429db5d375b679de5dd73f205e245c97622924271995db373d1e","src/back/wgsl/mod.rs":"2dd12bbea9ace835850192bb68c5760953da6bac6a636073d1eca19381c0c0b6","src/back/wgsl/writer.rs":"96795df390cffade8ca27baea88ab29592d7e425c4351a9e2591d4f4ecdc73f3","src/block.rs":"c69089e5bbb6de6ba24efb15b21d5d434fcabfbc4d48feae948d2a4da135aae7","src/compact/expressions.rs":"7a4c916282a5b484519ed29ab451c7b595d8dea73c83c5c2cf7efc6fbc648fda","src/compact/functions.rs":"174bd9167ecf6353afb8c36d365ba3f9b483233eb4bacf578e50183c7433aa15","src/compact/handle_set_map.rs":"817c5193352d5fd6a61a5c970daba23224e14a65aea15f8f1c8679c99f834ca2","src/compact/mod.rs":"f1a606e8732f3c5837ab40ba5569eb1687336ef412f7f4b6cc348dd52b8076b3","src/com
pact/statements.rs":"4df33ee9589300e769e75c674bdc30578e93704ec3eb2aabc7132121745b55c8","src/compact/types.rs":"18343f2ca2c123eea2531cffc1d54a7798797caccecaa1f9b8c4fd5dd6ca1a05","src/front/glsl/ast.rs":"a4615f0c52b0dc9fdb07f816b4534c1ca547c2d176008ca86d66f9e6874f227d","src/front/glsl/builtins.rs":"d35501d5b42b61c261da24436b82eafdf96371b1600d148648d90d041f736ae4","src/front/glsl/context.rs":"066203c24ff5bc6154aa671f4492b5e8dfede8b57ef886f093cc95470d66411b","src/front/glsl/error.rs":"cca4a3aa9de2808952ff68c183755df5fdf6a7cb81f170ba747795176c0342fd","src/front/glsl/functions.rs":"b420be6b54195e9cdabdf76bb854e3e1f3be6542c6c129656fd0b1bd900dcebd","src/front/glsl/lex.rs":"08736ae8beb955da5b0e6e3e0f45995a824995f7096d516a2910417e9c7afa32","src/front/glsl/mod.rs":"c6e81710ae94a52583ba6f2a80a505d6bcd6ea6552008b80b27539af48838df1","src/front/glsl/offset.rs":"9358602ca4f9ef21d5066d674dae757bf88fdf5c289c4360534354d13bd41dc0","src/front/glsl/parser.rs":"fe5291512db412b33b6c09d5b3dcf7c54ff6ec55b47f0a078dcc11695e78471d","src/front/glsl/parser/declarations.rs":"d637cc52e553910a2e97b70b3366c15aefbe737f413adb11c27efd184c1fbf9d","src/front/glsl/parser/expressions.rs":"520cfc9402d5fbd48e52ef1d36562c6b74794c09ec33ec1ebb10aa48d129b66f","src/front/glsl/parser/functions.rs":"75aedcea4133bc4aba06ef49b1697eac96cc28d191e9830689fc4a6c0c4856eb","src/front/glsl/parser/types.rs":"aeb97e1a5fb03205cd5630c29da59d81a376ce9a83a603b62b037e63ad948e88","src/front/glsl/parser_tests.rs":"bfd4dff2580f4369a57edbcace47d23e2666751ffc3ab55f8d7dfe01f1a66311","src/front/glsl/token.rs":"c25c489b152ee2d445ace3c2046473abe64d558b8d27fa08709110e58718b6ac","src/front/glsl/types.rs":"58c9cf3d570dff8cb68f2931faf5b18e875e510741bf035ec10b9ff6df27c5d8","src/front/glsl/variables.rs":"fb2a09e386b6e98ca9fb8fb744afac1e8b19d1b67c6ede5c474e3ba860d3d4cb","src/front/interpolator.rs":"9b6ca498d5fbd9bc1515510a04e303a00b324121d7285da3c955cfe18eb4224c","src/front/mod.rs":"77acd7fb71d004969d1ee69fc728647f03242762988786c4e15fadf8315600af","src/front/spv/convert.rs":"dccc6671e6a4a7f1139aecdf979faa3689609081af5fa2cbbd6a2e8c4128c004","src/front/spv/error.rs":"6438aac57cfcf5d3858dd7652ccda1967a3123c6374f1cab829092b00549f70f","src/front/spv/function.rs":"1acb7bdd34ecfe08c6f4b4d06c2a0ea74aaf9975352e8804e3e4fab90745132f","src/front/spv/image.rs":"5d55cfbf6752732a594114cd09a9a207216e1ee85d8f2c9bc4310217a55ea321","src/front/spv/mod.rs":"22d0de7c43c42279e788144ff806cadfe3a3ea7923d961d11740af22492c4087","src/front/spv/null.rs":"e1446d99d04c76a9c3bbd24dd9b20c4711ce8a918a9b403be6cccbde1175b3b4","src/front/type_gen.rs":"b4f1df23380e06c9fdad4140810ce96ab041dbb1d371a07045b4e0069aa8ba55","src/front/wgsl/error.rs":"e1efd61062a5eb5f7e0413dc05d17abdbe0679c08f2fbdb7478e2b6e8dd13b25","src/front/wgsl/index.rs":"2b9a4929a46bd822d3ed6f9a150e24d437e5bdca8293eb748aebe80ce7e74153","src/front/wgsl/lower/construction.rs":"92342e27f5bdeb598e178799b74aa610788549c19a49fe0ae8914916bfa3c7be","src/front/wgsl/lower/conversion.rs":"961b19bf8ddd4667c6caf854a1889f3d6477757f4125538c3e9ca7d730975dd7","src/front/wgsl/lower/mod.rs":"08eece7a5460e414e2f8398cec96f12a8b9f6a457270426d8e4f045b62290d1f","src/front/wgsl/mod.rs":"02b194a0a29ef7281f71b424564e18ada4a8b1a0d8c26ec40b6be195bd4c4904","src/front/wgsl/parse/ast.rs":"c7eaae40179f0889f2b142d3b31968cbfab6d3cfe02e425912c6da8dadac51df","src/front/wgsl/parse/conv.rs":"9b2a06b5fd577e1881b2212e1d675d3aefe4d1fee99a17b5f7b07c36913e8186","src/front/wgsl/parse/lexer.rs":"17db87d0017f8f9a80fa151b8545f04e1b40c4e5feef6197f4a117efa03488bf","src/front/wgsl/parse/mod.rs":"3
b4895a2baf91c719b95f0afb6441ffac2036c2a9ff817e633882fd257afcc38","src/front/wgsl/parse/number.rs":"dafd3d8651cfa1389cb359d76d39bd689e54f8d5025aa23e06c6edd871369efd","src/front/wgsl/tests.rs":"7a0a083a5b66af8e7d4b1a02401b27f077eb72d07181b610693f35b11f107c6c","src/front/wgsl/to_wgsl.rs":"2e2e30d86b07f209b866e530d3a882803bf28b39ce379052561a749f628e8e28","src/keywords/mod.rs":"0138f3931f8af0b0a05174549d0fd2152945b027dc3febefc1bbd676581d2e45","src/keywords/wgsl.rs":"c648ac44241ad55c8c8bad3d8f1bab973d11ddb9c380dcca369b735ed3975309","src/lib.rs":"f2072172957699d4282f247c452d8d8f0a0da08b9d78b279ee010296669d28d8","src/proc/constant_evaluator.rs":"bea5d259dbc4d9f9dacf3717dcb17a0774a22f1b3e5251b7e5b6991330ed3057","src/proc/emitter.rs":"39ac886c651e2ad33c06a676a7e4826a0e93de0af660c01e8e4b1f7406742f88","src/proc/index.rs":"f4250f6944c2b631e8140979024e8deb86fa8d5352d8641ba954a388b2c0940e","src/proc/layouter.rs":"b3d061c87424f36981c902716f37ab7b72f2bb2d0c2d7e900c51149318ea1a0a","src/proc/mod.rs":"4be5dcb137147cd8182a291f90959c46f1681c2d2c7da9e63f702a5f84c8809d","src/proc/namer.rs":"7328fac41e40890c64c7ee2fa985a4395424f18b08d30f30ca2583fdabd2fd35","src/proc/terminator.rs":"13c59bf00f5b26171d971effc421091f5e00dedddd246c2daa44fe65aeda060a","src/proc/typifier.rs":"99de19270d01c12ec49d14323aa1d9b8774f1ee715804af7235deff70739ba3d","src/span.rs":"6560599f20b8bc2de746ee9fd6b05c32bb630af914fce8845d84fdc72f9a636c","src/valid/analyzer.rs":"8472b98f16a4a4a0fa7079197db25696f77ef3e1602a7cddea1930daebd27917","src/valid/compose.rs":"83e4c09c39f853cf085b83b87e48b3db571da619132960d3ec954ebdfb0a74f2","src/valid/expression.rs":"7a8d5f74677c627dee3e15d223e83453ea7f6567dc806fcdfeebd32081012779","src/valid/function.rs":"40754e51906b053becdd8813b189fe709b7693c08babd28b5d3f5c576475b171","src/valid/handles.rs":"0878915e67b16d7c41cf8245d9ab3b3f4a604e7d4e87527ea40e03efcbf1f74a","src/valid/interface.rs":"32ef8e4665106b5c71540833e17ee9cd1dde5a900c9b81f61e0b7b8192c4aaf2","src/valid/mod.rs":"3b2772c88561aeb4dc8f5ce0d8ed5169bcdf5f7db04a62aaf22d04c171cb4f35","src/valid/type.rs":"61357577fa2dffa9c7326504f5c1a5fe7c44afd5d6f439c2354b390c6783fc86"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/naga/Cargo.toml b/third_party/rust/naga/Cargo.toml
index 5fa521a779..dc2434e4b0 100644
--- a/third_party/rust/naga/Cargo.toml
+++ b/third_party/rust/naga/Cargo.toml
@@ -52,7 +52,7 @@ bitflags = "2.4"
log = "0.4"
num-traits = "0.2"
rustc-hash = "1.1.0"
-thiserror = "1.0.56"
+thiserror = "1.0.57"
[dependencies.arbitrary]
version = "1.3"
@@ -79,7 +79,7 @@ version = "0.2.1"
optional = true
[dependencies.serde]
-version = "1.0.195"
+version = "1.0.196"
features = ["derive"]
optional = true
diff --git a/third_party/rust/naga/src/back/glsl/features.rs b/third_party/rust/naga/src/back/glsl/features.rs
index e7de05f695..99c128c6d9 100644
--- a/third_party/rust/naga/src/back/glsl/features.rs
+++ b/third_party/rust/naga/src/back/glsl/features.rs
@@ -1,8 +1,8 @@
use super::{BackendResult, Error, Version, Writer};
use crate::{
back::glsl::{Options, WriterFlags},
- AddressSpace, Binding, Expression, Handle, ImageClass, ImageDimension, Interpolation, Sampling,
- Scalar, ScalarKind, ShaderStage, StorageFormat, Type, TypeInner,
+ AddressSpace, Binding, Expression, Handle, ImageClass, ImageDimension, Interpolation,
+ SampleLevel, Sampling, Scalar, ScalarKind, ShaderStage, StorageFormat, Type, TypeInner,
};
use std::fmt::Write;
@@ -48,6 +48,8 @@ bitflags::bitflags! {
///
/// We can always support this, either through the language or a polyfill
const INSTANCE_INDEX = 1 << 22;
+ /// Sample specific LODs of cube / array shadow textures
+ const TEXTURE_SHADOW_LOD = 1 << 23;
}
}
@@ -125,6 +127,7 @@ impl FeaturesManager {
check_feature!(TEXTURE_SAMPLES, 150);
check_feature!(TEXTURE_LEVELS, 130);
check_feature!(IMAGE_SIZE, 430, 310);
+ check_feature!(TEXTURE_SHADOW_LOD, 200, 300);
// Return an error if there are missing features
if missing.is_empty() {
@@ -251,6 +254,11 @@ impl FeaturesManager {
}
}
+ if self.0.contains(Features::TEXTURE_SHADOW_LOD) {
+ // https://registry.khronos.org/OpenGL/extensions/EXT/EXT_texture_shadow_lod.txt
+ writeln!(out, "#extension GL_EXT_texture_shadow_lod : require")?;
+ }
+
Ok(())
}
}
@@ -469,6 +477,47 @@ impl<'a, W> Writer<'a, W> {
}
}
}
+ Expression::ImageSample { image, level, offset, .. } => {
+ if let TypeInner::Image {
+ dim,
+ arrayed,
+ class: ImageClass::Depth { .. },
+ } = *info[image].ty.inner_with(&module.types) {
+ let lod = matches!(level, SampleLevel::Zero | SampleLevel::Exact(_));
+ let bias = matches!(level, SampleLevel::Bias(_));
+ let auto = matches!(level, SampleLevel::Auto);
+ let cube = dim == ImageDimension::Cube;
+ let array2d = dim == ImageDimension::D2 && arrayed;
+ let gles = self.options.version.is_es();
+
+ // We have a workaround of using `textureGrad` instead of `textureLod` if the LOD is zero,
+ // so we don't *need* this extension for those cases.
+ // But if we're explicitly allowed to use the extension (`WriterFlags::TEXTURE_SHADOW_LOD`),
+ // we always use it instead of the workaround.
+ let grad_workaround_applicable = (array2d || (cube && !arrayed)) && level == SampleLevel::Zero;
+ let prefer_grad_workaround = grad_workaround_applicable && !self.options.writer_flags.contains(WriterFlags::TEXTURE_SHADOW_LOD);
+
+ let mut ext_used = false;
+
+ // float texture(sampler2DArrayShadow sampler, vec4 P [, float bias])
+ // float texture(samplerCubeArrayShadow sampler, vec4 P, float compare [, float bias])
+ ext_used |= (array2d || cube && arrayed) && bias;
+
+ // The non `bias` version of this was standardized in GL 4.3, but never in GLES.
+ // float textureOffset(sampler2DArrayShadow sampler, vec4 P, ivec2 offset [, float bias])
+ ext_used |= array2d && (bias || (gles && auto)) && offset.is_some();
+
+ // float textureLod(sampler2DArrayShadow sampler, vec4 P, float lod)
+ // float textureLodOffset(sampler2DArrayShadow sampler, vec4 P, float lod, ivec2 offset)
+ // float textureLod(samplerCubeShadow sampler, vec4 P, float lod)
+ // float textureLod(samplerCubeArrayShadow sampler, vec4 P, float compare, float lod)
+ ext_used |= (cube || array2d) && lod && !prefer_grad_workaround;
+
+ if ext_used {
+ features.request(Features::TEXTURE_SHADOW_LOD);
+ }
+ }
+ }
_ => {}
}
}
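
The hunk above only requests TEXTURE_SHADOW_LOD when the zero-LOD `textureGrad` fallback is not preferred. As a rough sketch (a hypothetical helper, not part of naga; the boolean names mirror the locals above), the fallback decision boils down to:

    // array2d: 2D arrayed depth image; cube: cube depth image (see the locals above).
    fn prefer_grad_workaround(
        array2d: bool,
        cube: bool,
        arrayed: bool,
        lod_is_zero: bool,
        ext_allowed_by_writer_flags: bool,
    ) -> bool {
        let applicable = (array2d || (cube && !arrayed)) && lod_is_zero;
        // Only fall back to a zero-gradient textureGrad when the writer was not
        // explicitly allowed to rely on GL_EXT_texture_shadow_lod.
        applicable && !ext_allowed_by_writer_flags
    }
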
diff --git a/third_party/rust/naga/src/back/glsl/mod.rs b/third_party/rust/naga/src/back/glsl/mod.rs
index e346d43257..9bda594610 100644
--- a/third_party/rust/naga/src/back/glsl/mod.rs
+++ b/third_party/rust/naga/src/back/glsl/mod.rs
@@ -178,7 +178,7 @@ impl Version {
/// Note: `location=` for vertex inputs and fragment outputs is supported
/// unconditionally for GLES 300.
fn supports_explicit_locations(&self) -> bool {
- *self >= Version::Desktop(410) || *self >= Version::new_gles(310)
+ *self >= Version::Desktop(420) || *self >= Version::new_gles(310)
}
fn supports_early_depth_test(&self) -> bool {
@@ -646,16 +646,6 @@ impl<'a, W: Write> Writer<'a, W> {
// preprocessor not the processor ¯\_(ツ)_/¯
self.features.write(self.options, &mut self.out)?;
- // Write the additional extensions
- if self
- .options
- .writer_flags
- .contains(WriterFlags::TEXTURE_SHADOW_LOD)
- {
- // https://www.khronos.org/registry/OpenGL/extensions/EXT/EXT_texture_shadow_lod.txt
- writeln!(self.out, "#extension GL_EXT_texture_shadow_lod : require")?;
- }
-
// glsl es requires a precision to be specified for floats and ints
// TODO: Should this be user configurable?
if es {
@@ -1300,7 +1290,14 @@ impl<'a, W: Write> Writer<'a, W> {
let inner = expr_info.ty.inner_with(&self.module.types);
- if let Expression::Math { fun, arg, arg1, .. } = *expr {
+ if let Expression::Math {
+ fun,
+ arg,
+ arg1,
+ arg2,
+ ..
+ } = *expr
+ {
match fun {
crate::MathFunction::Dot => {
// if the expression is a Dot product with integer arguments,
@@ -1315,6 +1312,14 @@ impl<'a, W: Write> Writer<'a, W> {
}
}
}
+ crate::MathFunction::ExtractBits => {
+ // Only argument 1 is re-used.
+ self.need_bake_expressions.insert(arg1.unwrap());
+ }
+ crate::MathFunction::InsertBits => {
+ // Only argument 2 is re-used.
+ self.need_bake_expressions.insert(arg2.unwrap());
+ }
crate::MathFunction::CountLeadingZeros => {
if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
self.need_bake_expressions.insert(arg);
@@ -2451,6 +2456,9 @@ impl<'a, W: Write> Writer<'a, W> {
crate::Literal::I64(_) => {
return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
}
+ crate::Literal::U64(_) => {
+ return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
+ }
crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
return Err(Error::Custom(
"Abstract types should not appear in IR presented to backends".into(),
@@ -2620,51 +2628,49 @@ impl<'a, W: Write> Writer<'a, W> {
level,
depth_ref,
} => {
- let dim = match *ctx.resolve_type(image, &self.module.types) {
- TypeInner::Image { dim, .. } => dim,
+ let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
+ TypeInner::Image {
+ dim,
+ class,
+ arrayed,
+ ..
+ } => (dim, class, arrayed),
_ => unreachable!(),
};
-
- if dim == crate::ImageDimension::Cube
- && array_index.is_some()
- && depth_ref.is_some()
- {
- match level {
- crate::SampleLevel::Zero
- | crate::SampleLevel::Exact(_)
- | crate::SampleLevel::Gradient { .. }
- | crate::SampleLevel::Bias(_) => {
- return Err(Error::Custom(String::from(
- "gsamplerCubeArrayShadow isn't supported in textureGrad, \
- textureLod or texture with bias",
- )))
- }
- crate::SampleLevel::Auto => {}
+ let mut err = None;
+ if dim == crate::ImageDimension::Cube {
+ if offset.is_some() {
+ err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
+ }
+ if arrayed
+ && matches!(class, crate::ImageClass::Depth { .. })
+ && matches!(level, crate::SampleLevel::Gradient { .. })
+ {
+                            err = Some("samplerCubeArrayShadow doesn't support textureGrad");
}
}
+ if gather.is_some() && level != crate::SampleLevel::Zero {
+ err = Some("textureGather doesn't support LOD parameters");
+ }
+ if let Some(err) = err {
+ return Err(Error::Custom(String::from(err)));
+ }
- // textureLod on sampler2DArrayShadow and samplerCubeShadow does not exist in GLSL.
- // To emulate this, we will have to use textureGrad with a constant gradient of 0.
- let workaround_lod_array_shadow_as_grad = (array_index.is_some()
- || dim == crate::ImageDimension::Cube)
- && depth_ref.is_some()
- && gather.is_none()
- && !self
- .options
- .writer_flags
- .contains(WriterFlags::TEXTURE_SHADOW_LOD);
-
- //Write the function to be used depending on the sample level
+ // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
+ // unless `GL_EXT_texture_shadow_lod` is present.
+ // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
+ let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
+ || (dim == crate::ImageDimension::D2 && arrayed))
+ && level == crate::SampleLevel::Zero
+ && matches!(class, crate::ImageClass::Depth { .. })
+ && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
+
+ // Write the function to be used depending on the sample level
let fun_name = match level {
crate::SampleLevel::Zero if gather.is_some() => "textureGather",
+ crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
- crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => {
- if workaround_lod_array_shadow_as_grad {
- "textureGrad"
- } else {
- "textureLod"
- }
- }
+ crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
crate::SampleLevel::Gradient { .. } => "textureGrad",
};
let offset_name = match offset {
@@ -2727,7 +2733,7 @@ impl<'a, W: Write> Writer<'a, W> {
crate::SampleLevel::Auto => (),
// Zero needs level set to 0
crate::SampleLevel::Zero => {
- if workaround_lod_array_shadow_as_grad {
+ if workaround_lod_with_grad {
let vec_dim = match dim {
crate::ImageDimension::Cube => 3,
_ => 2,
@@ -2739,13 +2745,8 @@ impl<'a, W: Write> Writer<'a, W> {
}
// Exact and bias require another argument
crate::SampleLevel::Exact(expr) => {
- if workaround_lod_array_shadow_as_grad {
- log::warn!("Unable to `textureLod` a shadow array, ignoring the LOD");
- write!(self.out, ", vec2(0,0), vec2(0,0)")?;
- } else {
- write!(self.out, ", ")?;
- self.write_expr(expr, ctx)?;
- }
+ write!(self.out, ", ")?;
+ self.write_expr(expr, ctx)?;
}
crate::SampleLevel::Bias(_) => {
// This needs to be done after the offset writing
@@ -3155,7 +3156,29 @@ impl<'a, W: Write> Writer<'a, W> {
Mf::Abs => "abs",
Mf::Min => "min",
Mf::Max => "max",
- Mf::Clamp => "clamp",
+ Mf::Clamp => {
+ let scalar_kind = ctx
+ .resolve_type(arg, &self.module.types)
+ .scalar_kind()
+ .unwrap();
+ match scalar_kind {
+ crate::ScalarKind::Float => "clamp",
+ // Clamp is undefined if min > max. In practice this means it can use a median-of-three
+ // instruction to determine the value. This is fine according to the WGSL spec for float
+ // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
+ _ => {
+ write!(self.out, "min(max(")?;
+ self.write_expr(arg, ctx)?;
+ write!(self.out, ", ")?;
+ self.write_expr(arg1.unwrap(), ctx)?;
+ write!(self.out, "), ")?;
+ self.write_expr(arg2.unwrap(), ctx)?;
+ write!(self.out, ")")?;
+
+ return Ok(());
+ }
+ }
+ }
Mf::Saturate => {
write!(self.out, "clamp(")?;
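
Decomposing integer clamp into min/max keeps the WGSL-mandated result even for inverted bounds. A minimal sketch of that guarantee, in plain Rust standing in for the emitted GLSL:

    fn wgsl_int_clamp(e: i32, low: i32, high: i32) -> i32 {
        // WGSL: min(max(e, low), high); must hold even when low > high,
        // where a median-of-three hardware clamp could return either bound.
        e.max(low).min(high)
    }

    fn main() {
        // Inverted bounds: the result is pinned to `high`, matching the
        // min/max sequence the backend now writes for integer clamp.
        assert_eq!(wgsl_int_clamp(5, 7, 3), 3);
    }
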
@@ -3370,8 +3393,59 @@ impl<'a, W: Write> Writer<'a, W> {
}
Mf::CountOneBits => "bitCount",
Mf::ReverseBits => "bitfieldReverse",
- Mf::ExtractBits => "bitfieldExtract",
- Mf::InsertBits => "bitfieldInsert",
+ Mf::ExtractBits => {
+ // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
+                    // to sanitize the offset and count first. If we don't do this, AMD and Intel chips
+ // will return out-of-spec values if the extracted range is not within the bit width.
+ //
+ // This encodes the exact formula specified by the wgsl spec, without temporary values:
+ // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
+ //
+ // w = sizeof(x) * 8
+ // o = min(offset, w)
+ // c = min(count, w - o)
+ //
+ // bitfieldExtract(x, o, c)
+ //
+                    // extract_bits(e, min(offset, w), min(count, w - min(offset, w)))
+ let scalar_bits = ctx
+ .resolve_type(arg, &self.module.types)
+ .scalar_width()
+ .unwrap();
+
+ write!(self.out, "bitfieldExtract(")?;
+ self.write_expr(arg, ctx)?;
+ write!(self.out, ", int(min(")?;
+ self.write_expr(arg1.unwrap(), ctx)?;
+ write!(self.out, ", {scalar_bits}u)), int(min(",)?;
+ self.write_expr(arg2.unwrap(), ctx)?;
+ write!(self.out, ", {scalar_bits}u - min(")?;
+ self.write_expr(arg1.unwrap(), ctx)?;
+ write!(self.out, ", {scalar_bits}u))))")?;
+
+ return Ok(());
+ }
+ Mf::InsertBits => {
+ // InsertBits has the same considerations as ExtractBits above
+ let scalar_bits = ctx
+ .resolve_type(arg, &self.module.types)
+ .scalar_width()
+ .unwrap();
+
+ write!(self.out, "bitfieldInsert(")?;
+ self.write_expr(arg, ctx)?;
+ write!(self.out, ", ")?;
+ self.write_expr(arg1.unwrap(), ctx)?;
+ write!(self.out, ", int(min(")?;
+ self.write_expr(arg2.unwrap(), ctx)?;
+ write!(self.out, ", {scalar_bits}u)), int(min(",)?;
+ self.write_expr(arg3.unwrap(), ctx)?;
+ write!(self.out, ", {scalar_bits}u - min(")?;
+ self.write_expr(arg2.unwrap(), ctx)?;
+ write!(self.out, ", {scalar_bits}u))))")?;
+
+ return Ok(());
+ }
Mf::FindLsb => "findLSB",
Mf::FindMsb => "findMSB",
// data packing
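
The nested `min()` calls written above are the WGSL sanitization formula inlined. As a hedged illustration (plain Rust, not part of the crate), this is what those clamps compute for a 32-bit operand:

    // w = bit width, o = min(offset, w), c = min(count, w - o)
    fn sanitize_extract_bits(offset: u32, count: u32, bit_width: u32) -> (u32, u32) {
        let o = offset.min(bit_width);
        let c = count.min(bit_width - o);
        (o, c)
    }

    fn main() {
        // Ranges that run past the end of a u32 are clamped back in range,
        // so bitfieldExtract/bitfieldInsert never see out-of-spec arguments.
        assert_eq!(sanitize_extract_bits(28, 8, 32), (28, 4));
        assert_eq!(sanitize_extract_bits(40, 8, 32), (32, 0));
    }
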
diff --git a/third_party/rust/naga/src/back/hlsl/conv.rs b/third_party/rust/naga/src/back/hlsl/conv.rs
index b6918ddc42..2a6db35db8 100644
--- a/third_party/rust/naga/src/back/hlsl/conv.rs
+++ b/third_party/rust/naga/src/back/hlsl/conv.rs
@@ -21,8 +21,16 @@ impl crate::Scalar {
/// <https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-scalar>
pub(super) const fn to_hlsl_str(self) -> Result<&'static str, Error> {
match self.kind {
- crate::ScalarKind::Sint => Ok("int"),
- crate::ScalarKind::Uint => Ok("uint"),
+ crate::ScalarKind::Sint => match self.width {
+ 4 => Ok("int"),
+ 8 => Ok("int64_t"),
+ _ => Err(Error::UnsupportedScalar(self)),
+ },
+ crate::ScalarKind::Uint => match self.width {
+ 4 => Ok("uint"),
+ 8 => Ok("uint64_t"),
+ _ => Err(Error::UnsupportedScalar(self)),
+ },
crate::ScalarKind::Float => match self.width {
2 => Ok("half"),
4 => Ok("float"),
diff --git a/third_party/rust/naga/src/back/hlsl/help.rs b/third_party/rust/naga/src/back/hlsl/help.rs
index fa6062a1ad..4dd9ea5987 100644
--- a/third_party/rust/naga/src/back/hlsl/help.rs
+++ b/third_party/rust/naga/src/back/hlsl/help.rs
@@ -26,7 +26,11 @@ int dim_1d = NagaDimensions1D(image_1d);
```
*/
-use super::{super::FunctionCtx, BackendResult};
+use super::{
+ super::FunctionCtx,
+ writer::{EXTRACT_BITS_FUNCTION, INSERT_BITS_FUNCTION},
+ BackendResult,
+};
use crate::{arena::Handle, proc::NameKey};
use std::fmt::Write;
@@ -59,6 +63,13 @@ pub(super) struct WrappedMatCx2 {
pub(super) columns: crate::VectorSize,
}
+#[derive(Clone, Copy, Debug, Hash, Eq, Ord, PartialEq, PartialOrd)]
+pub(super) struct WrappedMath {
+ pub(super) fun: crate::MathFunction,
+ pub(super) scalar: crate::Scalar,
+ pub(super) components: Option<u32>,
+}
+
/// HLSL backend requires its own `ImageQuery` enum.
///
/// It is used inside `WrappedImageQuery` and should be unique per ImageQuery function.
@@ -851,12 +862,149 @@ impl<'a, W: Write> super::Writer<'a, W> {
Ok(())
}
+ pub(super) fn write_wrapped_math_functions(
+ &mut self,
+ module: &crate::Module,
+ func_ctx: &FunctionCtx,
+ ) -> BackendResult {
+ for (_, expression) in func_ctx.expressions.iter() {
+ if let crate::Expression::Math {
+ fun,
+ arg,
+ arg1: _arg1,
+ arg2: _arg2,
+ arg3: _arg3,
+ } = *expression
+ {
+ match fun {
+ crate::MathFunction::ExtractBits => {
+ // The behavior of our extractBits polyfill is undefined if offset + count > bit_width. We need
+                        // to sanitize the offset and count first. If we don't do this, we will get out-of-spec
+ // values if the extracted range is not within the bit width.
+ //
+ // This encodes the exact formula specified by the wgsl spec:
+ // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
+ //
+ // w = sizeof(x) * 8
+ // o = min(offset, w)
+ // c = min(count, w - o)
+ //
+ // bitfieldExtract(x, o, c)
+ let arg_ty = func_ctx.resolve_type(arg, &module.types);
+ let scalar = arg_ty.scalar().unwrap();
+ let components = arg_ty.components();
+
+ let wrapped = WrappedMath {
+ fun,
+ scalar,
+ components,
+ };
+
+ if !self.wrapped.math.insert(wrapped) {
+ continue;
+ }
+
+ // Write return type
+ self.write_value_type(module, arg_ty)?;
+
+ let scalar_width: u8 = scalar.width * 8;
+
+ // Write function name and parameters
+ writeln!(self.out, " {EXTRACT_BITS_FUNCTION}(")?;
+ write!(self.out, " ")?;
+ self.write_value_type(module, arg_ty)?;
+ writeln!(self.out, " e,")?;
+ writeln!(self.out, " uint offset,")?;
+ writeln!(self.out, " uint count")?;
+ writeln!(self.out, ") {{")?;
+
+ // Write function body
+ writeln!(self.out, " uint w = {scalar_width};")?;
+ writeln!(self.out, " uint o = min(offset, w);")?;
+ writeln!(self.out, " uint c = min(count, w - o);")?;
+ writeln!(
+ self.out,
+ " return (c == 0 ? 0 : (e << (w - c - o)) >> (w - c));"
+ )?;
+
+ // End of function body
+ writeln!(self.out, "}}")?;
+ }
+ crate::MathFunction::InsertBits => {
+ // The behavior of our insertBits polyfill has the same constraints as the extractBits polyfill.
+
+ let arg_ty = func_ctx.resolve_type(arg, &module.types);
+ let scalar = arg_ty.scalar().unwrap();
+ let components = arg_ty.components();
+
+ let wrapped = WrappedMath {
+ fun,
+ scalar,
+ components,
+ };
+
+ if !self.wrapped.math.insert(wrapped) {
+ continue;
+ }
+
+ // Write return type
+ self.write_value_type(module, arg_ty)?;
+
+ let scalar_width: u8 = scalar.width * 8;
+ let scalar_max: u64 = match scalar.width {
+ 1 => 0xFF,
+ 2 => 0xFFFF,
+ 4 => 0xFFFFFFFF,
+ 8 => 0xFFFFFFFFFFFFFFFF,
+ _ => unreachable!(),
+ };
+
+ // Write function name and parameters
+ writeln!(self.out, " {INSERT_BITS_FUNCTION}(")?;
+ write!(self.out, " ")?;
+ self.write_value_type(module, arg_ty)?;
+ writeln!(self.out, " e,")?;
+ write!(self.out, " ")?;
+ self.write_value_type(module, arg_ty)?;
+ writeln!(self.out, " newbits,")?;
+ writeln!(self.out, " uint offset,")?;
+ writeln!(self.out, " uint count")?;
+ writeln!(self.out, ") {{")?;
+
+ // Write function body
+ writeln!(self.out, " uint w = {scalar_width}u;")?;
+ writeln!(self.out, " uint o = min(offset, w);")?;
+ writeln!(self.out, " uint c = min(count, w - o);")?;
+
+ // The `u` suffix on the literals is _extremely_ important. Otherwise it will use
+ // i32 shifting instead of the intended u32 shifting.
+ writeln!(
+ self.out,
+ " uint mask = (({scalar_max}u >> ({scalar_width}u - c)) << o);"
+ )?;
+ writeln!(
+ self.out,
+ " return (c == 0 ? e : ((e & ~mask) | ((newbits << o) & mask)));"
+ )?;
+
+ // End of function body
+ writeln!(self.out, "}}")?;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ Ok(())
+ }
+
/// Helper function that writes various wrapped functions
pub(super) fn write_wrapped_functions(
&mut self,
module: &crate::Module,
func_ctx: &FunctionCtx,
) -> BackendResult {
+ self.write_wrapped_math_functions(module, func_ctx)?;
self.write_wrapped_compose_functions(module, func_ctx.expressions)?;
for (handle, _) in func_ctx.expressions.iter() {
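
A compact Rust rendering of the two generated helper bodies (unsigned 32-bit case only; hypothetical code, the real output is HLSL emitted by the writer above) makes the shift and mask formulas easy to verify; for signed types the emitted HLSL relies on the arithmetic right shift to sign-extend the extracted field:

    fn naga_extract_bits_u32(e: u32, offset: u32, count: u32) -> u32 {
        let w = 32u32;
        let o = offset.min(w);
        let c = count.min(w - o);
        // (c == 0 ? 0 : (e << (w - c - o)) >> (w - c)); the c == 0 guard also
        // keeps both shift amounts below the bit width.
        if c == 0 { 0 } else { (e << (w - c - o)) >> (w - c) }
    }

    fn naga_insert_bits_u32(e: u32, newbits: u32, offset: u32, count: u32) -> u32 {
        let w = 32u32;
        let o = offset.min(w);
        let c = count.min(w - o);
        if c == 0 {
            return e;
        }
        // The `u` suffixes in the generated HLSL matter for the same reason
        // this uses u32: the mask must be built with unsigned shifts.
        let mask = (u32::MAX >> (w - c)) << o;
        (e & !mask) | ((newbits << o) & mask)
    }

    fn main() {
        assert_eq!(naga_extract_bits_u32(0xF0, 4, 4), 0xF);
        assert_eq!(naga_insert_bits_u32(0, 0xF, 4, 4), 0xF0);
    }
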
diff --git a/third_party/rust/naga/src/back/hlsl/keywords.rs b/third_party/rust/naga/src/back/hlsl/keywords.rs
index 059e533ff7..2cb715c42c 100644
--- a/third_party/rust/naga/src/back/hlsl/keywords.rs
+++ b/third_party/rust/naga/src/back/hlsl/keywords.rs
@@ -817,6 +817,8 @@ pub const RESERVED: &[&str] = &[
// Naga utilities
super::writer::MODF_FUNCTION,
super::writer::FREXP_FUNCTION,
+ super::writer::EXTRACT_BITS_FUNCTION,
+ super::writer::INSERT_BITS_FUNCTION,
];
// DXC scalar types, from https://github.com/microsoft/DirectXShaderCompiler/blob/18c9e114f9c314f93e68fbc72ce207d4ed2e65ae/tools/clang/lib/AST/ASTContextHLSL.cpp#L48-L254
diff --git a/third_party/rust/naga/src/back/hlsl/mod.rs b/third_party/rust/naga/src/back/hlsl/mod.rs
index 37ddbd3d67..f37a223f47 100644
--- a/third_party/rust/naga/src/back/hlsl/mod.rs
+++ b/third_party/rust/naga/src/back/hlsl/mod.rs
@@ -256,6 +256,7 @@ struct Wrapped {
constructors: crate::FastHashSet<help::WrappedConstructor>,
struct_matrix_access: crate::FastHashSet<help::WrappedStructMatrixAccess>,
mat_cx2s: crate::FastHashSet<help::WrappedMatCx2>,
+ math: crate::FastHashSet<help::WrappedMath>,
}
impl Wrapped {
@@ -265,6 +266,7 @@ impl Wrapped {
self.constructors.clear();
self.struct_matrix_access.clear();
self.mat_cx2s.clear();
+ self.math.clear();
}
}
diff --git a/third_party/rust/naga/src/back/hlsl/storage.rs b/third_party/rust/naga/src/back/hlsl/storage.rs
index 1b8a6ec12d..4d3a6af56d 100644
--- a/third_party/rust/naga/src/back/hlsl/storage.rs
+++ b/third_party/rust/naga/src/back/hlsl/storage.rs
@@ -32,6 +32,16 @@ The [`temp_access_chain`] field is a member of [`Writer`] solely to
allow re-use of the `Vec`'s dynamic allocation. Its value is no longer
needed once HLSL for the access has been generated.
+Note about DXC and Load/Store functions:
+
+DXC's HLSL has generic [`Load` and `Store`] functions for [`ByteAddressBuffer`] and
+[`RWByteAddressBuffer`]. These are not available in FXC's HLSL, so we use
+them only for types that are themselves DXC-only, notably 64-bit and 16-bit types.
+
+FXC's HLSL has the functions Load, Load2, Load3, and Load4 (and Store, Store2, Store3, Store4),
+which load or store a vector of length 1, 2, 3, or 4. We use those for 32-bit types, bitcasting to
+the correct type if necessary.
+
[`Storage`]: crate::AddressSpace::Storage
[`ByteAddressBuffer`]: https://learn.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-object-byteaddressbuffer
[`RWByteAddressBuffer`]: https://learn.microsoft.com/en-us/windows/win32/direct3dhlsl/sm5-object-rwbyteaddressbuffer
@@ -42,6 +52,7 @@ needed once HLSL for the access has been generated.
[`Writer::temp_access_chain`]: super::Writer::temp_access_chain
[`temp_access_chain`]: super::Writer::temp_access_chain
[`Writer`]: super::Writer
+[`Load` and `Store`]: https://github.com/microsoft/DirectXShaderCompiler/wiki/ByteAddressBuffer-Load-Store-Additions
*/
use super::{super::FunctionCtx, BackendResult, Error};
@@ -161,20 +172,39 @@ impl<W: fmt::Write> super::Writer<'_, W> {
// working around the borrow checker in `self.write_expr`
let chain = mem::take(&mut self.temp_access_chain);
let var_name = &self.names[&NameKey::GlobalVariable(var_handle)];
- let cast = scalar.kind.to_hlsl_cast();
- write!(self.out, "{cast}({var_name}.Load(")?;
+ // See note about DXC and Load/Store in the module's documentation.
+ if scalar.width == 4 {
+ let cast = scalar.kind.to_hlsl_cast();
+ write!(self.out, "{cast}({var_name}.Load(")?;
+ } else {
+ let ty = scalar.to_hlsl_str()?;
+ write!(self.out, "{var_name}.Load<{ty}>(")?;
+ };
self.write_storage_address(module, &chain, func_ctx)?;
- write!(self.out, "))")?;
+ write!(self.out, ")")?;
+ if scalar.width == 4 {
+ write!(self.out, ")")?;
+ }
self.temp_access_chain = chain;
}
crate::TypeInner::Vector { size, scalar } => {
// working around the borrow checker in `self.write_expr`
let chain = mem::take(&mut self.temp_access_chain);
let var_name = &self.names[&NameKey::GlobalVariable(var_handle)];
- let cast = scalar.kind.to_hlsl_cast();
- write!(self.out, "{}({}.Load{}(", cast, var_name, size as u8)?;
+ let size = size as u8;
+ // See note about DXC and Load/Store in the module's documentation.
+ if scalar.width == 4 {
+ let cast = scalar.kind.to_hlsl_cast();
+ write!(self.out, "{cast}({var_name}.Load{size}(")?;
+ } else {
+ let ty = scalar.to_hlsl_str()?;
+ write!(self.out, "{var_name}.Load<{ty}{size}>(")?;
+ };
self.write_storage_address(module, &chain, func_ctx)?;
- write!(self.out, "))")?;
+ write!(self.out, ")")?;
+ if scalar.width == 4 {
+ write!(self.out, ")")?;
+ }
self.temp_access_chain = chain;
}
crate::TypeInner::Matrix {
@@ -288,26 +318,44 @@ impl<W: fmt::Write> super::Writer<'_, W> {
}
};
match *ty_resolution.inner_with(&module.types) {
- crate::TypeInner::Scalar(_) => {
+ crate::TypeInner::Scalar(scalar) => {
// working around the borrow checker in `self.write_expr`
let chain = mem::take(&mut self.temp_access_chain);
let var_name = &self.names[&NameKey::GlobalVariable(var_handle)];
- write!(self.out, "{level}{var_name}.Store(")?;
- self.write_storage_address(module, &chain, func_ctx)?;
- write!(self.out, ", asuint(")?;
- self.write_store_value(module, &value, func_ctx)?;
- writeln!(self.out, "));")?;
+ // See note about DXC and Load/Store in the module's documentation.
+ if scalar.width == 4 {
+ write!(self.out, "{level}{var_name}.Store(")?;
+ self.write_storage_address(module, &chain, func_ctx)?;
+ write!(self.out, ", asuint(")?;
+ self.write_store_value(module, &value, func_ctx)?;
+ writeln!(self.out, "));")?;
+ } else {
+ write!(self.out, "{level}{var_name}.Store(")?;
+ self.write_storage_address(module, &chain, func_ctx)?;
+ write!(self.out, ", ")?;
+ self.write_store_value(module, &value, func_ctx)?;
+ writeln!(self.out, ");")?;
+ }
self.temp_access_chain = chain;
}
- crate::TypeInner::Vector { size, .. } => {
+ crate::TypeInner::Vector { size, scalar } => {
// working around the borrow checker in `self.write_expr`
let chain = mem::take(&mut self.temp_access_chain);
let var_name = &self.names[&NameKey::GlobalVariable(var_handle)];
- write!(self.out, "{}{}.Store{}(", level, var_name, size as u8)?;
- self.write_storage_address(module, &chain, func_ctx)?;
- write!(self.out, ", asuint(")?;
- self.write_store_value(module, &value, func_ctx)?;
- writeln!(self.out, "));")?;
+ // See note about DXC and Load/Store in the module's documentation.
+ if scalar.width == 4 {
+ write!(self.out, "{}{}.Store{}(", level, var_name, size as u8)?;
+ self.write_storage_address(module, &chain, func_ctx)?;
+ write!(self.out, ", asuint(")?;
+ self.write_store_value(module, &value, func_ctx)?;
+ writeln!(self.out, "));")?;
+ } else {
+ write!(self.out, "{}{}.Store(", level, var_name)?;
+ self.write_storage_address(module, &chain, func_ctx)?;
+ write!(self.out, ", ")?;
+ self.write_store_value(module, &value, func_ctx)?;
+ writeln!(self.out, ");")?;
+ }
self.temp_access_chain = chain;
}
crate::TypeInner::Matrix {
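
To make the new branch concrete, a hypothetical sketch (the `buf`/`addr` names are invented for illustration) of the two scalar-load shapes the storage writer now emits:

    // Illustrative only; the real code goes through write_storage_address() etc.
    fn scalar_load_hlsl(width_bytes: u8, hlsl_ty: &str, hlsl_cast: &str) -> String {
        if width_bytes == 4 {
            // FXC-compatible path: untyped Load plus a bitcast.
            format!("{hlsl_cast}(buf.Load(addr))")
        } else {
            // DXC-only templated Load, used for 16- and 64-bit scalars.
            format!("buf.Load<{hlsl_ty}>(addr)")
        }
    }

    fn main() {
        assert_eq!(scalar_load_hlsl(4, "int", "asint"), "asint(buf.Load(addr))");
        assert_eq!(scalar_load_hlsl(8, "int64_t", ""), "buf.Load<int64_t>(addr)");
    }
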
diff --git a/third_party/rust/naga/src/back/hlsl/writer.rs b/third_party/rust/naga/src/back/hlsl/writer.rs
index 43f7212837..4ba856946b 100644
--- a/third_party/rust/naga/src/back/hlsl/writer.rs
+++ b/third_party/rust/naga/src/back/hlsl/writer.rs
@@ -19,6 +19,8 @@ const SPECIAL_OTHER: &str = "other";
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";
+pub(crate) const EXTRACT_BITS_FUNCTION: &str = "naga_extractBits";
+pub(crate) const INSERT_BITS_FUNCTION: &str = "naga_insertBits";
struct EpStructMember {
name: String,
@@ -125,14 +127,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
self.need_bake_expressions.insert(fun_handle);
}
- if let Expression::Math {
- fun,
- arg,
- arg1,
- arg2,
- arg3,
- } = *expr
- {
+ if let Expression::Math { fun, arg, .. } = *expr {
match fun {
crate::MathFunction::Asinh
| crate::MathFunction::Acosh
@@ -149,17 +144,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
| crate::MathFunction::Pack4x8unorm => {
self.need_bake_expressions.insert(arg);
}
- crate::MathFunction::ExtractBits => {
- self.need_bake_expressions.insert(arg);
- self.need_bake_expressions.insert(arg1.unwrap());
- self.need_bake_expressions.insert(arg2.unwrap());
- }
- crate::MathFunction::InsertBits => {
- self.need_bake_expressions.insert(arg);
- self.need_bake_expressions.insert(arg1.unwrap());
- self.need_bake_expressions.insert(arg2.unwrap());
- self.need_bake_expressions.insert(arg3.unwrap());
- }
crate::MathFunction::CountLeadingZeros => {
let inner = info[fun_handle].ty.inner_with(&module.types);
if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
@@ -2038,6 +2022,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
crate::Literal::U32(value) => write!(self.out, "{}u", value)?,
crate::Literal::I32(value) => write!(self.out, "{}", value)?,
+ crate::Literal::U64(value) => write!(self.out, "{}uL", value)?,
crate::Literal::I64(value) => write!(self.out, "{}L", value)?,
crate::Literal::Bool(value) => write!(self.out, "{}", value)?,
crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
@@ -2567,7 +2552,7 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
convert,
} => {
let inner = func_ctx.resolve_type(expr, &module.types);
- match convert {
+ let close_paren = match convert {
Some(dst_width) => {
let scalar = crate::Scalar {
kind,
@@ -2600,13 +2585,21 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
)));
}
};
+ true
}
None => {
- write!(self.out, "{}(", kind.to_hlsl_cast(),)?;
+ if inner.scalar_width() == Some(64) {
+ false
+ } else {
+ write!(self.out, "{}(", kind.to_hlsl_cast(),)?;
+ true
+ }
}
- }
+ };
self.write_expr(module, expr, func_ctx)?;
- write!(self.out, ")")?;
+ if close_paren {
+ write!(self.out, ")")?;
+ }
}
Expression::Math {
fun,
@@ -2620,8 +2613,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
enum Function {
Asincosh { is_sin: bool },
Atanh,
- ExtractBits,
- InsertBits,
Pack2x16float,
Pack2x16snorm,
Pack2x16unorm,
@@ -2705,8 +2696,8 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
Mf::ReverseBits => Function::MissingIntOverload("reversebits"),
Mf::FindLsb => Function::MissingIntReturnType("firstbitlow"),
Mf::FindMsb => Function::MissingIntReturnType("firstbithigh"),
- Mf::ExtractBits => Function::ExtractBits,
- Mf::InsertBits => Function::InsertBits,
+ Mf::ExtractBits => Function::Regular(EXTRACT_BITS_FUNCTION),
+ Mf::InsertBits => Function::Regular(INSERT_BITS_FUNCTION),
// Data Packing
Mf::Pack2x16float => Function::Pack2x16float,
Mf::Pack2x16snorm => Function::Pack2x16snorm,
@@ -2742,70 +2733,6 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
self.write_expr(module, arg, func_ctx)?;
write!(self.out, "))")?;
}
- Function::ExtractBits => {
- // e: T,
- // offset: u32,
- // count: u32
- // T is u32 or i32 or vecN<u32> or vecN<i32>
- if let (Some(offset), Some(count)) = (arg1, arg2) {
- let scalar_width: u8 = 32;
- // Works for signed and unsigned
- // (count == 0 ? 0 : (e << (32 - count - offset)) >> (32 - count))
- write!(self.out, "(")?;
- self.write_expr(module, count, func_ctx)?;
- write!(self.out, " == 0 ? 0 : (")?;
- self.write_expr(module, arg, func_ctx)?;
- write!(self.out, " << ({scalar_width} - ")?;
- self.write_expr(module, count, func_ctx)?;
- write!(self.out, " - ")?;
- self.write_expr(module, offset, func_ctx)?;
- write!(self.out, ")) >> ({scalar_width} - ")?;
- self.write_expr(module, count, func_ctx)?;
- write!(self.out, "))")?;
- }
- }
- Function::InsertBits => {
- // e: T,
- // newbits: T,
- // offset: u32,
- // count: u32
- // returns T
- // T is i32, u32, vecN<i32>, or vecN<u32>
- if let (Some(newbits), Some(offset), Some(count)) = (arg1, arg2, arg3) {
- let scalar_width: u8 = 32;
- let scalar_max: u32 = 0xFFFFFFFF;
- // mask = ((0xFFFFFFFFu >> (32 - count)) << offset)
- // (count == 0 ? e : ((e & ~mask) | ((newbits << offset) & mask)))
- write!(self.out, "(")?;
- self.write_expr(module, count, func_ctx)?;
- write!(self.out, " == 0 ? ")?;
- self.write_expr(module, arg, func_ctx)?;
- write!(self.out, " : ")?;
- write!(self.out, "(")?;
- self.write_expr(module, arg, func_ctx)?;
- write!(self.out, " & ~")?;
- // mask
- write!(self.out, "(({scalar_max}u >> ({scalar_width}u - ")?;
- self.write_expr(module, count, func_ctx)?;
- write!(self.out, ")) << ")?;
- self.write_expr(module, offset, func_ctx)?;
- write!(self.out, ")")?;
- // end mask
- write!(self.out, ") | ((")?;
- self.write_expr(module, newbits, func_ctx)?;
- write!(self.out, " << ")?;
- self.write_expr(module, offset, func_ctx)?;
- write!(self.out, ") & ")?;
- // // mask
- write!(self.out, "(({scalar_max}u >> ({scalar_width}u - ")?;
- self.write_expr(module, count, func_ctx)?;
- write!(self.out, ")) << ")?;
- self.write_expr(module, offset, func_ctx)?;
- write!(self.out, ")")?;
- // // end mask
- write!(self.out, "))")?;
- }
- }
Function::Pack2x16float => {
write!(self.out, "(f32tof16(")?;
self.write_expr(module, arg, func_ctx)?;
@@ -2944,9 +2871,15 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
}
write!(self.out, ")")?
}
+ // These overloads are only missing on FXC, so this is only needed for 32bit types,
+ // as non-32bit types are DXC only.
Function::MissingIntOverload(fun_name) => {
- let scalar_kind = func_ctx.resolve_type(arg, &module.types).scalar_kind();
- if let Some(ScalarKind::Sint) = scalar_kind {
+ let scalar_kind = func_ctx.resolve_type(arg, &module.types).scalar();
+ if let Some(crate::Scalar {
+ kind: ScalarKind::Sint,
+ width: 4,
+ }) = scalar_kind
+ {
write!(self.out, "asint({fun_name}(asuint(")?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")))")?;
@@ -2956,9 +2889,15 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
write!(self.out, ")")?;
}
}
+ // These overloads are only missing on FXC, so this is only needed for 32bit types,
+ // as non-32bit types are DXC only.
Function::MissingIntReturnType(fun_name) => {
- let scalar_kind = func_ctx.resolve_type(arg, &module.types).scalar_kind();
- if let Some(ScalarKind::Sint) = scalar_kind {
+ let scalar_kind = func_ctx.resolve_type(arg, &module.types).scalar();
+ if let Some(crate::Scalar {
+ kind: ScalarKind::Sint,
+ width: 4,
+ }) = scalar_kind
+ {
write!(self.out, "asint({fun_name}(")?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, "))")?;
@@ -2977,23 +2916,38 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
crate::VectorSize::Quad => ".xxxx",
};
- if let ScalarKind::Uint = scalar.kind {
- write!(self.out, "min((32u){s}, firstbitlow(")?;
+ let scalar_width_bits = scalar.width * 8;
+
+ if scalar.kind == ScalarKind::Uint || scalar.width != 4 {
+ write!(
+ self.out,
+ "min(({scalar_width_bits}u){s}, firstbitlow("
+ )?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, "))")?;
} else {
- write!(self.out, "asint(min((32u){s}, firstbitlow(")?;
+ // This is only needed for the FXC path, on 32bit signed integers.
+ write!(
+ self.out,
+ "asint(min(({scalar_width_bits}u){s}, firstbitlow("
+ )?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")))")?;
}
}
TypeInner::Scalar(scalar) => {
- if let ScalarKind::Uint = scalar.kind {
- write!(self.out, "min(32u, firstbitlow(")?;
+ let scalar_width_bits = scalar.width * 8;
+
+ if scalar.kind == ScalarKind::Uint || scalar.width != 4 {
+ write!(self.out, "min({scalar_width_bits}u, firstbitlow(")?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, "))")?;
} else {
- write!(self.out, "asint(min(32u, firstbitlow(")?;
+ // This is only needed for the FXC path, on 32bit signed integers.
+ write!(
+ self.out,
+ "asint(min({scalar_width_bits}u, firstbitlow("
+ )?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")))")?;
}
@@ -3012,30 +2966,47 @@ impl<'a, W: fmt::Write> super::Writer<'a, W> {
crate::VectorSize::Quad => ".xxxx",
};
- if let ScalarKind::Uint = scalar.kind {
- write!(self.out, "((31u){s} - firstbithigh(")?;
+ // scalar width - 1
+ let constant = scalar.width * 8 - 1;
+
+ if scalar.kind == ScalarKind::Uint {
+ write!(self.out, "(({constant}u){s} - firstbithigh(")?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, "))")?;
} else {
+ let conversion_func = match scalar.width {
+ 4 => "asint",
+ _ => "",
+ };
write!(self.out, "(")?;
self.write_expr(module, arg, func_ctx)?;
write!(
self.out,
- " < (0){s} ? (0){s} : (31){s} - asint(firstbithigh("
+ " < (0){s} ? (0){s} : ({constant}){s} - {conversion_func}(firstbithigh("
)?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")))")?;
}
}
TypeInner::Scalar(scalar) => {
+ // scalar width - 1
+ let constant = scalar.width * 8 - 1;
+
if let ScalarKind::Uint = scalar.kind {
- write!(self.out, "(31u - firstbithigh(")?;
+ write!(self.out, "({constant}u - firstbithigh(")?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, "))")?;
} else {
+ let conversion_func = match scalar.width {
+ 4 => "asint",
+ _ => "",
+ };
write!(self.out, "(")?;
self.write_expr(module, arg, func_ctx)?;
- write!(self.out, " < 0 ? 0 : 31 - asint(firstbithigh(")?;
+ write!(
+ self.out,
+ " < 0 ? 0 : {constant} - {conversion_func}(firstbithigh("
+ )?;
self.write_expr(module, arg, func_ctx)?;
write!(self.out, ")))")?;
}
diff --git a/third_party/rust/naga/src/back/msl/keywords.rs b/third_party/rust/naga/src/back/msl/keywords.rs
index f0025bf239..73c457dd34 100644
--- a/third_party/rust/naga/src/back/msl/keywords.rs
+++ b/third_party/rust/naga/src/back/msl/keywords.rs
@@ -4,6 +4,8 @@
// C++ - Standard for Programming Language C++ (N4431)
// https://www.open-std.org/jtc1/sc22/wg21/docs/papers/2015/n4431.pdf
pub const RESERVED: &[&str] = &[
+ // Undocumented
+ "assert", // found in https://github.com/gfx-rs/wgpu/issues/5347
// Standard for Programming Language C++ (N4431): 2.5 Alternative tokens
"and",
"bitor",
diff --git a/third_party/rust/naga/src/back/msl/mod.rs b/third_party/rust/naga/src/back/msl/mod.rs
index 5ef18730c9..68e5b79906 100644
--- a/third_party/rust/naga/src/back/msl/mod.rs
+++ b/third_party/rust/naga/src/back/msl/mod.rs
@@ -121,8 +121,8 @@ pub enum Error {
UnsupportedCall(String),
#[error("feature '{0}' is not implemented yet")]
FeatureNotImplemented(String),
- #[error("module is not valid")]
- Validation,
+ #[error("internal naga error: module should not have validated: {0}")]
+ GenericValidation(String),
#[error("BuiltIn {0:?} is not supported")]
UnsupportedBuiltIn(crate::BuiltIn),
#[error("capability {0:?} is not supported")]
@@ -306,13 +306,10 @@ impl Options {
},
})
}
- LocationMode::Uniform => {
- log::error!(
- "Unexpected Binding::Location({}) for the Uniform mode",
- location
- );
- Err(Error::Validation)
- }
+ LocationMode::Uniform => Err(Error::GenericValidation(format!(
+ "Unexpected Binding::Location({}) for the Uniform mode",
+ location
+ ))),
},
}
}
diff --git a/third_party/rust/naga/src/back/msl/writer.rs b/third_party/rust/naga/src/back/msl/writer.rs
index 1e496b5f50..5227d8e7db 100644
--- a/third_party/rust/naga/src/back/msl/writer.rs
+++ b/third_party/rust/naga/src/back/msl/writer.rs
@@ -319,7 +319,7 @@ pub struct Writer<W> {
}
impl crate::Scalar {
- const fn to_msl_name(self) -> &'static str {
+ fn to_msl_name(self) -> &'static str {
use crate::ScalarKind as Sk;
match self {
Self {
@@ -328,20 +328,29 @@ impl crate::Scalar {
} => "float",
Self {
kind: Sk::Sint,
- width: _,
+ width: 4,
} => "int",
Self {
kind: Sk::Uint,
- width: _,
+ width: 4,
} => "uint",
Self {
+ kind: Sk::Sint,
+ width: 8,
+ } => "long",
+ Self {
+ kind: Sk::Uint,
+ width: 8,
+ } => "ulong",
+ Self {
kind: Sk::Bool,
width: _,
} => "bool",
Self {
kind: Sk::AbstractInt | Sk::AbstractFloat,
width: _,
- } => unreachable!(),
+ } => unreachable!("Found Abstract scalar kind"),
+ _ => unreachable!("Unsupported scalar kind: {:?}", self),
}
}
}
@@ -735,7 +744,11 @@ impl<W: Write> Writer<W> {
crate::TypeInner::Vector { size, .. } => {
put_numeric_type(&mut self.out, crate::Scalar::U32, &[size])?
}
- _ => return Err(Error::Validation),
+ _ => {
+ return Err(Error::GenericValidation(
+ "Invalid type for image coordinate".into(),
+ ))
+ }
};
write!(self.out, "(")?;
@@ -1068,13 +1081,17 @@ impl<W: Write> Writer<W> {
let (offset, array_ty) = match context.module.types[global.ty].inner {
crate::TypeInner::Struct { ref members, .. } => match members.last() {
Some(&crate::StructMember { offset, ty, .. }) => (offset, ty),
- None => return Err(Error::Validation),
+ None => return Err(Error::GenericValidation("Struct has no members".into())),
},
crate::TypeInner::Array {
size: crate::ArraySize::Dynamic,
..
} => (0, global.ty),
- _ => return Err(Error::Validation),
+ ref ty => {
+ return Err(Error::GenericValidation(format!(
+ "Expected type with dynamic array, got {ty:?}"
+ )))
+ }
};
let (size, stride) = match context.module.types[array_ty].inner {
@@ -1084,7 +1101,11 @@ impl<W: Write> Writer<W> {
.size(context.module.to_ctx()),
stride,
),
- _ => return Err(Error::Validation),
+ ref ty => {
+ return Err(Error::GenericValidation(format!(
+ "Expected array type, got {ty:?}"
+ )))
+ }
};
// When the stride length is larger than the size, the final element's stride of
@@ -1273,6 +1294,9 @@ impl<W: Write> Writer<W> {
crate::Literal::I32(value) => {
write!(self.out, "{value}")?;
}
+ crate::Literal::U64(value) => {
+ write!(self.out, "{value}uL")?;
+ }
crate::Literal::I64(value) => {
write!(self.out, "{value}L")?;
}
@@ -1280,7 +1304,9 @@ impl<W: Write> Writer<W> {
write!(self.out, "{value}")?;
}
crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
- return Err(Error::Validation);
+ return Err(Error::GenericValidation(
+ "Unsupported abstract literal".into(),
+ ));
}
},
crate::Expression::Constant(handle) => {
@@ -1342,7 +1368,11 @@ impl<W: Write> Writer<W> {
crate::Expression::Splat { size, value } => {
let scalar = match *get_expr_ty(ctx, value).inner_with(&module.types) {
crate::TypeInner::Scalar(scalar) => scalar,
- _ => return Err(Error::Validation),
+ ref ty => {
+ return Err(Error::GenericValidation(format!(
+                            "Expected splat value to be a scalar, got {ty:?}",
+ )))
+ }
};
put_numeric_type(&mut self.out, scalar, &[size])?;
write!(self.out, "(")?;
@@ -1672,7 +1702,11 @@ impl<W: Write> Writer<W> {
self.put_expression(condition, context, true)?;
write!(self.out, ")")?;
}
- _ => return Err(Error::Validation),
+ ref ty => {
+ return Err(Error::GenericValidation(format!(
+ "Expected select condition to be a non-bool type, got {ty:?}",
+ )))
+ }
},
crate::Expression::Derivative { axis, expr, .. } => {
use crate::DerivativeAxis as Axis;
@@ -1794,8 +1828,8 @@ impl<W: Write> Writer<W> {
Mf::CountLeadingZeros => "clz",
Mf::CountOneBits => "popcount",
Mf::ReverseBits => "reverse_bits",
- Mf::ExtractBits => "extract_bits",
- Mf::InsertBits => "insert_bits",
+ Mf::ExtractBits => "",
+ Mf::InsertBits => "",
Mf::FindLsb => "",
Mf::FindMsb => "",
// data packing
@@ -1836,15 +1870,23 @@ impl<W: Write> Writer<W> {
self.put_expression(arg1.unwrap(), context, false)?;
write!(self.out, ")")?;
} else if fun == Mf::FindLsb {
+ let scalar = context.resolve_type(arg).scalar().unwrap();
+ let constant = scalar.width * 8 + 1;
+
write!(self.out, "((({NAMESPACE}::ctz(")?;
self.put_expression(arg, context, true)?;
- write!(self.out, ") + 1) % 33) - 1)")?;
+ write!(self.out, ") + 1) % {constant}) - 1)")?;
} else if fun == Mf::FindMsb {
let inner = context.resolve_type(arg);
+ let scalar = inner.scalar().unwrap();
+ let constant = scalar.width * 8 - 1;
- write!(self.out, "{NAMESPACE}::select(31 - {NAMESPACE}::clz(")?;
+ write!(
+ self.out,
+ "{NAMESPACE}::select({constant} - {NAMESPACE}::clz("
+ )?;
- if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
+ if scalar.kind == crate::ScalarKind::Sint {
write!(self.out, "{NAMESPACE}::select(")?;
self.put_expression(arg, context, true)?;
write!(self.out, ", ~")?;
@@ -1862,18 +1904,12 @@ impl<W: Write> Writer<W> {
match *inner {
crate::TypeInner::Vector { size, scalar } => {
let size = back::vector_size_str(size);
- if let crate::ScalarKind::Sint = scalar.kind {
- write!(self.out, "int{size}")?;
- } else {
- write!(self.out, "uint{size}")?;
- }
+ let name = scalar.to_msl_name();
+ write!(self.out, "{name}{size}")?;
}
crate::TypeInner::Scalar(scalar) => {
- if let crate::ScalarKind::Sint = scalar.kind {
- write!(self.out, "int")?;
- } else {
- write!(self.out, "uint")?;
- }
+ let name = scalar.to_msl_name();
+ write!(self.out, "{name}")?;
}
_ => (),
}
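
The `% (width * 8 + 1)` trick above maps Metal's `ctz` result for a zero input (the full bit width) back to the all-ones / -1 value WGSL expects from `firstTrailingBit`. A hedged Rust check of the 32-bit case:

    fn find_lsb_u32(x: u32) -> i32 {
        // metal::ctz(0) yields 32, and ((32 + 1) % 33) - 1 == -1.
        let ctz = x.trailing_zeros();
        ((ctz + 1) % 33) as i32 - 1
    }

    fn main() {
        assert_eq!(find_lsb_u32(0), -1);
        assert_eq!(find_lsb_u32(0b1000), 3);
    }
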
@@ -1891,6 +1927,52 @@ impl<W: Write> Writer<W> {
write!(self.out, "as_type<uint>(half2(")?;
self.put_expression(arg, context, false)?;
write!(self.out, "))")?;
+ } else if fun == Mf::ExtractBits {
+ // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
+            // to sanitize the offset and count first. If we don't do this, Apple chips
+ // will return out-of-spec values if the extracted range is not within the bit width.
+ //
+ // This encodes the exact formula specified by the wgsl spec, without temporary values:
+ // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
+ //
+ // w = sizeof(x) * 8
+ // o = min(offset, w)
+ // tmp = w - o
+ // c = min(count, tmp)
+ //
+ // bitfieldExtract(x, o, c)
+ //
+            // extract_bits(e, min(offset, w), min(count, w - min(offset, w)))
+
+ let scalar_bits = context.resolve_type(arg).scalar_width().unwrap();
+
+ write!(self.out, "{NAMESPACE}::extract_bits(")?;
+ self.put_expression(arg, context, true)?;
+ write!(self.out, ", {NAMESPACE}::min(")?;
+ self.put_expression(arg1.unwrap(), context, true)?;
+ write!(self.out, ", {scalar_bits}u), {NAMESPACE}::min(")?;
+ self.put_expression(arg2.unwrap(), context, true)?;
+ write!(self.out, ", {scalar_bits}u - {NAMESPACE}::min(")?;
+ self.put_expression(arg1.unwrap(), context, true)?;
+ write!(self.out, ", {scalar_bits}u)))")?;
+ } else if fun == Mf::InsertBits {
+ // The behavior of InsertBits has the same issue as ExtractBits.
+ //
+            // insertBits(e, newBits, min(offset, w), min(count, w - min(offset, w)))
+
+ let scalar_bits = context.resolve_type(arg).scalar_width().unwrap();
+
+ write!(self.out, "{NAMESPACE}::insert_bits(")?;
+ self.put_expression(arg, context, true)?;
+ write!(self.out, ", ")?;
+ self.put_expression(arg1.unwrap(), context, true)?;
+ write!(self.out, ", {NAMESPACE}::min(")?;
+ self.put_expression(arg2.unwrap(), context, true)?;
+ write!(self.out, ", {scalar_bits}u), {NAMESPACE}::min(")?;
+ self.put_expression(arg3.unwrap(), context, true)?;
+ write!(self.out, ", {scalar_bits}u - {NAMESPACE}::min(")?;
+ self.put_expression(arg2.unwrap(), context, true)?;
+ write!(self.out, ", {scalar_bits}u)))")?;
} else if fun == Mf::Radians {
write!(self.out, "((")?;
self.put_expression(arg, context, false)?;
@@ -1920,14 +2002,8 @@ impl<W: Write> Writer<W> {
kind,
width: convert.unwrap_or(src.width),
};
- let is_bool_cast =
- kind == crate::ScalarKind::Bool || src.kind == crate::ScalarKind::Bool;
let op = match convert {
- Some(w) if w == src.width || is_bool_cast => "static_cast",
- Some(8) if kind == crate::ScalarKind::Float => {
- return Err(Error::CapabilityNotSupported(valid::Capabilities::FLOAT64))
- }
- Some(_) => return Err(Error::Validation),
+ Some(_) => "static_cast",
None => "as_type",
};
write!(self.out, "{op}<")?;
@@ -1955,7 +2031,11 @@ impl<W: Write> Writer<W> {
self.put_expression(expr, context, true)?;
write!(self.out, ")")?;
}
- _ => return Err(Error::Validation),
+ ref ty => {
+ return Err(Error::GenericValidation(format!(
+ "Unsupported type for As: {ty:?}"
+ )))
+ }
},
// has to be a named expression
crate::Expression::CallResult(_)
@@ -1970,11 +2050,19 @@ impl<W: Write> Writer<W> {
crate::Expression::AccessIndex { base, .. } => {
match context.function.expressions[base] {
crate::Expression::GlobalVariable(handle) => handle,
- _ => return Err(Error::Validation),
+ ref ex => {
+ return Err(Error::GenericValidation(format!(
+ "Expected global variable in AccessIndex, got {ex:?}"
+ )))
+ }
}
}
crate::Expression::GlobalVariable(handle) => handle,
- _ => return Err(Error::Validation),
+ ref ex => {
+ return Err(Error::GenericValidation(format!(
+ "Unexpected expression in ArrayLength, got {ex:?}"
+ )))
+ }
};
if !is_scoped {
@@ -2140,10 +2228,12 @@ impl<W: Write> Writer<W> {
match length {
index::IndexableLength::Known(value) => write!(self.out, "{value}")?,
index::IndexableLength::Dynamic => {
- let global = context
- .function
- .originating_global(base)
- .ok_or(Error::Validation)?;
+ let global =
+ context.function.originating_global(base).ok_or_else(|| {
+ Error::GenericValidation(
+ "Could not find originating global".into(),
+ )
+ })?;
write!(self.out, "1 + ")?;
self.put_dynamic_array_max_index(global, context)?
}
@@ -2300,10 +2390,9 @@ impl<W: Write> Writer<W> {
write!(self.out, "{}u", limit - 1)?;
}
index::IndexableLength::Dynamic => {
- let global = context
- .function
- .originating_global(base)
- .ok_or(Error::Validation)?;
+ let global = context.function.originating_global(base).ok_or_else(|| {
+ Error::GenericValidation("Could not find originating global".into())
+ })?;
self.put_dynamic_array_max_index(global, context)?;
}
}
@@ -2489,7 +2578,14 @@ impl<W: Write> Writer<W> {
}
}
- if let Expression::Math { fun, arg, arg1, .. } = *expr {
+ if let Expression::Math {
+ fun,
+ arg,
+ arg1,
+ arg2,
+ ..
+ } = *expr
+ {
match fun {
crate::MathFunction::Dot => {
// WGSL's `dot` function works on any `vecN` type, but Metal's only
@@ -2514,6 +2610,14 @@ impl<W: Write> Writer<W> {
crate::MathFunction::FindMsb => {
self.need_bake_expressions.insert(arg);
}
+ crate::MathFunction::ExtractBits => {
+ // Only argument 1 is re-used.
+ self.need_bake_expressions.insert(arg1.unwrap());
+ }
+ crate::MathFunction::InsertBits => {
+ // Only argument 2 is re-used.
+ self.need_bake_expressions.insert(arg2.unwrap());
+ }
crate::MathFunction::Sign => {
// WGSL's `sign` function works also on signed ints, but Metal's only
// works on floating points, so we emit inline code for integer `sign`
@@ -3048,7 +3152,7 @@ impl<W: Write> Writer<W> {
for statement in statements {
if let crate::Statement::Emit(ref range) = *statement {
for handle in range.clone() {
- self.named_expressions.remove(&handle);
+ self.named_expressions.shift_remove(&handle);
}
}
}
@@ -3897,7 +4001,9 @@ impl<W: Write> Writer<W> {
binding: None,
first_time: true,
};
- let binding = binding.ok_or(Error::Validation)?;
+ let binding = binding.ok_or_else(|| {
+ Error::GenericValidation("Expected binding, got None".into())
+ })?;
if let crate::Binding::BuiltIn(crate::BuiltIn::PointSize) = *binding {
has_point_size = true;
diff --git a/third_party/rust/naga/src/back/spv/block.rs b/third_party/rust/naga/src/back/spv/block.rs
index 6c96fa09e3..81f2fc10e0 100644
--- a/third_party/rust/naga/src/back/spv/block.rs
+++ b/third_party/rust/naga/src/back/spv/block.rs
@@ -731,12 +731,41 @@ impl<'w> BlockContext<'w> {
Some(crate::ScalarKind::Uint) => spirv::GLOp::UMax,
other => unimplemented!("Unexpected max({:?})", other),
}),
- Mf::Clamp => MathOp::Ext(match arg_scalar_kind {
- Some(crate::ScalarKind::Float) => spirv::GLOp::FClamp,
- Some(crate::ScalarKind::Sint) => spirv::GLOp::SClamp,
- Some(crate::ScalarKind::Uint) => spirv::GLOp::UClamp,
+ Mf::Clamp => match arg_scalar_kind {
+ // Clamp is undefined if min > max. In practice this means it can use a median-of-three
+ // instruction to determine the value. This is fine according to the WGSL spec for float
+ // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
+ Some(crate::ScalarKind::Float) => MathOp::Ext(spirv::GLOp::FClamp),
+ Some(_) => {
+ let (min_op, max_op) = match arg_scalar_kind {
+ Some(crate::ScalarKind::Sint) => {
+ (spirv::GLOp::SMin, spirv::GLOp::SMax)
+ }
+ Some(crate::ScalarKind::Uint) => {
+ (spirv::GLOp::UMin, spirv::GLOp::UMax)
+ }
+ _ => unreachable!(),
+ };
+
+ let max_id = self.gen_id();
+ block.body.push(Instruction::ext_inst(
+ self.writer.gl450_ext_inst_id,
+ max_op,
+ result_type_id,
+ max_id,
+ &[arg0_id, arg1_id],
+ ));
+
+ MathOp::Custom(Instruction::ext_inst(
+ self.writer.gl450_ext_inst_id,
+ min_op,
+ result_type_id,
+ id,
+ &[max_id, arg2_id],
+ ))
+ }
other => unimplemented!("Unexpected max({:?})", other),
- }),
+ },
Mf::Saturate => {
let (maybe_size, scalar) = match *arg_ty {
crate::TypeInner::Vector { size, scalar } => (Some(size), scalar),
@@ -915,8 +944,7 @@ impl<'w> BlockContext<'w> {
)),
Mf::CountTrailingZeros => {
let uint_id = match *arg_ty {
- crate::TypeInner::Vector { size, mut scalar } => {
- scalar.kind = crate::ScalarKind::Uint;
+ crate::TypeInner::Vector { size, scalar } => {
let ty = LocalType::Value {
vector_size: Some(size),
scalar,
@@ -927,15 +955,15 @@ impl<'w> BlockContext<'w> {
self.temp_list.clear();
self.temp_list.resize(
size as _,
- self.writer.get_constant_scalar_with(32, scalar)?,
+ self.writer
+ .get_constant_scalar_with(scalar.width * 8, scalar)?,
);
self.writer.get_constant_composite(ty, &self.temp_list)
}
- crate::TypeInner::Scalar(mut scalar) => {
- scalar.kind = crate::ScalarKind::Uint;
- self.writer.get_constant_scalar_with(32, scalar)?
- }
+ crate::TypeInner::Scalar(scalar) => self
+ .writer
+ .get_constant_scalar_with(scalar.width * 8, scalar)?,
_ => unreachable!(),
};
@@ -957,9 +985,8 @@ impl<'w> BlockContext<'w> {
))
}
Mf::CountLeadingZeros => {
- let (int_type_id, int_id) = match *arg_ty {
- crate::TypeInner::Vector { size, mut scalar } => {
- scalar.kind = crate::ScalarKind::Sint;
+ let (int_type_id, int_id, width) = match *arg_ty {
+ crate::TypeInner::Vector { size, scalar } => {
let ty = LocalType::Value {
vector_size: Some(size),
scalar,
@@ -970,32 +997,41 @@ impl<'w> BlockContext<'w> {
self.temp_list.clear();
self.temp_list.resize(
size as _,
- self.writer.get_constant_scalar_with(31, scalar)?,
+ self.writer
+ .get_constant_scalar_with(scalar.width * 8 - 1, scalar)?,
);
(
self.get_type_id(ty),
self.writer.get_constant_composite(ty, &self.temp_list),
+ scalar.width,
)
}
- crate::TypeInner::Scalar(mut scalar) => {
- scalar.kind = crate::ScalarKind::Sint;
- (
- self.get_type_id(LookupType::Local(LocalType::Value {
- vector_size: None,
- scalar,
- pointer_space: None,
- })),
- self.writer.get_constant_scalar_with(31, scalar)?,
- )
- }
+ crate::TypeInner::Scalar(scalar) => (
+ self.get_type_id(LookupType::Local(LocalType::Value {
+ vector_size: None,
+ scalar,
+ pointer_space: None,
+ })),
+ self.writer
+ .get_constant_scalar_with(scalar.width * 8 - 1, scalar)?,
+ scalar.width,
+ ),
_ => unreachable!(),
};
+ if width != 4 {
+ unreachable!("This is validated out until a polyfill is implemented. https://github.com/gfx-rs/wgpu/issues/5276");
+ };
+
let msb_id = self.gen_id();
block.body.push(Instruction::ext_inst(
self.writer.gl450_ext_inst_id,
- spirv::GLOp::FindUMsb,
+ if width != 4 {
+ spirv::GLOp::FindILsb
+ } else {
+ spirv::GLOp::FindUMsb
+ },
int_type_id,
msb_id,
&[arg0_id],
@@ -1021,30 +1057,144 @@ impl<'w> BlockContext<'w> {
Some(crate::ScalarKind::Sint) => spirv::Op::BitFieldSExtract,
other => unimplemented!("Unexpected sign({:?})", other),
};
+
+ // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
+                        // to sanitize the offset and count first. If we don't do this, AMD and Intel
+ // will return out-of-spec values if the extracted range is not within the bit width.
+ //
+ // This encodes the exact formula specified by the wgsl spec:
+ // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
+ //
+ // w = sizeof(x) * 8
+ // o = min(offset, w)
+ // tmp = w - o
+ // c = min(count, tmp)
+ //
+ // bitfieldExtract(x, o, c)
+
+ let bit_width = arg_ty.scalar_width().unwrap();
+ let width_constant = self
+ .writer
+ .get_constant_scalar(crate::Literal::U32(bit_width as u32));
+
+ let u32_type = self.get_type_id(LookupType::Local(LocalType::Value {
+ vector_size: None,
+ scalar: crate::Scalar {
+ kind: crate::ScalarKind::Uint,
+ width: 4,
+ },
+ pointer_space: None,
+ }));
+
+ // o = min(offset, w)
+ let offset_id = self.gen_id();
+ block.body.push(Instruction::ext_inst(
+ self.writer.gl450_ext_inst_id,
+ spirv::GLOp::UMin,
+ u32_type,
+ offset_id,
+ &[arg1_id, width_constant],
+ ));
+
+ // tmp = w - o
+ let max_count_id = self.gen_id();
+ block.body.push(Instruction::binary(
+ spirv::Op::ISub,
+ u32_type,
+ max_count_id,
+ width_constant,
+ offset_id,
+ ));
+
+ // c = min(count, tmp)
+ let count_id = self.gen_id();
+ block.body.push(Instruction::ext_inst(
+ self.writer.gl450_ext_inst_id,
+ spirv::GLOp::UMin,
+ u32_type,
+ count_id,
+ &[arg2_id, max_count_id],
+ ));
+
MathOp::Custom(Instruction::ternary(
op,
result_type_id,
id,
arg0_id,
+ offset_id,
+ count_id,
+ ))
+ }
+ Mf::InsertBits => {
+                        // InsertBits has the same undefined behavior as ExtractBits.
+
+ let bit_width = arg_ty.scalar_width().unwrap();
+ let width_constant = self
+ .writer
+ .get_constant_scalar(crate::Literal::U32(bit_width as u32));
+
+ let u32_type = self.get_type_id(LookupType::Local(LocalType::Value {
+ vector_size: None,
+ scalar: crate::Scalar {
+ kind: crate::ScalarKind::Uint,
+ width: 4,
+ },
+ pointer_space: None,
+ }));
+
+ // o = min(offset, w)
+ let offset_id = self.gen_id();
+ block.body.push(Instruction::ext_inst(
+ self.writer.gl450_ext_inst_id,
+ spirv::GLOp::UMin,
+ u32_type,
+ offset_id,
+ &[arg2_id, width_constant],
+ ));
+
+ // tmp = w - o
+ let max_count_id = self.gen_id();
+ block.body.push(Instruction::binary(
+ spirv::Op::ISub,
+ u32_type,
+ max_count_id,
+ width_constant,
+ offset_id,
+ ));
+
+ // c = min(count, tmp)
+ let count_id = self.gen_id();
+ block.body.push(Instruction::ext_inst(
+ self.writer.gl450_ext_inst_id,
+ spirv::GLOp::UMin,
+ u32_type,
+ count_id,
+ &[arg3_id, max_count_id],
+ ));
+
+ MathOp::Custom(Instruction::quaternary(
+ spirv::Op::BitFieldInsert,
+ result_type_id,
+ id,
+ arg0_id,
arg1_id,
- arg2_id,
+ offset_id,
+ count_id,
))
}
- Mf::InsertBits => MathOp::Custom(Instruction::quaternary(
- spirv::Op::BitFieldInsert,
- result_type_id,
- id,
- arg0_id,
- arg1_id,
- arg2_id,
- arg3_id,
- )),
Mf::FindLsb => MathOp::Ext(spirv::GLOp::FindILsb),
- Mf::FindMsb => MathOp::Ext(match arg_scalar_kind {
- Some(crate::ScalarKind::Uint) => spirv::GLOp::FindUMsb,
- Some(crate::ScalarKind::Sint) => spirv::GLOp::FindSMsb,
- other => unimplemented!("Unexpected findMSB({:?})", other),
- }),
+ Mf::FindMsb => {
+ if arg_ty.scalar_width() == Some(32) {
+ let thing = match arg_scalar_kind {
+ Some(crate::ScalarKind::Uint) => spirv::GLOp::FindUMsb,
+ Some(crate::ScalarKind::Sint) => spirv::GLOp::FindSMsb,
+ other => unimplemented!("Unexpected findMSB({:?})", other),
+ };
+ MathOp::Ext(thing)
+ } else {
+ unreachable!("This is validated out until a polyfill is implemented. https://github.com/gfx-rs/wgpu/issues/5276");
+ }
+ }
Mf::Pack4x8unorm => MathOp::Ext(spirv::GLOp::PackUnorm4x8),
Mf::Pack4x8snorm => MathOp::Ext(spirv::GLOp::PackSnorm4x8),
Mf::Pack2x16float => MathOp::Ext(spirv::GLOp::PackHalf2x16),
@@ -1250,6 +1400,12 @@ impl<'w> BlockContext<'w> {
(Sk::Uint, Sk::Uint, Some(dst_width)) if src_scalar.width != dst_width => {
Cast::Unary(spirv::Op::UConvert)
}
+ (Sk::Uint, Sk::Sint, Some(dst_width)) if src_scalar.width != dst_width => {
+ Cast::Unary(spirv::Op::SConvert)
+ }
+ (Sk::Sint, Sk::Uint, Some(dst_width)) if src_scalar.width != dst_width => {
+ Cast::Unary(spirv::Op::UConvert)
+ }
// We assume it's either an identity cast, or int-uint.
_ => Cast::Unary(spirv::Op::Bitcast),
}
diff --git a/third_party/rust/naga/src/back/spv/writer.rs b/third_party/rust/naga/src/back/spv/writer.rs
index 4db86c93a7..de3220bbda 100644
--- a/third_party/rust/naga/src/back/spv/writer.rs
+++ b/third_party/rust/naga/src/back/spv/writer.rs
@@ -1182,6 +1182,9 @@ impl Writer {
crate::Literal::F32(value) => Instruction::constant_32bit(type_id, id, value.to_bits()),
crate::Literal::U32(value) => Instruction::constant_32bit(type_id, id, value),
crate::Literal::I32(value) => Instruction::constant_32bit(type_id, id, value as u32),
+ crate::Literal::U64(value) => {
+ Instruction::constant_64bit(type_id, id, value as u32, (value >> 32) as u32)
+ }
crate::Literal::I64(value) => {
Instruction::constant_64bit(type_id, id, value as u32, (value >> 32) as u32)
}
diff --git a/third_party/rust/naga/src/back/wgsl/writer.rs b/third_party/rust/naga/src/back/wgsl/writer.rs
index c737934f5e..3039cbbbe4 100644
--- a/third_party/rust/naga/src/back/wgsl/writer.rs
+++ b/third_party/rust/naga/src/back/wgsl/writer.rs
@@ -109,7 +109,7 @@ impl<W: Write> Writer<W> {
self.reset(module);
// Save all ep result types
- for (_, ep) in module.entry_points.iter().enumerate() {
+ for ep in &module.entry_points {
if let Some(ref result) = ep.function.result {
self.ep_results.push((ep.stage, result.ty));
}
@@ -593,6 +593,7 @@ impl<W: Write> Writer<W> {
}
write!(self.out, ">")?;
}
+ TypeInner::AccelerationStructure => write!(self.out, "acceleration_structure")?,
_ => {
return Err(Error::Unimplemented(format!("write_value_type {inner:?}")));
}
@@ -1095,16 +1096,24 @@ impl<W: Write> Writer<W> {
// value can only be expressed in WGSL using AbstractInt and
// a unary negation operator.
if value == i32::MIN {
- write!(self.out, "i32(-2147483648)")?;
+ write!(self.out, "i32({})", value)?;
} else {
write!(self.out, "{}i", value)?;
}
}
crate::Literal::Bool(value) => write!(self.out, "{}", value)?,
crate::Literal::F64(value) => write!(self.out, "{:?}lf", value)?,
- crate::Literal::I64(_) => {
- return Err(Error::Custom("unsupported i64 literal".to_string()));
+ crate::Literal::I64(value) => {
+ // `-9223372036854775808li` is not valid WGSL. The most negative `i64`
+ // value can only be expressed in WGSL using AbstractInt and
+ // a unary negation operator.
+ if value == i64::MIN {
+ write!(self.out, "i64({})", value)?;
+ } else {
+ write!(self.out, "{}li", value)?;
+ }
}
+ crate::Literal::U64(value) => write!(self.out, "{:?}lu", value)?,
crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
return Err(Error::Custom(
"Abstract types should not appear in IR presented to backends".into(),
@@ -1828,6 +1837,14 @@ const fn scalar_kind_str(scalar: crate::Scalar) -> &'static str {
width: 4,
} => "u32",
Scalar {
+ kind: Sk::Sint,
+ width: 8,
+ } => "i64",
+ Scalar {
+ kind: Sk::Uint,
+ width: 8,
+ } => "u64",
+ Scalar {
kind: Sk::Bool,
width: 1,
} => "bool",
diff --git a/third_party/rust/naga/src/front/glsl/functions.rs b/third_party/rust/naga/src/front/glsl/functions.rs
index df8cc8a30e..01846eb814 100644
--- a/third_party/rust/naga/src/front/glsl/functions.rs
+++ b/third_party/rust/naga/src/front/glsl/functions.rs
@@ -160,7 +160,7 @@ impl Frontend {
} => self.matrix_one_arg(ctx, ty, columns, rows, scalar, (value, expr_meta), meta)?,
TypeInner::Struct { ref members, .. } => {
let scalar_components = members
- .get(0)
+ .first()
.and_then(|member| scalar_components(&ctx.module.types[member.ty].inner));
if let Some(scalar) = scalar_components {
ctx.implicit_conversion(&mut value, expr_meta, scalar)?;
diff --git a/third_party/rust/naga/src/front/glsl/parser/functions.rs b/third_party/rust/naga/src/front/glsl/parser/functions.rs
index 38184eedf7..d428d74761 100644
--- a/third_party/rust/naga/src/front/glsl/parser/functions.rs
+++ b/third_party/rust/naga/src/front/glsl/parser/functions.rs
@@ -435,7 +435,7 @@ impl<'source> ParsingContext<'source> {
if self.bump_if(frontend, TokenValue::Semicolon).is_none() {
if self.peek_type_name(frontend) || self.peek_type_qualifier(frontend) {
- self.parse_declaration(frontend, ctx, false, false)?;
+ self.parse_declaration(frontend, ctx, false, is_inside_loop)?;
} else {
let mut stmt = ctx.stmt_ctx();
let expr = self.parse_expression(frontend, ctx, &mut stmt)?;
diff --git a/third_party/rust/naga/src/front/spv/function.rs b/third_party/rust/naga/src/front/spv/function.rs
index 198d9c52dd..e81ecf5c9b 100644
--- a/third_party/rust/naga/src/front/spv/function.rs
+++ b/third_party/rust/naga/src/front/spv/function.rs
@@ -292,278 +292,286 @@ impl<I: Iterator<Item = u32>> super::Frontend<I> {
);
if let Some(ep) = self.lookup_entry_point.remove(&fun_id) {
- // create a wrapping function
- let mut function = crate::Function {
- name: Some(format!("{}_wrap", ep.name)),
- arguments: Vec::new(),
- result: None,
- local_variables: Arena::new(),
- expressions: Arena::new(),
- named_expressions: crate::NamedExpressions::default(),
- body: crate::Block::new(),
- };
+ self.deferred_entry_points.push((ep, fun_id));
+ }
- // 1. copy the inputs from arguments to privates
- for &v_id in ep.variable_ids.iter() {
- let lvar = self.lookup_variable.lookup(v_id)?;
- if let super::Variable::Input(ref arg) = lvar.inner {
- let span = module.global_variables.get_span(lvar.handle);
- let arg_expr = function.expressions.append(
- crate::Expression::FunctionArgument(function.arguments.len() as u32),
- span,
- );
- let load_expr = if arg.ty == module.global_variables[lvar.handle].ty {
- arg_expr
- } else {
- // The only case where the type is different is if we need to treat
- // unsigned integer as signed.
- let mut emitter = Emitter::default();
- emitter.start(&function.expressions);
- let handle = function.expressions.append(
- crate::Expression::As {
- expr: arg_expr,
- kind: crate::ScalarKind::Sint,
- convert: Some(4),
- },
- span,
- );
- function.body.extend(emitter.finish(&function.expressions));
- handle
- };
- function.body.push(
- crate::Statement::Store {
- pointer: function
- .expressions
- .append(crate::Expression::GlobalVariable(lvar.handle), span),
- value: load_expr,
+ Ok(())
+ }
+
+ pub(super) fn process_entry_point(
+ &mut self,
+ module: &mut crate::Module,
+ ep: super::EntryPoint,
+ fun_id: u32,
+ ) -> Result<(), Error> {
+ // create a wrapping function
+ let mut function = crate::Function {
+ name: Some(format!("{}_wrap", ep.name)),
+ arguments: Vec::new(),
+ result: None,
+ local_variables: Arena::new(),
+ expressions: Arena::new(),
+ named_expressions: crate::NamedExpressions::default(),
+ body: crate::Block::new(),
+ };
+
+ // 1. copy the inputs from arguments to privates
+ for &v_id in ep.variable_ids.iter() {
+ let lvar = self.lookup_variable.lookup(v_id)?;
+ if let super::Variable::Input(ref arg) = lvar.inner {
+ let span = module.global_variables.get_span(lvar.handle);
+ let arg_expr = function.expressions.append(
+ crate::Expression::FunctionArgument(function.arguments.len() as u32),
+ span,
+ );
+ let load_expr = if arg.ty == module.global_variables[lvar.handle].ty {
+ arg_expr
+ } else {
+ // The only case where the type is different is if we need to treat
+ // unsigned integer as signed.
+ let mut emitter = Emitter::default();
+ emitter.start(&function.expressions);
+ let handle = function.expressions.append(
+ crate::Expression::As {
+ expr: arg_expr,
+ kind: crate::ScalarKind::Sint,
+ convert: Some(4),
},
span,
);
+ function.body.extend(emitter.finish(&function.expressions));
+ handle
+ };
+ function.body.push(
+ crate::Statement::Store {
+ pointer: function
+ .expressions
+ .append(crate::Expression::GlobalVariable(lvar.handle), span),
+ value: load_expr,
+ },
+ span,
+ );
- let mut arg = arg.clone();
- if ep.stage == crate::ShaderStage::Fragment {
- if let Some(ref mut binding) = arg.binding {
- binding.apply_default_interpolation(&module.types[arg.ty].inner);
- }
+ let mut arg = arg.clone();
+ if ep.stage == crate::ShaderStage::Fragment {
+ if let Some(ref mut binding) = arg.binding {
+ binding.apply_default_interpolation(&module.types[arg.ty].inner);
}
- function.arguments.push(arg);
}
+ function.arguments.push(arg);
}
- // 2. call the wrapped function
- let fake_id = !(module.entry_points.len() as u32); // doesn't matter, as long as it's not a collision
- let dummy_handle = self.add_call(fake_id, fun_id);
- function.body.push(
- crate::Statement::Call {
- function: dummy_handle,
- arguments: Vec::new(),
- result: None,
- },
- crate::Span::default(),
- );
-
- // 3. copy the outputs from privates to the result
- let mut members = Vec::new();
- let mut components = Vec::new();
- for &v_id in ep.variable_ids.iter() {
- let lvar = self.lookup_variable.lookup(v_id)?;
- if let super::Variable::Output(ref result) = lvar.inner {
- let span = module.global_variables.get_span(lvar.handle);
- let expr_handle = function
- .expressions
- .append(crate::Expression::GlobalVariable(lvar.handle), span);
+ }
+ // 2. call the wrapped function
+ let fake_id = !(module.entry_points.len() as u32); // doesn't matter, as long as it's not a collision
+ let dummy_handle = self.add_call(fake_id, fun_id);
+ function.body.push(
+ crate::Statement::Call {
+ function: dummy_handle,
+ arguments: Vec::new(),
+ result: None,
+ },
+ crate::Span::default(),
+ );
- // Cull problematic builtins of gl_PerVertex.
- // See the docs for `Frontend::gl_per_vertex_builtin_access`.
+ // 3. copy the outputs from privates to the result
+ let mut members = Vec::new();
+ let mut components = Vec::new();
+ for &v_id in ep.variable_ids.iter() {
+ let lvar = self.lookup_variable.lookup(v_id)?;
+ if let super::Variable::Output(ref result) = lvar.inner {
+ let span = module.global_variables.get_span(lvar.handle);
+ let expr_handle = function
+ .expressions
+ .append(crate::Expression::GlobalVariable(lvar.handle), span);
+
+ // Cull problematic builtins of gl_PerVertex.
+ // See the docs for `Frontend::gl_per_vertex_builtin_access`.
+ {
+ let ty = &module.types[result.ty];
+ if let crate::TypeInner::Struct {
+ members: ref original_members,
+ span,
+ } = ty.inner
{
- let ty = &module.types[result.ty];
- match ty.inner {
- crate::TypeInner::Struct {
- members: ref original_members,
- span,
- } if ty.name.as_deref() == Some("gl_PerVertex") => {
- let mut new_members = original_members.clone();
- for member in &mut new_members {
- if let Some(crate::Binding::BuiltIn(built_in)) = member.binding
- {
- if !self.gl_per_vertex_builtin_access.contains(&built_in) {
- member.binding = None
- }
- }
- }
- if &new_members != original_members {
- module.types.replace(
- result.ty,
- crate::Type {
- name: ty.name.clone(),
- inner: crate::TypeInner::Struct {
- members: new_members,
- span,
- },
- },
- );
+ let mut new_members = None;
+ for (idx, member) in original_members.iter().enumerate() {
+ if let Some(crate::Binding::BuiltIn(built_in)) = member.binding {
+ if !self.gl_per_vertex_builtin_access.contains(&built_in) {
+ new_members.get_or_insert_with(|| original_members.clone())
+ [idx]
+ .binding = None;
}
}
- _ => {}
+ }
+ if let Some(new_members) = new_members {
+ module.types.replace(
+ result.ty,
+ crate::Type {
+ name: ty.name.clone(),
+ inner: crate::TypeInner::Struct {
+ members: new_members,
+ span,
+ },
+ },
+ );
}
}
+ }
- match module.types[result.ty].inner {
- crate::TypeInner::Struct {
- members: ref sub_members,
- ..
- } => {
- for (index, sm) in sub_members.iter().enumerate() {
- if sm.binding.is_none() {
- continue;
- }
- let mut sm = sm.clone();
-
- if let Some(ref mut binding) = sm.binding {
- if ep.stage == crate::ShaderStage::Vertex {
- binding.apply_default_interpolation(
- &module.types[sm.ty].inner,
- );
- }
- }
-
- members.push(sm);
-
- components.push(function.expressions.append(
- crate::Expression::AccessIndex {
- base: expr_handle,
- index: index as u32,
- },
- span,
- ));
+ match module.types[result.ty].inner {
+ crate::TypeInner::Struct {
+ members: ref sub_members,
+ ..
+ } => {
+ for (index, sm) in sub_members.iter().enumerate() {
+ if sm.binding.is_none() {
+ continue;
}
- }
- ref inner => {
- let mut binding = result.binding.clone();
- if let Some(ref mut binding) = binding {
+ let mut sm = sm.clone();
+
+ if let Some(ref mut binding) = sm.binding {
if ep.stage == crate::ShaderStage::Vertex {
- binding.apply_default_interpolation(inner);
+ binding.apply_default_interpolation(&module.types[sm.ty].inner);
}
}
- members.push(crate::StructMember {
- name: None,
- ty: result.ty,
- binding,
- offset: 0,
- });
- // populate just the globals first, then do `Load` in a
- // separate step, so that we can get a range.
- components.push(expr_handle);
+ members.push(sm);
+
+ components.push(function.expressions.append(
+ crate::Expression::AccessIndex {
+ base: expr_handle,
+ index: index as u32,
+ },
+ span,
+ ));
}
}
- }
- }
+ ref inner => {
+ let mut binding = result.binding.clone();
+ if let Some(ref mut binding) = binding {
+ if ep.stage == crate::ShaderStage::Vertex {
+ binding.apply_default_interpolation(inner);
+ }
+ }
- for (member_index, member) in members.iter().enumerate() {
- match member.binding {
- Some(crate::Binding::BuiltIn(crate::BuiltIn::Position { .. }))
- if self.options.adjust_coordinate_space =>
- {
- let mut emitter = Emitter::default();
- emitter.start(&function.expressions);
- let global_expr = components[member_index];
- let span = function.expressions.get_span(global_expr);
- let access_expr = function.expressions.append(
- crate::Expression::AccessIndex {
- base: global_expr,
- index: 1,
- },
- span,
- );
- let load_expr = function.expressions.append(
- crate::Expression::Load {
- pointer: access_expr,
- },
- span,
- );
- let neg_expr = function.expressions.append(
- crate::Expression::Unary {
- op: crate::UnaryOperator::Negate,
- expr: load_expr,
- },
- span,
- );
- function.body.extend(emitter.finish(&function.expressions));
- function.body.push(
- crate::Statement::Store {
- pointer: access_expr,
- value: neg_expr,
- },
- span,
- );
+ members.push(crate::StructMember {
+ name: None,
+ ty: result.ty,
+ binding,
+ offset: 0,
+ });
+ // populate just the globals first, then do `Load` in a
+ // separate step, so that we can get a range.
+ components.push(expr_handle);
}
- _ => {}
}
}
+ }
- let mut emitter = Emitter::default();
- emitter.start(&function.expressions);
- for component in components.iter_mut() {
- let load_expr = crate::Expression::Load {
- pointer: *component,
- };
- let span = function.expressions.get_span(*component);
- *component = function.expressions.append(load_expr, span);
- }
-
- match members[..] {
- [] => {}
- [ref member] => {
- function.body.extend(emitter.finish(&function.expressions));
- let span = function.expressions.get_span(components[0]);
- function.body.push(
- crate::Statement::Return {
- value: components.first().cloned(),
+ for (member_index, member) in members.iter().enumerate() {
+ match member.binding {
+ Some(crate::Binding::BuiltIn(crate::BuiltIn::Position { .. }))
+ if self.options.adjust_coordinate_space =>
+ {
+ let mut emitter = Emitter::default();
+ emitter.start(&function.expressions);
+ let global_expr = components[member_index];
+ let span = function.expressions.get_span(global_expr);
+ let access_expr = function.expressions.append(
+ crate::Expression::AccessIndex {
+ base: global_expr,
+ index: 1,
},
span,
);
- function.result = Some(crate::FunctionResult {
- ty: member.ty,
- binding: member.binding.clone(),
- });
- }
- _ => {
- let span = crate::Span::total_span(
- components.iter().map(|h| function.expressions.get_span(*h)),
+ let load_expr = function.expressions.append(
+ crate::Expression::Load {
+ pointer: access_expr,
+ },
+ span,
);
- let ty = module.types.insert(
- crate::Type {
- name: None,
- inner: crate::TypeInner::Struct {
- members,
- span: 0xFFFF, // shouldn't matter
- },
+ let neg_expr = function.expressions.append(
+ crate::Expression::Unary {
+ op: crate::UnaryOperator::Negate,
+ expr: load_expr,
},
span,
);
- let result_expr = function
- .expressions
- .append(crate::Expression::Compose { ty, components }, span);
function.body.extend(emitter.finish(&function.expressions));
function.body.push(
- crate::Statement::Return {
- value: Some(result_expr),
+ crate::Statement::Store {
+ pointer: access_expr,
+ value: neg_expr,
},
span,
);
- function.result = Some(crate::FunctionResult { ty, binding: None });
}
+ _ => {}
}
+ }
- module.entry_points.push(crate::EntryPoint {
- name: ep.name,
- stage: ep.stage,
- early_depth_test: ep.early_depth_test,
- workgroup_size: ep.workgroup_size,
- function,
- });
+ let mut emitter = Emitter::default();
+ emitter.start(&function.expressions);
+ for component in components.iter_mut() {
+ let load_expr = crate::Expression::Load {
+ pointer: *component,
+ };
+ let span = function.expressions.get_span(*component);
+ *component = function.expressions.append(load_expr, span);
}
+ match members[..] {
+ [] => {}
+ [ref member] => {
+ function.body.extend(emitter.finish(&function.expressions));
+ let span = function.expressions.get_span(components[0]);
+ function.body.push(
+ crate::Statement::Return {
+ value: components.first().cloned(),
+ },
+ span,
+ );
+ function.result = Some(crate::FunctionResult {
+ ty: member.ty,
+ binding: member.binding.clone(),
+ });
+ }
+ _ => {
+ let span = crate::Span::total_span(
+ components.iter().map(|h| function.expressions.get_span(*h)),
+ );
+ let ty = module.types.insert(
+ crate::Type {
+ name: None,
+ inner: crate::TypeInner::Struct {
+ members,
+ span: 0xFFFF, // shouldn't matter
+ },
+ },
+ span,
+ );
+ let result_expr = function
+ .expressions
+ .append(crate::Expression::Compose { ty, components }, span);
+ function.body.extend(emitter.finish(&function.expressions));
+ function.body.push(
+ crate::Statement::Return {
+ value: Some(result_expr),
+ },
+ span,
+ );
+ function.result = Some(crate::FunctionResult { ty, binding: None });
+ }
+ }
+
+ module.entry_points.push(crate::EntryPoint {
+ name: ep.name,
+ stage: ep.stage,
+ early_depth_test: ep.early_depth_test,
+ workgroup_size: ep.workgroup_size,
+ function,
+ });
+
Ok(())
}
}
diff --git a/third_party/rust/naga/src/front/spv/mod.rs b/third_party/rust/naga/src/front/spv/mod.rs
index 8b1c854358..b793448597 100644
--- a/third_party/rust/naga/src/front/spv/mod.rs
+++ b/third_party/rust/naga/src/front/spv/mod.rs
@@ -577,6 +577,9 @@ pub struct Frontend<I> {
lookup_function_type: FastHashMap<spirv::Word, LookupFunctionType>,
lookup_function: FastHashMap<spirv::Word, LookupFunction>,
lookup_entry_point: FastHashMap<spirv::Word, EntryPoint>,
+ // When parsing functions, each entry point function gets an entry here so that additional
+ // processing for them can be performed after all function parsing.
+ deferred_entry_points: Vec<(EntryPoint, spirv::Word)>,
//Note: each `OpFunctionCall` gets a single entry here, indexed by the
// dummy `Handle<crate::Function>` of the call site.
deferred_function_calls: Vec<spirv::Word>,
@@ -628,6 +631,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
lookup_function_type: FastHashMap::default(),
lookup_function: FastHashMap::default(),
lookup_entry_point: FastHashMap::default(),
+ deferred_entry_points: Vec::default(),
deferred_function_calls: Vec::default(),
dummy_functions: Arena::new(),
function_call_graph: GraphMap::new(),
@@ -1561,12 +1565,10 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
span,
);
- if ty.name.as_deref() == Some("gl_PerVertex") {
- if let Some(crate::Binding::BuiltIn(built_in)) =
- members[index as usize].binding
- {
- self.gl_per_vertex_builtin_access.insert(built_in);
- }
+ if let Some(crate::Binding::BuiltIn(built_in)) =
+ members[index as usize].binding
+ {
+ self.gl_per_vertex_builtin_access.insert(built_in);
}
AccessExpression {
@@ -3956,6 +3958,12 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
}?;
}
+ // Do entry point specific processing after all functions are parsed so that we can
+ // cull unused problematic builtins of gl_PerVertex.
+ for (ep, fun_id) in core::mem::take(&mut self.deferred_entry_points) {
+ self.process_entry_point(&mut module, ep, fun_id)?;
+ }
+
log::info!("Patching...");
{
let mut nodes = petgraph::algo::toposort(&self.function_call_graph, None)
@@ -4868,6 +4876,11 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
let low = self.next()?;
match width {
4 => crate::Literal::U32(low),
+ 8 => {
+ inst.expect(5)?;
+ let high = self.next()?;
+ crate::Literal::U64(u64::from(high) << 32 | u64::from(low))
+ }
_ => return Err(Error::InvalidTypeWidth(width as u32)),
}
}
@@ -5081,7 +5094,7 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
None
};
let span = self.span_from_with_op(start);
- let mut dec = self.future_decor.remove(&id).unwrap_or_default();
+ let dec = self.future_decor.remove(&id).unwrap_or_default();
let original_ty = self.lookup_type.lookup(type_id)?.handle;
let mut ty = original_ty;
@@ -5127,17 +5140,6 @@ impl<I: Iterator<Item = u32>> Frontend<I> {
None => map_storage_class(storage_class)?,
};
- // Fix empty name for gl_PerVertex struct generated by glslang
- if let crate::TypeInner::Pointer { .. } = module.types[original_ty].inner {
- if ext_class == ExtendedClass::Input || ext_class == ExtendedClass::Output {
- if let Some(ref dec_name) = dec.name {
- if dec_name.is_empty() {
- dec.name = Some("perVertexStruct".to_string())
- }
- }
- }
- }
-
let (inner, var) = match ext_class {
ExtendedClass::Global(mut space) => {
if let crate::AddressSpace::Storage { ref mut access } = space {
@@ -5323,6 +5325,21 @@ pub fn parse_u8_slice(data: &[u8], options: &Options) -> Result<crate::Module, E
Frontend::new(words, options).parse()
}
+/// Helper function to check if `child` is in the scope of `parent`
+fn is_parent(mut child: usize, parent: usize, block_ctx: &BlockContext) -> bool {
+ loop {
+ if child == parent {
+            // The child is in the parent's scope
+            break true;
+        } else if child == 0 {
+            // Search finished at the root; the child isn't in the parent's body
+ break false;
+ }
+
+ child = block_ctx.bodies[child].parent;
+ }
+}
+
#[cfg(test)]
mod test {
#[test]
@@ -5339,18 +5356,3 @@ mod test {
let _ = super::parse_u8_slice(&bin, &Default::default()).unwrap();
}
}
-
-/// Helper function to check if `child` is in the scope of `parent`
-fn is_parent(mut child: usize, parent: usize, block_ctx: &BlockContext) -> bool {
- loop {
- if child == parent {
- // The child is in the scope parent
- break true;
- } else if child == 0 {
- // Searched finished at the root the child isn't in the parent's body
- break false;
- }
-
- child = block_ctx.bodies[child].parent;
- }
-}
diff --git a/third_party/rust/naga/src/front/wgsl/error.rs b/third_party/rust/naga/src/front/wgsl/error.rs
index 07e68f8dd9..54aa8296b1 100644
--- a/third_party/rust/naga/src/front/wgsl/error.rs
+++ b/third_party/rust/naga/src/front/wgsl/error.rs
@@ -87,7 +87,7 @@ impl ParseError {
/// Returns a [`SourceLocation`] for the first label in the error message.
pub fn location(&self, source: &str) -> Option<SourceLocation> {
- self.labels.get(0).map(|label| label.0.location(source))
+ self.labels.first().map(|label| label.0.location(source))
}
}
diff --git a/third_party/rust/naga/src/front/wgsl/lower/mod.rs b/third_party/rust/naga/src/front/wgsl/lower/mod.rs
index ba9b49e135..2ca6c182b7 100644
--- a/third_party/rust/naga/src/front/wgsl/lower/mod.rs
+++ b/third_party/rust/naga/src/front/wgsl/lower/mod.rs
@@ -1530,6 +1530,8 @@ impl<'source, 'temp> Lowerer<'source, 'temp> {
ast::Literal::Number(Number::F32(f)) => crate::Literal::F32(f),
ast::Literal::Number(Number::I32(i)) => crate::Literal::I32(i),
ast::Literal::Number(Number::U32(u)) => crate::Literal::U32(u),
+ ast::Literal::Number(Number::I64(i)) => crate::Literal::I64(i),
+ ast::Literal::Number(Number::U64(u)) => crate::Literal::U64(u),
ast::Literal::Number(Number::F64(f)) => crate::Literal::F64(f),
ast::Literal::Number(Number::AbstractInt(i)) => crate::Literal::AbstractInt(i),
ast::Literal::Number(Number::AbstractFloat(f)) => {
diff --git a/third_party/rust/naga/src/front/wgsl/parse/conv.rs b/third_party/rust/naga/src/front/wgsl/parse/conv.rs
index 08f1e39285..1a4911a3bd 100644
--- a/third_party/rust/naga/src/front/wgsl/parse/conv.rs
+++ b/third_party/rust/naga/src/front/wgsl/parse/conv.rs
@@ -124,6 +124,14 @@ pub fn get_scalar_type(word: &str) -> Option<Scalar> {
kind: Sk::Uint,
width: 4,
}),
+ "i64" => Some(Scalar {
+ kind: Sk::Sint,
+ width: 8,
+ }),
+ "u64" => Some(Scalar {
+ kind: Sk::Uint,
+ width: 8,
+ }),
"bool" => Some(Scalar {
kind: Sk::Bool,
width: crate::BOOL_WIDTH,
diff --git a/third_party/rust/naga/src/front/wgsl/parse/number.rs b/third_party/rust/naga/src/front/wgsl/parse/number.rs
index 7b09ac59bb..ceb2cb336c 100644
--- a/third_party/rust/naga/src/front/wgsl/parse/number.rs
+++ b/third_party/rust/naga/src/front/wgsl/parse/number.rs
@@ -12,6 +12,10 @@ pub enum Number {
I32(i32),
/// Concrete u32
U32(u32),
+ /// Concrete i64
+ I64(i64),
+ /// Concrete u64
+ U64(u64),
/// Concrete f32
F32(f32),
/// Concrete f64
@@ -31,6 +35,8 @@ enum Kind {
enum IntKind {
I32,
U32,
+ I64,
+ U64,
}
#[derive(Debug)]
@@ -270,6 +276,8 @@ fn parse(input: &str) -> (Result<Number, NumberError>, &str) {
let kind = consume_map!(bytes, [
b'i' => Kind::Int(IntKind::I32),
b'u' => Kind::Int(IntKind::U32),
+ b'l', b'i' => Kind::Int(IntKind::I64),
+ b'l', b'u' => Kind::Int(IntKind::U64),
b'h' => Kind::Float(FloatKind::F16),
b'f' => Kind::Float(FloatKind::F32),
b'l', b'f' => Kind::Float(FloatKind::F64),
@@ -416,5 +424,13 @@ fn parse_int(input: &str, kind: Option<IntKind>, radix: u32) -> Result<Number, N
Ok(num) => Ok(Number::U32(num)),
Err(e) => Err(map_err(e)),
},
+ Some(IntKind::I64) => match i64::from_str_radix(input, radix) {
+ Ok(num) => Ok(Number::I64(num)),
+ Err(e) => Err(map_err(e)),
+ },
+ Some(IntKind::U64) => match u64::from_str_radix(input, radix) {
+ Ok(num) => Ok(Number::U64(num)),
+ Err(e) => Err(map_err(e)),
+ },
}
}
diff --git a/third_party/rust/naga/src/front/wgsl/tests.rs b/third_party/rust/naga/src/front/wgsl/tests.rs
index eb2f8a2eb3..cc3d858317 100644
--- a/third_party/rust/naga/src/front/wgsl/tests.rs
+++ b/third_party/rust/naga/src/front/wgsl/tests.rs
@@ -17,6 +17,7 @@ fn parse_comment() {
#[test]
fn parse_types() {
parse_str("const a : i32 = 2;").unwrap();
+ parse_str("const a : u64 = 2lu;").unwrap();
assert!(parse_str("const a : x32 = 2;").is_err());
parse_str("var t: texture_2d<f32>;").unwrap();
parse_str("var t: texture_cube_array<i32>;").unwrap();
diff --git a/third_party/rust/naga/src/keywords/wgsl.rs b/third_party/rust/naga/src/keywords/wgsl.rs
index 7b47a13128..683840dc1f 100644
--- a/third_party/rust/naga/src/keywords/wgsl.rs
+++ b/third_party/rust/naga/src/keywords/wgsl.rs
@@ -14,6 +14,7 @@ pub const RESERVED: &[&str] = &[
"f32",
"f16",
"i32",
+ "i64",
"mat2x2",
"mat2x3",
"mat2x4",
@@ -43,6 +44,7 @@ pub const RESERVED: &[&str] = &[
"texture_depth_cube_array",
"texture_depth_multisampled_2d",
"u32",
+ "u64",
"vec2",
"vec3",
"vec4",
diff --git a/third_party/rust/naga/src/lib.rs b/third_party/rust/naga/src/lib.rs
index d6b9c6a7f4..4b45174300 100644
--- a/third_party/rust/naga/src/lib.rs
+++ b/third_party/rust/naga/src/lib.rs
@@ -252,7 +252,8 @@ An override expression can be evaluated at pipeline creation time.
clippy::collapsible_if,
clippy::derive_partial_eq_without_eq,
clippy::needless_borrowed_reference,
- clippy::single_match
+ clippy::single_match,
+ clippy::enum_variant_names
)]
#![warn(
trivial_casts,
@@ -490,7 +491,7 @@ pub enum ScalarKind {
}
/// Characteristics of a scalar type.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "serialize", derive(Serialize))]
#[cfg_attr(feature = "deserialize", derive(Deserialize))]
#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
@@ -884,6 +885,7 @@ pub enum Literal {
F32(f32),
U32(u32),
I32(i32),
+ U64(u64),
I64(i64),
Bool(bool),
AbstractInt(i64),
@@ -1255,15 +1257,18 @@ pub enum SampleLevel {
#[cfg_attr(feature = "arbitrary", derive(Arbitrary))]
pub enum ImageQuery {
/// Get the size at the specified level.
+ ///
+ /// The return value is a `u32` for 1D images, and a `vecN<u32>`
+ /// for an image with dimensions N > 2.
Size {
/// If `None`, the base level is considered.
level: Option<Handle<Expression>>,
},
- /// Get the number of mipmap levels.
+ /// Get the number of mipmap levels, a `u32`.
NumLevels,
- /// Get the number of array layers.
+ /// Get the number of array layers, a `u32`.
NumLayers,
- /// Get the number of samples.
+ /// Get the number of samples, a `u32`.
NumSamples,
}
@@ -1683,6 +1688,10 @@ pub enum Statement {
/// A block containing more statements, to be executed sequentially.
Block(Block),
/// Conditionally executes one of two blocks, based on the value of the condition.
+ ///
+ /// Naga IR does not have "phi" instructions. If you need to use
+ /// values computed in an `accept` or `reject` block after the `If`,
+ /// store them in a [`LocalVariable`].
If {
condition: Handle<Expression>, //bool
accept: Block,
@@ -1702,6 +1711,10 @@ pub enum Statement {
/// represented in the IR as a series of fallthrough cases with empty
/// bodies, except for the last.
///
+ /// Naga IR does not have "phi" instructions. If you need to use
+ /// values computed in a [`SwitchCase::body`] block after the `Switch`,
+ /// store them in a [`LocalVariable`].
+ ///
/// [`value`]: SwitchCase::value
/// [`body`]: SwitchCase::body
/// [`Default`]: SwitchValue::Default
@@ -1736,6 +1749,10 @@ pub enum Statement {
/// if" statement in WGSL, or a loop whose back edge is an
/// `OpBranchConditional` instruction in SPIR-V.
///
+ /// Naga IR does not have "phi" instructions. If you need to use
+ /// values computed in a `body` or `continuing` block after the
+ /// `Loop`, store them in a [`LocalVariable`].
+ ///
/// [`Break`]: Statement::Break
/// [`Continue`]: Statement::Continue
/// [`Kill`]: Statement::Kill
diff --git a/third_party/rust/naga/src/proc/constant_evaluator.rs b/third_party/rust/naga/src/proc/constant_evaluator.rs
index b3884b04b1..983af3718c 100644
--- a/third_party/rust/naga/src/proc/constant_evaluator.rs
+++ b/third_party/rust/naga/src/proc/constant_evaluator.rs
@@ -31,7 +31,7 @@ macro_rules! gen_component_wise_extractor {
$(
#[doc = concat!(
"Maps to [`Literal::",
- stringify!($mapping),
+ stringify!($literal),
"`]",
)]
$mapping([$ty; N]),
@@ -200,6 +200,8 @@ gen_component_wise_extractor! {
AbstractInt => AbstractInt: i64,
U32 => U32: u32,
I32 => I32: i32,
+ U64 => U64: u64,
+ I64 => I64: i64,
],
scalar_kinds: [
Float,
@@ -847,6 +849,8 @@ impl<'a> ConstantEvaluator<'a> {
Scalar::AbstractInt([e]) => Ok(Scalar::AbstractInt([e.abs()])),
Scalar::I32([e]) => Ok(Scalar::I32([e.wrapping_abs()])),
Scalar::U32([e]) => Ok(Scalar::U32([e])), // TODO: just re-use the expression, ezpz
+ Scalar::I64([e]) => Ok(Scalar::I64([e.wrapping_abs()])),
+ Scalar::U64([e]) => Ok(Scalar::U64([e])),
})
}
crate::MathFunction::Min => {
@@ -1280,7 +1284,7 @@ impl<'a> ConstantEvaluator<'a> {
Literal::U32(v) => v as i32,
Literal::F32(v) => v as i32,
Literal::Bool(v) => v as i32,
- Literal::F64(_) | Literal::I64(_) => {
+ Literal::F64(_) | Literal::I64(_) | Literal::U64(_) => {
return make_error();
}
Literal::AbstractInt(v) => i32::try_from_abstract(v)?,
@@ -1291,18 +1295,40 @@ impl<'a> ConstantEvaluator<'a> {
Literal::U32(v) => v,
Literal::F32(v) => v as u32,
Literal::Bool(v) => v as u32,
- Literal::F64(_) | Literal::I64(_) => {
+ Literal::F64(_) | Literal::I64(_) | Literal::U64(_) => {
return make_error();
}
Literal::AbstractInt(v) => u32::try_from_abstract(v)?,
Literal::AbstractFloat(v) => u32::try_from_abstract(v)?,
}),
+ Sc::I64 => Literal::I64(match literal {
+ Literal::I32(v) => v as i64,
+ Literal::U32(v) => v as i64,
+ Literal::F32(v) => v as i64,
+ Literal::Bool(v) => v as i64,
+ Literal::F64(v) => v as i64,
+ Literal::I64(v) => v,
+ Literal::U64(v) => v as i64,
+ Literal::AbstractInt(v) => i64::try_from_abstract(v)?,
+ Literal::AbstractFloat(v) => i64::try_from_abstract(v)?,
+ }),
+ Sc::U64 => Literal::U64(match literal {
+ Literal::I32(v) => v as u64,
+ Literal::U32(v) => v as u64,
+ Literal::F32(v) => v as u64,
+ Literal::Bool(v) => v as u64,
+ Literal::F64(v) => v as u64,
+ Literal::I64(v) => v as u64,
+ Literal::U64(v) => v,
+ Literal::AbstractInt(v) => u64::try_from_abstract(v)?,
+ Literal::AbstractFloat(v) => u64::try_from_abstract(v)?,
+ }),
Sc::F32 => Literal::F32(match literal {
Literal::I32(v) => v as f32,
Literal::U32(v) => v as f32,
Literal::F32(v) => v,
Literal::Bool(v) => v as u32 as f32,
- Literal::F64(_) | Literal::I64(_) => {
+ Literal::F64(_) | Literal::I64(_) | Literal::U64(_) => {
return make_error();
}
Literal::AbstractInt(v) => f32::try_from_abstract(v)?,
@@ -1314,7 +1340,7 @@ impl<'a> ConstantEvaluator<'a> {
Literal::F32(v) => v as f64,
Literal::F64(v) => v,
Literal::Bool(v) => v as u32 as f64,
- Literal::I64(_) => return make_error(),
+ Literal::I64(_) | Literal::U64(_) => return make_error(),
Literal::AbstractInt(v) => f64::try_from_abstract(v)?,
Literal::AbstractFloat(v) => f64::try_from_abstract(v)?,
}),
@@ -1325,6 +1351,7 @@ impl<'a> ConstantEvaluator<'a> {
Literal::Bool(v) => v,
Literal::F64(_)
| Literal::I64(_)
+ | Literal::U64(_)
| Literal::AbstractInt(_)
| Literal::AbstractFloat(_) => {
return make_error();
@@ -1877,6 +1904,122 @@ impl<'a> ConstantEvaluator<'a> {
}
}
+/// Trait for conversions of abstract values to concrete types.
+trait TryFromAbstract<T>: Sized {
+ /// Convert an abstract literal `value` to `Self`.
+ ///
+ /// Since Naga's `AbstractInt` and `AbstractFloat` exist to support
+ /// WGSL, we follow WGSL's conversion rules here:
+ ///
+ /// - WGSL §6.1.2. Conversion Rank says that automatic conversions
+ /// to integers are either lossless or an error.
+ ///
+ /// - WGSL §14.6.4 Floating Point Conversion says that conversions
+ /// to floating point in constant expressions and override
+ /// expressions are errors if the value is out of range for the
+ /// destination type, but rounding is okay.
+ ///
+ /// [`AbstractInt`]: crate::Literal::AbstractInt
+ /// [`Float`]: crate::Literal::Float
+ fn try_from_abstract(value: T) -> Result<Self, ConstantEvaluatorError>;
+}
+
+impl TryFromAbstract<i64> for i32 {
+ fn try_from_abstract(value: i64) -> Result<i32, ConstantEvaluatorError> {
+ i32::try_from(value).map_err(|_| ConstantEvaluatorError::AutomaticConversionLossy {
+ value: format!("{value:?}"),
+ to_type: "i32",
+ })
+ }
+}
+
+impl TryFromAbstract<i64> for u32 {
+ fn try_from_abstract(value: i64) -> Result<u32, ConstantEvaluatorError> {
+ u32::try_from(value).map_err(|_| ConstantEvaluatorError::AutomaticConversionLossy {
+ value: format!("{value:?}"),
+ to_type: "u32",
+ })
+ }
+}
+
+impl TryFromAbstract<i64> for u64 {
+ fn try_from_abstract(value: i64) -> Result<u64, ConstantEvaluatorError> {
+ u64::try_from(value).map_err(|_| ConstantEvaluatorError::AutomaticConversionLossy {
+ value: format!("{value:?}"),
+ to_type: "u64",
+ })
+ }
+}
+
+impl TryFromAbstract<i64> for i64 {
+ fn try_from_abstract(value: i64) -> Result<i64, ConstantEvaluatorError> {
+ Ok(value)
+ }
+}
+
+impl TryFromAbstract<i64> for f32 {
+ fn try_from_abstract(value: i64) -> Result<Self, ConstantEvaluatorError> {
+ let f = value as f32;
+ // The range of `i64` is roughly ±18 × 10¹⁸, whereas the range of
+ // `f32` is roughly ±3.4 × 10³⁸, so there's no opportunity for
+ // overflow here.
+ Ok(f)
+ }
+}
+
+impl TryFromAbstract<f64> for f32 {
+ fn try_from_abstract(value: f64) -> Result<f32, ConstantEvaluatorError> {
+ let f = value as f32;
+ if f.is_infinite() {
+ return Err(ConstantEvaluatorError::AutomaticConversionLossy {
+ value: format!("{value:?}"),
+ to_type: "f32",
+ });
+ }
+ Ok(f)
+ }
+}
+
+impl TryFromAbstract<i64> for f64 {
+ fn try_from_abstract(value: i64) -> Result<Self, ConstantEvaluatorError> {
+ let f = value as f64;
+ // The range of `i64` is roughly ±18 × 10¹⁸, whereas the range of
+ // `f64` is roughly ±1.8 × 10³⁰⁸, so there's no opportunity for
+ // overflow here.
+ Ok(f)
+ }
+}
+
+impl TryFromAbstract<f64> for f64 {
+ fn try_from_abstract(value: f64) -> Result<f64, ConstantEvaluatorError> {
+ Ok(value)
+ }
+}
+
+impl TryFromAbstract<f64> for i32 {
+ fn try_from_abstract(_: f64) -> Result<Self, ConstantEvaluatorError> {
+ Err(ConstantEvaluatorError::AutomaticConversionFloatToInt { to_type: "i32" })
+ }
+}
+
+impl TryFromAbstract<f64> for u32 {
+ fn try_from_abstract(_: f64) -> Result<Self, ConstantEvaluatorError> {
+ Err(ConstantEvaluatorError::AutomaticConversionFloatToInt { to_type: "u32" })
+ }
+}
+
+impl TryFromAbstract<f64> for i64 {
+ fn try_from_abstract(_: f64) -> Result<Self, ConstantEvaluatorError> {
+ Err(ConstantEvaluatorError::AutomaticConversionFloatToInt { to_type: "i64" })
+ }
+}
+
+impl TryFromAbstract<f64> for u64 {
+ fn try_from_abstract(_: f64) -> Result<Self, ConstantEvaluatorError> {
+ Err(ConstantEvaluatorError::AutomaticConversionFloatToInt { to_type: "u64" })
+ }
+}
+
#[cfg(test)]
mod tests {
use std::vec;
@@ -2384,92 +2527,3 @@ mod tests {
}
}
}
-
-/// Trait for conversions of abstract values to concrete types.
-trait TryFromAbstract<T>: Sized {
- /// Convert an abstract literal `value` to `Self`.
- ///
- /// Since Naga's `AbstractInt` and `AbstractFloat` exist to support
- /// WGSL, we follow WGSL's conversion rules here:
- ///
- /// - WGSL §6.1.2. Conversion Rank says that automatic conversions
- /// to integers are either lossless or an error.
- ///
- /// - WGSL §14.6.4 Floating Point Conversion says that conversions
- /// to floating point in constant expressions and override
- /// expressions are errors if the value is out of range for the
- /// destination type, but rounding is okay.
- ///
- /// [`AbstractInt`]: crate::Literal::AbstractInt
- /// [`Float`]: crate::Literal::Float
- fn try_from_abstract(value: T) -> Result<Self, ConstantEvaluatorError>;
-}
-
-impl TryFromAbstract<i64> for i32 {
- fn try_from_abstract(value: i64) -> Result<i32, ConstantEvaluatorError> {
- i32::try_from(value).map_err(|_| ConstantEvaluatorError::AutomaticConversionLossy {
- value: format!("{value:?}"),
- to_type: "i32",
- })
- }
-}
-
-impl TryFromAbstract<i64> for u32 {
- fn try_from_abstract(value: i64) -> Result<u32, ConstantEvaluatorError> {
- u32::try_from(value).map_err(|_| ConstantEvaluatorError::AutomaticConversionLossy {
- value: format!("{value:?}"),
- to_type: "u32",
- })
- }
-}
-
-impl TryFromAbstract<i64> for f32 {
- fn try_from_abstract(value: i64) -> Result<Self, ConstantEvaluatorError> {
- let f = value as f32;
- // The range of `i64` is roughly ±18 × 10¹⁸, whereas the range of
- // `f32` is roughly ±3.4 × 10³⁸, so there's no opportunity for
- // overflow here.
- Ok(f)
- }
-}
-
-impl TryFromAbstract<f64> for f32 {
- fn try_from_abstract(value: f64) -> Result<f32, ConstantEvaluatorError> {
- let f = value as f32;
- if f.is_infinite() {
- return Err(ConstantEvaluatorError::AutomaticConversionLossy {
- value: format!("{value:?}"),
- to_type: "f32",
- });
- }
- Ok(f)
- }
-}
-
-impl TryFromAbstract<i64> for f64 {
- fn try_from_abstract(value: i64) -> Result<Self, ConstantEvaluatorError> {
- let f = value as f64;
- // The range of `i64` is roughly ±18 × 10¹⁸, whereas the range of
- // `f64` is roughly ±1.8 × 10³⁰⁸, so there's no opportunity for
- // overflow here.
- Ok(f)
- }
-}
-
-impl TryFromAbstract<f64> for f64 {
- fn try_from_abstract(value: f64) -> Result<f64, ConstantEvaluatorError> {
- Ok(value)
- }
-}
-
-impl TryFromAbstract<f64> for i32 {
- fn try_from_abstract(_: f64) -> Result<Self, ConstantEvaluatorError> {
- Err(ConstantEvaluatorError::AutomaticConversionFloatToInt { to_type: "i32" })
- }
-}
-
-impl TryFromAbstract<f64> for u32 {
- fn try_from_abstract(_: f64) -> Result<Self, ConstantEvaluatorError> {
- Err(ConstantEvaluatorError::AutomaticConversionFloatToInt { to_type: "u32" })
- }
-}
diff --git a/third_party/rust/naga/src/proc/mod.rs b/third_party/rust/naga/src/proc/mod.rs
index b9ce80b5ea..46cbb6c3b3 100644
--- a/third_party/rust/naga/src/proc/mod.rs
+++ b/third_party/rust/naga/src/proc/mod.rs
@@ -102,6 +102,10 @@ impl super::Scalar {
kind: crate::ScalarKind::Sint,
width: 8,
};
+ pub const U64: Self = Self {
+ kind: crate::ScalarKind::Uint,
+ width: 8,
+ };
pub const BOOL: Self = Self {
kind: crate::ScalarKind::Bool,
width: crate::BOOL_WIDTH,
@@ -156,6 +160,7 @@ impl PartialEq for crate::Literal {
(Self::F32(a), Self::F32(b)) => a.to_bits() == b.to_bits(),
(Self::U32(a), Self::U32(b)) => a == b,
(Self::I32(a), Self::I32(b)) => a == b,
+ (Self::U64(a), Self::U64(b)) => a == b,
(Self::I64(a), Self::I64(b)) => a == b,
(Self::Bool(a), Self::Bool(b)) => a == b,
_ => false,
@@ -186,10 +191,18 @@ impl std::hash::Hash for crate::Literal {
hasher.write_u8(4);
v.hash(hasher);
}
- Self::I64(v) | Self::AbstractInt(v) => {
+ Self::I64(v) => {
hasher.write_u8(5);
v.hash(hasher);
}
+ Self::U64(v) => {
+ hasher.write_u8(6);
+ v.hash(hasher);
+ }
+ Self::AbstractInt(v) => {
+ hasher.write_u8(7);
+ v.hash(hasher);
+ }
}
}
}
@@ -201,6 +214,7 @@ impl crate::Literal {
(value, crate::ScalarKind::Float, 4) => Some(Self::F32(value as _)),
(value, crate::ScalarKind::Uint, 4) => Some(Self::U32(value as _)),
(value, crate::ScalarKind::Sint, 4) => Some(Self::I32(value as _)),
+ (value, crate::ScalarKind::Uint, 8) => Some(Self::U64(value as _)),
(value, crate::ScalarKind::Sint, 8) => Some(Self::I64(value as _)),
(1, crate::ScalarKind::Bool, 4) => Some(Self::Bool(true)),
(0, crate::ScalarKind::Bool, 4) => Some(Self::Bool(false)),
@@ -218,7 +232,7 @@ impl crate::Literal {
pub const fn width(&self) -> crate::Bytes {
match *self {
- Self::F64(_) | Self::I64(_) => 8,
+ Self::F64(_) | Self::I64(_) | Self::U64(_) => 8,
Self::F32(_) | Self::U32(_) | Self::I32(_) => 4,
Self::Bool(_) => crate::BOOL_WIDTH,
Self::AbstractInt(_) | Self::AbstractFloat(_) => crate::ABSTRACT_WIDTH,
@@ -230,6 +244,7 @@ impl crate::Literal {
Self::F32(_) => crate::Scalar::F32,
Self::U32(_) => crate::Scalar::U32,
Self::I32(_) => crate::Scalar::I32,
+ Self::U64(_) => crate::Scalar::U64,
Self::I64(_) => crate::Scalar::I64,
Self::Bool(_) => crate::Scalar::BOOL,
Self::AbstractInt(_) => crate::Scalar::ABSTRACT_INT,
diff --git a/third_party/rust/naga/src/valid/expression.rs b/third_party/rust/naga/src/valid/expression.rs
index c82d60f062..838ecc4e27 100644
--- a/third_party/rust/naga/src/valid/expression.rs
+++ b/third_party/rust/naga/src/valid/expression.rs
@@ -124,6 +124,8 @@ pub enum ExpressionError {
MissingCapabilities(super::Capabilities),
#[error(transparent)]
Literal(#[from] LiteralError),
+ #[error("{0:?} is not supported for Width {2} {1:?} arguments yet, see https://github.com/gfx-rs/wgpu/issues/5276")]
+ UnsupportedWidth(crate::MathFunction, crate::ScalarKind, crate::Bytes),
}
#[derive(Clone, Debug, thiserror::Error)]
@@ -1332,28 +1334,29 @@ impl super::Validator {
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
}
- Mf::CountTrailingZeros
- | Mf::CountLeadingZeros
+ // Remove once fixed https://github.com/gfx-rs/wgpu/issues/5276
+ Mf::CountLeadingZeros
+ | Mf::CountTrailingZeros
| Mf::CountOneBits
| Mf::ReverseBits
- | Mf::FindLsb
- | Mf::FindMsb => {
+ | Mf::FindMsb
+ | Mf::FindLsb => {
if arg1_ty.is_some() || arg2_ty.is_some() || arg3_ty.is_some() {
return Err(ExpressionError::WrongArgumentCount(fun));
}
match *arg_ty {
- Ti::Scalar(Sc {
- kind: Sk::Sint | Sk::Uint,
- ..
- })
- | Ti::Vector {
- scalar:
- Sc {
- kind: Sk::Sint | Sk::Uint,
- ..
- },
- ..
- } => {}
+ Ti::Scalar(scalar) | Ti::Vector { scalar, .. } => match scalar.kind {
+ Sk::Sint | Sk::Uint => {
+ if scalar.width != 4 {
+ return Err(ExpressionError::UnsupportedWidth(
+ fun,
+ scalar.kind,
+ scalar.width,
+ ));
+ }
+ }
+ _ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
+ },
_ => return Err(ExpressionError::InvalidArgumentType(fun, 0, arg)),
}
}
@@ -1404,6 +1407,21 @@ impl super::Validator {
))
}
}
+ // Remove once fixed https://github.com/gfx-rs/wgpu/issues/5276
+ for &arg in [arg_ty, arg1_ty, arg2_ty, arg3_ty].iter() {
+ match *arg {
+ Ti::Scalar(scalar) | Ti::Vector { scalar, .. } => {
+ if scalar.width != 4 {
+ return Err(ExpressionError::UnsupportedWidth(
+ fun,
+ scalar.kind,
+ scalar.width,
+ ));
+ }
+ }
+ _ => {}
+ }
+ }
}
Mf::ExtractBits => {
let (arg1_ty, arg2_ty) = match (arg1_ty, arg2_ty, arg3_ty) {
@@ -1445,6 +1463,21 @@ impl super::Validator {
))
}
}
+ // Remove once fixed https://github.com/gfx-rs/wgpu/issues/5276
+ for &arg in [arg_ty, arg1_ty, arg2_ty].iter() {
+ match *arg {
+ Ti::Scalar(scalar) | Ti::Vector { scalar, .. } => {
+ if scalar.width != 4 {
+ return Err(ExpressionError::UnsupportedWidth(
+ fun,
+ scalar.kind,
+ scalar.width,
+ ));
+ }
+ }
+ _ => {}
+ }
+ }
}
Mf::Pack2x16unorm | Mf::Pack2x16snorm | Mf::Pack2x16float => {
if arg1_ty.is_some() || arg2_ty.is_some() || arg3_ty.is_some() {
diff --git a/third_party/rust/naga/src/valid/mod.rs b/third_party/rust/naga/src/valid/mod.rs
index 388495a3ac..5459434f33 100644
--- a/third_party/rust/naga/src/valid/mod.rs
+++ b/third_party/rust/naga/src/valid/mod.rs
@@ -28,7 +28,7 @@ pub use expression::{check_literal_value, LiteralError};
pub use expression::{ConstExpressionError, ExpressionError};
pub use function::{CallError, FunctionError, LocalVariableError};
pub use interface::{EntryPointError, GlobalVariableError, VaryingError};
-pub use r#type::{Disalignment, TypeError, TypeFlags};
+pub use r#type::{Disalignment, TypeError, TypeFlags, WidthError};
use self::handles::InvalidHandleError;
@@ -108,6 +108,8 @@ bitflags::bitflags! {
const DUAL_SOURCE_BLENDING = 0x2000;
/// Support for arrayed cube textures.
const CUBE_ARRAY_TEXTURES = 0x4000;
+ /// Support for 64-bit signed and unsigned integers.
+ const SHADER_INT64 = 0x8000;
}
}
diff --git a/third_party/rust/naga/src/valid/type.rs b/third_party/rust/naga/src/valid/type.rs
index 1e3e03fe19..d44a295b1a 100644
--- a/third_party/rust/naga/src/valid/type.rs
+++ b/third_party/rust/naga/src/valid/type.rs
@@ -107,6 +107,12 @@ pub enum TypeError {
MatrixElementNotFloat,
#[error("The constant {0:?} is specialized, and cannot be used as an array size")]
UnsupportedSpecializedArrayLength(Handle<crate::Constant>),
+ #[error("{} of dimensionality {dim:?} and class {class:?} are not supported", if *.arrayed {"Arrayed images"} else {"Images"})]
+ UnsupportedImageType {
+ dim: crate::ImageDimension,
+ arrayed: bool,
+ class: crate::ImageClass,
+ },
#[error("Array stride {stride} does not match the expected {expected}")]
InvalidArrayStride { stride: u32, expected: u32 },
#[error("Field '{0}' can't be dynamically-sized, has type {1:?}")]
@@ -141,9 +147,6 @@ pub enum WidthError {
flag: &'static str,
},
- #[error("64-bit integers are not yet supported")]
- Unsupported64Bit,
-
#[error("Abstract types may only appear in constant expressions")]
Abstract,
}
@@ -245,11 +248,31 @@ impl super::Validator {
scalar.width == 4
}
}
- crate::ScalarKind::Sint | crate::ScalarKind::Uint => {
+ crate::ScalarKind::Sint => {
+ if scalar.width == 8 {
+ if !self.capabilities.contains(Capabilities::SHADER_INT64) {
+ return Err(WidthError::MissingCapability {
+ name: "i64",
+ flag: "SHADER_INT64",
+ });
+ }
+ true
+ } else {
+ scalar.width == 4
+ }
+ }
+ crate::ScalarKind::Uint => {
if scalar.width == 8 {
- return Err(WidthError::Unsupported64Bit);
+ if !self.capabilities.contains(Capabilities::SHADER_INT64) {
+ return Err(WidthError::MissingCapability {
+ name: "u64",
+ flag: "SHADER_INT64",
+ });
+ }
+ true
+ } else {
+ scalar.width == 4
}
- scalar.width == 4
}
crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
return Err(WidthError::Abstract);
@@ -596,8 +619,15 @@ impl super::Validator {
Ti::Image {
dim,
arrayed,
- class: _,
+ class,
} => {
+ if arrayed && matches!(dim, crate::ImageDimension::D3) {
+ return Err(TypeError::UnsupportedImageType {
+ dim,
+ arrayed,
+ class,
+ });
+ }
if arrayed && matches!(dim, crate::ImageDimension::Cube) {
self.require_type_capability(Capabilities::CUBE_ARRAY_TEXTURES)?;
}
diff --git a/third_party/rust/neqo-common/.cargo-checksum.json b/third_party/rust/neqo-common/.cargo-checksum.json
index f8b692fdfe..e7daca1191 100644
--- a/third_party/rust/neqo-common/.cargo-checksum.json
+++ b/third_party/rust/neqo-common/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"dbb5500f87df7aee6e680ac210ddb56b833aa82d6be5c407474de0895cee14e9","build.rs":"a17b1bb1bd3de3fc958f72d4d1357f7bc4432faa26640c95b5fbfccf40579d67","src/codec.rs":"8c14f09864b095e28ff52e7d96a12a6591fc9c4b20a9cafca6720d132c80efdc","src/datagram.rs":"1a7028d96a2e7385e94265de53189eb824b7cf12e0e2de5d67c3f3f8751b6043","src/event.rs":"4ef9e6f3f5168f2eacb7be982e062e743c64a64e809765d2139122839aa407e5","src/header.rs":"467b947f78bfe354d8bb51e8df0c2be69e75a45e2be688d81f0d268aa77c89ef","src/hrtime.rs":"d7c8849e9ec7a312878ea2bc28939717fa03969fb9aee259a4a516351ee37643","src/incrdecoder.rs":"577c32b9ace51f2daaf940be6d0c391c4f55cd42ef6848c68c1ffc970d8c57b5","src/lib.rs":"47c14084c6d475ebb855f3ed9302b31fa42780b93a816bf098c96987ffe33572","src/log.rs":"c68099eae0e9014be35173ac802165b128433d973390e1111c08df56e71df063","src/qlog.rs":"3f43dc4e5fdccb9d6ee74d9e7b3ff29da63e4eb9f631e4e35446e452d8ec7af6","src/timer.rs":"50a2de20933b7b5884337aded69e59e2523503481308f25de1bba1a11d505be8","src/tos.rs":"5b5a61c699266716afce2f5bda7c98151db3223ede41ce451c390863198e30a2","tests/log.rs":"480b165b7907ec642c508b303d63005eee1427115d6973a349eaf6b2242ed18d"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"b49758e5e8f0a6955d761e689be39530f193f7089de07f2295a7a3aef4df5898","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"fd239f75d374db6ff744211344c82bcd19ecf753e07410e1fe37732bbb81dfe9","src/datagram.rs":"f2ff56faa0e513edbf4331b6ee2c9e6d6111483bda7aff08d16b9f05bce5c320","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/header.rs":"467b947f78bfe354d8bb51e8df0c2be69e75a45e2be688d81f0d268aa77c89ef","src/hrtime.rs":"112dc758e65301b8a7a508b125d3d61063180d432bffaec566a050d4f907ab18","src/incrdecoder.rs":"577c32b9ace51f2daaf940be6d0c391c4f55cd42ef6848c68c1ffc970d8c57b5","src/lib.rs":"a86aae69900933bf83044fa96166ee51216277415eafcdb15c04a907bb2dd10e","src/log.rs":"7246053bffd704b264d42fc82f986b9d62079472a76a9fc3749c25cfc7698532","src/qlog.rs":"9b081f32bf158fd340300693acc97fe0554b617ae664eba86e4d3572e2b1e16e","src/timer.rs":"350a730cc5a159dfdac5d78ec8e8a34c5172a476d827a566703edec24c791842","src/tos.rs":"440616cb0aee9082abe00623b33e68dbe80eda47aec889ac5f4145b1566bf692","src/udp.rs":"2b92132e078791e35b66f68d99d79ff5df55efd03e788474f7781a00403a5533","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/neqo-common/Cargo.toml b/third_party/rust/neqo-common/Cargo.toml
index b04537bb0a..dc5bed385f 100644
--- a/third_party/rust/neqo-common/Cargo.toml
+++ b/third_party/rust/neqo-common/Cargo.toml
@@ -10,17 +10,22 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
-rust-version = "1.70.0"
+edition = "2021"
+rust-version = "1.74.0"
name = "neqo-common"
-version = "0.7.0"
-authors = ["Bobby Holley <bobbyholley@gmail.com>"]
+version = "0.7.2"
+authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
+homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
+repository = "https://github.com/mozilla/neqo/"
-[dependencies]
-enum-map = "2.7"
-lazy_static = "1.4"
+[lib]
+bench = false
+
+[dependencies.enum-map]
+version = "2.7"
+default-features = false
[dependencies.env_logger]
version = "0.10"
@@ -31,20 +36,45 @@ version = "0.4"
default-features = false
[dependencies.qlog]
-git = "https://github.com/cloudflare/quiche"
-rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1"
+version = "0.12"
+default-features = false
+
+[dependencies.quinn-udp]
+git = "https://github.com/quinn-rs/quinn/"
+rev = "a947962131aba8a6521253d03cc948b20098a2d6"
+optional = true
[dependencies.time]
-version = "0.3.23"
+version = "0.3"
features = ["formatting"]
+default-features = false
+
+[dependencies.tokio]
+version = "1"
+features = [
+ "net",
+ "time",
+ "macros",
+ "rt",
+ "rt-multi-thread",
+]
+optional = true
+default-features = false
[dev-dependencies.test-fixture]
path = "../test-fixture"
[features]
ci = []
-deny-warnings = []
+udp = [
+ "dep:quinn-udp",
+ "dep:tokio",
+]
[target."cfg(windows)".dependencies.winapi]
version = "0.3"
features = ["timeapi"]
+
+[lints.clippy.pedantic]
+level = "warn"
+priority = -1
diff --git a/third_party/rust/neqo-common/build.rs b/third_party/rust/neqo-common/build.rs
index 0af1a1dbbd..9047b1f5d0 100644
--- a/third_party/rust/neqo-common/build.rs
+++ b/third_party/rust/neqo-common/build.rs
@@ -1,3 +1,9 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
use std::env;
fn main() {
diff --git a/third_party/rust/neqo-common/src/codec.rs b/third_party/rust/neqo-common/src/codec.rs
index 57ff13f39f..7fea2f71ab 100644
--- a/third_party/rust/neqo-common/src/codec.rs
+++ b/third_party/rust/neqo-common/src/codec.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{convert::TryFrom, fmt::Debug};
+use std::fmt::Debug;
use crate::hex_with_len;
@@ -112,9 +112,7 @@ impl<'a> Decoder<'a> {
/// Decodes a QUIC varint.
pub fn decode_varint(&mut self) -> Option<u64> {
- let Some(b1) = self.decode_byte() else {
- return None;
- };
+ let b1 = self.decode_byte()?;
match b1 >> 6 {
0 => Some(u64::from(b1 & 0x3f)),
1 => Some((u64::from(b1 & 0x3f) << 8) | self.decode_uint(1)?),
diff --git a/third_party/rust/neqo-common/src/datagram.rs b/third_party/rust/neqo-common/src/datagram.rs
index 1729c8ed8d..04ba1a45a1 100644
--- a/third_party/rust/neqo-common/src/datagram.rs
+++ b/third_party/rust/neqo-common/src/datagram.rs
@@ -53,6 +53,12 @@ impl Datagram {
pub fn ttl(&self) -> Option<u8> {
self.ttl
}
+
+ #[cfg(feature = "udp")]
+ #[must_use]
+ pub(crate) fn into_data(self) -> Vec<u8> {
+ self.d
+ }
}
impl Deref for Datagram {
diff --git a/third_party/rust/neqo-common/src/event.rs b/third_party/rust/neqo-common/src/event.rs
index 26052b7571..ea8d491822 100644
--- a/third_party/rust/neqo-common/src/event.rs
+++ b/third_party/rust/neqo-common/src/event.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{iter::Iterator, marker::PhantomData};
+use std::marker::PhantomData;
/// An event provider is able to generate a stream of events.
pub trait Provider {
diff --git a/third_party/rust/neqo-common/src/hrtime.rs b/third_party/rust/neqo-common/src/hrtime.rs
index 62d2567d42..e70b5f0ffb 100644
--- a/third_party/rust/neqo-common/src/hrtime.rs
+++ b/third_party/rust/neqo-common/src/hrtime.rs
@@ -6,7 +6,6 @@
use std::{
cell::RefCell,
- convert::TryFrom,
rc::{Rc, Weak},
time::Duration,
};
@@ -340,9 +339,7 @@ impl Time {
/// The handle can also be used to update the resolution.
#[must_use]
pub fn get(period: Duration) -> Handle {
- thread_local! {
- static HR_TIME: RefCell<Weak<RefCell<Time>>> = RefCell::default();
- }
+ thread_local!(static HR_TIME: RefCell<Weak<RefCell<Time>>> = RefCell::default());
HR_TIME.with(|r| {
let mut b = r.borrow_mut();
diff --git a/third_party/rust/neqo-common/src/lib.rs b/third_party/rust/neqo-common/src/lib.rs
index 853b05705b..fe88097983 100644
--- a/third_party/rust/neqo-common/src/lib.rs
+++ b/third_party/rust/neqo-common/src/lib.rs
@@ -4,8 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+#![allow(clippy::module_name_repetitions)] // This lint doesn't work here.
mod codec;
mod datagram;
@@ -17,6 +16,8 @@ pub mod log;
pub mod qlog;
pub mod timer;
pub mod tos;
+#[cfg(feature = "udp")]
+pub mod udp;
use std::fmt::Write;
diff --git a/third_party/rust/neqo-common/src/log.rs b/third_party/rust/neqo-common/src/log.rs
index d9c30b98b1..c5b89be8a6 100644
--- a/third_party/rust/neqo-common/src/log.rs
+++ b/third_party/rust/neqo-common/src/log.rs
@@ -6,16 +6,19 @@
#![allow(clippy::module_name_repetitions)]
-use std::{io::Write, sync::Once, time::Instant};
+use std::{
+ io::Write,
+ sync::{Once, OnceLock},
+ time::{Duration, Instant},
+};
use env_logger::Builder;
-use lazy_static::lazy_static;
#[macro_export]
macro_rules! do_log {
(target: $target:expr, $lvl:expr, $($arg:tt)+) => ({
let lvl = $lvl;
- if lvl <= ::log::max_level() {
+ if lvl <= ::log::STATIC_MAX_LEVEL && lvl <= ::log::max_level() {
::log::logger().log(
&::log::Record::builder()
.args(format_args!($($arg)+))
@@ -42,17 +45,22 @@ macro_rules! log_subject {
}};
}
-static INIT_ONCE: Once = Once::new();
-
-lazy_static! {
- static ref START_TIME: Instant = Instant::now();
+fn since_start() -> Duration {
+ static START_TIME: OnceLock<Instant> = OnceLock::new();
+ START_TIME.get_or_init(Instant::now).elapsed()
}
pub fn init() {
+ static INIT_ONCE: Once = Once::new();
+
+ if ::log::STATIC_MAX_LEVEL == ::log::LevelFilter::Off {
+ return;
+ }
+
INIT_ONCE.call_once(|| {
let mut builder = Builder::from_env("RUST_LOG");
builder.format(|buf, record| {
- let elapsed = START_TIME.elapsed();
+ let elapsed = since_start();
writeln!(
buf,
"{}s{:3}ms {} {}",
diff --git a/third_party/rust/neqo-common/src/qlog.rs b/third_party/rust/neqo-common/src/qlog.rs
index 3da8350990..c67ce62afe 100644
--- a/third_party/rust/neqo-common/src/qlog.rs
+++ b/third_party/rust/neqo-common/src/qlog.rs
@@ -12,8 +12,7 @@ use std::{
};
use qlog::{
- self, streamer::QlogStreamer, CommonFields, Configuration, TraceSeq, VantagePoint,
- VantagePointType,
+ streamer::QlogStreamer, CommonFields, Configuration, TraceSeq, VantagePoint, VantagePointType,
};
use crate::Role;
diff --git a/third_party/rust/neqo-common/src/timer.rs b/third_party/rust/neqo-common/src/timer.rs
index e8532af442..a413252e08 100644
--- a/third_party/rust/neqo-common/src/timer.rs
+++ b/third_party/rust/neqo-common/src/timer.rs
@@ -5,7 +5,6 @@
// except according to those terms.
use std::{
- convert::TryFrom,
mem,
time::{Duration, Instant},
};
@@ -247,49 +246,50 @@ impl<T> Timer<T> {
#[cfg(test)]
mod test {
- use lazy_static::lazy_static;
+ use std::sync::OnceLock;
use super::{Duration, Instant, Timer};
- lazy_static! {
- static ref NOW: Instant = Instant::now();
+ fn now() -> Instant {
+ static NOW: OnceLock<Instant> = OnceLock::new();
+ *NOW.get_or_init(Instant::now)
}
const GRANULARITY: Duration = Duration::from_millis(10);
const CAPACITY: usize = 10;
#[test]
fn create() {
- let t: Timer<()> = Timer::new(*NOW, GRANULARITY, CAPACITY);
+ let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY);
assert_eq!(t.span(), Duration::from_millis(100));
assert_eq!(None, t.next_time());
}
#[test]
fn immediate_entry() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
- t.add(*NOW, 12);
- assert_eq!(*NOW, t.next_time().expect("should have an entry"));
- let values: Vec<_> = t.take_until(*NOW).collect();
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
+ t.add(now(), 12);
+ assert_eq!(now(), t.next_time().expect("should have an entry"));
+ let values: Vec<_> = t.take_until(now()).collect();
assert_eq!(vec![12], values);
}
#[test]
fn same_time() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let v1 = 12;
let v2 = 13;
- t.add(*NOW, v1);
- t.add(*NOW, v2);
- assert_eq!(*NOW, t.next_time().expect("should have an entry"));
- let values: Vec<_> = t.take_until(*NOW).collect();
+ t.add(now(), v1);
+ t.add(now(), v2);
+ assert_eq!(now(), t.next_time().expect("should have an entry"));
+ let values: Vec<_> = t.take_until(now()).collect();
assert!(values.contains(&v1));
assert!(values.contains(&v2));
}
#[test]
fn add() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
- let near_future = *NOW + Duration::from_millis(17);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
+ let near_future = now() + Duration::from_millis(17);
let v = 9;
t.add(near_future, v);
assert_eq!(near_future, t.next_time().expect("should return a value"));
@@ -305,8 +305,8 @@ mod test {
#[test]
fn add_future() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
- let future = *NOW + Duration::from_millis(117);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
+ let future = now() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
assert_eq!(future, t.next_time().expect("should return a value"));
@@ -315,8 +315,8 @@ mod test {
#[test]
fn add_far_future() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
- let far_future = *NOW + Duration::from_millis(892);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
+ let far_future = now() + Duration::from_millis(892);
let v = 9;
t.add(far_future, v);
assert_eq!(far_future, t.next_time().expect("should return a value"));
@@ -333,12 +333,12 @@ mod test {
];
fn with_times() -> Timer<usize> {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
for (i, time) in TIMES.iter().enumerate() {
- t.add(*NOW + *time, i);
+ t.add(now() + *time, i);
}
assert_eq!(
- *NOW + *TIMES.iter().min().unwrap(),
+ now() + *TIMES.iter().min().unwrap(),
t.next_time().expect("should have a time")
);
t
@@ -348,7 +348,7 @@ mod test {
#[allow(clippy::needless_collect)] // false positive
fn multiple_values() {
let mut t = with_times();
- let values: Vec<_> = t.take_until(*NOW + *TIMES.iter().max().unwrap()).collect();
+ let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect();
for i in 0..TIMES.len() {
assert!(values.contains(&i));
}
@@ -358,7 +358,7 @@ mod test {
#[allow(clippy::needless_collect)] // false positive
fn take_far_future() {
let mut t = with_times();
- let values: Vec<_> = t.take_until(*NOW + Duration::from_secs(100)).collect();
+ let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect();
for i in 0..TIMES.len() {
assert!(values.contains(&i));
}
@@ -368,15 +368,15 @@ mod test {
fn remove_each() {
let mut t = with_times();
for (i, time) in TIMES.iter().enumerate() {
- assert_eq!(Some(i), t.remove(*NOW + *time, |&x| x == i));
+ assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i));
}
assert_eq!(None, t.next_time());
}
#[test]
fn remove_future() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
- let future = *NOW + Duration::from_millis(117);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
+ let future = now() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
@@ -385,9 +385,9 @@ mod test {
#[test]
fn remove_too_far_future() {
- let mut t = Timer::new(*NOW, GRANULARITY, CAPACITY);
- let future = *NOW + Duration::from_millis(117);
- let too_far_future = *NOW + t.span() + Duration::from_millis(117);
+ let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
+ let future = now() + Duration::from_millis(117);
+ let too_far_future = now() + t.span() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
diff --git a/third_party/rust/neqo-common/src/tos.rs b/third_party/rust/neqo-common/src/tos.rs
index aa360d1d53..3610f72750 100644
--- a/third_party/rust/neqo-common/src/tos.rs
+++ b/third_party/rust/neqo-common/src/tos.rs
@@ -46,6 +46,12 @@ impl From<u8> for IpTosEcn {
}
}
+impl From<IpTos> for IpTosEcn {
+ fn from(value: IpTos) -> Self {
+ IpTosEcn::from(value.0 & 0x3)
+ }
+}
+
/// Diffserv Codepoints, mapped to the upper six bits of the TOS field.
/// <https://www.iana.org/assignments/dscp-registry/dscp-registry.xhtml>
#[derive(Copy, Clone, PartialEq, Eq, Enum, Default, Debug)]
@@ -159,6 +165,12 @@ impl From<u8> for IpTosDscp {
}
}
+impl From<IpTos> for IpTosDscp {
+ fn from(value: IpTos) -> Self {
+ IpTosDscp::from(value.0 & 0xfc)
+ }
+}
+
/// The type-of-service field in an IP packet.
#[allow(clippy::module_name_repetitions)]
#[derive(Copy, Clone, PartialEq, Eq)]
@@ -169,22 +181,37 @@ impl From<IpTosEcn> for IpTos {
Self(u8::from(v))
}
}
+
impl From<IpTosDscp> for IpTos {
fn from(v: IpTosDscp) -> Self {
Self(u8::from(v))
}
}
+
impl From<(IpTosDscp, IpTosEcn)> for IpTos {
fn from(v: (IpTosDscp, IpTosEcn)) -> Self {
Self(u8::from(v.0) | u8::from(v.1))
}
}
+
+impl From<(IpTosEcn, IpTosDscp)> for IpTos {
+ fn from(v: (IpTosEcn, IpTosDscp)) -> Self {
+ Self(u8::from(v.0) | u8::from(v.1))
+ }
+}
+
impl From<IpTos> for u8 {
fn from(v: IpTos) -> Self {
v.0
}
}
+impl From<u8> for IpTos {
+ fn from(v: u8) -> Self {
+ Self(v)
+ }
+}
+
impl Debug for IpTos {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_tuple("IpTos")
@@ -202,7 +229,7 @@ impl Default for IpTos {
#[cfg(test)]
mod tests {
- use super::*;
+ use crate::{IpTos, IpTosDscp, IpTosEcn};
#[test]
fn iptosecn_into_u8() {
@@ -287,4 +314,12 @@ mod tests {
let iptos_dscp: IpTos = dscp.into();
assert_eq!(u8::from(iptos_dscp), dscp as u8);
}
+
+ #[test]
+ fn u8_to_iptos() {
+ let tos = 0x8b;
+ let iptos: IpTos = (IpTosEcn::Ce, IpTosDscp::Af41).into();
+ assert_eq!(tos, u8::from(iptos));
+ assert_eq!(IpTos::from(tos), iptos);
+ }
}
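The added conversions let callers assemble and split a TOS byte without handling the raw bit layout. A short usage sketch, assuming IpTos, IpTosDscp and IpTosEcn are re-exported from the crate root as the udp module below implies:

use neqo_common::{IpTos, IpTosDscp, IpTosEcn};

fn main() {
    // Compose DSCP AF41 with ECN CE, matching the u8_to_iptos test above.
    let tos: IpTos = (IpTosEcn::Ce, IpTosDscp::Af41).into();
    assert_eq!(u8::from(tos), 0x8b);
    // The new From<IpTos> impls recover each half of the field.
    assert_eq!(IpTosEcn::from(tos), IpTosEcn::Ce);
    assert_eq!(IpTosDscp::from(tos), IpTosDscp::Af41);
    // And a raw byte converts back into an IpTos.
    assert_eq!(IpTos::from(0x8b_u8), tos);
}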
diff --git a/third_party/rust/neqo-common/src/udp.rs b/third_party/rust/neqo-common/src/udp.rs
new file mode 100644
index 0000000000..c27b0632ff
--- /dev/null
+++ b/third_party/rust/neqo-common/src/udp.rs
@@ -0,0 +1,222 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![allow(clippy::missing_errors_doc)] // Functions simply delegate to tokio and quinn-udp.
+#![allow(clippy::missing_panics_doc)] // Functions simply delegate to tokio and quinn-udp.
+
+use std::{
+ io::{self, IoSliceMut},
+ net::{SocketAddr, ToSocketAddrs},
+ slice,
+};
+
+use quinn_udp::{EcnCodepoint, RecvMeta, Transmit, UdpSocketState};
+use tokio::io::Interest;
+
+use crate::{Datagram, IpTos};
+
+/// Socket receive buffer size.
+///
+/// Allows reading multiple datagrams in a single [`Socket::recv`] call.
+const RECV_BUF_SIZE: usize = u16::MAX as usize;
+
+pub struct Socket {
+ socket: tokio::net::UdpSocket,
+ state: UdpSocketState,
+ recv_buf: Vec<u8>,
+}
+
+impl Socket {
+ /// Calls [`std::net::UdpSocket::bind`] and instantiates [`quinn_udp::UdpSocketState`].
+ pub fn bind<A: ToSocketAddrs>(addr: A) -> Result<Self, io::Error> {
+ let socket = std::net::UdpSocket::bind(addr)?;
+
+ Ok(Self {
+ state: quinn_udp::UdpSocketState::new((&socket).into())?,
+ socket: tokio::net::UdpSocket::from_std(socket)?,
+ recv_buf: vec![0; RECV_BUF_SIZE],
+ })
+ }
+
+ /// See [`tokio::net::UdpSocket::local_addr`].
+ pub fn local_addr(&self) -> io::Result<SocketAddr> {
+ self.socket.local_addr()
+ }
+
+ /// See [`tokio::net::UdpSocket::writable`].
+ pub async fn writable(&self) -> Result<(), io::Error> {
+ self.socket.writable().await
+ }
+
+ /// See [`tokio::net::UdpSocket::readable`].
+ pub async fn readable(&self) -> Result<(), io::Error> {
+ self.socket.readable().await
+ }
+
+ /// Send the UDP datagram on the specified socket.
+ pub fn send(&self, d: Datagram) -> io::Result<usize> {
+ let transmit = Transmit {
+ destination: d.destination(),
+ ecn: EcnCodepoint::from_bits(Into::<u8>::into(d.tos())),
+ contents: d.into_data().into(),
+ segment_size: None,
+ src_ip: None,
+ };
+
+ let n = self.socket.try_io(Interest::WRITABLE, || {
+ self.state
+ .send((&self.socket).into(), slice::from_ref(&transmit))
+ })?;
+
+ assert_eq!(n, 1, "only passed one slice");
+
+ Ok(n)
+ }
+
+ /// Receive a UDP datagram on the specified socket.
+ pub fn recv(&mut self, local_address: &SocketAddr) -> Result<Vec<Datagram>, io::Error> {
+ let mut meta = RecvMeta::default();
+
+ match self.socket.try_io(Interest::READABLE, || {
+ self.state.recv(
+ (&self.socket).into(),
+ &mut [IoSliceMut::new(&mut self.recv_buf)],
+ slice::from_mut(&mut meta),
+ )
+ }) {
+ Ok(n) => {
+ assert_eq!(n, 1, "only passed one slice");
+ }
+ Err(ref err)
+ if err.kind() == io::ErrorKind::WouldBlock
+ || err.kind() == io::ErrorKind::Interrupted =>
+ {
+ return Ok(vec![])
+ }
+ Err(err) => {
+ return Err(err);
+ }
+ };
+
+ if meta.len == 0 {
+ eprintln!("zero length datagram received?");
+ return Ok(vec![]);
+ }
+ if meta.len == self.recv_buf.len() {
+ eprintln!(
+ "Might have received more than {} bytes",
+ self.recv_buf.len()
+ );
+ }
+
+ Ok(self.recv_buf[0..meta.len]
+ .chunks(meta.stride.min(self.recv_buf.len()))
+ .map(|d| {
+ Datagram::new(
+ meta.addr,
+ *local_address,
+ meta.ecn.map(|n| IpTos::from(n as u8)).unwrap_or_default(),
+ None, // TODO: get the real TTL https://github.com/quinn-rs/quinn/issues/1749
+ d,
+ )
+ })
+ .collect())
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::{IpTosDscp, IpTosEcn};
+
+ #[tokio::test]
+ async fn datagram_tos() -> Result<(), io::Error> {
+ let sender = Socket::bind("127.0.0.1:0")?;
+ let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
+ let mut receiver = Socket::bind(receiver_addr)?;
+
+ let datagram = Datagram::new(
+ sender.local_addr()?,
+ receiver.local_addr()?,
+ IpTos::from((IpTosDscp::Le, IpTosEcn::Ect1)),
+ None,
+ "Hello, world!".as_bytes().to_vec(),
+ );
+
+ sender.writable().await?;
+ sender.send(datagram.clone())?;
+
+ receiver.readable().await?;
+ let received_datagram = receiver
+ .recv(&receiver_addr)
+ .expect("receive to succeed")
+ .into_iter()
+ .next()
+ .expect("receive to yield datagram");
+
+ // Assert that the ECN is correct.
+ assert_eq!(
+ IpTosEcn::from(datagram.tos()),
+ IpTosEcn::from(received_datagram.tos())
+ );
+
+ Ok(())
+ }
+
+ /// Expect [`Socket::recv`] to handle multiple [`Datagram`]s on GRO read.
+ #[tokio::test]
+ #[cfg_attr(not(any(target_os = "linux", target_os = "windows")), ignore)]
+ async fn many_datagrams_through_gro() -> Result<(), io::Error> {
+ const SEGMENT_SIZE: usize = 128;
+
+ let sender = Socket::bind("127.0.0.1:0")?;
+ let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
+ let mut receiver = Socket::bind(receiver_addr)?;
+
+ // `neqo_common::udp::Socket::send` does not yet
+ // (https://github.com/mozilla/neqo/issues/1693) support GSO. Use
+ // `quinn_udp` directly.
+ let max_gso_segments = sender.state.max_gso_segments();
+ let msg = vec![0xAB; SEGMENT_SIZE * max_gso_segments];
+ let transmit = Transmit {
+ destination: receiver.local_addr()?,
+ ecn: EcnCodepoint::from_bits(Into::<u8>::into(IpTos::from((
+ IpTosDscp::Le,
+ IpTosEcn::Ect1,
+ )))),
+ contents: msg.clone().into(),
+ segment_size: Some(SEGMENT_SIZE),
+ src_ip: None,
+ };
+ sender.writable().await?;
+ let n = sender.socket.try_io(Interest::WRITABLE, || {
+ sender
+ .state
+ .send((&sender.socket).into(), slice::from_ref(&transmit))
+ })?;
+ assert_eq!(n, 1, "only passed one slice");
+
+ // Allow for one GSO sendmmsg to result in multiple GRO recvmmsg.
+ let mut num_received = 0;
+ while num_received < max_gso_segments {
+ receiver.readable().await?;
+ receiver
+ .recv(&receiver_addr)
+ .expect("receive to succeed")
+ .into_iter()
+ .for_each(|d| {
+ assert_eq!(
+ SEGMENT_SIZE,
+ d.len(),
+ "Expect received datagrams to have same length as sent datagrams."
+ );
+ num_received += 1;
+ });
+ }
+
+ Ok(())
+ }
+}
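The new udp module (optional, behind the udp feature, built on tokio and quinn-udp) wraps a non-blocking socket with GRO-aware receive. A minimal caller sketch modelled on the datagram_tos test above; the addresses and payload are illustrative:

use std::net::SocketAddr;

use neqo_common::{udp::Socket, Datagram, IpTos, IpTosDscp, IpTosEcn};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let sender = Socket::bind("127.0.0.1:0")?;
    let receiver_addr: SocketAddr = "127.0.0.1:0".parse().unwrap();
    let mut receiver = Socket::bind(receiver_addr)?;

    let datagram = Datagram::new(
        sender.local_addr()?,
        receiver.local_addr()?,
        IpTos::from((IpTosDscp::Le, IpTosEcn::Ect1)),
        None, // TTL is not set on send.
        b"hello".to_vec(),
    );

    sender.writable().await?;
    sender.send(datagram)?;

    receiver.readable().await?;
    let received = receiver.recv(&receiver_addr)?;
    assert_eq!(&received[0][..], &b"hello"[..]);
    Ok(())
}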
diff --git a/third_party/rust/neqo-common/tests/log.rs b/third_party/rust/neqo-common/tests/log.rs
index 33b42d1411..135a667146 100644
--- a/third_party/rust/neqo-common/tests/log.rs
+++ b/third_party/rust/neqo-common/tests/log.rs
@@ -4,9 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::use_self)]
-
use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn};
#[test]
diff --git a/third_party/rust/neqo-crypto/.cargo-checksum.json b/third_party/rust/neqo-crypto/.cargo-checksum.json
index ff4ab0fc66..5622e7f4ad 100644
--- a/third_party/rust/neqo-crypto/.cargo-checksum.json
+++ b/third_party/rust/neqo-crypto/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"7f7348b55033e19bbe51b07ee50313c87237fe09b56b338af9ab24e00aab32c6","bindings/bindings.toml":"0660c1661318b8a5094834c2f1bb12266287ef467307f66947eff7762528f70a","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"e712c16cb830a83eb4ea1f50dd341a4c30e1cce95d8c45af97030bc8ad0ae829","src/aead.rs":"b7cda4b89298cfd122cd2e1e94c462840e966c60f4832eb441106563ac332e00","src/aead_fuzzing.rs":"c3e590572314e0bb3fafa13dac3c831358b8a7b5570fe9cfe592752fce8cbdee","src/agent.rs":"c4fe47f9f5b0af20e3418da2e2ddce0ac2ca9665c0502115904f66a554e486ee","src/agentio.rs":"847ac63f6406e33bf20a861cadbfe6301ffa15bd73a5291298ffa93511b87dd5","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"6fc09012f994300ff4a7951bf8981aa266220521f58b8ff0989fee6dc1f27df9","src/constants.rs":"f22bf16bd8cb539862cb1e47138dbba79e93fe738f4b907e465891326f98883c","src/ech.rs":"58b7e0a1d2d52c59889cf8b735902577f7c3df93dfb89c72af2646b7aef29f39","src/err.rs":"fca0222167883231a5e0a569a593f44214501819adf5aadf814be27891c87c24","src/exp.rs":"cec59d61fc95914f9703d2fb6490a8507af993c9db710dde894f2f8fd38123c7","src/ext.rs":"c6ab9aefbbca531466dea938d853b1e42ed51816238afe400b20dbdb0111690b","src/hkdf.rs":"8e6cc5dce0f36efa4e13f5a24e2879bdbf10fb9a2b7dc8f13692e47d8959cdc8","src/hp.rs":"62ec073d99cf8bf3a123838c7d9b51bfdf68887148961f6307288e8dd56ac711","src/lib.rs":"40d9ac97c307c8161c2bf48156cc82377f81ad6e709f99cfd7dc0131dc192f86","src/once.rs":"b9850384899a1a016e839743d3489c0d4d916e1973746ef8c89872105d7d9736","src/p11.rs":"6c0f2f1b18e9bf9088a5ca5bdc99e789bb42234f7d2fe24d0b463bc957cb84a2","src/prio.rs":"e5e169296c0ac69919c59fb6c1f8bd6bf079452eaa13d75da0edd41d435d3f6f","src/replay.rs":"1ff4a12f6135ef2c42aef2b0947e26fd6241cd4b359020245608046452a7fcb0","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"4ffaa66f25df47dadf042063bff5953effa7bf2f4920cafe827757d6a659cb58","src/selfencrypt.rs":"4d2f4a6ea0fc94502130413ab5e2ea82612228f38a96a1865bf7d2b3f440620e","src/ssl.rs":"c83baa5518b81dd06f2e4072ea3c2d666ccdeb8b1ff6e3746eea9f1af47023a6","src/time.rs":"9204f3a384fb9dd2c3816c88666ad61ac3538f9e2f028954e81fd335a1479070","tests/aead.rs":"efdb92a060ca1957d890da1604513369559cb43195ee54149ed3ab47958dad59","tests/agent.rs":"0e55354595ae5f0e1ab83731564da57ba88a296e00692147c47df7067a0f416a","tests/ext.rs":"54657b45bd86d2561bb0f548736bc6f141bb664a5b043506f428422919ab95d4","tests/handshake.rs":"40701bc22f16d1ba9b9bd9683738e52b96faafee4119f7057437dae705f7
867a","tests/hkdf.rs":"4160978b96505c1f1b7d6c4b5f43536ff7bd791c8746f9546c9fbc0fce5cf1c7","tests/hp.rs":"8eeee21a439e0f991145dff07b01283ae39ccd4b8dac4d011d43a464f73db670","tests/init.rs":"fc9e392b1efa0d8efb28952f73ffc05e5348e7b2b69207b60e375c3888a252a2","tests/selfencrypt.rs":"6edd0914b8466d79ecfb569c6d86995fd364b0dc71be2a0554e82f736ebd6b7c"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"6f1917fbd4cbf53cb4883c30e8fcb9c20f8ebe15e19576c7d37cb6ba0ab9e42b","bindings/bindings.toml":"0660c1661318b8a5094834c2f1bb12266287ef467307f66947eff7762528f70a","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"21d9a0140b2afd708583f58f2af0a4ba93ab07ec088680b4cbf0e184aeb8785b","src/aead.rs":"8f50e4557b7829edb67f57c80c777c6ae23c868e2b2eeaaae0736af04dc0d298","src/aead_fuzzing.rs":"c3e590572314e0bb3fafa13dac3c831358b8a7b5570fe9cfe592752fce8cbdee","src/agent.rs":"e995e9cc5108470594bae1b0d4e4bc6b7a8ac2b66488f71ea99e2836c0edbd7e","src/agentio.rs":"c4cb1b3cd92ef53eb0b4fb0b34a597068d82d78ba470dae5821670a0f06c9cda","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8942cb3ce25a61f92b6ffc30fb286052ed6f56eeda3be12fd46ea76ceba6c1cf","src/constants.rs":"f22bf16bd8cb539862cb1e47138dbba79e93fe738f4b907e465891326f98883c","src/ech.rs":"9d322fcc01c0886f1dfe9bb6273cb9f88a746452ac9a802761b1816a05930c1f","src/err.rs":"fca0222167883231a5e0a569a593f44214501819adf5aadf814be27891c87c24","src/exp.rs":"cec59d61fc95914f9703d2fb6490a8507af993c9db710dde894f2f8fd38123c7","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"ef32f20e30a9bd7f094199536d19c87c4231b7fbbe4a9c54c70e84ca9c6575be","src/hp.rs":"644f1bed67f1c6189a67c8d02ab3358aaa7f63af4b913dd7395becbc01a84291","src/lib.rs":"23732c7799be038c0e0835b54e7c40cf6c6536113e0adb6ae3b41b216a6e5220","src/p11.rs":"e8c366def0df470101f3d120dcc4391f74f921fe59e2f3db2a56832e2852b855","src/prio.rs":"e5e169296c0ac69919c59fb6c1f8bd6bf079452eaa13d75da0edd41d435d3f6f","src/replay.rs":"96b7af8eff9e14313e79303092018b12e8834f780c96b8e247c497fdc680c696","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"4ffaa66f25df47dadf042063bff5953effa7bf2f4920cafe827757d6a659cb58","src/selfencrypt.rs":"ac65b13f5bade9d03ab4709364f9ec937fa4ca009965c77ca73b481534a0a470","src/ssl.rs":"c83baa5518b81dd06f2e4072ea3c2d666ccdeb8b1ff6e3746eea9f1af47023a6","src/time.rs":"3b2829a98a1648eb052db19bb470808b6b015a1eca27ab7be64b5d196c0271c0","tests/aead.rs":"3ac4fe4ab79922b5d0191a9717058fc8d0710380ce9b25448095f870f511844f","tests/agent.rs":"824735f88e487a3748200844e9481e81a72163ad74d82faa9aa16594d9b9bb25","tests/ext.rs":"1b047d23d9b224ad06eb65d8f3a7b351e263774e404c79bbcbe8f43790e29c18","tests/handshake.rs":"e892a2839b31414be16e96cdf3b1a65978716094700c1a4989229f7edbf578a0","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3d
a6b048","tests/hp.rs":"b24fec53771c169be788772532d2617a5349196cf87d6444dc74214f7c73e92c","tests/init.rs":"44fe7626b75ab8c57adfee361bb70a83d5958797e1eb6c4531bb74988ba3a990","tests/selfencrypt.rs":"25813b0c6f32fc8383bb7685745feb750eb3fdc0a6a172a50d961c68d39f2a46"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/neqo-crypto/Cargo.toml b/third_party/rust/neqo-crypto/Cargo.toml
index 73c1fcb364..499921e531 100644
--- a/third_party/rust/neqo-crypto/Cargo.toml
+++ b/third_party/rust/neqo-crypto/Cargo.toml
@@ -10,16 +10,21 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
-rust-version = "1.70.0"
+edition = "2021"
+rust-version = "1.74.0"
name = "neqo-crypto"
-version = "0.7.0"
-authors = ["Martin Thomson <mt@lowentropy.net>"]
+version = "0.7.2"
+authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
+homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
+repository = "https://github.com/mozilla/neqo/"
+
+[lib]
+bench = false
[dependencies.log]
-version = "~0.4.17"
+version = "0.4"
default-features = false
[dependencies.neqo-common]
@@ -28,21 +33,32 @@ path = "../neqo-common"
[dev-dependencies.test-fixture]
path = "../test-fixture"
-[build-dependencies]
-serde = "1.0.195"
-serde_derive = "1.0.195"
-toml = "0.5.11"
-
[build-dependencies.bindgen]
-version = "0.69.1"
+version = "0.69"
features = ["runtime"]
default-features = false
[build-dependencies.mozbuild]
version = "0.1"
optional = true
+default-features = false
+
+[build-dependencies.serde]
+version = "1.0"
+default-features = false
+
+[build-dependencies.serde_derive]
+version = "1.0"
+default-features = false
+
+[build-dependencies.toml]
+version = "0.5"
+default-features = false
[features]
-deny-warnings = []
fuzzing = []
gecko = ["mozbuild"]
+
+[lints.clippy.pedantic]
+level = "warn"
+priority = -1
diff --git a/third_party/rust/neqo-crypto/build.rs b/third_party/rust/neqo-crypto/build.rs
index a63c34dedb..c4c2a73e75 100644
--- a/third_party/rust/neqo-crypto/build.rs
+++ b/third_party/rust/neqo-crypto/build.rs
@@ -4,9 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-
use std::{
collections::HashMap,
env, fs,
@@ -37,7 +34,7 @@ struct Bindings {
opaque: Vec<String>,
/// enumerations that are turned into a module (without this, the enum is
/// mapped using the default, which means that the individual values are
- /// formed with an underscore as <enum_type>_<enum_value_name>).
+ /// formed with an underscore as <`enum_type`>_<`enum_value_name`>).
#[serde(default)]
enums: Vec<String>,
@@ -53,9 +50,10 @@ struct Bindings {
}
fn is_debug() -> bool {
- env::var("DEBUG")
- .map(|d| d.parse::<bool>().unwrap_or(false))
- .unwrap_or(false)
+ // Check the build profile and not whether debug symbols are enabled (i.e.,
+ // `env::var("DEBUG")`), because we enable those for benchmarking/profiling and still want
+ // to build NSS in release mode.
+ env::var("PROFILE").unwrap_or_default() == "debug"
}
// bindgen needs access to libclang.
@@ -126,7 +124,7 @@ fn nss_dir() -> PathBuf {
}
dir
};
- assert!(dir.is_dir(), "NSS_DIR {:?} doesn't exist", dir);
+ assert!(dir.is_dir(), "NSS_DIR {dir:?} doesn't exist");
// Note that this returns a relative path because UNC
// paths on windows cause certain tools to explode.
dir
@@ -150,10 +148,10 @@ fn build_nss(dir: PathBuf) {
let mut build_nss = vec![
String::from("./build.sh"),
String::from("-Ddisable_tests=1"),
+ // Generate static libraries in addition to shared libraries.
+ String::from("--static"),
];
- if is_debug() {
- build_nss.push(String::from("--static"));
- } else {
+ if !is_debug() {
build_nss.push(String::from("-o"));
}
if let Ok(d) = env::var("NSS_JOBS") {
@@ -318,7 +316,7 @@ fn setup_standalone() -> Vec<String> {
"cargo:rustc-link-search=native={}",
nsslibdir.to_str().unwrap()
);
- if is_debug() {
+ if is_debug() || env::consts::OS == "windows" {
static_link();
} else {
dynamic_link();
diff --git a/third_party/rust/neqo-crypto/src/aead.rs b/third_party/rust/neqo-crypto/src/aead.rs
index a2f009a403..bf7d7fe9d7 100644
--- a/third_party/rust/neqo-crypto/src/aead.rs
+++ b/third_party/rust/neqo-crypto/src/aead.rs
@@ -5,7 +5,6 @@
// except according to those terms.
use std::{
- convert::{TryFrom, TryInto},
fmt,
ops::{Deref, DerefMut},
os::raw::{c_char, c_uint},
diff --git a/third_party/rust/neqo-crypto/src/agent.rs b/third_party/rust/neqo-crypto/src/agent.rs
index cd0bb4cb12..82a6dacd48 100644
--- a/third_party/rust/neqo-crypto/src/agent.rs
+++ b/third_party/rust/neqo-crypto/src/agent.rs
@@ -6,7 +6,6 @@
use std::{
cell::RefCell,
- convert::TryFrom,
ffi::{CStr, CString},
mem::{self, MaybeUninit},
ops::{Deref, DerefMut},
@@ -33,6 +32,7 @@ use crate::{
ech,
err::{is_blocked, secstatus_to_res, Error, PRErrorCode, Res},
ext::{ExtensionHandler, ExtensionTracker},
+ null_safe_slice,
p11::{self, PrivateKey, PublicKey},
prio,
replay::AntiReplay,
@@ -897,7 +897,7 @@ impl Client {
let resumption = arg.cast::<Vec<ResumptionToken>>().as_mut().unwrap();
let len = usize::try_from(len).unwrap();
let mut v = Vec::with_capacity(len);
- v.extend_from_slice(std::slice::from_raw_parts(token, len));
+ v.extend_from_slice(null_safe_slice(token, len));
qinfo!(
[format!("{fd:p}")],
"Got resumption token {}",
@@ -1015,7 +1015,7 @@ pub enum ZeroRttCheckResult {
Accept,
/// Reject 0-RTT, but continue the handshake normally.
Reject,
- /// Send HelloRetryRequest (probably not needed for QUIC).
+ /// Send `HelloRetryRequest` (probably not needed for QUIC).
HelloRetryRequest(Vec<u8>),
/// Fail the handshake.
Fail,
@@ -1105,11 +1105,7 @@ impl Server {
}
let check_state = arg.cast::<ZeroRttCheckState>().as_mut().unwrap();
- let token = if client_token.is_null() {
- &[]
- } else {
- std::slice::from_raw_parts(client_token, usize::try_from(client_token_len).unwrap())
- };
+ let token = null_safe_slice(client_token, usize::try_from(client_token_len).unwrap());
match check_state.checker.check(token) {
ZeroRttCheckResult::Accept => ssl::SSLHelloRetryRequestAction::ssl_hello_retry_accept,
ZeroRttCheckResult::Fail => ssl::SSLHelloRetryRequestAction::ssl_hello_retry_fail,
diff --git a/third_party/rust/neqo-crypto/src/agentio.rs b/third_party/rust/neqo-crypto/src/agentio.rs
index 2bcc540530..7c57a0ef45 100644
--- a/third_party/rust/neqo-crypto/src/agentio.rs
+++ b/third_party/rust/neqo-crypto/src/agentio.rs
@@ -6,13 +6,11 @@
use std::{
cmp::min,
- convert::{TryFrom, TryInto},
fmt, mem,
ops::Deref,
os::raw::{c_uint, c_void},
pin::Pin,
ptr::{null, null_mut},
- vec::Vec,
};
use neqo_common::{hex, hex_with_len, qtrace};
@@ -20,7 +18,7 @@ use neqo_common::{hex, hex_with_len, qtrace};
use crate::{
constants::{ContentType, Epoch},
err::{nspr, Error, PR_SetError, Res},
- prio, ssl,
+ null_safe_slice, prio, ssl,
};
// Alias common types.
@@ -100,7 +98,7 @@ impl RecordList {
) -> ssl::SECStatus {
let records = arg.cast::<Self>().as_mut().unwrap();
- let slice = std::slice::from_raw_parts(data, len as usize);
+ let slice = null_safe_slice(data, len);
records.append(epoch, ContentType::try_from(ct).unwrap(), slice);
ssl::SECSuccess
}
@@ -178,6 +176,7 @@ impl AgentIoInput {
return Err(Error::NoDataAvailable);
}
+ #[allow(clippy::disallowed_methods)] // We just checked if this was empty.
let src = unsafe { std::slice::from_raw_parts(self.input, amount) };
qtrace!([self], "read {}", hex(src));
let dst = unsafe { std::slice::from_raw_parts_mut(buf, amount) };
@@ -232,7 +231,7 @@ impl AgentIo {
// Stage output from TLS into the output buffer.
fn save_output(&mut self, buf: *const u8, count: usize) {
- let slice = unsafe { std::slice::from_raw_parts(buf, count) };
+ let slice = unsafe { null_safe_slice(buf, count) };
qtrace!([self], "save output {}", hex(slice));
self.output.extend_from_slice(slice);
}
diff --git a/third_party/rust/neqo-crypto/src/cert.rs b/third_party/rust/neqo-crypto/src/cert.rs
index 64e63ec71a..2836b5237c 100644
--- a/third_party/rust/neqo-crypto/src/cert.rs
+++ b/third_party/rust/neqo-crypto/src/cert.rs
@@ -4,16 +4,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{
- convert::TryFrom,
- ptr::{addr_of, NonNull},
- slice,
-};
+use std::ptr::{addr_of, NonNull};
use neqo_common::qerror;
use crate::{
err::secstatus_to_res,
+ null_safe_slice,
p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray},
ssl::{
PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps,
@@ -24,7 +21,7 @@ use crate::{
pub struct CertificateInfo {
certs: CertList,
cursor: *const CERTCertListNode,
- /// stapled_ocsp_responses and signed_cert_timestamp are properties
+ /// `stapled_ocsp_responses` and `signed_cert_timestamp` are properties
/// associated with each of the certificates. Right now, NSS only
/// reports the value for the end-entity certificate (the first).
stapled_ocsp_responses: Option<Vec<Vec<u8>>>,
@@ -52,7 +49,7 @@ fn stapled_ocsp_responses(fd: *mut PRFileDesc) -> Option<Vec<Vec<u8>>> {
};
for idx in 0..len {
let itemp: *const SECItem = unsafe { ocsp_ptr.as_ref().items.offset(idx).cast() };
- let item = unsafe { slice::from_raw_parts((*itemp).data, (*itemp).len as usize) };
+ let item = unsafe { null_safe_slice((*itemp).data, (*itemp).len) };
ocsp_helper.push(item.to_owned());
}
Some(ocsp_helper)
@@ -68,9 +65,8 @@ fn signed_cert_timestamp(fd: *mut PRFileDesc) -> Option<Vec<u8>> {
if unsafe { sct_ptr.as_ref().len == 0 || sct_ptr.as_ref().data.is_null() } {
Some(Vec::new())
} else {
- let sct_slice = unsafe {
- slice::from_raw_parts(sct_ptr.as_ref().data, sct_ptr.as_ref().len as usize)
- };
+ let sct_slice =
+ unsafe { null_safe_slice(sct_ptr.as_ref().data, sct_ptr.as_ref().len) };
Some(sct_slice.to_owned())
}
}
@@ -105,7 +101,7 @@ impl<'a> Iterator for &'a mut CertificateInfo {
let cert = unsafe { *self.cursor }.cert;
secstatus_to_res(unsafe { CERT_GetCertificateDer(cert, &mut item) })
.expect("getting DER from certificate should work");
- Some(unsafe { std::slice::from_raw_parts(item.data, item.len as usize) })
+ Some(unsafe { null_safe_slice(item.data, item.len) })
}
}
diff --git a/third_party/rust/neqo-crypto/src/ech.rs b/third_party/rust/neqo-crypto/src/ech.rs
index 1f54c4592e..4ff2cda7e8 100644
--- a/third_party/rust/neqo-crypto/src/ech.rs
+++ b/third_party/rust/neqo-crypto/src/ech.rs
@@ -5,7 +5,6 @@
// except according to those terms.
use std::{
- convert::TryFrom,
ffi::CString,
os::raw::{c_char, c_uint},
ptr::{addr_of_mut, null_mut},
@@ -15,7 +14,7 @@ use neqo_common::qtrace;
use crate::{
err::{ssl::SSL_ERROR_ECH_RETRY_WITH_ECH, Error, Res},
- experimental_api,
+ experimental_api, null_safe_slice,
p11::{
self, Item, PrivateKey, PublicKey, SECITEM_FreeItem, SECItem, SECKEYPrivateKey,
SECKEYPublicKey, Slot,
@@ -76,7 +75,7 @@ pub fn convert_ech_error(fd: *mut PRFileDesc, err: Error) -> Error {
return Error::InternalError;
}
let buf = unsafe {
- let slc = std::slice::from_raw_parts(item.data, usize::try_from(item.len).unwrap());
+ let slc = null_safe_slice(item.data, item.len);
let buf = Vec::from(slc);
SECITEM_FreeItem(&mut item, PRBool::from(false));
buf
@@ -101,8 +100,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> {
let oid_data = unsafe { p11::SECOID_FindOIDByTag(p11::SECOidTag::SEC_OID_CURVE25519) };
let oid = unsafe { oid_data.as_ref() }.ok_or(Error::InternalError)?;
- let oid_slc =
- unsafe { std::slice::from_raw_parts(oid.oid.data, usize::try_from(oid.oid.len).unwrap()) };
+ let oid_slc = unsafe { null_safe_slice(oid.oid.data, oid.oid.len) };
let mut params: Vec<u8> = Vec::with_capacity(oid_slc.len() + 2);
params.push(u8::try_from(p11::SEC_ASN1_OBJECT_ID).unwrap());
params.push(u8::try_from(oid.oid.len).unwrap());
@@ -113,7 +111,6 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> {
// If we have tracing on, try to ensure that key data can be read.
let insensitive_secret_ptr = if log::log_enabled!(log::Level::Trace) {
- #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0.
unsafe {
p11::PK11_GenerateKeyPairWithOpFlags(
*slot,
@@ -131,7 +128,6 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> {
};
assert_eq!(insensitive_secret_ptr.is_null(), public_ptr.is_null());
let secret_ptr = if insensitive_secret_ptr.is_null() {
- #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0.
unsafe {
p11::PK11_GenerateKeyPairWithOpFlags(
*slot,
diff --git a/third_party/rust/neqo-crypto/src/ext.rs b/third_party/rust/neqo-crypto/src/ext.rs
index 310e87a1b7..02ee6340c1 100644
--- a/third_party/rust/neqo-crypto/src/ext.rs
+++ b/third_party/rust/neqo-crypto/src/ext.rs
@@ -6,7 +6,6 @@
use std::{
cell::RefCell,
- convert::TryFrom,
os::raw::{c_uint, c_void},
pin::Pin,
rc::Rc,
@@ -16,6 +15,7 @@ use crate::{
agentio::as_c_void,
constants::{Extension, HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS},
err::Res,
+ null_safe_slice,
ssl::{
PRBool, PRFileDesc, SECFailure, SECStatus, SECSuccess, SSLAlertDescription,
SSLExtensionHandler, SSLExtensionWriter, SSLHandshakeType,
@@ -105,7 +105,7 @@ impl ExtensionTracker {
alert: *mut SSLAlertDescription,
arg: *mut c_void,
) -> SECStatus {
- let d = std::slice::from_raw_parts(data, len as usize);
+ let d = null_safe_slice(data, len);
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
Self::wrap_handler_call(arg, |handler| {
// Cast is safe here because the message type is always part of the enum
diff --git a/third_party/rust/neqo-crypto/src/hkdf.rs b/third_party/rust/neqo-crypto/src/hkdf.rs
index e3cf77418c..3706be6c3b 100644
--- a/third_party/rust/neqo-crypto/src/hkdf.rs
+++ b/third_party/rust/neqo-crypto/src/hkdf.rs
@@ -5,7 +5,6 @@
// except according to those terms.
use std::{
- convert::TryFrom,
os::raw::{c_char, c_uint},
ptr::null_mut,
};
@@ -17,9 +16,10 @@ use crate::{
},
err::{Error, Res},
p11::{
- random, Item, PK11Origin, PK11SymKey, PK11_ImportDataKey, Slot, SymKey, CKA_DERIVE,
+ Item, PK11Origin, PK11SymKey, PK11_ImportDataKey, Slot, SymKey, CKA_DERIVE,
CKM_HKDF_DERIVE, CK_ATTRIBUTE_TYPE, CK_MECHANISM_TYPE,
},
+ random,
};
experimental_api!(SSL_HkdfExtract(
@@ -40,24 +40,32 @@ experimental_api!(SSL_HkdfExpandLabel(
secret: *mut *mut PK11SymKey,
));
-fn key_size(version: Version, cipher: Cipher) -> Res<usize> {
+const MAX_KEY_SIZE: usize = 48;
+const fn key_size(version: Version, cipher: Cipher) -> Res<usize> {
if version != TLS_VERSION_1_3 {
return Err(Error::UnsupportedVersion);
}
- Ok(match cipher {
+ let size = match cipher {
TLS_AES_128_GCM_SHA256 | TLS_CHACHA20_POLY1305_SHA256 => 32,
TLS_AES_256_GCM_SHA384 => 48,
_ => return Err(Error::UnsupportedCipher),
- })
+ };
+ debug_assert!(size <= MAX_KEY_SIZE);
+ Ok(size)
}
/// Generate a random key of the right size for the given suite.
///
/// # Errors
///
-/// Only if NSS fails.
+/// If the ciphersuite or protocol version is not supported.
pub fn generate_key(version: Version, cipher: Cipher) -> Res<SymKey> {
- import_key(version, &random(key_size(version, cipher)?))
+ // With generic_const_expr, this becomes:
+ // import_key(version, &random::<{ key_size(version, cipher) }>())
+ import_key(
+ version,
+ &random::<MAX_KEY_SIZE>()[0..key_size(version, cipher)?],
+ )
}
/// Import a symmetric key for use with HKDF.
@@ -70,7 +78,6 @@ pub fn import_key(version: Version, buf: &[u8]) -> Res<SymKey> {
return Err(Error::UnsupportedVersion);
}
let slot = Slot::internal()?;
- #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0.
let key_ptr = unsafe {
PK11_ImportDataKey(
*slot,
diff --git a/third_party/rust/neqo-crypto/src/hp.rs b/third_party/rust/neqo-crypto/src/hp.rs
index 2479eff8f5..1eba6a9cb5 100644
--- a/third_party/rust/neqo-crypto/src/hp.rs
+++ b/third_party/rust/neqo-crypto/src/hp.rs
@@ -6,7 +6,6 @@
use std::{
cell::RefCell,
- convert::TryFrom,
fmt::{self, Debug},
os::raw::{c_char, c_int, c_uint},
ptr::{addr_of_mut, null, null_mut},
@@ -46,7 +45,7 @@ pub enum HpKey {
/// track references using `Rc`. `PK11Context` can't be used with `PK11_CloneContext`
/// as that is not supported for these contexts.
Aes(Rc<RefCell<Context>>),
- /// The ChaCha20 mask has to invoke a new PK11_Encrypt every time as it needs to
+ /// The `ChaCha20` mask has to invoke a new `PK11_Encrypt` every time as it needs to
/// change the counter and nonce on each invocation.
Chacha(SymKey),
}
@@ -76,7 +75,6 @@ impl HpKey {
let l = label.as_bytes();
let mut secret: *mut PK11SymKey = null_mut();
- #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0.
let (mech, key_size) = match cipher {
TLS_AES_128_GCM_SHA256 => (CK_MECHANISM_TYPE::from(CKM_AES_ECB), 16),
TLS_AES_256_GCM_SHA384 => (CK_MECHANISM_TYPE::from(CKM_AES_ECB), 32),
@@ -104,8 +102,6 @@ impl HpKey {
let res = match cipher {
TLS_AES_128_GCM_SHA256 | TLS_AES_256_GCM_SHA384 => {
- // TODO: Remove when we bump the MSRV to 1.74.0.
- #[allow(clippy::useless_conversion)]
let context_ptr = unsafe {
PK11_CreateContextBySymKey(
mech,
@@ -181,8 +177,6 @@ impl HpKey {
};
let mut output_len: c_uint = 0;
let mut param_item = Item::wrap_struct(&params);
- // TODO: Remove when we bump the MSRV to 1.74.0.
- #[allow(clippy::useless_conversion)]
secstatus_to_res(unsafe {
PK11_Encrypt(
**key,
diff --git a/third_party/rust/neqo-crypto/src/lib.rs b/third_party/rust/neqo-crypto/src/lib.rs
index 05424ee1f3..2ec1b4a3ea 100644
--- a/third_party/rust/neqo-crypto/src/lib.rs
+++ b/third_party/rust/neqo-crypto/src/lib.rs
@@ -4,13 +4,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-// Bindgen auto generated code
-// won't adhere to the clippy rules below
-#![allow(clippy::module_name_repetitions)]
-#![allow(clippy::unseparated_literal_suffix)]
-#![allow(clippy::used_underscore_binding)]
+#![allow(clippy::module_name_repetitions)] // This lint doesn't work here.
+#![allow(clippy::unseparated_literal_suffix, clippy::used_underscore_binding)] // For bindgen code.
mod aead;
#[cfg(feature = "fuzzing")]
@@ -27,7 +22,6 @@ mod exp;
pub mod ext;
pub mod hkdf;
pub mod hp;
-mod once;
#[macro_use]
mod p11;
mod prio;
@@ -37,11 +31,7 @@ pub mod selfencrypt;
mod ssl;
mod time;
-use std::{
- ffi::CString,
- path::{Path, PathBuf},
- ptr::null,
-};
+use std::{ffi::CString, path::PathBuf, ptr::null, sync::OnceLock};
#[cfg(not(feature = "fuzzing"))]
pub use self::aead::RealAead as Aead;
@@ -49,7 +39,6 @@ pub use self::aead::RealAead as Aead;
pub use self::aead::RealAead;
#[cfg(feature = "fuzzing")]
pub use self::aead_fuzzing::FuzzingAead as Aead;
-use self::once::OnceResult;
pub use self::{
agent::{
Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken,
@@ -64,7 +53,7 @@ pub use self::{
},
err::{Error, PRErrorCode, Res},
ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult},
- p11::{random, PrivateKey, PublicKey, SymKey},
+ p11::{random, randomize, PrivateKey, PublicKey, SymKey},
replay::AntiReplay,
secrets::SecretDirection,
ssl::Opt,
@@ -87,7 +76,7 @@ fn secstatus_to_res(code: nss::SECStatus) -> Res<()> {
enum NssLoaded {
External,
NoDb,
- Db(Box<Path>),
+ Db,
}
impl Drop for NssLoaded {
@@ -100,7 +89,7 @@ impl Drop for NssLoaded {
}
}
-static mut INITIALIZED: OnceResult<NssLoaded> = OnceResult::new();
+static INITIALIZED: OnceLock<NssLoaded> = OnceLock::new();
fn already_initialized() -> bool {
unsafe { nss::NSS_IsInitialized() != 0 }
@@ -124,19 +113,18 @@ fn version_check() {
pub fn init() {
// Set time zero.
time::init();
- unsafe {
- INITIALIZED.call_once(|| {
- version_check();
- if already_initialized() {
- return NssLoaded::External;
- }
+ _ = INITIALIZED.get_or_init(|| {
+ version_check();
+ if already_initialized() {
+ return NssLoaded::External;
+ }
- secstatus_to_res(nss::NSS_NoDB_Init(null())).expect("NSS_NoDB_Init failed");
- secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed");
+ secstatus_to_res(unsafe { nss::NSS_NoDB_Init(null()) }).expect("NSS_NoDB_Init failed");
+ secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })
+ .expect("NSS_SetDomesticPolicy failed");
- NssLoaded::NoDb
- });
- }
+ NssLoaded::NoDb
+ });
}
/// This enables SSLTRACE by calling a simple, harmless function to trigger its
@@ -158,51 +146,71 @@ fn enable_ssl_trace() {
/// If NSS cannot be initialized.
pub fn init_db<P: Into<PathBuf>>(dir: P) {
time::init();
- unsafe {
- INITIALIZED.call_once(|| {
- version_check();
- if already_initialized() {
- return NssLoaded::External;
- }
+ _ = INITIALIZED.get_or_init(|| {
+ version_check();
+ if already_initialized() {
+ return NssLoaded::External;
+ }
- let path = dir.into();
- assert!(path.is_dir());
- let pathstr = path.to_str().expect("path converts to string").to_string();
- let dircstr = CString::new(pathstr).unwrap();
- let empty = CString::new("").unwrap();
- secstatus_to_res(nss::NSS_Initialize(
+ let path = dir.into();
+ assert!(path.is_dir());
+ let pathstr = path.to_str().expect("path converts to string").to_string();
+ let dircstr = CString::new(pathstr).unwrap();
+ let empty = CString::new("").unwrap();
+ secstatus_to_res(unsafe {
+ nss::NSS_Initialize(
dircstr.as_ptr(),
empty.as_ptr(),
empty.as_ptr(),
nss::SECMOD_DB.as_ptr().cast(),
nss::NSS_INIT_READONLY,
- ))
- .expect("NSS_Initialize failed");
-
- secstatus_to_res(nss::NSS_SetDomesticPolicy()).expect("NSS_SetDomesticPolicy failed");
- secstatus_to_res(ssl::SSL_ConfigServerSessionIDCache(
- 1024,
- 0,
- 0,
- dircstr.as_ptr(),
- ))
- .expect("SSL_ConfigServerSessionIDCache failed");
-
- #[cfg(debug_assertions)]
- enable_ssl_trace();
-
- NssLoaded::Db(path.into_boxed_path())
- });
- }
+ )
+ })
+ .expect("NSS_Initialize failed");
+
+ secstatus_to_res(unsafe { nss::NSS_SetDomesticPolicy() })
+ .expect("NSS_SetDomesticPolicy failed");
+ secstatus_to_res(unsafe {
+ ssl::SSL_ConfigServerSessionIDCache(1024, 0, 0, dircstr.as_ptr())
+ })
+ .expect("SSL_ConfigServerSessionIDCache failed");
+
+ #[cfg(debug_assertions)]
+ enable_ssl_trace();
+
+ NssLoaded::Db
+ });
}
/// # Panics
///
/// If NSS isn't initialized.
pub fn assert_initialized() {
- unsafe {
- INITIALIZED.call_once(|| {
- panic!("NSS not initialized with init or init_db");
- });
+ INITIALIZED
+ .get()
+ .expect("NSS not initialized with init or init_db");
+}
+
+/// NSS tends to return empty "slices" with a null pointer, which will cause
+/// `std::slice::from_raw_parts` to panic if passed directly. This wrapper avoids
+/// that issue. It also performs conversion for lengths, as a convenience.
+///
+/// # Panics
+/// If the provided length doesn't fit into a `usize`.
+///
+/// # Safety
+/// The caller must adhere to the safety constraints of `std::slice::from_raw_parts`,
+/// except that this will accept a null value for `data`.
+unsafe fn null_safe_slice<'a, T>(data: *const u8, len: T) -> &'a [u8]
+where
+ usize: TryFrom<T>,
+{
+ if data.is_null() {
+ &[]
+ } else if let Ok(len) = usize::try_from(len) {
+ #[allow(clippy::disallowed_methods)]
+ std::slice::from_raw_parts(data, len)
+ } else {
+ panic!("null_safe_slice: size overflow");
}
}
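The new null_safe_slice helper exists because NSS often returns an empty buffer as a null pointer, which std::slice::from_raw_parts must not be given directly. A standalone sketch of the helper and its behaviour; the body mirrors the patch, the demo values are made up:

// Null pointers become empty slices; lengths that don't fit a usize still panic.
unsafe fn null_safe_slice<'a, T>(data: *const u8, len: T) -> &'a [u8]
where
    usize: TryFrom<T>,
{
    if data.is_null() {
        &[]
    } else if let Ok(len) = usize::try_from(len) {
        std::slice::from_raw_parts(data, len)
    } else {
        panic!("null_safe_slice: size overflow");
    }
}

fn main() {
    let buf = [1u8, 2, 3];
    assert!(unsafe { null_safe_slice(std::ptr::null(), 0u32) }.is_empty());
    assert_eq!(unsafe { null_safe_slice(buf.as_ptr(), 3u32) }, &buf[..]);
}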
diff --git a/third_party/rust/neqo-crypto/src/once.rs b/third_party/rust/neqo-crypto/src/once.rs
deleted file mode 100644
index 80657cfe26..0000000000
--- a/third_party/rust/neqo-crypto/src/once.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::sync::Once;
-
-#[allow(clippy::module_name_repetitions)]
-pub struct OnceResult<T> {
- once: Once,
- v: Option<T>,
-}
-
-impl<T> OnceResult<T> {
- #[must_use]
- pub const fn new() -> Self {
- Self {
- once: Once::new(),
- v: None,
- }
- }
-
- pub fn call_once<F: FnOnce() -> T>(&mut self, f: F) -> &T {
- let v = &mut self.v;
- self.once.call_once(|| {
- *v = Some(f());
- });
- self.v.as_ref().unwrap()
- }
-}
-
-#[cfg(test)]
-mod test {
- use super::OnceResult;
-
- static mut STATIC_ONCE_RESULT: OnceResult<u64> = OnceResult::new();
-
- #[test]
- fn static_update() {
- assert_eq!(*unsafe { STATIC_ONCE_RESULT.call_once(|| 23) }, 23);
- assert_eq!(*unsafe { STATIC_ONCE_RESULT.call_once(|| 24) }, 23);
- }
-}
diff --git a/third_party/rust/neqo-crypto/src/p11.rs b/third_party/rust/neqo-crypto/src/p11.rs
index 508d240062..5552882e2e 100644
--- a/third_party/rust/neqo-crypto/src/p11.rs
+++ b/third_party/rust/neqo-crypto/src/p11.rs
@@ -10,7 +10,7 @@
#![allow(non_snake_case)]
use std::{
- convert::TryFrom,
+ cell::RefCell,
mem,
ops::{Deref, DerefMut},
os::raw::{c_int, c_uint},
@@ -19,7 +19,10 @@ use std::{
use neqo_common::hex_with_len;
-use crate::err::{secstatus_to_res, Error, Res};
+use crate::{
+ err::{secstatus_to_res, Error, Res},
+ null_safe_slice,
+};
#[allow(clippy::upper_case_acronyms)]
#[allow(clippy::unreadable_literal)]
@@ -139,7 +142,6 @@ impl PrivateKey {
/// When the values are too large to fit. So never.
pub fn key_data(&self) -> Res<Vec<u8>> {
let mut key_item = Item::make_empty();
- #[allow(clippy::useless_conversion)] // TODO: Remove when we bump the MSRV to 1.74.0.
secstatus_to_res(unsafe {
PK11_ReadRawAttribute(
PK11ObjectType::PK11_TypePrivKey,
@@ -148,9 +150,7 @@ impl PrivateKey {
&mut key_item,
)
})?;
- let slc = unsafe {
- std::slice::from_raw_parts(key_item.data, usize::try_from(key_item.len).unwrap())
- };
+ let slc = unsafe { null_safe_slice(key_item.data, key_item.len) };
let key = Vec::from(slc);
// The data that `key_item` refers to needs to be freed, but we can't
// use the scoped `Item` implementation. This is OK as long as nothing
@@ -206,7 +206,7 @@ impl SymKey {
// This is accessing a value attached to the key, so we can treat this as a borrow.
match unsafe { key_item.as_mut() } {
None => Err(Error::InternalError),
- Some(key) => Ok(unsafe { std::slice::from_raw_parts(key.data, key.len as usize) }),
+ Some(key) => Ok(unsafe { null_safe_slice(key.data, key.len) }),
}
}
}
@@ -285,36 +285,112 @@ impl Item {
let b = self.ptr.as_ref().unwrap();
// Sanity check the type, as some types don't count bytes in `Item::len`.
assert_eq!(b.type_, SECItemType::siBuffer);
- let slc = std::slice::from_raw_parts(b.data, usize::try_from(b.len).unwrap());
+ let slc = null_safe_slice(b.data, b.len);
Vec::from(slc)
}
}
-/// Generate a randomized buffer.
+/// Fill a buffer with randomness.
///
/// # Panics
///
/// When `size` is too large or NSS fails.
-#[must_use]
-pub fn random(size: usize) -> Vec<u8> {
- let mut buf = vec![0; size];
- secstatus_to_res(unsafe {
- PK11_GenerateRandom(buf.as_mut_ptr(), c_int::try_from(buf.len()).unwrap())
- })
- .unwrap();
+pub fn randomize<B: AsMut<[u8]>>(mut buf: B) -> B {
+ let m_buf = buf.as_mut();
+ let len = c_int::try_from(m_buf.len()).unwrap();
+ secstatus_to_res(unsafe { PK11_GenerateRandom(m_buf.as_mut_ptr(), len) }).unwrap();
buf
}
+struct RandomCache {
+ cache: [u8; Self::SIZE],
+ used: usize,
+}
+
+impl RandomCache {
+ const SIZE: usize = 256;
+ const CUTOFF: usize = 32;
+
+ fn new() -> Self {
+ RandomCache {
+ cache: [0; Self::SIZE],
+ used: Self::SIZE,
+ }
+ }
+
+ fn randomize<B: AsMut<[u8]>>(&mut self, mut buf: B) -> B {
+ let m_buf = buf.as_mut();
+ debug_assert!(m_buf.len() <= Self::CUTOFF);
+ let avail = Self::SIZE - self.used;
+ if m_buf.len() <= avail {
+ m_buf.copy_from_slice(&self.cache[self.used..self.used + m_buf.len()]);
+ self.used += m_buf.len();
+ } else {
+ if avail > 0 {
+ m_buf[..avail].copy_from_slice(&self.cache[self.used..]);
+ }
+ randomize(&mut self.cache[..]);
+ self.used = m_buf.len() - avail;
+ m_buf[avail..].copy_from_slice(&self.cache[..self.used]);
+ }
+ buf
+ }
+}
+
+/// Generate a randomized array.
+///
+/// # Panics
+///
+/// When `size` is too large or NSS fails.
+#[must_use]
+pub fn random<const N: usize>() -> [u8; N] {
+ thread_local!(static CACHE: RefCell<RandomCache> = RefCell::new(RandomCache::new()));
+
+ let buf = [0; N];
+ if N <= RandomCache::CUTOFF {
+ CACHE.with_borrow_mut(|c| c.randomize(buf))
+ } else {
+ randomize(buf)
+ }
+}
+
#[cfg(test)]
mod test {
use test_fixture::fixture_init;
- use super::random;
+ use super::RandomCache;
+ use crate::{random, randomize};
#[test]
fn randomness() {
fixture_init();
- // If this ever fails, there is either a bug, or it's time to buy a lottery ticket.
- assert_ne!(random(16), random(16));
+ // If any of these ever fail, there is either a bug, or it's time to buy a lottery ticket.
+ assert_ne!(random::<16>(), randomize([0; 16]));
+ assert_ne!([0; 16], random::<16>());
+ assert_ne!([0; 64], random::<64>());
+ }
+
+ #[test]
+ fn cache_random_lengths() {
+ const ZERO: [u8; 256] = [0; 256];
+
+ fixture_init();
+ let mut cache = RandomCache::new();
+ let mut buf = [0; 256];
+ let bits = usize::BITS - (RandomCache::CUTOFF - 1).leading_zeros();
+ let mask = 0xff >> (u8::BITS - bits);
+
+ for _ in 0..100 {
+ let len = loop {
+ let len = usize::from(random::<1>()[0] & mask) + 1;
+ if len <= RandomCache::CUTOFF {
+ break len;
+ }
+ };
+ buf.fill(0);
+ if len >= 16 {
+ assert_ne!(&cache.randomize(&mut buf[..len])[..len], &ZERO[..len]);
+ }
+ }
}
}
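A minimal usage sketch of the reworked randomness API from the hunks above: the const-generic `random::<N>()` for small arrays (served through the thread-local `RandomCache` when `N <= 32`) and `randomize` for filling an existing buffer. Both names are re-exported from the crate root, as the test's `use crate::{random, randomize};` shows; everything else in the snippet is illustrative and not part of this patch.

    // Sketch only; assumes NSS has already been initialized (e.g. via the test fixture).
    use neqo_crypto::{random, randomize};

    fn nonce_and_salt() -> ([u8; 16], [u8; 12]) {
        // Small arrays come out of the 256-byte thread-local cache.
        let nonce = random::<16>();
        // `randomize` accepts anything that is `AsMut<[u8]>` and hands it back,
        // so it works both in place and by value.
        let mut big = [0u8; 64];
        randomize(&mut big[..]);
        let salt = randomize([0u8; 12]);
        (nonce, salt)
    }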
diff --git a/third_party/rust/neqo-crypto/src/replay.rs b/third_party/rust/neqo-crypto/src/replay.rs
index d4d3677f5c..5fd6fd1250 100644
--- a/third_party/rust/neqo-crypto/src/replay.rs
+++ b/third_party/rust/neqo-crypto/src/replay.rs
@@ -5,7 +5,6 @@
// except according to those terms.
use std::{
- convert::{TryFrom, TryInto},
ops::{Deref, DerefMut},
os::raw::c_uint,
ptr::null_mut,
diff --git a/third_party/rust/neqo-crypto/src/selfencrypt.rs b/third_party/rust/neqo-crypto/src/selfencrypt.rs
index b8a63153fd..1130c35250 100644
--- a/third_party/rust/neqo-crypto/src/selfencrypt.rs
+++ b/third_party/rust/neqo-crypto/src/selfencrypt.rs
@@ -82,7 +82,7 @@ impl SelfEncrypt {
// opaque aead_encrypted(plaintext)[length as expanded];
// };
// AAD covers the entire header, plus the value of the AAD parameter that is provided.
- let salt = random(Self::SALT_LENGTH);
+ let salt = random::<{ Self::SALT_LENGTH }>();
let cipher = self.make_aead(&self.key, &salt)?;
let encoded_len = 2 + salt.len() + plaintext.len() + cipher.expansion();
diff --git a/third_party/rust/neqo-crypto/src/time.rs b/third_party/rust/neqo-crypto/src/time.rs
index 84dbfdb4a5..0e59c4f5e2 100644
--- a/third_party/rust/neqo-crypto/src/time.rs
+++ b/third_party/rust/neqo-crypto/src/time.rs
@@ -7,18 +7,16 @@
#![allow(clippy::upper_case_acronyms)]
use std::{
- boxed::Box,
- convert::{TryFrom, TryInto},
ops::Deref,
os::raw::c_void,
pin::Pin,
+ sync::OnceLock,
time::{Duration, Instant},
};
use crate::{
agentio::as_c_void,
err::{Error, Res},
- once::OnceResult,
ssl::{PRFileDesc, SSLTimeFunc},
};
@@ -67,14 +65,13 @@ impl TimeZero {
}
}
-static mut BASE_TIME: OnceResult<TimeZero> = OnceResult::new();
+static BASE_TIME: OnceLock<TimeZero> = OnceLock::new();
fn get_base() -> &'static TimeZero {
- let f = || TimeZero {
+ BASE_TIME.get_or_init(|| TimeZero {
instant: Instant::now(),
prtime: unsafe { PR_Now() },
- };
- unsafe { BASE_TIME.call_once(f) }
+ })
}
pub(crate) fn init() {
@@ -97,9 +94,8 @@ impl Deref for Time {
impl From<Instant> for Time {
/// Convert from an Instant into a Time.
fn from(t: Instant) -> Self {
- // Call `TimeZero::baseline(t)` so that time zero can be set.
- let f = || TimeZero::baseline(t);
- _ = unsafe { BASE_TIME.call_once(f) };
+ // Initialize `BASE_TIME` using `TimeZero::baseline(t)`.
+ BASE_TIME.get_or_init(|| TimeZero::baseline(t));
Self { t }
}
}
@@ -108,14 +104,17 @@ impl TryFrom<PRTime> for Time {
type Error = Error;
fn try_from(prtime: PRTime) -> Res<Self> {
let base = get_base();
- if let Some(delta) = prtime.checked_sub(base.prtime) {
- let d = Duration::from_micros(delta.try_into()?);
- base.instant
- .checked_add(d)
- .map_or(Err(Error::TimeTravelError), |t| Ok(Self { t }))
+ let delta = prtime
+ .checked_sub(base.prtime)
+ .ok_or(Error::TimeTravelError)?;
+ let d = Duration::from_micros(u64::try_from(delta.abs())?);
+ let t = if delta >= 0 {
+ base.instant.checked_add(d)
} else {
- Err(Error::TimeTravelError)
- }
+ base.instant.checked_sub(d)
+ };
+ let t = t.ok_or(Error::TimeTravelError)?;
+ Ok(Self { t })
}
}
@@ -123,14 +122,21 @@ impl TryInto<PRTime> for Time {
type Error = Error;
fn try_into(self) -> Res<PRTime> {
let base = get_base();
- let delta = self
- .t
- .checked_duration_since(base.instant)
- .ok_or(Error::TimeTravelError)?;
- if let Ok(d) = PRTime::try_from(delta.as_micros()) {
- d.checked_add(base.prtime).ok_or(Error::TimeTravelError)
+
+ if let Some(delta) = self.t.checked_duration_since(base.instant) {
+ if let Ok(d) = PRTime::try_from(delta.as_micros()) {
+ d.checked_add(base.prtime).ok_or(Error::TimeTravelError)
+ } else {
+ Err(Error::TimeTravelError)
+ }
} else {
- Err(Error::TimeTravelError)
+ // Try to go backwards from the base time.
+ let backwards = base.instant - self.t; // infallible
+ if let Ok(d) = PRTime::try_from(backwards.as_micros()) {
+ base.prtime.checked_sub(d).ok_or(Error::TimeTravelError)
+ } else {
+ Err(Error::TimeTravelError)
+ }
}
}
}
@@ -207,10 +213,7 @@ impl Default for TimeHolder {
#[cfg(test)]
mod test {
- use std::{
- convert::{TryFrom, TryInto},
- time::{Duration, Instant},
- };
+ use std::time::{Duration, Instant};
use super::{get_base, init, Interval, PRTime, Time};
use crate::err::Res;
@@ -228,16 +231,23 @@ mod test {
}
#[test]
- fn past_time() {
+ fn past_prtime() {
+ const DELTA: Duration = Duration::from_secs(1);
init();
let base = get_base();
- assert!(Time::try_from(base.prtime - 1).is_err());
+ let delta_micros = PRTime::try_from(DELTA.as_micros()).unwrap();
+ println!("{} - {}", base.prtime, delta_micros);
+ let t = Time::try_from(base.prtime - delta_micros).unwrap();
+ assert_eq!(Instant::from(t) + DELTA, base.instant);
}
#[test]
- fn negative_time() {
+ fn past_instant() {
+ const DELTA: Duration = Duration::from_secs(1);
init();
- assert!(Time::try_from(-1).is_err());
+ let base = get_base();
+ let t = Time::from(base.instant.checked_sub(DELTA).unwrap());
+ assert_eq!(Instant::from(t) + DELTA, base.instant);
}
#[test]
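The time.rs changes above swap the crate's hand-rolled `OnceResult` (a `static mut` that every caller had to touch through `unsafe`) for `std::sync::OnceLock`. In isolation the pattern looks like this generic sketch, which is illustrative and not taken from the patch:

    use std::sync::OnceLock;

    static BASE: OnceLock<u64> = OnceLock::new();

    fn base() -> u64 {
        // `get_or_init` runs the closure at most once, even if several threads
        // race here; later calls return the already-stored value. No `unsafe`,
        // no `static mut`.
        *BASE.get_or_init(|| 42)
    }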
diff --git a/third_party/rust/neqo-crypto/tests/aead.rs b/third_party/rust/neqo-crypto/tests/aead.rs
index 0ee1e66c38..5cf0034aec 100644
--- a/third_party/rust/neqo-crypto/tests/aead.rs
+++ b/third_party/rust/neqo-crypto/tests/aead.rs
@@ -1,4 +1,9 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
#![warn(clippy::pedantic)]
#![cfg(not(feature = "fuzzing"))]
diff --git a/third_party/rust/neqo-crypto/tests/agent.rs b/third_party/rust/neqo-crypto/tests/agent.rs
index c2c83c467c..80bf816930 100644
--- a/third_party/rust/neqo-crypto/tests/agent.rs
+++ b/third_party/rust/neqo-crypto/tests/agent.rs
@@ -1,7 +1,8 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-
-use std::boxed::Box;
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
use neqo_crypto::{
generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo,
diff --git a/third_party/rust/neqo-crypto/tests/ext.rs b/third_party/rust/neqo-crypto/tests/ext.rs
index 9ae81133f5..c8732dd014 100644
--- a/third_party/rust/neqo-crypto/tests/ext.rs
+++ b/third_party/rust/neqo-crypto/tests/ext.rs
@@ -1,5 +1,8 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
use std::{cell::RefCell, rc::Rc};
diff --git a/third_party/rust/neqo-crypto/tests/handshake.rs b/third_party/rust/neqo-crypto/tests/handshake.rs
index b2d8b9cc34..3cb31337fd 100644
--- a/third_party/rust/neqo-crypto/tests/handshake.rs
+++ b/third_party/rust/neqo-crypto/tests/handshake.rs
@@ -1,4 +1,12 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
#![allow(dead_code)]
+#![allow(clippy::missing_panics_doc)]
+#![allow(clippy::missing_errors_doc)]
use std::{mem, time::Instant};
@@ -127,6 +135,7 @@ fn zero_rtt_setup(
}
}
+#[must_use]
pub fn resumption_setup(mode: Resumption) -> (Option<AntiReplay>, ResumptionToken) {
fixture_init();
diff --git a/third_party/rust/neqo-crypto/tests/hkdf.rs b/third_party/rust/neqo-crypto/tests/hkdf.rs
index b4dde482f8..acb5bbdda8 100644
--- a/third_party/rust/neqo-crypto/tests/hkdf.rs
+++ b/third_party/rust/neqo-crypto/tests/hkdf.rs
@@ -1,5 +1,8 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
use neqo_crypto::{
constants::{
diff --git a/third_party/rust/neqo-crypto/tests/hp.rs b/third_party/rust/neqo-crypto/tests/hp.rs
index 43b96869d8..da7df2cc19 100644
--- a/third_party/rust/neqo-crypto/tests/hp.rs
+++ b/third_party/rust/neqo-crypto/tests/hp.rs
@@ -1,5 +1,8 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
use std::mem;
diff --git a/third_party/rust/neqo-crypto/tests/init.rs b/third_party/rust/neqo-crypto/tests/init.rs
index 21291ceebb..13218cc340 100644
--- a/third_party/rust/neqo-crypto/tests/init.rs
+++ b/third_party/rust/neqo-crypto/tests/init.rs
@@ -1,5 +1,8 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
// This uses external interfaces to neqo_crypto rather than being a module
// inside of lib.rs. Because all other code uses the test_fixture module,
diff --git a/third_party/rust/neqo-crypto/tests/selfencrypt.rs b/third_party/rust/neqo-crypto/tests/selfencrypt.rs
index fd9d4ea1ea..4c574a3ae9 100644
--- a/third_party/rust/neqo-crypto/tests/selfencrypt.rs
+++ b/third_party/rust/neqo-crypto/tests/selfencrypt.rs
@@ -1,5 +1,9 @@
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
#![cfg(not(feature = "fuzzing"))]
use neqo_crypto::{
diff --git a/third_party/rust/neqo-http3/.cargo-checksum.json b/third_party/rust/neqo-http3/.cargo-checksum.json
index 2705291744..0459fea7cc 100644
--- a/third_party/rust/neqo-http3/.cargo-checksum.json
+++ b/third_party/rust/neqo-http3/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"fe3c1114cfbb94004bf56740c0d373568cb459efdb12504e67f31923fbd436e1","src/buffered_send_stream.rs":"f45bdf9ad2a04b3828c74ff5440681d3c9d1af39b55470e4f729842dc2412295","src/client_events.rs":"e1392e7bbb62fb0505a4d8bcd27559699bbf38f3c94e7d8cae7291db82e6334c","src/conn_params.rs":"224a8ea6ef632930a7788a1cabf47ce69ad41bd4bc8dcf3053fbd998fdb38e82","src/connection.rs":"09aeb123f8dc6b903dd7d30579e5bb09ed8f70bfae563fb2fcc1871c67d604d4","src/connection_client.rs":"ed1c9ebf443f49dbf12c193953a71ec0e6b95555e1927afce813d2a8324758be","src/connection_server.rs":"ca33b50650bd1ca2a952851b72712d55ec2e48b48f1f06e4184c808b8e1e009a","src/control_stream_local.rs":"d6ecc0adc926e1d5cec9a378317f9dfcfeeb9840a0873a2afb380c2d252d8c54","src/control_stream_remote.rs":"59eb4041e366d92f9f294e8446755caa5e91fd943bba7b79b726698ba13be248","src/features/extended_connect/mod.rs":"3b02f6b18627f3855465a81b1d9b285e6f13839e75a8a6db648ed9082908d7f0","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"7e3bdd591b9c7d02f69954629f889d52bd54f13dbca11d555e614138c2a55107","src/features/extended_connect/tests/webtransport/mod.rs":"fed03f0ded21a9f17be5be99e4572e16dd0c8598e37044df3228990ea7fcc9f4","src/features/extended_connect/tests/webtransport/negotiation.rs":"98254ef8446581ec520026b04ef9549645602181b61602c9936f6660141edf0b","src/features/extended_connect/tests/webtransport/sessions.rs":"de3d836f666c2bec31e70b33bdc2669572cabbe17df2225db7282613a224a364","src/features/extended_connect/tests/webtransport/streams.rs":"8b3c34cac1b2171252a4bb53d420ac2098549a20309c327bf56e2e9ba9e33538","src/features/extended_connect/webtransport_session.rs":"a6472eca50a2d097aa6ba8a76b45ae69fe2edd2696b2953945faa3ce6e7417f9","src/features/extended_connect/webtransport_streams.rs":"a9a106eefc93a9f6e9e1c246df64904353de1c4fbcd394b338e6b117f6c677f5","src/features/mod.rs":"925aae4427ad82e4d019354802b223d53db5e5585d4a940f5417a24a9503d7ee","src/frames/hframe.rs":"726842108261c9af1e7576bc546e7bd7bea86fbef4a5804f4b45a2b4612e2679","src/frames/mod.rs":"7d0a46ca147336d14781edb8dbee8b03c2e4bcd6646f5473a9d93d31fe73fecb","src/frames/reader.rs":"4883e25064da1fb3a6ae46b5d15e6bcfec9c5bbea55a1937ecdb9465b62a93b2","src/frames/tests/hframe.rs":"01ec74eb3eb25d95042aa0263f9267f89535e6b7b8c1161fab4ba9ee5352d4a7","src/frames/tests/mod.rs":"0610609b316767a6a022837d32ee0452e37ea296fde37e51bec87e7c77e923a3","src/frames/tests/reader.rs":"2bfadc7afbc41bff9f5f930b31550259a8a92484d35f6c5d8dd8fd9acfb88f5b","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"c80518d1569de277767c7ccb7441898aadbfc5fb2afb968c1d5105f8d175ccff","src/headers_checks.rs":"44891c16dda6b7ef742058ecb0a8d34e219c51cae1216c09c661cf72d9a5e7d5","src/lib.rs":"ed8da14e573cc5a97afb012a78af7f076eb83b5cc20cb4fe432eb7136a3ffe52","src/priority.rs":"10d9dcfcd4585f2ca256daf254c78a428297c41976c6548f19cd3ed2222b7cd2","src/push_controller.rs":"eb27c7c2a52c6108c0e4d040b021775a2b573f32d78b7ac8652ff46fd549f780","src/qlog.rs":"b1e6108b018abb077f218d1806e0a83370afa87709e26b3d51f482ae5d9b9c82","src/qpack_decoder_receiver.rs":"c927dfc3e58c71d282210ba79280f6f03e789733bc3bedc247e68bab516b9e9e","src/qpack_encoder_receiver.rs":"d0ac03cc111b6e1c555a8654d3234116f2b135b5b040edac23cefe2d640beba9","src/recv_message.rs":"06666c22101cda41de14682dc7e2e6721f2821bd45baefc22caceae4ccfcf2e0","src/request_target.rs":"6041a69a0a74969ec08bc164509c055e9bad9
9f53bbeb16c0aa17d108dd68b8c","src/send_message.rs":"70f8a91d85515f42a64a88bd2a9480175b12596bc082f77587cc5bcff9ce996c","src/server.rs":"ab6d4c80cb5f6c070f74d8df27e7bd62d5c8a8e7756ff9d1a31d3f9ff91327a1","src/server_connection_events.rs":"12d353ca6301467f6d475dde3b789951a5716c89ddd7dbf1383efef8082361f3","src/server_events.rs":"c96cff96d5893a9ab7165d17e3d1afaafc5492418b30003c1c26ca8f489ab7ca","src/settings.rs":"476b154b5eea4c8d69a4a790fee3e527cef4d375df1cfb5eed04ec56406fe15a","src/stream_type_reader.rs":"7a7226b7911d69f7e00ec4987c2a32a5e8a33463203398cbee1e6645d2691478","tests/httpconn.rs":"bb6927801a8c75e4f05eb6cdb1e7f2d57be69b74e68ddad2a1614f2aeed04369","tests/priority.rs":"3418be17fbdfdbcfd80dc4532f9365f405925442fabc916f2b22f90aee89629f","tests/send_message.rs":"1e893216d9252e6fb69a0fb291b4f8b8ea954847c346ff7f9347d7895618cabf","tests/webtransport.rs":"cb30d348c0ce05efb722abac3b1c524216fa4cbde8b62a1d1e3238c3fadecbe7"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"458f04261cda071d61402c52cf64062ad7cfc24f3f312bfaa5d52cae47409010","src/buffered_send_stream.rs":"f45bdf9ad2a04b3828c74ff5440681d3c9d1af39b55470e4f729842dc2412295","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"224a8ea6ef632930a7788a1cabf47ce69ad41bd4bc8dcf3053fbd998fdb38e82","src/connection.rs":"9384cdfd8481a30a0cd13f56f590188ccfa47b4472f35f7a4978537bab19adc1","src/connection_client.rs":"8db29409f3a265f7dff7c7a7eaf2ac607d6923e4b3238e82eab6dc22854e4303","src/connection_server.rs":"ca33b50650bd1ca2a952851b72712d55ec2e48b48f1f06e4184c808b8e1e009a","src/control_stream_local.rs":"ae52e3286f1686ca1265e7de841392addd42616db02799bb967a59feb6039cb5","src/control_stream_remote.rs":"59eb4041e366d92f9f294e8446755caa5e91fd943bba7b79b726698ba13be248","src/features/extended_connect/mod.rs":"3b02f6b18627f3855465a81b1d9b285e6f13839e75a8a6db648ed9082908d7f0","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"4c85a90afb753ce588e3fdeb773669bc49c013aebc28912340359eb01b74fd70","src/features/extended_connect/tests/webtransport/mod.rs":"a30ea715f5271a826a739278b18e145964dedbce7026eed45f1b7d0355c407d5","src/features/extended_connect/tests/webtransport/negotiation.rs":"98254ef8446581ec520026b04ef9549645602181b61602c9936f6660141edf0b","src/features/extended_connect/tests/webtransport/sessions.rs":"de3d836f666c2bec31e70b33bdc2669572cabbe17df2225db7282613a224a364","src/features/extended_connect/tests/webtransport/streams.rs":"8b3c34cac1b2171252a4bb53d420ac2098549a20309c327bf56e2e9ba9e33538","src/features/extended_connect/webtransport_session.rs":"239d92c06fbc5f6226078bb411a803f57b555dea0077349d49d7f57671cf2eab","src/features/extended_connect/webtransport_streams.rs":"5d7507aaf6a819d266fbea9b7a415c8324329df0f6936d9045b73e17a5b844ee","src/features/mod.rs":"925aae4427ad82e4d019354802b223d53db5e5585d4a940f5417a24a9503d7ee","src/frames/hframe.rs":"56c36ac597504f28c73cf2370acd82104f8c7a7b9ffc0f6d222378abc524482d","src/frames/mod.rs":"7d0a46ca147336d14781edb8dbee8b03c2e4bcd6646f5473a9d93d31fe73fecb","src/frames/reader.rs":"e07ee9de74bc499c10afcda592fefd9a7eef3381c045aa14f6596d67313546ca","src/frames/tests/hframe.rs":"01ec74eb3eb25d95042aa0263f9267f89535e6b7b8c1161fab4ba9ee5352d4a7","src/frames/tests/mod.rs":"0610609b316767a6a022837d32ee0452e37ea296fde37e51bec87e7c77e923a3","src/frames/tests/reader.rs":"2bfadc7afbc41bff9f5f930b31550259a8a92484d35f6c5d8dd8fd9acfb88f5b","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"1d9d0256ace2ba7262343ed035df795f21a4d45065792d3fd45b3391b6916b2f","src/headers_checks.rs":"be0f0109298dcc3a40350b7c0950076ddfe20617d195b305e3ffc8582557ab18","src/lib.rs":"4f908a021222bcc79b9d569bc3759a493379a20b47dfa228fddf51600bf6e446","src/priority.rs":"f3b77c208962e44a4e2d13138c6998b703d40e7bcf8f73ea84d8ef5b556e0aee","src/push_controller.rs":"13bccf2834ae19109504cf695a5948c3b2d03fd101bc032a92bb77a033423854","src/qlog.rs":"2debd75c7ea103c95ff79e44412f1408c3e496e324976100c55d5a833912b6c3","src/qpack_decoder_receiver.rs":"c927dfc3e58c71d282210ba79280f6f03e789733bc3bedc247e68bab516b9e9e","src/qpack_encoder_receiver.rs":"d0ac03cc111b6e1c555a8654d3234116f2b135b5b040edac23cefe2d640beba9","src/recv_message.rs":"eb711dbc6b3371373c26b75333ac5858edf0d30184b0e05d67ab02c656eb6619","src/request_target.rs":"6041a69a0a74969ec08bc164509c055e9bad9
9f53bbeb16c0aa17d108dd68b8c","src/send_message.rs":"7785af11b77cee398faf3f7a2875b41e251ed7a1b272c23f81a48334596ab836","src/server.rs":"b9e6060da36cfb467478f5b78b17e22a123214ad2d64c919ce688ea2bc0e24bb","src/server_connection_events.rs":"12d353ca6301467f6d475dde3b789951a5716c89ddd7dbf1383efef8082361f3","src/server_events.rs":"463dd2cb6f97a800bac32c93c4aa2a6289f71e33a89f3b33152460cb941fc378","src/settings.rs":"476b154b5eea4c8d69a4a790fee3e527cef4d375df1cfb5eed04ec56406fe15a","src/stream_type_reader.rs":"7a7226b7911d69f7e00ec4987c2a32a5e8a33463203398cbee1e6645d2691478","tests/httpconn.rs":"bb6927801a8c75e4f05eb6cdb1e7f2d57be69b74e68ddad2a1614f2aeed04369","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"b5435045b16429d9e626ea94a8f10e2937e1a5a878af0035763a4f5ec09bf53c","tests/webtransport.rs":"25794305017ff58e57dc3c3b9b078e5bfc1814ea82a521b7b7156228e613c092"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/neqo-http3/Cargo.toml b/third_party/rust/neqo-http3/Cargo.toml
index 8eeb2a58bf..12b9a125d0 100644
--- a/third_party/rust/neqo-http3/Cargo.toml
+++ b/third_party/rust/neqo-http3/Cargo.toml
@@ -10,22 +10,24 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
-rust-version = "1.70.0"
+edition = "2021"
+rust-version = "1.74.0"
name = "neqo-http3"
-version = "0.7.0"
-authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
+version = "0.7.2"
+authors = ["The Neqo Authors <necko@mozilla.com>"]
+homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
+repository = "https://github.com/mozilla/neqo/"
-[dependencies]
-enumset = "1.1.2"
-lazy_static = "1.4"
-sfv = "0.9.3"
-smallvec = "1.11.1"
-url = "2.5"
+[lib]
+bench = false
+
+[dependencies.enumset]
+version = "1.1"
+default-features = false
[dependencies.log]
-version = "0.4.17"
+version = "0.4"
default-features = false
[dependencies.neqo-common]
@@ -41,15 +43,30 @@ path = "./../neqo-qpack"
path = "./../neqo-transport"
[dependencies.qlog]
-git = "https://github.com/cloudflare/quiche"
-rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1"
+version = "0.12"
+default-features = false
+
+[dependencies.sfv]
+version = "0.9"
+default-features = false
+
+[dependencies.smallvec]
+version = "1.11"
+default-features = false
+
+[dependencies.url]
+version = "2.5"
+default-features = false
[dev-dependencies.test-fixture]
path = "../test-fixture"
[features]
-deny-warnings = []
fuzzing = [
"neqo-transport/fuzzing",
"neqo-crypto/fuzzing",
]
+
+[lints.clippy.pedantic]
+level = "warn"
+priority = -1
diff --git a/third_party/rust/neqo-http3/src/client_events.rs b/third_party/rust/neqo-http3/src/client_events.rs
index 4b2ebc6c30..61aba8f9f1 100644
--- a/third_party/rust/neqo-http3/src/client_events.rs
+++ b/third_party/rust/neqo-http3/src/client_events.rs
@@ -61,7 +61,7 @@ pub enum Http3ClientEvent {
error: AppError,
local: bool,
},
- /// Peer has sent a STOP_SENDING.
+ /// Peer has sent a `STOP_SENDING`.
StopSending {
stream_id: StreamId,
error: AppError,
@@ -83,7 +83,7 @@ pub enum Http3ClientEvent {
PushDataReadable { push_id: u64 },
/// A push has been canceled.
PushCanceled { push_id: u64 },
- /// A push stream was been reset due to a HttpGeneralProtocol error.
+ /// A push stream was reset due to a `HttpGeneralProtocol` error.
/// Most common case are malformed response headers.
PushReset { push_id: u64, error: AppError },
/// New stream can be created
@@ -102,7 +102,7 @@ pub enum Http3ClientEvent {
GoawayReceived,
/// Connection state change.
StateChange(Http3State),
- /// WebTransport events
+ /// `WebTransport` events
WebTransport(WebTransportEvent),
}
diff --git a/third_party/rust/neqo-http3/src/connection.rs b/third_party/rust/neqo-http3/src/connection.rs
index bb2b5a6ce0..287ea2c2af 100644
--- a/third_party/rust/neqo-http3/src/connection.rs
+++ b/third_party/rust/neqo-http3/src/connection.rs
@@ -835,9 +835,6 @@ impl Http3Connection {
final_headers.push(Header::new(":protocol", conn_type.string()));
}
- if let Some(priority_header) = request.priority.header() {
- final_headers.push(priority_header);
- }
final_headers.extend_from_slice(request.headers);
Ok(final_headers)
}
diff --git a/third_party/rust/neqo-http3/src/connection_client.rs b/third_party/rust/neqo-http3/src/connection_client.rs
index 5cc0541c0c..52572a760d 100644
--- a/third_party/rust/neqo-http3/src/connection_client.rs
+++ b/third_party/rust/neqo-http3/src/connection_client.rs
@@ -6,7 +6,6 @@
use std::{
cell::RefCell,
- convert::TryFrom,
fmt::{Debug, Display},
mem,
net::SocketAddr,
@@ -894,13 +893,6 @@ impl Http3Client {
self.process_http3(now);
}
- /// This should not be used because it gives access to functionalities that may disrupt the
- /// proper functioning of the HTTP/3 session.
- /// Only used by `neqo-interop`.
- pub fn conn(&mut self) -> &mut Connection {
- &mut self.conn
- }
-
/// Process HTTP3 layer.
/// When `process_output`, `process_input`, or `process` is called we must call this function
/// as well. The functions calls `Http3Client::check_connection_events` to handle events from
@@ -943,12 +935,12 @@ impl Http3Client {
/// returned. After that, the application should call the function again if a new UDP packet is
/// received and processed or the timer value expires.
///
- /// The HTTP/3 neqo implementation drives the HTTP/3 and QUC layers, therefore this function
+ /// The HTTP/3 neqo implementation drives the HTTP/3 and QUIC layers, therefore this function
/// will call both layers:
/// - First it calls HTTP/3 layer processing (`process_http3`) to make sure the layer writes
/// data to QUIC layer or cancels streams if needed.
/// - Then QUIC layer processing is called - [`Connection::process_output`][3]. This produces a
- /// packet or a timer value. It may also produce ned [`ConnectionEvent`][2]s, e.g. connection
+ /// packet or a timer value. It may also produce new [`ConnectionEvent`][2]s, e.g. connection
/// state-change event.
/// - Therefore the HTTP/3 layer processing (`process_http3`) is called again.
///
@@ -1296,7 +1288,7 @@ impl EventProvider for Http3Client {
#[cfg(test)]
mod tests {
- use std::{convert::TryFrom, mem, time::Duration};
+ use std::{mem, time::Duration};
use neqo_common::{event::Provider, qtrace, Datagram, Decoder, Encoder};
use neqo_crypto::{AllowZeroRtt, AntiReplay, ResumptionToken};
@@ -1306,8 +1298,9 @@ mod tests {
StreamType, Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE,
};
use test_fixture::{
- addr, anti_replay, default_server_h3, fixture_init, new_server, now,
- CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME,
+ anti_replay, default_server_h3, fixture_init, new_server, now,
+ CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3, DEFAULT_KEYS,
+ DEFAULT_SERVER_NAME,
};
use super::{
@@ -1340,8 +1333,8 @@ mod tests {
Http3Client::new(
DEFAULT_SERVER_NAME,
Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
Http3Parameters::default()
.connection_parameters(
// Disable compatible upgrade, which complicates tests.
@@ -1996,7 +1989,7 @@ mod tests {
// The response header from PUSH_DATA (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x34) are
// decoded into:
fn check_push_response_header(header: &[Header]) {
- let expected_push_response_header = vec![
+ let expected_push_response_header = [
Header::new(":status", "200"),
Header::new("content-length", "4"),
];
@@ -3952,7 +3945,7 @@ mod tests {
);
}
x => {
- panic!("event {:?}", x);
+ panic!("event {x:?}");
}
}
@@ -3998,7 +3991,7 @@ mod tests {
assert!(fin);
}
x => {
- panic!("event {:?}", x);
+ panic!("event {x:?}");
}
}
// Stream should now be closed and gone
@@ -4071,7 +4064,7 @@ mod tests {
assert_eq!(stream_id, request_stream_id);
}
x => {
- panic!("event {:?}", x);
+ panic!("event {x:?}");
}
}
}
@@ -4135,7 +4128,7 @@ mod tests {
assert!(!interim);
recv_header = true;
} else {
- panic!("event {:?}", e);
+ panic!("event {e:?}");
}
}
assert!(recv_header);
diff --git a/third_party/rust/neqo-http3/src/control_stream_local.rs b/third_party/rust/neqo-http3/src/control_stream_local.rs
index 62676ee391..2f336c63a4 100644
--- a/third_party/rust/neqo-http3/src/control_stream_local.rs
+++ b/third_party/rust/neqo-http3/src/control_stream_local.rs
@@ -4,10 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{
- collections::{HashMap, VecDeque},
- convert::TryFrom,
-};
+use std::collections::{HashMap, VecDeque};
use neqo_common::{qtrace, Encoder};
use neqo_transport::{Connection, StreamId, StreamType};
diff --git a/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs b/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs
index 1c58596dd3..27b7d2b2f2 100644
--- a/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs
+++ b/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs
@@ -4,8 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::convert::TryFrom;
-
use neqo_common::Encoder;
use neqo_transport::Error as TransportError;
diff --git a/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs
index 51dc47e4c1..3753c3122d 100644
--- a/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs
+++ b/third_party/rust/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs
@@ -14,7 +14,7 @@ use neqo_common::event::Provider;
use neqo_crypto::AuthenticationStatus;
use neqo_transport::{ConnectionParameters, StreamId, StreamType};
use test_fixture::{
- addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3,
+ anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3,
DEFAULT_KEYS, DEFAULT_SERVER_NAME,
};
@@ -38,8 +38,8 @@ pub fn default_http3_client(client_params: Http3Parameters) -> Http3Client {
Http3Client::new(
DEFAULT_SERVER_NAME,
Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
client_params,
now(),
)
diff --git a/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_session.rs b/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_session.rs
index adbdf07e11..5e89225956 100644
--- a/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_session.rs
+++ b/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_session.rs
@@ -4,13 +4,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![allow(clippy::module_name_repetitions)]
-
-use std::{any::Any, cell::RefCell, collections::BTreeSet, mem, rc::Rc};
+use std::{cell::RefCell, collections::BTreeSet, mem, rc::Rc};
use neqo_common::{qtrace, Encoder, Header, MessageType, Role};
use neqo_qpack::{QPackDecoder, QPackEncoder};
-use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId};
+use neqo_transport::{Connection, DatagramTracking, StreamId};
use super::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason};
use crate::{
@@ -473,10 +471,6 @@ impl HttpRecvStream for Rc<RefCell<WebTransportSession>> {
fn priority_update_sent(&mut self) {
self.borrow_mut().priority_update_sent();
}
-
- fn any(&self) -> &dyn Any {
- self
- }
}
impl SendStream for Rc<RefCell<WebTransportSession>> {
@@ -492,16 +486,6 @@ impl SendStream for Rc<RefCell<WebTransportSession>> {
self.borrow_mut().has_data_to_send()
}
- fn set_sendorder(&mut self, _conn: &mut Connection, _sendorder: Option<SendOrder>) -> Res<()> {
- // Not relevant on session
- Ok(())
- }
-
- fn set_fairness(&mut self, _conn: &mut Connection, _fairness: bool) -> Res<()> {
- // Not relevant on session
- Ok(())
- }
-
fn stream_writable(&self) {}
fn done(&self) -> bool {
diff --git a/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_streams.rs b/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_streams.rs
index 84dcd20618..cdc692b8d7 100644
--- a/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_streams.rs
+++ b/third_party/rust/neqo-http3/src/features/extended_connect/webtransport_streams.rs
@@ -215,16 +215,6 @@ impl SendStream for WebTransportSendStream {
}
}
- fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option<i64>) -> Res<()> {
- conn.stream_sendorder(self.stream_id, sendorder)
- .map_err(|_| crate::Error::InvalidStreamId)
- }
-
- fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()> {
- conn.stream_fairness(self.stream_id, fairness)
- .map_err(|_| crate::Error::InvalidStreamId)
- }
-
fn handle_stop_sending(&mut self, close_type: CloseType) {
self.set_done(close_type);
}
diff --git a/third_party/rust/neqo-http3/src/frames/hframe.rs b/third_party/rust/neqo-http3/src/frames/hframe.rs
index 83e69ba894..e69f7b449e 100644
--- a/third_party/rust/neqo-http3/src/frames/hframe.rs
+++ b/third_party/rust/neqo-http3/src/frames/hframe.rs
@@ -74,10 +74,7 @@ impl HFrame {
Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID,
Self::PriorityUpdateRequest { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_REQUEST,
Self::PriorityUpdatePush { .. } => H3_FRAME_TYPE_PRIORITY_UPDATE_PUSH,
- Self::Grease => {
- let r = random(7);
- Decoder::from(&r).decode_uint(7).unwrap() * 0x1f + 0x21
- }
+ Self::Grease => Decoder::from(&random::<7>()).decode_uint(7).unwrap() * 0x1f + 0x21,
}
}
@@ -120,7 +117,7 @@ impl HFrame {
}
Self::Grease => {
// Encode some number of random bytes.
- let r = random(8);
+ let r = random::<8>();
enc.encode_vvec(&r[1..usize::from(1 + (r[0] & 0x7))]);
}
Self::PriorityUpdateRequest {
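In the `Grease` arm above, seven random bytes are decoded into an integer and mapped onto HTTP/3's reserved frame-type space: types of the form 0x1f * N + 0x21 are set aside for greasing, so scaling by 0x1f and offsetting by 0x21 always lands on a reserved value. A small illustrative sketch of the same mapping (the function name here is made up for the example):

    // Map an arbitrary integer onto an HTTP/3 reserved ("grease") frame type,
    // mirroring the expression in HFrame::get_type() above.
    fn grease_frame_type(n: u64) -> u64 {
        n * 0x1f + 0x21
    }

    // grease_frame_type(0) == 0x21, grease_frame_type(1) == 0x40, and so on.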
diff --git a/third_party/rust/neqo-http3/src/frames/reader.rs b/third_party/rust/neqo-http3/src/frames/reader.rs
index 5017c666a4..1a086683cf 100644
--- a/third_party/rust/neqo-http3/src/frames/reader.rs
+++ b/third_party/rust/neqo-http3/src/frames/reader.rs
@@ -6,7 +6,7 @@
#![allow(clippy::module_name_repetitions)]
-use std::{convert::TryFrom, fmt::Debug};
+use std::fmt::Debug;
use neqo_common::{
hex_with_len, qtrace, Decoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore,
diff --git a/third_party/rust/neqo-http3/src/frames/wtframe.rs b/third_party/rust/neqo-http3/src/frames/wtframe.rs
index deb7a026a0..20e9b81936 100644
--- a/third_party/rust/neqo-http3/src/frames/wtframe.rs
+++ b/third_party/rust/neqo-http3/src/frames/wtframe.rs
@@ -4,8 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::convert::TryFrom;
-
use neqo_common::{Decoder, Encoder};
use crate::{frames::reader::FrameDecoder, Error, Res};
diff --git a/third_party/rust/neqo-http3/src/headers_checks.rs b/third_party/rust/neqo-http3/src/headers_checks.rs
index 9bf661c8fe..2dbf43cd32 100644
--- a/third_party/rust/neqo-http3/src/headers_checks.rs
+++ b/third_party/rust/neqo-http3/src/headers_checks.rs
@@ -6,8 +6,6 @@
#![allow(clippy::unused_unit)] // see https://github.com/Lymia/enumset/issues/44
-use std::convert::TryFrom;
-
use enumset::{enum_set, EnumSet, EnumSetType};
use neqo_common::Header;
diff --git a/third_party/rust/neqo-http3/src/lib.rs b/third_party/rust/neqo-http3/src/lib.rs
index 635707ca7c..8272151cc1 100644
--- a/third_party/rust/neqo-http3/src/lib.rs
+++ b/third_party/rust/neqo-http3/src/lib.rs
@@ -4,8 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
+#![allow(clippy::module_name_repetitions)] // This lint doesn't work here.
/*!
@@ -160,7 +159,7 @@ mod server_events;
mod settings;
mod stream_type_reader;
-use std::{any::Any, cell::RefCell, fmt::Debug, rc::Rc};
+use std::{cell::RefCell, fmt::Debug, rc::Rc};
use buffered_send_stream::BufferedStream;
pub use client_events::{Http3ClientEvent, WebTransportEvent};
@@ -433,20 +432,15 @@ pub enum Http3StreamType {
}
#[must_use]
-#[derive(PartialEq, Eq, Debug)]
+#[derive(Default, PartialEq, Eq, Debug)]
enum ReceiveOutput {
+ #[default]
NoOutput,
ControlFrames(Vec<HFrame>),
UnblockedStreams(Vec<StreamId>),
NewStream(NewStreamType),
}
-impl Default for ReceiveOutput {
- fn default() -> Self {
- Self::NoOutput
- }
-}
-
trait Stream: Debug {
fn stream_type(&self) -> Http3StreamType;
}
@@ -509,8 +503,6 @@ trait HttpRecvStream: RecvStream {
fn extended_connect_wait_for_response(&self) -> bool {
false
}
-
- fn any(&self) -> &dyn Any;
}
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
@@ -567,27 +559,25 @@ trait HttpRecvStreamEvents: RecvStreamEvents {
trait SendStream: Stream {
/// # Errors
///
- /// Error my occur during sending data, e.g. protocol error, etc.
+ /// Error may occur during sending data, e.g. protocol error, etc.
fn send(&mut self, conn: &mut Connection) -> Res<()>;
fn has_data_to_send(&self) -> bool;
fn stream_writable(&self);
fn done(&self) -> bool;
- fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option<SendOrder>) -> Res<()>;
- fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()>;
/// # Errors
///
- /// Error my occur during sending data, e.g. protocol error, etc.
+ /// Error may occur during sending data, e.g. protocol error, etc.
fn send_data(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<usize>;
/// # Errors
///
- /// It may happen that the transport stream is already close. This is unlikely.
+ /// It may happen that the transport stream is already closed. This is unlikely.
fn close(&mut self, conn: &mut Connection) -> Res<()>;
/// # Errors
///
- /// It may happen that the transport stream is already close. This is unlikely.
+ /// It may happen that the transport stream is already closed. This is unlikely.
fn close_with_message(
&mut self,
_conn: &mut Connection,
@@ -606,7 +596,7 @@ trait SendStream: Stream {
/// # Errors
///
- /// It may happen that the transport stream is already close. This is unlikely.
+ /// It may happen that the transport stream is already closed. This is unlikely.
fn send_data_atomic(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<()> {
Err(Error::InvalidStreamId)
}
@@ -627,7 +617,6 @@ trait HttpSendStream: SendStream {
/// This can also return an error if the underlying stream is closed.
fn send_headers(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()>;
fn set_new_listener(&mut self, _conn_events: Box<dyn SendStreamEvents>) {}
- fn any(&self) -> &dyn Any;
}
trait SendStreamEvents: Debug {
diff --git a/third_party/rust/neqo-http3/src/priority.rs b/third_party/rust/neqo-http3/src/priority.rs
index f2651d3bb5..76a2cb9a85 100644
--- a/third_party/rust/neqo-http3/src/priority.rs
+++ b/third_party/rust/neqo-http3/src/priority.rs
@@ -1,4 +1,10 @@
-use std::{convert::TryFrom, fmt};
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt;
use neqo_transport::StreamId;
use sfv::{BareItem, Item, ListEntry, Parser};
diff --git a/third_party/rust/neqo-http3/src/push_controller.rs b/third_party/rust/neqo-http3/src/push_controller.rs
index c4591991ae..ab6afccdf6 100644
--- a/third_party/rust/neqo-http3/src/push_controller.rs
+++ b/third_party/rust/neqo-http3/src/push_controller.rs
@@ -1,3 +1,4 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
@@ -6,7 +7,6 @@
use std::{
cell::RefCell,
collections::VecDeque,
- convert::TryFrom,
fmt::{Debug, Display},
mem,
rc::Rc,
diff --git a/third_party/rust/neqo-http3/src/qlog.rs b/third_party/rust/neqo-http3/src/qlog.rs
index c3a13fd19f..81f9245a3c 100644
--- a/third_party/rust/neqo-http3/src/qlog.rs
+++ b/third_party/rust/neqo-http3/src/qlog.rs
@@ -6,14 +6,9 @@
// Functions that handle capturing QLOG traces.
-use std::convert::TryFrom;
-
use neqo_common::qlog::NeqoQlog;
use neqo_transport::StreamId;
-use qlog::{
- self,
- events::{DataRecipient, EventData},
-};
+use qlog::events::{DataRecipient, EventData};
pub fn h3_data_moved_up(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) {
qlog.add_event_data(|| {
diff --git a/third_party/rust/neqo-http3/src/recv_message.rs b/third_party/rust/neqo-http3/src/recv_message.rs
index 36e8f65b19..be58b7e47c 100644
--- a/third_party/rust/neqo-http3/src/recv_message.rs
+++ b/third_party/rust/neqo-http3/src/recv_message.rs
@@ -4,9 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{
- any::Any, cell::RefCell, cmp::min, collections::VecDeque, convert::TryFrom, fmt::Debug, rc::Rc,
-};
+use std::{cell::RefCell, cmp::min, collections::VecDeque, fmt::Debug, rc::Rc};
use neqo_common::{qdebug, qinfo, qtrace, Header};
use neqo_qpack::decoder::QPackDecoder;
@@ -494,8 +492,4 @@ impl HttpRecvStream for RecvMessage {
fn extended_connect_wait_for_response(&self) -> bool {
matches!(self.state, RecvMessageState::ExtendedConnect)
}
-
- fn any(&self) -> &dyn Any {
- self
- }
}
diff --git a/third_party/rust/neqo-http3/src/send_message.rs b/third_party/rust/neqo-http3/src/send_message.rs
index 96156938a0..c50e3e056a 100644
--- a/third_party/rust/neqo-http3/src/send_message.rs
+++ b/third_party/rust/neqo-http3/src/send_message.rs
@@ -4,11 +4,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{any::Any, cell::RefCell, cmp::min, fmt::Debug, rc::Rc};
+use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc};
use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType};
use neqo_qpack::encoder::QPackEncoder;
-use neqo_transport::{streams::SendOrder, Connection, StreamId};
+use neqo_transport::{Connection, StreamId};
use crate::{
frames::HFrame,
@@ -270,16 +270,6 @@ impl SendStream for SendMessage {
self.stream.has_buffered_data()
}
- fn set_sendorder(&mut self, _conn: &mut Connection, _sendorder: Option<SendOrder>) -> Res<()> {
- // Not relevant for SendMessage
- Ok(())
- }
-
- fn set_fairness(&mut self, _conn: &mut Connection, _fairness: bool) -> Res<()> {
- // Not relevant for SendMessage
- Ok(())
- }
-
fn close(&mut self, conn: &mut Connection) -> Res<()> {
self.state.fin()?;
if !self.stream.has_buffered_data() {
@@ -332,10 +322,6 @@ impl HttpSendStream for SendMessage {
self.stream_type = Http3StreamType::ExtendedConnect;
self.conn_events = conn_events;
}
-
- fn any(&self) -> &dyn Any {
- self
- }
}
impl ::std::fmt::Display for SendMessage {
diff --git a/third_party/rust/neqo-http3/src/server.rs b/third_party/rust/neqo-http3/src/server.rs
index b29f715451..1396a4e4cf 100644
--- a/third_party/rust/neqo-http3/src/server.rs
+++ b/third_party/rust/neqo-http3/src/server.rs
@@ -151,7 +151,7 @@ impl Http3Server {
active_conns.dedup();
active_conns
.iter()
- .for_each(|conn| self.server.add_to_waiting(conn.clone()));
+ .for_each(|conn| self.server.add_to_waiting(conn));
for mut conn in active_conns {
self.process_events(&mut conn, now);
}
@@ -1271,11 +1271,11 @@ mod tests {
while let Some(event) = hconn.next_event() {
match event {
Http3ServerEvent::Headers { stream, .. } => {
- assert!(requests.get(&stream).is_none());
+ assert!(!requests.contains_key(&stream));
requests.insert(stream, 0);
}
Http3ServerEvent::Data { stream, .. } => {
- assert!(requests.get(&stream).is_some());
+ assert!(requests.contains_key(&stream));
}
Http3ServerEvent::DataWritable { .. }
| Http3ServerEvent::StreamReset { .. }
diff --git a/third_party/rust/neqo-http3/src/server_events.rs b/third_party/rust/neqo-http3/src/server_events.rs
index 4be48363df..a85ece0bfb 100644
--- a/third_party/rust/neqo-http3/src/server_events.rs
+++ b/third_party/rust/neqo-http3/src/server_events.rs
@@ -9,7 +9,6 @@
use std::{
cell::RefCell,
collections::VecDeque,
- convert::TryFrom,
ops::{Deref, DerefMut},
rc::Rc,
};
diff --git a/third_party/rust/neqo-http3/tests/priority.rs b/third_party/rust/neqo-http3/tests/priority.rs
index cdec161058..77d19e6fcf 100644
--- a/third_party/rust/neqo-http3/tests/priority.rs
+++ b/third_party/rust/neqo-http3/tests/priority.rs
@@ -68,7 +68,7 @@ fn priority_update() {
Instant::now(),
"GET",
&("https", "something.com", "/"),
- &[],
+ &[Header::new("priority", "u=4,i")],
Priority::new(4, true),
)
.unwrap();
@@ -98,7 +98,7 @@ fn priority_update() {
assert_eq!(&headers, expected_headers);
assert!(!fin);
}
- other => panic!("unexpected server event: {:?}", other),
+ other => panic!("unexpected server event: {other:?}"),
}
let update_priority = Priority::new(3, false);
@@ -129,7 +129,7 @@ fn priority_update_dont_send_for_cancelled_stream() {
Instant::now(),
"GET",
&("https", "something.com", "/"),
- &[],
+ &[Header::new("priority", "u=5")],
Priority::new(5, false),
)
.unwrap();
diff --git a/third_party/rust/neqo-http3/tests/send_message.rs b/third_party/rust/neqo-http3/tests/send_message.rs
index 507c4bd552..fbf9a7a3ea 100644
--- a/third_party/rust/neqo-http3/tests/send_message.rs
+++ b/third_party/rust/neqo-http3/tests/send_message.rs
@@ -4,7 +4,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use lazy_static::lazy_static;
+use std::sync::OnceLock;
+
use neqo_common::event::Provider;
use neqo_crypto::AuthenticationStatus;
use neqo_http3::{
@@ -15,14 +16,14 @@ use test_fixture::*;
const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63];
-lazy_static! {
- static ref RESPONSE_HEADER_NO_DATA: Vec<Header> =
- vec![Header::new(":status", "200"), Header::new("something", "3")];
+fn response_header_no_data() -> &'static Vec<Header> {
+ static HEADERS: OnceLock<Vec<Header>> = OnceLock::new();
+ HEADERS.get_or_init(|| vec![Header::new(":status", "200"), Header::new("something", "3")])
}
-lazy_static! {
- static ref RESPONSE_HEADER_103: Vec<Header> =
- vec![Header::new(":status", "103"), Header::new("link", "...")];
+fn response_header_103() -> &'static Vec<Header> {
+ static HEADERS: OnceLock<Vec<Header>> = OnceLock::new();
+ HEADERS.get_or_init(|| vec![Header::new(":status", "103"), Header::new("link", "...")])
}
fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) {
@@ -68,7 +69,7 @@ fn send_trailers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> {
}
fn send_informational_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> {
- request.send_headers(&RESPONSE_HEADER_103)
+ request.send_headers(response_header_103())
}
fn send_headers(request: &mut Http3OrWebTransportStream) -> Result<(), Error> {
@@ -90,7 +91,7 @@ fn process_client_events(conn: &mut Http3Client) {
Header::new(":status", "200"),
Header::new("content-length", "3"),
])
- || (headers.as_ref() == *RESPONSE_HEADER_103)
+ || (headers.as_ref() == *response_header_103())
);
assert!(!fin);
response_header_found = true;
@@ -116,7 +117,7 @@ fn process_client_events_no_data(conn: &mut Http3Client) {
while let Some(event) = conn.next_event() {
match event {
Http3ClientEvent::HeaderReady { headers, fin, .. } => {
- assert_eq!(headers.as_ref(), *RESPONSE_HEADER_NO_DATA);
+ assert_eq!(headers.as_ref(), *response_header_no_data());
fin_received = fin;
response_header_found = true;
}
@@ -201,7 +202,7 @@ fn response_trailers3() {
#[test]
fn response_trailers_no_data() {
let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
- request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap();
+ request.send_headers(response_header_no_data()).unwrap();
exchange_packets(&mut hconn_c, &mut hconn_s);
send_trailers(&mut request).unwrap();
exchange_packets(&mut hconn_c, &mut hconn_s);
@@ -258,10 +259,10 @@ fn trailers_after_close() {
#[test]
fn multiple_response_headers() {
let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
- request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap();
+ request.send_headers(response_header_no_data()).unwrap();
assert_eq!(
- request.send_headers(&RESPONSE_HEADER_NO_DATA),
+ request.send_headers(response_header_no_data()),
Err(Error::InvalidHeader)
);
@@ -273,7 +274,7 @@ fn multiple_response_headers() {
#[test]
fn informational_after_response_headers() {
let (mut hconn_c, mut hconn_s, mut request) = connect_send_and_receive_request();
- request.send_headers(&RESPONSE_HEADER_NO_DATA).unwrap();
+ request.send_headers(response_header_no_data()).unwrap();
assert_eq!(
send_informational_headers(&mut request),
@@ -307,7 +308,7 @@ fn non_trailers_headers_after_data() {
exchange_packets(&mut hconn_c, &mut hconn_s);
assert_eq!(
- request.send_headers(&RESPONSE_HEADER_NO_DATA),
+ request.send_headers(response_header_no_data()),
Err(Error::InvalidHeader)
);
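The same `lazy_static!`-to-`OnceLock` migration appears here in its function-local form, the usual way to hand out a `&'static` reference to lazily built data without a global name. A generic sketch, not taken from the patch:

    use std::sync::OnceLock;

    fn default_headers() -> &'static Vec<(String, String)> {
        // The static lives inside the accessor, so the value can only be
        // reached through it; `get_or_init` builds the Vec exactly once.
        static HEADERS: OnceLock<Vec<(String, String)>> = OnceLock::new();
        HEADERS.get_or_init(|| vec![(":status".into(), "200".into())])
    }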
diff --git a/third_party/rust/neqo-http3/tests/webtransport.rs b/third_party/rust/neqo-http3/tests/webtransport.rs
index 4e943d86cb..b1e18a5a98 100644
--- a/third_party/rust/neqo-http3/tests/webtransport.rs
+++ b/third_party/rust/neqo-http3/tests/webtransport.rs
@@ -15,7 +15,7 @@ use neqo_http3::{
};
use neqo_transport::{StreamId, StreamType};
use test_fixture::{
- addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3,
+ anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3,
DEFAULT_KEYS, DEFAULT_SERVER_NAME,
};
@@ -24,8 +24,8 @@ fn connect() -> (Http3Client, Http3Server) {
let mut client = Http3Client::new(
DEFAULT_SERVER_NAME,
Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
Http3Parameters::default().webtransport(true),
now(),
)
diff --git a/third_party/rust/neqo-qpack/.cargo-checksum.json b/third_party/rust/neqo-qpack/.cargo-checksum.json
index 2bbec5df43..aae0a1e594 100644
--- a/third_party/rust/neqo-qpack/.cargo-checksum.json
+++ b/third_party/rust/neqo-qpack/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"2eabb2ad2846a08b51b306634ed75dc14ab3a43738b1190e3b4c4f2beb00b8e2","src/decoder.rs":"7e468d59adff1fa9373cbb703d13a7503f721a89bebafd049feaf0308a39b606","src/decoder_instructions.rs":"d991d70e51f079bc5b30d3982fd0176edfa9bb7ba14c17a20ec3eea878c56206","src/encoder.rs":"e026da38c2c3410a4e9aa330cda09ac411008772dd66d262d6c375601cebf481","src/encoder_instructions.rs":"86e3abbd9cf94332041326ac6cf806ed64623e3fd38dbc0385b1f63c37e73fd9","src/header_block.rs":"3925476df69b90d950594faadc5cb24c374d46de8c75a374a235f0d27323a7d8","src/huffman.rs":"8b0b2ea069c2a6eb6677b076b99b08ac0d29eabe1f2bbbab37f18f49187ef276","src/huffman_decode_helper.rs":"81309e27ff8f120a10c0b1598888ded21b76e297dc02cea8c7378d6a6627d62a","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"196114397c2b1bf6ef154206018f519b012789cf712e89b069a7616d7278ef3a","src/prefix.rs":"fb4a9acbcf6fd3178f4474404cd3d3b131abca934f69fe14a9d744bc7e636dc5","src/qlog.rs":"e320007ea8309546b26f9c0019ab8722da80dbd38fa976233fd8ae19a0af637c","src/qpack_send_buf.rs":"14d71310c260ee15ea40a783998b507c968eef12db2892b47c689e872b5242a5","src/reader.rs":"b9a7dccd726f471fc24f1d3304f03ac0a039c0828aac7b33c927be07d395c655","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"05dbec6483bb24c9fc8d721b70fdfefc2df53b458488b55104147f13c386a47d"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"c2152600379c3961ba79e661e164630a63531744f79e082fce39cdf1cbe75ddd","src/decoder.rs":"0675444129e074e9d5d56f0d45d2eaed614c85e22cfe9f2d28cdee912c15b420","src/decoder_instructions.rs":"d991d70e51f079bc5b30d3982fd0176edfa9bb7ba14c17a20ec3eea878c56206","src/encoder.rs":"84649cbee81e050f55d7ea691ac871e072741abd8bbf96303eb2e98aa8ee0aea","src/encoder_instructions.rs":"86e3abbd9cf94332041326ac6cf806ed64623e3fd38dbc0385b1f63c37e73fd9","src/header_block.rs":"3925476df69b90d950594faadc5cb24c374d46de8c75a374a235f0d27323a7d8","src/huffman.rs":"71ec740426eee0abb6205104e504f5b97f525a76c4a5f5827b78034d28ce1876","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"fd673630b5ed64197851c9a9758685096d3c0aa04f4994290733a38057004ee6","src/prefix.rs":"fb4a9acbcf6fd3178f4474404cd3d3b131abca934f69fe14a9d744bc7e636dc5","src/qlog.rs":"e320007ea8309546b26f9c0019ab8722da80dbd38fa976233fd8ae19a0af637c","src/qpack_send_buf.rs":"755af90fe077b1bcca34a1a2a1bdce5ce601ea490b2ca3f1313e0107d13e67e2","src/reader.rs":"1581261741a0922b147a6975cc8b1a3503846f6dbfdb771d254760c298996982","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"6e16debdceadc453546f247f8316883af9eeeedd12f2070219d8484a0a131d46"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/neqo-qpack/Cargo.toml b/third_party/rust/neqo-qpack/Cargo.toml
index 7df63b7bf6..1becac8190 100644
--- a/third_party/rust/neqo-qpack/Cargo.toml
+++ b/third_party/rust/neqo-qpack/Cargo.toml
@@ -10,19 +10,20 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
-rust-version = "1.70.0"
+edition = "2021"
+rust-version = "1.74.0"
name = "neqo-qpack"
-version = "0.7.0"
-authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
+version = "0.7.2"
+authors = ["The Neqo Authors <necko@mozilla.com>"]
+homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
+repository = "https://github.com/mozilla/neqo/"
-[dependencies]
-lazy_static = "~1.4.0"
-static_assertions = "~1.1.0"
+[lib]
+bench = false
[dependencies.log]
-version = "~0.4.17"
+version = "0.4"
default-features = false
[dependencies.neqo-common]
@@ -35,11 +36,16 @@ path = "./../neqo-crypto"
path = "./../neqo-transport"
[dependencies.qlog]
-git = "https://github.com/cloudflare/quiche"
-rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1"
+version = "0.12"
+default-features = false
+
+[dependencies.static_assertions]
+version = "1.1"
+default-features = false
[dev-dependencies.test-fixture]
path = "../test-fixture"
-[features]
-deny-warnings = []
+[lints.clippy.pedantic]
+level = "warn"
+priority = -1
diff --git a/third_party/rust/neqo-qpack/src/decoder.rs b/third_party/rust/neqo-qpack/src/decoder.rs
index 2119db0256..b2cfb6629a 100644
--- a/third_party/rust/neqo-qpack/src/decoder.rs
+++ b/third_party/rust/neqo-qpack/src/decoder.rs
@@ -4,8 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::convert::TryFrom;
-
use neqo_common::{qdebug, Header};
use neqo_transport::{Connection, StreamId};
@@ -287,7 +285,7 @@ fn map_error(err: &Error) -> Error {
#[cfg(test)]
mod tests {
- use std::{convert::TryFrom, mem};
+ use std::mem;
use neqo_common::Header;
use neqo_transport::{StreamId, StreamType};
diff --git a/third_party/rust/neqo-qpack/src/encoder.rs b/third_party/rust/neqo-qpack/src/encoder.rs
index c7921ee2c0..c90570ccdc 100644
--- a/third_party/rust/neqo-qpack/src/encoder.rs
+++ b/third_party/rust/neqo-qpack/src/encoder.rs
@@ -4,10 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{
- collections::{HashMap, HashSet, VecDeque},
- convert::TryFrom,
-};
+use std::collections::{HashMap, HashSet, VecDeque};
use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qtrace, Header};
use neqo_transport::{Connection, Error as TransportError, StreamId};
diff --git a/third_party/rust/neqo-qpack/src/huffman.rs b/third_party/rust/neqo-qpack/src/huffman.rs
index 283a501b32..30bb880438 100644
--- a/third_party/rust/neqo-qpack/src/huffman.rs
+++ b/third_party/rust/neqo-qpack/src/huffman.rs
@@ -4,10 +4,8 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::convert::TryFrom;
-
use crate::{
- huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT},
+ huffman_decode_helper::{huffman_decoder_root, HuffmanDecoderNode},
huffman_table::HUFFMAN_TABLE,
Error, Res,
};
@@ -93,7 +91,7 @@ pub fn decode_huffman(input: &[u8]) -> Res<Vec<u8>> {
}
fn decode_character(reader: &mut BitReader) -> Res<Option<u16>> {
- let mut node: &HuffmanDecoderNode = &HUFFMAN_DECODE_ROOT;
+ let mut node: &HuffmanDecoderNode = huffman_decoder_root();
let mut i = 0;
while node.value.is_none() {
match reader.read_bit() {
diff --git a/third_party/rust/neqo-qpack/src/huffman_decode_helper.rs b/third_party/rust/neqo-qpack/src/huffman_decode_helper.rs
index 122226dd1f..939312ab22 100644
--- a/third_party/rust/neqo-qpack/src/huffman_decode_helper.rs
+++ b/third_party/rust/neqo-qpack/src/huffman_decode_helper.rs
@@ -4,9 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::convert::TryFrom;
-
-use lazy_static::lazy_static;
+use std::sync::OnceLock;
use crate::huffman_table::HUFFMAN_TABLE;
@@ -15,8 +13,9 @@ pub struct HuffmanDecoderNode {
pub value: Option<u16>,
}
-lazy_static! {
- pub static ref HUFFMAN_DECODE_ROOT: HuffmanDecoderNode = make_huffman_tree(0, 0);
+pub fn huffman_decoder_root() -> &'static HuffmanDecoderNode {
+ static ROOT: OnceLock<HuffmanDecoderNode> = OnceLock::new();
+ ROOT.get_or_init(|| make_huffman_tree(0, 0))
}
fn make_huffman_tree(prefix: u32, len: u8) -> HuffmanDecoderNode {
diff --git a/third_party/rust/neqo-qpack/src/lib.rs b/third_party/rust/neqo-qpack/src/lib.rs
index 1581712017..10ee5df61c 100644
--- a/third_party/rust/neqo-qpack/src/lib.rs
+++ b/third_party/rust/neqo-qpack/src/lib.rs
@@ -4,11 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and
-// structs.
-#![allow(clippy::module_name_repetitions)]
+#![allow(clippy::module_name_repetitions)] // This lint doesn't work here.
pub mod decoder;
mod decoder_instructions;
diff --git a/third_party/rust/neqo-qpack/src/qpack_send_buf.rs b/third_party/rust/neqo-qpack/src/qpack_send_buf.rs
index a443859081..c0b8d7af1b 100644
--- a/third_party/rust/neqo-qpack/src/qpack_send_buf.rs
+++ b/third_party/rust/neqo-qpack/src/qpack_send_buf.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{convert::TryFrom, ops::Deref};
+use std::ops::Deref;
use neqo_common::Encoder;
diff --git a/third_party/rust/neqo-qpack/src/reader.rs b/third_party/rust/neqo-qpack/src/reader.rs
index ff9c42b246..0173ed7888 100644
--- a/third_party/rust/neqo-qpack/src/reader.rs
+++ b/third_party/rust/neqo-qpack/src/reader.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{convert::TryInto, mem, str};
+use std::{mem, str};
use neqo_common::{qdebug, qerror};
use neqo_transport::{Connection, StreamId};
@@ -223,20 +223,19 @@ impl IntReader {
}
}
-#[derive(Debug)]
+#[derive(Debug, Default)]
enum LiteralReaderState {
+ #[default]
ReadHuffman,
- ReadLength { reader: IntReader },
- ReadLiteral { offset: usize },
+ ReadLength {
+ reader: IntReader,
+ },
+ ReadLiteral {
+ offset: usize,
+ },
Done,
}
-impl Default for LiteralReaderState {
- fn default() -> Self {
- Self::ReadHuffman
- }
-}
-
/// This is decoder of a literal with a prefix:
/// 1) ignores `prefix_len` bits of the first byte,
/// 2) reads "huffman bit"
diff --git a/third_party/rust/neqo-qpack/src/table.rs b/third_party/rust/neqo-qpack/src/table.rs
index 7ce8572542..517e98db09 100644
--- a/third_party/rust/neqo-qpack/src/table.rs
+++ b/third_party/rust/neqo-qpack/src/table.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{collections::VecDeque, convert::TryFrom};
+use std::collections::VecDeque;
use neqo_common::qtrace;
diff --git a/third_party/rust/neqo-transport/.cargo-checksum.json b/third_party/rust/neqo-transport/.cargo-checksum.json
index ba33141b7a..669c0120f0 100644
--- a/third_party/rust/neqo-transport/.cargo-checksum.json
+++ b/third_party/rust/neqo-transport/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"d31e1132faa19d4a3025d3b7a98a38d09591b8b75735896d5afbd7e8fdb4434d","benches/rx_stream_orderer.rs":"5f32aba0066bca15aedbf059f9b00f64ced11aa7222c0b0c5ea202bdd9e6ef14","src/ackrate.rs":"c8d8933ccd8255e5b0712a4a7c4a7304de16a430325d5125fdc538a623874279","src/addr_valid.rs":"d1badfd0ab71ad8c6368a398f52d23f817d70e70653a3313353af34542525603","src/cc/classic_cc.rs":"15b735d6c7054489fd0fadc25cbee8b88b4efe1ee0dcc43354b1552183a8b2d8","src/cc/cubic.rs":"f6669242f6566b1de711b8ff59051919a5aa9106da43ed16ae83d6fe614cec11","src/cc/mod.rs":"0141bcadb719a7fe75d037f4ebe19c7f7bdbf9177314ea5b97ee7244b14b162b","src/cc/new_reno.rs":"1d2790260fe8147b4a40c1792e862ab30b204cf4cf8fef45f5d50d6975011ec2","src/cc/tests/cubic.rs":"5367da8fa627046379bacba45a0f993b7305aef48d954c13004cb7ae88dc04ec","src/cc/tests/mod.rs":"1567bf0ddaff5cb679217f2fd65f01e15a302b9b9e68b69f3e617dcaf7b3e5ec","src/cc/tests/new_reno.rs":"7e8a81c3f16d1f21f8b42b2abba4cf8ea6f78cb2ea05c4d85d7c1cb71c1db464","src/cid.rs":"91ed2b635aabde43ed5e9d383d26e9b3a28e92f218adb8feea650d9c4e55ec0a","src/connection/dump.rs":"aea2f97fa78f3d9e0fe32c2a58ce70a7050aced3abde8b06183ed88d02571ec1","src/connection/idle.rs":"b3bc2ad1290e54278d8703092d135eda973eb12316d1f6dffedaffdf25e2a47e","src/connection/mod.rs":"c38de7f0114d2218f3fc5024bd7570199712f57c399642a7b3be0a107845d947","src/connection/params.rs":"c6433e78953df329fa241c7eba0220743f8028d0ca9c1da0021c7f5973aae5c8","src/connection/saved.rs":"97eb19792be3c4d721057021a43ea50a52f89a3cfa583d3d3dcf5d9144b332f5","src/connection/state.rs":"04352beb60ec9b51b41ae2999acb0086f3f90dc94fa1b2becf3921ec0e6ba5b1","src/connection/test_internal.rs":"f3ebfe97b25c9c716d41406066295e5aff4e96a3051ef4e2b5fb258282bbc14c","src/connection/tests/ackrate.rs":"aa92c91185a74eeb2abcc86d19d746b8de3feb7ad507494be9042a6ec37b491e","src/connection/tests/cc.rs":"ee567e43b626353beaae2f0a9e09266bbb8d62bc14178743fc3606bc53c5b6b1","src/connection/tests/close.rs":"c309974598b0b51793d54be1470338d692f1111f79ea985a5c73d62d780d15f7","src/connection/tests/datagram.rs":"ae2853c4b8dbae4b00940adcc8bd114065f134944f182270987d55daa1b27adb","src/connection/tests/fuzzing.rs":"a877ce6cb005099eb4ae1f5649c63c4b7a5c108c9459a3bb36490965712f5310","src/connection/tests/handshake.rs":"1bed309e8358dfb5026e12da7ea2f8bdf42e910fb8b41809b554c0a484a264e8","src/connection/tests/idle.rs":"30077587ed3e22934c82d675119bdcc696a91d4d0d1908fb5f4c9f8b085fd8d9","src/connection/tests/keys.rs":"792cf24ac51daff62c19dcb8c266824a9fd22cb1b715d416551ee270a26c9eb2","src/connection/tests/migration.rs":"33e0442b0d2d3e940ba4a42313419dd2d183174bede0b3046165410ce05b21b1","src/connection/tests/mod.rs":"f64c200035396f96adb6b86742c7117dc96cf500969c1eae2bddcb1d1c1c43f3","src/connection/tests/priority.rs":"e2299a873dca794573a10b0f94bbc5fdf8b75ed7531ee931c31ad7507bc71e6f","src/connection/tests/recovery.rs":"7f28767f3cca2ff60e3dcfa803e12ef043486a222f54681a8faf2ea2fee564a1","src/connection/tests/resumption.rs":"94550cd961b98fba6ab30ff97a538919c76112394470ac00568ea1ac66e6e323","src/connection/tests/stream.rs":"8d3b6fa884847de15910b005c5f9cdfcbdf5eecec5bb84804a842a85f075b0c3","src/connection/tests/vn.rs":"d2539caf17b455c9259d7cfbd823e110da463890997e578f8e74af5f468a4a7b","src/connection/tests/zerortt.rs":"73180babcf24b0eccef91656acfaac953d3abeab52d7d14cede0188ea9d40fc6","src/crypto.rs":"2070f445d4b25d6dc500ba9cf5dcf29383aba217e4ba2e0e312a45e073b28dc6","src/events.rs":"70f989e60004f62d39398c557e36337457810c5942dcfb9631f707f7ac59466d","src/fc.rs":"6c4cd4a3854e5442b33f309a24c0b1a040cdc8193611ea0e05c66b7f6
fa1f68c","src/frame.rs":"5e2d28051ef585fdcfb47e7ed666f5000ad38b5407356b07da4ccd6595d3cc34","src/lib.rs":"ef0481f82f5d2173aa202fad0376dbf99e14ae3d58b3bfca4f3da8ec4e07ce8c","src/pace.rs":"05e38e711af705ea52b75843c2226c86fba3605f00d56be00038f5f769a5c8a2","src/packet/mod.rs":"a52648a30a2f066d74a7021b97b99163cf8037644faddef8138ee2dca8ec6ffa","src/packet/retry.rs":"d4cd0f68af678d212040a9e21440697cddb61811261d6e5e9117b47926b49eda","src/path.rs":"fb5240ec491f087eaa86bc737fdfaa3d2c063d69ab12c11d54186a9c8e68714f","src/qlog.rs":"83a605006a98bedd1ed13de8fc1b46aca6b0eaf26a1af5ce8bb936d1dcd6ed9a","src/quic_datagrams.rs":"bd035ac89cf4c7f94d9b839c54cc0298c5db76f7c9e55138455e3246aac01e1e","src/recovery.rs":"fdd85ae2c11bb4efa0f83fec8723a55466089384bea72f86fd1c921d586fe692","src/recv_stream.rs":"40e9da357e43fe668853f2f8251b012cea8e1331230148c448def092551f3b49","src/rtt.rs":"39039f8140e0729085e8090a7f3572cc9f1334936d2f04ff221d387abaecb282","src/send_stream.rs":"da5564521eb7ecfd31326a168c6bc957ec6e1ac832e885d7877570a5fae77222","src/sender.rs":"fe3970579b1d3869ca967058db1261b710f5b8ab6a3f47199e0db5ed5bae75ce","src/server.rs":"b2d9acbe5455255187611599f3c21af2c706968a1b042bdde9a59bdb96b5ac2a","src/stats.rs":"7e7aabe62b0d67151fdfd6b2024230ea1418955ed0ed2f03cbaef0d0877d4221","src/stream_id.rs":"188a8177fd7819e9206bab359ff3002516ecc87322211f536c2bd766a5b7d9d8","src/streams.rs":"476460ce385cfd403656af37f544f75f25bfd261a25fe528df6e926fecd7c785","src/tparams.rs":"08359a7a24f51d741c38294b362c5a0206a2ed9335f0ef61502c7a2d6447b6d8","src/tracking.rs":"6b12710c73344b366c985ff01511601cd35137359c4be015a505e01b082f4f88","src/version.rs":"afe8b5db2a9303e411a24071539cbc58c857fdecd19b3898b37ee3ecac24253c","tests/common/mod.rs":"922872201a673248889c9f511ecc995831d0316e2c5dd2918194b63ee28560ac","tests/conn_vectors.rs":"6b3a1177ca0cb123008ee52a89606d94c19ee6f36f92c040266ce4b6ea13904b","tests/connection.rs":"601100578c1c8f8562917e585654309a8f2bc268c4bc6ab37e29af33b8328eac","tests/network.rs":"1d07b5db4079db3766443b773e661a7f070d402d51be94fb75c6f96696a6b016","tests/retry.rs":"b1077e3500f7387bdd23a65481b5d937cd3f4b0b220f635f2b1db27966a5d538","tests/server.rs":"d46a736c03855f06634b3aedba9d32563d861d4408ad3bb875450d723ea3742a","tests/sim/connection.rs":"6a1ffe23fbbcae957eacf0f9d53672e3b3a0dc133e5b3bb3e2aaba872b597a67","tests/sim/delay.rs":"31171d53ced9609c4f327cef48950dbbe9fecad0d208cbcd2b1bfee474d90e31","tests/sim/drop.rs":"02e6471466f4168d6a507c621dd10d0dfeb26f13ae08ed97401c7e3857a1f43a","tests/sim/mod.rs":"fd62f126e7ddef141fe07c7ea9b4c8ba00dfc4002698a3431eaf3badebca1a53","tests/sim/net.rs":"597f4d37bc26c3d82eeeaa6d14dd03bc2be3930686df2b293748b43c07c497d7","tests/sim/rng.rs":"4d5ef201e51b5ed5a0c63ad83cf514c9b117c9d6a07da94d91acc538edb56633","tests/sim/taildrop.rs":"638adda0a3f295550692a471d471a6d0673e1e61c96f5cf6f013a98f6641201c"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"8eac0a271cef9232d100eb45093a3f7978a93e0576c64c99c161092ff445825d","benches/range_tracker.rs":"4821443d3cccc697b8976b7c50d787a7aa8cb86ab8633a7582be3f85135168db","benches/rx_stream_orderer.rs":"a8db922390d8506c483a3a1f40ac9bf12806ebdb4f501716904776dd58e995be","benches/transfer.rs":"11343c1ac9131585c42236749d32d9e272a33b6acd58831fa3415be4d4f1cf86","src/ackrate.rs":"4bb882e1069a0707dc85338b75327e2910c93ee5f36575767a0d58c4c41c9d4f","src/addr_valid.rs":"03c0b2ff85254179c5d425b12acfdcc6b1ea5735aeb0f604b9b3603451b3ef0a","src/cc/classic_cc.rs":"4528bb4e9059524942ee7ef931de5de90c78ee13f76489185a964ad45c12c0b3","src/cc/cubic.rs":"24c6913cc6346e5361007221c26e8096ece51583431fc3ab9c99e4ce4b0a9f5d","src/cc/mod.rs":"e0837937c9991b37edad15cd870ea9e0623489babfccc340074dd8322e1ef401","src/cc/new_reno.rs":"25d0921005688e0f0666efd0a4931b4f8cd44363587d98e5b6404818c5d05dd4","src/cc/tests/cubic.rs":"109fc8be5efba8959e777288c32ae8f2db581fc08719f318ad676e187f655478","src/cc/tests/mod.rs":"44f8df551e742ae1037cd1cdb85b2c1334c2e5ab3c23ed63d856dbc6b8743afc","src/cc/tests/new_reno.rs":"5414e26b6c928c5f82c5eeb42f04772b05be1ec2c8ee21c2b698ce8cb32829a1","src/cid.rs":"9686a3070c593cfca846d7549863728e31211b304b9fa876220f79bff5e24173","src/connection/dump.rs":"c539caffdf5b4dfaf0173bb20d1974f5242b5432a0a32fc0b8ab56ee682cb1eb","src/connection/idle.rs":"b3bc2ad1290e54278d8703092d135eda973eb12316d1f6dffedaffdf25e2a47e","src/connection/mod.rs":"dcfba9574b707318292f460dc40f54f3cdf8fd883f5f0d604f1d0d466f99f481","src/connection/params.rs":"9731bc5faa584874c48538ed19839c7a310277df39144c580cdf3001153f5a56","src/connection/saved.rs":"97eb19792be3c4d721057021a43ea50a52f89a3cfa583d3d3dcf5d9144b332f5","src/connection/state.rs":"c1820864cc63073e1f44b875be1fcde9835df644e0fa8c2e05652421ad78b7b2","src/connection/test_internal.rs":"f3ebfe97b25c9c716d41406066295e5aff4e96a3051ef4e2b5fb258282bbc14c","src/connection/tests/ackrate.rs":"4a2b835575850ae4a14209d3e51883ecb1e69afb44ef91b5e13a5e6cb7174fab","src/connection/tests/cc.rs":"d0d6ac038572ad3dcd9e6734596eaeedc6d3222d24e31b023aaab3540d195e46","src/connection/tests/close.rs":"20bf9b044ba52517693c2bd10820ff04a8c07de01d59c8c47b4e9478aa730211","src/connection/tests/datagram.rs":"f4c85099b6a8739fb99eadd8711b02066ad80fc8341a2e5e0dae2520378af9fe","src/connection/tests/fuzzing.rs":"79d9ac83fe2d952a3a13140d603569c332d29dbba2e0d2b8ee5f6e42e8f4708a","src/connection/tests/handshake.rs":"eda7308fdd46570ee3b5569ad34e63761ccde89eb5ca854c877e3a53e7de5ec8","src/connection/tests/idle.rs":"f3bcb12cd79cb8eabc969ce3fb0fab4eea26d6383b81a323c0e18ca9c42cfb59","src/connection/tests/keys.rs":"55558c057beb4221245beb262076de3991dca3f2661411db61c09d21194873df","src/connection/tests/migration.rs":"624985d925236be197eee52208dbdebe35c0df5bd9d30982d6f183dfda4cbab5","src/connection/tests/mod.rs":"8b6709a5c89becf2daed407515f894ba3337e87b2d45b21acffa02e67f37eeec","src/connection/tests/priority.rs":"dd3504f52d3fce7a96441624bc1c82c733e6bb556b9b79d24d0f4fb4efaf5a9e","src/connection/tests/recovery.rs":"7f28767f3cca2ff60e3dcfa803e12ef043486a222f54681a8faf2ea2fee564a1","src/connection/tests/resumption.rs":"1a0de0993cd325224fc79a3c094d22636d5b122ab1123d16265d4fafb23574bd","src/connection/tests/stream.rs":"e5590c2b52d33fbe1b4e995edf1c016dda460ecfa2a9f021005e4abe8ea13580","src/connection/tests/vn.rs":"550eb6b4d39d5960aafc70037c25a1a0f5db1232ce0ec6080b2c29a731a9574e","src/connection/tests/zerortt.rs":"67f77721e33d9fa2691c5ea3ef4a90935987541d81f0f42fbcfca31e690b352a","src/crypto.rs":"c5780ab85ca84e830024c31346a416f1f470
694372d732ee5e5b7c5df3adc202","src/events.rs":"6e115f309c5c46f30f6223e1347bea477ada457f8bb2189ecccc6d65483318d6","src/fc.rs":"ec9de1028286870c0adf88a92e1355acf13dede8b1e91179230df3263e3827a9","src/frame.rs":"eb35c4add314f0013ad7837157fa9daeb76a2286fc7f8c922993624f54a09569","src/lib.rs":"f8d83b370cab19b3d172d0689f8d76115f5fd26c742e394fca62e253809cedc4","src/pace.rs":"86a674ac4d086148ea297214910458c3705918bd627b996ba8bbb12f2c4bf99e","src/packet/mod.rs":"9fac8f4046ada084dbbcc6601391a2bf8bbc23a09d6fe7df3c135a36840dbee3","src/packet/retry.rs":"1f10bb2c39ae4335e65b8d5d97f2b6df62e04877740af27c7b965a65e7f7ca66","src/path.rs":"3eb7e5e3bc496bfefc729c1e15fba0f9f83572151a850bf13b9c931297789279","src/qlog.rs":"b94aa36d5bac2799d8635cf6b25b9bb9383944d5607ea85aff55715f70af5f7b","src/quic_datagrams.rs":"3d33ecb9e6e80c77b812e8260fc807352300fb2305a29b797259ae34c52b67c5","src/recovery.rs":"1dadc6717dd133007943e762231a50680087392466904c2f2e6fface084e2ba9","src/recv_stream.rs":"f21ae0bb786901bb7d726a94cb3352607b0057128beaa331808137f2f57a330b","src/rtt.rs":"4635dc0c401b78a1fd9d34da6f9bf7f6e7f5be3a57ed9716e0efc8f0f94f1e47","src/send_stream.rs":"f717f64b75e368cf5fa4ca43078aa7c1b5aff48b4f6266713e6fa7dc458328aa","src/sender.rs":"5f760988bdd6fbbd5956877a97abe7c17370dd531f68b751a9e4e4459583f87b","src/server.rs":"048aaac84e15d49fd25850294759107fe1855bbbc0481c16f8bd888d8f2a8f6d","src/stats.rs":"b2a4c03d5b24edeecd00d809019c56f1a22a4e35967309ae6e6053331aafcf30","src/stream_id.rs":"fd07cbb81709a54bdb0659f676ef851cd145c004b817044ede5b21e54fdb60e4","src/streams.rs":"062b1b61edd1a76a86914f2cc1ca007c03edd9136c0c3409d960ddb805805fc6","src/tparams.rs":"10d62ac64865e0606c3c14141f9674631c610b3f042e274e110bdcef5d388491","src/tracking.rs":"f9a9aa01abc79fdd7a2cfb2c3ae342b9ab709e6a2a11076ec5c475fc89c1f598","src/version.rs":"182484ed9ecc2e17cab73cc61914a86a2d206936cab313825ae76fd37eeade77","tests/common/mod.rs":"0aa6674ae4efd2f151a65737ed5eab9e700bd1b3da5b4c165cb24de2b01598ce","tests/conn_vectors.rs":"290550072bd0c37652b79ac119729064dd486452c3a740353a6669bcdb2b82cf","tests/connection.rs":"b3c2ce0c62c4b79f80e42289eadd51933931b0ae44c0adc20ce5141edd454e00","tests/network.rs":"9e30b8610124250262fceef27d09fdecf2d6e9c3a96b1e676ff4189b9e06d5ba","tests/retry.rs":"da5c6a6f9ec1a8f556073b2d2e11fbcd2f58463818b0f08f8d23158016fea0d5","tests/server.rs":"cb83de909d858950bfd75a789fc23c3c44fcdf1d965b63800b2c7b498507987f"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/neqo-transport/Cargo.toml b/third_party/rust/neqo-transport/Cargo.toml
index f993a858b8..a309987434 100644
--- a/third_party/rust/neqo-transport/Cargo.toml
+++ b/third_party/rust/neqo-transport/Cargo.toml
@@ -10,28 +10,39 @@
# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
-rust-version = "1.70.0"
+edition = "2021"
+rust-version = "1.74.0"
name = "neqo-transport"
-version = "0.7.0"
-authors = [
- "EKR <ekr@rtfm.com>",
- "Andy Grover <agrover@mozilla.com>",
-]
+version = "0.7.2"
+authors = ["The Neqo Authors <necko@mozilla.com>"]
+homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
+repository = "https://github.com/mozilla/neqo/"
+
+[lib]
+bench = false
+
+[[bench]]
+name = "transfer"
+harness = false
+required-features = ["bench"]
[[bench]]
name = "rx_stream_orderer"
harness = false
required-features = ["bench"]
-[dependencies]
-indexmap = "1.9.3"
-lazy_static = "1.4"
-smallvec = "1.11.1"
+[[bench]]
+name = "range_tracker"
+harness = false
+required-features = ["bench"]
+
+[dependencies.indexmap]
+version = "1.9"
+default-features = false
[dependencies.log]
-version = "0.4.17"
+version = "0.4"
default-features = false
[dependencies.neqo-common]
@@ -41,17 +52,29 @@ path = "../neqo-common"
path = "../neqo-crypto"
[dependencies.qlog]
-git = "https://github.com/cloudflare/quiche"
-rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1"
+version = "0.12"
+default-features = false
+
+[dependencies.smallvec]
+version = "1.11"
+default-features = false
-[dev-dependencies]
-criterion = "0.5.1"
-enum-map = "2.7"
+[dev-dependencies.criterion]
+version = "0.5"
+features = ["html_reports"]
+default-features = false
+
+[dev-dependencies.enum-map]
+version = "2.7"
+default-features = false
[dev-dependencies.test-fixture]
path = "../test-fixture"
[features]
bench = []
-deny-warnings = []
fuzzing = ["neqo-crypto/fuzzing"]
+
+[lints.clippy.pedantic]
+level = "warn"
+priority = -1
diff --git a/third_party/rust/neqo-transport/benches/range_tracker.rs b/third_party/rust/neqo-transport/benches/range_tracker.rs
new file mode 100644
index 0000000000..c2f78f4874
--- /dev/null
+++ b/third_party/rust/neqo-transport/benches/range_tracker.rs
@@ -0,0 +1,50 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use criterion::{criterion_group, criterion_main, Criterion}; // black_box
+use neqo_transport::send_stream::RangeTracker;
+
+const CHUNK: u64 = 1000;
+const END: u64 = 100_000;
+fn build_coalesce(len: u64) -> RangeTracker {
+ let mut used = RangeTracker::default();
+ used.mark_acked(0, CHUNK as usize);
+ used.mark_sent(CHUNK, END as usize);
+ // leave a gap or it will coalesce here
+ for i in 2..=len {
+ // These do not get immediately coalesced when marking since they're not at the end or start
+ used.mark_acked(i * CHUNK, CHUNK as usize);
+ }
+ used
+}
+
+fn coalesce(c: &mut Criterion, count: u64) {
+ c.bench_function(
+ &format!("coalesce_acked_from_zero {count}+1 entries"),
+ |b| {
+ b.iter_batched_ref(
+ || build_coalesce(count),
+ |used| {
+ used.mark_acked(CHUNK, CHUNK as usize);
+ let tail = (count + 1) * CHUNK;
+ used.mark_sent(tail, CHUNK as usize);
+ used.mark_acked(tail, CHUNK as usize);
+ },
+ criterion::BatchSize::SmallInput,
+ )
+ },
+ );
+}
+
+fn benchmark_coalesce(c: &mut Criterion) {
+ coalesce(c, 1);
+ coalesce(c, 3);
+ coalesce(c, 10);
+ coalesce(c, 1000);
+}
+
+criterion_group!(benches, benchmark_coalesce);
+criterion_main!(benches);
diff --git a/third_party/rust/neqo-transport/benches/rx_stream_orderer.rs b/third_party/rust/neqo-transport/benches/rx_stream_orderer.rs
index 03b401ba06..0a1e763e97 100644
--- a/third_party/rust/neqo-transport/benches/rx_stream_orderer.rs
+++ b/third_party/rust/neqo-transport/benches/rx_stream_orderer.rs
@@ -1,3 +1,9 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
use criterion::{criterion_group, criterion_main, Criterion};
use neqo_transport::recv_stream::RxStreamOrderer;
diff --git a/third_party/rust/neqo-transport/benches/transfer.rs b/third_party/rust/neqo-transport/benches/transfer.rs
new file mode 100644
index 0000000000..444f738f9c
--- /dev/null
+++ b/third_party/rust/neqo-transport/benches/transfer.rs
@@ -0,0 +1,70 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::time::Duration;
+
+use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion};
+use test_fixture::{
+ boxed,
+ sim::{
+ connection::{ConnectionNode, ReceiveData, SendData},
+ network::{Delay, TailDrop},
+ Simulator,
+ },
+};
+
+const ZERO: Duration = Duration::from_millis(0);
+const JITTER: Duration = Duration::from_millis(10);
+const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte
+
+fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option<impl AsRef<str>>) {
+ c.bench_function(label, |b| {
+ b.iter_batched(
+ || {
+ let nodes = boxed![
+ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]),
+ TailDrop::dsl_uplink(),
+ Delay::new(ZERO..JITTER),
+ ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]),
+ TailDrop::dsl_downlink(),
+ Delay::new(ZERO..JITTER),
+ ];
+ let mut sim = Simulator::new(label, nodes);
+ if let Some(seed) = &seed {
+ sim.seed_str(seed);
+ }
+ sim.setup()
+ },
+ |sim| {
+ sim.run();
+ },
+ SmallInput,
+ )
+ });
+}
+
+fn benchmark_transfer_variable(c: &mut Criterion) {
+ benchmark_transfer(
+ c,
+ "Run multiple transfers with varying seeds",
+ std::env::var("SIMULATION_SEED").ok(),
+ );
+}
+
+fn benchmark_transfer_fixed(c: &mut Criterion) {
+ benchmark_transfer(
+ c,
+ "Run multiple transfers with the same seed",
+ Some("62df6933ba1f543cece01db8f27fb2025529b27f93df39e19f006e1db3b8c843"),
+ );
+}
+
+criterion_group! {
+ name = transfer;
+ config = Criterion::default().warm_up_time(Duration::from_secs(5)).measurement_time(Duration::from_secs(15));
+ targets = benchmark_transfer_variable, benchmark_transfer_fixed
+}
+criterion_main!(transfer);
diff --git a/third_party/rust/neqo-transport/src/ackrate.rs b/third_party/rust/neqo-transport/src/ackrate.rs
index cf68f9021f..d5923805d9 100644
--- a/third_party/rust/neqo-transport/src/ackrate.rs
+++ b/third_party/rust/neqo-transport/src/ackrate.rs
@@ -5,9 +5,8 @@
// except according to those terms.
// Management of the peer's ack rate.
-#![deny(clippy::pedantic)]
-use std::{cmp::max, convert::TryFrom, time::Duration};
+use std::{cmp::max, time::Duration};
use neqo_common::qtrace;
diff --git a/third_party/rust/neqo-transport/src/addr_valid.rs b/third_party/rust/neqo-transport/src/addr_valid.rs
index b5ed2d07d1..f596cfc3cb 100644
--- a/third_party/rust/neqo-transport/src/addr_valid.rs
+++ b/third_party/rust/neqo-transport/src/addr_valid.rs
@@ -7,7 +7,6 @@
// This file implements functions necessary for address validation.
use std::{
- convert::TryFrom,
net::{IpAddr, SocketAddr},
time::{Duration, Instant},
};
@@ -23,15 +22,15 @@ use crate::{
cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Res,
};
-/// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens.
+/// A prefix we add to Retry tokens to distinguish them from `NEW_TOKEN` tokens.
const TOKEN_IDENTIFIER_RETRY: &[u8] = &[0x52, 0x65, 0x74, 0x72, 0x79];
-/// A prefix on NEW_TOKEN tokens, that is maximally Hamming distant from NEW_TOKEN.
+/// A prefix on `NEW_TOKEN` tokens that is maximally Hamming distant from `NEW_TOKEN`.
/// Together, these need to have a low probability of collision, even if there is
/// corruption of individual bits in transit.
const TOKEN_IDENTIFIER_NEW_TOKEN: &[u8] = &[0xad, 0x9a, 0x8b, 0x8d, 0x86];
-/// The maximum number of tokens we'll save from NEW_TOKEN frames.
-/// This should be the same as the value of MAX_TICKETS in neqo-crypto.
+/// The maximum number of tokens we'll save from `NEW_TOKEN` frames.
+/// This should be the same as the value of `MAX_TICKETS` in neqo-crypto.
const MAX_NEW_TOKEN: usize = 4;
/// The number of tokens we'll track for the purposes of looking for duplicates.
/// This is based on how many might be received over a period where could be
@@ -44,9 +43,9 @@ const MAX_SAVED_TOKENS: usize = 8;
pub enum ValidateAddress {
/// Require address validation never.
Never,
- /// Require address validation unless a NEW_TOKEN token is provided.
+ /// Require address validation unless a `NEW_TOKEN` token is provided.
NoToken,
- /// Require address validation even if a NEW_TOKEN token is provided.
+ /// Require address validation even if a `NEW_TOKEN` token is provided.
Always,
}
@@ -143,7 +142,7 @@ impl AddressValidation {
self.generate_token(Some(dcid), peer_address, now)
}
- /// This generates a token for use with NEW_TOKEN.
+ /// This generates a token for use with `NEW_TOKEN`.
pub fn generate_new_token(&self, peer_address: SocketAddr, now: Instant) -> Res<Vec<u8>> {
self.generate_token(None, peer_address, now)
}
@@ -184,7 +183,7 @@ impl AddressValidation {
/// Less than one difference per byte indicates that it is likely not a Retry.
/// This generous interpretation allows for a lot of damage in transit.
/// Note that if this check fails, then the token will be treated like it came
- /// from NEW_TOKEN instead. If there truly is corruption of packets that causes
+ /// from `NEW_TOKEN` instead. If there truly is corruption of packets that causes
/// validation failure, it will be a failure that we try to recover from.
fn is_likely_retry(token: &[u8]) -> bool {
let mut difference = 0;
@@ -210,10 +209,9 @@ impl AddressValidation {
if self.validation == ValidateAddress::Never {
qinfo!("AddressValidation: no token; accepting");
return AddressValidationResult::Pass;
- } else {
- qinfo!("AddressValidation: no token; validating");
- return AddressValidationResult::Validate;
}
+ qinfo!("AddressValidation: no token; validating");
+ return AddressValidationResult::Validate;
}
if token.len() <= TOKEN_IDENTIFIER_RETRY.len() {
// Treat bad tokens strictly.
@@ -231,7 +229,7 @@ impl AddressValidation {
qinfo!("AddressValidation: valid Retry token for {}", cid);
AddressValidationResult::ValidRetry(cid)
} else {
- panic!("AddressValidation: Retry token with small CID {}", cid);
+ panic!("AddressValidation: Retry token with small CID {cid}");
}
} else if cid.is_empty() {
// An empty connection ID means NEW_TOKEN.
@@ -243,7 +241,7 @@ impl AddressValidation {
AddressValidationResult::Pass
}
} else {
- panic!("AddressValidation: NEW_TOKEN token with CID {}", cid);
+ panic!("AddressValidation: NEW_TOKEN token with CID {cid}");
}
} else {
// From here on, we have a token that we couldn't decrypt.
@@ -351,14 +349,13 @@ impl NewTokenState {
builder: &mut PacketBuilder,
tokens: &mut Vec<RecoveryToken>,
stats: &mut FrameStats,
- ) -> Res<()> {
+ ) {
if let Self::Server(ref mut sender) = self {
- sender.write_frames(builder, tokens, stats)?;
+ sender.write_frames(builder, tokens, stats);
}
- Ok(())
}
- /// If this a server, buffer a NEW_TOKEN for sending.
+ /// If this is a server, buffer a `NEW_TOKEN` for sending.
/// If this is a client, panic.
pub fn send_new_token(&mut self, token: Vec<u8>) {
if let Self::Server(ref mut sender) = self {
@@ -368,7 +365,7 @@ impl NewTokenState {
}
}
- /// If this a server, process a lost signal for a NEW_TOKEN frame.
+ /// If this is a server, process a lost signal for a `NEW_TOKEN` frame.
/// If this is a client, panic.
pub fn lost(&mut self, seqno: usize) {
if let Self::Server(ref mut sender) = self {
@@ -378,7 +375,7 @@ impl NewTokenState {
}
}
- /// If this a server, process remove the acknowledged NEW_TOKEN frame.
+ /// If this is a server, remove the acknowledged `NEW_TOKEN` frame.
/// If this is a client, panic.
pub fn acked(&mut self, seqno: usize) {
if let Self::Server(ref mut sender) = self {
@@ -403,7 +400,7 @@ impl NewTokenFrameStatus {
#[derive(Default)]
pub struct NewTokenSender {
- /// The unacknowledged NEW_TOKEN frames we are yet to send.
+ /// The unacknowledged `NEW_TOKEN` frames we are yet to send.
tokens: Vec<NewTokenFrameStatus>,
/// A sequence number that is used to track individual tokens
/// by reference (so that recovery tokens can be simple).
@@ -426,8 +423,8 @@ impl NewTokenSender {
builder: &mut PacketBuilder,
tokens: &mut Vec<RecoveryToken>,
stats: &mut FrameStats,
- ) -> Res<()> {
- for t in self.tokens.iter_mut() {
+ ) {
+ for t in &mut self.tokens {
if t.needs_sending && t.len() <= builder.remaining() {
t.needs_sending = false;
@@ -438,11 +435,10 @@ impl NewTokenSender {
stats.new_token += 1;
}
}
- Ok(())
}
pub fn lost(&mut self, seqno: usize) {
- for t in self.tokens.iter_mut() {
+ for t in &mut self.tokens {
if t.seqno == seqno {
t.needs_sending = true;
break;
diff --git a/third_party/rust/neqo-transport/src/cc/classic_cc.rs b/third_party/rust/neqo-transport/src/cc/classic_cc.rs
index 6f4a01d795..89be6c4b0f 100644
--- a/third_party/rust/neqo-transport/src/cc/classic_cc.rs
+++ b/third_party/rust/neqo-transport/src/cc/classic_cc.rs
@@ -5,7 +5,6 @@
// except according to those terms.
// Congestion control
-#![deny(clippy::pedantic)]
use std::{
cmp::{max, min},
@@ -536,10 +535,7 @@ impl<T: WindowAdjustment> ClassicCongestionControl<T> {
#[cfg(test)]
mod tests {
- use std::{
- convert::TryFrom,
- time::{Duration, Instant},
- };
+ use std::time::{Duration, Instant};
use neqo_common::qinfo;
use test_fixture::now;
diff --git a/third_party/rust/neqo-transport/src/cc/cubic.rs b/third_party/rust/neqo-transport/src/cc/cubic.rs
index c04a29b443..058a4c2aa4 100644
--- a/third_party/rust/neqo-transport/src/cc/cubic.rs
+++ b/third_party/rust/neqo-transport/src/cc/cubic.rs
@@ -4,10 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![deny(clippy::pedantic)]
-
use std::{
- convert::TryFrom,
fmt::{self, Display},
time::{Duration, Instant},
};
diff --git a/third_party/rust/neqo-transport/src/cc/mod.rs b/third_party/rust/neqo-transport/src/cc/mod.rs
index a1a43bd157..486d15e67e 100644
--- a/third_party/rust/neqo-transport/src/cc/mod.rs
+++ b/third_party/rust/neqo-transport/src/cc/mod.rs
@@ -5,7 +5,6 @@
// except according to those terms.
// Congestion control
-#![deny(clippy::pedantic)]
use std::{
fmt::{Debug, Display},
diff --git a/third_party/rust/neqo-transport/src/cc/new_reno.rs b/third_party/rust/neqo-transport/src/cc/new_reno.rs
index e51b3d6cc0..47d0d56f37 100644
--- a/third_party/rust/neqo-transport/src/cc/new_reno.rs
+++ b/third_party/rust/neqo-transport/src/cc/new_reno.rs
@@ -5,7 +5,6 @@
// except according to those terms.
// Congestion control
-#![deny(clippy::pedantic)]
use std::{
fmt::{self, Display},
diff --git a/third_party/rust/neqo-transport/src/cc/tests/cubic.rs b/third_party/rust/neqo-transport/src/cc/tests/cubic.rs
index 0c82e47817..2e0200fd6d 100644
--- a/third_party/rust/neqo-transport/src/cc/tests/cubic.rs
+++ b/third_party/rust/neqo-transport/src/cc/tests/cubic.rs
@@ -8,7 +8,6 @@
#![allow(clippy::cast_sign_loss)]
use std::{
- convert::TryFrom,
ops::Sub,
time::{Duration, Instant},
};
diff --git a/third_party/rust/neqo-transport/src/cc/tests/mod.rs b/third_party/rust/neqo-transport/src/cc/tests/mod.rs
index 238a7ad012..879693fb24 100644
--- a/third_party/rust/neqo-transport/src/cc/tests/mod.rs
+++ b/third_party/rust/neqo-transport/src/cc/tests/mod.rs
@@ -1,3 +1,4 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
diff --git a/third_party/rust/neqo-transport/src/cc/tests/new_reno.rs b/third_party/rust/neqo-transport/src/cc/tests/new_reno.rs
index a73844a755..4cc20de5a7 100644
--- a/third_party/rust/neqo-transport/src/cc/tests/new_reno.rs
+++ b/third_party/rust/neqo-transport/src/cc/tests/new_reno.rs
@@ -5,7 +5,6 @@
// except according to those terms.
// Congestion control
-#![deny(clippy::pedantic)]
use std::time::Duration;
diff --git a/third_party/rust/neqo-transport/src/cid.rs b/third_party/rust/neqo-transport/src/cid.rs
index be202daf25..6b3a95eaf0 100644
--- a/third_party/rust/neqo-transport/src/cid.rs
+++ b/third_party/rust/neqo-transport/src/cid.rs
@@ -10,14 +10,13 @@ use std::{
borrow::Borrow,
cell::{Ref, RefCell},
cmp::{max, min},
- convert::{AsRef, TryFrom},
ops::Deref,
rc::Rc,
};
use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder};
-use neqo_crypto::random;
-use smallvec::SmallVec;
+use neqo_crypto::{random, randomize};
+use smallvec::{smallvec, SmallVec};
use crate::{
frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken,
@@ -39,19 +38,26 @@ pub struct ConnectionId {
}
impl ConnectionId {
+ /// # Panics
+ /// When `len` is larger than `MAX_CONNECTION_ID_LEN`.
+ #[must_use]
pub fn generate(len: usize) -> Self {
assert!(matches!(len, 0..=MAX_CONNECTION_ID_LEN));
- Self::from(random(len))
+ let mut cid = smallvec![0; len];
+ randomize(&mut cid);
+ Self { cid }
}
// Apply a wee bit of greasing here in picking a length between 8 and 20 bytes long.
+ #[must_use]
pub fn generate_initial() -> Self {
- let v = random(1);
+ let v = random::<1>()[0];
// Bias selection toward picking 8 (>50% of the time).
- let len: usize = max(8, 5 + (v[0] & (v[0] >> 4))).into();
+ let len: usize = max(8, 5 + (v & (v >> 4))).into();
Self::generate(len)
}
+ #[must_use]
pub fn as_cid_ref(&self) -> ConnectionIdRef {
ConnectionIdRef::from(&self.cid[..])
}
@@ -75,12 +81,6 @@ impl From<SmallVec<[u8; MAX_CONNECTION_ID_LEN]>> for ConnectionId {
}
}
-impl From<Vec<u8>> for ConnectionId {
- fn from(cid: Vec<u8>) -> Self {
- Self::from(SmallVec::from(cid))
- }
-}
-
impl<T: AsRef<[u8]> + ?Sized> From<&T> for ConnectionId {
fn from(buf: &T) -> Self {
Self::from(SmallVec::from(buf.as_ref()))
@@ -201,7 +201,7 @@ impl ConnectionIdGenerator for EmptyConnectionIdGenerator {
}
}
-/// An RandomConnectionIdGenerator produces connection IDs of
+/// A `RandomConnectionIdGenerator` produces connection IDs of
/// a fixed length and random content. No effort is made to
/// prevent collisions.
pub struct RandomConnectionIdGenerator {
@@ -209,6 +209,7 @@ pub struct RandomConnectionIdGenerator {
}
impl RandomConnectionIdGenerator {
+ #[must_use]
pub fn new(len: usize) -> Self {
Self { len }
}
@@ -222,7 +223,9 @@ impl ConnectionIdDecoder for RandomConnectionIdGenerator {
impl ConnectionIdGenerator for RandomConnectionIdGenerator {
fn generate_cid(&mut self) -> Option<ConnectionId> {
- Some(ConnectionId::from(&random(self.len)))
+ let mut buf = smallvec![0; self.len];
+ randomize(&mut buf);
+ Some(ConnectionId::from(buf))
}
fn as_decoder(&self) -> &dyn ConnectionIdDecoder {
@@ -234,7 +237,7 @@ impl ConnectionIdGenerator for RandomConnectionIdGenerator {
}
}
-/// A single connection ID, as saved from NEW_CONNECTION_ID.
+/// A single connection ID, as saved from `NEW_CONNECTION_ID`.
/// This is templated so that the connection ID entries from a peer can be
/// saved with a stateless reset token. Local entries don't need that.
#[derive(Debug, PartialEq, Eq, Clone)]
@@ -250,8 +253,8 @@ pub struct ConnectionIdEntry<SRT: Clone + PartialEq> {
impl ConnectionIdEntry<[u8; 16]> {
/// Create a random stateless reset token so that it is hard to guess the correct
/// value and reset the connection.
- fn random_srt() -> [u8; 16] {
- <[u8; 16]>::try_from(&random(16)[..]).unwrap()
+ pub fn random_srt() -> [u8; 16] {
+ random::<16>()
}
/// Create the first entry, which won't have a stateless reset token.
@@ -294,6 +297,23 @@ impl ConnectionIdEntry<[u8; 16]> {
pub fn sequence_number(&self) -> u64 {
self.seqno
}
+
+ /// Write the entry out in a `NEW_CONNECTION_ID` frame.
+ /// Returns `true` if the frame was written, `false` if there is insufficient space.
+ pub fn write(&self, builder: &mut PacketBuilder, stats: &mut FrameStats) -> bool {
+ let len = 1 + Encoder::varint_len(self.seqno) + 1 + 1 + self.cid.len() + 16;
+ if builder.remaining() < len {
+ return false;
+ }
+
+ builder.encode_varint(FRAME_TYPE_NEW_CONNECTION_ID);
+ builder.encode_varint(self.seqno);
+ builder.encode_varint(0u64);
+ builder.encode_vec(1, &self.cid);
+ builder.encode(&self.srt);
+ stats.new_connection_id += 1;
+ true
+ }
}
impl ConnectionIdEntry<()> {
@@ -430,7 +450,7 @@ pub struct ConnectionIdManager {
limit: usize,
/// The next sequence number that will be used for sending `NEW_CONNECTION_ID` frames.
next_seqno: u64,
- /// Outstanding, but lost NEW_CONNECTION_ID frames will be stored here.
+ /// Outstanding, but lost `NEW_CONNECTION_ID` frames will be stored here.
lost_new_connection_id: Vec<ConnectionIdEntry<[u8; 16]>>,
}
@@ -476,7 +496,7 @@ impl ConnectionIdManager {
.add_local(ConnectionIdEntry::new(self.next_seqno, cid.clone(), ()));
self.next_seqno += 1;
- let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap();
+ let srt = ConnectionIdEntry::random_srt();
Ok((cid, srt))
} else {
Err(Error::ConnectionIdsExhausted)
@@ -516,39 +536,19 @@ impl ConnectionIdManager {
);
}
- fn write_entry(
- &mut self,
- entry: &ConnectionIdEntry<[u8; 16]>,
- builder: &mut PacketBuilder,
- stats: &mut FrameStats,
- ) -> Res<bool> {
- let len = 1 + Encoder::varint_len(entry.seqno) + 1 + 1 + entry.cid.len() + 16;
- if builder.remaining() < len {
- return Ok(false);
- }
-
- builder.encode_varint(FRAME_TYPE_NEW_CONNECTION_ID);
- builder.encode_varint(entry.seqno);
- builder.encode_varint(0u64);
- builder.encode_vec(1, &entry.cid);
- builder.encode(&entry.srt);
- stats.new_connection_id += 1;
- Ok(true)
- }
-
pub fn write_frames(
&mut self,
builder: &mut PacketBuilder,
tokens: &mut Vec<RecoveryToken>,
stats: &mut FrameStats,
- ) -> Res<()> {
+ ) {
if self.generator.deref().borrow().generates_empty_cids() {
debug_assert_eq!(self.generator.borrow_mut().generate_cid().unwrap().len(), 0);
- return Ok(());
+ return;
}
while let Some(entry) = self.lost_new_connection_id.pop() {
- if self.write_entry(&entry, builder, stats)? {
+ if entry.write(builder, stats) {
tokens.push(RecoveryToken::NewConnectionId(entry));
} else {
// This shouldn't happen often.
@@ -565,7 +565,7 @@ impl ConnectionIdManager {
if let Some(cid) = maybe_cid {
assert_ne!(cid.len(), 0);
// TODO: generate the stateless reset tokens from the connection ID and a key.
- let srt = <[u8; 16]>::try_from(&random(16)[..]).unwrap();
+ let srt = ConnectionIdEntry::random_srt();
let seqno = self.next_seqno;
self.next_seqno += 1;
@@ -573,11 +573,10 @@ impl ConnectionIdManager {
.add_local(ConnectionIdEntry::new(seqno, cid.clone(), ()));
let entry = ConnectionIdEntry::new(seqno, cid, srt);
- debug_assert!(self.write_entry(&entry, builder, stats)?);
+ entry.write(builder, stats);
tokens.push(RecoveryToken::NewConnectionId(entry));
}
}
- Ok(())
}
pub fn lost(&mut self, entry: &ConnectionIdEntry<[u8; 16]>) {
@@ -594,16 +593,17 @@ impl ConnectionIdManager {
mod tests {
use test_fixture::fixture_init;
- use super::*;
+ use crate::{cid::MAX_CONNECTION_ID_LEN, ConnectionId};
#[test]
fn generate_initial_cid() {
fixture_init();
for _ in 0..100 {
let cid = ConnectionId::generate_initial();
- if !matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN) {
- panic!("connection ID {:?}", cid);
- }
+ assert!(
+ matches!(cid.len(), 8..=MAX_CONNECTION_ID_LEN),
+ "connection ID length {cid:?}",
+ );
}
}
}
diff --git a/third_party/rust/neqo-transport/src/connection/dump.rs b/third_party/rust/neqo-transport/src/connection/dump.rs
index 77d51c605c..8a4f34dbb8 100644
--- a/third_party/rust/neqo-transport/src/connection/dump.rs
+++ b/third_party/rust/neqo-transport/src/connection/dump.rs
@@ -27,11 +27,11 @@ pub fn dump_packet(
pn: PacketNumber,
payload: &[u8],
) {
- if !log::log_enabled!(log::Level::Debug) {
+ if log::STATIC_MAX_LEVEL == log::LevelFilter::Off || !log::log_enabled!(log::Level::Debug) {
return;
}
- let mut s = String::from("");
+ let mut s = String::new();
let mut d = Decoder::from(payload);
while d.remaining() > 0 {
let Ok(f) = Frame::decode(&mut d) else {
diff --git a/third_party/rust/neqo-transport/src/connection/mod.rs b/third_party/rust/neqo-transport/src/connection/mod.rs
index 2de388418a..c81a3727c6 100644
--- a/third_party/rust/neqo-transport/src/connection/mod.rs
+++ b/third_party/rust/neqo-transport/src/connection/mod.rs
@@ -9,7 +9,6 @@
use std::{
cell::RefCell,
cmp::{max, min},
- convert::TryFrom,
fmt::{self, Debug},
mem,
net::{IpAddr, SocketAddr},
@@ -23,7 +22,7 @@ use neqo_common::{
qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role,
};
use neqo_crypto::{
- agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group,
+ agent::CertificateInfo, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group,
HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo,
Server, ZeroRttChecker,
};
@@ -48,6 +47,7 @@ use crate::{
recovery::{LossRecovery, RecoveryToken, SendProfile},
recv_stream::RecvStreamStats,
rtt::GRANULARITY,
+ send_stream::SendStream,
stats::{Stats, StatsCell},
stream_id::StreamType,
streams::{SendOrder, Streams},
@@ -59,6 +59,7 @@ use crate::{
version::{Version, WireVersion},
AppError, ConnectionError, Error, Res, StreamId,
};
+
mod dump;
mod idle;
pub mod params;
@@ -66,6 +67,7 @@ mod saved;
mod state;
#[cfg(test)]
pub mod test_internal;
+
use dump::dump_packet;
use idle::IdleTimeout;
pub use params::ConnectionParameters;
@@ -78,9 +80,6 @@ pub use state::{ClosingFrame, State};
pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority};
-#[derive(Debug, Default)]
-struct Packet(Vec<u8>);
-
/// The number of Initial packets that the client will send in response
/// to receiving an undecryptable packet during the early part of the
/// handshake. This is a hack, but a useful one.
@@ -96,7 +95,7 @@ pub enum ZeroRttState {
}
#[derive(Clone, Debug, PartialEq, Eq)]
-/// Type returned from process() and `process_output()`. Users are required to
+/// Type returned from `process()` and `process_output()`. Users are required to
/// call these repeatedly until `Callback` or `None` is returned.
pub enum Output {
/// Connection requires no action.
@@ -119,6 +118,7 @@ impl Output {
}
/// Get a reference to the Datagram, if any.
+ #[must_use]
pub fn as_dgram_ref(&self) -> Option<&Datagram> {
match self {
Self::Datagram(dg) => Some(dg),
@@ -136,7 +136,7 @@ impl Output {
}
}
-/// Used by inner functions like Connection::output.
+/// Used by inner functions like `Connection::output`.
enum SendOption {
/// Yes, please send this datagram.
Yes(Datagram),
@@ -257,7 +257,7 @@ pub struct Connection {
/// Some packets were received, but not tracked.
received_untracked: bool,
- /// This is responsible for the QuicDatagrams' handling:
+ /// This is responsible for the `QuicDatagrams`' handling:
/// <https://datatracker.ietf.org/doc/html/draft-ietf-quic-datagram>
quic_datagrams: QuicDatagrams,
@@ -271,8 +271,8 @@ pub struct Connection {
new_token: NewTokenState,
stats: StatsCell,
qlog: NeqoQlog,
- /// A session ticket was received without NEW_TOKEN,
- /// this is when that turns into an event without NEW_TOKEN.
+ /// A session ticket was received without `NEW_TOKEN`;
+ /// this is when that ticket turns into an event even without `NEW_TOKEN`.
release_resumption_token_timer: Option<Instant>,
conn_params: ConnectionParameters,
hrtime: hrtime::Handle,
@@ -302,6 +302,8 @@ impl Connection {
const LOOSE_TIMER_RESOLUTION: Duration = Duration::from_millis(50);
/// Create a new QUIC connection with Client role.
+ /// # Errors
+ /// When NSS fails and an agent cannot be created.
pub fn new_client(
server_name: impl Into<String>,
protocols: &[impl AsRef<str>],
@@ -338,6 +340,8 @@ impl Connection {
}
/// Create a new QUIC connection with Server role.
+ /// # Errors
+ /// When NSS fails and an agent cannot be created.
pub fn new_server(
certs: &[impl AsRef<str>],
protocols: &[impl AsRef<str>],
@@ -427,6 +431,8 @@ impl Connection {
Ok(c)
}
+ /// # Errors
+ /// When the operation fails.
pub fn server_enable_0rtt(
&mut self,
anti_replay: &AntiReplay,
@@ -436,6 +442,8 @@ impl Connection {
.server_enable_0rtt(self.tps.clone(), anti_replay, zero_rtt_checker)
}
+ /// # Errors
+ /// When the operation fails.
pub fn server_enable_ech(
&mut self,
config: u8,
@@ -447,10 +455,13 @@ impl Connection {
}
/// Get the active ECH configuration, which is empty if ECH is disabled.
+ #[must_use]
pub fn ech_config(&self) -> &[u8] {
self.crypto.ech_config()
}
+ /// # Errors
+ /// When the operation fails.
pub fn client_enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> {
self.crypto.client_enable_ech(ech_config_list)
}
@@ -468,8 +479,9 @@ impl Connection {
}
/// Get the original destination connection id for this connection. This
- /// will always be present for Role::Client but not if Role::Server is in
- /// State::Init.
+ /// will always be present for `Role::Client` but not if `Role::Server` is in
+ /// `State::Init`.
+ #[must_use]
pub fn odcid(&self) -> Option<&ConnectionId> {
self.original_destination_cid.as_ref()
}
@@ -478,8 +490,9 @@ impl Connection {
/// This only sets transport parameters without dealing with other aspects of
/// setting the value.
///
+ /// # Errors
+ /// When the transport parameter is invalid.
/// # Panics
- ///
/// This panics if the transport parameter is known to this crate.
pub fn set_local_tparam(&self, tp: TransportParameterId, value: TransportParameter) -> Res<()> {
#[cfg(not(test))]
@@ -502,9 +515,9 @@ impl Connection {
/// Retry.
pub(crate) fn set_retry_cids(
&mut self,
- odcid: ConnectionId,
+ odcid: &ConnectionId,
remote_cid: ConnectionId,
- retry_cid: ConnectionId,
+ retry_cid: &ConnectionId,
) {
debug_assert_eq!(self.role, Role::Server);
qtrace!(
@@ -533,12 +546,16 @@ impl Connection {
/// Set ALPN preferences. Strings that appear earlier in the list are given
/// higher preference.
+ /// # Errors
+ /// When the operation fails, which is usually due to bad inputs or bad connection state.
pub fn set_alpn(&mut self, protocols: &[impl AsRef<str>]) -> Res<()> {
self.crypto.tls.set_alpn(protocols)?;
Ok(())
}
/// Enable a set of ciphers.
+ /// # Errors
+ /// When the operation fails, which is usually due to bad inputs or bad connection state.
pub fn set_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> {
if self.state != State::Init {
qerror!([self], "Cannot enable ciphers in state {:?}", self.state);
@@ -549,6 +566,8 @@ impl Connection {
}
/// Enable a set of key exchange groups.
+ /// # Errors
+ /// When the operation fails, which is usually due to bad inputs or bad connection state.
pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> {
if self.state != State::Init {
qerror!([self], "Cannot enable groups in state {:?}", self.state);
@@ -559,6 +578,8 @@ impl Connection {
}
/// Set the number of additional key shares to send in the client hello.
+ /// # Errors
+ /// When the operation fails, which is usually due to bad inputs or bad connection state.
pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> {
if self.state != State::Init {
qerror!([self], "Cannot enable groups in state {:?}", self.state);
@@ -667,6 +688,8 @@ impl Connection {
/// This can only be called once and only on the client.
/// After calling the function, it should be possible to attempt 0-RTT
/// if the token supports that.
+ /// # Errors
+ /// When the operation fails, which is usually due to bad inputs or bad connection state.
pub fn enable_resumption(&mut self, now: Instant, token: impl AsRef<[u8]>) -> Res<()> {
if self.state != State::Init {
qerror!([self], "set token in state {:?}", self.state);
@@ -683,8 +706,9 @@ impl Connection {
);
let mut dec = Decoder::from(token.as_ref());
- let version =
- Version::try_from(dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)? as u32)?;
+ let version = Version::try_from(u32::try_from(
+ dec.decode_uint(4).ok_or(Error::InvalidResumptionToken)?,
+ )?)?;
qtrace!([self], " version {:?}", version);
if !self.conn_params.get_versions().all().contains(&version) {
return Err(Error::DisabledVersion);
@@ -732,13 +756,15 @@ impl Connection {
Ok(())
}
- pub(crate) fn set_validation(&mut self, validation: Rc<RefCell<AddressValidation>>) {
+ pub(crate) fn set_validation(&mut self, validation: &Rc<RefCell<AddressValidation>>) {
qtrace!([self], "Enabling NEW_TOKEN");
assert_eq!(self.role, Role::Server);
- self.address_validation = AddressValidationInfo::Server(Rc::downgrade(&validation));
+ self.address_validation = AddressValidationInfo::Server(Rc::downgrade(validation));
}
- /// Send a TLS session ticket AND a NEW_TOKEN frame (if possible).
+ /// Send a TLS session ticket AND a `NEW_TOKEN` frame (if possible).
+ /// # Errors
+ /// When the operation fails, which is usually due to bad inputs or bad connection state.
pub fn send_ticket(&mut self, now: Instant, extra: &[u8]) -> Res<()> {
if self.role == Role::Client {
return Err(Error::WrongRole);
@@ -774,15 +800,19 @@ impl Connection {
}
}
+ #[must_use]
pub fn tls_info(&self) -> Option<&SecretAgentInfo> {
self.crypto.tls.info()
}
+ /// # Errors
+ /// When there is no information to obtain.
pub fn tls_preinfo(&self) -> Res<SecretAgentPreInfo> {
Ok(self.crypto.tls.preinfo()?)
}
/// Get the peer's certificate chain and other info.
+ #[must_use]
pub fn peer_certificate(&self) -> Option<CertificateInfo> {
self.crypto.tls.peer_certificate()
}
@@ -802,26 +832,31 @@ impl Connection {
}
/// Get the role of the connection.
+ #[must_use]
pub fn role(&self) -> Role {
self.role
}
/// Get the state of the connection.
+ #[must_use]
pub fn state(&self) -> &State {
&self.state
}
/// The QUIC version in use.
+ #[must_use]
pub fn version(&self) -> Version {
self.version
}
/// Get the 0-RTT state of the connection.
+ #[must_use]
pub fn zero_rtt_state(&self) -> ZeroRttState {
self.zero_rtt_state
}
/// Get a snapshot of collected statistics.
+ #[must_use]
pub fn stats(&self) -> Stats {
let mut v = self.stats.borrow().clone();
if let Some(p) = self.paths.primary_fallible() {
@@ -888,7 +923,7 @@ impl Connection {
res
}
- /// For use with process_input(). Errors there can be ignored, but this
+ /// For use with `process_input()`. Errors there can be ignored, but this
/// needs to ensure that the state is updated.
fn absorb_error<T>(&mut self, now: Instant, res: Res<T>) -> Option<T> {
self.capture_error(None, now, 0, res).ok()
@@ -1234,6 +1269,7 @@ impl Connection {
/// Perform any processing that we might have to do on packets prior to
/// attempting to remove protection.
+ #[allow(clippy::too_many_lines)] // Yeah, it's a work in progress.
fn preprocess_packet(
&mut self,
packet: &PublicPacket,
@@ -1346,17 +1382,17 @@ impl Connection {
}
State::WaitInitial => PreprocessResult::Continue,
State::WaitVersion | State::Handshaking | State::Connected | State::Confirmed => {
- if !self.cid_manager.is_valid(packet.dcid()) {
- self.stats
- .borrow_mut()
- .pkt_dropped(format!("Invalid DCID {:?}", packet.dcid()));
- PreprocessResult::Next
- } else {
+ if self.cid_manager.is_valid(packet.dcid()) {
if self.role == Role::Server && packet.packet_type() == PacketType::Handshake {
// Server has received a Handshake packet -> discard Initial keys and states
self.discard_keys(PacketNumberSpace::Initial, now);
}
PreprocessResult::Continue
+ } else {
+ self.stats
+ .borrow_mut()
+ .pkt_dropped(format!("Invalid DCID {:?}", packet.dcid()));
+ PreprocessResult::Next
}
}
State::Closing { .. } => {
@@ -1376,7 +1412,7 @@ impl Connection {
Ok(res)
}
- /// After a Initial, Handshake, ZeroRtt, or Short packet is successfully processed.
+ /// After an Initial, Handshake, `ZeroRtt`, or Short packet is successfully processed.
fn postprocess_packet(
&mut self,
path: &PathRef,
@@ -1576,7 +1612,6 @@ impl Connection {
/// During connection setup, the first path needs to be setup.
/// This uses the connection IDs that were provided during the handshake
/// to setup that path.
- #[allow(clippy::or_fun_call)] // Remove when MSRV >= 1.59
fn setup_handshake_path(&mut self, path: &PathRef, now: Instant) {
self.paths.make_permanent(
path,
@@ -1616,7 +1651,7 @@ impl Connection {
}
}
- /// After an error, a permanent path is needed to send the CONNECTION_CLOSE.
+ /// After an error, a permanent path is needed to send the `CONNECTION_CLOSE`.
/// This attempts to ensure that this exists. As the connection is now
/// temporary, there is no reason to do anything special here.
fn ensure_error_path(&mut self, path: &PathRef, packet: &PublicPacket, now: Instant) {
@@ -1815,7 +1850,7 @@ impl Connection {
State::Closing { .. } | State::Draining { .. } | State::Closed(_) => {
if let Some(details) = self.state_signaling.close_frame() {
let path = Rc::clone(details.path());
- let res = self.output_close(details);
+ let res = self.output_close(&details);
self.capture_error(Some(path), now, 0, res)
} else {
Ok(SendOption::default())
@@ -1892,7 +1927,7 @@ impl Connection {
}
}
- fn output_close(&mut self, close: ClosingFrame) -> Res<SendOption> {
+ fn output_close(&mut self, close: &ClosingFrame) -> Res<SendOption> {
let mut encoder = Encoder::with_capacity(256);
let grease_quic_bit = self.can_grease_quic_bit();
let version = self.version();
@@ -1902,6 +1937,14 @@ impl Connection {
};
let path = close.path().borrow();
+ // In some error cases, we will not be able to make a new, permanent path.
+ // For example, if we run out of connection IDs and the error results from
+ // a packet on a new path, we avoid sending (and the privacy risk) rather
+ // than reuse a connection ID.
+ if path.is_temporary() {
+ assert!(!cfg!(test), "attempting to close with a temporary path");
+ return Err(Error::InternalError);
+ }
let (_, mut builder) = Self::build_packet_header(
&path,
cspace,
@@ -1932,7 +1975,7 @@ impl Connection {
};
sanitized
.as_ref()
- .unwrap_or(&close)
+ .unwrap_or(close)
.write_frame(&mut builder);
encoder = builder.build(tx)?;
}
@@ -1946,11 +1989,11 @@ impl Connection {
&mut self,
builder: &mut PacketBuilder,
tokens: &mut Vec<RecoveryToken>,
- ) -> Res<()> {
+ ) {
let stats = &mut self.stats.borrow_mut();
let frame_stats = &mut stats.frame_tx;
if self.role == Role::Server {
- if let Some(t) = self.state_signaling.write_done(builder)? {
+ if let Some(t) = self.state_signaling.write_done(builder) {
tokens.push(t);
frame_stats.handshake_done += 1;
}
@@ -1959,7 +2002,7 @@ impl Connection {
self.streams
.write_frames(TransmissionPriority::Critical, builder, tokens, frame_stats);
if builder.is_full() {
- return Ok(());
+ return;
}
self.streams.write_frames(
@@ -1969,36 +2012,35 @@ impl Connection {
frame_stats,
);
if builder.is_full() {
- return Ok(());
+ return;
}
// NEW_CONNECTION_ID, RETIRE_CONNECTION_ID, and ACK_FREQUENCY.
- self.cid_manager
- .write_frames(builder, tokens, frame_stats)?;
+ self.cid_manager.write_frames(builder, tokens, frame_stats);
if builder.is_full() {
- return Ok(());
+ return;
}
self.paths.write_frames(builder, tokens, frame_stats);
if builder.is_full() {
- return Ok(());
+ return;
}
self.streams
.write_frames(TransmissionPriority::High, builder, tokens, frame_stats);
if builder.is_full() {
- return Ok(());
+ return;
}
self.streams
.write_frames(TransmissionPriority::Normal, builder, tokens, frame_stats);
if builder.is_full() {
- return Ok(());
+ return;
}
// Datagrams are best-effort and unreliable. Let streams starve them for now.
self.quic_datagrams.write_frames(builder, tokens, stats);
if builder.is_full() {
- return Ok(());
+ return;
}
let frame_stats = &mut stats.frame_tx;
@@ -2009,13 +2051,13 @@ impl Connection {
builder,
tokens,
frame_stats,
- )?;
+ );
if builder.is_full() {
- return Ok(());
+ return;
}
- self.new_token.write_frames(builder, tokens, frame_stats)?;
+ self.new_token.write_frames(builder, tokens, frame_stats);
if builder.is_full() {
- return Ok(());
+ return;
}
self.streams
@@ -2027,8 +2069,6 @@ impl Connection {
w.write_frames(builder);
}
}
-
- Ok(())
}
// Maybe send a probe. Return true if the packet was ack-eliciting.
@@ -2089,7 +2129,7 @@ impl Connection {
profile: &SendProfile,
builder: &mut PacketBuilder,
now: Instant,
- ) -> Res<(Vec<RecoveryToken>, bool, bool)> {
+ ) -> (Vec<RecoveryToken>, bool, bool) {
let mut tokens = Vec::new();
let primary = path.borrow().is_primary();
let mut ack_eliciting = false;
@@ -2125,16 +2165,15 @@ impl Connection {
if profile.ack_only(space) {
// If we are CC limited we can only send acks!
- return Ok((tokens, false, false));
+ return (tokens, false, false);
}
if primary {
if space == PacketNumberSpace::ApplicationData {
- self.write_appdata_frames(builder, &mut tokens)?;
+ self.write_appdata_frames(builder, &mut tokens);
} else {
let stats = &mut self.stats.borrow_mut().frame_tx;
- self.crypto
- .write_frame(space, builder, &mut tokens, stats)?;
+ self.crypto.write_frame(space, builder, &mut tokens, stats);
}
}
@@ -2158,11 +2197,12 @@ impl Connection {
};
stats.all += tokens.len();
- Ok((tokens, ack_eliciting, padded))
+ (tokens, ack_eliciting, padded)
}
/// Build a datagram, possibly from multiple packets (for different PN
/// spaces) and each containing 1+ frames.
+ #[allow(clippy::too_many_lines)] // Yeah, that's just the way it is.
fn output_path(&mut self, path: &PathRef, now: Instant) -> Res<SendOption> {
let mut initial_sent = None;
let mut needs_padding = false;
@@ -2217,7 +2257,7 @@ impl Connection {
// Add frames to the packet.
let payload_start = builder.len();
let (tokens, ack_eliciting, padded) =
- self.write_frames(path, *space, &profile, &mut builder, now)?;
+ self.write_frames(path, *space, &profile, &mut builder, now);
if builder.packet_empty() {
// Nothing to include in this packet.
encoder = builder.abort();
@@ -2306,6 +2346,8 @@ impl Connection {
}
}
+ /// # Errors
+ /// When connection state is not valid.
pub fn initiate_key_update(&mut self) -> Res<()> {
if self.state == State::Confirmed {
let la = self
@@ -2319,6 +2361,7 @@ impl Connection {
}
#[cfg(test)]
+ #[must_use]
pub fn get_epochs(&self) -> (Option<usize>, Option<usize>) {
self.crypto.states.get_epochs()
}
@@ -2377,6 +2420,7 @@ impl Connection {
);
}
+ #[must_use]
pub fn is_stream_id_allowed(&self, stream_id: StreamId) -> bool {
self.streams.is_stream_id_allowed(stream_id)
}
@@ -2404,7 +2448,7 @@ impl Connection {
} else {
// The other side didn't provide a stateless reset token.
// That's OK, they can try guessing this.
- <[u8; 16]>::try_from(&random(16)[..]).unwrap()
+ ConnectionIdEntry::random_srt()
};
self.paths
.primary()
@@ -2585,10 +2629,16 @@ impl Connection {
) -> Res<()> {
qtrace!([self], "Handshake space={} data={:0x?}", space, data);
+ let was_authentication_pending =
+ *self.crypto.tls.state() == HandshakeState::AuthenticationPending;
let try_update = data.is_some();
match self.crypto.handshake(now, space, data)? {
HandshakeState::Authenticated(_) | HandshakeState::InProgress => (),
- HandshakeState::AuthenticationPending => self.events.authentication_needed(),
+ HandshakeState::AuthenticationPending => {
+ if !was_authentication_pending {
+ self.events.authentication_needed();
+ }
+ }
HandshakeState::EchFallbackAuthenticationPending(public_name) => self
.events
.ech_fallback_authentication_needed(public_name.clone()),
@@ -2623,6 +2673,7 @@ impl Connection {
Ok(())
}
+ #[allow(clippy::too_many_lines)] // Yep, but it's a nice big match, which is basically lots of little functions.
fn input_frame(
&mut self,
path: &PathRef,
@@ -2640,7 +2691,7 @@ impl Connection {
if frame.is_stream() {
return self
.streams
- .input_frame(frame, &mut self.stats.borrow_mut().frame_rx);
+ .input_frame(&frame, &mut self.stats.borrow_mut().frame_rx);
}
match frame {
Frame::Padding => {
@@ -3005,11 +3056,10 @@ impl Connection {
Ok(())
}
- /// Set the SendOrder of a stream. Re-enqueues to keep the ordering correct
+ /// Set the `SendOrder` of a stream. Re-enqueues to keep the ordering correct
///
/// # Errors
- ///
- /// Returns InvalidStreamId if the stream id doesn't exist
+ /// When the stream does not exist.
pub fn stream_sendorder(
&mut self,
stream_id: StreamId,
@@ -3021,16 +3071,21 @@ impl Connection {
/// Set the Fairness of a stream
///
/// # Errors
- ///
- /// Returns InvalidStreamId if the stream id doesn't exist
+ /// When the stream does not exist.
pub fn stream_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> {
self.streams.set_fairness(stream_id, fairness)
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn send_stream_stats(&self, stream_id: StreamId) -> Res<SendStreamStats> {
- self.streams.get_send_stream(stream_id).map(|s| s.stats())
+ self.streams
+ .get_send_stream(stream_id)
+ .map(SendStream::stats)
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn recv_stream_stats(&mut self, stream_id: StreamId) -> Res<RecvStreamStats> {
let stream = self.streams.get_recv_stream_mut(stream_id)?;
@@ -3050,8 +3105,8 @@ impl Connection {
self.streams.get_send_stream_mut(stream_id)?.send(data)
}
- /// Send all data or nothing on a stream. May cause DATA_BLOCKED or
- /// STREAM_DATA_BLOCKED frames to be sent.
+ /// Send all data or nothing on a stream. May cause `DATA_BLOCKED` or
+ /// `STREAM_DATA_BLOCKED` frames to be sent.
/// Returns true if data was successfully sent, otherwise false.
///
/// # Errors
@@ -3075,20 +3130,26 @@ impl Connection {
val.map(|v| v == data.len())
}
- /// Bytes that stream_send() is guaranteed to accept for sending.
+ /// Bytes that `stream_send()` is guaranteed to accept for sending.
/// i.e. that will not be blocked by flow credits or send buffer max
/// capacity.
+ /// # Errors
+ /// When the stream ID is invalid.
pub fn stream_avail_send_space(&self, stream_id: StreamId) -> Res<usize> {
Ok(self.streams.get_send_stream(stream_id)?.avail())
}
/// Close the stream. Enqueued data will be sent.
+ /// # Errors
+ /// When the stream ID is invalid.
pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> {
self.streams.get_send_stream_mut(stream_id)?.close();
Ok(())
}
/// Abandon transmission of in-flight and future stream data.
+ /// # Errors
+ /// When the stream ID is invalid.
pub fn stream_reset_send(&mut self, stream_id: StreamId, err: AppError) -> Res<()> {
self.streams.get_send_stream_mut(stream_id)?.reset(err);
Ok(())
@@ -3109,6 +3170,8 @@ impl Connection {
}
/// Application is no longer interested in this stream.
+ /// # Errors
+ /// When the stream ID is invalid.
pub fn stream_stop_sending(&mut self, stream_id: StreamId, err: AppError) -> Res<()> {
let stream = self.streams.get_recv_stream_mut(stream_id)?;
@@ -3142,6 +3205,7 @@ impl Connection {
self.streams.keep_alive(stream_id, keep)
}
+ #[must_use]
pub fn remote_datagram_size(&self) -> u64 {
self.quic_datagrams.remote_datagram_size()
}
@@ -3150,9 +3214,10 @@ impl Connection {
/// The value will change over time depending on the encoded size of the
/// packet number, ack frames, etc.
///
- /// # Error
- ///
+ /// # Errors
/// The function returns `NotAvailable` if datagrams are not enabled.
+ /// # Panics
+ /// Basically never, because that unwrap won't fail.
pub fn max_datagram_size(&self) -> Res<u64> {
let max_dgram_size = self.quic_datagrams.remote_datagram_size();
if max_dgram_size == 0 {
@@ -3193,7 +3258,7 @@ impl Connection {
/// Queue a datagram for sending.
///
- /// # Error
+ /// # Errors
///
/// The function returns `TooMuchData` if the supplied buffer is bigger than
/// the allowed remote datagram size. The function does not check if the
@@ -3203,7 +3268,6 @@ impl Connection {
/// to check the estimated max datagram size and to use smaller datagrams.
/// `max_datagram_size` is just a current estimate and will change over
/// time depending on the encoded size of the packet number, ack frames, etc.
-
pub fn send_datagram(&mut self, buf: &[u8], id: impl Into<DatagramTracking>) -> Res<()> {
self.quic_datagrams
.add_datagram(buf, id.into(), &mut self.stats.borrow_mut())
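
The documentation above recommends checking the current estimate via `max_datagram_size` before queueing a datagram. A minimal sketch of that pattern, not part of the diff: `conn` is a hypothetical established `Connection`, and the tracking id is simply passed through.

    // Hedged sketch: respect the datagram size estimate before queueing.
    fn queue_datagram(conn: &mut Connection, payload: &[u8], id: impl Into<DatagramTracking>) -> Res<()> {
        // max_datagram_size returns Err(Error::NotAvailable) when datagrams are disabled.
        let limit = conn.max_datagram_size()?;
        if u64::try_from(payload.len())? > limit {
            // A larger buffer would be rejected by send_datagram with Error::TooMuchData.
            return Err(Error::TooMuchData);
        }
        conn.send_datagram(payload, id)
    }

The estimate can change between the check and the send, so callers should still handle `TooMuchData` from `send_datagram` itself.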
diff --git a/third_party/rust/neqo-transport/src/connection/params.rs b/third_party/rust/neqo-transport/src/connection/params.rs
index 48aba4303b..72d1efa3ee 100644
--- a/third_party/rust/neqo-transport/src/connection/params.rs
+++ b/third_party/rust/neqo-transport/src/connection/params.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{cmp::max, convert::TryFrom, time::Duration};
+use std::{cmp::max, time::Duration};
pub use crate::recovery::FAST_PTO_SCALE;
use crate::{
@@ -41,7 +41,7 @@ pub enum PreferredAddressConfig {
Address(PreferredAddress),
}
-/// ConnectionParameters use for setting intitial value for QUIC parameters.
+/// `ConnectionParameters` used for setting initial values for QUIC parameters.
/// This collects configuration like initial limits, protocol version, and
/// congestion control algorithm.
#[derive(Debug, Clone)]
@@ -108,6 +108,7 @@ impl Default for ConnectionParameters {
}
impl ConnectionParameters {
+ #[must_use]
pub fn get_versions(&self) -> &VersionConfig {
&self.versions
}
@@ -120,29 +121,35 @@ impl ConnectionParameters {
/// versions that should be enabled. This list should contain the initial
/// version and be in order of preference, with more preferred versions
/// before less preferred.
+ #[must_use]
pub fn versions(mut self, initial: Version, all: Vec<Version>) -> Self {
self.versions = VersionConfig::new(initial, all);
self
}
+ #[must_use]
pub fn get_cc_algorithm(&self) -> CongestionControlAlgorithm {
self.cc_algorithm
}
+ #[must_use]
pub fn cc_algorithm(mut self, v: CongestionControlAlgorithm) -> Self {
self.cc_algorithm = v;
self
}
+ #[must_use]
pub fn get_max_data(&self) -> u64 {
self.max_data
}
+ #[must_use]
pub fn max_data(mut self, v: u64) -> Self {
self.max_data = v;
self
}
+ #[must_use]
pub fn get_max_streams(&self, stream_type: StreamType) -> u64 {
match stream_type {
StreamType::BiDi => self.max_streams_bidi,
@@ -153,6 +160,7 @@ impl ConnectionParameters {
/// # Panics
///
/// If v > 2^60 (the maximum allowed by the protocol).
+ #[must_use]
pub fn max_streams(mut self, stream_type: StreamType, v: u64) -> Self {
assert!(v <= (1 << 60), "max_streams is too large");
match stream_type {
@@ -171,6 +179,7 @@ impl ConnectionParameters {
/// # Panics
///
/// If `StreamType::UniDi` and `false` are passed as that is not a valid combination.
+ #[must_use]
pub fn get_max_stream_data(&self, stream_type: StreamType, remote: bool) -> u64 {
match (stream_type, remote) {
(StreamType::BiDi, false) => self.max_stream_data_bidi_local,
@@ -188,6 +197,7 @@ impl ConnectionParameters {
///
/// If `StreamType::UniDi` and `false` are passed as that is not a valid combination
/// or if v >= 62 (the maximum allowed by the protocol).
+ #[must_use]
pub fn max_stream_data(mut self, stream_type: StreamType, remote: bool, v: u64) -> Self {
assert!(v < (1 << 62), "max stream data is too large");
match (stream_type, remote) {
@@ -208,26 +218,31 @@ impl ConnectionParameters {
}
/// Set a preferred address (which only has an effect for a server).
+ #[must_use]
pub fn preferred_address(mut self, preferred: PreferredAddress) -> Self {
self.preferred_address = PreferredAddressConfig::Address(preferred);
self
}
/// Disable the use of preferred addresses.
+ #[must_use]
pub fn disable_preferred_address(mut self) -> Self {
self.preferred_address = PreferredAddressConfig::Disabled;
self
}
+ #[must_use]
pub fn get_preferred_address(&self) -> &PreferredAddressConfig {
&self.preferred_address
}
+ #[must_use]
pub fn ack_ratio(mut self, ack_ratio: u8) -> Self {
self.ack_ratio = ack_ratio;
self
}
+ #[must_use]
pub fn get_ack_ratio(&self) -> u8 {
self.ack_ratio
}
@@ -235,45 +250,54 @@ impl ConnectionParameters {
/// # Panics
///
/// If `timeout` is 2^62 milliseconds or more.
+ #[must_use]
pub fn idle_timeout(mut self, timeout: Duration) -> Self {
assert!(timeout.as_millis() < (1 << 62), "idle timeout is too long");
self.idle_timeout = timeout;
self
}
+ #[must_use]
pub fn get_idle_timeout(&self) -> Duration {
self.idle_timeout
}
+ #[must_use]
pub fn get_datagram_size(&self) -> u64 {
self.datagram_size
}
+ #[must_use]
pub fn datagram_size(mut self, v: u64) -> Self {
self.datagram_size = v;
self
}
+ #[must_use]
pub fn get_outgoing_datagram_queue(&self) -> usize {
self.outgoing_datagram_queue
}
+ #[must_use]
pub fn outgoing_datagram_queue(mut self, v: usize) -> Self {
// The max queue length must be at least 1.
self.outgoing_datagram_queue = max(v, 1);
self
}
+ #[must_use]
pub fn get_incoming_datagram_queue(&self) -> usize {
self.incoming_datagram_queue
}
+ #[must_use]
pub fn incoming_datagram_queue(mut self, v: usize) -> Self {
// The max queue length must be at least 1.
self.incoming_datagram_queue = max(v, 1);
self
}
+ #[must_use]
pub fn get_fast_pto(&self) -> u8 {
self.fast_pto
}
@@ -293,39 +317,50 @@ impl ConnectionParameters {
/// # Panics
///
/// A value of 0 is invalid and will cause a panic.
+ #[must_use]
pub fn fast_pto(mut self, scale: u8) -> Self {
assert_ne!(scale, 0);
self.fast_pto = scale;
self
}
+ #[must_use]
pub fn is_fuzzing(&self) -> bool {
self.fuzzing
}
+ #[must_use]
pub fn fuzzing(mut self, enable: bool) -> Self {
self.fuzzing = enable;
self
}
+ #[must_use]
pub fn is_greasing(&self) -> bool {
self.grease
}
+ #[must_use]
pub fn grease(mut self, grease: bool) -> Self {
self.grease = grease;
self
}
+ #[must_use]
pub fn pacing_enabled(&self) -> bool {
self.pacing
}
+ #[must_use]
pub fn pacing(mut self, pacing: bool) -> Self {
self.pacing = pacing;
self
}
+ /// # Errors
+ /// When a connection ID cannot be obtained.
+ /// # Panics
+ /// Only when this code includes a transport parameter that is invalid.
pub fn create_transport_parameter(
&self,
role: Role,
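
All of the getters and builder-style setters above are now `#[must_use]`, so the intended usage is to chain them when constructing parameters. A small, purely illustrative sketch; the numeric values are arbitrary and `Version::Version1` is assumed as the initial version:

    // Hedged sketch: chaining the ConnectionParameters setters shown above.
    // Assumes std::time::Duration and the neqo-transport types are in scope.
    let params = ConnectionParameters::default()
        .versions(Version::Version1, vec![Version::Version1])
        .max_data(10_000_000)
        .max_streams(StreamType::BiDi, 16)
        .idle_timeout(Duration::from_secs(30))
        .grease(true);
    assert_eq!(params.get_max_streams(StreamType::BiDi), 16);

Calling a setter without using its return value (for example `params.max_data(v);` on its own line) would now produce an unused-result warning, which is the point of the added attribute.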
diff --git a/third_party/rust/neqo-transport/src/connection/state.rs b/third_party/rust/neqo-transport/src/connection/state.rs
index 9afb42174f..9789151d3f 100644
--- a/third_party/rust/neqo-transport/src/connection/state.rs
+++ b/third_party/rust/neqo-transport/src/connection/state.rs
@@ -21,7 +21,7 @@ use crate::{
packet::PacketBuilder,
path::PathRef,
recovery::RecoveryToken,
- ConnectionError, Error, Res,
+ ConnectionError, Error,
};
#[derive(Clone, Debug, PartialEq, Eq)]
@@ -66,6 +66,7 @@ impl State {
)
}
+ #[must_use]
pub fn error(&self) -> Option<&ConnectionError> {
if let Self::Closing { error, .. } | Self::Draining { error, .. } | Self::Closed(error) =
self
@@ -184,13 +185,13 @@ impl ClosingFrame {
}
}
-/// `StateSignaling` manages whether we need to send HANDSHAKE_DONE and CONNECTION_CLOSE.
+/// `StateSignaling` manages whether we need to send `HANDSHAKE_DONE` and `CONNECTION_CLOSE`.
/// Valid state transitions are:
-/// * Idle -> HandshakeDone: at the server when the handshake completes
-/// * HandshakeDone -> Idle: when a HANDSHAKE_DONE frame is sent
+/// * Idle -> `HandshakeDone`: at the server when the handshake completes
+/// * `HandshakeDone` -> Idle: when a `HANDSHAKE_DONE` frame is sent
/// * Idle/HandshakeDone -> Closing/Draining: when closing or draining
-/// * Closing/Draining -> CloseSent: after sending CONNECTION_CLOSE
-/// * CloseSent -> Closing: any time a new CONNECTION_CLOSE is needed
+/// * Closing/Draining -> `CloseSent`: after sending `CONNECTION_CLOSE`
+/// * `CloseSent` -> Closing: any time a new `CONNECTION_CLOSE` is needed
/// * -> Reset: from any state in case of a stateless reset
#[derive(Debug, Clone)]
pub enum StateSignaling {
@@ -214,13 +215,13 @@ impl StateSignaling {
*self = Self::HandshakeDone;
}
- pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Res<Option<RecoveryToken>> {
+ pub fn write_done(&mut self, builder: &mut PacketBuilder) -> Option<RecoveryToken> {
if matches!(self, Self::HandshakeDone) && builder.remaining() >= 1 {
*self = Self::Idle;
builder.encode_varint(FRAME_TYPE_HANDSHAKE_DONE);
- Ok(Some(RecoveryToken::HandshakeDone))
+ Some(RecoveryToken::HandshakeDone)
} else {
- Ok(None)
+ None
}
}
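
Read with the transition list above, the now-infallible `write_done` implements the server's `HandshakeDone` -> Idle edge. A hedged sketch of how a caller drives it; the `handshake_done()` setter is assumed from the surrounding code, and `signaling`, `builder` and `tokens` are hypothetical locals:

    // Hedged sketch: server-side HANDSHAKE_DONE signalling.
    signaling.handshake_done(); // Idle -> HandshakeDone (assumed setter)
    if let Some(token) = signaling.write_done(&mut builder) {
        // HandshakeDone -> Idle; the frame type was encoded into the builder.
        tokens.push(token);
    } // None means either not in HandshakeDone or no room left in the builder.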
diff --git a/third_party/rust/neqo-transport/src/connection/tests/ackrate.rs b/third_party/rust/neqo-transport/src/connection/tests/ackrate.rs
index 1b83d42acd..f0a1d17cd9 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/ackrate.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/ackrate.rs
@@ -6,7 +6,7 @@
use std::{mem, time::Duration};
-use test_fixture::{addr_v4, assertions};
+use test_fixture::{assertions, DEFAULT_ADDR_V4};
use super::{
super::{ConnectionParameters, ACK_RATIO_SCALE},
@@ -164,7 +164,7 @@ fn migrate_ack_delay() {
let mut now = connect_rtt_idle(&mut client, &mut server, DEFAULT_RTT);
client
- .migrate(Some(addr_v4()), Some(addr_v4()), true, now)
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now)
.unwrap();
let client1 = send_something(&mut client, now);
diff --git a/third_party/rust/neqo-transport/src/connection/tests/cc.rs b/third_party/rust/neqo-transport/src/connection/tests/cc.rs
index b3467ea67c..b708bc421d 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/cc.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/cc.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{convert::TryFrom, mem, time::Duration};
+use std::{mem, time::Duration};
use neqo_common::{qdebug, qinfo, Datagram};
@@ -71,6 +71,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() {
client.stats().frame_rx.largest_acknowledged,
flight1_largest
);
+ let cwnd_before_cong = cwnd(&client);
// Client: send more
let (mut c_tx_dgrams, mut now) = fill_cwnd(&mut client, stream_id, now);
@@ -93,6 +94,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() {
client.stats().frame_rx.largest_acknowledged,
flight2_largest
);
+ assert!(cwnd(&client) < cwnd_before_cong);
}
#[test]
diff --git a/third_party/rust/neqo-transport/src/connection/tests/close.rs b/third_party/rust/neqo-transport/src/connection/tests/close.rs
index f45e77e549..5351dd0d5c 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/close.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/close.rs
@@ -6,7 +6,7 @@
use std::time::Duration;
-use test_fixture::{self, datagram, now};
+use test_fixture::{datagram, now};
use super::{
super::{Connection, Output, State},
diff --git a/third_party/rust/neqo-transport/src/connection/tests/datagram.rs b/third_party/rust/neqo-transport/src/connection/tests/datagram.rs
index 5b7b8dc0b4..ade8c753be 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/datagram.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/datagram.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{cell::RefCell, convert::TryFrom, rc::Rc};
+use std::{cell::RefCell, rc::Rc};
use neqo_common::event::Provider;
use test_fixture::now;
diff --git a/third_party/rust/neqo-transport/src/connection/tests/fuzzing.rs b/third_party/rust/neqo-transport/src/connection/tests/fuzzing.rs
index 5425e1a16e..9924c06fa4 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/fuzzing.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/fuzzing.rs
@@ -4,8 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
#![cfg(feature = "fuzzing")]
use neqo_crypto::FIXED_TAG_FUZZING;
diff --git a/third_party/rust/neqo-transport/src/connection/tests/handshake.rs b/third_party/rust/neqo-transport/src/connection/tests/handshake.rs
index 93385ac1bc..af0352ce90 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/handshake.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/handshake.rs
@@ -6,7 +6,6 @@
use std::{
cell::RefCell,
- convert::TryFrom,
mem,
net::{IpAddr, Ipv6Addr, SocketAddr},
rc::Rc,
@@ -18,8 +17,8 @@ use neqo_crypto::{
constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus,
};
use test_fixture::{
- self, addr, assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now,
- split_datagram,
+ assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, split_datagram,
+ DEFAULT_ADDR,
};
use super::{
@@ -122,8 +121,8 @@ fn no_alpn() {
"example.com",
&["bad-alpn"],
Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
ConnectionParameters::default(),
now(),
)
@@ -251,8 +250,8 @@ fn chacha20poly1305() {
test_fixture::DEFAULT_SERVER_NAME,
test_fixture::DEFAULT_ALPN,
Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
ConnectionParameters::default(),
now(),
)
@@ -347,7 +346,7 @@ fn reorder_05rtt_with_0rtt() {
let mut server = default_server();
let validation = AddressValidation::new(now(), ValidateAddress::NoToken).unwrap();
let validation = Rc::new(RefCell::new(validation));
- server.set_validation(Rc::clone(&validation));
+ server.set_validation(&validation);
let mut now = connect_with_rtt(&mut client, &mut server, now(), RTT);
// Include RTT in sending the ticket or the ticket age reported by the
@@ -730,8 +729,8 @@ fn connect_one_version() {
test_fixture::DEFAULT_SERVER_NAME,
test_fixture::DEFAULT_ALPN,
Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
ConnectionParameters::default().versions(version, vec![version]),
now(),
)
@@ -1135,3 +1134,54 @@ fn implicit_rtt_server() {
// an RTT estimate from having discarded the Initial packet number space.
assert_eq!(server.stats().rtt, RTT);
}
+
+#[test]
+fn emit_authentication_needed_once() {
+ let mut client = default_client();
+
+ let mut server = Connection::new_server(
+ test_fixture::LONG_CERT_KEYS,
+ test_fixture::DEFAULT_ALPN,
+ Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
+ ConnectionParameters::default(),
+ )
+ .expect("create a server");
+
+ let client1 = client.process(None, now());
+ assert!(client1.as_dgram_ref().is_some());
+
+ // The entire server flight doesn't fit in a single packet because the
+ // certificate is large, therefore the server will produce 2 packets.
+ let server1 = server.process(client1.as_dgram_ref(), now());
+ assert!(server1.as_dgram_ref().is_some());
+ let server2 = server.process(None, now());
+ assert!(server2.as_dgram_ref().is_some());
+
+ let authentication_needed_count = |client: &mut Connection| {
+ client
+ .events()
+ .filter(|e| matches!(e, ConnectionEvent::AuthenticationNeeded))
+ .count()
+ };
+
+ // Upon receiving the first packet, the client has the server certificate,
+ // but not yet all required handshake data. It moves to
+ // `HandshakeState::AuthenticationPending` and emits a
+ // `ConnectionEvent::AuthenticationNeeded` event.
+ //
+ // Note that this is a tiny bit fragile in that it depends on having a certificate
+ // that is within a fairly narrow range of sizes. It has to fit in a single
+ // packet, but be large enough that the CertificateVerify message does not
+ // also fit in the same packet. Our default test setup achieves this, but
+ // changes to the setup might invalidate this test.
+ let _ = client.process(server1.as_dgram_ref(), now());
+ assert_eq!(1, authentication_needed_count(&mut client));
+ assert!(client.peer_certificate().is_some());
+
+ // The `AuthenticationNeeded` event is still pending a call to
+ // `Connection::authenticated`. On receiving the second packet from the
+ // server, the client must not emit another
+ // `ConnectionEvent::AuthenticationNeeded`.
+ let _ = client.process(server2.as_dgram_ref(), now());
+ assert_eq!(0, authentication_needed_count(&mut client));
+}
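
For context, the application-side counterpart of this event is the `Connection::authenticated` call referenced in the comments above. A hedged sketch of that flow, not part of the diff; real code would verify the certificate before reporting `AuthenticationStatus::Ok`:

    // Hedged sketch: reacting to the single AuthenticationNeeded event.
    while let Some(event) = client.next_event() {
        if matches!(event, ConnectionEvent::AuthenticationNeeded) {
            // Inspect client.peer_certificate() here before accepting.
            client.authenticated(AuthenticationStatus::Ok, now());
        }
    }

The test above asserts the complementary property: while that call is still outstanding, a further handshake packet does not emit a second event.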
diff --git a/third_party/rust/neqo-transport/src/connection/tests/idle.rs b/third_party/rust/neqo-transport/src/connection/tests/idle.rs
index c33726917a..5d01131541 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/idle.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/idle.rs
@@ -10,7 +10,7 @@ use std::{
};
use neqo_common::{qtrace, Encoder};
-use test_fixture::{self, now, split_datagram};
+use test_fixture::{now, split_datagram};
use super::{
super::{Connection, ConnectionParameters, IdleTimeout, Output, State},
@@ -310,28 +310,20 @@ fn idle_caching() {
server.process_input(&dgram.unwrap(), middle);
assert_eq!(server.stats().frame_rx.ping, ping_before_s + 1);
let mut tokens = Vec::new();
- server
- .crypto
- .streams
- .write_frame(
- PacketNumberSpace::Initial,
- &mut builder,
- &mut tokens,
- &mut FrameStats::default(),
- )
- .unwrap();
+ server.crypto.streams.write_frame(
+ PacketNumberSpace::Initial,
+ &mut builder,
+ &mut tokens,
+ &mut FrameStats::default(),
+ );
assert_eq!(tokens.len(), 1);
tokens.clear();
- server
- .crypto
- .streams
- .write_frame(
- PacketNumberSpace::Initial,
- &mut builder,
- &mut tokens,
- &mut FrameStats::default(),
- )
- .unwrap();
+ server.crypto.streams.write_frame(
+ PacketNumberSpace::Initial,
+ &mut builder,
+ &mut tokens,
+ &mut FrameStats::default(),
+ );
assert!(tokens.is_empty());
let dgram = server.process_output(middle).dgram();
diff --git a/third_party/rust/neqo-transport/src/connection/tests/keys.rs b/third_party/rust/neqo-transport/src/connection/tests/keys.rs
index c247bba670..847b253284 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/keys.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/keys.rs
@@ -7,7 +7,7 @@
use std::mem;
use neqo_common::{qdebug, Datagram};
-use test_fixture::{self, now};
+use test_fixture::now;
use super::{
super::{
diff --git a/third_party/rust/neqo-transport/src/connection/tests/migration.rs b/third_party/rust/neqo-transport/src/connection/tests/migration.rs
index 8307a7dd84..405ae161a4 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/migration.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/migration.rs
@@ -6,6 +6,7 @@
use std::{
cell::RefCell,
+ mem,
net::{IpAddr, Ipv6Addr, SocketAddr},
rc::Rc,
time::{Duration, Instant},
@@ -13,9 +14,8 @@ use std::{
use neqo_common::{Datagram, Decoder};
use test_fixture::{
- self, addr, addr_v4,
assertions::{assert_v4_path, assert_v6_path},
- fixture_init, new_neqo_qlog, now,
+ fixture_init, new_neqo_qlog, now, DEFAULT_ADDR, DEFAULT_ADDR_V4,
};
use super::{
@@ -94,8 +94,8 @@ fn rebinding_port() {
server.stream_close_send(stream_id).unwrap();
let dgram = server.process_output(now()).dgram();
let dgram = dgram.unwrap();
- assert_eq!(dgram.source(), addr());
- assert_eq!(dgram.destination(), new_port(addr()));
+ assert_eq!(dgram.source(), DEFAULT_ADDR);
+ assert_eq!(dgram.destination(), new_port(DEFAULT_ADDR));
}
/// This simulates an attack where a valid packet is forwarded on
@@ -109,7 +109,7 @@ fn path_forwarding_attack() {
let mut now = now();
let dgram = send_something(&mut client, now);
- let dgram = change_path(&dgram, addr_v4());
+ let dgram = change_path(&dgram, DEFAULT_ADDR_V4);
server.process_input(&dgram, now);
// The server now probes the new (primary) path.
@@ -188,7 +188,7 @@ fn migrate_immediate() {
let now = now();
client
- .migrate(Some(addr_v4()), Some(addr_v4()), true, now)
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now)
.unwrap();
let client1 = send_something(&mut client, now);
@@ -229,7 +229,7 @@ fn migrate_rtt() {
let now = connect_rtt_idle(&mut client, &mut server, RTT);
client
- .migrate(Some(addr_v4()), Some(addr_v4()), true, now)
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now)
.unwrap();
// The RTT might be increased for the new path, so allow a little flexibility.
let rtt = client.paths.rtt();
@@ -245,7 +245,7 @@ fn migrate_immediate_fail() {
let mut now = now();
client
- .migrate(Some(addr_v4()), Some(addr_v4()), true, now)
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), true, now)
.unwrap();
let probe = client.process_output(now).dgram().unwrap();
@@ -293,7 +293,7 @@ fn migrate_same() {
let now = now();
client
- .migrate(Some(addr()), Some(addr()), true, now)
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), true, now)
.unwrap();
let probe = client.process_output(now).dgram().unwrap();
@@ -320,7 +320,7 @@ fn migrate_same_fail() {
let mut now = now();
client
- .migrate(Some(addr()), Some(addr()), true, now)
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), true, now)
.unwrap();
let probe = client.process_output(now).dgram().unwrap();
@@ -375,7 +375,7 @@ fn migration(mut client: Connection) {
let now = now();
client
- .migrate(Some(addr_v4()), Some(addr_v4()), false, now)
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now)
.unwrap();
let probe = client.process_output(now).dgram().unwrap();
@@ -449,8 +449,8 @@ fn migration_client_empty_cid() {
test_fixture::DEFAULT_SERVER_NAME,
test_fixture::DEFAULT_ALPN,
Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
ConnectionParameters::default(),
now(),
)
@@ -568,22 +568,22 @@ fn preferred_address(hs_client: SocketAddr, hs_server: SocketAddr, preferred: So
/// Migration works for a new port number.
#[test]
fn preferred_address_new_port() {
- let a = addr();
+ let a = DEFAULT_ADDR;
preferred_address(a, a, new_port(a));
}
/// Migration works for a new address too.
#[test]
fn preferred_address_new_address() {
- let mut preferred = addr();
+ let mut preferred = DEFAULT_ADDR;
preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2)));
- preferred_address(addr(), addr(), preferred);
+ preferred_address(DEFAULT_ADDR, DEFAULT_ADDR, preferred);
}
/// Migration works for IPv4 addresses.
#[test]
fn preferred_address_new_port_v4() {
- let a = addr_v4();
+ let a = DEFAULT_ADDR_V4;
preferred_address(a, a, new_port(a));
}
@@ -623,7 +623,7 @@ fn preferred_address_ignore_loopback() {
/// A preferred address in the wrong address family is ignored.
#[test]
fn preferred_address_ignore_different_family() {
- preferred_address_ignored(PreferredAddress::new_any(Some(addr_v4()), None));
+ preferred_address_ignored(PreferredAddress::new_any(Some(DEFAULT_ADDR_V4), None));
}
/// Disabling preferred addresses at the client means that it ignores a perfectly
@@ -631,7 +631,7 @@ fn preferred_address_ignore_different_family() {
#[test]
fn preferred_address_disabled_client() {
let mut client = new_client(ConnectionParameters::default().disable_preferred_address());
- let mut preferred = addr();
+ let mut preferred = DEFAULT_ADDR;
preferred.set_ip(IpAddr::V6(Ipv6Addr::new(0xfe80, 0, 0, 0, 0, 0, 0, 2)));
let spa = PreferredAddress::new_any(None, Some(preferred));
let mut server = new_server(ConnectionParameters::default().preferred_address(spa));
@@ -643,7 +643,7 @@ fn preferred_address_disabled_client() {
fn preferred_address_empty_cid() {
fixture_init();
- let spa = PreferredAddress::new_any(None, Some(new_port(addr())));
+ let spa = PreferredAddress::new_any(None, Some(new_port(DEFAULT_ADDR)));
let res = Connection::new_server(
test_fixture::DEFAULT_KEYS,
test_fixture::DEFAULT_ALPN,
@@ -706,33 +706,33 @@ fn preferred_address_client() {
fn migration_invalid_state() {
let mut client = default_client();
assert!(client
- .migrate(Some(addr()), Some(addr()), false, now())
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now())
.is_err());
let mut server = default_server();
assert!(server
- .migrate(Some(addr()), Some(addr()), false, now())
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now())
.is_err());
connect_force_idle(&mut client, &mut server);
assert!(server
- .migrate(Some(addr()), Some(addr()), false, now())
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now())
.is_err());
client.close(now(), 0, "closing");
assert!(client
- .migrate(Some(addr()), Some(addr()), false, now())
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now())
.is_err());
let close = client.process(None, now()).dgram();
let dgram = server.process(close.as_ref(), now()).dgram();
assert!(server
- .migrate(Some(addr()), Some(addr()), false, now())
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now())
.is_err());
client.process_input(&dgram.unwrap(), now());
assert!(client
- .migrate(Some(addr()), Some(addr()), false, now())
+ .migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR), false, now())
.is_err());
}
@@ -753,32 +753,32 @@ fn migration_invalid_address() {
cant_migrate(None, None);
// Providing a zero port number isn't valid.
- let mut zero_port = addr();
+ let mut zero_port = DEFAULT_ADDR;
zero_port.set_port(0);
cant_migrate(None, Some(zero_port));
cant_migrate(Some(zero_port), None);
// An unspecified remote address is bad.
- let mut remote_unspecified = addr();
+ let mut remote_unspecified = DEFAULT_ADDR;
remote_unspecified.set_ip(IpAddr::V6(Ipv6Addr::from(0)));
cant_migrate(None, Some(remote_unspecified));
// Mixed address families is bad.
- cant_migrate(Some(addr()), Some(addr_v4()));
- cant_migrate(Some(addr_v4()), Some(addr()));
+ cant_migrate(Some(DEFAULT_ADDR), Some(DEFAULT_ADDR_V4));
+ cant_migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR));
// Loopback to non-loopback is bad.
- cant_migrate(Some(addr()), Some(loopback()));
- cant_migrate(Some(loopback()), Some(addr()));
+ cant_migrate(Some(DEFAULT_ADDR), Some(loopback()));
+ cant_migrate(Some(loopback()), Some(DEFAULT_ADDR));
assert_eq!(
client
- .migrate(Some(addr()), Some(loopback()), true, now())
+ .migrate(Some(DEFAULT_ADDR), Some(loopback()), true, now())
.unwrap_err(),
Error::InvalidMigration
);
assert_eq!(
client
- .migrate(Some(loopback()), Some(addr()), true, now())
+ .migrate(Some(loopback()), Some(DEFAULT_ADDR), true, now())
.unwrap_err(),
Error::InvalidMigration
);
@@ -864,7 +864,7 @@ fn retire_prior_to_migration_failure() {
let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now())));
client
- .migrate(Some(addr_v4()), Some(addr_v4()), false, now())
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now())
.unwrap();
// The client now probes the new path.
@@ -919,7 +919,7 @@ fn retire_prior_to_migration_success() {
let original_cid = ConnectionId::from(get_cid(&send_something(&mut client, now())));
client
- .migrate(Some(addr_v4()), Some(addr_v4()), false, now())
+ .migrate(Some(DEFAULT_ADDR_V4), Some(DEFAULT_ADDR_V4), false, now())
.unwrap();
// The client now probes the new path.
@@ -951,3 +951,39 @@ fn retire_prior_to_migration_success() {
assert_ne!(get_cid(&dgram), original_cid);
assert_ne!(get_cid(&dgram), probe_cid);
}
+
+struct GarbageWriter {}
+
+impl crate::connection::test_internal::FrameWriter for GarbageWriter {
+ fn write_frames(&mut self, builder: &mut PacketBuilder) {
+ // Not a valid frame type.
+ builder.encode_varint(u32::MAX);
+ }
+}
+
+/// Test the case that we run out of connection IDs and receive an invalid frame
+/// from a new path.
+#[test]
+#[should_panic(expected = "attempting to close with a temporary path")]
+fn error_on_new_path_with_no_connection_id() {
+ let mut client = default_client();
+ let mut server = default_server();
+ connect_force_idle(&mut client, &mut server);
+
+ let cid_gen: Rc<RefCell<dyn ConnectionIdGenerator>> =
+ Rc::new(RefCell::new(CountingConnectionIdGenerator::default()));
+ server.test_frame_writer = Some(Box::new(RetireAll { cid_gen }));
+ let retire_all = send_something(&mut server, now());
+
+ client.process_input(&retire_all, now());
+
+ server.test_frame_writer = Some(Box::new(GarbageWriter {}));
+ let garbage = send_something(&mut server, now());
+
+ let dgram = change_path(&garbage, DEFAULT_ADDR_V4);
+ client.process_input(&dgram, now());
+
+ // See issue #1697. We had a crash when the client had a temporary path and
+ // `process_output` was called.
+ mem::drop(client.process_output(now()));
+}
diff --git a/third_party/rust/neqo-transport/src/connection/tests/mod.rs b/third_party/rust/neqo-transport/src/connection/tests/mod.rs
index 8a999f4048..b6ce08f8d1 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/mod.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/mod.rs
@@ -4,12 +4,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![deny(clippy::pedantic)]
-
use std::{
cell::RefCell,
cmp::min,
- convert::TryFrom,
mem,
rc::Rc,
time::{Duration, Instant},
@@ -18,7 +15,7 @@ use std::{
use enum_map::enum_map;
use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role};
use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken};
-use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now};
+use test_fixture::{fixture_init, new_neqo_qlog, now, DEFAULT_ADDR};
use super::{Connection, ConnectionError, ConnectionId, Output, State};
use crate::{
@@ -79,7 +76,7 @@ impl ConnectionIdDecoder for CountingConnectionIdGenerator {
impl ConnectionIdGenerator for CountingConnectionIdGenerator {
fn generate_cid(&mut self) -> Option<ConnectionId> {
- let mut r = random(20);
+ let mut r = random::<20>();
r[0] = 8;
r[1] = u8::try_from(self.counter >> 24).unwrap();
r[2] = u8::try_from((self.counter >> 16) & 0xff).unwrap();
@@ -107,8 +104,8 @@ pub fn new_client(params: ConnectionParameters) -> Connection {
test_fixture::DEFAULT_SERVER_NAME,
test_fixture::DEFAULT_ALPN,
Rc::new(RefCell::new(CountingConnectionIdGenerator::default())),
- addr(),
- addr(),
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
params,
now(),
)
@@ -278,7 +275,7 @@ fn exchange_ticket(
) -> ResumptionToken {
let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap();
let validation = Rc::new(RefCell::new(validation));
- server.set_validation(Rc::clone(&validation));
+ server.set_validation(&validation);
server.send_ticket(now, &[]).expect("can send ticket");
let ticket = server.process_output(now).dgram();
assert!(ticket.is_some());
diff --git a/third_party/rust/neqo-transport/src/connection/tests/priority.rs b/third_party/rust/neqo-transport/src/connection/tests/priority.rs
index 1f86aa22e5..079ba93b9f 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/priority.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/priority.rs
@@ -7,7 +7,7 @@
use std::{cell::RefCell, mem, rc::Rc};
use neqo_common::event::Provider;
-use test_fixture::{self, now};
+use test_fixture::now;
use super::{
super::{Connection, Error, Output},
@@ -370,7 +370,7 @@ fn low() {
let validation = Rc::new(RefCell::new(
AddressValidation::new(now, ValidateAddress::Never).unwrap(),
));
- server.set_validation(Rc::clone(&validation));
+ server.set_validation(&validation);
connect(&mut client, &mut server);
let id = server.stream_create(StreamType::UniDi).unwrap();
diff --git a/third_party/rust/neqo-transport/src/connection/tests/resumption.rs b/third_party/rust/neqo-transport/src/connection/tests/resumption.rs
index a8c45a9f06..7410e76ef8 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/resumption.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/resumption.rs
@@ -6,7 +6,7 @@
use std::{cell::RefCell, mem, rc::Rc, time::Duration};
-use test_fixture::{self, assertions, now};
+use test_fixture::{assertions, now};
use super::{
connect, connect_with_rtt, default_client, default_server, exchange_ticket, get_tokens,
@@ -50,7 +50,7 @@ fn remember_smoothed_rtt() {
// wants to acknowledge; so the ticket will include an ACK frame too.
let validation = AddressValidation::new(now, ValidateAddress::NoToken).unwrap();
let validation = Rc::new(RefCell::new(validation));
- server.set_validation(Rc::clone(&validation));
+ server.set_validation(&validation);
server.send_ticket(now, &[]).expect("can send ticket");
let ticket = server.process_output(now).dgram();
assert!(ticket.is_some());
@@ -84,7 +84,7 @@ fn address_validation_token_resume() {
let mut server = default_server();
let validation = AddressValidation::new(now(), ValidateAddress::Always).unwrap();
let validation = Rc::new(RefCell::new(validation));
- server.set_validation(Rc::clone(&validation));
+ server.set_validation(&validation);
let mut now = connect_with_rtt(&mut client, &mut server, now(), RTT);
let token = exchange_ticket(&mut client, &mut server, now);
@@ -155,7 +155,7 @@ fn two_tickets_with_new_token() {
let mut server = default_server();
let validation = AddressValidation::new(now(), ValidateAddress::Always).unwrap();
let validation = Rc::new(RefCell::new(validation));
- server.set_validation(Rc::clone(&validation));
+ server.set_validation(&validation);
connect(&mut client, &mut server);
// Send two tickets with tokens and then bundle those into a packet.
diff --git a/third_party/rust/neqo-transport/src/connection/tests/stream.rs b/third_party/rust/neqo-transport/src/connection/tests/stream.rs
index 586a537b9d..f469866d50 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/stream.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/stream.rs
@@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::{cmp::max, collections::HashMap, convert::TryFrom, mem};
+use std::{cmp::max, collections::HashMap, mem};
use neqo_common::{event::Provider, qdebug};
use test_fixture::now;
diff --git a/third_party/rust/neqo-transport/src/connection/tests/vn.rs b/third_party/rust/neqo-transport/src/connection/tests/vn.rs
index 22f15c991c..93872a94f4 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/vn.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/vn.rs
@@ -7,7 +7,7 @@
use std::{mem, time::Duration};
use neqo_common::{event::Provider, Decoder, Encoder};
-use test_fixture::{self, assertions, datagram, now};
+use test_fixture::{assertions, datagram, now};
use super::{
super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState},
diff --git a/third_party/rust/neqo-transport/src/connection/tests/zerortt.rs b/third_party/rust/neqo-transport/src/connection/tests/zerortt.rs
index 0aa5573c98..b5e5f0d758 100644
--- a/third_party/rust/neqo-transport/src/connection/tests/zerortt.rs
+++ b/third_party/rust/neqo-transport/src/connection/tests/zerortt.rs
@@ -8,7 +8,7 @@ use std::{cell::RefCell, rc::Rc};
use neqo_common::event::Provider;
use neqo_crypto::{AllowZeroRtt, AntiReplay};
-use test_fixture::{self, assertions, now};
+use test_fixture::{assertions, now};
use super::{
super::Connection, connect, default_client, default_server, exchange_ticket, new_server,
diff --git a/third_party/rust/neqo-transport/src/crypto.rs b/third_party/rust/neqo-transport/src/crypto.rs
index f6cc7c0e2f..9840eaa1e1 100644
--- a/third_party/rust/neqo-transport/src/crypto.rs
+++ b/third_party/rust/neqo-transport/src/crypto.rs
@@ -8,7 +8,6 @@ use std::{
cell::RefCell,
cmp::{max, min},
collections::HashMap,
- convert::TryFrom,
mem,
ops::{Index, IndexMut, Range},
rc::Rc,
@@ -101,10 +100,10 @@ impl Crypto {
version,
protocols,
tls: agent,
- streams: Default::default(),
+ streams: CryptoStreams::default(),
states: CryptoStates {
fuzzing,
- ..Default::default()
+ ..CryptoStates::default()
},
})
}
@@ -239,14 +238,14 @@ impl Crypto {
/// Returns true if new handshake keys were installed.
pub fn install_keys(&mut self, role: Role) -> Res<bool> {
- if !self.tls.state().is_final() {
+ if self.tls.state().is_final() {
+ Ok(false)
+ } else {
let installed_hs = self.install_handshake_keys()?;
if role == Role::Server {
self.maybe_install_application_write_key(self.version)?;
}
Ok(installed_hs)
- } else {
- Ok(false)
}
}
@@ -274,7 +273,7 @@ impl Crypto {
fn maybe_install_application_write_key(&mut self, version: Version) -> Res<()> {
qtrace!([self], "Attempt to install application write key");
if let Some(secret) = self.tls.write_secret(TLS_EPOCH_APPLICATION_DATA) {
- self.states.set_application_write_key(version, secret)?;
+ self.states.set_application_write_key(version, &secret)?;
qdebug!([self], "Application write key installed");
}
Ok(())
@@ -290,7 +289,7 @@ impl Crypto {
.read_secret(TLS_EPOCH_APPLICATION_DATA)
.ok_or(Error::InternalError)?;
self.states
- .set_application_read_key(version, read_secret, expire_0rtt)?;
+ .set_application_read_key(version, &read_secret, expire_0rtt)?;
qdebug!([self], "application read keys installed");
Ok(())
}
@@ -313,8 +312,8 @@ impl Crypto {
builder: &mut PacketBuilder,
tokens: &mut Vec<RecoveryToken>,
stats: &mut FrameStats,
- ) -> Res<()> {
- self.streams.write_frame(space, builder, tokens, stats)
+ ) {
+ self.streams.write_frame(space, builder, tokens, stats);
}
pub fn acked(&mut self, token: &CryptoRecoveryToken) {
@@ -767,7 +766,7 @@ impl CryptoDxAppData {
pub fn new(
version: Version,
dir: CryptoDxDirection,
- secret: SymKey,
+ secret: &SymKey,
cipher: Cipher,
fuzzing: bool,
) -> Res<Self> {
@@ -776,12 +775,12 @@ impl CryptoDxAppData {
version,
dir,
TLS_EPOCH_APPLICATION_DATA,
- &secret,
+ secret,
cipher,
fuzzing,
),
cipher,
- next_secret: Self::update_secret(cipher, &secret)?,
+ next_secret: Self::update_secret(cipher, secret)?,
fuzzing,
})
}
@@ -1111,7 +1110,7 @@ impl CryptoStates {
});
}
- pub fn set_application_write_key(&mut self, version: Version, secret: SymKey) -> Res<()> {
+ pub fn set_application_write_key(&mut self, version: Version, secret: &SymKey) -> Res<()> {
debug_assert!(self.app_write.is_none());
debug_assert_ne!(self.cipher, 0);
let mut app = CryptoDxAppData::new(
@@ -1134,7 +1133,7 @@ impl CryptoStates {
pub fn set_application_read_key(
&mut self,
version: Version,
- secret: SymKey,
+ secret: &SymKey,
expire_0rtt: Instant,
) -> Res<()> {
debug_assert!(self.app_write.is_some(), "should have write keys installed");
@@ -1530,14 +1529,14 @@ impl CryptoStreams {
builder: &mut PacketBuilder,
tokens: &mut Vec<RecoveryToken>,
stats: &mut FrameStats,
- ) -> Res<()> {
+ ) {
let cs = self.get_mut(space).unwrap();
if let Some((offset, data)) = cs.tx.next_bytes() {
let mut header_len = 1 + Encoder::varint_len(offset) + 1;
// Don't bother if there isn't room for the header and some data.
if builder.remaining() < header_len + 1 {
- return Ok(());
+ return;
}
// Calculate length of data based on the minimum of:
// - available data
@@ -1561,7 +1560,6 @@ impl CryptoStreams {
}));
stats.crypto += 1;
}
- Ok(())
}
}
diff --git a/third_party/rust/neqo-transport/src/events.rs b/third_party/rust/neqo-transport/src/events.rs
index 88a85250ee..a892e384b9 100644
--- a/third_party/rust/neqo-transport/src/events.rs
+++ b/third_party/rust/neqo-transport/src/events.rs
@@ -52,7 +52,7 @@ pub enum ConnectionEvent {
stream_id: StreamId,
app_error: AppError,
},
- /// Peer has sent STOP_SENDING
+ /// Peer has sent `STOP_SENDING`
SendStreamStopSending {
stream_id: StreamId,
app_error: AppError,
@@ -61,7 +61,7 @@ pub enum ConnectionEvent {
SendStreamComplete {
stream_id: StreamId,
},
- /// Peer increased MAX_STREAMS
+ /// Peer increased `MAX_STREAMS`
SendStreamCreatable {
stream_type: StreamType,
},
@@ -254,8 +254,9 @@ impl EventProvider for ConnectionEvents {
#[cfg(test)]
mod tests {
- use super::*;
- use crate::{ConnectionError, Error};
+ use neqo_common::event::Provider;
+
+ use crate::{ConnectionError, ConnectionEvent, ConnectionEvents, Error, State, StreamId};
#[test]
fn event_culling() {
diff --git a/third_party/rust/neqo-transport/src/fc.rs b/third_party/rust/neqo-transport/src/fc.rs
index a219ca7e8d..5ddfce6463 100644
--- a/third_party/rust/neqo-transport/src/fc.rs
+++ b/third_party/rust/neqo-transport/src/fc.rs
@@ -8,7 +8,6 @@
// into flow control frames needing to be sent to the remote.
use std::{
- convert::TryFrom,
fmt::Debug,
ops::{Deref, DerefMut, Index, IndexMut},
};
@@ -249,7 +248,7 @@ where
}
}
- /// This function is called when STREAM_DATA_BLOCKED frame is received.
+ /// This function is called when `STREAM_DATA_BLOCKED` frame is received.
/// The flow control will try to send an update if possible.
pub fn send_flowc_update(&mut self) {
if self.retired + self.max_active > self.max_allowed {
diff --git a/third_party/rust/neqo-transport/src/frame.rs b/third_party/rust/neqo-transport/src/frame.rs
index f3d567ac7c..b3bb024a2c 100644
--- a/third_party/rust/neqo-transport/src/frame.rs
+++ b/third_party/rust/neqo-transport/src/frame.rs
@@ -6,7 +6,7 @@
// Directly relating to QUIC frames.
-use std::{convert::TryFrom, ops::RangeInclusive};
+use std::ops::RangeInclusive;
use neqo_common::{qtrace, Decoder};
@@ -78,6 +78,7 @@ impl CloseError {
}
}
+ #[must_use]
pub fn code(&self) -> u64 {
match self {
Self::Transport(c) | Self::Application(c) => *c,
@@ -303,7 +304,7 @@ impl<'a> Frame<'a> {
)
}
- /// Converts AckRanges as encoded in a ACK frame (see -transport
+ /// Converts `AckRanges` as encoded in an ACK frame (see -transport
/// 19.3.1) into ranges of acked packets (end, start), inclusive of
/// start and end values.
pub fn decode_ack_frame(
@@ -387,6 +388,7 @@ impl<'a> Frame<'a> {
}
}
+ #[allow(clippy::too_many_lines)] // Yeah, but it's a nice match statement.
pub fn decode(dec: &mut Decoder<'a>) -> Res<Self> {
/// Maximum ACK Range Count in ACK Frame
///
@@ -430,7 +432,7 @@ impl<'a> Frame<'a> {
}
})?;
let fa = dv(dec)?;
- let mut arr: Vec<AckRange> = Vec::with_capacity(nr as usize);
+ let mut arr: Vec<AckRange> = Vec::with_capacity(usize::try_from(nr)?);
for _ in 0..nr {
let ar = AckRange {
gap: dv(dec)?,
@@ -615,7 +617,11 @@ impl<'a> Frame<'a> {
mod tests {
use neqo_common::{Decoder, Encoder};
- use super::*;
+ use crate::{
+ cid::MAX_CONNECTION_ID_LEN,
+ frame::{AckRange, Frame, FRAME_TYPE_ACK},
+ CloseError, Error, StreamId, StreamType,
+ };
fn just_dec(f: &Frame, s: &str) {
let encoded = Encoder::from_hex(s);
diff --git a/third_party/rust/neqo-transport/src/lib.rs b/third_party/rust/neqo-transport/src/lib.rs
index ecf7ee2f73..be482c466f 100644
--- a/third_party/rust/neqo-transport/src/lib.rs
+++ b/third_party/rust/neqo-transport/src/lib.rs
@@ -4,8 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::use_self)]
+#![allow(clippy::module_name_repetitions)] // This lint doesn't work here.
use neqo_common::qinfo;
use neqo_crypto::Error as CryptoError;
@@ -30,6 +29,9 @@ pub mod recv_stream;
#[cfg(not(feature = "bench"))]
mod recv_stream;
mod rtt;
+#[cfg(feature = "bench")]
+pub mod send_stream;
+#[cfg(not(feature = "bench"))]
mod send_stream;
mod sender;
pub mod server;
@@ -130,6 +132,7 @@ pub enum Error {
}
impl Error {
+ #[must_use]
pub fn code(&self) -> TransportError {
match self {
Self::NoError
@@ -206,6 +209,7 @@ pub enum ConnectionError {
}
impl ConnectionError {
+ #[must_use]
pub fn app_code(&self) -> Option<AppError> {
match self {
Self::Application(e) => Some(*e),
diff --git a/third_party/rust/neqo-transport/src/pace.rs b/third_party/rust/neqo-transport/src/pace.rs
index e5214c1bc8..5b88e5c0c4 100644
--- a/third_party/rust/neqo-transport/src/pace.rs
+++ b/third_party/rust/neqo-transport/src/pace.rs
@@ -5,11 +5,9 @@
// except according to those terms.
// Pacer
-#![deny(clippy::pedantic)]
use std::{
cmp::min,
- convert::TryFrom,
fmt::{Debug, Display},
time::{Duration, Instant},
};
diff --git a/third_party/rust/neqo-transport/src/packet/mod.rs b/third_party/rust/neqo-transport/src/packet/mod.rs
index ccfd212d5f..8458f69779 100644
--- a/third_party/rust/neqo-transport/src/packet/mod.rs
+++ b/third_party/rust/neqo-transport/src/packet/mod.rs
@@ -7,9 +7,7 @@
// Encoding and decoding packets off the wire.
use std::{
cmp::min,
- convert::TryFrom,
fmt,
- iter::ExactSizeIterator,
ops::{Deref, DerefMut, Range},
time::Instant,
};
@@ -172,11 +170,12 @@ impl PacketBuilder {
}
/// Start building a long header packet.
- /// For an Initial packet you will need to call initial_token(),
+ /// For an Initial packet you will need to call `initial_token()`,
/// even if the token is empty.
///
/// See `short()` for more on how to handle this in cases where there is no space.
#[allow(clippy::reversed_empty_ranges)] // For initializing an empty range.
+ #[allow(clippy::similar_names)] // For dcid and scid, which are fine here.
pub fn long(
mut encoder: Encoder,
pt: PacketType,
@@ -271,7 +270,7 @@ impl PacketBuilder {
let mask = if quic_bit { PACKET_BIT_FIXED_QUIC } else { 0 }
| if self.is_long() { 0 } else { PACKET_BIT_SPIN };
let first = self.header.start;
- self.encoder.as_mut()[first] ^= random(1)[0] & mask;
+ self.encoder.as_mut()[first] ^= random::<1>()[0] & mask;
}
/// For an Initial packet, encode the token.
@@ -315,6 +314,7 @@ impl PacketBuilder {
self.pn = pn;
}
+ #[allow(clippy::cast_possible_truncation)] // Nope.
fn write_len(&mut self, expansion: usize) {
let len = self.encoder.len() - (self.offsets.len + 2) + expansion;
self.encoder.as_mut()[self.offsets.len] = 0x40 | ((len >> 8) & 0x3f) as u8;
@@ -410,6 +410,7 @@ impl PacketBuilder {
/// As this is a simple packet, this is just an associated function.
/// As Retry is odd (it has to be constructed with leading bytes),
/// this returns a [`Vec<u8>`] rather than building on an encoder.
+ #[allow(clippy::similar_names)] // scid and dcid are fine here.
pub fn retry(
version: Version,
dcid: &[u8],
@@ -424,7 +425,7 @@ impl PacketBuilder {
PACKET_BIT_LONG
| PACKET_BIT_FIXED_QUIC
| (PacketType::Retry.to_byte(version) << 4)
- | (random(1)[0] & 0xf),
+ | (random::<1>()[0] & 0xf),
);
encoder.encode_uint(4, version.wire_version());
encoder.encode_vec(1, dcid);
@@ -441,6 +442,7 @@ impl PacketBuilder {
}
/// Make a Version Negotiation packet.
+ #[allow(clippy::similar_names)] // scid and dcid are fine here.
pub fn version_negotiation(
dcid: &[u8],
scid: &[u8],
@@ -448,7 +450,7 @@ impl PacketBuilder {
versions: &[Version],
) -> Vec<u8> {
let mut encoder = Encoder::default();
- let mut grease = random(4);
+ let mut grease = random::<4>();
// This will not include the "QUIC bit" sometimes. Intentionally.
encoder.encode_byte(PACKET_BIT_LONG | (grease[3] & 0x7f));
encoder.encode(&[0; 4]); // Zero version == VN.
@@ -492,7 +494,7 @@ impl From<PacketBuilder> for Encoder {
}
}
-/// PublicPacket holds information from packets that is public only. This allows for
+/// `PublicPacket` holds information from packets that is public only. This allows for
/// processing of packets prior to decryption.
pub struct PublicPacket<'a> {
/// The packet type.
@@ -552,6 +554,7 @@ impl<'a> PublicPacket<'a> {
/// Decode the common parts of a packet. This provides minimal parsing and validation.
/// Returns a tuple of a `PublicPacket` and a slice with any remainder from the datagram.
+ #[allow(clippy::similar_names)] // For dcid and scid, which are fine.
pub fn decode(data: &'a [u8], dcid_decoder: &dyn ConnectionIdDecoder) -> Res<(Self, &'a [u8])> {
let mut decoder = Decoder::new(data);
let first = Self::opt(decoder.decode_byte())?;
@@ -868,10 +871,14 @@ mod tests {
use neqo_common::Encoder;
use test_fixture::{fixture_init, now};
- use super::*;
use crate::{
+ cid::MAX_CONNECTION_ID_LEN,
crypto::{CryptoDxState, CryptoStates},
- EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version,
+ packet::{
+ PacketBuilder, PacketType, PublicPacket, PACKET_BIT_FIXED_QUIC, PACKET_BIT_LONG,
+ PACKET_BIT_SPIN,
+ },
+ ConnectionId, EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version,
};
const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08];
@@ -1366,8 +1373,12 @@ mod tests {
#[test]
fn build_vn() {
fixture_init();
- let mut vn =
- PacketBuilder::version_negotiation(SERVER_CID, CLIENT_CID, 0x0a0a0a0a, &Version::all());
+ let mut vn = PacketBuilder::version_negotiation(
+ SERVER_CID,
+ CLIENT_CID,
+ 0x0a0a_0a0a,
+ &Version::all(),
+ );
// Erase randomness from greasing...
assert_eq!(vn.len(), SAMPLE_VN.len());
vn[0] &= 0x80;
@@ -1380,8 +1391,12 @@ mod tests {
#[test]
fn vn_do_not_repeat_client_grease() {
fixture_init();
- let vn =
- PacketBuilder::version_negotiation(SERVER_CID, CLIENT_CID, 0x0a0a0a0a, &Version::all());
+ let vn = PacketBuilder::version_negotiation(
+ SERVER_CID,
+ CLIENT_CID,
+ 0x0a0a_0a0a,
+ &Version::all(),
+ );
assert_ne!(&vn[SAMPLE_VN.len() - 4..], &[0x0a, 0x0a, 0x0a, 0x0a]);
}
diff --git a/third_party/rust/neqo-transport/src/packet/retry.rs b/third_party/rust/neqo-transport/src/packet/retry.rs
index 004e9de6e7..72036d3b49 100644
--- a/third_party/rust/neqo-transport/src/packet/retry.rs
+++ b/third_party/rust/neqo-transport/src/packet/retry.rs
@@ -4,8 +4,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![deny(clippy::pedantic)]
-
use std::cell::RefCell;
use neqo_common::qerror;
diff --git a/third_party/rust/neqo-transport/src/path.rs b/third_party/rust/neqo-transport/src/path.rs
index d6920c8d94..4e8d9958ab 100644
--- a/third_party/rust/neqo-transport/src/path.rs
+++ b/third_party/rust/neqo-transport/src/path.rs
@@ -4,12 +4,10 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![deny(clippy::pedantic)]
#![allow(clippy::module_name_repetitions)]
use std::{
cell::RefCell,
- convert::TryFrom,
fmt::{self, Display},
mem,
net::{IpAddr, SocketAddr},
@@ -72,7 +70,7 @@ pub struct Paths {
/// Connection IDs that need to be retired.
to_retire: Vec<u64>,
- /// QLog handler.
+ /// `QLog` handler.
qlog: NeqoQlog,
}
@@ -156,7 +154,7 @@ impl Paths {
/// Get a reference to the primary path. Use this prior to handshake completion.
pub fn primary_fallible(&self) -> Option<PathRef> {
- self.primary.as_ref().map(Rc::clone)
+ self.primary.clone()
}
/// Returns true if the path is not permanent.
@@ -341,7 +339,7 @@ impl Paths {
None
}
})
- .or_else(|| self.primary.as_ref().map(Rc::clone))
+ .or_else(|| self.primary.clone())
}
/// A `PATH_RESPONSE` was received.
@@ -527,7 +525,7 @@ pub struct Path {
/// For a path that is not validated, this is `None`. For a validated
/// path, the time that the path was last valid.
validated: Option<Instant>,
- /// A path challenge was received and PATH_RESPONSE has not been sent.
+ /// A path challenge was received and `PATH_RESPONSE` has not been sent.
challenge: Option<[u8; 8]>,
/// The round trip time estimate for this path.
@@ -796,7 +794,7 @@ impl Path {
// Send PATH_CHALLENGE.
if let ProbeState::ProbeNeeded { probe_count } = self.state {
qtrace!([self], "Initiating path challenge {}", probe_count);
- let data = <[u8; 8]>::try_from(&random(8)[..]).unwrap();
+ let data = random::<8>();
builder.encode_varint(FRAME_TYPE_PATH_CHALLENGE);
builder.encode(&data);
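
The path.rs hunks above also replace `self.primary.as_ref().map(Rc::clone)` with `self.primary.clone()`. If the reasoning isn't obvious: cloning an `Option<Rc<T>>` only copies the pointer and bumps the reference count, so the two spellings are equivalent. A minimal sketch, with illustrative names:

```rust
use std::rc::Rc;

// Sketch of the simplification above: for an `Option<Rc<T>>`, `.clone()` and
// `.as_ref().map(Rc::clone)` both copy the pointer and bump the reference
// count; neither clones the pointee.
fn main() {
    let primary: Option<Rc<Vec<u8>>> = Some(Rc::new(vec![1, 2, 3]));

    let old_spelling = primary.as_ref().map(Rc::clone);
    let new_spelling = primary.clone();

    if let Some(p) = &primary {
        // One owner in `primary`, plus one in each clone.
        assert_eq!(Rc::strong_count(p), 3);
    }
    assert_eq!(old_spelling, new_spelling);
}
```
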
diff --git a/third_party/rust/neqo-transport/src/qlog.rs b/third_party/rust/neqo-transport/src/qlog.rs
index 434395fd23..2572966104 100644
--- a/third_party/rust/neqo-transport/src/qlog.rs
+++ b/third_party/rust/neqo-transport/src/qlog.rs
@@ -7,9 +7,7 @@
// Functions that handle capturing QLOG traces.
use std::{
- convert::TryFrom,
ops::{Deref, RangeInclusive},
- string::String,
time::Duration,
};
@@ -38,6 +36,7 @@ use crate::{
pub fn connection_tparams_set(qlog: &mut NeqoQlog, tph: &TransportParametersHandler) {
qlog.add_event_data(|| {
let remote = tph.remote();
+ #[allow(clippy::cast_possible_truncation)] // Nope.
let ev_data = EventData::TransportParametersSet(
qlog::events::quic::TransportParametersSet {
owner: None,
@@ -206,7 +205,7 @@ pub fn packet_sent(
let mut frames = SmallVec::new();
while d.remaining() > 0 {
if let Ok(f) = Frame::decode(&mut d) {
- frames.push(frame_to_qlogframe(&f))
+ frames.push(frame_to_qlogframe(&f));
} else {
qinfo!("qlog: invalid frame");
break;
@@ -300,7 +299,7 @@ pub fn packet_received(
while d.remaining() > 0 {
if let Ok(f) = Frame::decode(&mut d) {
- frames.push(frame_to_qlogframe(&f))
+ frames.push(frame_to_qlogframe(&f));
} else {
qinfo!("qlog: invalid frame");
break;
@@ -355,6 +354,7 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) {
let mut pacing_rate: Option<u64> = None;
for metric in updated_metrics {
+ #[allow(clippy::cast_precision_loss)] // Nought to do here.
match metric {
QlogMetric::MinRtt(v) => min_rtt = Some(v.as_secs_f32() * 1000.0),
QlogMetric::SmoothedRtt(v) => smoothed_rtt = Some(v.as_secs_f32() * 1000.0),
@@ -391,6 +391,8 @@ pub fn metrics_updated(qlog: &mut NeqoQlog, updated_metrics: &[QlogMetric]) {
// Helper functions
+#[allow(clippy::too_many_lines)] // Yeah, but it's a nice match.
+#[allow(clippy::cast_possible_truncation, clippy::cast_precision_loss)] // No choice here.
fn frame_to_qlogframe(frame: &Frame) -> QuicFrame {
match frame {
Frame::Padding => QuicFrame::Padding,
diff --git a/third_party/rust/neqo-transport/src/quic_datagrams.rs b/third_party/rust/neqo-transport/src/quic_datagrams.rs
index 07f3594768..d7c4769e31 100644
--- a/third_party/rust/neqo-transport/src/quic_datagrams.rs
+++ b/third_party/rust/neqo-transport/src/quic_datagrams.rs
@@ -6,7 +6,7 @@
// https://datatracker.ietf.org/doc/html/draft-ietf-quic-datagram
-use std::{cmp::min, collections::VecDeque, convert::TryFrom};
+use std::{cmp::min, collections::VecDeque};
use neqo_common::Encoder;
@@ -103,7 +103,7 @@ impl QuicDatagrams {
/// This function tries to write a datagram frame into a packet.
/// If the frame does not fit into the packet, the datagram will
- /// be dropped and a DatagramLost event will be posted.
+ /// be dropped and a `DatagramLost` event will be posted.
pub fn write_frames(
&mut self,
builder: &mut PacketBuilder,
diff --git a/third_party/rust/neqo-transport/src/recovery.rs b/third_party/rust/neqo-transport/src/recovery.rs
index d90989b486..dbea3aaf57 100644
--- a/third_party/rust/neqo-transport/src/recovery.rs
+++ b/third_party/rust/neqo-transport/src/recovery.rs
@@ -6,12 +6,9 @@
// Tracking of sent packets and detecting their loss.
-#![deny(clippy::pedantic)]
-
use std::{
cmp::{max, min},
collections::BTreeMap,
- convert::TryFrom,
mem,
ops::RangeInclusive,
time::{Duration, Instant},
@@ -1020,14 +1017,13 @@ impl ::std::fmt::Display for LossRecovery {
mod tests {
use std::{
cell::RefCell,
- convert::TryInto,
ops::{Deref, DerefMut, RangeInclusive},
rc::Rc,
time::{Duration, Instant},
};
use neqo_common::qlog::NeqoQlog;
- use test_fixture::{addr, now};
+ use test_fixture::{now, DEFAULT_ADDR};
use super::{
LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE,
@@ -1105,7 +1101,14 @@ mod tests {
impl Default for Fixture {
fn default() -> Self {
const CC: CongestionControlAlgorithm = CongestionControlAlgorithm::NewReno;
- let mut path = Path::temporary(addr(), addr(), CC, true, NeqoQlog::default(), now());
+ let mut path = Path::temporary(
+ DEFAULT_ADDR,
+ DEFAULT_ADDR,
+ CC,
+ true,
+ NeqoQlog::default(),
+ now(),
+ );
path.make_permanent(
None,
ConnectionIdEntry::new(0, ConnectionId::from(&[1, 2, 3]), [0; 16]),
diff --git a/third_party/rust/neqo-transport/src/recv_stream.rs b/third_party/rust/neqo-transport/src/recv_stream.rs
index 06ca59685d..5da80d6004 100644
--- a/third_party/rust/neqo-transport/src/recv_stream.rs
+++ b/third_party/rust/neqo-transport/src/recv_stream.rs
@@ -11,7 +11,6 @@ use std::{
cell::RefCell,
cmp::max,
collections::BTreeMap,
- convert::TryFrom,
mem,
rc::{Rc, Weak},
};
@@ -34,6 +33,7 @@ use crate::{
const RX_STREAM_DATA_WINDOW: u64 = 0x10_0000; // 1MiB
// Export as usize for consistency with SEND_BUFFER_SIZE
+#[allow(clippy::cast_possible_truncation)] // Yeah, nope.
pub const RECV_BUFFER_SIZE: usize = RX_STREAM_DATA_WINDOW as usize;
#[derive(Debug, Default)]
@@ -130,6 +130,7 @@ pub struct RxStreamOrderer {
}
impl RxStreamOrderer {
+ #[must_use]
pub fn new() -> Self {
Self::default()
}
@@ -137,6 +138,9 @@ impl RxStreamOrderer {
/// Process an incoming stream frame off the wire. This may result in data
/// being available to upper layers if frame is not out of order (ooo) or
/// if the frame fills a gap.
+ /// # Panics
+ /// Only when `u64` values cannot be converted to `usize`, which only
+ /// happens on 32-bit machines that hold far too much data at the same time.
pub fn inbound_frame(&mut self, mut new_start: u64, mut new_data: &[u8]) {
qtrace!("Inbound data offset={} len={}", new_start, new_data.len());
@@ -275,6 +279,7 @@ impl RxStreamOrderer {
}
/// Are any bytes readable?
+ #[must_use]
pub fn data_ready(&self) -> bool {
self.data_ranges
.keys()
@@ -301,20 +306,24 @@ impl RxStreamOrderer {
false
}
})
- .map(|(_, data_len)| data_len as usize)
- .sum()
+ // Accumulate, but saturate at usize::MAX.
+ .fold(0, |acc: usize, (_, data_len)| {
+ acc.saturating_add(usize::try_from(data_len).unwrap_or(usize::MAX))
+ })
}
/// Bytes read by the application.
+ #[must_use]
pub fn retired(&self) -> u64 {
self.retired
}
+ #[must_use]
pub fn received(&self) -> u64 {
self.received
}
- /// Data bytes buffered. Could be more than bytes_readable if there are
+ /// Data bytes buffered. Could be more than `bytes_readable` if there are
/// ranges missing.
fn buffered(&self) -> u64 {
self.data_ranges
@@ -588,6 +597,7 @@ impl RecvStream {
self.state = new_state;
}
+ #[must_use]
pub fn stats(&self) -> RecvStreamStats {
match &self.state {
RecvStreamState::Recv { recv_buf, .. }
@@ -622,6 +632,11 @@ impl RecvStream {
}
}
+ /// # Errors
+ /// When the incoming data violates flow control limits.
+ /// # Panics
+ /// Only when `u64` values are so big that they can't fit in a `usize`, which
+ /// only happens on a 32-bit machine that has far too much unread data.
pub fn inbound_stream_frame(&mut self, fin: bool, offset: u64, data: &[u8]) -> Res<()> {
// We should post a DataReadable event only once when we change from no-data-ready to
// data-ready. Therefore remember the state before processing a new frame.
@@ -691,6 +706,8 @@ impl RecvStream {
Ok(())
}
+ /// # Errors
+ /// When the reset occurs at an invalid point.
pub fn reset(&mut self, application_error_code: AppError, final_size: u64) -> Res<()> {
self.state.flow_control_consume_data(final_size, true)?;
match &mut self.state {
@@ -773,6 +790,7 @@ impl RecvStream {
}
}
+ #[must_use]
pub fn is_terminal(&self) -> bool {
matches!(
self.state,
@@ -792,8 +810,8 @@ impl RecvStream {
}
/// # Errors
- ///
/// `NoMoreData` if data and fin bit were previously read by the application.
+ #[allow(clippy::missing_panics_doc)] // with a >16 exabyte packet on a 128-bit machine, maybe
pub fn read(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> {
let data_recvd_state = matches!(self.state, RecvStreamState::DataRecvd { .. });
match &mut self.state {
@@ -967,6 +985,7 @@ impl RecvStream {
}
#[cfg(test)]
+ #[must_use]
pub fn has_frames_to_write(&self) -> bool {
if let RecvStreamState::Recv { fc, .. } = &self.state {
fc.frame_needed()
@@ -976,6 +995,7 @@ impl RecvStream {
}
#[cfg(test)]
+ #[must_use]
pub fn fc(&self) -> Option<&ReceiverFlowControl<StreamId>> {
match &self.state {
RecvStreamState::Recv { fc, .. }
@@ -990,11 +1010,18 @@ impl RecvStream {
#[cfg(test)]
mod tests {
- use std::ops::Range;
+ use std::{cell::RefCell, ops::Range, rc::Rc};
- use neqo_common::Encoder;
+ use neqo_common::{qtrace, Encoder};
- use super::*;
+ use super::RecvStream;
+ use crate::{
+ fc::ReceiverFlowControl,
+ packet::PacketBuilder,
+ recv_stream::{RxStreamOrderer, RX_STREAM_DATA_WINDOW},
+ stats::FrameStats,
+ ConnectionEvents, Error, StreamId, RECV_BUFFER_SIZE,
+ };
const SESSION_WINDOW: usize = 1024;
@@ -1444,8 +1471,8 @@ mod tests {
let mut buf = vec![0u8; RECV_BUFFER_SIZE + 100]; // Make it overlarge
assert!(!s.has_frames_to_write());
- s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE])
- .unwrap();
+ let big_buf = vec![0; RECV_BUFFER_SIZE];
+ s.inbound_stream_frame(false, 0, &big_buf).unwrap();
assert!(!s.has_frames_to_write());
assert_eq!(s.read(&mut buf).unwrap(), (RECV_BUFFER_SIZE, false));
assert!(!s.data_ready());
@@ -1476,8 +1503,8 @@ mod tests {
fn stream_max_stream_data() {
let mut s = create_stream(1024 * RX_STREAM_DATA_WINDOW);
assert!(!s.has_frames_to_write());
- s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE])
- .unwrap();
+ let big_buf = vec![0; RECV_BUFFER_SIZE];
+ s.inbound_stream_frame(false, 0, &big_buf).unwrap();
s.inbound_stream_frame(false, RX_STREAM_DATA_WINDOW, &[1; 1])
.unwrap_err();
}
@@ -1520,9 +1547,10 @@ mod tests {
#[test]
fn no_stream_flowc_event_after_exiting_recv() {
let mut s = create_stream(1024 * RX_STREAM_DATA_WINDOW);
- s.inbound_stream_frame(false, 0, &[0; RECV_BUFFER_SIZE])
- .unwrap();
- let mut buf = [0; RECV_BUFFER_SIZE];
+ let mut buf = vec![0; RECV_BUFFER_SIZE];
+ // Write from buf at first.
+ s.inbound_stream_frame(false, 0, &buf).unwrap();
+ // Then read into it.
s.read(&mut buf).unwrap();
assert!(s.has_frames_to_write());
s.inbound_stream_frame(true, RX_STREAM_DATA_WINDOW, &[])
@@ -1634,7 +1662,7 @@ mod tests {
assert_eq!(fc.retired(), retired);
}
- /// Test consuming the flow control in RecvStreamState::Recv
+ /// Test consuming the flow control in `RecvStreamState::Recv`
#[test]
fn fc_state_recv_1() {
const SW: u64 = 1024;
@@ -1651,7 +1679,7 @@ mod tests {
check_fc(s.fc().unwrap(), SW / 4, 0);
}
- /// Test consuming the flow control in RecvStreamState::Recv
+ /// Test consuming the flow control in `RecvStreamState::Recv`
/// with multiple streams
#[test]
fn fc_state_recv_2() {
@@ -1678,7 +1706,7 @@ mod tests {
check_fc(s2.fc().unwrap(), SW / 4, 0);
}
- /// Test retiring the flow control in RecvStreamState::Recv
+ /// Test retiring the flow control in `RecvStreamState::Recv`
/// with multiple streams
#[test]
fn fc_state_recv_3() {
@@ -1730,7 +1758,7 @@ mod tests {
check_fc(s2.fc().unwrap(), SW / 4, SW / 4);
}
- /// Test consuming the flow control in RecvStreamState::Recv - duplicate data
+ /// Test consuming the flow control in `RecvStreamState::Recv` - duplicate data
#[test]
fn fc_state_recv_4() {
const SW: u64 = 1024;
@@ -1753,7 +1781,7 @@ mod tests {
check_fc(s.fc().unwrap(), SW / 4, 0);
}
- /// Test consuming the flow control in RecvStreamState::Recv - filling a gap in the
+ /// Test consuming the flow control in `RecvStreamState::Recv` - filling a gap in the
/// data stream.
#[test]
fn fc_state_recv_5() {
@@ -1774,7 +1802,7 @@ mod tests {
check_fc(s.fc().unwrap(), SW / 4, 0);
}
- /// Test consuming the flow control in RecvStreamState::Recv - receiving frame past
+ /// Test consuming the flow control in `RecvStreamState::Recv` - receiving frame past
/// the flow control will cause an error.
#[test]
fn fc_state_recv_6() {
@@ -1859,7 +1887,7 @@ mod tests {
assert_eq!(stats.max_stream_data, 1);
}
- /// Test flow control in RecvStreamState::SizeKnown
+ /// Test flow control in `RecvStreamState::SizeKnown`
#[test]
fn fc_state_size_known() {
const SW: u64 = 1024;
@@ -1916,7 +1944,7 @@ mod tests {
assert!(s.fc().is_none());
}
- /// Test flow control in RecvStreamState::DataRecvd
+ /// Test flow control in `RecvStreamState::DataRecvd`
#[test]
fn fc_state_data_recv() {
const SW: u64 = 1024;
@@ -1961,7 +1989,7 @@ mod tests {
assert!(s.fc().is_none());
}
- /// Test flow control in RecvStreamState::DataRead
+ /// Test flow control in `RecvStreamState::DataRead`
#[test]
fn fc_state_data_read() {
const SW: u64 = 1024;
@@ -1999,7 +2027,7 @@ mod tests {
assert!(s.fc().is_none());
}
- /// Test flow control in RecvStreamState::AbortReading and final size is known
+ /// Test flow control in `RecvStreamState::AbortReading` and final size is known
#[test]
fn fc_state_abort_reading_1() {
const SW: u64 = 1024;
@@ -2041,7 +2069,7 @@ mod tests {
check_fc(s.fc().unwrap(), SW / 2, SW / 2);
}
- /// Test flow control in RecvStreamState::AbortReading and final size is unknown
+ /// Test flow control in `RecvStreamState::AbortReading` and final size is unknown
#[test]
fn fc_state_abort_reading_2() {
const SW: u64 = 1024;
@@ -2099,7 +2127,7 @@ mod tests {
check_fc(s.fc().unwrap(), SW / 2 + 20, SW / 2 + 20);
}
- /// Test flow control in RecvStreamState::WaitForReset
+ /// Test flow control in `RecvStreamState::WaitForReset`
#[test]
fn fc_state_wait_for_reset() {
const SW: u64 = 1024;
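
The `bytes_readable` hunk above trades a `sum()` over `as usize` casts for a saturating fold, so a 32-bit build can neither truncate a single `u64` length nor overflow the running total. A small self-contained sketch of just that accumulation step (the real code also checks that the ranges are contiguous, which is omitted here):

```rust
// Sketch of the accumulation pattern adopted in recv_stream.rs above:
// convert each u64 length fallibly and saturate, instead of `as usize` + sum.
fn readable_bytes(range_lens: &[u64]) -> usize {
    range_lens.iter().fold(0usize, |acc, &len| {
        acc.saturating_add(usize::try_from(len).unwrap_or(usize::MAX))
    })
}

fn main() {
    assert_eq!(readable_bytes(&[10, 20, 30]), 60);
    // A pathological length no longer wraps; it pins the result at usize::MAX.
    assert_eq!(readable_bytes(&[u64::MAX, 1]), usize::MAX);
}
```
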
diff --git a/third_party/rust/neqo-transport/src/rtt.rs b/third_party/rust/neqo-transport/src/rtt.rs
index 4b05198bc9..3b2969f689 100644
--- a/third_party/rust/neqo-transport/src/rtt.rs
+++ b/third_party/rust/neqo-transport/src/rtt.rs
@@ -6,8 +6,6 @@
// Tracking of sent packets and detecting their loss.
-#![deny(clippy::pedantic)]
-
use std::{
cmp::{max, min},
time::{Duration, Instant},
diff --git a/third_party/rust/neqo-transport/src/send_stream.rs b/third_party/rust/neqo-transport/src/send_stream.rs
index 5feb785ac6..8771ec7765 100644
--- a/third_party/rust/neqo-transport/src/send_stream.rs
+++ b/third_party/rust/neqo-transport/src/send_stream.rs
@@ -9,8 +9,7 @@
use std::{
cell::RefCell,
cmp::{max, min, Ordering},
- collections::{BTreeMap, VecDeque},
- convert::TryFrom,
+ collections::{btree_map::Entry, BTreeMap, VecDeque},
hash::{Hash, Hasher},
mem,
ops::Add,
@@ -18,7 +17,7 @@ use std::{
};
use indexmap::IndexMap;
-use neqo_common::{qdebug, qerror, qinfo, qtrace, Encoder, Role};
+use neqo_common::{qdebug, qerror, qtrace, Encoder, Role};
use smallvec::SmallVec;
use crate::{
@@ -111,7 +110,7 @@ impl Add<RetransmissionPriority> for TransmissionPriority {
/// If data is lost, this determines the priority that applies to retransmissions
/// of that data.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum RetransmissionPriority {
/// Prioritize retransmission at a fixed priority.
/// With this, it is possible to prioritize retransmissions lower than transmissions.
@@ -123,19 +122,14 @@ pub enum RetransmissionPriority {
Same,
/// Increase the priority of retransmissions (the default).
/// Retransmissions of `Critical` or `Important` aren't elevated at all.
+ #[default]
Higher,
/// Increase the priority of retransmissions a lot.
/// This is useful for streams that are particularly exposed to head-of-line blocking.
MuchHigher,
}
-impl Default for RetransmissionPriority {
- fn default() -> Self {
- Self::Higher
- }
-}
-
-#[derive(Debug, PartialEq, Clone, Copy)]
+#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum RangeState {
Sent,
Acked,
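
The hunk above replaces a hand-written `impl Default` with `#[derive(Default)]` plus a `#[default]` attribute on the chosen variant. A minimal sketch of the idiom, using a hypothetical enum name:

```rust
// Sketch only: `#[derive(Default)]` on an enum uses the variant marked
// `#[default]` as the value returned by `Default::default()`.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
enum Priority {
    Fixed,
    Same,
    #[default]
    Higher,
    MuchHigher,
}

fn main() {
    assert_eq!(Priority::default(), Priority::Higher);
}
```
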
@@ -144,169 +138,268 @@ enum RangeState {
/// Track ranges in the stream as sent or acked. Acked implies sent. Not in a
/// range implies needing-to-be-sent, either initially or as a retransmission.
#[derive(Debug, Default, PartialEq)]
-struct RangeTracker {
- // offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits.
+pub struct RangeTracker {
+ /// The number of bytes that have been acknowledged starting from offset 0.
+ acked: u64,
+ /// A map that tracks the state of ranges.
+ /// Keys are the offsets at which ranges start.
+ /// Values are tuples of the range length and its state.
used: BTreeMap<u64, (u64, RangeState)>,
+ /// This is a cache for the output of `first_unmarked_range`, which we check a lot.
+ first_unmarked: Option<(u64, Option<u64>)>,
}
impl RangeTracker {
fn highest_offset(&self) -> u64 {
self.used
- .range(..)
- .next_back()
- .map_or(0, |(k, (v, _))| *k + *v)
+ .last_key_value()
+ .map_or(self.acked, |(&k, &(v, _))| k + v)
}
fn acked_from_zero(&self) -> u64 {
- self.used
- .get(&0)
- .filter(|(_, state)| *state == RangeState::Acked)
- .map_or(0, |(v, _)| *v)
+ self.acked
}
/// Find the first unmarked range. If all are contiguous, this will return
- /// (highest_offset(), None).
- fn first_unmarked_range(&self) -> (u64, Option<u64>) {
- let mut prev_end = 0;
+ /// (`highest_offset()`, None).
+ fn first_unmarked_range(&mut self) -> (u64, Option<u64>) {
+ if let Some(first_unmarked) = self.first_unmarked {
+ return first_unmarked;
+ }
+
+ let mut prev_end = self.acked;
- for (cur_off, (cur_len, _)) in &self.used {
- if prev_end == *cur_off {
+ for (&cur_off, &(cur_len, _)) in &self.used {
+ if prev_end == cur_off {
prev_end = cur_off + cur_len;
} else {
- return (prev_end, Some(cur_off - prev_end));
+ let res = (prev_end, Some(cur_off - prev_end));
+ self.first_unmarked = Some(res);
+ return res;
}
}
+ self.first_unmarked = Some((prev_end, None));
(prev_end, None)
}
- /// Turn one range into a list of subranges that align with existing
- /// ranges.
- /// Check impermissible overlaps in subregions: Sent cannot overwrite Acked.
- //
- // e.g. given N is new and ABC are existing:
- // NNNNNNNNNNNNNNNN
- // AAAAA BBBCCCCC ...then we want 5 chunks:
- // 1122222333444555
- //
- // but also if we have this:
- // NNNNNNNNNNNNNNNN
- // AAAAAAAAAA BBBB ...then break existing A and B ranges up:
- //
- // 1111111122222233
- // aaAAAAAAAA BBbb
- //
- // Doing all this work up front should make handling each chunk much
- // easier.
- fn chunk_range_on_edges(
- &mut self,
- new_off: u64,
- new_len: u64,
- new_state: RangeState,
- ) -> Vec<(u64, u64, RangeState)> {
- let mut tmp_off = new_off;
- let mut tmp_len = new_len;
- let mut v = Vec::new();
-
- // cut previous overlapping range if needed
- let prev = self.used.range_mut(..tmp_off).next_back();
- if let Some((prev_off, (prev_len, prev_state))) = prev {
- let prev_state = *prev_state;
- let overlap = (*prev_off + *prev_len).saturating_sub(new_off);
- *prev_len -= overlap;
- if overlap > 0 {
- self.used.insert(new_off, (overlap, prev_state));
+ /// When the range of acknowledged bytes from zero increases, we need to drop any
+ /// ranges within that span AND maybe extend it to include any adjacent acknowledged ranges.
+ fn coalesce_acked(&mut self) {
+ while let Some(e) = self.used.first_entry() {
+ match self.acked.cmp(e.key()) {
+ Ordering::Greater => {
+ let (off, (len, state)) = e.remove_entry();
+ let overflow = (off + len).saturating_sub(self.acked);
+ if overflow > 0 {
+ if state == RangeState::Acked {
+ self.acked += overflow;
+ } else {
+ self.used.insert(self.acked, (overflow, state));
+ }
+ break;
+ }
+ }
+ Ordering::Equal => {
+ if e.get().1 == RangeState::Acked {
+ let (len, _) = e.remove();
+ self.acked += len;
+ }
+ break;
+ }
+ Ordering::Less => break,
}
}
+ }
- let mut last_existing_remaining = None;
- for (off, (len, state)) in self.used.range(tmp_off..tmp_off + tmp_len) {
- // Create chunk for "overhang" before an existing range
- if tmp_off < *off {
- let sub_len = off - tmp_off;
- v.push((tmp_off, sub_len, new_state));
- tmp_off += sub_len;
- tmp_len -= sub_len;
- }
+ /// Mark a range as acknowledged. This is simpler than marking a range as sent
+ /// because an acknowledged range can never turn back into a sent range, so
+ /// this function can just override the entire range.
+ ///
+ /// The only tricky parts are making sure that we maintain `self.acked`,
+ /// which is the first acknowledged range. And making sure that we don't create
+ /// ranges of the same type that are adjacent; these need to be merged.
+ #[allow(clippy::missing_panics_doc)] // with a >16 exabyte packet on a 128-bit machine, maybe
+ pub fn mark_acked(&mut self, new_off: u64, new_len: usize) {
+ let end = new_off + u64::try_from(new_len).unwrap();
+ let new_off = max(self.acked, new_off);
+ let mut new_len = end.saturating_sub(new_off);
+ if new_len == 0 {
+ return;
+ }
- // Create chunk to match existing range
- let sub_len = min(*len, tmp_len);
- let remaining_len = len - sub_len;
- if new_state == RangeState::Sent && *state == RangeState::Acked {
- qinfo!(
- "Attempted to downgrade overlapping range Acked range {}-{} with Sent {}-{}",
- off,
- len,
- new_off,
- new_len
- );
- } else {
- v.push((tmp_off, sub_len, new_state));
- }
- tmp_off += sub_len;
- tmp_len -= sub_len;
+ self.first_unmarked = None;
+ if new_off == self.acked {
+ self.acked += new_len;
+ self.coalesce_acked();
+ return;
+ }
+ let mut new_end = new_off + new_len;
- if remaining_len > 0 {
- last_existing_remaining = Some((*off, sub_len, remaining_len, *state));
+ // Get all existing ranges that start within this new range.
+ let mut covered = self
+ .used
+ .range(new_off..new_end)
+ .map(|(&k, _)| k)
+ .collect::<SmallVec<[_; 8]>>();
+
+ if let Entry::Occupied(next_entry) = self.used.entry(new_end) {
+ // Check if the very next entry is the same type as this.
+ if next_entry.get().1 == RangeState::Acked {
+ // If it is acked, drop it and extend this new range.
+ let (extra_len, _) = next_entry.remove();
+ new_len += extra_len;
+ new_end += extra_len;
+ }
+ } else if let Some(last) = covered.pop() {
+ // Otherwise, the last of the existing ranges might overhang this one by some.
+ let (old_off, (old_len, old_state)) = self.used.remove_entry(&last).unwrap(); // can't fail
+ let remainder = (old_off + old_len).saturating_sub(new_end);
+ if remainder > 0 {
+ if old_state == RangeState::Acked {
+ // Just extend the current range.
+ new_len += remainder;
+ new_end += remainder;
+ } else {
+ self.used.insert(new_end, (remainder, RangeState::Sent));
+ }
}
}
-
- // Maybe break last existing range in two so that a final chunk will
- // have the same length as an existing range entry
- if let Some((off, sub_len, remaining_len, state)) = last_existing_remaining {
- *self.used.get_mut(&off).expect("must be there") = (sub_len, state);
- self.used.insert(off + sub_len, (remaining_len, state));
+ // All covered ranges can just be trashed.
+ for k in covered {
+ self.used.remove(&k);
}
- // Create final chunk if anything remains of the new range
- if tmp_len > 0 {
- v.push((tmp_off, tmp_len, new_state));
+ // Now either merge with a preceding acked range
+ // or cut a preceding sent range as needed.
+ let prev = self.used.range_mut(..new_off).next_back();
+ if let Some((prev_off, (prev_len, prev_state))) = prev {
+ let prev_end = *prev_off + *prev_len;
+ if prev_end >= new_off {
+ if *prev_state == RangeState::Sent {
+ *prev_len = new_off - *prev_off;
+ if prev_end > new_end {
+ // There is some extra sent range after the new acked range.
+ self.used
+ .insert(new_end, (prev_end - new_end, RangeState::Sent));
+ }
+ } else {
+ *prev_len = max(prev_end, new_end) - *prev_off;
+ return;
+ }
+ }
+ }
+ self.used.insert(new_off, (new_len, RangeState::Acked));
+ }
+
+ /// Turn a single sent range into a list of subranges that align with existing
+ /// acknowledged ranges.
+ ///
+ /// This is more complicated than adding acked ranges because any acked ranges
+ /// need to be kept in place, with sent ranges filling the gaps.
+ ///
+ /// This means:
+ /// ```ignore
+ /// AAA S AAAS AAAAA
+ /// + SSSSSSSSSSSSS
+ /// = AAASSSAAASSAAAAA
+ /// ```
+ ///
+ /// But we also have to ensure that:
+ /// ```ignore
+ /// SSSS
+ /// + SS
+ /// = SSSSSS
+ /// ```
+ /// and
+ /// ```ignore
+ /// SSSSS
+ /// + SS
+ /// = SSSSSS
+ /// ```
+ #[allow(clippy::missing_panics_doc)] // not possible
+ pub fn mark_sent(&mut self, mut new_off: u64, new_len: usize) {
+ let new_end = new_off + u64::try_from(new_len).unwrap();
+ new_off = max(self.acked, new_off);
+ let mut new_len = new_end.saturating_sub(new_off);
+ if new_len == 0 {
+ return;
}
- v
- }
+ self.first_unmarked = None;
- /// Merge contiguous Acked ranges into the first entry (0). This range may
- /// be dropped from the send buffer.
- fn coalesce_acked_from_zero(&mut self) {
- let acked_range_from_zero = self
+ // Get all existing ranges that start within this new range.
+ let covered = self
.used
- .get_mut(&0)
- .filter(|(_, state)| *state == RangeState::Acked)
- .map(|(len, _)| *len);
-
- if let Some(len_from_zero) = acked_range_from_zero {
- let mut new_len_from_zero = len_from_zero;
-
- // See if there's another Acked range entry contiguous to this one
- while let Some((next_len, _)) = self
- .used
- .get(&new_len_from_zero)
- .filter(|(_, state)| *state == RangeState::Acked)
- {
- let to_remove = new_len_from_zero;
- new_len_from_zero += *next_len;
- self.used.remove(&to_remove);
- }
-
- if len_from_zero != new_len_from_zero {
- self.used.get_mut(&0).expect("must be there").0 = new_len_from_zero;
+ .range(new_off..(new_off + new_len))
+ .map(|(&k, _)| k)
+ .collect::<SmallVec<[u64; 8]>>();
+
+ if let Entry::Occupied(next_entry) = self.used.entry(new_end) {
+ if next_entry.get().1 == RangeState::Sent {
+ // Check if the very next entry is the same type as this, so it can be merged.
+ let (extra_len, _) = next_entry.remove();
+ new_len += extra_len;
}
}
- }
- fn mark_range(&mut self, off: u64, len: usize, state: RangeState) {
- if len == 0 {
- qinfo!("mark 0-length range at {}", off);
- return;
- }
+ // Merge with any preceding sent range that might overlap,
+ // or cut the head of this if the preceding range is acked.
+ let prev = self.used.range(..new_off).next_back();
+ if let Some((&prev_off, &(prev_len, prev_state))) = prev {
+ if prev_off + prev_len >= new_off {
+ let overlap = prev_off + prev_len - new_off;
+ new_len = new_len.saturating_sub(overlap);
+ if new_len == 0 {
+ // The previous range completely covers this one (no more to do).
+ return;
+ }
- let subranges = self.chunk_range_on_edges(off, len as u64, state);
+ if prev_state == RangeState::Acked {
+ // The previous range is acked, so it cuts this one.
+ new_off += overlap;
+ } else {
+ // Extend the current range backwards.
+ new_off = prev_off;
+ new_len += prev_len;
+ // The previous range will be updated below.
+ // It might need to be cut because of a covered acked range.
+ }
+ }
+ }
- for (sub_off, sub_len, sub_state) in subranges {
- self.used.insert(sub_off, (sub_len, sub_state));
+ // Now interleave new sent chunks with any existing acked chunks.
+ for old_off in covered {
+ let Entry::Occupied(e) = self.used.entry(old_off) else {
+ unreachable!();
+ };
+ let &(old_len, old_state) = e.get();
+ if old_state == RangeState::Acked {
+ // Now we have to insert a chunk ahead of this acked chunk.
+ let chunk_len = old_off - new_off;
+ if chunk_len > 0 {
+ self.used.insert(new_off, (chunk_len, RangeState::Sent));
+ }
+ let included = chunk_len + old_len;
+ new_len = new_len.saturating_sub(included);
+ if new_len == 0 {
+ return;
+ }
+ new_off += included;
+ } else {
+ let overhang = (old_off + old_len).saturating_sub(new_off + new_len);
+ new_len += overhang;
+ if *e.key() != new_off {
+ // Retain a sent entry at `new_off`.
+ // This avoids the work of removing and re-creating an entry.
+ // The value will be overwritten when the next insert occurs,
+ // either when this loop hits an acked range (above)
+ // or for any remainder (below).
+ e.remove();
+ }
+ }
}
- self.coalesce_acked_from_zero();
+ self.used.insert(new_off, (new_len, RangeState::Sent));
}
fn unmark_range(&mut self, off: u64, len: usize) {
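
The rewritten `RangeTracker` above keeps the acknowledged prefix in a dedicated `acked` counter and only stores out-of-prefix ranges in the map, which makes `acked_from_zero` a plain field read and turns the old `coalesce_acked_from_zero` walk into folding newly contiguous entries into the prefix. A much-simplified sketch of that prefix-plus-map idea (illustrative names; it ignores sent ranges and the overlap handling the patched code deals with):

```rust
use std::collections::BTreeMap;

// Minimal sketch (not the patched code): a counter for the acked prefix plus
// a map of acked ranges that start beyond it.
#[derive(Default)]
struct PrefixTracker {
    acked: u64,                       // bytes acknowledged starting from offset 0
    acked_ranges: BTreeMap<u64, u64>, // disjoint acked ranges beyond the prefix
}

impl PrefixTracker {
    fn acked_from_zero(&self) -> u64 {
        self.acked // O(1): no map lookup needed
    }

    fn mark_acked(&mut self, off: u64, len: u64) {
        if off > self.acked {
            // Out of order: park it in the map for later.
            self.acked_ranges.insert(off, len);
            return;
        }
        // Extends the prefix; then absorb any ranges that are now contiguous.
        self.acked = self.acked.max(off + len);
        while let Some(entry) = self.acked_ranges.first_entry() {
            if *entry.key() > self.acked {
                break;
            }
            let (next_off, next_len) = entry.remove_entry();
            self.acked = self.acked.max(next_off + next_len);
        }
    }
}

fn main() {
    let mut t = PrefixTracker::default();
    t.mark_acked(5, 5); // out of order: stays in the map
    assert_eq!(t.acked_from_zero(), 0);
    t.mark_acked(0, 5); // fills the gap: the map entry folds into the prefix
    assert_eq!(t.acked_from_zero(), 10);
}
```
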
@@ -315,6 +408,7 @@ impl RangeTracker {
return;
}
+ self.first_unmarked = None;
let len = u64::try_from(len).unwrap();
let end_off = off + len;
@@ -376,6 +470,9 @@ impl RangeTracker {
}
/// Unmark all sent ranges.
+ /// # Panics
+ /// On 32-bit machines where far too much is sent before calling this.
+ /// Note that this should not be called for handshakes, which should never exceed that limit.
pub fn unmark_sent(&mut self) {
self.unmark_range(0, usize::try_from(self.highest_offset()).unwrap());
}
@@ -384,36 +481,37 @@ impl RangeTracker {
/// Buffer to contain queued bytes and track their state.
#[derive(Debug, Default, PartialEq)]
pub struct TxBuffer {
- retired: u64, // contig acked bytes, no longer in buffer
send_buf: VecDeque<u8>, // buffer of not-acked bytes
ranges: RangeTracker, // ranges in buffer that have been sent or acked
}
impl TxBuffer {
+ #[must_use]
pub fn new() -> Self {
Self::default()
}
- /// Attempt to add some or all of the passed-in buffer to the TxBuffer.
+ /// Attempt to add some or all of the passed-in buffer to the `TxBuffer`.
pub fn send(&mut self, buf: &[u8]) -> usize {
let can_buffer = min(SEND_BUFFER_SIZE - self.buffered(), buf.len());
if can_buffer > 0 {
self.send_buf.extend(&buf[..can_buffer]);
- assert!(self.send_buf.len() <= SEND_BUFFER_SIZE);
+ debug_assert!(self.send_buf.len() <= SEND_BUFFER_SIZE);
}
can_buffer
}
- pub fn next_bytes(&self) -> Option<(u64, &[u8])> {
+ #[allow(clippy::missing_panics_doc)] // These are not possible.
+ pub fn next_bytes(&mut self) -> Option<(u64, &[u8])> {
let (start, maybe_len) = self.ranges.first_unmarked_range();
- if start == self.retired + u64::try_from(self.buffered()).unwrap() {
+ if start == self.retired() + u64::try_from(self.buffered()).unwrap() {
return None;
}
// Convert from ranges-relative-to-zero to
// ranges-relative-to-buffer-start
- let buff_off = usize::try_from(start - self.retired).unwrap();
+ let buff_off = usize::try_from(start - self.retired()).unwrap();
// Deque returns two slices. Create a subslice from whichever
// one contains the first unmarked data.
@@ -437,23 +535,22 @@ impl TxBuffer {
}
pub fn mark_as_sent(&mut self, offset: u64, len: usize) {
- self.ranges.mark_range(offset, len, RangeState::Sent);
+ self.ranges.mark_sent(offset, len);
}
+ #[allow(clippy::missing_panics_doc)] // Not possible here.
pub fn mark_as_acked(&mut self, offset: u64, len: usize) {
- self.ranges.mark_range(offset, len, RangeState::Acked);
+ let prev_retired = self.retired();
+ self.ranges.mark_acked(offset, len);
- // We can drop contig acked range from the buffer
- let new_retirable = self.ranges.acked_from_zero() - self.retired;
+ // Any newly-retired bytes can be dropped from the buffer.
+ let new_retirable = self.retired() - prev_retired;
debug_assert!(new_retirable <= self.buffered() as u64);
- let keep_len =
- self.buffered() - usize::try_from(new_retirable).expect("should fit in usize");
+ let keep = self.buffered() - usize::try_from(new_retirable).unwrap();
// Truncate front
- self.send_buf.rotate_left(self.buffered() - keep_len);
- self.send_buf.truncate(keep_len);
-
- self.retired += new_retirable;
+ self.send_buf.rotate_left(self.buffered() - keep);
+ self.send_buf.truncate(keep);
}
pub fn mark_as_lost(&mut self, offset: u64, len: usize) {
@@ -465,8 +562,9 @@ impl TxBuffer {
self.ranges.unmark_sent();
}
+ #[must_use]
pub fn retired(&self) -> u64 {
- self.retired
+ self.ranges.acked_from_zero()
}
fn buffered(&self) -> usize {
@@ -478,7 +576,7 @@ impl TxBuffer {
}
fn used(&self) -> u64 {
- self.retired + u64::try_from(self.buffered()).unwrap()
+ self.retired() + u64::try_from(self.buffered()).unwrap()
}
}
@@ -693,6 +791,7 @@ impl SendStream {
self.fair = make_fair;
}
+ #[must_use]
pub fn is_fair(&self) -> bool {
self.fair
}
@@ -706,6 +805,7 @@ impl SendStream {
self.retransmission_priority = retransmission;
}
+ #[must_use]
pub fn sendorder(&self) -> Option<SendOrder> {
self.sendorder
}
@@ -715,6 +815,7 @@ impl SendStream {
}
/// If all data has been buffered or written, how much was sent.
+ #[must_use]
pub fn final_size(&self) -> Option<u64> {
match &self.state {
SendStreamState::DataSent { send_buf, .. } => Some(send_buf.used()),
@@ -723,10 +824,13 @@ impl SendStream {
}
}
+ #[must_use]
pub fn stats(&self) -> SendStreamStats {
SendStreamStats::new(self.bytes_written(), self.bytes_sent, self.bytes_acked())
}
+ #[must_use]
+ #[allow(clippy::missing_panics_doc)] // not possible
pub fn bytes_written(&self) -> u64 {
match &self.state {
SendStreamState::Send { send_buf, .. } | SendStreamState::DataSent { send_buf, .. } => {
@@ -749,6 +853,7 @@ impl SendStream {
}
}
+ #[must_use]
pub fn bytes_acked(&self) -> u64 {
match &self.state {
SendStreamState::Send { send_buf, .. } | SendStreamState::DataSent { send_buf, .. } => {
@@ -766,11 +871,13 @@ impl SendStream {
/// offset.
fn next_bytes(&mut self, retransmission_only: bool) -> Option<(u64, &[u8])> {
match self.state {
- SendStreamState::Send { ref send_buf, .. } => {
- send_buf.next_bytes().and_then(|(offset, slice)| {
+ SendStreamState::Send {
+ ref mut send_buf, ..
+ } => {
+ let result = send_buf.next_bytes();
+ if let Some((offset, slice)) = result {
if retransmission_only {
qtrace!(
- [self],
"next_bytes apply retransmission limit at {}",
self.retransmission_offset
);
@@ -786,13 +893,16 @@ impl SendStream {
} else {
Some((offset, slice))
}
- })
+ } else {
+ None
+ }
}
SendStreamState::DataSent {
- ref send_buf,
+ ref mut send_buf,
fin_sent,
..
} => {
+ let used = send_buf.used(); // immutable first
let bytes = send_buf.next_bytes();
if bytes.is_some() {
bytes
@@ -800,7 +910,7 @@ impl SendStream {
None
} else {
// Send empty stream frame with fin set
- Some((send_buf.used(), &[]))
+ Some((used, &[]))
}
}
SendStreamState::Ready { .. }
@@ -833,6 +943,7 @@ impl SendStream {
}
/// Maybe write a `STREAM` frame.
+ #[allow(clippy::missing_panics_doc)] // not possible
pub fn write_stream_frame(
&mut self,
priority: TransmissionPriority,
@@ -995,6 +1106,7 @@ impl SendStream {
}
}
+ #[allow(clippy::missing_panics_doc)] // not possible
pub fn mark_as_sent(&mut self, offset: u64, len: usize, fin: bool) {
self.bytes_sent = max(self.bytes_sent, offset + u64::try_from(len).unwrap());
@@ -1010,6 +1122,7 @@ impl SendStream {
}
}
+ #[allow(clippy::missing_panics_doc)] // not possible
pub fn mark_as_acked(&mut self, offset: u64, len: usize, fin: bool) {
match self.state {
SendStreamState::Send {
@@ -1047,6 +1160,7 @@ impl SendStream {
}
}
+ #[allow(clippy::missing_panics_doc)] // not possible
pub fn mark_as_lost(&mut self, offset: u64, len: usize, fin: bool) {
self.retransmission_offset = max(
self.retransmission_offset,
@@ -1075,6 +1189,7 @@ impl SendStream {
/// Bytes sendable on stream. Constrained by stream credit available,
/// connection credit available, and space in the tx buffer.
+ #[must_use]
pub fn avail(&self) -> usize {
if let SendStreamState::Ready { fc, conn_fc } | SendStreamState::Send { fc, conn_fc, .. } =
&self.state
@@ -1100,6 +1215,7 @@ impl SendStream {
}
}
+ #[must_use]
pub fn is_terminal(&self) -> bool {
matches!(
self.state,
@@ -1107,10 +1223,14 @@ impl SendStream {
)
}
+ /// # Errors
+ /// When `buf` is empty or when the stream is already closed.
pub fn send(&mut self, buf: &[u8]) -> Res<usize> {
self.send_internal(buf, false)
}
+ /// # Errors
+ /// When `buf` is empty or when the stream is already closed.
pub fn send_atomic(&mut self, buf: &[u8]) -> Res<usize> {
self.send_internal(buf, true)
}
@@ -1155,9 +1275,9 @@ impl SendStream {
if atomic {
self.send_blocked_if_space_needed(buf.len());
return Ok(0);
- } else {
- &buf[..self.avail()]
}
+
+ &buf[..self.avail()]
} else {
buf
};
@@ -1202,6 +1322,7 @@ impl SendStream {
}
}
+ #[allow(clippy::missing_panics_doc)] // not possible
pub fn reset(&mut self, err: AppError) {
match &self.state {
SendStreamState::Ready { fc, .. } => {
@@ -1296,6 +1417,7 @@ impl OrderGroup {
}
}
+ #[must_use]
pub fn stream_ids(&self) -> &Vec<StreamId> {
&self.vec
}
@@ -1319,26 +1441,24 @@ impl OrderGroup {
next
}
+ /// # Panics
+ /// If the stream ID is already present.
pub fn insert(&mut self, stream_id: StreamId) {
- match self.vec.binary_search(&stream_id) {
- Ok(_) => {
- // element already in vector @ `pos`
- panic!("Duplicate stream_id {}", stream_id)
- }
- Err(pos) => self.vec.insert(pos, stream_id),
- }
+ let Err(pos) = self.vec.binary_search(&stream_id) else {
+ // element already in vector @ `pos`
+ panic!("Duplicate stream_id {stream_id}");
+ };
+ self.vec.insert(pos, stream_id);
}
+ /// # Panics
+ /// If the stream ID is not present.
pub fn remove(&mut self, stream_id: StreamId) {
- match self.vec.binary_search(&stream_id) {
- Ok(pos) => {
- self.vec.remove(pos);
- }
- Err(_) => {
- // element already in vector @ `pos`
- panic!("Missing stream_id {}", stream_id)
- }
- }
+ let Ok(pos) = self.vec.binary_search(&stream_id) else {
+ // element not in the vector
+ panic!("Missing stream_id {stream_id}");
+ };
+ self.vec.remove(pos);
}
}
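
The `OrderGroup::insert`/`remove` hunk above rewrites a two-arm `match` on `binary_search` into `let ... else`, so the happy path binds the position and the error arm just panics. A tiny sketch of the same pattern on a plain `Vec`, with illustrative names:

```rust
// Sketch of the let-else pattern adopted above: destructure the expected case
// and let the else arm diverge, instead of a match with a panic-only arm.
fn insert_sorted(ids: &mut Vec<u64>, id: u64) {
    let Err(pos) = ids.binary_search(&id) else {
        // The Ok arm means the element is already present.
        panic!("duplicate id {id}");
    };
    ids.insert(pos, id);
}

fn main() {
    let mut ids = vec![1, 4, 9];
    insert_sorted(&mut ids, 5);
    assert_eq!(ids, [1, 4, 5, 9]);
}
```
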
@@ -1579,16 +1699,16 @@ impl SendStreams {
// Iterate the map, but only those without fairness, then iterate
// OrderGroups, then iterate each group
- qdebug!("processing streams... unfair:");
+ qtrace!("processing streams... unfair:");
for stream in self.map.values_mut() {
if !stream.is_fair() {
- qdebug!(" {}", stream);
+ qtrace!(" {}", stream);
if !stream.write_frames_with_early_return(priority, builder, tokens, stats) {
break;
}
}
}
- qdebug!("fair streams:");
+ qtrace!("fair streams:");
let stream_ids = self.regular.iter().chain(
self.sendordered
.values_mut()
@@ -1598,9 +1718,9 @@ impl SendStreams {
for stream_id in stream_ids {
let stream = self.map.get_mut(&stream_id).unwrap();
if let Some(order) = stream.sendorder() {
- qdebug!(" {} ({})", stream_id, order)
+ qtrace!(" {} ({})", stream_id, order);
} else {
- qdebug!(" None")
+ qtrace!(" None");
}
if !stream.write_frames_with_early_return(priority, builder, tokens, stats) {
break;
@@ -1609,7 +1729,7 @@ impl SendStreams {
}
pub fn update_initial_limit(&mut self, remote: &TransportParameters) {
- for (id, ss) in self.map.iter_mut() {
+ for (id, ss) in &mut self.map {
let limit = if id.is_bidi() {
assert!(!id.is_remote_initiated(Role::Client));
remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE)
@@ -1640,55 +1760,391 @@ pub struct SendStreamRecoveryToken {
#[cfg(test)]
mod tests {
- use neqo_common::{event::Provider, hex_with_len, qtrace};
-
- use super::*;
- use crate::events::ConnectionEvent;
+ use std::{cell::RefCell, collections::VecDeque, rc::Rc};
+
+ use neqo_common::{event::Provider, hex_with_len, qtrace, Encoder};
+
+ use super::SendStreamRecoveryToken;
+ use crate::{
+ connection::{RetransmissionPriority, TransmissionPriority},
+ events::ConnectionEvent,
+ fc::SenderFlowControl,
+ packet::PacketBuilder,
+ recovery::{RecoveryToken, StreamRecoveryToken},
+ send_stream::{
+ RangeState, RangeTracker, SendStream, SendStreamState, SendStreams, TxBuffer,
+ },
+ stats::FrameStats,
+ ConnectionEvents, StreamId, SEND_BUFFER_SIZE,
+ };
fn connection_fc(limit: u64) -> Rc<RefCell<SenderFlowControl<()>>> {
Rc::new(RefCell::new(SenderFlowControl::new((), limit)))
}
#[test]
- fn test_mark_range() {
+ fn mark_acked_from_zero() {
let mut rt = RangeTracker::default();
// ranges can go from nothing->Sent if queued for retrans and then
// acks arrive
- rt.mark_range(5, 5, RangeState::Acked);
+ rt.mark_acked(5, 5);
assert_eq!(rt.highest_offset(), 10);
assert_eq!(rt.acked_from_zero(), 0);
- rt.mark_range(10, 4, RangeState::Acked);
+ rt.mark_acked(10, 4);
assert_eq!(rt.highest_offset(), 14);
assert_eq!(rt.acked_from_zero(), 0);
- rt.mark_range(0, 5, RangeState::Sent);
+ rt.mark_sent(0, 5);
assert_eq!(rt.highest_offset(), 14);
assert_eq!(rt.acked_from_zero(), 0);
- rt.mark_range(0, 5, RangeState::Acked);
+ rt.mark_acked(0, 5);
assert_eq!(rt.highest_offset(), 14);
assert_eq!(rt.acked_from_zero(), 14);
- rt.mark_range(12, 20, RangeState::Acked);
+ rt.mark_acked(12, 20);
assert_eq!(rt.highest_offset(), 32);
assert_eq!(rt.acked_from_zero(), 32);
// ack the lot
- rt.mark_range(0, 400, RangeState::Acked);
+ rt.mark_acked(0, 400);
assert_eq!(rt.highest_offset(), 400);
assert_eq!(rt.acked_from_zero(), 400);
// acked trumps sent
- rt.mark_range(0, 200, RangeState::Sent);
+ rt.mark_sent(0, 200);
assert_eq!(rt.highest_offset(), 400);
assert_eq!(rt.acked_from_zero(), 400);
}
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// SSS SSSAAASSS
+ /// + AAAAAAAAA
+ /// = SSSAAAAAAAAASS
+ /// ```
+ #[test]
+ fn mark_acked_1() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(0, 3);
+ rt.mark_sent(6, 3);
+ rt.mark_acked(9, 3);
+ rt.mark_sent(12, 3);
+
+ rt.mark_acked(3, 10);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(0, (3, RangeState::Sent));
+ canon.used.insert(3, (10, RangeState::Acked));
+ canon.used.insert(13, (2, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// SSS SSS AAA
+ /// + AAAAAAAAA
+ /// = SSAAAAAAAAAAAA
+ /// ```
+ #[test]
+ fn mark_acked_2() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(0, 3);
+ rt.mark_sent(6, 3);
+ rt.mark_acked(12, 3);
+
+ rt.mark_acked(2, 10);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(0, (2, RangeState::Sent));
+ canon.used.insert(2, (13, RangeState::Acked));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// AASSS AAAA
+ /// + AAAAAAAAA
+ /// = AAAAAAAAAAAA
+ /// ```
+ #[test]
+ fn mark_acked_3() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(1, 2);
+ rt.mark_sent(3, 3);
+ rt.mark_acked(8, 4);
+
+ rt.mark_acked(0, 9);
+
+ let canon = RangeTracker {
+ acked: 12,
+ ..RangeTracker::default()
+ };
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// SSS
+ /// + AAAA
+ /// = AAAASS
+ /// ```
+ #[test]
+ fn mark_acked_4() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(3, 3);
+
+ rt.mark_acked(0, 4);
+
+ let mut canon = RangeTracker {
+ acked: 4,
+ ..Default::default()
+ };
+ canon.used.insert(4, (2, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// AAAAAASSS
+ /// + AAA
+ /// = AAAAAASSS
+ /// ```
+ #[test]
+ fn mark_acked_5() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(0, 6);
+ rt.mark_sent(6, 3);
+
+ rt.mark_acked(3, 3);
+
+ let mut canon = RangeTracker {
+ acked: 6,
+ ..RangeTracker::default()
+ };
+ canon.used.insert(6, (3, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// AAA AAA AAA
+ /// + AAAAAAA
+ /// = AAAAAAAAAAAAA
+ /// ```
+ #[test]
+ fn mark_acked_6() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(3, 3);
+ rt.mark_acked(8, 3);
+ rt.mark_acked(13, 3);
+
+ rt.mark_acked(6, 7);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(3, (13, RangeState::Acked));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// AAA AAA
+ /// + AAA
+ /// = AAAAAAAA
+ /// ```
+ #[test]
+ fn mark_acked_7() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(3, 3);
+ rt.mark_acked(8, 3);
+
+ rt.mark_acked(6, 3);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(3, (8, RangeState::Acked));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// SSSSSSSS
+ /// + AAAA
+ /// = SSAAAASS
+ /// ```
+ #[test]
+ fn mark_acked_8() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(0, 8);
+
+ rt.mark_acked(2, 4);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(0, (2, RangeState::Sent));
+ canon.used.insert(2, (4, RangeState::Acked));
+ canon.used.insert(6, (2, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_acked` correctly handles all paths.
+ /// ```ignore
+ /// SSS
+ /// + AAA
+ /// = AAA SSS
+ /// ```
+ #[test]
+ fn mark_acked_9() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(5, 3);
+
+ rt.mark_acked(0, 3);
+
+ let mut canon = RangeTracker {
+ acked: 3,
+ ..Default::default()
+ };
+ canon.used.insert(5, (3, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_sent` correctly handles all paths.
+ /// ```ignore
+ /// AAA AAA SSS
+ /// + SSSSSSSSSSSS
+ /// = AAASSSAAASSSSSS
+ /// ```
+ #[test]
+ fn mark_sent_1() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(0, 3);
+ rt.mark_acked(6, 3);
+ rt.mark_sent(12, 3);
+
+ rt.mark_sent(0, 12);
+
+ let mut canon = RangeTracker {
+ acked: 3,
+ ..RangeTracker::default()
+ };
+ canon.used.insert(3, (3, RangeState::Sent));
+ canon.used.insert(6, (3, RangeState::Acked));
+ canon.used.insert(9, (6, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_sent` correctly handles all paths.
+ /// ```ignore
+ /// AAASS AAA S SSSS
+ /// + SSSSSSSSSSSSS
+ /// = AAASSSAAASSSSSSS
+ /// ```
+ #[test]
+ fn mark_sent_2() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(0, 3);
+ rt.mark_sent(3, 2);
+ rt.mark_acked(6, 3);
+ rt.mark_sent(10, 1);
+ rt.mark_sent(12, 4);
+
+ rt.mark_sent(0, 13);
+
+ let mut canon = RangeTracker {
+ acked: 3,
+ ..RangeTracker::default()
+ };
+ canon.used.insert(3, (3, RangeState::Sent));
+ canon.used.insert(6, (3, RangeState::Acked));
+ canon.used.insert(9, (7, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_sent` correctly handles all paths.
+ /// ```ignore
+ /// AAA AAA
+ /// + SSSS
+ /// = AAASSAAA
+ /// ```
+ #[test]
+ fn mark_sent_3() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(0, 3);
+ rt.mark_acked(5, 3);
+
+ rt.mark_sent(2, 4);
+
+ let mut canon = RangeTracker {
+ acked: 3,
+ ..RangeTracker::default()
+ };
+ canon.used.insert(3, (2, RangeState::Sent));
+ canon.used.insert(5, (3, RangeState::Acked));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_sent` correctly handles all paths.
+ /// ```ignore
+ /// SSS AAA SS
+ /// + SSSSSSSS
+ /// = SSSSSAAASSSS
+ /// ```
+ #[test]
+ fn mark_sent_4() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(0, 3);
+ rt.mark_acked(5, 3);
+ rt.mark_sent(10, 2);
+
+ rt.mark_sent(2, 8);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(0, (5, RangeState::Sent));
+ canon.used.insert(5, (3, RangeState::Acked));
+ canon.used.insert(8, (4, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_sent` correctly handles all paths.
+ /// ```ignore
+ /// AAA
+ /// + SSSSSS
+ /// = AAASSS
+ /// ```
+ #[test]
+ fn mark_sent_5() {
+ let mut rt = RangeTracker::default();
+ rt.mark_acked(3, 3);
+
+ rt.mark_sent(3, 6);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(3, (3, RangeState::Acked));
+ canon.used.insert(6, (3, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
+ /// Check that `mark_sent` correctly handles all paths.
+ /// ```ignore
+ /// SSSSS
+ /// + SSS
+ /// = SSSSS
+ /// ```
+ #[test]
+ fn mark_sent_6() {
+ let mut rt = RangeTracker::default();
+ rt.mark_sent(0, 5);
+
+ rt.mark_sent(1, 3);
+
+ let mut canon = RangeTracker::default();
+ canon.used.insert(0, (5, RangeState::Sent));
+ assert_eq!(rt, canon);
+ }
+
#[test]
fn unmark_sent_start() {
let mut rt = RangeTracker::default();
- rt.mark_range(0, 5, RangeState::Sent);
+ rt.mark_sent(0, 5);
assert_eq!(rt.highest_offset(), 5);
assert_eq!(rt.acked_from_zero(), 0);
@@ -1702,13 +2158,13 @@ mod tests {
fn unmark_sent_middle() {
let mut rt = RangeTracker::default();
- rt.mark_range(0, 5, RangeState::Acked);
+ rt.mark_acked(0, 5);
assert_eq!(rt.highest_offset(), 5);
assert_eq!(rt.acked_from_zero(), 5);
- rt.mark_range(5, 5, RangeState::Sent);
+ rt.mark_sent(5, 5);
assert_eq!(rt.highest_offset(), 10);
assert_eq!(rt.acked_from_zero(), 5);
- rt.mark_range(10, 5, RangeState::Acked);
+ rt.mark_acked(10, 5);
assert_eq!(rt.highest_offset(), 15);
assert_eq!(rt.acked_from_zero(), 5);
assert_eq!(rt.first_unmarked_range(), (15, None));
@@ -1723,10 +2179,10 @@ mod tests {
fn unmark_sent_end() {
let mut rt = RangeTracker::default();
- rt.mark_range(0, 5, RangeState::Acked);
+ rt.mark_acked(0, 5);
assert_eq!(rt.highest_offset(), 5);
assert_eq!(rt.acked_from_zero(), 5);
- rt.mark_range(5, 5, RangeState::Sent);
+ rt.mark_sent(5, 5);
assert_eq!(rt.highest_offset(), 10);
assert_eq!(rt.acked_from_zero(), 5);
assert_eq!(rt.first_unmarked_range(), (10, None));
@@ -1752,11 +2208,11 @@ mod tests {
}
#[test]
- fn test_unmark_range() {
+ fn unmark_range() {
let mut rt = RangeTracker::default();
- rt.mark_range(5, 5, RangeState::Acked);
- rt.mark_range(10, 5, RangeState::Sent);
+ rt.mark_acked(5, 5);
+ rt.mark_sent(10, 5);
// Should unmark sent but not acked range
rt.unmark_range(7, 6);
@@ -1772,11 +2228,11 @@ mod tests {
(&13, &(2, RangeState::Sent))
);
assert!(rt.used.iter().nth(2).is_none());
- rt.mark_range(0, 5, RangeState::Sent);
+ rt.mark_sent(0, 5);
let res = rt.first_unmarked_range();
assert_eq!(res, (10, Some(3)));
- rt.mark_range(10, 3, RangeState::Sent);
+ rt.mark_sent(10, 3);
let res = rt.first_unmarked_range();
assert_eq!(res, (15, None));
@@ -1790,14 +2246,15 @@ mod tests {
assert_eq!(txb.avail(), SEND_BUFFER_SIZE);
// Fill the buffer
- assert_eq!(txb.send(&[1; SEND_BUFFER_SIZE * 2]), SEND_BUFFER_SIZE);
+ let big_buf = vec![1; SEND_BUFFER_SIZE * 2];
+ assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE);
assert!(matches!(txb.next_bytes(),
- Some((0, x)) if x.len()==SEND_BUFFER_SIZE
+ Some((0, x)) if x.len() == SEND_BUFFER_SIZE
&& x.iter().all(|ch| *ch == 1)));
// Mark almost all as sent. Get what's left
let one_byte_from_end = SEND_BUFFER_SIZE as u64 - 1;
- txb.mark_as_sent(0, one_byte_from_end as usize);
+ txb.mark_as_sent(0, usize::try_from(one_byte_from_end).unwrap());
assert!(matches!(txb.next_bytes(),
Some((start, x)) if x.len() == 1
&& start == one_byte_from_end
@@ -1826,14 +2283,14 @@ mod tests {
// Contig acked range at start means it can be removed from buffer
// Impl of vecdeque should now result in a split buffer when more data
// is sent
- txb.mark_as_acked(0, five_bytes_from_end as usize);
+ txb.mark_as_acked(0, usize::try_from(five_bytes_from_end).unwrap());
assert_eq!(txb.send(&[2; 30]), 30);
// Just get 5 even though there is more
assert!(matches!(txb.next_bytes(),
Some((start, x)) if x.len() == 5
&& start == five_bytes_from_end
&& x.iter().all(|ch| *ch == 1)));
- assert_eq!(txb.retired, five_bytes_from_end);
+ assert_eq!(txb.retired(), five_bytes_from_end);
assert_eq!(txb.buffered(), 35);
// Marking that bit as sent should let the last contig bit be returned
@@ -1852,7 +2309,8 @@ mod tests {
assert_eq!(txb.avail(), SEND_BUFFER_SIZE);
// Fill the buffer
- assert_eq!(txb.send(&[1; SEND_BUFFER_SIZE * 2]), SEND_BUFFER_SIZE);
+ let big_buf = vec![1; SEND_BUFFER_SIZE * 2];
+ assert_eq!(txb.send(&big_buf), SEND_BUFFER_SIZE);
assert!(matches!(txb.next_bytes(),
Some((0, x)) if x.len()==SEND_BUFFER_SIZE
&& x.iter().all(|ch| *ch == 1)));
@@ -1860,7 +2318,7 @@ mod tests {
// As above
let forty_bytes_from_end = SEND_BUFFER_SIZE as u64 - 40;
- txb.mark_as_acked(0, forty_bytes_from_end as usize);
+ txb.mark_as_acked(0, usize::try_from(forty_bytes_from_end).unwrap());
assert!(matches!(txb.next_bytes(),
Some((start, x)) if x.len() == 40
&& start == forty_bytes_from_end
@@ -1888,7 +2346,7 @@ mod tests {
// Ack entire first slice and into second slice
let ten_bytes_past_end = SEND_BUFFER_SIZE as u64 + 10;
- txb.mark_as_acked(0, ten_bytes_past_end as usize);
+ txb.mark_as_acked(0, usize::try_from(ten_bytes_past_end).unwrap());
// Get up to marked range A
assert!(matches!(txb.next_bytes(),
@@ -1910,7 +2368,7 @@ mod tests {
}
#[test]
- fn test_stream_tx() {
+ fn stream_tx() {
let conn_fc = connection_fc(4096);
let conn_events = ConnectionEvents::default();
@@ -1926,22 +2384,23 @@ mod tests {
}
// Should hit stream flow control limit before filling up send buffer
- let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap();
+ let big_buf = vec![4; SEND_BUFFER_SIZE + 100];
+ let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap();
assert_eq!(res, 1024 - 100);
// should do nothing, max stream data already 1024
s.set_max_stream_data(1024);
- let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap();
+ let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap();
assert_eq!(res, 0);
// should now hit the conn flow control (4096)
s.set_max_stream_data(1_048_576);
- let res = s.send(&[4; SEND_BUFFER_SIZE]).unwrap();
+ let res = s.send(&big_buf[..SEND_BUFFER_SIZE]).unwrap();
assert_eq!(res, 3072);
// should now hit the tx buffer size
conn_fc.borrow_mut().update(SEND_BUFFER_SIZE as u64);
- let res = s.send(&[4; SEND_BUFFER_SIZE + 100]).unwrap();
+ let res = s.send(&big_buf).unwrap();
assert_eq!(res, SEND_BUFFER_SIZE - 4096);
// TODO(agrover@mozilla.com): test ooo acks somehow
@@ -2012,10 +2471,8 @@ mod tests {
// tx buffer size.
assert_eq!(s.avail(), SEND_BUFFER_SIZE - 4);
- assert_eq!(
- s.send(&[b'a'; SEND_BUFFER_SIZE]).unwrap(),
- SEND_BUFFER_SIZE - 4
- );
+ let big_buf = vec![b'a'; SEND_BUFFER_SIZE];
+ assert_eq!(s.send(&big_buf).unwrap(), SEND_BUFFER_SIZE - 4);
// No event because still blocked by tx buffer full
s.set_max_stream_data(2_000_000_000);
@@ -2395,8 +2852,7 @@ mod tests {
);
let mut send_buf = TxBuffer::new();
- send_buf.retired = u64::try_from(offset).unwrap();
- send_buf.ranges.mark_range(0, offset, RangeState::Acked);
+ send_buf.ranges.mark_acked(0, offset);
let mut fc = SenderFlowControl::new(StreamId::from(stream), MAX_VARINT);
fc.consume(offset);
let conn_fc = Rc::new(RefCell::new(SenderFlowControl::new((), MAX_VARINT)));
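
Finally, `TxBuffer` above stops carrying its own `retired` counter and derives it from `RangeTracker::acked_from_zero()`, so `mark_as_acked` only has to diff `retired()` before and after the ack to know how many bytes to drop from the front of the deque. A simplified sketch of that derive-rather-than-duplicate bookkeeping (names and types are stand-ins, and `drain` replaces the rotate/truncate the patch uses):

```rust
use std::collections::VecDeque;

// Sketch only: derive the retired count from a single source of truth instead
// of keeping a second counter that must be kept in sync.
#[derive(Default)]
struct Buffer {
    acked_from_zero: u64,   // stands in for RangeTracker::acked_from_zero()
    send_buf: VecDeque<u8>, // unacknowledged bytes
}

impl Buffer {
    fn retired(&self) -> u64 {
        self.acked_from_zero
    }

    fn mark_as_acked(&mut self, new_acked_from_zero: u64) {
        let prev = self.retired();
        self.acked_from_zero = new_acked_from_zero;
        // Newly retired bytes leave the buffer; nothing else to bookkeep.
        let newly_retired = usize::try_from(self.retired() - prev).unwrap();
        self.send_buf.drain(..newly_retired);
    }
}

fn main() {
    let mut b = Buffer::default();
    b.send_buf.extend([1u8, 2, 3, 4, 5]);
    b.mark_as_acked(2);
    assert_eq!(b.retired(), 2);
    assert_eq!(b.send_buf.iter().copied().collect::<Vec<_>>(), vec![3, 4, 5]);
}
```
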
diff --git a/third_party/rust/neqo-transport/src/sender.rs b/third_party/rust/neqo-transport/src/sender.rs
index 9a00dfc7a7..3a54851533 100644
--- a/third_party/rust/neqo-transport/src/sender.rs
+++ b/third_party/rust/neqo-transport/src/sender.rs
@@ -5,7 +5,7 @@
// except according to those terms.
// Congestion control
-#![deny(clippy::pedantic)]
+
#![allow(clippy::module_name_repetitions)]
use std::{
diff --git a/third_party/rust/neqo-transport/src/server.rs b/third_party/rust/neqo-transport/src/server.rs
index 12a7d2f9e0..96a6244ef1 100644
--- a/third_party/rust/neqo-transport/src/server.rs
+++ b/third_party/rust/neqo-transport/src/server.rs
@@ -43,7 +43,7 @@ pub enum InitialResult {
Retry(Vec<u8>),
}
-/// MIN_INITIAL_PACKET_SIZE is the smallest packet that can be used to establish
+/// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish
/// a new connection across all QUIC versions this server supports.
const MIN_INITIAL_PACKET_SIZE: usize = 1200;
/// The size of timer buckets. This is higher than the actual timer granularity
@@ -168,7 +168,7 @@ pub struct Server {
/// the same key are routed to the connection that was first accepted.
/// This is cleared out when the connection is closed or established.
active_attempts: HashMap<AttemptKey, StateRef>,
- /// All connections, keyed by ConnectionId.
+ /// All connections, keyed by `ConnectionId`.
connections: ConnectionTableRef,
/// The connections that have new events.
active: HashSet<ActiveConnectionRef>,
@@ -195,6 +195,8 @@ impl Server {
/// OK.
/// * `cid_generator` is responsible for generating connection IDs and parsing them; connection
/// IDs produced by the manager cannot be zero-length.
+ /// # Errors
+ /// When address validation state cannot be created.
pub fn new(
now: Instant,
certs: &[impl AsRef<str>],
@@ -240,6 +242,8 @@ impl Server {
self.ciphers = Vec::from(ciphers.as_ref());
}
+ /// # Errors
+ /// When the configuration is invalid.
pub fn enable_ech(
&mut self,
config: u8,
@@ -251,6 +255,7 @@ impl Server {
Ok(())
}
+ #[must_use]
pub fn ech_config(&self) -> &[u8] {
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
}
@@ -262,7 +267,7 @@ impl Server {
fn process_connection(
&mut self,
- c: StateRef,
+ c: &StateRef,
dgram: Option<&Datagram>,
now: Instant,
) -> Option<Datagram> {
@@ -271,24 +276,24 @@ impl Server {
match out {
Output::Datagram(_) => {
qtrace!([self], "Sending packet, added to waiting connections");
- self.waiting.push_back(Rc::clone(&c));
+ self.waiting.push_back(Rc::clone(c));
}
Output::Callback(delay) => {
let next = now + delay;
if next != c.borrow().last_timer {
qtrace!([self], "Change timer to {:?}", next);
- self.remove_timer(&c);
+ self.remove_timer(c);
c.borrow_mut().last_timer = next;
- self.timers.add(next, Rc::clone(&c));
+ self.timers.add(next, Rc::clone(c));
}
}
Output::None => {
- self.remove_timer(&c);
+ self.remove_timer(c);
}
}
if c.borrow().has_events() {
qtrace!([self], "Connection active: {:?}", c);
- self.active.insert(ActiveConnectionRef { c: Rc::clone(&c) });
+ self.active.insert(ActiveConnectionRef { c: Rc::clone(c) });
}
if *c.borrow().state() > State::Handshaking {
@@ -302,13 +307,13 @@ impl Server {
c.borrow_mut().set_qlog(NeqoQlog::disabled());
self.connections
.borrow_mut()
- .retain(|_, v| !Rc::ptr_eq(v, &c));
+ .retain(|_, v| !Rc::ptr_eq(v, c));
}
out.dgram()
}
fn connection(&self, cid: ConnectionIdRef) -> Option<StateRef> {
- self.connections.borrow().get(&cid[..]).map(Rc::clone)
+ self.connections.borrow().get(&cid[..]).cloned()
}
fn handle_initial(
@@ -387,7 +392,7 @@ impl Server {
attempt_key
);
let c = Rc::clone(c);
- self.process_connection(c, Some(dgram), now)
+ self.process_connection(&c, Some(dgram), now)
} else {
self.accept_connection(attempt_key, initial, dgram, orig_dcid, now)
}
@@ -395,9 +400,9 @@ impl Server {
fn create_qlog_trace(&self, odcid: ConnectionIdRef<'_>) -> NeqoQlog {
if let Some(qlog_dir) = &self.qlog_dir {
- let mut qlog_path = qlog_dir.to_path_buf();
+ let mut qlog_path = qlog_dir.clone();
- qlog_path.push(format!("{}.qlog", odcid));
+ qlog_path.push(format!("{odcid}.qlog"));
// The original DCID is chosen by the client. Using create_new()
// prevents attackers from overwriting existing logs.
@@ -456,9 +461,9 @@ impl Server {
}
if let Some(odcid) = orig_dcid {
// There was a retry, so set the connection IDs for.
- c.set_retry_cids(odcid, initial.src_cid, initial.dst_cid);
+ c.set_retry_cids(&odcid, initial.src_cid, &initial.dst_cid);
}
- c.set_validation(Rc::clone(&self.address_validation));
+ c.set_validation(&self.address_validation);
c.set_qlog(self.create_qlog_trace(attempt_key.odcid.as_cid_ref()));
if let Some(cfg) = &self.ech_config {
if c.server_enable_ech(cfg.config, &cfg.public_name, &cfg.sk, &cfg.pk)
@@ -505,10 +510,10 @@ impl Server {
last_timer: now,
active_attempt: Some(attempt_key.clone()),
}));
- cid_mgr.borrow_mut().set_connection(Rc::clone(&c));
+ cid_mgr.borrow_mut().set_connection(&c);
let previous_attempt = self.active_attempts.insert(attempt_key, Rc::clone(&c));
debug_assert!(previous_attempt.is_none());
- self.process_connection(c, Some(dgram), now)
+ self.process_connection(&c, Some(dgram), now)
}
Err(e) => {
qwarn!([self], "Unable to create connection");
@@ -517,7 +522,7 @@ impl Server {
&mut self.create_qlog_trace(attempt_key.odcid.as_cid_ref()),
self.conn_params.get_versions().all(),
initial.version.wire_version(),
- )
+ );
}
None
}
@@ -544,7 +549,7 @@ impl Server {
attempt_key
);
let c = Rc::clone(c);
- self.process_connection(c, Some(dgram), now)
+ self.process_connection(&c, Some(dgram), now)
} else {
qdebug!([self], "Dropping 0-RTT for unknown connection");
None
@@ -564,7 +569,7 @@ impl Server {
// Finding an existing connection. Should be the most common case.
if let Some(c) = self.connection(packet.dcid()) {
- return self.process_connection(c, Some(dgram), now);
+ return self.process_connection(&c, Some(dgram), now);
}
if packet.packet_type() == PacketType::Short {
@@ -637,13 +642,13 @@ impl Server {
fn process_next_output(&mut self, now: Instant) -> Option<Datagram> {
qtrace!([self], "No packet to send, look at waiting connections");
while let Some(c) = self.waiting.pop_front() {
- if let Some(d) = self.process_connection(c, None, now) {
+ if let Some(d) = self.process_connection(&c, None, now) {
return Some(d);
}
}
qtrace!([self], "No packet to send still, run timers");
while let Some(c) = self.timers.take_next(now) {
- if let Some(d) = self.process_connection(c, None, now) {
+ if let Some(d) = self.process_connection(&c, None, now) {
return Some(d);
}
}
@@ -684,7 +689,7 @@ impl Server {
mem::take(&mut self.active).into_iter().collect()
}
- pub fn add_to_waiting(&mut self, c: ActiveConnectionRef) {
+ pub fn add_to_waiting(&mut self, c: &ActiveConnectionRef) {
self.waiting.push_back(c.connection());
}
}
@@ -695,6 +700,7 @@ pub struct ActiveConnectionRef {
}
impl ActiveConnectionRef {
+ #[must_use]
pub fn borrow(&self) -> impl Deref<Target = Connection> + '_ {
std::cell::Ref::map(self.c.borrow(), |c| &c.c)
}
@@ -703,6 +709,7 @@ impl ActiveConnectionRef {
std::cell::RefMut::map(self.c.borrow_mut(), |c| &mut c.c)
}
+ #[must_use]
pub fn connection(&self) -> StateRef {
Rc::clone(&self.c)
}
@@ -731,13 +738,13 @@ struct ServerConnectionIdGenerator {
}
impl ServerConnectionIdGenerator {
- pub fn set_connection(&mut self, c: StateRef) {
+ pub fn set_connection(&mut self, c: &StateRef) {
let saved = std::mem::replace(&mut self.saved_cids, Vec::with_capacity(0));
for cid in saved {
qtrace!("ServerConnectionIdGenerator inserting saved cid {}", cid);
- self.insert_cid(cid, Rc::clone(&c));
+ self.insert_cid(cid, Rc::clone(c));
}
- self.c = Rc::downgrade(&c);
+ self.c = Rc::downgrade(c);
}
fn insert_cid(&mut self, cid: ConnectionId, rc: StateRef) {
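The server.rs hunks above change several helpers to take `&StateRef` (a shared, reference-counted handle) instead of a `StateRef` by value, cloning the `Rc` only where a reference is actually stored. A small self-contained sketch of that borrowing pattern, with a stand-in state type rather than neqo's real one:

```rust
use std::{cell::RefCell, rc::Rc};

type StateRef = Rc<RefCell<u32>>; // stand-in for the real connection state

// Taking &StateRef lets the caller keep ownership; the Rc is cloned only
// at the point where another reference is retained (illustrative sketch).
fn process(c: &StateRef, waiting: &mut Vec<StateRef>) {
    waiting.push(Rc::clone(c));
    *c.borrow_mut() += 1;
}

fn main() {
    let c: StateRef = Rc::new(RefCell::new(0));
    let mut waiting = Vec::new();
    process(&c, &mut waiting);
    assert_eq!(*c.borrow(), 1);
    // Two owners now: the caller and the waiting queue.
    assert_eq!(Rc::strong_count(&c), 2);
}
```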
diff --git a/third_party/rust/neqo-transport/src/stats.rs b/third_party/rust/neqo-transport/src/stats.rs
index d6c7a911f9..9eff503dcf 100644
--- a/third_party/rust/neqo-transport/src/stats.rs
+++ b/third_party/rust/neqo-transport/src/stats.rs
@@ -5,7 +5,6 @@
// except according to those terms.
// Tracking of some useful statistics.
-#![deny(clippy::pedantic)]
use std::{
cell::RefCell,
diff --git a/third_party/rust/neqo-transport/src/stream_id.rs b/third_party/rust/neqo-transport/src/stream_id.rs
index f3b07b86a8..8dbe2dcfbc 100644
--- a/third_party/rust/neqo-transport/src/stream_id.rs
+++ b/third_party/rust/neqo-transport/src/stream_id.rs
@@ -20,10 +20,12 @@ pub enum StreamType {
pub struct StreamId(u64);
impl StreamId {
+ #[must_use]
pub const fn new(id: u64) -> Self {
Self(id)
}
+ #[must_use]
pub fn init(stream_type: StreamType, role: Role) -> Self {
let type_val = match stream_type {
StreamType::BiDi => 0,
@@ -32,18 +34,22 @@ impl StreamId {
Self(type_val + Self::role_bit(role))
}
+ #[must_use]
pub fn as_u64(self) -> u64 {
self.0
}
+ #[must_use]
pub fn is_bidi(self) -> bool {
self.as_u64() & 0x02 == 0
}
+ #[must_use]
pub fn is_uni(self) -> bool {
!self.is_bidi()
}
+ #[must_use]
pub fn stream_type(self) -> StreamType {
if self.is_bidi() {
StreamType::BiDi
@@ -52,14 +58,17 @@ impl StreamId {
}
}
+ #[must_use]
pub fn is_client_initiated(self) -> bool {
self.as_u64() & 0x01 == 0
}
+ #[must_use]
pub fn is_server_initiated(self) -> bool {
!self.is_client_initiated()
}
+ #[must_use]
pub fn role(self) -> Role {
if self.is_client_initiated() {
Role::Client
@@ -68,6 +77,7 @@ impl StreamId {
}
}
+ #[must_use]
pub fn is_self_initiated(self, my_role: Role) -> bool {
match my_role {
Role::Client if self.is_client_initiated() => true,
@@ -76,14 +86,17 @@ impl StreamId {
}
}
+ #[must_use]
pub fn is_remote_initiated(self, my_role: Role) -> bool {
!self.is_self_initiated(my_role)
}
+ #[must_use]
pub fn is_send_only(self, my_role: Role) -> bool {
self.is_uni() && self.is_self_initiated(my_role)
}
+ #[must_use]
pub fn is_recv_only(self, my_role: Role) -> bool {
self.is_uni() && self.is_remote_initiated(my_role)
}
@@ -93,6 +106,7 @@ impl StreamId {
}
/// This returns a bit that is shared by all streams created by this role.
+ #[must_use]
pub fn role_bit(role: Role) -> u64 {
match role {
Role::Server => 1,
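The stream_id.rs hunks above consist almost entirely of `#[must_use]` annotations on side-effect-free getters. A tiny sketch of what the attribute buys, using an illustrative type rather than neqo's:

```rust
// Sketch: #[must_use] makes the compiler emit an `unused_must_use` warning
// when a caller discards the result of a pure accessor.
#[derive(Clone, Copy)]
struct StreamId(u64);

impl StreamId {
    #[must_use]
    const fn as_u64(self) -> u64 {
        self.0
    }

    #[must_use]
    const fn is_bidi(self) -> bool {
        self.as_u64() & 0x02 == 0
    }
}

fn main() {
    let id = StreamId(0);
    // `id.is_bidi();` on its own line would now warn; using the value is fine.
    assert!(id.is_bidi());
    assert_eq!(id.as_u64(), 0);
}
```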
diff --git a/third_party/rust/neqo-transport/src/streams.rs b/third_party/rust/neqo-transport/src/streams.rs
index 7cbb29ce02..d8662afa3b 100644
--- a/third_party/rust/neqo-transport/src/streams.rs
+++ b/third_party/rust/neqo-transport/src/streams.rs
@@ -95,6 +95,7 @@ impl Streams {
}
}
+ #[must_use]
pub fn is_stream_id_allowed(&self, stream_id: StreamId) -> bool {
self.remote_stream_limits[stream_id.stream_type()].is_allowed(stream_id)
}
@@ -118,7 +119,9 @@ impl Streams {
self.local_stream_limits = LocalStreamLimits::new(self.role);
}
- pub fn input_frame(&mut self, frame: Frame, stats: &mut FrameStats) -> Res<()> {
+ /// # Errors
+ /// When the frame is invalid.
+ pub fn input_frame(&mut self, frame: &Frame, stats: &mut FrameStats) -> Res<()> {
match frame {
Frame::ResetStream {
stream_id,
@@ -126,8 +129,8 @@ impl Streams {
final_size,
} => {
stats.reset_stream += 1;
- if let (_, Some(rs)) = self.obtain_stream(stream_id)? {
- rs.reset(application_error_code, final_size)?;
+ if let (_, Some(rs)) = self.obtain_stream(*stream_id)? {
+ rs.reset(*application_error_code, *final_size)?;
}
}
Frame::StopSending {
@@ -136,9 +139,9 @@ impl Streams {
} => {
stats.stop_sending += 1;
self.events
- .send_stream_stop_sending(stream_id, application_error_code);
- if let (Some(ss), _) = self.obtain_stream(stream_id)? {
- ss.reset(application_error_code);
+ .send_stream_stop_sending(*stream_id, *application_error_code);
+ if let (Some(ss), _) = self.obtain_stream(*stream_id)? {
+ ss.reset(*application_error_code);
}
}
Frame::Stream {
@@ -149,13 +152,13 @@ impl Streams {
..
} => {
stats.stream += 1;
- if let (_, Some(rs)) = self.obtain_stream(stream_id)? {
- rs.inbound_stream_frame(fin, offset, data)?;
+ if let (_, Some(rs)) = self.obtain_stream(*stream_id)? {
+ rs.inbound_stream_frame(*fin, *offset, data)?;
}
}
Frame::MaxData { maximum_data } => {
stats.max_data += 1;
- self.handle_max_data(maximum_data);
+ self.handle_max_data(*maximum_data);
}
Frame::MaxStreamData {
stream_id,
@@ -163,12 +166,12 @@ impl Streams {
} => {
qtrace!(
"Stream {} Received MaxStreamData {}",
- stream_id,
- maximum_stream_data
+ *stream_id,
+ *maximum_stream_data
);
stats.max_stream_data += 1;
- if let (Some(ss), _) = self.obtain_stream(stream_id)? {
- ss.set_max_stream_data(maximum_stream_data);
+ if let (Some(ss), _) = self.obtain_stream(*stream_id)? {
+ ss.set_max_stream_data(*maximum_stream_data);
}
}
Frame::MaxStreams {
@@ -176,7 +179,7 @@ impl Streams {
maximum_streams,
} => {
stats.max_streams += 1;
- self.handle_max_streams(stream_type, maximum_streams);
+ self.handle_max_streams(*stream_type, *maximum_streams);
}
Frame::DataBlocked { data_limit } => {
// Should never happen since we set data limit to max
@@ -193,7 +196,7 @@ impl Streams {
return Err(Error::StreamStateError);
}
- if let (_, Some(rs)) = self.obtain_stream(stream_id)? {
+ if let (_, Some(rs)) = self.obtain_stream(*stream_id)? {
rs.send_flowc_update();
}
}
@@ -401,6 +404,8 @@ impl Streams {
/// Get or make a stream, and implicitly open additional streams as
/// indicated by its stream id.
+ /// # Errors
+ /// When the stream cannot be created due to stream limits.
pub fn obtain_stream(
&mut self,
stream_id: StreamId,
@@ -412,14 +417,20 @@ impl Streams {
))
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn set_sendorder(&mut self, stream_id: StreamId, sendorder: Option<SendOrder>) -> Res<()> {
self.send.set_sendorder(stream_id, sendorder)
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn set_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> {
self.send.set_fairness(stream_id, fairness)
}
+ /// # Errors
+ /// When a stream cannot be created, which might be temporary.
pub fn stream_create(&mut self, st: StreamType) -> Res<StreamId> {
match self.local_stream_limits.take_stream_id(st) {
None => Err(Error::StreamLimitError),
@@ -525,18 +536,26 @@ impl Streams {
}
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn get_send_stream_mut(&mut self, stream_id: StreamId) -> Res<&mut SendStream> {
self.send.get_mut(stream_id)
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn get_send_stream(&self, stream_id: StreamId) -> Res<&SendStream> {
self.send.get(stream_id)
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn get_recv_stream_mut(&mut self, stream_id: StreamId) -> Res<&mut RecvStream> {
self.recv.get_mut(stream_id)
}
+ /// # Errors
+ /// When the stream does not exist.
pub fn keep_alive(&mut self, stream_id: StreamId, keep: bool) -> Res<()> {
self.recv.keep_alive(stream_id, keep)
}
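The streams.rs hunks above switch `input_frame` to take `&Frame`, so the match binds fields by reference and `Copy` values are dereferenced at the use site (`*stream_id`, `*maximum_data`, and so on). A minimal sketch of that borrow-and-deref pattern with a stand-in enum, not neqo's real `Frame`:

```rust
// Sketch: matching on a borrowed enum keeps the value usable afterwards;
// Copy fields bind as references and are dereferenced where used.
enum Frame {
    MaxData { maximum_data: u64 },
    Padding,
}

fn handle(frame: &Frame) -> u64 {
    match frame {
        Frame::MaxData { maximum_data } => *maximum_data,
        Frame::Padding => 0,
    }
}

fn main() {
    let f = Frame::MaxData { maximum_data: 42 };
    assert_eq!(handle(&f), 42);
    // The frame was only borrowed, so it can still be inspected here.
    assert!(matches!(f, Frame::MaxData { .. }));
}
```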
diff --git a/third_party/rust/neqo-transport/src/tparams.rs b/third_party/rust/neqo-transport/src/tparams.rs
index 1297829094..eada56cc4c 100644
--- a/third_party/rust/neqo-transport/src/tparams.rs
+++ b/third_party/rust/neqo-transport/src/tparams.rs
@@ -9,7 +9,6 @@
use std::{
cell::RefCell,
collections::HashMap,
- convert::TryFrom,
net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6},
rc::Rc,
};
@@ -88,6 +87,8 @@ impl PreferredAddress {
}
/// A generic version of `new()` for testing.
+ /// # Panics
+ /// When the addresses are the wrong type.
#[must_use]
#[cfg(test)]
pub fn new_any(v4: Option<std::net::SocketAddr>, v6: Option<std::net::SocketAddr>) -> Self {
@@ -231,7 +232,7 @@ impl TransportParameter {
if v == 0 {
Err(Error::TransportParameterError)
} else {
- Ok(v as WireVersion)
+ Ok(WireVersion::try_from(v)?)
}
}
@@ -353,6 +354,9 @@ impl TransportParameters {
}
// Get an integer type or a default.
+ /// # Panics
+ /// When the transport parameter isn't recognized as being an integer.
+ #[must_use]
pub fn get_integer(&self, tp: TransportParameterId) -> u64 {
let default = match tp {
IDLE_TIMEOUT
@@ -378,6 +382,8 @@ impl TransportParameters {
}
// Set an integer type or a default.
+ /// # Panics
+ /// When the transport parameter isn't recognized as being an integer.
pub fn set_integer(&mut self, tp: TransportParameterId, value: u64) {
match tp {
IDLE_TIMEOUT
@@ -399,6 +405,9 @@ impl TransportParameters {
}
}
+ /// # Panics
+ /// When the transport parameter isn't recognized as containing bytes.
+ #[must_use]
pub fn get_bytes(&self, tp: TransportParameterId) -> Option<&[u8]> {
match tp {
ORIGINAL_DESTINATION_CONNECTION_ID
@@ -415,6 +424,8 @@ impl TransportParameters {
}
}
+ /// # Panics
+ /// When the transport parameter isn't recognized as containing bytes.
pub fn set_bytes(&mut self, tp: TransportParameterId, value: Vec<u8>) {
match tp {
ORIGINAL_DESTINATION_CONNECTION_ID
@@ -427,6 +438,8 @@ impl TransportParameters {
}
}
+ /// # Panics
+ /// When the transport parameter isn't recognized as being empty.
pub fn set_empty(&mut self, tp: TransportParameterId) {
match tp {
DISABLE_MIGRATION | GREASE_QUIC_BIT => {
@@ -437,11 +450,14 @@ impl TransportParameters {
}
/// Set version information.
+ /// # Panics
+ /// Never. But rust doesn't know that.
pub fn set_versions(&mut self, role: Role, versions: &VersionConfig) {
- let rbuf = random(4);
+ let rbuf = random::<4>();
let mut other = Vec::with_capacity(versions.all().len() + 1);
let mut dec = Decoder::new(&rbuf);
- let grease = (dec.decode_uint(4).unwrap() as u32) & 0xf0f0_f0f0 | 0x0a0a_0a0a;
+ let grease =
+ (u32::try_from(dec.decode_uint(4).unwrap()).unwrap()) & 0xf0f0_f0f0 | 0x0a0a_0a0a;
other.push(grease);
for &v in versions.all() {
if role == Role::Client && !versions.initial().is_compatible(v) {
@@ -467,6 +483,10 @@ impl TransportParameters {
}
}
+ /// # Panics
+ /// When the indicated transport parameter is present but NOT empty.
+ /// This should not happen if the parsing code in `TransportParameter::decode` is correct.
+ #[must_use]
pub fn get_empty(&self, tipe: TransportParameterId) -> bool {
match self.params.get(&tipe) {
None => false,
@@ -568,6 +588,7 @@ pub struct TransportParametersHandler {
}
impl TransportParametersHandler {
+ #[must_use]
pub fn new(role: Role, versions: VersionConfig) -> Self {
let mut local = TransportParameters::default();
local.set_versions(role, &versions);
@@ -588,6 +609,10 @@ impl TransportParametersHandler {
self.local.set_versions(self.role, &self.versions);
}
+ /// # Panics
+ /// When this function is called before the peer has provided transport parameters.
+ /// Do not call this function if you are not also able to send data.
+ #[must_use]
pub fn remote(&self) -> &TransportParameters {
match (self.remote.as_ref(), self.remote_0rtt.as_ref()) {
(Some(tp), _) | (_, Some(tp)) => tp,
@@ -596,6 +621,7 @@ impl TransportParametersHandler {
}
/// Get the version as set (or as determined by a compatible upgrade).
+ #[must_use]
pub fn version(&self) -> Version {
self.versions.initial()
}
@@ -749,7 +775,24 @@ where
#[cfg(test)]
#[allow(unused_variables)]
mod tests {
- use super::*;
+ use std::net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6};
+
+ use neqo_common::{Decoder, Encoder};
+
+ use super::PreferredAddress;
+ use crate::{
+ tparams::{
+ TransportParameter, TransportParameterId, TransportParameters,
+ ACTIVE_CONNECTION_ID_LIMIT, IDLE_TIMEOUT, INITIAL_MAX_DATA, INITIAL_MAX_STREAMS_BIDI,
+ INITIAL_MAX_STREAMS_UNI, INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
+ INITIAL_MAX_STREAM_DATA_BIDI_REMOTE, INITIAL_MAX_STREAM_DATA_UNI,
+ INITIAL_SOURCE_CONNECTION_ID, MAX_ACK_DELAY, MAX_DATAGRAM_FRAME_SIZE,
+ MAX_UDP_PAYLOAD_SIZE, MIN_ACK_DELAY, ORIGINAL_DESTINATION_CONNECTION_ID,
+ PREFERRED_ADDRESS, RETRY_SOURCE_CONNECTION_ID, STATELESS_RESET_TOKEN,
+ VERSION_INFORMATION,
+ },
+ ConnectionId, Error, Version,
+ };
#[test]
fn basic_tps() {
@@ -843,7 +886,7 @@ mod tests {
/// This takes a `TransportParameter::PreferredAddress` that has been mutilated.
/// It then encodes it, working from the knowledge that the `encode` function
/// doesn't care about validity, and decodes it. The result should be failure.
- fn assert_invalid_spa(spa: TransportParameter) {
+ fn assert_invalid_spa(spa: &TransportParameter) {
let mut enc = Encoder::new();
spa.encode(&mut enc, PREFERRED_ADDRESS);
assert_eq!(
@@ -853,40 +896,40 @@ mod tests {
}
/// This is for those rare mutations that are acceptable.
- fn assert_valid_spa(spa: TransportParameter) {
+ fn assert_valid_spa(spa: &TransportParameter) {
let mut enc = Encoder::new();
spa.encode(&mut enc, PREFERRED_ADDRESS);
let mut dec = enc.as_decoder();
let (id, decoded) = TransportParameter::decode(&mut dec).unwrap().unwrap();
assert_eq!(id, PREFERRED_ADDRESS);
- assert_eq!(decoded, spa);
+ assert_eq!(&decoded, spa);
}
#[test]
fn preferred_address_zero_address() {
// Either port being zero is bad.
- assert_invalid_spa(mutate_spa(|v4, _, _| {
+ assert_invalid_spa(&mutate_spa(|v4, _, _| {
v4.as_mut().unwrap().set_port(0);
}));
- assert_invalid_spa(mutate_spa(|_, v6, _| {
+ assert_invalid_spa(&mutate_spa(|_, v6, _| {
v6.as_mut().unwrap().set_port(0);
}));
// Either IP being zero is bad.
- assert_invalid_spa(mutate_spa(|v4, _, _| {
+ assert_invalid_spa(&mutate_spa(|v4, _, _| {
v4.as_mut().unwrap().set_ip(Ipv4Addr::from(0));
}));
- assert_invalid_spa(mutate_spa(|_, v6, _| {
+ assert_invalid_spa(&mutate_spa(|_, v6, _| {
v6.as_mut().unwrap().set_ip(Ipv6Addr::from(0));
}));
// Either address being absent is OK.
- assert_valid_spa(mutate_spa(|v4, _, _| {
+ assert_valid_spa(&mutate_spa(|v4, _, _| {
*v4 = None;
}));
- assert_valid_spa(mutate_spa(|_, v6, _| {
+ assert_valid_spa(&mutate_spa(|_, v6, _| {
*v6 = None;
}));
// Both addresses being absent is bad.
- assert_invalid_spa(mutate_spa(|v4, v6, _| {
+ assert_invalid_spa(&mutate_spa(|v4, v6, _| {
*v4 = None;
*v6 = None;
}));
@@ -894,10 +937,10 @@ mod tests {
#[test]
fn preferred_address_bad_cid() {
- assert_invalid_spa(mutate_spa(|_, _, cid| {
+ assert_invalid_spa(&mutate_spa(|_, _, cid| {
*cid = ConnectionId::from(&[]);
}));
- assert_invalid_spa(mutate_spa(|_, _, cid| {
+ assert_invalid_spa(&mutate_spa(|_, _, cid| {
*cid = ConnectionId::from(&[0x0c; 21]);
}));
}
@@ -975,7 +1018,6 @@ mod tests {
#[test]
fn compatible_0rtt_integers() {
- let mut tps_a = TransportParameters::default();
const INTEGER_KEYS: &[TransportParameterId] = &[
INITIAL_MAX_DATA,
INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
@@ -987,6 +1029,8 @@ mod tests {
MIN_ACK_DELAY,
MAX_DATAGRAM_FRAME_SIZE,
];
+
+ let mut tps_a = TransportParameters::default();
for i in INTEGER_KEYS {
tps_a.set(*i, TransportParameter::Integer(12));
}
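A recurring change in the tparams.rs hunks above is replacing silent `as` casts with fallible `try_from` conversions (for example when narrowing a decoded integer to `WireVersion`). A short sketch of the difference, with illustrative values and the standard error type:

```rust
// Sketch: u32::try_from reports narrowing failure instead of truncating,
// unlike `v as u32` (values here are illustrative only).
fn to_wire_version(v: u64) -> Result<u32, std::num::TryFromIntError> {
    u32::try_from(v)
}

fn main() {
    assert_eq!(to_wire_version(0x6b33_43cf).unwrap(), 0x6b33_43cf_u32);
    // A value that does not fit in 32 bits becomes an error, not garbage.
    assert!(to_wire_version(u64::from(u32::MAX) + 1).is_err());
}
```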
diff --git a/third_party/rust/neqo-transport/src/tracking.rs b/third_party/rust/neqo-transport/src/tracking.rs
index 64d00257d3..bdd0f250c7 100644
--- a/third_party/rust/neqo-transport/src/tracking.rs
+++ b/third_party/rust/neqo-transport/src/tracking.rs
@@ -6,12 +6,9 @@
// Tracking of received packets and generating acks thereof.
-#![deny(clippy::pedantic)]
-
use std::{
cmp::min,
collections::VecDeque,
- convert::TryFrom,
ops::{Index, IndexMut},
time::{Duration, Instant},
};
@@ -746,8 +743,8 @@ impl Default for AckTracker {
mod tests {
use std::collections::HashSet;
- use lazy_static::lazy_static;
use neqo_common::Encoder;
+ use test_fixture::now;
use super::{
AckTracker, Duration, Instant, PacketNumberSpace, PacketNumberSpaceSet, RecoveryToken,
@@ -760,16 +757,13 @@ mod tests {
};
const RTT: Duration = Duration::from_millis(100);
- lazy_static! {
- static ref NOW: Instant = Instant::now();
- }
fn test_ack_range(pns: &[PacketNumber], nranges: usize) {
let mut rp = RecvdPackets::new(PacketNumberSpace::Initial); // Any space will do.
let mut packets = HashSet::new();
for pn in pns {
- rp.set_received(*NOW, *pn, true);
+ rp.set_received(now(), *pn, true);
packets.insert(*pn);
}
@@ -824,7 +818,7 @@ mod tests {
// This will add one too many disjoint ranges.
for i in 0..=MAX_TRACKED_RANGES {
- rp.set_received(*NOW, (i * 2) as u64, true);
+ rp.set_received(now(), (i * 2) as u64, true);
}
assert_eq!(rp.ranges.len(), MAX_TRACKED_RANGES);
@@ -843,22 +837,22 @@ mod tests {
// Only application data packets are delayed.
let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData);
assert!(rp.ack_time().is_none());
- assert!(!rp.ack_now(*NOW, RTT));
+ assert!(!rp.ack_now(now(), RTT));
rp.ack_freq(0, COUNT, DELAY, false);
// Some packets won't cause an ACK to be needed.
for i in 0..COUNT {
- rp.set_received(*NOW, i, true);
- assert_eq!(Some(*NOW + DELAY), rp.ack_time());
- assert!(!rp.ack_now(*NOW, RTT));
- assert!(rp.ack_now(*NOW + DELAY, RTT));
+ rp.set_received(now(), i, true);
+ assert_eq!(Some(now() + DELAY), rp.ack_time());
+ assert!(!rp.ack_now(now(), RTT));
+ assert!(rp.ack_now(now() + DELAY, RTT));
}
// Exceeding COUNT will move the ACK time to now.
- rp.set_received(*NOW, COUNT, true);
- assert_eq!(Some(*NOW), rp.ack_time());
- assert!(rp.ack_now(*NOW, RTT));
+ rp.set_received(now(), COUNT, true);
+ assert_eq!(Some(now()), rp.ack_time());
+ assert!(rp.ack_now(now(), RTT));
}
#[test]
@@ -866,12 +860,12 @@ mod tests {
for space in &[PacketNumberSpace::Initial, PacketNumberSpace::Handshake] {
let mut rp = RecvdPackets::new(*space);
assert!(rp.ack_time().is_none());
- assert!(!rp.ack_now(*NOW, RTT));
+ assert!(!rp.ack_now(now(), RTT));
// Any packet in these spaces is acknowledged straight away.
- rp.set_received(*NOW, 0, true);
- assert_eq!(Some(*NOW), rp.ack_time());
- assert!(rp.ack_now(*NOW, RTT));
+ rp.set_received(now(), 0, true);
+ assert_eq!(Some(now()), rp.ack_time());
+ assert!(rp.ack_now(now(), RTT));
}
}
@@ -879,12 +873,12 @@ mod tests {
fn ooo_no_ack_delay_new() {
let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData);
assert!(rp.ack_time().is_none());
- assert!(!rp.ack_now(*NOW, RTT));
+ assert!(!rp.ack_now(now(), RTT));
// Anything other than packet 0 is acknowledged immediately.
- rp.set_received(*NOW, 1, true);
- assert_eq!(Some(*NOW), rp.ack_time());
- assert!(rp.ack_now(*NOW, RTT));
+ rp.set_received(now(), 1, true);
+ assert_eq!(Some(now()), rp.ack_time());
+ assert!(rp.ack_now(now(), RTT));
}
fn write_frame_at(rp: &mut RecvdPackets, now: Instant) {
@@ -897,37 +891,37 @@ mod tests {
}
fn write_frame(rp: &mut RecvdPackets) {
- write_frame_at(rp, *NOW);
+ write_frame_at(rp, now());
}
#[test]
fn ooo_no_ack_delay_fill() {
let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData);
- rp.set_received(*NOW, 1, true);
+ rp.set_received(now(), 1, true);
write_frame(&mut rp);
// Filling in behind the largest acknowledged causes immediate ACK.
- rp.set_received(*NOW, 0, true);
+ rp.set_received(now(), 0, true);
write_frame(&mut rp);
// Receiving the next packet won't elicit an ACK.
- rp.set_received(*NOW, 2, true);
- assert!(!rp.ack_now(*NOW, RTT));
+ rp.set_received(now(), 2, true);
+ assert!(!rp.ack_now(now(), RTT));
}
#[test]
fn immediate_ack_after_rtt() {
let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData);
- rp.set_received(*NOW, 1, true);
+ rp.set_received(now(), 1, true);
write_frame(&mut rp);
// Filling in behind the largest acknowledged causes immediate ACK.
- rp.set_received(*NOW, 0, true);
+ rp.set_received(now(), 0, true);
write_frame(&mut rp);
// A new packet ordinarily doesn't result in an ACK, but this time it does.
- rp.set_received(*NOW + RTT, 2, true);
- write_frame_at(&mut rp, *NOW + RTT);
+ rp.set_received(now() + RTT, 2, true);
+ write_frame_at(&mut rp, now() + RTT);
}
#[test]
@@ -937,29 +931,29 @@ mod tests {
// Set tolerance to 2 and then it takes three packets.
rp.ack_freq(0, 2, Duration::from_millis(10), true);
- rp.set_received(*NOW, 1, true);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 2, true);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 3, true);
- assert_eq!(Some(*NOW), rp.ack_time());
+ rp.set_received(now(), 1, true);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 2, true);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 3, true);
+ assert_eq!(Some(now()), rp.ack_time());
}
#[test]
fn ooo_no_ack_delay_threshold_gap() {
let mut rp = RecvdPackets::new(PacketNumberSpace::ApplicationData);
- rp.set_received(*NOW, 1, true);
+ rp.set_received(now(), 1, true);
write_frame(&mut rp);
// Set tolerance to 2 and then it takes three packets.
rp.ack_freq(0, 2, Duration::from_millis(10), true);
- rp.set_received(*NOW, 3, true);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 4, true);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 5, true);
- assert_eq!(Some(*NOW), rp.ack_time());
+ rp.set_received(now(), 3, true);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 4, true);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 5, true);
+ assert_eq!(Some(now()), rp.ack_time());
}
/// Test that an in-order packet that is not ack-eliciting doesn't
@@ -970,13 +964,13 @@ mod tests {
rp.ack_freq(0, 1, Duration::from_millis(10), true);
// This should be ignored.
- rp.set_received(*NOW, 0, false);
- assert_ne!(Some(*NOW), rp.ack_time());
+ rp.set_received(now(), 0, false);
+ assert_ne!(Some(now()), rp.ack_time());
// Skip 1 (it has no effect).
- rp.set_received(*NOW, 2, true);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 3, true);
- assert_eq!(Some(*NOW), rp.ack_time());
+ rp.set_received(now(), 2, true);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 3, true);
+ assert_eq!(Some(now()), rp.ack_time());
}
/// If a packet that is not ack-eliciting is reordered, that's fine too.
@@ -986,16 +980,16 @@ mod tests {
rp.ack_freq(0, 1, Duration::from_millis(10), false);
// These are out of order, but they are not ack-eliciting.
- rp.set_received(*NOW, 1, false);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 0, false);
- assert_ne!(Some(*NOW), rp.ack_time());
+ rp.set_received(now(), 1, false);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 0, false);
+ assert_ne!(Some(now()), rp.ack_time());
// These are in order.
- rp.set_received(*NOW, 2, true);
- assert_ne!(Some(*NOW), rp.ack_time());
- rp.set_received(*NOW, 3, true);
- assert_eq!(Some(*NOW), rp.ack_time());
+ rp.set_received(now(), 2, true);
+ assert_ne!(Some(now()), rp.ack_time());
+ rp.set_received(now(), 3, true);
+ assert_eq!(Some(now()), rp.ack_time());
}
#[test]
@@ -1007,23 +1001,23 @@ mod tests {
tracker
.get_mut(PacketNumberSpace::Handshake)
.unwrap()
- .set_received(*NOW, 0, false);
- assert_eq!(None, tracker.ack_time(*NOW));
+ .set_received(now(), 0, false);
+ assert_eq!(None, tracker.ack_time(now()));
// This should be delayed.
tracker
.get_mut(PacketNumberSpace::ApplicationData)
.unwrap()
- .set_received(*NOW, 0, true);
- assert_eq!(Some(*NOW + DELAY), tracker.ack_time(*NOW));
+ .set_received(now(), 0, true);
+ assert_eq!(Some(now() + DELAY), tracker.ack_time(now()));
// This should move the time forward.
- let later = *NOW + (DELAY / 2);
+ let later = now() + (DELAY / 2);
tracker
.get_mut(PacketNumberSpace::Initial)
.unwrap()
.set_received(later, 0, true);
- assert_eq!(Some(later), tracker.ack_time(*NOW));
+ assert_eq!(Some(later), tracker.ack_time(now()));
}
#[test]
@@ -1047,17 +1041,17 @@ mod tests {
tracker
.get_mut(PacketNumberSpace::Initial)
.unwrap()
- .set_received(*NOW, 0, true);
+ .set_received(now(), 0, true);
// The reference time for `ack_time` has to be in the past or we filter out the timer.
assert!(tracker
- .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap())
+ .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
.is_some());
let mut tokens = Vec::new();
let mut stats = FrameStats::default();
tracker.write_frame(
PacketNumberSpace::Initial,
- *NOW,
+ now(),
RTT,
&mut builder,
&mut tokens,
@@ -1069,9 +1063,9 @@ mod tests {
tracker
.get_mut(PacketNumberSpace::Initial)
.unwrap()
- .set_received(*NOW, 1, true);
+ .set_received(now(), 1, true);
assert!(tracker
- .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap())
+ .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
.is_some());
// Now drop that space.
@@ -1079,11 +1073,11 @@ mod tests {
assert!(tracker.get_mut(PacketNumberSpace::Initial).is_none());
assert!(tracker
- .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap())
+ .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
.is_none());
tracker.write_frame(
PacketNumberSpace::Initial,
- *NOW,
+ now(),
RTT,
&mut builder,
&mut tokens,
@@ -1103,9 +1097,9 @@ mod tests {
tracker
.get_mut(PacketNumberSpace::Initial)
.unwrap()
- .set_received(*NOW, 0, true);
+ .set_received(now(), 0, true);
assert!(tracker
- .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap())
+ .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
.is_some());
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
@@ -1114,7 +1108,7 @@ mod tests {
let mut stats = FrameStats::default();
tracker.write_frame(
PacketNumberSpace::Initial,
- *NOW,
+ now(),
RTT,
&mut builder,
&mut Vec::new(),
@@ -1130,13 +1124,13 @@ mod tests {
tracker
.get_mut(PacketNumberSpace::Initial)
.unwrap()
- .set_received(*NOW, 0, true);
+ .set_received(now(), 0, true);
tracker
.get_mut(PacketNumberSpace::Initial)
.unwrap()
- .set_received(*NOW, 2, true);
+ .set_received(now(), 2, true);
assert!(tracker
- .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap())
+ .ack_time(now().checked_sub(Duration::from_millis(1)).unwrap())
.is_some());
let mut builder = PacketBuilder::short(Encoder::new(), false, []);
@@ -1145,7 +1139,7 @@ mod tests {
let mut stats = FrameStats::default();
tracker.write_frame(
PacketNumberSpace::Initial,
- *NOW,
+ now(),
RTT,
&mut builder,
&mut Vec::new(),
@@ -1168,19 +1162,19 @@ mod tests {
let mut tracker = AckTracker::default();
// While we have multiple PN spaces, we ignore ACK timers from the past.
- // Send out of order to cause the delayed ack timer to be set to `*NOW`.
+ // Send out of order to cause the delayed ack timer to be set to `now()`.
tracker
.get_mut(PacketNumberSpace::ApplicationData)
.unwrap()
- .set_received(*NOW, 3, true);
- assert!(tracker.ack_time(*NOW + Duration::from_millis(1)).is_none());
+ .set_received(now(), 3, true);
+ assert!(tracker.ack_time(now() + Duration::from_millis(1)).is_none());
// When we are reduced to one space, that filter is off.
tracker.drop_space(PacketNumberSpace::Initial);
tracker.drop_space(PacketNumberSpace::Handshake);
assert_eq!(
- tracker.ack_time(*NOW + Duration::from_millis(1)),
- Some(*NOW)
+ tracker.ack_time(now() + Duration::from_millis(1)),
+ Some(now())
);
}
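The tracking.rs test hunks above drop the `lazy_static` `NOW` constant in favor of calling `test_fixture::now()`. The fixture's real implementation is not shown in this diff; the sketch below only illustrates one way a process-wide baseline `Instant` can be provided without the macro, and is an assumption, not neqo's code:

```rust
use std::sync::OnceLock;
use std::time::Instant;

// Sketch of a fixture-style now(): every call returns the same baseline
// Instant, keeping test timings deterministic (assumed design, not neqo's).
fn now() -> Instant {
    static BASE: OnceLock<Instant> = OnceLock::new();
    *BASE.get_or_init(Instant::now)
}

fn main() {
    let a = now();
    let b = now();
    assert_eq!(a, b);
}
```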
diff --git a/third_party/rust/neqo-transport/src/version.rs b/third_party/rust/neqo-transport/src/version.rs
index 13db0bf024..eee598fdd0 100644
--- a/third_party/rust/neqo-transport/src/version.rs
+++ b/third_party/rust/neqo-transport/src/version.rs
@@ -4,17 +4,16 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-use std::convert::TryFrom;
-
use neqo_common::qdebug;
use crate::{Error, Res};
pub type WireVersion = u32;
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Version {
Version2,
+ #[default]
Version1,
Draft29,
Draft30,
@@ -23,6 +22,7 @@ pub enum Version {
}
impl Version {
+ #[must_use]
pub const fn wire_version(self) -> WireVersion {
match self {
Self::Version2 => 0x6b33_43cf,
@@ -94,6 +94,7 @@ impl Version {
}
/// Determine if `self` can be upgraded to `other` compatibly.
+ #[must_use]
pub fn is_compatible(self, other: Self) -> bool {
self == other
|| matches!(
@@ -102,6 +103,7 @@ impl Version {
)
}
+ #[must_use]
pub fn all() -> Vec<Self> {
vec![
Self::Version2,
@@ -121,12 +123,6 @@ impl Version {
}
}
-impl Default for Version {
- fn default() -> Self {
- Self::Version1
- }
-}
-
impl TryFrom<WireVersion> for Version {
type Error = Error;
@@ -176,15 +172,20 @@ pub struct VersionConfig {
}
impl VersionConfig {
+ /// # Panics
+ /// When `all` does not include `initial`.
+ #[must_use]
pub fn new(initial: Version, all: Vec<Version>) -> Self {
assert!(all.contains(&initial));
Self { initial, all }
}
+ #[must_use]
pub fn initial(&self) -> Version {
self.initial
}
+ #[must_use]
pub fn all(&self) -> &[Version] {
&self.all
}
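The version.rs hunk above removes the hand-written `impl Default for Version` and instead derives `Default`, marking the default variant with `#[default]`. A compact sketch of that derive form (stable since Rust 1.62), using a trimmed stand-in enum:

```rust
// Sketch: deriving Default for an enum by tagging the default variant,
// replacing a manual `impl Default` block (illustrative subset of variants).
#[derive(Debug, Default, PartialEq)]
enum Version {
    Version2,
    #[default]
    Version1,
    Draft29,
}

fn main() {
    assert_eq!(Version::default(), Version::Version1);
}
```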
diff --git a/third_party/rust/neqo-transport/tests/common/mod.rs b/third_party/rust/neqo-transport/tests/common/mod.rs
index a43f91e3fe..faff216eb9 100644
--- a/third_party/rust/neqo-transport/tests/common/mod.rs
+++ b/third_party/rust/neqo-transport/tests/common/mod.rs
@@ -4,11 +4,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
#![allow(unused)]
-use std::{cell::RefCell, convert::TryFrom, mem, ops::Range, rc::Rc};
+use std::{cell::RefCell, mem, ops::Range, rc::Rc};
use neqo_common::{event::Provider, hex_with_len, qtrace, Datagram, Decoder, Role};
use neqo_crypto::{
@@ -21,7 +19,7 @@ use neqo_transport::{
server::{ActiveConnectionRef, Server, ValidateAddress},
Connection, ConnectionEvent, ConnectionParameters, State,
};
-use test_fixture::{self, default_client, now, CountingConnectionIdGenerator};
+use test_fixture::{default_client, now, CountingConnectionIdGenerator};
/// Create a server. This is different than the one in the fixture, which is a single connection.
pub fn new_server(params: ConnectionParameters) -> Server {
diff --git a/third_party/rust/neqo-transport/tests/conn_vectors.rs b/third_party/rust/neqo-transport/tests/conn_vectors.rs
index 91dbbf31cc..f478883075 100644
--- a/third_party/rust/neqo-transport/tests/conn_vectors.rs
+++ b/third_party/rust/neqo-transport/tests/conn_vectors.rs
@@ -5,7 +5,7 @@
// except according to those terms.
// Tests with the test vectors from the spec.
-#![deny(clippy::pedantic)]
+
#![cfg(not(feature = "fuzzing"))]
use std::{cell::RefCell, rc::Rc};
@@ -13,7 +13,7 @@ use std::{cell::RefCell, rc::Rc};
use neqo_transport::{
Connection, ConnectionParameters, RandomConnectionIdGenerator, State, Version,
};
-use test_fixture::{self, datagram, now};
+use test_fixture::{datagram, now};
const INITIAL_PACKET_V2: &[u8] = &[
0xd7, 0x6b, 0x33, 0x43, 0xcf, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00,
diff --git a/third_party/rust/neqo-transport/tests/connection.rs b/third_party/rust/neqo-transport/tests/connection.rs
index 4cbf57f405..0b91fcf306 100644
--- a/third_party/rust/neqo-transport/tests/connection.rs
+++ b/third_party/rust/neqo-transport/tests/connection.rs
@@ -4,19 +4,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::use_self)]
-
mod common;
-use std::convert::TryFrom;
-
use common::{
apply_header_protection, decode_initial_header, initial_aead_and_hp, remove_header_protection,
};
use neqo_common::{Datagram, Decoder, Encoder, Role};
use neqo_transport::{ConnectionError, ConnectionParameters, Error, State, Version};
-use test_fixture::{self, default_client, default_server, new_client, now, split_datagram};
+use test_fixture::{default_client, default_server, new_client, now, split_datagram};
#[test]
fn connect() {
@@ -133,6 +128,7 @@ fn reorder_server_initial() {
}
/// Overflow the crypto buffer.
+#[allow(clippy::similar_names)] // For ..._scid and ..._dcid, which are fine.
#[test]
fn overflow_crypto() {
let mut client = new_client(
diff --git a/third_party/rust/neqo-transport/tests/network.rs b/third_party/rust/neqo-transport/tests/network.rs
index 8c388457c5..27e5a83cd6 100644
--- a/third_party/rust/neqo-transport/tests/network.rs
+++ b/third_party/rust/neqo-transport/tests/network.rs
@@ -4,18 +4,17 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-
-mod sim;
-
use std::{ops::Range, time::Duration};
use neqo_transport::{ConnectionError, ConnectionParameters, Error, State};
-use sim::{
- connection::{ConnectionNode, ReachState, ReceiveData, SendData},
- network::{Delay, Drop, TailDrop},
- Simulator,
+use test_fixture::{
+ boxed,
+ sim::{
+ connection::{ConnectionNode, ReachState, ReceiveData, SendData},
+ network::{Delay, Drop, TailDrop},
+ Simulator,
+ },
+ simulate,
};
/// The amount of transfer. Much more than this takes a surprising amount of time.
@@ -32,26 +31,28 @@ const fn weeks(m: u32) -> Duration {
simulate!(
connect_direct,
[
- ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]),
- ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]),
+ ConnectionNode::new_client(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
+ ConnectionNode::new_server(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
]
);
simulate!(
idle_timeout,
[
- ConnectionNode::default_client(boxed![
- ReachState::new(State::Confirmed),
- ReachState::new(State::Closed(ConnectionError::Transport(
- Error::IdleTimeout
- )))
- ]),
- ConnectionNode::default_server(boxed![
- ReachState::new(State::Confirmed),
- ReachState::new(State::Closed(ConnectionError::Transport(
- Error::IdleTimeout
- )))
- ]),
+ ConnectionNode::default_client(boxed![ReachState::new(State::Closed(
+ ConnectionError::Transport(Error::IdleTimeout)
+ ))]),
+ ConnectionNode::default_server(boxed![ReachState::new(State::Closed(
+ ConnectionError::Transport(Error::IdleTimeout)
+ ))]),
]
);
@@ -60,23 +61,19 @@ simulate!(
[
ConnectionNode::new_client(
ConnectionParameters::default().idle_timeout(weeks(1000)),
- boxed![
- ReachState::new(State::Confirmed),
- ReachState::new(State::Closed(ConnectionError::Transport(
- Error::IdleTimeout
- )))
- ]
+ boxed![ReachState::new(State::Confirmed),],
+ boxed![ReachState::new(State::Closed(ConnectionError::Transport(
+ Error::IdleTimeout
+ )))]
),
Delay::new(weeks(6)..weeks(6)),
Drop::percentage(10),
ConnectionNode::new_server(
ConnectionParameters::default().idle_timeout(weeks(1000)),
- boxed![
- ReachState::new(State::Confirmed),
- ReachState::new(State::Closed(ConnectionError::Transport(
- Error::IdleTimeout
- )))
- ]
+ boxed![ReachState::new(State::Confirmed),],
+ boxed![ReachState::new(State::Closed(ConnectionError::Transport(
+ Error::IdleTimeout
+ )))]
),
Delay::new(weeks(8)..weeks(8)),
Drop::percentage(10),
@@ -94,9 +91,17 @@ simulate!(
simulate!(
connect_fixed_rtt,
[
- ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]),
+ ConnectionNode::new_client(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
Delay::new(DELAY..DELAY),
- ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]),
+ ConnectionNode::new_server(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
Delay::new(DELAY..DELAY),
],
);
@@ -104,22 +109,38 @@ simulate!(
simulate!(
connect_taildrop_jitter,
[
- ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]),
- TailDrop::dsl_uplink(),
- Delay::new(ZERO..JITTER),
- ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]),
+ ConnectionNode::new_client(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
TailDrop::dsl_downlink(),
Delay::new(ZERO..JITTER),
+ ConnectionNode::new_server(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
+ TailDrop::dsl_uplink(),
+ Delay::new(ZERO..JITTER),
],
);
simulate!(
connect_taildrop,
[
- ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]),
- TailDrop::dsl_uplink(),
- ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]),
+ ConnectionNode::new_client(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
TailDrop::dsl_downlink(),
+ ConnectionNode::new_server(
+ ConnectionParameters::default(),
+ [],
+ boxed![ReachState::new(State::Confirmed)]
+ ),
+ TailDrop::dsl_uplink(),
],
);
@@ -139,9 +160,9 @@ simulate!(
transfer_taildrop,
[
ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]),
- TailDrop::dsl_uplink(),
- ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]),
TailDrop::dsl_downlink(),
+ ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]),
+ TailDrop::dsl_uplink(),
],
);
@@ -149,10 +170,10 @@ simulate!(
transfer_taildrop_jitter,
[
ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]),
- TailDrop::dsl_uplink(),
+ TailDrop::dsl_downlink(),
Delay::new(ZERO..JITTER),
ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]),
- TailDrop::dsl_downlink(),
+ TailDrop::dsl_uplink(),
Delay::new(ZERO..JITTER),
],
);
diff --git a/third_party/rust/neqo-transport/tests/retry.rs b/third_party/rust/neqo-transport/tests/retry.rs
index 93759c7df9..e583fcae0f 100644
--- a/third_party/rust/neqo-transport/tests/retry.rs
+++ b/third_party/rust/neqo-transport/tests/retry.rs
@@ -4,14 +4,11 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
#![cfg(not(feature = "fuzzing"))]
mod common;
use std::{
- convert::TryFrom,
mem,
net::{IpAddr, Ipv4Addr, SocketAddr},
time::Duration,
@@ -24,7 +21,7 @@ use common::{
use neqo_common::{hex_with_len, qdebug, qtrace, Datagram, Encoder, Role};
use neqo_crypto::AuthenticationStatus;
use neqo_transport::{server::ValidateAddress, ConnectionError, Error, State, StreamType};
-use test_fixture::{self, assertions, datagram, default_client, now, split_datagram};
+use test_fixture::{assertions, datagram, default_client, now, split_datagram};
#[test]
fn retry_basic() {
diff --git a/third_party/rust/neqo-transport/tests/server.rs b/third_party/rust/neqo-transport/tests/server.rs
index d6c9c2df95..7388e0fee7 100644
--- a/third_party/rust/neqo-transport/tests/server.rs
+++ b/third_party/rust/neqo-transport/tests/server.rs
@@ -4,12 +4,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-
mod common;
-use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration};
+use std::{cell::RefCell, mem, net::SocketAddr, rc::Rc, time::Duration};
use common::{
apply_header_protection, connect, connected_server, decode_initial_header, default_server,
@@ -24,7 +21,7 @@ use neqo_transport::{
Connection, ConnectionError, ConnectionParameters, Error, Output, State, StreamType, Version,
};
use test_fixture::{
- self, assertions, datagram, default_client, new_client, now, split_datagram,
+ assertions, datagram, default_client, new_client, now, split_datagram,
CountingConnectionIdGenerator,
};
diff --git a/third_party/rust/neqo-transport/tests/sim/connection.rs b/third_party/rust/neqo-transport/tests/sim/connection.rs
deleted file mode 100644
index 45a5234512..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/connection.rs
+++ /dev/null
@@ -1,315 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(clippy::module_name_repetitions)]
-
-use std::{
- cmp::min,
- fmt::{self, Debug},
- time::Instant,
-};
-
-use neqo_common::{event::Provider, qdebug, qtrace, Datagram};
-use neqo_crypto::AuthenticationStatus;
-use neqo_transport::{
- Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType,
-};
-
-use super::{Node, Rng};
-
-/// The status of the processing of an event.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)]
-pub enum GoalStatus {
- /// The event didn't result in doing anything; the goal is waiting for something.
- Waiting,
- /// An action was taken as a result of the event.
- Active,
- /// The goal was accomplished.
- Done,
-}
-
-/// A goal for the connection.
-/// Goals can be accomplished in any order.
-pub trait ConnectionGoal {
- fn init(&mut self, _c: &mut Connection, _now: Instant) {}
- /// Perform some processing.
- fn process(&mut self, _c: &mut Connection, _now: Instant) -> GoalStatus {
- GoalStatus::Waiting
- }
- /// Handle an event from the provided connection, returning `true` when the
- /// goal is achieved.
- fn handle_event(&mut self, c: &mut Connection, e: &ConnectionEvent, now: Instant)
- -> GoalStatus;
-}
-
-pub struct ConnectionNode {
- c: Connection,
- goals: Vec<Box<dyn ConnectionGoal>>,
-}
-
-impl ConnectionNode {
- pub fn new_client(
- params: ConnectionParameters,
- goals: impl IntoIterator<Item = Box<dyn ConnectionGoal>>,
- ) -> Self {
- Self {
- c: test_fixture::new_client(params),
- goals: goals.into_iter().collect(),
- }
- }
-
- pub fn new_server(
- params: ConnectionParameters,
- goals: impl IntoIterator<Item = Box<dyn ConnectionGoal>>,
- ) -> Self {
- Self {
- c: test_fixture::new_server(test_fixture::DEFAULT_ALPN, params),
- goals: goals.into_iter().collect(),
- }
- }
-
- pub fn default_client(goals: impl IntoIterator<Item = Box<dyn ConnectionGoal>>) -> Self {
- Self::new_client(ConnectionParameters::default(), goals)
- }
-
- pub fn default_server(goals: impl IntoIterator<Item = Box<dyn ConnectionGoal>>) -> Self {
- Self::new_server(ConnectionParameters::default(), goals)
- }
-
- #[allow(dead_code)]
- pub fn clear_goals(&mut self) {
- self.goals.clear();
- }
-
- #[allow(dead_code)]
- pub fn add_goal(&mut self, goal: Box<dyn ConnectionGoal>) {
- self.goals.push(goal);
- }
-
- /// Process all goals using the given closure and return whether any were active.
- fn process_goals<F>(&mut self, mut f: F) -> bool
- where
- F: FnMut(&mut Box<dyn ConnectionGoal>, &mut Connection) -> GoalStatus,
- {
- // Waiting on drain_filter...
- // self.goals.drain_filter(|g| f(g, &mut self.c, &e)).count();
- let mut active = false;
- let mut i = 0;
- while i < self.goals.len() {
- let status = f(&mut self.goals[i], &mut self.c);
- if status == GoalStatus::Done {
- self.goals.remove(i);
- active = true;
- } else {
- active |= status == GoalStatus::Active;
- i += 1;
- }
- }
- active
- }
-}
-
-impl Node for ConnectionNode {
- fn init(&mut self, _rng: Rng, now: Instant) {
- for g in &mut self.goals {
- g.init(&mut self.c, now);
- }
- }
-
- fn process(&mut self, mut d: Option<Datagram>, now: Instant) -> Output {
- _ = self.process_goals(|goal, c| goal.process(c, now));
- loop {
- let res = self.c.process(d.take().as_ref(), now);
-
- let mut active = false;
- while let Some(e) = self.c.next_event() {
- qtrace!([self.c], "received event {:?}", e);
-
- // Perform authentication automatically.
- if matches!(e, ConnectionEvent::AuthenticationNeeded) {
- self.c.authenticated(AuthenticationStatus::Ok, now);
- }
-
- active |= self.process_goals(|goal, c| goal.handle_event(c, &e, now));
- }
- // Exit at this point if the connection produced a datagram.
- // We also exit if none of the goals were active, as there is
- // no point trying again if they did nothing.
- if matches!(res, Output::Datagram(_)) || !active {
- return res;
- }
- qdebug!([self.c], "no datagram and goal activity, looping");
- }
- }
-
- fn done(&self) -> bool {
- self.goals.is_empty()
- }
-
- fn print_summary(&self, test_name: &str) {
- println!("{}: {:?}", test_name, self.c.stats());
- }
-}
-
-impl Debug for ConnectionNode {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- fmt::Display::fmt(&self.c, f)
- }
-}
-
-#[derive(Debug, Clone)]
-pub struct ReachState {
- target: State,
-}
-
-impl ReachState {
- pub fn new(target: State) -> Self {
- Self { target }
- }
-}
-
-impl ConnectionGoal for ReachState {
- fn handle_event(
- &mut self,
- _c: &mut Connection,
- e: &ConnectionEvent,
- _now: Instant,
- ) -> GoalStatus {
- if matches!(e, ConnectionEvent::StateChange(state) if *state == self.target) {
- GoalStatus::Done
- } else {
- GoalStatus::Waiting
- }
- }
-}
-
-#[derive(Debug)]
-pub struct SendData {
- remaining: usize,
- stream_id: Option<StreamId>,
-}
-
-impl SendData {
- pub fn new(amount: usize) -> Self {
- Self {
- remaining: amount,
- stream_id: None,
- }
- }
-
- fn make_stream(&mut self, c: &mut Connection) {
- if self.stream_id.is_none() {
- if let Ok(stream_id) = c.stream_create(StreamType::UniDi) {
- qdebug!([c], "made stream {} for sending", stream_id);
- self.stream_id = Some(stream_id);
- }
- }
- }
-
- fn send(&mut self, c: &mut Connection, stream_id: StreamId) -> GoalStatus {
- const DATA: &[u8] = &[0; 4096];
- let mut status = GoalStatus::Waiting;
- loop {
- let end = min(self.remaining, DATA.len());
- let sent = c.stream_send(stream_id, &DATA[..end]).unwrap();
- if sent == 0 {
- return status;
- }
- self.remaining -= sent;
- qtrace!("sent {} remaining {}", sent, self.remaining);
- if self.remaining == 0 {
- c.stream_close_send(stream_id).unwrap();
- return GoalStatus::Done;
- }
- status = GoalStatus::Active;
- }
- }
-}
-
-impl ConnectionGoal for SendData {
- fn init(&mut self, c: &mut Connection, _now: Instant) {
- self.make_stream(c);
- }
-
- fn process(&mut self, c: &mut Connection, _now: Instant) -> GoalStatus {
- self.stream_id
- .map_or(GoalStatus::Waiting, |stream_id| self.send(c, stream_id))
- }
-
- fn handle_event(
- &mut self,
- c: &mut Connection,
- e: &ConnectionEvent,
- _now: Instant,
- ) -> GoalStatus {
- match e {
- ConnectionEvent::SendStreamCreatable {
- stream_type: StreamType::UniDi,
- }
- // TODO(mt): remove the second condition when #842 is fixed.
- | ConnectionEvent::StateChange(_) => {
- self.make_stream(c);
- GoalStatus::Active
- }
-
- ConnectionEvent::SendStreamWritable { stream_id }
- if Some(*stream_id) == self.stream_id =>
- {
- self.send(c, *stream_id)
- }
-
- // If we sent data in 0-RTT, then we didn't track how much we should
- // have sent. This is trivial to fix if 0-RTT testing is ever needed.
- ConnectionEvent::ZeroRttRejected => panic!("not supported"),
- _ => GoalStatus::Waiting,
- }
- }
-}
-
-/// Receive a prescribed amount of data from any stream.
-#[derive(Debug)]
-pub struct ReceiveData {
- remaining: usize,
-}
-
-impl ReceiveData {
- pub fn new(amount: usize) -> Self {
- Self { remaining: amount }
- }
-
- fn recv(&mut self, c: &mut Connection, stream_id: StreamId) -> GoalStatus {
- let mut buf = vec![0; 4096];
- let mut status = GoalStatus::Waiting;
- loop {
- let end = min(self.remaining, buf.len());
- let (recvd, _) = c.stream_recv(stream_id, &mut buf[..end]).unwrap();
- qtrace!("received {} remaining {}", recvd, self.remaining);
- if recvd == 0 {
- return status;
- }
- self.remaining -= recvd;
- if self.remaining == 0 {
- return GoalStatus::Done;
- }
- status = GoalStatus::Active;
- }
- }
-}
-
-impl ConnectionGoal for ReceiveData {
- fn handle_event(
- &mut self,
- c: &mut Connection,
- e: &ConnectionEvent,
- _now: Instant,
- ) -> GoalStatus {
- if let ConnectionEvent::RecvStreamReadable { stream_id } = e {
- self.recv(c, *stream_id)
- } else {
- GoalStatus::Waiting
- }
- }
-}
diff --git a/third_party/rust/neqo-transport/tests/sim/delay.rs b/third_party/rust/neqo-transport/tests/sim/delay.rs
deleted file mode 100644
index 34cb923084..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/delay.rs
+++ /dev/null
@@ -1,102 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(clippy::module_name_repetitions)]
-
-use std::{
- collections::BTreeMap,
- convert::TryFrom,
- fmt::{self, Debug},
- ops::Range,
- time::{Duration, Instant},
-};
-
-use neqo_common::Datagram;
-use neqo_transport::Output;
-
-use super::{Node, Rng};
-
-/// An iterator that shares a `Random` instance and produces uniformly
-/// random `Duration`s within a specified range.
-pub struct RandomDelay {
- start: Duration,
- max: u64,
- rng: Option<Rng>,
-}
-
-impl RandomDelay {
- /// Make a new random `Duration` generator. This panics if the range provided
- /// is inverted (i.e., `bounds.start > bounds.end`), or spans 2^64
- /// or more nanoseconds.
- /// A zero-length range means that random values won't be taken from the Rng
- pub fn new(bounds: Range<Duration>) -> Self {
- let max = u64::try_from((bounds.end - bounds.start).as_nanos()).unwrap();
- Self {
- start: bounds.start,
- max,
- rng: None,
- }
- }
-
- pub fn set_rng(&mut self, rng: Rng) {
- self.rng = Some(rng);
- }
-
- pub fn next(&mut self) -> Duration {
- let mut rng = self.rng.as_ref().unwrap().borrow_mut();
- let r = rng.random_from(0..self.max);
- self.start + Duration::from_nanos(r)
- }
-}
-
-pub struct Delay {
- random: RandomDelay,
- queue: BTreeMap<Instant, Datagram>,
-}
-
-impl Delay {
- pub fn new(bounds: Range<Duration>) -> Self {
- Self {
- random: RandomDelay::new(bounds),
- queue: BTreeMap::default(),
- }
- }
-
- fn insert(&mut self, d: Datagram, now: Instant) {
- let mut t = now + self.random.next();
- while self.queue.contains_key(&t) {
- // This is a little inefficient, but it avoids drops on collisions,
- // which are super-common for a fixed delay.
- t += Duration::from_nanos(1);
- }
- self.queue.insert(t, d);
- }
-}
-
-impl Node for Delay {
- fn init(&mut self, rng: Rng, _now: Instant) {
- self.random.set_rng(rng);
- }
-
- fn process(&mut self, d: Option<Datagram>, now: Instant) -> Output {
- if let Some(dgram) = d {
- self.insert(dgram, now);
- }
- if let Some((&k, _)) = self.queue.range(..=now).next() {
- Output::Datagram(self.queue.remove(&k).unwrap())
- } else if let Some(&t) = self.queue.keys().next() {
- Output::Callback(t - now)
- } else {
- Output::None
- }
- }
-}
-
-impl Debug for Delay {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("delay")
- }
-}
diff --git a/third_party/rust/neqo-transport/tests/sim/drop.rs b/third_party/rust/neqo-transport/tests/sim/drop.rs
deleted file mode 100644
index 629fbf48d3..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/drop.rs
+++ /dev/null
@@ -1,75 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(clippy::module_name_repetitions)]
-
-use std::{
- fmt::{self, Debug},
- time::Instant,
-};
-
-use neqo_common::{qtrace, Datagram};
-use neqo_transport::Output;
-
-use super::{Node, Rng};
-
-/// A random dropper.
-pub struct Drop {
- threshold: u64,
- max: u64,
- rng: Option<Rng>,
-}
-
-impl Drop {
-    /// Make a new random drop generator. Each time `drop` is called, it generates a
-    /// random value between 0 and `max` (exclusive). If this value is less than
-    /// `threshold`, `true` is returned.
- pub fn new(threshold: u64, max: u64) -> Self {
- Self {
- threshold,
- max,
- rng: None,
- }
- }
-
- /// Generate random drops with the given percentage.
- pub fn percentage(pct: u8) -> Self {
-        // Multiply by 10 (a threshold out of 1000) so that the random number generator's rejection sampling discards fewer values.
- Self::new(u64::from(pct) * 10, 1000)
- }
-
- pub fn drop(&mut self) -> bool {
- let mut rng = self.rng.as_ref().unwrap().borrow_mut();
- let r = rng.random_from(0..self.max);
- r < self.threshold
- }
-}
-
-impl Node for Drop {
- fn init(&mut self, rng: Rng, _now: Instant) {
- self.rng = Some(rng);
- }
-
-    // Pass any provided datagram straight through, but drop some of them.
- fn process(&mut self, d: Option<Datagram>, _now: Instant) -> Output {
- if let Some(dgram) = d {
- if self.drop() {
- qtrace!("drop {}", dgram.len());
- Output::None
- } else {
- Output::Datagram(dgram)
- }
- } else {
- Output::None
- }
- }
-}
-
-impl Debug for Drop {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("drop")
- }
-}
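
For reference, the scaling in `Drop::percentage` above keeps the drop rate at `pct` percent while working in integers: the threshold becomes `pct * 10` out of `max = 1000`. A tiny standalone check of that arithmetic (the helper name is made up):

    // pct percent expressed as threshold / max, matching Drop::percentage(pct) above.
    fn drop_probability(pct: u8) -> f64 {
        let threshold = u64::from(pct) * 10; // e.g. 5% -> 50
        let max = 1000_u64;
        threshold as f64 / max as f64
    }

    fn main() {
        assert!((drop_probability(5) - 0.05).abs() < 1e-9);
        assert!((drop_probability(100) - 1.0).abs() < 1e-9);
    }
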
diff --git a/third_party/rust/neqo-transport/tests/sim/mod.rs b/third_party/rust/neqo-transport/tests/sim/mod.rs
deleted file mode 100644
index 9ab9d57a4a..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/mod.rs
+++ /dev/null
@@ -1,232 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-// Tests with simulated network
-#![cfg_attr(feature = "deny-warnings", deny(warnings))]
-#![warn(clippy::pedantic)]
-
-pub mod connection;
-mod delay;
-mod drop;
-pub mod rng;
-mod taildrop;
-
-use std::{
- cell::RefCell,
- cmp::min,
- convert::TryFrom,
- fmt::Debug,
- rc::Rc,
- time::{Duration, Instant},
-};
-
-use neqo_common::{qdebug, qinfo, qtrace, Datagram, Encoder};
-use neqo_transport::Output;
-use rng::Random;
-use test_fixture::{self, now};
-use NodeState::{Active, Idle, Waiting};
-
-pub mod network {
- pub use super::{delay::Delay, drop::Drop, taildrop::TailDrop};
-}
-
-type Rng = Rc<RefCell<Random>>;
-
-/// A macro that turns a list of values into boxed versions of the same.
-#[macro_export]
-macro_rules! boxed {
- [$($v:expr),+ $(,)?] => {
- vec![ $( Box::new($v) as _ ),+ ]
- };
-}
-
-/// Create a simulation test case. This takes either two or three arguments.
-/// The two-argument form takes a bare name (`ident`), a comma, and an array of
-/// items that implement `Node`.
-/// The three-argument form adds a setup block that can be used to construct a
-/// complex value that is then shared between all nodes. The values in the
-/// three-argument form have to be closures (or functions) that accept a reference
-/// to the value returned by the setup.
-#[macro_export]
-macro_rules! simulate {
- ($n:ident, [ $($v:expr),+ $(,)? ] $(,)?) => {
- simulate!($n, (), [ $(|_| $v),+ ]);
- };
- ($n:ident, $setup:expr, [ $( $v:expr ),+ $(,)? ] $(,)?) => {
- #[test]
- fn $n() {
- let fixture = $setup;
- let mut nodes: Vec<Box<dyn $crate::sim::Node>> = Vec::new();
- $(
- let f: Box<dyn FnOnce(&_) -> _> = Box::new($v);
- nodes.push(Box::new(f(&fixture)));
- )*
- let mut sim = Simulator::new(stringify!($n), nodes);
- if let Ok(seed) = std::env::var("SIMULATION_SEED") {
- sim.seed_str(seed);
- }
- sim.run();
- }
- };
-}
-
-pub trait Node: Debug {
- fn init(&mut self, _rng: Rng, _now: Instant) {}
- /// Perform processing. This optionally takes a datagram and produces either
-    /// another datagram, a time that the simulator needs to wait, or nothing.
- fn process(&mut self, d: Option<Datagram>, now: Instant) -> Output;
-    /// A node can report when it considers itself "done".
- fn done(&self) -> bool {
- true
- }
- fn print_summary(&self, _test_name: &str) {}
-}
-
-/// The state of a single node. Nodes will be activated if they are `Active`
-/// or if the previous node in the loop generated a datagram. Nodes that return
-/// `true` from `Node::done` will be activated as normal.
-#[derive(Debug, PartialEq)]
-enum NodeState {
- /// The node just produced a datagram. It should be activated again as soon as possible.
- Active,
- /// The node is waiting.
- Waiting(Instant),
- /// The node became idle.
- Idle,
-}
-
-#[derive(Debug)]
-struct NodeHolder {
- node: Box<dyn Node>,
- state: NodeState,
-}
-
-impl NodeHolder {
- fn ready(&self, now: Instant) -> bool {
- match self.state {
- Active => true,
- Waiting(t) => t >= now,
- Idle => false,
- }
- }
-}
-
-pub struct Simulator {
- name: String,
- nodes: Vec<NodeHolder>,
- rng: Rng,
-}
-
-impl Simulator {
- pub fn new(name: impl AsRef<str>, nodes: impl IntoIterator<Item = Box<dyn Node>>) -> Self {
- let name = String::from(name.as_ref());
- // The first node is marked as Active, the rest are idle.
- let mut it = nodes.into_iter();
- let nodes = it
- .next()
- .map(|node| NodeHolder {
- node,
- state: Active,
- })
- .into_iter()
- .chain(it.map(|node| NodeHolder { node, state: Idle }))
- .collect::<Vec<_>>();
- Self {
- name,
- nodes,
- rng: Rc::default(),
- }
- }
-
- pub fn seed(&mut self, seed: [u8; 32]) {
- self.rng = Rc::new(RefCell::new(Random::new(seed)));
- }
-
- /// Seed from a hex string.
-    /// Though this is convenient, it panics if this isn't a 64-character hex string.
- pub fn seed_str(&mut self, seed: impl AsRef<str>) {
- let seed = Encoder::from_hex(seed);
- self.seed(<[u8; 32]>::try_from(seed.as_ref()).unwrap());
- }
-
- fn next_time(&self, now: Instant) -> Instant {
- let mut next = None;
- for n in &self.nodes {
- match n.state {
- Idle => continue,
- Active => return now,
- Waiting(a) => next = Some(next.map_or(a, |b| min(a, b))),
- }
- }
- next.expect("a node cannot be idle and not done")
- }
-
- /// Runs the simulation.
- pub fn run(mut self) -> Duration {
- let start = now();
- let mut now = start;
- let mut dgram = None;
-
- for n in &mut self.nodes {
- n.node.init(self.rng.clone(), now);
- }
- println!("{}: seed {}", self.name, self.rng.borrow().seed_str());
-
- let real_start = Instant::now();
- loop {
- for n in &mut self.nodes {
- if dgram.is_none() && !n.ready(now) {
- qdebug!([self.name], "skipping {:?}", n.node);
- continue;
- }
-
- qdebug!([self.name], "processing {:?}", n.node);
- let res = n.node.process(dgram.take(), now);
- n.state = match res {
- Output::Datagram(d) => {
- qtrace!([self.name], " => datagram {}", d.len());
- dgram = Some(d);
- Active
- }
- Output::Callback(delay) => {
- qtrace!([self.name], " => callback {:?}", delay);
- assert_ne!(delay, Duration::new(0, 0));
- Waiting(now + delay)
- }
- Output::None => {
- qtrace!([self.name], " => nothing");
- assert!(n.node.done(), "nodes have to be done when they go idle");
- Idle
- }
- };
- }
-
- if self.nodes.iter().all(|n| n.node.done()) {
- let real_elapsed = real_start.elapsed();
- println!("{}: real elapsed time: {:?}", self.name, real_elapsed);
- let elapsed = now - start;
- println!("{}: simulated elapsed time: {:?}", self.name, elapsed);
- for n in &self.nodes {
- n.node.print_summary(&self.name);
- }
- return elapsed;
- }
-
- if dgram.is_none() {
- let next = self.next_time(now);
- if next > now {
- qinfo!(
- [self.name],
- "advancing time by {:?} to {:?}",
- next - now,
- next - start
- );
- now = next;
- }
- }
- }
- }
-}
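
A hedged usage sketch of the removed `simulate!` macro, using only the `network` nodes defined in this section; it assumes the test file declares `mod sim;` and has the macro and `Simulator` in scope, as the crate's other integration tests did. A realistic test would also include client and server connection nodes from `connection.rs` so that datagrams actually flow, and setting `SIMULATION_SEED` to a 64-character hex string makes a run reproducible:

    use std::time::Duration;

    // The two-argument form expands to `#[test] fn lossy_link() { ... }`.
    simulate!(
        lossy_link,
        [
            network::Delay::new(Duration::from_millis(10)..Duration::from_millis(50)),
            network::Drop::percentage(1),
            network::TailDrop::dsl_uplink(),
        ]
    );
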
diff --git a/third_party/rust/neqo-transport/tests/sim/net.rs b/third_party/rust/neqo-transport/tests/sim/net.rs
deleted file mode 100644
index 754426f895..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/net.rs
+++ /dev/null
@@ -1,111 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use super::rng::RandomDuration;
-use super::{Node, Rng};
-use neqo_common::Datagram;
-use neqo_transport::Output;
-use std::collections::BTreeMap;
-use std::fmt::{self, Debug};
-use std::iter;
-use std::ops::Range;
-use std::time::{Duration, Instant};
-
-/// A random dropper.
-pub struct RandomDrop {
- threshold: u64,
- max: u64,
- rng: Rng,
-}
-
-impl RandomDuration {
-    /// Make a new random `Duration` generator. This panics if the range provided
-    /// is inverted (i.e., `bounds.start > bounds.end`), or spans 2^64
-    /// or more nanoseconds.
-    /// A zero-length range means that random values won't be drawn from the `Rng`.
- pub fn new(bounds: Range<Duration>, rng: Rng) -> Self {
- let max = u64::try_from((bounds.end - bounds.start).as_nanos()).unwrap();
- Self {
- start: bounds.start,
- max,
- rng,
- }
- }
-
- fn next(&mut self) -> Duration {
- let r = if self.max == 0 {
-            0
-        } else {
-            self.rng.borrow_mut().random_from(0..self.max)
-        };
- self.start + Duration::from_nanos(r)
- }
-}
-
-enum DelayState {
- New(Range<Duration>),
- Ready(RandomDuration),
-}
-
-pub struct Delay {
- state: DelayState,
- queue: BTreeMap<Instant, Datagram>,
-}
-
-impl Delay
-{
- pub fn new(bounds: Range<Duration>) -> Self
- {
- Self {
-            state: DelayState::New(bounds),
- queue: BTreeMap::default(),
- }
- }
-
- fn insert(&mut self, d: Datagram, now: Instant) {
-        let mut t = if let DelayState::Ready(r) = &mut self.state {
-            now + r.next()
-        } else {
-            unreachable!();
-        };
- while self.queue.contains_key(&t) {
- // This is a little inefficient, but it avoids drops on collisions,
- // which are super-common for a fixed delay.
- t += Duration::from_nanos(1);
- }
- self.queue.insert(t, d);
- }
-}
-
-impl Node for Delay
-{
-    fn init(&mut self, rng: Rng, _now: Instant) {
-        if let DelayState::New(bounds) = &self.state {
-            self.state = DelayState::Ready(RandomDuration::new(bounds.clone(), rng));
- } else {
- unreachable!();
- }
- }
-
- fn process(&mut self, d: Option<Datagram>, now: Instant) -> Output {
- if let Some(dgram) = d {
- self.insert(dgram, now);
- }
- if let Some((&k, _)) = self.queue.range(..now).nth(0) {
- Output::Datagram(self.queue.remove(&k).unwrap())
- } else if let Some(&t) = self.queue.keys().nth(0) {
- Output::Callback(t - now)
- } else {
- Output::None
- }
- }
-}
-
-impl Debug for Delay {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("delay")
- }
-}
diff --git a/third_party/rust/neqo-transport/tests/sim/rng.rs b/third_party/rust/neqo-transport/tests/sim/rng.rs
deleted file mode 100644
index af4f70eb5f..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/rng.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use std::{convert::TryFrom, ops::Range};
-
-use neqo_common::Decoder;
-
-/// An implementation of a xoshiro256** pseudorandom generator.
-pub struct Random {
- state: [u64; 4],
-}
-
-impl Random {
- pub fn new(seed: [u8; 32]) -> Self {
- assert!(seed.iter().any(|&x| x != 0));
- let mut dec = Decoder::from(&seed);
- Self {
- state: [
- dec.decode_uint(8).unwrap(),
- dec.decode_uint(8).unwrap(),
- dec.decode_uint(8).unwrap(),
- dec.decode_uint(8).unwrap(),
- ],
- }
- }
-
- pub fn random(&mut self) -> u64 {
- let result = (self.state[1].overflowing_mul(5).0)
- .rotate_right(7)
- .overflowing_mul(9)
- .0;
- let t = self.state[1] << 17;
-
- self.state[2] ^= self.state[0];
- self.state[3] ^= self.state[1];
- self.state[1] ^= self.state[2];
- self.state[0] ^= self.state[3];
-
- self.state[2] ^= t;
- self.state[3] = self.state[3].rotate_right(45);
-
- result
- }
-
- /// Generate a random value from the range.
- /// If the range is empty or inverted (`range.start > range.end`), then
- /// this returns the value of `range.start` without generating any random values.
- pub fn random_from(&mut self, range: Range<u64>) -> u64 {
- let max = range.end.saturating_sub(range.start);
- if max == 0 {
- return range.start;
- }
-
- let shift = (max - 1).leading_zeros();
- assert_ne!(max, 0);
- loop {
- let r = self.random() >> shift;
- if r < max {
- return range.start + r;
- }
- }
- }
-
- /// Get the seed necessary to continue from this point.
- pub fn seed_str(&self) -> String {
- format!(
- "{:8x}{:8x}{:8x}{:8x}",
- self.state[0], self.state[1], self.state[2], self.state[3],
- )
- }
-}
-
-impl Default for Random {
- fn default() -> Self {
- let buf = neqo_crypto::random(32);
- Random::new(<[u8; 32]>::try_from(&buf[..]).unwrap())
- }
-}
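
The retry loop in `Random::random_from` above is plain rejection sampling: keep only enough high bits of each 64-bit output to cover `max`, and retry any value that lands in the gap above `max`, which avoids the modulo bias of `random() % max`. A standalone sketch with a made-up `next_u64` source (not the xoshiro256** generator above):

    fn uniform_below(mut next_u64: impl FnMut() -> u64, max: u64) -> u64 {
        assert!(max > 1, "trivial ranges are handled before sampling");
        // Keep just enough bits to represent max - 1, then retry anything >= max.
        let shift = (max - 1).leading_zeros();
        loop {
            let r = next_u64() >> shift;
            if r < max {
                return r;
            }
        }
    }

    fn main() {
        // A deterministic stand-in generator, for illustration only.
        let mut state: u64 = 0x9E37_79B9_7F4A_7C15;
        let sample = uniform_below(
            || {
                state = state.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
                state
            },
            1000,
        );
        assert!(sample < 1000);
    }
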
diff --git a/third_party/rust/neqo-transport/tests/sim/taildrop.rs b/third_party/rust/neqo-transport/tests/sim/taildrop.rs
deleted file mode 100644
index 26813800c9..0000000000
--- a/third_party/rust/neqo-transport/tests/sim/taildrop.rs
+++ /dev/null
@@ -1,188 +0,0 @@
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#![allow(clippy::module_name_repetitions)]
-
-use std::{
- cmp::max,
- collections::VecDeque,
- convert::TryFrom,
- fmt::{self, Debug},
- time::{Duration, Instant},
-};
-
-use neqo_common::{qtrace, Datagram};
-use neqo_transport::Output;
-
-use super::Node;
-
-/// One second in nanoseconds.
-const ONE_SECOND_NS: u128 = 1_000_000_000;
-
-/// This models a link with a tail drop router at the front of it.
-pub struct TailDrop {
- /// An overhead associated with each entry. This accounts for
- /// layer 2, IP, and UDP overheads.
- overhead: usize,
- /// The rate at which bytes egress the link, in bytes per second.
- rate: usize,
- /// The depth of the queue, in bytes.
- capacity: usize,
-
- /// A counter for how many bytes are enqueued.
- used: usize,
- /// A queue of unsent bytes.
- queue: VecDeque<Datagram>,
- /// The time that the next datagram can enter the link.
- next_deque: Option<Instant>,
-
- /// Any sub-ns delay from the last enqueue.
- sub_ns_delay: u32,
- /// The time it takes a byte to exit the other end of the link.
- delay: Duration,
- /// The packets that are on the link and when they can be delivered.
- on_link: VecDeque<(Instant, Datagram)>,
-
- /// The number of packets received.
- received: usize,
- /// The number of packets dropped.
- dropped: usize,
- /// The number of packets delivered.
- delivered: usize,
- /// The maximum amount of queue capacity ever used.
-    /// As packets leave the queue as soon as they start being sent, this doesn't
-    /// count the packet currently being transmitted.
- maxq: usize,
-}
-
-impl TailDrop {
- /// Make a new taildrop node with the given rate, queue capacity, and link delay.
- pub fn new(rate: usize, capacity: usize, delay: Duration) -> Self {
- Self {
- overhead: 64,
- rate,
- capacity,
- used: 0,
- queue: VecDeque::new(),
- next_deque: None,
- sub_ns_delay: 0,
- delay,
- on_link: VecDeque::new(),
- received: 0,
- dropped: 0,
- delivered: 0,
- maxq: 0,
- }
- }
-
- /// A tail drop queue on a 10Mbps link (approximated to 1 million bytes per second)
- /// with a fat 32k buffer (about 30ms), and the default forward delay of 50ms.
- pub fn dsl_uplink() -> Self {
- TailDrop::new(1_000_000, 32_768, Duration::from_millis(50))
- }
-
- /// Cut downlink to one fifth of the uplink (2Mbps), and reduce the buffer to 1/4.
- pub fn dsl_downlink() -> Self {
- TailDrop::new(200_000, 8_192, Duration::from_millis(50))
- }
-
- /// How "big" is this datagram, accounting for overheads.
- /// This approximates by using the same overhead for storing in the queue
- /// and for sending on the wire.
- fn size(&self, d: &Datagram) -> usize {
- d.len() + self.overhead
- }
-
- /// Start sending a datagram.
- fn send(&mut self, d: Datagram, now: Instant) {
- // How many bytes are we "transmitting"?
- let sz = u128::try_from(self.size(&d)).unwrap();
-
- // Calculate how long it takes to put the packet on the link.
- // Perform the calculation based on 2^32 seconds and save any remainder.
- // This ensures that high rates and small packets don't result in rounding
- // down times too badly.
- // Duration consists of a u64 and a u32, so we have 32 high bits to spare.
- let t = sz * (ONE_SECOND_NS << 32) / u128::try_from(self.rate).unwrap()
- + u128::from(self.sub_ns_delay);
- let send_ns = u64::try_from(t >> 32).unwrap();
- assert_ne!(send_ns, 0, "sending a packet takes <1ns");
- self.sub_ns_delay = u32::try_from(t & u128::from(u32::MAX)).unwrap();
- let deque_time = now + Duration::from_nanos(send_ns);
- self.next_deque = Some(deque_time);
-
- // Now work out when the packet is fully received at the other end of
-        // the link. Set up to deliver the packet then.
- let delivery_time = deque_time + self.delay;
- self.on_link.push_back((delivery_time, d));
- }
-
- /// Enqueue for sending. Maybe. If this overflows the queue, drop it instead.
- fn maybe_enqueue(&mut self, d: Datagram, now: Instant) {
- self.received += 1;
- if self.next_deque.is_none() {
- // Nothing in the queue and nothing still sending.
- debug_assert!(self.queue.is_empty());
- self.send(d, now);
- } else if self.used + self.size(&d) <= self.capacity {
- self.used += self.size(&d);
- self.maxq = max(self.maxq, self.used);
- self.queue.push_back(d);
- } else {
- qtrace!("taildrop dropping {} bytes", d.len());
- self.dropped += 1;
- }
- }
-
- /// If the last packet that was sending has been sent, start sending
- /// the next one.
- fn maybe_send(&mut self, now: Instant) {
- if self.next_deque.as_ref().map_or(false, |t| *t <= now) {
- if let Some(d) = self.queue.pop_front() {
- self.used -= self.size(&d);
- self.send(d, now);
- } else {
- self.next_deque = None;
- self.sub_ns_delay = 0;
- }
- }
- }
-}
-
-impl Node for TailDrop {
- fn process(&mut self, d: Option<Datagram>, now: Instant) -> Output {
- if let Some(dgram) = d {
- self.maybe_enqueue(dgram, now);
- }
-
- self.maybe_send(now);
-
- if let Some((t, _)) = self.on_link.front() {
- if *t <= now {
- let (_, d) = self.on_link.pop_front().unwrap();
- self.delivered += 1;
- Output::Datagram(d)
- } else {
- Output::Callback(*t - now)
- }
- } else {
- Output::None
- }
- }
-
- fn print_summary(&self, test_name: &str) {
- println!(
- "{}: taildrop: rx {} drop {} tx {} maxq {}",
- test_name, self.received, self.dropped, self.delivered, self.maxq,
- );
- }
-}
-
-impl Debug for TailDrop {
- fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
- f.write_str("taildrop")
- }
-}
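
The fixed-point arithmetic in `TailDrop::send` above can be checked by hand: serialization time is `size / rate` seconds, computed with 32 extra fractional bits so that sub-nanosecond remainders carry over to the next packet instead of being rounded away. A standalone sketch of the same computation (the helper is hypothetical, not part of the removed file):

    /// Nanoseconds to put `size` bytes on a link of `rate` bytes per second,
    /// carrying the sub-nanosecond remainder in 32 fractional bits.
    fn serialize_time(size: u64, rate: u64, carry: u32) -> (u64, u32) {
        const ONE_SECOND_NS: u128 = 1_000_000_000;
        let t = u128::from(size) * (ONE_SECOND_NS << 32) / u128::from(rate) + u128::from(carry);
        ((t >> 32) as u64, (t & u128::from(u32::MAX)) as u32)
    }

    fn main() {
        // A 1200-byte datagram plus the 64-byte per-packet overhead on the
        // 1_000_000 byte/s "DSL uplink": 1264 / 1_000_000 s = 1.264 ms exactly.
        let (ns, carry) = serialize_time(1200 + 64, 1_000_000, 0);
        assert_eq!(ns, 1_264_000);
        assert_eq!(carry, 0);
    }
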
diff --git a/third_party/rust/qlog/.cargo-checksum.json b/third_party/rust/qlog/.cargo-checksum.json
index 17ad1f8978..4aae6bf3b8 100644
--- a/third_party/rust/qlog/.cargo-checksum.json
+++ b/third_party/rust/qlog/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"15c2606defff66515f2ded1ace2aeb729229a3558d9a026c058e51a7518e6859","README.md":"597691eb766c2cbd7a6591bda56d3e70e6836b62b6327fb73497523eabd5b53d","src/events/connectivity.rs":"116993412e200e375c97762980ffb638d2244197fd752b9569b5b20baf574308","src/events/h3.rs":"45dfa1dea722f3c8adb989f04ff24e8c39550a65a35325885b3a915cafd3a550","src/events/mod.rs":"75f57b4717fa9777e19d61b99b6a79164f0e8bca9b4681c3ab11b204320c8c55","src/events/qpack.rs":"5c7267c45e3fb947cdfa946f9f9692d3e3e36a166f70124ba293dc27534267d0","src/events/quic.rs":"88b884f5788c671ffee79a3448f367c18f95ee30531262fcc14310d80e662f4a","src/events/security.rs":"e9852d7de16851b62c3e0a886a2c1a31d237e62574ef88428ef62dd179b0b008","src/lib.rs":"bbc190a6d0f484fd723f9df6c1b2a4596f826e0282ad40ee17a0822ea28a5626","src/reader.rs":"4e0069c24aca9cb99d75075c9b784fa02855ea449d2f1528bea944a4e02a9af5","src/streamer.rs":"4774c2abde1a5b0f4448aac06c62c7927208c12f338c46981f80c98703b54074"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"66c6d85a84c84711f3abddaafb163bc6025a8505701fea8b61bfa961851be757","README.md":"597691eb766c2cbd7a6591bda56d3e70e6836b62b6327fb73497523eabd5b53d","src/events/connectivity.rs":"116993412e200e375c97762980ffb638d2244197fd752b9569b5b20baf574308","src/events/h3.rs":"4799179c4d18403a3936a327c483d110bd058d42061f9cdd39601b3e91500c1b","src/events/mod.rs":"75f57b4717fa9777e19d61b99b6a79164f0e8bca9b4681c3ab11b204320c8c55","src/events/qpack.rs":"5c7267c45e3fb947cdfa946f9f9692d3e3e36a166f70124ba293dc27534267d0","src/events/quic.rs":"88b884f5788c671ffee79a3448f367c18f95ee30531262fcc14310d80e662f4a","src/events/security.rs":"e9852d7de16851b62c3e0a886a2c1a31d237e62574ef88428ef62dd179b0b008","src/lib.rs":"bbc190a6d0f484fd723f9df6c1b2a4596f826e0282ad40ee17a0822ea28a5626","src/reader.rs":"4e0069c24aca9cb99d75075c9b784fa02855ea449d2f1528bea944a4e02a9af5","src/streamer.rs":"4774c2abde1a5b0f4448aac06c62c7927208c12f338c46981f80c98703b54074"},"package":"9c0407438c69b3d99714a796a135cbfb2d60744e4747fb2b46a87acd1c1fcd0e"} \ No newline at end of file
diff --git a/third_party/rust/qlog/Cargo.toml b/third_party/rust/qlog/Cargo.toml
index 665559312f..4a5eb1ff96 100644
--- a/third_party/rust/qlog/Cargo.toml
+++ b/third_party/rust/qlog/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "qlog"
-version = "0.11.0"
+version = "0.12.0"
authors = ["Lucas Pardue <lucaspardue.24.7@gmail.com>"]
description = "qlog data model for QUIC and HTTP/3"
readme = "README.md"
@@ -25,13 +25,13 @@ categories = ["network-programming"]
license = "BSD-2-Clause"
repository = "https://github.com/cloudflare/quiche"
-[dependencies]
-serde_derive = "1.0"
-
[dependencies.serde]
version = "1.0.139"
features = ["derive"]
+[dependencies.serde_derive]
+version = "1.0"
+
[dependencies.serde_json]
version = "1.0"
features = ["preserve_order"]
diff --git a/third_party/rust/qlog/src/events/h3.rs b/third_party/rust/qlog/src/events/h3.rs
index eaf3cadf36..632c80538f 100644
--- a/third_party/rust/qlog/src/events/h3.rs
+++ b/third_party/rust/qlog/src/events/h3.rs
@@ -209,9 +209,8 @@ pub struct H3ParametersRestored {
pub struct H3StreamTypeSet {
pub owner: Option<H3Owner>,
pub stream_id: u64,
-
pub stream_type: H3StreamType,
-
+ pub stream_type_value: Option<u64>,
pub associated_push_id: Option<u64>,
}
diff --git a/third_party/rust/serde/.cargo-checksum.json b/third_party/rust/serde/.cargo-checksum.json
index b8aaa174e0..d55068bd63 100644
--- a/third_party/rust/serde/.cargo-checksum.json
+++ b/third_party/rust/serde/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"3c553cd5adc065a5e6412e518b55ef85f7ffa307171ad4369d016de03a8f163f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"c3ece10a36d19b4e857a770eaf74a2164d220f55fa11947065a3898c1697ecef","build.rs":"f9ba30324b9ce085c903595fb55a5293f8c2348ff36bfe870521b935ae6d105c","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/de/format.rs":"c85071b016df643b161859682d21ce34fa0ebf2a3bdbeeea69859da48f5d934f","src/de/ignored_any.rs":"6480f2b2a83dc4764d01b2eec7309729eef2492eede2e5ee98d23a60b05198eb","src/de/impls.rs":"2857d734176a0b78a41c9358354b0b0b83c6b2d948590be072d98606a8cae9d6","src/de/mod.rs":"7fb7427de1981bfa13af06c898d213a6bc34697148e96cef08d3c447c1999527","src/de/seed.rs":"045d890712a04eb33ffc5a021e5d948a63c89402b8ffeea749df2171b7484f8f","src/de/size_hint.rs":"fff83dc39d30e75e8e611991f9c5399188a1aad23a6462dbca2c8b62655cfedb","src/de/value.rs":"5d8dcae3a98a2203f2c0934adb84dbf741f120f246dfc02aa6d0d10673dc39c7","src/integer128.rs":"29ef30b7d94507b34807090e68173767cdc7aff62edccd38affe69e75338dddc","src/lib.rs":"b16783d3e69a1e3b499d28810823aac2fd92d696c0b511f7bcda0d011399167c","src/macros.rs":"e3486ef4a9a4ed1b27234aa1817ccb25ec0eb026ffc95e2c71c7b917f1f45629","src/private/de.rs":"6557a124fdaf61f9c7cd80163e40f4a453354e45b63a4eb55dafdfe0159f6881","src/private/doc.rs":"9ad740e9ea2eedf861b77116eda9a6fb74bc8553541cd17d1bc5791a3ef3271a","src/private/mod.rs":"b8f0c348621d91dd9da3db83d8877e70bc61ad0a2dc2d6fb57c6fc2c2cbafa26","src/private/ser.rs":"656613691bd8d40cb70a52d4ebe3ee96a993c8a1292d50822d9ca5bdad84426b","src/ser/fmt.rs":"77a5583e5c227ea1982b097ed6378af5c899d43761d71e33440262fd35944695","src/ser/impls.rs":"850619164b399c37cd373d24f5a2c83453f40b34bb978c5722d2c1ae226775b5","src/ser/impossible.rs":"e11b37689ec1395378d546fce74221ca9046d0761744301f12029102fd07e30e","src/ser/mod.rs":"a7fd082203d63cbe4f0fe86d9be16bf4f3b2444653dac6bb61d82e0f4f6b4214","src/std_error.rs":"25a07149e2e468747ffa5a58051c7f93d7b3c0fa0372f012a96c97ec8ab03b97"},"package":"63261df402c67811e9ac6def069e4786148c4563f4b50fd4bf30aa370d626b02"} \ No newline at end of file
+{"files":{"Cargo.toml":"f03b626efae73a6dd42f07d722dad2da3a4add51f4e653e30a6d696853bab209","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"731c044fc5f98b37a89e9049c9214267db98763309cb63146b45c029640f82a3","build.rs":"f9ba30324b9ce085c903595fb55a5293f8c2348ff36bfe870521b935ae6d105c","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/de/format.rs":"c85071b016df643b161859682d21ce34fa0ebf2a3bdbeeea69859da48f5d934f","src/de/ignored_any.rs":"6480f2b2a83dc4764d01b2eec7309729eef2492eede2e5ee98d23a60b05198eb","src/de/impls.rs":"2857d734176a0b78a41c9358354b0b0b83c6b2d948590be072d98606a8cae9d6","src/de/mod.rs":"fc34da49f692803d2c2f131322d9b48ad8e4f39ed10b2b304d6193ab09d621fb","src/de/seed.rs":"045d890712a04eb33ffc5a021e5d948a63c89402b8ffeea749df2171b7484f8f","src/de/size_hint.rs":"fff83dc39d30e75e8e611991f9c5399188a1aad23a6462dbca2c8b62655cfedb","src/de/value.rs":"0fd511a288c20a1b768718f4baadf9c7d4146d276af6a71ba1d0f7679b28644a","src/integer128.rs":"29ef30b7d94507b34807090e68173767cdc7aff62edccd38affe69e75338dddc","src/lib.rs":"638b231a280519f1861ea5f1bfbe97e2394b2f7662a9701b8e57ed95093dd298","src/macros.rs":"e3486ef4a9a4ed1b27234aa1817ccb25ec0eb026ffc95e2c71c7b917f1f45629","src/private/de.rs":"6557a124fdaf61f9c7cd80163e40f4a453354e45b63a4eb55dafdfe0159f6881","src/private/doc.rs":"9ad740e9ea2eedf861b77116eda9a6fb74bc8553541cd17d1bc5791a3ef3271a","src/private/mod.rs":"b8f0c348621d91dd9da3db83d8877e70bc61ad0a2dc2d6fb57c6fc2c2cbafa26","src/private/ser.rs":"656613691bd8d40cb70a52d4ebe3ee96a993c8a1292d50822d9ca5bdad84426b","src/ser/fmt.rs":"77a5583e5c227ea1982b097ed6378af5c899d43761d71e33440262fd35944695","src/ser/impls.rs":"850619164b399c37cd373d24f5a2c83453f40b34bb978c5722d2c1ae226775b5","src/ser/impossible.rs":"e11b37689ec1395378d546fce74221ca9046d0761744301f12029102fd07e30e","src/ser/mod.rs":"a7fd082203d63cbe4f0fe86d9be16bf4f3b2444653dac6bb61d82e0f4f6b4214","src/std_error.rs":"25a07149e2e468747ffa5a58051c7f93d7b3c0fa0372f012a96c97ec8ab03b97"},"package":"3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2"} \ No newline at end of file
diff --git a/third_party/rust/serde/Cargo.toml b/third_party/rust/serde/Cargo.toml
index a11aad026a..6ba68688ac 100644
--- a/third_party/rust/serde/Cargo.toml
+++ b/third_party/rust/serde/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2018"
rust-version = "1.31"
name = "serde"
-version = "1.0.195"
+version = "1.0.197"
authors = [
"Erick Tryzelaar <erick.tryzelaar@gmail.com>",
"David Tolnay <dtolnay@gmail.com>",
@@ -74,4 +74,4 @@ std = []
unstable = []
[target."cfg(any())".dependencies.serde_derive]
-version = "=1.0.195"
+version = "=1.0.197"
diff --git a/third_party/rust/serde/README.md b/third_party/rust/serde/README.md
index 477fd36478..31292944af 100644
--- a/third_party/rust/serde/README.md
+++ b/third_party/rust/serde/README.md
@@ -1,11 +1,11 @@
-# Serde &emsp; [![Build Status]][actions] [![Latest Version]][crates.io] [![serde: rustc 1.31+]][Rust 1.31] [![serde_derive: rustc 1.56+]][Rust 1.56]
+# Serde &emsp; [![Build Status]][actions] [![Latest Version]][crates.io] [![serde msrv]][Rust 1.31] [![serde_derive msrv]][Rust 1.56]
[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/serde/ci.yml?branch=master
[actions]: https://github.com/serde-rs/serde/actions?query=branch%3Amaster
[Latest Version]: https://img.shields.io/crates/v/serde.svg
[crates.io]: https://crates.io/crates/serde
-[serde: rustc 1.31+]: https://img.shields.io/badge/serde-rustc_1.31+-lightgray.svg
-[serde_derive: rustc 1.56+]: https://img.shields.io/badge/serde_derive-rustc_1.56+-lightgray.svg
+[serde msrv]: https://img.shields.io/crates/msrv/serde.svg?label=serde%20msrv&color=lightgray
+[serde_derive msrv]: https://img.shields.io/crates/msrv/serde_derive.svg?label=serde_derive%20msrv&color=lightgray
[Rust 1.31]: https://blog.rust-lang.org/2018/12/06/Rust-1.31-and-rust-2018.html
[Rust 1.56]: https://blog.rust-lang.org/2021/10/21/Rust-1.56.0.html
diff --git a/third_party/rust/serde/src/de/mod.rs b/third_party/rust/serde/src/de/mod.rs
index c9919d92b8..1924fe3d88 100644
--- a/third_party/rust/serde/src/de/mod.rs
+++ b/third_party/rust/serde/src/de/mod.rs
@@ -402,20 +402,20 @@ impl<'a> fmt::Display for Unexpected<'a> {
Bool(b) => write!(formatter, "boolean `{}`", b),
Unsigned(i) => write!(formatter, "integer `{}`", i),
Signed(i) => write!(formatter, "integer `{}`", i),
- Float(f) => write!(formatter, "floating point `{}`", f),
+ Float(f) => write!(formatter, "floating point `{}`", WithDecimalPoint(f)),
Char(c) => write!(formatter, "character `{}`", c),
Str(s) => write!(formatter, "string {:?}", s),
- Bytes(_) => write!(formatter, "byte array"),
- Unit => write!(formatter, "unit value"),
- Option => write!(formatter, "Option value"),
- NewtypeStruct => write!(formatter, "newtype struct"),
- Seq => write!(formatter, "sequence"),
- Map => write!(formatter, "map"),
- Enum => write!(formatter, "enum"),
- UnitVariant => write!(formatter, "unit variant"),
- NewtypeVariant => write!(formatter, "newtype variant"),
- TupleVariant => write!(formatter, "tuple variant"),
- StructVariant => write!(formatter, "struct variant"),
+ Bytes(_) => formatter.write_str("byte array"),
+ Unit => formatter.write_str("unit value"),
+ Option => formatter.write_str("Option value"),
+ NewtypeStruct => formatter.write_str("newtype struct"),
+ Seq => formatter.write_str("sequence"),
+ Map => formatter.write_str("map"),
+ Enum => formatter.write_str("enum"),
+ UnitVariant => formatter.write_str("unit variant"),
+ NewtypeVariant => formatter.write_str("newtype variant"),
+ TupleVariant => formatter.write_str("tuple variant"),
+ StructVariant => formatter.write_str("struct variant"),
Other(other) => formatter.write_str(other),
}
}
@@ -2278,10 +2278,10 @@ impl Display for OneOf {
1 => write!(formatter, "`{}`", self.names[0]),
2 => write!(formatter, "`{}` or `{}`", self.names[0], self.names[1]),
_ => {
- tri!(write!(formatter, "one of "));
+ tri!(formatter.write_str("one of "));
for (i, alt) in self.names.iter().enumerate() {
if i > 0 {
- tri!(write!(formatter, ", "));
+ tri!(formatter.write_str(", "));
}
tri!(write!(formatter, "`{}`", alt));
}
@@ -2290,3 +2290,36 @@ impl Display for OneOf {
}
}
}
+
+struct WithDecimalPoint(f64);
+
+impl Display for WithDecimalPoint {
+ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ struct LookForDecimalPoint<'f, 'a> {
+ formatter: &'f mut fmt::Formatter<'a>,
+ has_decimal_point: bool,
+ }
+
+ impl<'f, 'a> fmt::Write for LookForDecimalPoint<'f, 'a> {
+ fn write_str(&mut self, fragment: &str) -> fmt::Result {
+ self.has_decimal_point |= fragment.contains('.');
+ self.formatter.write_str(fragment)
+ }
+
+ fn write_char(&mut self, ch: char) -> fmt::Result {
+ self.has_decimal_point |= ch == '.';
+ self.formatter.write_char(ch)
+ }
+ }
+
+ let mut writer = LookForDecimalPoint {
+ formatter,
+ has_decimal_point: false,
+ };
+ tri!(write!(writer, "{}", self.0));
+ if !writer.has_decimal_point {
+ tri!(formatter.write_str(".0"));
+ }
+ Ok(())
+ }
+}
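
The new `WithDecimalPoint` wrapper exists because `{}` on an `f64` drops the decimal point for whole numbers, so an `Unexpected::Float` would render as ``floating point `1``` and read like an integer. A minimal illustration of the behaviour it restores (the helper function is made up, not serde API):

    fn with_decimal_point(f: f64) -> String {
        let s = format!("{}", f);
        // Append ".0" when Display produced no decimal point, as the wrapper above does.
        if s.contains('.') { s } else { format!("{}.0", s) }
    }

    fn main() {
        assert_eq!(format!("{}", 1.0_f64), "1");
        assert_eq!(with_decimal_point(1.0), "1.0");
        assert_eq!(with_decimal_point(2.5), "2.5");
    }
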
diff --git a/third_party/rust/serde/src/de/value.rs b/third_party/rust/serde/src/de/value.rs
index b229ebab77..3bc0c71c57 100644
--- a/third_party/rust/serde/src/de/value.rs
+++ b/third_party/rust/serde/src/de/value.rs
@@ -983,7 +983,7 @@ struct ExpectedInSeq(usize);
impl Expected for ExpectedInSeq {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
if self.0 == 1 {
- write!(formatter, "1 element in sequence")
+ formatter.write_str("1 element in sequence")
} else {
write!(formatter, "{} elements in sequence", self.0)
}
@@ -1411,7 +1411,7 @@ struct ExpectedInMap(usize);
impl Expected for ExpectedInMap {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
if self.0 == 1 {
- write!(formatter, "1 element in map")
+ formatter.write_str("1 element in map")
} else {
write!(formatter, "{} elements in map", self.0)
}
diff --git a/third_party/rust/serde/src/lib.rs b/third_party/rust/serde/src/lib.rs
index b0756af6dc..5cf44c1c18 100644
--- a/third_party/rust/serde/src/lib.rs
+++ b/third_party/rust/serde/src/lib.rs
@@ -95,7 +95,7 @@
////////////////////////////////////////////////////////////////////////////////
// Serde types in rustdoc of other crates get linked to here.
-#![doc(html_root_url = "https://docs.rs/serde/1.0.195")]
+#![doc(html_root_url = "https://docs.rs/serde/1.0.197")]
// Support using Serde without the standard library!
#![cfg_attr(not(feature = "std"), no_std)]
// Show which crate feature enables conditionally compiled APIs in documentation.
@@ -130,6 +130,7 @@
clippy::derive_partial_eq_without_eq,
clippy::enum_glob_use,
clippy::explicit_auto_deref,
+ clippy::incompatible_msrv,
clippy::let_underscore_untyped,
clippy::map_err_ignore,
clippy::new_without_default,
@@ -178,16 +179,16 @@ mod lib {
pub use self::core::{cmp, mem, slice};
pub use self::core::cell::{Cell, RefCell};
- pub use self::core::clone::{self, Clone};
+ pub use self::core::clone;
pub use self::core::cmp::Reverse;
- pub use self::core::convert::{self, From, Into};
- pub use self::core::default::{self, Default};
- pub use self::core::fmt::{self, Debug, Display};
+ pub use self::core::convert;
+ pub use self::core::default;
+ pub use self::core::fmt::{self, Debug, Display, Write as FmtWrite};
pub use self::core::marker::{self, PhantomData};
pub use self::core::num::Wrapping;
pub use self::core::ops::{Bound, Range, RangeFrom, RangeInclusive, RangeTo};
- pub use self::core::option::{self, Option};
- pub use self::core::result::{self, Result};
+ pub use self::core::option;
+ pub use self::core::result;
pub use self::core::time::Duration;
#[cfg(all(feature = "alloc", not(feature = "std")))]
diff --git a/third_party/rust/serde_derive/.cargo-checksum.json b/third_party/rust/serde_derive/.cargo-checksum.json
index 3e0519d169..331652acd7 100644
--- a/third_party/rust/serde_derive/.cargo-checksum.json
+++ b/third_party/rust/serde_derive/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"292f527e949377d3d23759746419c447bb8e93603f48d933ef1af34a0bf8b666","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"c3ece10a36d19b4e857a770eaf74a2164d220f55fa11947065a3898c1697ecef","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/bound.rs":"6c5c20785ac95af9480f8d0de35a7e844cc36a16012f6468db148acd03cb15c2","src/de.rs":"c221ab2b94a5d80dccff74a37f3448b3d695656552b452595dc289c73b12fb2b","src/dummy.rs":"9533dfee23f20d92ea75734c739022820c2787ded0d54f459feacdeb770ec912","src/fragment.rs":"6757cb4c3131d4300f093572efc273c4ab5a20e3e1efb54a311dcfa52d0bd6eb","src/internals/ast.rs":"7dc997e4090033bbd1d0bdd870e8bb87b096b7f66cfd02047f6b85ebdd569b12","src/internals/attr.rs":"6584c0a02de0d17993877303f3cc2c1bccf235257632220421f98082d82d387a","src/internals/case.rs":"10c8dda2b32d8c6c6b63cf09cdc63d02375af7e95ecefe8fecb34f93b65191bb","src/internals/check.rs":"d842eb9912fd29311060b67f3bc62c438eb7b5d86093355acb4de7eee02a0ef8","src/internals/ctxt.rs":"83a4e6fbe0e439d578478883594407e03f2f340541be479bdf0b04a202633a37","src/internals/mod.rs":"ed021ca635c18132a0e5c3d90f21b7f65def0a61e946421a30200b5b9ab6ad43","src/internals/receiver.rs":"fe8a480669511b5edcfe71f5dd290cf72ccec54c9016ec85f2ac59dce538077f","src/internals/respan.rs":"899753859c58ce5f532a3ec4584796a52f13ed5a0533191e48c953ba5c1b52ff","src/internals/symbol.rs":"d619e88caa3c7a09b03014257f2b349ee922290062d9b97b4dd19d0e64532690","src/lib.rs":"3fc5a148c35cda8a27bc0a65da41a991d63f8ea0e0d607336082dd0f528750ef","src/pretend.rs":"7facc10a5b805564dd95735ae11118ec17ca6adcc49a59764e7c920e27b9fc4a","src/ser.rs":"e3341471cea9d7e2fb4043e5d1746862beb9a4e25196170879eeac529d460920","src/this.rs":"87818dc80cbb521b51938a653d09daf10aafc220bb10425948de82ad670fcb85"},"package":"46fe8f8603d81ba86327b23a2e9cdf49e1255fb94a4c5f297f6ee0547178ea2c"} \ No newline at end of file
+{"files":{"Cargo.toml":"daf3d8f8efdf30d3575c7d1e1372ff7287891fb95625223e4a8a2f792c4474e1","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"731c044fc5f98b37a89e9049c9214267db98763309cb63146b45c029640f82a3","crates-io.md":"56e988ac4944c45f5bf5051e3827892ed8fb817853d99d9df1fff6621108e270","src/bound.rs":"6c5c20785ac95af9480f8d0de35a7e844cc36a16012f6468db148acd03cb15c2","src/de.rs":"c221ab2b94a5d80dccff74a37f3448b3d695656552b452595dc289c73b12fb2b","src/dummy.rs":"9533dfee23f20d92ea75734c739022820c2787ded0d54f459feacdeb770ec912","src/fragment.rs":"6757cb4c3131d4300f093572efc273c4ab5a20e3e1efb54a311dcfa52d0bd6eb","src/internals/ast.rs":"7dc997e4090033bbd1d0bdd870e8bb87b096b7f66cfd02047f6b85ebdd569b12","src/internals/attr.rs":"6584c0a02de0d17993877303f3cc2c1bccf235257632220421f98082d82d387a","src/internals/case.rs":"10c8dda2b32d8c6c6b63cf09cdc63d02375af7e95ecefe8fecb34f93b65191bb","src/internals/check.rs":"d842eb9912fd29311060b67f3bc62c438eb7b5d86093355acb4de7eee02a0ef8","src/internals/ctxt.rs":"83a4e6fbe0e439d578478883594407e03f2f340541be479bdf0b04a202633a37","src/internals/mod.rs":"ed021ca635c18132a0e5c3d90f21b7f65def0a61e946421a30200b5b9ab6ad43","src/internals/receiver.rs":"fe8a480669511b5edcfe71f5dd290cf72ccec54c9016ec85f2ac59dce538077f","src/internals/respan.rs":"899753859c58ce5f532a3ec4584796a52f13ed5a0533191e48c953ba5c1b52ff","src/internals/symbol.rs":"d619e88caa3c7a09b03014257f2b349ee922290062d9b97b4dd19d0e64532690","src/lib.rs":"7a6c2796244658f62d398ebc6819c4f3064dac4a1ad7c52b40359f9411f1c266","src/pretend.rs":"7facc10a5b805564dd95735ae11118ec17ca6adcc49a59764e7c920e27b9fc4a","src/ser.rs":"e3341471cea9d7e2fb4043e5d1746862beb9a4e25196170879eeac529d460920","src/this.rs":"87818dc80cbb521b51938a653d09daf10aafc220bb10425948de82ad670fcb85"},"package":"7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b"} \ No newline at end of file
diff --git a/third_party/rust/serde_derive/Cargo.toml b/third_party/rust/serde_derive/Cargo.toml
index 66d752cf73..33642ef04c 100644
--- a/third_party/rust/serde_derive/Cargo.toml
+++ b/third_party/rust/serde_derive/Cargo.toml
@@ -12,7 +12,7 @@
[package]
rust-version = "1.56"
name = "serde_derive"
-version = "1.0.195"
+version = "1.0.197"
authors = [
"Erick Tryzelaar <erick.tryzelaar@gmail.com>",
"David Tolnay <dtolnay@gmail.com>",
@@ -44,12 +44,24 @@ proc-macro = true
[dependencies.proc-macro2]
version = "1.0.74"
+features = ["proc-macro"]
+default-features = false
[dependencies.quote]
version = "1.0.35"
+features = ["proc-macro"]
+default-features = false
[dependencies.syn]
version = "2.0.46"
+features = [
+ "clone-impls",
+ "derive",
+ "parsing",
+ "printing",
+ "proc-macro",
+]
+default-features = false
[dev-dependencies.serde]
version = "1"
diff --git a/third_party/rust/serde_derive/README.md b/third_party/rust/serde_derive/README.md
index 477fd36478..31292944af 100644
--- a/third_party/rust/serde_derive/README.md
+++ b/third_party/rust/serde_derive/README.md
@@ -1,11 +1,11 @@
-# Serde &emsp; [![Build Status]][actions] [![Latest Version]][crates.io] [![serde: rustc 1.31+]][Rust 1.31] [![serde_derive: rustc 1.56+]][Rust 1.56]
+# Serde &emsp; [![Build Status]][actions] [![Latest Version]][crates.io] [![serde msrv]][Rust 1.31] [![serde_derive msrv]][Rust 1.56]
[Build Status]: https://img.shields.io/github/actions/workflow/status/serde-rs/serde/ci.yml?branch=master
[actions]: https://github.com/serde-rs/serde/actions?query=branch%3Amaster
[Latest Version]: https://img.shields.io/crates/v/serde.svg
[crates.io]: https://crates.io/crates/serde
-[serde: rustc 1.31+]: https://img.shields.io/badge/serde-rustc_1.31+-lightgray.svg
-[serde_derive: rustc 1.56+]: https://img.shields.io/badge/serde_derive-rustc_1.56+-lightgray.svg
+[serde msrv]: https://img.shields.io/crates/msrv/serde.svg?label=serde%20msrv&color=lightgray
+[serde_derive msrv]: https://img.shields.io/crates/msrv/serde_derive.svg?label=serde_derive%20msrv&color=lightgray
[Rust 1.31]: https://blog.rust-lang.org/2018/12/06/Rust-1.31-and-rust-2018.html
[Rust 1.56]: https://blog.rust-lang.org/2021/10/21/Rust-1.56.0.html
diff --git a/third_party/rust/serde_derive/src/lib.rs b/third_party/rust/serde_derive/src/lib.rs
index 8c4a6cacc7..b91f17b1a9 100644
--- a/third_party/rust/serde_derive/src/lib.rs
+++ b/third_party/rust/serde_derive/src/lib.rs
@@ -13,7 +13,7 @@
//!
//! [https://serde.rs/derive.html]: https://serde.rs/derive.html
-#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.195")]
+#![doc(html_root_url = "https://docs.rs/serde_derive/1.0.197")]
// Ignored clippy lints
#![allow(
// clippy false positive: https://github.com/rust-lang/rust-clippy/issues/7054
diff --git a/third_party/rust/smallvec/.cargo-checksum.json b/third_party/rust/smallvec/.cargo-checksum.json
index f8adf7532e..bdb1fcd312 100644
--- a/third_party/rust/smallvec/.cargo-checksum.json
+++ b/third_party/rust/smallvec/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"964f828a4ed019af9a728f6a0f63dc5860446c7b21abe2be1a0b92ddc82d1140","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"e2a235d68be20996014c00468b369887d2041ce95486625de3cef35b8f2e4acd","debug_metadata/README.md":"4d7f1c1b2c25ce2231ef71864d06e54323867459035b53bc9e00f66a0a44f82e","debug_metadata/smallvec.natvis":"3092ddebd8fffc3486536d7f27f8c5eae3a8a093d45cd8eeb3946ea2b0c35a15","scripts/run_miri.sh":"74a9f9adc43f986e81977b03846f7dd00122a0150bd8ec3fe4842a1a787e0f07","src/arbitrary.rs":"22e55cfbf60374945b30e6d0855129eff67cd8b878cef6fa997e1f4be67b9e3d","src/lib.rs":"aed3176e0c74d7eb1d405ee096a4d1027626ed5f1bb65da4c0ef89f83b8f66ed","src/specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471","src/tests.rs":"d0a70bb7b4e1d0a174f2a195c8ab55280a40589bab7028999afd787b3fff6eae","tests/debugger_visualizer.rs":"185456ad253957fc0c9e904ff8a1135397ac991c29fa3c60f75d8d81f7463022","tests/macro.rs":"22ad4f6f104a599fdcba19cad8834105b8656b212fb6c7573a427d447f5db14f"},"package":"942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a"} \ No newline at end of file
+{"files":{"Cargo.toml":"668bb964a243127d65605bb7a0d8d3c81bcbd8f7656a5b5734766ef534b4abcb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"d82015eae942ee5cf74ace8c3c260ee2c6b5bcbeeb87254d2c72622c747a708a","debug_metadata/README.md":"4d7f1c1b2c25ce2231ef71864d06e54323867459035b53bc9e00f66a0a44f82e","debug_metadata/smallvec.natvis":"3092ddebd8fffc3486536d7f27f8c5eae3a8a093d45cd8eeb3946ea2b0c35a15","scripts/run_miri.sh":"74a9f9adc43f986e81977b03846f7dd00122a0150bd8ec3fe4842a1a787e0f07","src/arbitrary.rs":"22e55cfbf60374945b30e6d0855129eff67cd8b878cef6fa997e1f4be67b9e3d","src/lib.rs":"25fe85b6ae7b3972211bf57aeded4c7b72c47e4d843c7a4ba66908442197b5a0","src/specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471","src/tests.rs":"29c6e5dad62ebfea74e5116ac4a344b127b91cfb769fe9ba8b02b53773cf7ec8","tests/debugger_visualizer.rs":"185456ad253957fc0c9e904ff8a1135397ac991c29fa3c60f75d8d81f7463022","tests/macro.rs":"22ad4f6f104a599fdcba19cad8834105b8656b212fb6c7573a427d447f5db14f"},"package":"e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7"} \ No newline at end of file
diff --git a/third_party/rust/smallvec/Cargo.toml b/third_party/rust/smallvec/Cargo.toml
index 4a69944339..baba15315a 100644
--- a/third_party/rust/smallvec/Cargo.toml
+++ b/third_party/rust/smallvec/Cargo.toml
@@ -12,7 +12,7 @@
[package]
edition = "2018"
name = "smallvec"
-version = "1.11.1"
+version = "1.13.1"
authors = ["The Servo Project Developers"]
description = "'Small vector' optimization: store up to a small number of items on the stack"
documentation = "https://docs.rs/smallvec/"
diff --git a/third_party/rust/smallvec/benches/bench.rs b/third_party/rust/smallvec/benches/bench.rs
index b52ee15504..b6a8b857d3 100644
--- a/third_party/rust/smallvec/benches/bench.rs
+++ b/third_party/rust/smallvec/benches/bench.rs
@@ -1,12 +1,10 @@
#![feature(test)]
#![allow(deprecated)]
-#[macro_use]
-extern crate smallvec;
extern crate test;
use self::test::Bencher;
-use smallvec::{ExtendFromSlice, SmallVec};
+use smallvec::{ExtendFromSlice, smallvec, SmallVec};
const VEC_SIZE: usize = 16;
const SPILLED_SIZE: usize = 100;
diff --git a/third_party/rust/smallvec/src/lib.rs b/third_party/rust/smallvec/src/lib.rs
index 8281fb1808..cadb5d8b15 100644
--- a/third_party/rust/smallvec/src/lib.rs
+++ b/third_party/rust/smallvec/src/lib.rs
@@ -149,8 +149,7 @@ use core::mem::ManuallyDrop;
/// - Create a [`SmallVec`] containing a given list of elements:
///
/// ```
-/// # #[macro_use] extern crate smallvec;
-/// # use smallvec::SmallVec;
+/// # use smallvec::{smallvec, SmallVec};
/// # fn main() {
/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3];
/// assert_eq!(v[0], 1);
@@ -162,8 +161,7 @@ use core::mem::ManuallyDrop;
/// - Create a [`SmallVec`] from a given element and size:
///
/// ```
-/// # #[macro_use] extern crate smallvec;
-/// # use smallvec::SmallVec;
+/// # use smallvec::{smallvec, SmallVec};
/// # fn main() {
/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3];
/// assert_eq!(v, SmallVec::from_buf([1, 1, 1]));
@@ -209,8 +207,7 @@ macro_rules! smallvec {
/// - Create a [`SmallVec`] containing a given list of elements:
///
/// ```
-/// # #[macro_use] extern crate smallvec;
-/// # use smallvec::SmallVec;
+/// # use smallvec::{smallvec_inline, SmallVec};
/// # fn main() {
/// const V: SmallVec<[i32; 3]> = smallvec_inline![1, 2, 3];
/// assert_eq!(V[0], 1);
@@ -222,8 +219,7 @@ macro_rules! smallvec {
/// - Create a [`SmallVec`] from a given element and size:
///
/// ```
-/// # #[macro_use] extern crate smallvec;
-/// # use smallvec::SmallVec;
+/// # use smallvec::{smallvec_inline, SmallVec};
/// # fn main() {
/// const V: SmallVec<[i32; 3]> = smallvec_inline![1; 3];
/// assert_eq!(V, SmallVec::from_buf([1, 1, 1]));
@@ -328,7 +324,7 @@ fn infallible<T>(result: Result<T, CollectionAllocErr>) -> T {
}
/// FIXME: use `Layout::array` when we require a Rust version where it’s stable
-/// https://github.com/rust-lang/rust/issues/55724
+/// <https://github.com/rust-lang/rust/issues/55724>
fn layout_array<T>(n: usize) -> Result<Layout, CollectionAllocErr> {
let size = mem::size_of::<T>()
.checked_mul(n)
@@ -430,7 +426,7 @@ impl<'a, T: 'a + Array> Drop for Drain<'a, T> {
/// An iterator which uses a closure to determine if an element should be removed.
///
/// Returned from [`SmallVec::drain_filter`][1].
-///
+///
/// [1]: struct.SmallVec.html#method.drain_filter
pub struct DrainFilter<'a, T, F>
where
@@ -815,7 +811,7 @@ impl<A: Array> SmallVec<A> {
/// Construct a new `SmallVec` from a `Vec<A::Item>`.
///
- /// Elements will be copied to the inline buffer if vec.capacity() <= Self::inline_capacity().
+ /// Elements will be copied to the inline buffer if `vec.capacity() <= Self::inline_capacity()`.
///
/// ```rust
/// use smallvec::SmallVec;
@@ -970,7 +966,7 @@ impl<A: Array> SmallVec<A> {
}
/// Returns a tuple with (data ptr, len, capacity)
- /// Useful to get all SmallVec properties with a single check of the current storage variant.
+ /// Useful to get all `SmallVec` properties with a single check of the current storage variant.
#[inline]
fn triple(&self) -> (ConstNonNull<A::Item>, usize, usize) {
unsafe {
@@ -1055,13 +1051,12 @@ impl<A: Array> SmallVec<A> {
}
}
-
#[cfg(feature = "drain_filter")]
/// Creates an iterator which uses a closure to determine if an element should be removed.
- ///
+ ///
/// If the closure returns true, the element is removed and yielded. If the closure returns
/// false, the element will remain in the vector and will not be yielded by the iterator.
- ///
+ ///
/// Using this method is equivalent to the following code:
/// ```
/// # use smallvec::SmallVec;
@@ -1076,7 +1071,7 @@ impl<A: Array> SmallVec<A> {
/// i += 1;
/// }
/// }
- ///
+ ///
/// # assert_eq!(vec, SmallVec::<[i32; 8]>::from_slice(&[1i32, 4, 5]));
/// ```
/// ///
@@ -1476,7 +1471,7 @@ impl<A: Array> SmallVec<A> {
}
}
- /// Convert a SmallVec to a Vec, without reallocating if the SmallVec has already spilled onto
+ /// Convert a `SmallVec` to a `Vec`, without reallocating if the `SmallVec` has already spilled onto
/// the heap.
pub fn into_vec(mut self) -> Vec<A::Item> {
if self.spilled() {
@@ -1499,10 +1494,10 @@ impl<A: Array> SmallVec<A> {
self.into_vec().into_boxed_slice()
}
- /// Convert the SmallVec into an `A` if possible. Otherwise return `Err(Self)`.
+ /// Convert the `SmallVec` into an `A` if possible. Otherwise return `Err(Self)`.
///
- /// This method returns `Err(Self)` if the SmallVec is too short (and the `A` contains uninitialized elements),
- /// or if the SmallVec is too long (and all the elements were spilled to the heap).
+ /// This method returns `Err(Self)` if the `SmallVec` is too short (and the `A` contains uninitialized elements),
+ /// or if the `SmallVec` is too long (and all the elements were spilled to the heap).
pub fn into_inner(self) -> Result<A, Self> {
if self.spilled() || self.len() != A::size() {
// Note: A::size, not Self::inline_capacity
@@ -1596,7 +1591,7 @@ impl<A: Array> SmallVec<A> {
///
/// If `new_len` is greater than `len`, the `SmallVec` is extended by the difference, with each
/// additional slot filled with the result of calling the closure `f`. The return values from `f`
- //// will end up in the `SmallVec` in the order they have been generated.
+ /// will end up in the `SmallVec` in the order they have been generated.
///
/// If `new_len` is less than `len`, the `SmallVec` is simply truncated.
///
@@ -1604,7 +1599,7 @@ impl<A: Array> SmallVec<A> {
/// value, use `resize`. If you want to use the `Default` trait to generate values, you can pass
/// `Default::default()` as the second argument.
///
- /// Added for std::vec::Vec compatibility (added in Rust 1.33.0)
+ /// Added for `std::vec::Vec` compatibility (added in Rust 1.33.0)
///
/// ```
/// # use smallvec::{smallvec, SmallVec};
@@ -1667,8 +1662,7 @@ impl<A: Array> SmallVec<A> {
/// # Examples
///
/// ```
- /// # #[macro_use] extern crate smallvec;
- /// # use smallvec::SmallVec;
+ /// # use smallvec::{smallvec, SmallVec};
/// use std::mem;
/// use std::ptr;
///
@@ -2322,7 +2316,7 @@ impl<'a, A: Array> IntoIterator for &'a mut SmallVec<A> {
}
}
-/// Types that can be used as the backing store for a SmallVec
+/// Types that can be used as the backing store for a [`SmallVec`].
pub unsafe trait Array {
/// The type of the array's elements.
type Item;
@@ -2332,7 +2326,7 @@ pub unsafe trait Array {
/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
///
-/// Copied from https://github.com/rust-lang/rust/pull/36355
+/// Copied from <https://github.com/rust-lang/rust/pull/36355>
struct SetLenOnDrop<'a> {
len: &'a mut usize,
local_len: usize,
@@ -2390,9 +2384,23 @@ impl<T, const N: usize> SmallVec<[T; N]> {
data: SmallVecData::from_const(MaybeUninit::new(items)),
}
}
+
+ /// Constructs a new `SmallVec` on the stack from an array without
+ /// copying elements. Also sets the length. The user is responsible
+ /// for ensuring that `len <= N`.
+ ///
+ /// This is a `const` version of [`SmallVec::from_buf_and_len_unchecked`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays.
+ #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+ #[inline]
+ pub const unsafe fn from_const_with_len_unchecked(items: [T; N], len: usize) -> Self {
+ SmallVec {
+ capacity: len,
+ data: SmallVecData::from_const(MaybeUninit::new(items)),
+ }
+ }
}
-#[cfg(all(feature = "const_generics", not(doc)))]
+#[cfg(feature = "const_generics")]
#[cfg_attr(docsrs, doc(cfg(feature = "const_generics")))]
unsafe impl<T, const N: usize> Array for [T; N] {
type Item = T;
@@ -2402,7 +2410,7 @@ unsafe impl<T, const N: usize> Array for [T; N] {
}
}
-#[cfg(any(not(feature = "const_generics"), doc))]
+#[cfg(not(feature = "const_generics"))]
macro_rules! impl_array(
($($size:expr),+) => {
$(
@@ -2415,7 +2423,7 @@ macro_rules! impl_array(
}
);
-#[cfg(any(not(feature = "const_generics"), doc))]
+#[cfg(not(feature = "const_generics"))]
impl_array!(
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 36, 0x40, 0x60, 0x80, 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000,
diff --git a/third_party/rust/smallvec/src/tests.rs b/third_party/rust/smallvec/src/tests.rs
index bb39ddeb31..3eab846bf2 100644
--- a/third_party/rust/smallvec/src/tests.rs
+++ b/third_party/rust/smallvec/src/tests.rs
@@ -72,13 +72,13 @@ pub fn test_double_spill() {
);
}
-/// https://github.com/servo/rust-smallvec/issues/4
+// https://github.com/servo/rust-smallvec/issues/4
#[test]
fn issue_4() {
SmallVec::<[Box<u32>; 2]>::new();
}
-/// https://github.com/servo/rust-smallvec/issues/5
+// https://github.com/servo/rust-smallvec/issues/5
#[test]
fn issue_5() {
assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some());
@@ -833,12 +833,9 @@ fn test_write() {
}
#[cfg(feature = "serde")]
-extern crate bincode;
-
-#[cfg(feature = "serde")]
#[test]
fn test_serde() {
- use self::bincode::{config, deserialize};
+ use bincode::{config, deserialize};
let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new();
small_vec.push(1);
let encoded = config().limit(100).serialize(&small_vec).unwrap();
@@ -925,6 +922,12 @@ fn const_new() {
assert_eq!(v.len(), 2);
assert_eq!(v[0], 1);
assert_eq!(v[1], 4);
+ let v = const_new_with_len();
+ assert_eq!(v.capacity(), 4);
+ assert_eq!(v.len(), 3);
+ assert_eq!(v[0], 2);
+ assert_eq!(v[1], 5);
+ assert_eq!(v[2], 7);
}
#[cfg(feature = "const_new")]
const fn const_new_inner() -> SmallVec<[i32; 4]> {
@@ -938,6 +941,12 @@ const fn const_new_inline_sized() -> SmallVec<[i32; 4]> {
const fn const_new_inline_args() -> SmallVec<[i32; 2]> {
crate::smallvec_inline![1, 4]
}
+#[cfg(feature = "const_new")]
+const fn const_new_with_len() -> SmallVec<[i32; 4]> {
+ unsafe {
+ SmallVec::<[i32; 4]>::from_const_with_len_unchecked([2, 5, 7, 0], 3)
+ }
+}
#[test]
fn empty_macro() {
diff --git a/third_party/rust/thiserror-impl/.cargo-checksum.json b/third_party/rust/thiserror-impl/.cargo-checksum.json
index f091d556de..6adcf117c2 100644
--- a/third_party/rust/thiserror-impl/.cargo-checksum.json
+++ b/third_party/rust/thiserror-impl/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"b387bf85702168709e2619d65e68e1860dca1021ddd91346a09ec8bd1cc80471","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","src/ast.rs":"e2fbc32e91cd462757107c1f8ab2f888f08c865ac71270583f9e11b83e3c32ca","src/attr.rs":"ad5319f053a18ec3841300b2ae553ffa005749ba7099fe4b318126223e4dcbdf","src/expand.rs":"9469753e0949556848183084e0f22521c6300d38ca0de196441446c50f350d3c","src/fmt.rs":"d63d39120c18712596f9f2a1715821148c2becd4d8bad5bc1b307210a84dbe98","src/generics.rs":"2076cde22271be355a8131a77add4b93f83ab0af4317cd2df5471fffa4f95c66","src/lib.rs":"5eea86c771e643328ad9bc3b881cce4bf9d50adae1b33e0d07645bdd9044003d","src/prop.rs":"5ba613e38430831259f20b258f33d57dcb783fbaeeb49e5faffa7b2a7be99e67","src/span.rs":"430460a4fa0d1fa9c627c1ddd575d2b101778fea84217591e1a93a5f6a2a0132","src/valid.rs":"ac95253944fd360d3578d0643a7baabb2cfa6bf9fbced7a6ce1f7b0529a3bb98"},"package":"fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471"} \ No newline at end of file
+{"files":{"Cargo.toml":"0a1dbd3bcb92455b7ed1d661e0c05958a64fd694a18357d3c61a8e1f2cffb9bd","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","src/ast.rs":"e2fbc32e91cd462757107c1f8ab2f888f08c865ac71270583f9e11b83e3c32ca","src/attr.rs":"7451fc0b0a06649e62679c7558e5aeb0e2019400c170cde54c23289ef44188f7","src/expand.rs":"9469753e0949556848183084e0f22521c6300d38ca0de196441446c50f350d3c","src/fmt.rs":"5d1cefc012403c2d4ff7ab2513c0ec559166df4271d5983a6463939b5ec8c3e1","src/generics.rs":"2076cde22271be355a8131a77add4b93f83ab0af4317cd2df5471fffa4f95c66","src/lib.rs":"5eea86c771e643328ad9bc3b881cce4bf9d50adae1b33e0d07645bdd9044003d","src/prop.rs":"5ba613e38430831259f20b258f33d57dcb783fbaeeb49e5faffa7b2a7be99e67","src/span.rs":"430460a4fa0d1fa9c627c1ddd575d2b101778fea84217591e1a93a5f6a2a0132","src/valid.rs":"ac95253944fd360d3578d0643a7baabb2cfa6bf9fbced7a6ce1f7b0529a3bb98"},"package":"a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81"} \ No newline at end of file
diff --git a/third_party/rust/thiserror-impl/Cargo.toml b/third_party/rust/thiserror-impl/Cargo.toml
index fffdf5c3a4..5445c126ec 100644
--- a/third_party/rust/thiserror-impl/Cargo.toml
+++ b/third_party/rust/thiserror-impl/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.56"
name = "thiserror-impl"
-version = "1.0.56"
+version = "1.0.57"
authors = ["David Tolnay <dtolnay@gmail.com>"]
description = "Implementation detail of the `thiserror` crate"
license = "MIT OR Apache-2.0"
diff --git a/third_party/rust/thiserror-impl/src/attr.rs b/third_party/rust/thiserror-impl/src/attr.rs
index 4beb8c9628..269c69eca5 100644
--- a/third_party/rust/thiserror-impl/src/attr.rs
+++ b/third_party/rust/thiserror-impl/src/attr.rs
@@ -1,6 +1,7 @@
use proc_macro2::{Delimiter, Group, Span, TokenStream, TokenTree};
use quote::{format_ident, quote, ToTokens};
use std::collections::BTreeSet as Set;
+use syn::parse::discouraged::Speculative;
use syn::parse::ParseStream;
use syn::{
braced, bracketed, parenthesized, token, Attribute, Error, Ident, Index, LitInt, LitStr, Meta,
@@ -20,6 +21,7 @@ pub struct Display<'a> {
pub original: &'a Attribute,
pub fmt: LitStr,
pub args: TokenStream,
+ pub requires_fmt_machinery: bool,
pub has_bonus_display: bool,
pub implied_bounds: Set<(usize, Trait)>,
}
@@ -103,10 +105,24 @@ fn parse_error_attribute<'a>(attrs: &mut Attrs<'a>, attr: &'a Attribute) -> Resu
return Ok(());
}
+ let fmt: LitStr = input.parse()?;
+
+ let ahead = input.fork();
+ ahead.parse::<Option<Token![,]>>()?;
+ let args = if ahead.is_empty() {
+ input.advance_to(&ahead);
+ TokenStream::new()
+ } else {
+ parse_token_expr(input, false)?
+ };
+
+ let requires_fmt_machinery = !args.is_empty();
+
let display = Display {
original: attr,
- fmt: input.parse()?,
- args: parse_token_expr(input, false)?,
+ fmt,
+ args,
+ requires_fmt_machinery,
has_bonus_display: false,
implied_bounds: Set::new(),
};
@@ -196,8 +212,18 @@ impl ToTokens for Display<'_> {
fn to_tokens(&self, tokens: &mut TokenStream) {
let fmt = &self.fmt;
let args = &self.args;
- tokens.extend(quote! {
- ::core::write!(__formatter, #fmt #args)
+
+ // Currently `write!(f, "text")` produces less efficient code than
+ // `f.write_str("text")`. We recognize the case when the format string
+ // has no braces and no interpolated values, and generate simpler code.
+ tokens.extend(if self.requires_fmt_machinery {
+ quote! {
+ ::core::write!(__formatter, #fmt #args)
+ }
+ } else {
+ quote! {
+ __formatter.write_str(#fmt)
+ }
});
}
}
diff --git a/third_party/rust/thiserror-impl/src/fmt.rs b/third_party/rust/thiserror-impl/src/fmt.rs
index 807dfb9677..b38b7bf1f5 100644
--- a/third_party/rust/thiserror-impl/src/fmt.rs
+++ b/third_party/rust/thiserror-impl/src/fmt.rs
@@ -32,7 +32,10 @@ impl Display<'_> {
}
}
+ self.requires_fmt_machinery = self.requires_fmt_machinery || fmt.contains('}');
+
while let Some(brace) = read.find('{') {
+ self.requires_fmt_machinery = true;
out += &read[..brace + 1];
read = &read[brace + 1..];
if read.starts_with('{') {
diff --git a/third_party/rust/thiserror/.cargo-checksum.json b/third_party/rust/thiserror/.cargo-checksum.json
index cee7027f74..efb4bd5017 100644
--- a/third_party/rust/thiserror/.cargo-checksum.json
+++ b/third_party/rust/thiserror/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"d78abe9a046d804b1c51260cefe516c36dfbb9994edfe47175bbd838cd46f68f","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"106c5a937767d49503e1fc5eae1b924f57f15decd8583720a3c652483e348a64","build.rs":"532f6ac494cdddfad3267067a46969a8052b02c1bd94567361f7103ab0dc8c28","build/probe.rs":"3245569a228727091f335db44c7c2f729729b2dfac9f46c1143eb179439f223d","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/aserror.rs":"cf3c98abb2b9e06afa3c4aba0df14938417c3e330315863437561cbb3573888b","src/display.rs":"0adeeeb524c6bee06de179d54e82a43dc12d2c5b7f69f6fd268ba4611ebf5233","src/lib.rs":"074676e9bbe9402ebe3f41dedcaa289774e47e075d10e2f523d2dcbd8f648f79","src/provide.rs":"8007e22953bacfcc57bb7d12a03fbeb0acece5d2ec889cf55522a4e071d26df3","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/test_backtrace.rs":"a8f038490fb881463c0e8c36557617c47cf2d181f16c00525d4d139c7964fade","tests/test_deprecated.rs":"7b80a10f090a3982da017556d3d71398abcead59afd8278c7b9d9b1f7b66c7b3","tests/test_display.rs":"28e0f938fe0f6354529c35722eff04830451e27718145c27522c9acf7f8a6639","tests/test_error.rs":"d06dca3c38f22d7ce4e27dadd6c0f78e5cefe3a2ebbc5fe44abc9ddd5ee1985f","tests/test_expr.rs":"d35b11040ebc547467f52571051854e3e094d52b8e229fa3d44700d5f40959a2","tests/test_from.rs":"36bd22be7b048cd187a19076aeac1456040f20a0b677b01c6003998b63439ea1","tests/test_generics.rs":"adc61f0d5fe8d53796848d44fb0373be5eab19a1eeb6a7172bc6f0dd7b91199c","tests/test_lints.rs":"c17d79d77edfcdd4b8f6dcdcd1c70ad065cfbc747e1a618ac6343315d0b59ea4","tests/test_option.rs":"ac30d929c019d6c54d1c1792b09e43c18dc0e4123b82051ff9e5db5e63c15e43","tests/test_path.rs":"ef5452c7e828a0179f5ace7e19f95b9762aa887caf10244adbfe36ded712c090","tests/test_source.rs":"f2f04f11bf8a709eddb1c68f113cda0c2be87e56800d6b9d991bedd545b4642f","tests/test_transparent.rs":"cd8d5be14d00d610a1782104bea6c013618501dab5c3625178ecfcf66e31f939","tests/ui/bad-field-attr.rs":"c5b567e3091969a01061843fb2d95c5e1aa3fa81edfeecdf416a84a6fba40aa8","tests/ui/bad-field-attr.stderr":"78f576d5ec66464a77f1cdf0f5bb7dcdf18f7f04f1165983a6239ec59d908ea3","tests/ui/concat-display.rs":"3995bd6b3bdd67df7bb16499775d89600c0dd20895633fe807396a64c117078d","tests/ui/concat-display.stderr":"256dfde61ee689ebe51588b135e2e030bdf95ba5adef1cb59f588c797bbdeef2","tests/ui/duplicate-enum-source.rs":"bfe28ce18042d446a76c7411aa233598211ce1157fdd3cb87bff3b3fa7c33131","tests/ui/duplicate-enum-source.stderr":"3d32fead420b27b4497be49080bc3b78f7f0ba339ead3de6c94e5dc20302c18f","tests/ui/duplicate-fmt.rs":"af53b66445bcce076a114376747f176b42c060a156563a41ccb638ae14c451fd","tests/ui/duplicate-fmt.stderr":"998bb121ce6f1595fd99529a7a1b06451b6bf476924337dce5524a83a7a5f1a1","tests/ui/duplicate-struct-source.rs":"f3d5f8e3d6fccfcdbb630db291353709583a920c6bf46f9f9de9966b67ea4c0f","tests/ui/duplicate-struct-source.stderr":"fb761d76668ac42357cf37b03c0abdbae5de0a828034990850291c9cb6ab766d","tests/ui/duplicate-transparent.rs":"41a9447e85f1a47027023442acde55c3d8610ec46d91b39bd43a42d7a004d747","tests/ui/duplicate-transparent.stderr":"4975abad43e973df158f18098d9bcb9dc39f8e75d3e733ed5d6620d1ee065c11","tests/ui/fallback-impl-with-display.rs":"141a8efbabe3fdac584bec8a61e6cceb58a34a70b825f6277037bf9d591150eb","tests/ui/fallback-impl-with-display.stderr":"1b3dad712b97598fbee70125471de1a8106eb161d42ce1f790ae07be8c8984ba","tests/ui/from-backtrace-backtrace.rs":"0caac64486c0eb9c076553c0502d468fbc477602a9a2595165a1dcd95524e5ff","tests/ui/from-backtrace-backtrace.stderr":"e24156ae0828878f3282341732b6e032eaa8cb4b4db366a6b5437ed0731d40a7","tests/ui/from-not-source.rs":"744a55aeffe11066830159ac023c33aaa5576e313b341fa24440ee13dfe3ac98","tests/ui/from-not-source.stderr":"525038e8b841707b927434cca4549168f73bd305faca17552a0d1fffa542ccc4","tests/ui/invalid-input-impl-anyway.rs":"6de91f71ddf038dffa3b9da33763a2ec3a5aa0047528e19ba998d5efe3aada5b","tests/ui/invalid-input-impl-anyway.stderr":"fa2725053cd87fc37f87546b377e6e5eed95c45e2a960863303b21a1935fdddb","tests/ui/lifetime.rs":"e72e0391695e47fcd07edbf3819f114e468e2097086ec687781c7c8d6b4b7da7","tests/ui/lifetime.stderr":"d889a23f71324afe95dafc5f9d15337fbdbc9977cb8924f0cafe3a3becf4ced7","tests/ui/missing-display.rs":"c1fd1bc0ec0fb103d7f7b128837f717d49485662839899d570b3c983f1332888","tests/ui/missing-display.stderr":"a8de0f1559da9320ee99ef334937d532d9e9f40a32ed7f8ce56fb465628bff96","tests/ui/missing-fmt.rs":"bc9e2830e54c2474ff6c27a766ed3dee88d29e40f93f30e8d64d63233866c17d","tests/ui/missing-fmt.stderr":"9a20ccee9b660fe31a5b3199307b48580bb8305cb9ce33d97d3fc767a0cfc614","tests/ui/no-display.rs":"962245372272d23e9833311c15e73221b3c7da822a2ff90189613af56ffb5c2e","tests/ui/no-display.stderr":"9e2161baf5f66ab22370322f2e66e7633bf04b1ec07ef656e904b984bcc45d09","tests/ui/source-enum-not-error.rs":"3add5e7b4503d964bcae0848904822e1473c1d08c5a146c2df5347314ce1b8df","tests/ui/source-enum-not-error.stderr":"c093580558a259489eef92728d19aeca3b6c27ec17c39a02a75f0a924b095675","tests/ui/source-enum-unnamed-field-not-error.rs":"a98989e908b84a8e6e6dceef02af7bdd1098a444d229950f67ed4f54d55c62e7","tests/ui/source-enum-unnamed-field-not-error.stderr":"45b520f44e6fd10792d7f48e8ca7bc89850aa039278cba7c9f6ea11aa6378494","tests/ui/source-struct-not-error.rs":"09fb7713637242dca9192585a6daeb8d732dc1c1d0fa522b74f1c98618e6d949","tests/ui/source-struct-not-error.stderr":"66fb5fa85d59f11d8b5f7ec99469a843c51943b0010e554bdf56376a0614a2ca","tests/ui/source-struct-unnamed-field-not-error.rs":"eee605a9aafbb093666393e25cef4f7d7b8e90d518b9fadbdbed9685c66ccfcd","tests/ui/source-struct-unnamed-field-not-error.stderr":"38e4bd380ff1637c179b277ea1beb0a1ce688d191e5a9502ee69ab752e9ba70f","tests/ui/transparent-display.rs":"b3c59583eb64b0b5a246444456d03cf52d51bcdc08885023600dbb44fd87e5f2","tests/ui/transparent-display.stderr":"16d538914e0d92026bde4b4bec75660217da9ecc6b621d12d2eb81d33ed1d1da","tests/ui/transparent-enum-many.rs":"2a40a764fb4683bff57973eec61507a6c00f7d4d7a32da6e7bd0190c2e445434","tests/ui/transparent-enum-many.stderr":"f1d78c1d6d8edbef153420db4fb9ca3dc6076fa043b5b1bc0cd291daa417a3ea","tests/ui/transparent-enum-not-error.rs":"f6315993e68bc71d6d4d39afa4d059695308ef785f92cc0d1df3e9ff55be2a9a","tests/ui/transparent-enum-not-error.stderr":"e485c39f861ab66a6a41f0a423b1b13ba277968210284148883b350b5d629ccc","tests/ui/transparent-enum-source.rs":"18f606a98ac0a53f08dc56f5f923b9cbe75d25ed34479c777b48dac305d5968c","tests/ui/transparent-enum-source.stderr":"1b2e0ac53951034575d43ec0396c4e2b3cfb272db2aef8d6baa13a7e1632cc84","tests/ui/transparent-enum-unnamed-field-not-error.rs":"0c720567e9e0f87921055dfa9f607661725377633445e01a4b5048c7a7a50a85","tests/ui/transparent-enum-unnamed-field-not-error.stderr":"6b8ba244eba94006039d10f35bdd7526136bcff4751b13313ab12283d5bdc24c","tests/ui/transparent-struct-many.rs":"72c6b6c1a44c203d3bc68989b2f1ec092531ef75b745432824c3776c290326f6","tests/ui/transparent-struct-many.stderr":"7bd0536dbb54a0ce7d4a8e66ca7624a1b132d8a1d1e4fecca642ec77494ac01c","tests/ui/transparent-struct-not-error.rs":"fd814d3141c9182b1267b558d9daef8dd6e8b6462e1ad42b197f3a1c0703dce2","tests/ui/transparent-struct-not-error.stderr":"bb8b856515b34711c046f195b4267d2bb21309c5d3ac0a39d6660c55dadafb41","tests/ui/transparent-struct-source.rs":"863fa691ed7d27e8767da58d9ee11fd40d6642274b36338ca1074c07964ea2b3","tests/ui/transparent-struct-source.stderr":"267dab65929e67d32347fb467a00b43af931f8205d727d7671938580217fc70e","tests/ui/transparent-struct-unnamed-field-not-error.rs":"fbff5874be44a5dcc347693d7929537256b187dfec467ed72c9968c095228d8d","tests/ui/transparent-struct-unnamed-field-not-error.stderr":"684d4ed4325f2e7fb95c84a6d231585b2be313990c41222fda66b99a84e7b884","tests/ui/unexpected-field-fmt.rs":"29fba7b4d81c642ec8e47cfe053aa515acf9080a86d65e685363a48993becfe3","tests/ui/unexpected-field-fmt.stderr":"20731c4a08af04bed3ff513903adadd690b6bc532b15604557e7f25575a8338f","tests/ui/unexpected-struct-source.rs":"c6cbe882d622635c216feb8290b1bd536ce0ec4feee16bc087667a21b3641d5c","tests/ui/unexpected-struct-source.stderr":"7c8227513478f6cc09e8a28be337c8a0e758a06ca5978d774c91bd43c4a54043","tests/ui/union.rs":"331adff27cebd8b95b03b6742cc8247331fda1f961e1590ed39c8d39f50cf1d8","tests/ui/union.stderr":"5f67ad29753d6fb14bc03aef7d4a1f660ee7796e469c037efbf8b13456934ad3"},"package":"d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad"} \ No newline at end of file
+{"files":{"Cargo.toml":"315a0379d474f762b74a40fb83f8a52b7efe0f5a352e7a7ab0b9d118f32f609e","LICENSE-APACHE":"62c7a1e35f56406896d7aa7ca52d0cc0d272ac022b5d2796e7d6905db8a3636a","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"106c5a937767d49503e1fc5eae1b924f57f15decd8583720a3c652483e348a64","build.rs":"532f6ac494cdddfad3267067a46969a8052b02c1bd94567361f7103ab0dc8c28","build/probe.rs":"3245569a228727091f335db44c7c2f729729b2dfac9f46c1143eb179439f223d","rust-toolchain.toml":"6bbb61302978c736b2da03e4fb40e3beab908f85d533ab46fd541e637b5f3e0f","src/aserror.rs":"cf3c98abb2b9e06afa3c4aba0df14938417c3e330315863437561cbb3573888b","src/display.rs":"0adeeeb524c6bee06de179d54e82a43dc12d2c5b7f69f6fd268ba4611ebf5233","src/lib.rs":"6903e561c2a83901061ed717061040a1d0ffb4296c2c52e19e617e1ca6c07211","src/provide.rs":"8007e22953bacfcc57bb7d12a03fbeb0acece5d2ec889cf55522a4e071d26df3","tests/compiletest.rs":"022a8e400ef813d7ea1875b944549cee5125f6a995dc33e93b48cba3e1b57bd1","tests/test_backtrace.rs":"a8f038490fb881463c0e8c36557617c47cf2d181f16c00525d4d139c7964fade","tests/test_deprecated.rs":"7b80a10f090a3982da017556d3d71398abcead59afd8278c7b9d9b1f7b66c7b3","tests/test_display.rs":"91a6bf704fd78a5565330f438f30ae4088aa8bc7ac5e1412401e440072408c03","tests/test_error.rs":"d06dca3c38f22d7ce4e27dadd6c0f78e5cefe3a2ebbc5fe44abc9ddd5ee1985f","tests/test_expr.rs":"d35b11040ebc547467f52571051854e3e094d52b8e229fa3d44700d5f40959a2","tests/test_from.rs":"36bd22be7b048cd187a19076aeac1456040f20a0b677b01c6003998b63439ea1","tests/test_generics.rs":"adc61f0d5fe8d53796848d44fb0373be5eab19a1eeb6a7172bc6f0dd7b91199c","tests/test_lints.rs":"c17d79d77edfcdd4b8f6dcdcd1c70ad065cfbc747e1a618ac6343315d0b59ea4","tests/test_option.rs":"ac30d929c019d6c54d1c1792b09e43c18dc0e4123b82051ff9e5db5e63c15e43","tests/test_path.rs":"ef5452c7e828a0179f5ace7e19f95b9762aa887caf10244adbfe36ded712c090","tests/test_source.rs":"f2f04f11bf8a709eddb1c68f113cda0c2be87e56800d6b9d991bedd545b4642f","tests/test_transparent.rs":"cd8d5be14d00d610a1782104bea6c013618501dab5c3625178ecfcf66e31f939","tests/ui/bad-field-attr.rs":"c5b567e3091969a01061843fb2d95c5e1aa3fa81edfeecdf416a84a6fba40aa8","tests/ui/bad-field-attr.stderr":"78f576d5ec66464a77f1cdf0f5bb7dcdf18f7f04f1165983a6239ec59d908ea3","tests/ui/concat-display.rs":"3995bd6b3bdd67df7bb16499775d89600c0dd20895633fe807396a64c117078d","tests/ui/concat-display.stderr":"256dfde61ee689ebe51588b135e2e030bdf95ba5adef1cb59f588c797bbdeef2","tests/ui/duplicate-enum-source.rs":"bfe28ce18042d446a76c7411aa233598211ce1157fdd3cb87bff3b3fa7c33131","tests/ui/duplicate-enum-source.stderr":"3d32fead420b27b4497be49080bc3b78f7f0ba339ead3de6c94e5dc20302c18f","tests/ui/duplicate-fmt.rs":"af53b66445bcce076a114376747f176b42c060a156563a41ccb638ae14c451fd","tests/ui/duplicate-fmt.stderr":"998bb121ce6f1595fd99529a7a1b06451b6bf476924337dce5524a83a7a5f1a1","tests/ui/duplicate-struct-source.rs":"f3d5f8e3d6fccfcdbb630db291353709583a920c6bf46f9f9de9966b67ea4c0f","tests/ui/duplicate-struct-source.stderr":"fb761d76668ac42357cf37b03c0abdbae5de0a828034990850291c9cb6ab766d","tests/ui/duplicate-transparent.rs":"41a9447e85f1a47027023442acde55c3d8610ec46d91b39bd43a42d7a004d747","tests/ui/duplicate-transparent.stderr":"4975abad43e973df158f18098d9bcb9dc39f8e75d3e733ed5d6620d1ee065c11","tests/ui/fallback-impl-with-display.rs":"141a8efbabe3fdac584bec8a61e6cceb58a34a70b825f6277037bf9d591150eb","tests/ui/fallback-impl-with-display.stderr":"1b3dad712b97598fbee70125471de1a8106eb161d42ce1f790ae07be8c8984ba","tests/ui/from-backtrace-backtrace.rs":"0caac64486c0eb9c076553c0502d468fbc477602a9a2595165a1dcd95524e5ff","tests/ui/from-backtrace-backtrace.stderr":"e24156ae0828878f3282341732b6e032eaa8cb4b4db366a6b5437ed0731d40a7","tests/ui/from-not-source.rs":"744a55aeffe11066830159ac023c33aaa5576e313b341fa24440ee13dfe3ac98","tests/ui/from-not-source.stderr":"525038e8b841707b927434cca4549168f73bd305faca17552a0d1fffa542ccc4","tests/ui/invalid-input-impl-anyway.rs":"6de91f71ddf038dffa3b9da33763a2ec3a5aa0047528e19ba998d5efe3aada5b","tests/ui/invalid-input-impl-anyway.stderr":"fa2725053cd87fc37f87546b377e6e5eed95c45e2a960863303b21a1935fdddb","tests/ui/lifetime.rs":"e72e0391695e47fcd07edbf3819f114e468e2097086ec687781c7c8d6b4b7da7","tests/ui/lifetime.stderr":"d889a23f71324afe95dafc5f9d15337fbdbc9977cb8924f0cafe3a3becf4ced7","tests/ui/missing-display.rs":"c1fd1bc0ec0fb103d7f7b128837f717d49485662839899d570b3c983f1332888","tests/ui/missing-display.stderr":"a8de0f1559da9320ee99ef334937d532d9e9f40a32ed7f8ce56fb465628bff96","tests/ui/missing-fmt.rs":"bc9e2830e54c2474ff6c27a766ed3dee88d29e40f93f30e8d64d63233866c17d","tests/ui/missing-fmt.stderr":"9a20ccee9b660fe31a5b3199307b48580bb8305cb9ce33d97d3fc767a0cfc614","tests/ui/no-display.rs":"962245372272d23e9833311c15e73221b3c7da822a2ff90189613af56ffb5c2e","tests/ui/no-display.stderr":"c145dbdd39e145b5c6f26f8504fbf1e33efa2eada37e09900b39dd62667f22d7","tests/ui/source-enum-not-error.rs":"3add5e7b4503d964bcae0848904822e1473c1d08c5a146c2df5347314ce1b8df","tests/ui/source-enum-not-error.stderr":"aeba0a8a0084833e470b6be2250370809f53c279ad603232af5302b9de9f8cce","tests/ui/source-enum-unnamed-field-not-error.rs":"a98989e908b84a8e6e6dceef02af7bdd1098a444d229950f67ed4f54d55c62e7","tests/ui/source-enum-unnamed-field-not-error.stderr":"4f3d90525dd462e67f633e83b26acec75d9af3626e40d28ded2c2438e0c73192","tests/ui/source-struct-not-error.rs":"09fb7713637242dca9192585a6daeb8d732dc1c1d0fa522b74f1c98618e6d949","tests/ui/source-struct-not-error.stderr":"b45eb66f078fec68d44feff1f8d6a676c341e9e9d9acb35daa58ec1ea20f9dae","tests/ui/source-struct-unnamed-field-not-error.rs":"eee605a9aafbb093666393e25cef4f7d7b8e90d518b9fadbdbed9685c66ccfcd","tests/ui/source-struct-unnamed-field-not-error.stderr":"10e408f71c1b61880b1d52739f222ec58a66be70a1df17e44c536fe0f9ffe2a6","tests/ui/transparent-display.rs":"b3c59583eb64b0b5a246444456d03cf52d51bcdc08885023600dbb44fd87e5f2","tests/ui/transparent-display.stderr":"16d538914e0d92026bde4b4bec75660217da9ecc6b621d12d2eb81d33ed1d1da","tests/ui/transparent-enum-many.rs":"2a40a764fb4683bff57973eec61507a6c00f7d4d7a32da6e7bd0190c2e445434","tests/ui/transparent-enum-many.stderr":"f1d78c1d6d8edbef153420db4fb9ca3dc6076fa043b5b1bc0cd291daa417a3ea","tests/ui/transparent-enum-not-error.rs":"f6315993e68bc71d6d4d39afa4d059695308ef785f92cc0d1df3e9ff55be2a9a","tests/ui/transparent-enum-not-error.stderr":"3a5fe056cd4566f402b03cb591394e0ba85bd74da53df3c8d50bda4a05c2e5ba","tests/ui/transparent-enum-source.rs":"18f606a98ac0a53f08dc56f5f923b9cbe75d25ed34479c777b48dac305d5968c","tests/ui/transparent-enum-source.stderr":"1b2e0ac53951034575d43ec0396c4e2b3cfb272db2aef8d6baa13a7e1632cc84","tests/ui/transparent-enum-unnamed-field-not-error.rs":"0c720567e9e0f87921055dfa9f607661725377633445e01a4b5048c7a7a50a85","tests/ui/transparent-enum-unnamed-field-not-error.stderr":"6d84e9a7f4e88daba12931a6c1508be60bb19125d06e44fa2fa205126d953bb1","tests/ui/transparent-struct-many.rs":"72c6b6c1a44c203d3bc68989b2f1ec092531ef75b745432824c3776c290326f6","tests/ui/transparent-struct-many.stderr":"7bd0536dbb54a0ce7d4a8e66ca7624a1b132d8a1d1e4fecca642ec77494ac01c","tests/ui/transparent-struct-not-error.rs":"fd814d3141c9182b1267b558d9daef8dd6e8b6462e1ad42b197f3a1c0703dce2","tests/ui/transparent-struct-not-error.stderr":"ac7634ea72096d8a5c1a91fd4f1b45ef870130a2698d9ae7c6530cec2f9799d5","tests/ui/transparent-struct-source.rs":"863fa691ed7d27e8767da58d9ee11fd40d6642274b36338ca1074c07964ea2b3","tests/ui/transparent-struct-source.stderr":"267dab65929e67d32347fb467a00b43af931f8205d727d7671938580217fc70e","tests/ui/transparent-struct-unnamed-field-not-error.rs":"fbff5874be44a5dcc347693d7929537256b187dfec467ed72c9968c095228d8d","tests/ui/transparent-struct-unnamed-field-not-error.stderr":"ea99d5422348c2588ad7374360e2a24649f040b9c5614c9308eff958f61960ec","tests/ui/unexpected-field-fmt.rs":"29fba7b4d81c642ec8e47cfe053aa515acf9080a86d65e685363a48993becfe3","tests/ui/unexpected-field-fmt.stderr":"20731c4a08af04bed3ff513903adadd690b6bc532b15604557e7f25575a8338f","tests/ui/unexpected-struct-source.rs":"c6cbe882d622635c216feb8290b1bd536ce0ec4feee16bc087667a21b3641d5c","tests/ui/unexpected-struct-source.stderr":"7c8227513478f6cc09e8a28be337c8a0e758a06ca5978d774c91bd43c4a54043","tests/ui/union.rs":"331adff27cebd8b95b03b6742cc8247331fda1f961e1590ed39c8d39f50cf1d8","tests/ui/union.stderr":"5f67ad29753d6fb14bc03aef7d4a1f660ee7796e469c037efbf8b13456934ad3"},"package":"1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b"} \ No newline at end of file
diff --git a/third_party/rust/thiserror/Cargo.toml b/third_party/rust/thiserror/Cargo.toml
index 7871761242..bb7315063c 100644
--- a/third_party/rust/thiserror/Cargo.toml
+++ b/third_party/rust/thiserror/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.56"
name = "thiserror"
-version = "1.0.56"
+version = "1.0.57"
authors = ["David Tolnay <dtolnay@gmail.com>"]
description = "derive(Error)"
documentation = "https://docs.rs/thiserror"
@@ -32,7 +32,7 @@ rustdoc-args = ["--generate-link-to-definition"]
targets = ["x86_64-unknown-linux-gnu"]
[dependencies.thiserror-impl]
-version = "=1.0.56"
+version = "=1.0.57"
[dev-dependencies.anyhow]
version = "1.0.73"
diff --git a/third_party/rust/thiserror/src/lib.rs b/third_party/rust/thiserror/src/lib.rs
index 73e6e21736..717cdc6f19 100644
--- a/third_party/rust/thiserror/src/lib.rs
+++ b/third_party/rust/thiserror/src/lib.rs
@@ -228,7 +228,7 @@
//!
//! [`anyhow`]: https://github.com/dtolnay/anyhow
-#![doc(html_root_url = "https://docs.rs/thiserror/1.0.56")]
+#![doc(html_root_url = "https://docs.rs/thiserror/1.0.57")]
#![allow(
clippy::module_name_repetitions,
clippy::needless_lifetimes,
diff --git a/third_party/rust/thiserror/tests/test_display.rs b/third_party/rust/thiserror/tests/test_display.rs
index 6f603882eb..95a210f0b6 100644
--- a/third_party/rust/thiserror/tests/test_display.rs
+++ b/third_party/rust/thiserror/tests/test_display.rs
@@ -1,4 +1,4 @@
-#![allow(clippy::uninlined_format_args)]
+#![allow(clippy::needless_raw_string_hashes, clippy::uninlined_format_args)]
use std::fmt::{self, Display};
use thiserror::Error;
@@ -301,3 +301,58 @@ fn test_keyword() {
assert("error: 1", Error);
}
+
+#[test]
+fn test_str_special_chars() {
+ #[derive(Error, Debug)]
+ pub enum Error {
+ #[error("brace left {{")]
+ BraceLeft,
+ #[error("brace left 2 \x7B\x7B")]
+ BraceLeft2,
+ #[error("brace left 3 \u{7B}\u{7B}")]
+ BraceLeft3,
+ #[error("brace right }}")]
+ BraceRight,
+ #[error("brace right 2 \x7D\x7D")]
+ BraceRight2,
+ #[error("brace right 3 \u{7D}\u{7D}")]
+ BraceRight3,
+ #[error(
+ "new_\
+line"
+ )]
+ NewLine,
+ #[error("escape24 \u{78}")]
+ Escape24,
+ }
+
+ assert("brace left {", Error::BraceLeft);
+ assert("brace left 2 {", Error::BraceLeft2);
+ assert("brace left 3 {", Error::BraceLeft3);
+ assert("brace right }", Error::BraceRight);
+ assert("brace right 2 }", Error::BraceRight2);
+ assert("brace right 3 }", Error::BraceRight3);
+ assert("new_line", Error::NewLine);
+ assert("escape24 x", Error::Escape24);
+}
+
+#[test]
+fn test_raw_str() {
+ #[derive(Error, Debug)]
+ pub enum Error {
+ #[error(r#"raw brace left {{"#)]
+ BraceLeft,
+ #[error(r#"raw brace left 2 \x7B"#)]
+ BraceLeft2,
+ #[error(r#"raw brace right }}"#)]
+ BraceRight,
+ #[error(r#"raw brace right 2 \x7D"#)]
+ BraceRight2,
+ }
+
+ assert(r#"raw brace left {"#, Error::BraceLeft);
+ assert(r#"raw brace left 2 \x7B"#, Error::BraceLeft2);
+ assert(r#"raw brace right }"#, Error::BraceRight);
+ assert(r#"raw brace right 2 \x7D"#, Error::BraceRight2);
+}
diff --git a/third_party/rust/thiserror/tests/ui/no-display.stderr b/third_party/rust/thiserror/tests/ui/no-display.stderr
index 0f47c24b62..88d0092678 100644
--- a/third_party/rust/thiserror/tests/ui/no-display.stderr
+++ b/third_party/rust/thiserror/tests/ui/no-display.stderr
@@ -15,3 +15,6 @@ note: the trait `std::fmt::Display` must be implemented
|
| pub trait Display {
| ^^^^^^^^^^^^^^^^^
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the following trait defines an item `as_display`, perhaps you need to implement it:
+ candidate #1: `AsDisplay`
diff --git a/third_party/rust/thiserror/tests/ui/source-enum-not-error.stderr b/third_party/rust/thiserror/tests/ui/source-enum-not-error.stderr
index 4c44742d54..649d77df81 100644
--- a/third_party/rust/thiserror/tests/ui/source-enum-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/source-enum-not-error.stderr
@@ -2,10 +2,7 @@ error[E0599]: the method `as_dyn_error` exists for reference `&NotError`, but it
--> tests/ui/source-enum-not-error.rs:9:14
|
4 | pub struct NotError;
- | -------------------
- | |
- | doesn't satisfy `NotError: AsDynError<'_>`
- | doesn't satisfy `NotError: std::error::Error`
+ | ------------------- doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error`
...
9 | Broken { source: NotError },
| ^^^^^^ method cannot be called on `&NotError` due to unsatisfied trait bounds
@@ -20,3 +17,6 @@ note: the trait `std::error::Error` must be implemented
|
| pub trait Error: Debug + Display {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it:
+ candidate #1: `AsDynError`
diff --git a/third_party/rust/thiserror/tests/ui/source-enum-unnamed-field-not-error.stderr b/third_party/rust/thiserror/tests/ui/source-enum-unnamed-field-not-error.stderr
index da6d225f86..a1fe2b5b53 100644
--- a/third_party/rust/thiserror/tests/ui/source-enum-unnamed-field-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/source-enum-unnamed-field-not-error.stderr
@@ -2,10 +2,7 @@ error[E0599]: the method `as_dyn_error` exists for reference `&NotError`, but it
--> tests/ui/source-enum-unnamed-field-not-error.rs:9:14
|
4 | pub struct NotError;
- | -------------------
- | |
- | doesn't satisfy `NotError: AsDynError<'_>`
- | doesn't satisfy `NotError: std::error::Error`
+ | ------------------- doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error`
...
9 | Broken(#[source] NotError),
| ^^^^^^ method cannot be called on `&NotError` due to unsatisfied trait bounds
@@ -20,3 +17,6 @@ note: the trait `std::error::Error` must be implemented
|
| pub trait Error: Debug + Display {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it:
+ candidate #1: `AsDynError`
diff --git a/third_party/rust/thiserror/tests/ui/source-struct-not-error.stderr b/third_party/rust/thiserror/tests/ui/source-struct-not-error.stderr
index b98460fcbe..07cd67ac64 100644
--- a/third_party/rust/thiserror/tests/ui/source-struct-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/source-struct-not-error.stderr
@@ -2,11 +2,7 @@ error[E0599]: the method `as_dyn_error` exists for struct `NotError`, but its tr
--> tests/ui/source-struct-not-error.rs:9:5
|
4 | struct NotError;
- | ---------------
- | |
- | method `as_dyn_error` not found for this struct
- | doesn't satisfy `NotError: AsDynError<'_>`
- | doesn't satisfy `NotError: std::error::Error`
+ | --------------- method `as_dyn_error` not found for this struct because it doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error`
...
9 | source: NotError,
| ^^^^^^ method cannot be called on `NotError` due to unsatisfied trait bounds
@@ -19,3 +15,6 @@ note: the trait `std::error::Error` must be implemented
|
| pub trait Error: Debug + Display {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it:
+ candidate #1: `AsDynError`
diff --git a/third_party/rust/thiserror/tests/ui/source-struct-unnamed-field-not-error.stderr b/third_party/rust/thiserror/tests/ui/source-struct-unnamed-field-not-error.stderr
index a23f26823f..2022ea67cd 100644
--- a/third_party/rust/thiserror/tests/ui/source-struct-unnamed-field-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/source-struct-unnamed-field-not-error.stderr
@@ -2,11 +2,7 @@ error[E0599]: the method `as_dyn_error` exists for struct `NotError`, but its tr
--> tests/ui/source-struct-unnamed-field-not-error.rs:8:26
|
4 | struct NotError;
- | ---------------
- | |
- | method `as_dyn_error` not found for this struct
- | doesn't satisfy `NotError: AsDynError<'_>`
- | doesn't satisfy `NotError: std::error::Error`
+ | --------------- method `as_dyn_error` not found for this struct because it doesn't satisfy `NotError: AsDynError<'_>` or `NotError: std::error::Error`
...
8 | pub struct ErrorStruct(#[source] NotError);
| ^^^^^^ method cannot be called on `NotError` due to unsatisfied trait bounds
@@ -19,3 +15,6 @@ note: the trait `std::error::Error` must be implemented
|
| pub trait Error: Debug + Display {
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ = help: items from traits can only be used if the trait is implemented and in scope
+ = note: the following trait defines an item `as_dyn_error`, perhaps you need to implement it:
+ candidate #1: `AsDynError`
diff --git a/third_party/rust/thiserror/tests/ui/transparent-enum-not-error.stderr b/third_party/rust/thiserror/tests/ui/transparent-enum-not-error.stderr
index 9be51434a6..bb836d4e8d 100644
--- a/third_party/rust/thiserror/tests/ui/transparent-enum-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/transparent-enum-not-error.stderr
@@ -7,10 +7,7 @@ error[E0599]: the method `as_dyn_error` exists for reference `&String`, but its
::: $RUST/alloc/src/string.rs
|
| pub struct String {
- | -----------------
- | |
- | doesn't satisfy `String: AsDynError<'_>`
- | doesn't satisfy `String: std::error::Error`
+ | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error`
|
= note: the following trait bounds were not satisfied:
`String: std::error::Error`
diff --git a/third_party/rust/thiserror/tests/ui/transparent-enum-unnamed-field-not-error.stderr b/third_party/rust/thiserror/tests/ui/transparent-enum-unnamed-field-not-error.stderr
index 3d23c3a0e5..f337c592ee 100644
--- a/third_party/rust/thiserror/tests/ui/transparent-enum-unnamed-field-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/transparent-enum-unnamed-field-not-error.stderr
@@ -7,10 +7,7 @@ error[E0599]: the method `as_dyn_error` exists for reference `&String`, but its
::: $RUST/alloc/src/string.rs
|
| pub struct String {
- | -----------------
- | |
- | doesn't satisfy `String: AsDynError<'_>`
- | doesn't satisfy `String: std::error::Error`
+ | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error`
|
= note: the following trait bounds were not satisfied:
`String: std::error::Error`
diff --git a/third_party/rust/thiserror/tests/ui/transparent-struct-not-error.stderr b/third_party/rust/thiserror/tests/ui/transparent-struct-not-error.stderr
index d67a694467..ee50d03a7b 100644
--- a/third_party/rust/thiserror/tests/ui/transparent-struct-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/transparent-struct-not-error.stderr
@@ -7,10 +7,7 @@ error[E0599]: the method `as_dyn_error` exists for struct `String`, but its trai
::: $RUST/alloc/src/string.rs
|
| pub struct String {
- | -----------------
- | |
- | doesn't satisfy `String: AsDynError<'_>`
- | doesn't satisfy `String: std::error::Error`
+ | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error`
|
= note: the following trait bounds were not satisfied:
`String: std::error::Error`
diff --git a/third_party/rust/thiserror/tests/ui/transparent-struct-unnamed-field-not-error.stderr b/third_party/rust/thiserror/tests/ui/transparent-struct-unnamed-field-not-error.stderr
index f715a15175..c3d6c0023d 100644
--- a/third_party/rust/thiserror/tests/ui/transparent-struct-unnamed-field-not-error.stderr
+++ b/third_party/rust/thiserror/tests/ui/transparent-struct-unnamed-field-not-error.stderr
@@ -7,10 +7,7 @@ error[E0599]: the method `as_dyn_error` exists for struct `String`, but its trai
::: $RUST/alloc/src/string.rs
|
| pub struct String {
- | -----------------
- | |
- | doesn't satisfy `String: AsDynError<'_>`
- | doesn't satisfy `String: std::error::Error`
+ | ----------------- doesn't satisfy `String: AsDynError<'_>` or `String: std::error::Error`
|
= note: the following trait bounds were not satisfied:
`String: std::error::Error`
diff --git a/third_party/rust/unicode-bidi/.appveyor.yml b/third_party/rust/unicode-bidi/.appveyor.yml
new file mode 100644
index 0000000000..1bd43a1145
--- /dev/null
+++ b/third_party/rust/unicode-bidi/.appveyor.yml
@@ -0,0 +1,19 @@
+install:
+ - appveyor DownloadFile https://win.rustup.rs/ -FileName rustup-init.exe
+ - rustup-init -yv --default-toolchain nightly
+ - set PATH=%PATH%;%USERPROFILE%\.cargo\bin
+ - rustc -V
+ - cargo -V
+ - git submodule update --init --recursive
+
+build: false
+
+environment:
+ RUST_BACKTRACE: full
+
+test_script:
+ - cargo build --verbose --all
+ - cargo doc --verbose --all --no-deps
+
+ - cargo test --verbose --all
+ - cargo test --verbose --all --features serde
diff --git a/third_party/rust/unicode-bidi/.cargo-checksum.json b/third_party/rust/unicode-bidi/.cargo-checksum.json
index 3628dc8f6f..fa0b5995c0 100644
--- a/third_party/rust/unicode-bidi/.cargo-checksum.json
+++ b/third_party/rust/unicode-bidi/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"AUTHORS":"1ff3a7c8519b29544bb28ba9b1e7502df0cb764051fb9a1172e60006aa2b8dcc","COPYRIGHT":"edb20b474f6cbd4f4db066b54a9e0f687d0009d309412a63431189b59b8e2a07","Cargo.toml":"9cd0be282dbaeacf5d1fdf07096114c7b3f16f275755f30a0d2e873ab1fbc150","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"81d3dc6d894a68894d982760b0a907f9dcbb0da179a8063ed9de1d2257518957","src/char_data/mod.rs":"0622df8ce4b4de60aea7e4787635d3187f79f7a3f9001e3d209f58fd07d03887","src/char_data/tables.rs":"50faf4eef73c831a38b735309ff3415e9f65992a0474ff5c055138f91c91ee16","src/data_source.rs":"36fa0785e51c549c1f72f09040cfe515b848d1b23fb30d469770a6b4b17b49df","src/deprecated.rs":"46c5a8bb1e6aa6193eec8269891d4cbbb5fd92214eb55eac7ea5e7ca193386aa","src/explicit.rs":"afa7599674fc8daad2939e5987ec5d937ed9fdbb78b8492b1e137db88d0a3af7","src/format_chars.rs":"678399fec3f4bfaf4093f38cfdb8956288313386dc3511dab9fb58164e8dc01b","src/implicit.rs":"e96484178d1bab97439b2c0cf4b3a8d6ee18459b9de64499aa07607aa304be0c","src/level.rs":"921fb7b8960f6cc703a51936075302db513927b8c5d5d760536b6ff70ddab0dd","src/lib.rs":"ca09c7dedc56ec139fa92fec26c3983346a3b6456992acdfbfe18b324174e0d8","src/prepare.rs":"c4aaad603f5c1be5c4364265aac7590335dc234288a4151e0f30bcefe532e14d","src/utf16.rs":"30d31c4d8c814315b48b56a2dfb755b8b442dde23f76429c6df1af754999fe3b"},"package":"08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75"} \ No newline at end of file
+{"files":{".appveyor.yml":"15bdeea0e836ac2ccbb259cde1509a0673a73300e90e970f3e533b189234b6fd",".github/workflows/main.yml":"e0bee93284a8b39c9d419038bfa72a6389ebdae39ce55c40624e764ac1c98a9e",".rustfmt.toml":"168c973274f3f5946e90cac6ae0f017d0832a5c830872d9d3b9b387ad6c1a81e","AUTHORS":"1ff3a7c8519b29544bb28ba9b1e7502df0cb764051fb9a1172e60006aa2b8dcc","COPYRIGHT":"edb20b474f6cbd4f4db066b54a9e0f687d0009d309412a63431189b59b8e2a07","Cargo.lock":"8842f03d0fcea88aa1546244d0455834732603175b293218f8e9a9f44c297b7c","Cargo.toml":"099454ebee9b081080e1521eccbe447db30b17ac36e9e655ed1d0d1e20e657fb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"81d3dc6d894a68894d982760b0a907f9dcbb0da179a8063ed9de1d2257518957","src/char_data/mod.rs":"8cbdcaacddb3dd9b70d615693fa73d0e7dca6332102a95f0d3ce447df7645284","src/char_data/tables.rs":"8adf126131f573a3b6d2c35849c1cc13c831c9b55c4d3fcb5a3961d8ed7a0d44","src/data_source.rs":"36fa0785e51c549c1f72f09040cfe515b848d1b23fb30d469770a6b4b17b49df","src/deprecated.rs":"f94c0e75dec7e70cb9802e26b7f82fe618dcdd50e9973927bacd4eccc6899c62","src/explicit.rs":"86c3c55bf2cc90aab1411aac6cf05de505ca74e44a76fe829572dd7dc4dd2aa3","src/format_chars.rs":"678399fec3f4bfaf4093f38cfdb8956288313386dc3511dab9fb58164e8dc01b","src/implicit.rs":"8d5b003464aee3f333785c6170a884945251f39601e4ea658e669a2ad575d588","src/level.rs":"ce1eaa9940f1b90bc59aba296488b8cd128aefeb4b6b2e3ecc34da26c569150b","src/lib.rs":"9dff9c105f481a03823de6ad9a0a11733af019649ae211644061d5a525670244","src/prepare.rs":"aeb8b88cfb2d2e6b74473f5903205dd3683d57abcc8801de7b9fdea6a432a0fe","src/utf16.rs":"12ee177127a0b5b0350a1fcc1edf7387c26b51ec5654f724629aab723881c313"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/unicode-bidi/.github/workflows/main.yml b/third_party/rust/unicode-bidi/.github/workflows/main.yml
new file mode 100644
index 0000000000..303bac8b97
--- /dev/null
+++ b/third_party/rust/unicode-bidi/.github/workflows/main.yml
@@ -0,0 +1,49 @@
+name: CI
+
+on:
+ push:
+ branches: ['master', 'auto']
+ pull_request:
+
+jobs:
+ Test:
+ strategy:
+ matrix:
+ os: [ubuntu-latest]
+ rust: [1.47.0, stable, beta, nightly]
+ runs-on: ${{ matrix.os }}
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ matrix.rust }}
+ - name: Unpin dependencies except on MSRV
+ if: matrix.rust != '1.47.0'
+ run: cargo update
+ - run: cargo build --all-targets
+ - run: cargo test
+ - run: cargo test --features "serde"
+ - run: cargo test --no-default-features
+ - run: cargo test --no-default-features --features=hardcoded-data
+ Fmt:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: dtolnay/rust-toolchain@stable
+ with:
+ components: rustfmt
+ - run: cargo fmt --check
+
+ build_result:
+ name: homu build finished
+ runs-on: ubuntu-latest
+ needs:
+ - "Test"
+ - "Fmt"
+ steps:
+ - name: Mark the job as successful
+ run: exit 0
+ if: success()
+ - name: Mark the job as unsuccessful
+ run: exit 1
+ if: "!success()"
diff --git a/third_party/rust/unicode-bidi/.rustfmt.toml b/third_party/rust/unicode-bidi/.rustfmt.toml
new file mode 100644
index 0000000000..e416686ee5
--- /dev/null
+++ b/third_party/rust/unicode-bidi/.rustfmt.toml
@@ -0,0 +1 @@
+array_width = 80
diff --git a/third_party/rust/unicode-bidi/Cargo.lock b/third_party/rust/unicode-bidi/Cargo.lock
new file mode 100644
index 0000000000..63f01ebdcc
--- /dev/null
+++ b/third_party/rust/unicode-bidi/Cargo.lock
@@ -0,0 +1,175 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+[[package]]
+name = "flame"
+version = "0.2.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_json 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)",
+ "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "flamer"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.109 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "itoa"
+version = "1.0.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "lazy_static"
+version = "0.2.11"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "libc"
+version = "0.2.149"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.65"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.30"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.65 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "redox_syscall"
+version = "0.1.57"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "ryu"
+version = "1.0.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "serde"
+version = "1.0.156"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde_derive 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_derive"
+version = "1.0.156"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.65 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "syn 1.0.109 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_json"
+version = "1.0.99"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "itoa 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ryu 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "serde_test"
+version = "1.0.175"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "serde 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "proc-macro2 1.0.65 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 1.0.30 (registry+https://github.com/rust-lang/crates.io-index)",
+ "unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "thread-id"
+version = "3.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.149 (registry+https://github.com/rust-lang/crates.io-index)",
+ "redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-bidi"
+version = "0.3.13"
+dependencies = [
+ "flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "flamer 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_test 1.0.175 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[metadata]
+"checksum flame 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1fc2706461e1ee94f55cab2ed2e3d34ae9536cfa830358ef80acff1a3dacab30"
+"checksum flamer 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "36b732da54fd4ea34452f2431cf464ac7be94ca4b339c9cd3d3d12eb06fe7aab"
+"checksum itoa 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38"
+"checksum lazy_static 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "76f033c7ad61445c5b347c7382dd1237847eb1bce590fe50365dcb33d546be73"
+"checksum libc 0.2.149 (registry+https://github.com/rust-lang/crates.io-index)" = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
+"checksum proc-macro2 1.0.65 (registry+https://github.com/rust-lang/crates.io-index)" = "92de25114670a878b1261c79c9f8f729fb97e95bac93f6312f583c60dd6a1dfe"
+"checksum quote 1.0.30 (registry+https://github.com/rust-lang/crates.io-index)" = "5907a1b7c277254a8b15170f6e7c97cfa60ee7872a3217663bb81151e48184bb"
+"checksum redox_syscall 0.1.57 (registry+https://github.com/rust-lang/crates.io-index)" = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
+"checksum ryu 1.0.15 (registry+https://github.com/rust-lang/crates.io-index)" = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741"
+"checksum serde 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)" = "314b5b092c0ade17c00142951e50ced110ec27cea304b1037c6969246c2469a4"
+"checksum serde_derive 1.0.156 (registry+https://github.com/rust-lang/crates.io-index)" = "d7e29c4601e36bcec74a223228dce795f4cd3616341a4af93520ca1a837c087d"
+"checksum serde_json 1.0.99 (registry+https://github.com/rust-lang/crates.io-index)" = "46266871c240a00b8f503b877622fe33430b3c7d963bdc0f2adc511e54a1eae3"
+"checksum serde_test 1.0.175 (registry+https://github.com/rust-lang/crates.io-index)" = "29baf0f77ca9ad9c6ed46e1b408b5e0f30b5184bcd66884e7f6d36bd7a65a8a4"
+"checksum syn 1.0.109 (registry+https://github.com/rust-lang/crates.io-index)" = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+"checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1"
+"checksum unicode-ident 1.0.12 (registry+https://github.com/rust-lang/crates.io-index)" = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
+"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/third_party/rust/unicode-bidi/Cargo.toml b/third_party/rust/unicode-bidi/Cargo.toml
index 29be4e5a71..584d471c8a 100644
--- a/third_party/rust/unicode-bidi/Cargo.toml
+++ b/third_party/rust/unicode-bidi/Cargo.toml
@@ -11,6 +11,7 @@
[package]
edition = "2018"
+rust-version = "1.47.0"
name = "unicode-bidi"
version = "0.3.15"
authors = ["The Servo Project Developers"]
@@ -61,8 +62,13 @@ features = ["derive"]
optional = true
default-features = false
-[dev-dependencies.serde_test]
-version = ">=0.8, <2.0"
+[dependencies.smallvec]
+version = ">=1.13"
+features = ["union"]
+optional = true
+
+[dev-dependencies]
+serde_test = ">=0.8, <2.0"
[features]
bench_it = []
diff --git a/third_party/rust/unicode-bidi/src/char_data/mod.rs b/third_party/rust/unicode-bidi/src/char_data/mod.rs
index 4edf5b8f4c..543b0ed8fd 100644
--- a/third_party/rust/unicode-bidi/src/char_data/mod.rs
+++ b/third_party/rust/unicode-bidi/src/char_data/mod.rs
@@ -59,10 +59,7 @@ pub(crate) fn bidi_matched_opening_bracket(c: char) -> Option<BidiMatchedOpening
}
pub fn is_rtl(bidi_class: BidiClass) -> bool {
- match bidi_class {
- RLE | RLO | RLI => true,
- _ => false,
- }
+ matches!(bidi_class, RLE | RLO | RLI)
}
#[cfg(feature = "hardcoded-data")]
diff --git a/third_party/rust/unicode-bidi/src/char_data/tables.rs b/third_party/rust/unicode-bidi/src/char_data/tables.rs
index ecdcf496d1..f10265d214 100644
--- a/third_party/rust/unicode-bidi/src/char_data/tables.rs
+++ b/third_party/rust/unicode-bidi/src/char_data/tables.rs
@@ -45,7 +45,7 @@ pub enum BidiClass {
use self::BidiClass::*;
#[cfg(feature = "hardcoded-data")]
-pub const bidi_class_table: &'static [(char, char, BidiClass)] = &[
+pub const bidi_class_table: &[(char, char, BidiClass)] = &[
('\u{0}', '\u{8}', BN), ('\u{9}', '\u{9}', S), ('\u{a}', '\u{a}', B), ('\u{b}', '\u{b}', S),
('\u{c}', '\u{c}', WS), ('\u{d}', '\u{d}', B), ('\u{e}', '\u{1b}', BN), ('\u{1c}', '\u{1e}', B),
('\u{1f}', '\u{1f}', S), ('\u{20}', '\u{20}', WS), ('\u{21}', '\u{22}', ON), ('\u{23}',
@@ -516,7 +516,7 @@ pub const bidi_class_table: &'static [(char, char, BidiClass)] = &[
'\u{e01ef}', NSM), ('\u{f0000}', '\u{ffffd}', L), ('\u{100000}', '\u{10fffd}', L)
];
-pub const bidi_pairs_table: &'static [(char, char, Option<char>)] = &[
+pub const bidi_pairs_table: &[(char, char, Option<char>)] = &[
('\u{28}', '\u{29}', None), ('\u{5b}', '\u{5d}', None), ('\u{7b}', '\u{7d}', None), ('\u{f3a}',
'\u{f3b}', None), ('\u{f3c}', '\u{f3d}', None), ('\u{169b}', '\u{169c}', None), ('\u{2045}',
'\u{2046}', None), ('\u{207d}', '\u{207e}', None), ('\u{208d}', '\u{208e}', None), ('\u{2308}',
diff --git a/third_party/rust/unicode-bidi/src/deprecated.rs b/third_party/rust/unicode-bidi/src/deprecated.rs
index 74a24f5b8b..c903663e99 100644
--- a/third_party/rust/unicode-bidi/src/deprecated.rs
+++ b/third_party/rust/unicode-bidi/src/deprecated.rs
@@ -9,8 +9,6 @@
//! This module holds deprecated assets only.
-use alloc::vec::Vec;
-
use super::*;
/// Find the level runs within a line and return them in visual order.
@@ -71,10 +69,8 @@ pub fn visual_runs(line: Range<usize>, levels: &[Level]) -> Vec<LevelRun> {
// Found the start of a sequence. Now find the end.
let mut seq_end = seq_start + 1;
- while seq_end < run_count {
- if levels[runs[seq_end].start] < max_level {
- break;
- }
+
+ while seq_end < run_count && levels[runs[seq_end].start] >= max_level {
seq_end += 1;
}
@@ -83,6 +79,7 @@ pub fn visual_runs(line: Range<usize>, levels: &[Level]) -> Vec<LevelRun> {
seq_start = seq_end;
}
+
max_level
.lower(1)
.expect("Lowering embedding level below zero");
diff --git a/third_party/rust/unicode-bidi/src/explicit.rs b/third_party/rust/unicode-bidi/src/explicit.rs
index d4ad897b54..5760ab8ece 100644
--- a/third_party/rust/unicode-bidi/src/explicit.rs
+++ b/third_party/rust/unicode-bidi/src/explicit.rs
@@ -11,19 +11,25 @@
//!
//! <http://www.unicode.org/reports/tr9/#Explicit_Levels_and_Directions>
-use alloc::vec::Vec;
+#[cfg(feature = "smallvec")]
+use smallvec::{smallvec, SmallVec};
use super::char_data::{
is_rtl,
BidiClass::{self, *},
};
use super::level::Level;
+use super::prepare::removed_by_x9;
+use super::LevelRunVec;
use super::TextSource;
-/// Compute explicit embedding levels for one paragraph of text (X1-X8).
+/// Compute explicit embedding levels for one paragraph of text (X1-X8), and identify
+/// level runs (BD7) for use when determining Isolating Run Sequences (X10).
///
/// `processing_classes[i]` must contain the `BidiClass` of the char at byte index `i`,
/// for each char in `text`.
+///
+/// `runs` returns the list of level runs (BD7) of the text.
#[cfg_attr(feature = "flame_it", flamer::flame)]
pub fn compute<'a, T: TextSource<'a> + ?Sized>(
text: &'a T,
@@ -31,35 +37,44 @@ pub fn compute<'a, T: TextSource<'a> + ?Sized>(
original_classes: &[BidiClass],
levels: &mut [Level],
processing_classes: &mut [BidiClass],
+ runs: &mut LevelRunVec,
) {
assert_eq!(text.len(), original_classes.len());
// <http://www.unicode.org/reports/tr9/#X1>
- let mut stack = DirectionalStatusStack::new();
- stack.push(para_level, OverrideStatus::Neutral);
+ #[cfg(feature = "smallvec")]
+ let mut stack: SmallVec<[Status; 8]> = smallvec![Status {
+ level: para_level,
+ status: OverrideStatus::Neutral,
+ }];
+ #[cfg(not(feature = "smallvec"))]
+ let mut stack = vec![Status {
+ level: para_level,
+ status: OverrideStatus::Neutral,
+ }];
let mut overflow_isolate_count = 0u32;
let mut overflow_embedding_count = 0u32;
let mut valid_isolate_count = 0u32;
+ let mut current_run_level = Level::ltr();
+ let mut current_run_start = 0;
+
for (i, len) in text.indices_lengths() {
+ let last = stack.last().unwrap();
+
match original_classes[i] {
// Rules X2-X5c
RLE | LRE | RLO | LRO | RLI | LRI | FSI => {
- let last_level = stack.last().level;
-
// <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
- levels[i] = last_level;
+ levels[i] = last.level;
// X5a-X5c: Isolate initiators get the level of the last entry on the stack.
- let is_isolate = match original_classes[i] {
- RLI | LRI | FSI => true,
- _ => false,
- };
+ let is_isolate = matches!(original_classes[i], RLI | LRI | FSI);
if is_isolate {
// Redundant due to "Retaining explicit formatting characters" step.
- // levels[i] = last_level;
- match stack.last().status {
+ // levels[i] = last.level;
+ match last.status {
OverrideStatus::RTL => processing_classes[i] = R,
OverrideStatus::LTR => processing_classes[i] = L,
_ => {}
@@ -67,22 +82,25 @@ pub fn compute<'a, T: TextSource<'a> + ?Sized>(
}
let new_level = if is_rtl(original_classes[i]) {
- last_level.new_explicit_next_rtl()
+ last.level.new_explicit_next_rtl()
} else {
- last_level.new_explicit_next_ltr()
+ last.level.new_explicit_next_ltr()
};
+
if new_level.is_ok() && overflow_isolate_count == 0 && overflow_embedding_count == 0
{
let new_level = new_level.unwrap();
- stack.push(
- new_level,
- match original_classes[i] {
+
+ stack.push(Status {
+ level: new_level,
+ status: match original_classes[i] {
RLO => OverrideStatus::RTL,
LRO => OverrideStatus::LTR,
RLI | LRI | FSI => OverrideStatus::Isolate,
_ => OverrideStatus::Neutral,
},
- );
+ });
+
if is_isolate {
valid_isolate_count += 1;
} else {
@@ -110,21 +128,21 @@ pub fn compute<'a, T: TextSource<'a> + ?Sized>(
overflow_isolate_count -= 1;
} else if valid_isolate_count > 0 {
overflow_embedding_count = 0;
- loop {
- // Pop everything up to and including the last Isolate status.
- match stack.vec.pop() {
- None
- | Some(Status {
- status: OverrideStatus::Isolate,
- ..
- }) => break,
- _ => continue,
- }
- }
+
+ while !matches!(
+ stack.pop(),
+ None | Some(Status {
+ status: OverrideStatus::Isolate,
+ ..
+ })
+ ) {}
+
valid_isolate_count -= 1;
}
- let last = stack.last();
+
+ let last = stack.last().unwrap();
levels[i] = last.level;
+
match last.status {
OverrideStatus::RTL => processing_classes[i] = R,
OverrideStatus::LTR => processing_classes[i] = L,
@@ -138,11 +156,12 @@ pub fn compute<'a, T: TextSource<'a> + ?Sized>(
// do nothing
} else if overflow_embedding_count > 0 {
overflow_embedding_count -= 1;
- } else if stack.last().status != OverrideStatus::Isolate && stack.vec.len() >= 2 {
- stack.vec.pop();
+ } else if last.status != OverrideStatus::Isolate && stack.len() >= 2 {
+ stack.pop();
}
+
// <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
- levels[i] = stack.last().level;
+ levels[i] = stack.last().unwrap().level;
// X9 part of retaining explicit formatting characters.
processing_classes[i] = BN;
}
@@ -153,8 +172,8 @@ pub fn compute<'a, T: TextSource<'a> + ?Sized>(
// <http://www.unicode.org/reports/tr9/#X6>
_ => {
- let last = stack.last();
levels[i] = last.level;
+
// This condition is not in the spec, but I am pretty sure that is a spec bug.
// https://www.unicode.org/L2/L2023/23014-amd-to-uax9.pdf
if original_classes[i] != BN {
@@ -172,6 +191,26 @@ pub fn compute<'a, T: TextSource<'a> + ?Sized>(
levels[i + j] = levels[i];
processing_classes[i + j] = processing_classes[i];
}
+
+ // Identify level runs to be passed to prepare::isolating_run_sequences().
+ if i == 0 {
+ // Initialize for the first (or only) run.
+ current_run_level = levels[i];
+ } else {
+ // Check if we need to start a new level run.
+ // <https://www.unicode.org/reports/tr9/#BD7>
+ if !removed_by_x9(original_classes[i]) && levels[i] != current_run_level {
+ // End the last run and start a new one.
+ runs.push(current_run_start..i);
+ current_run_level = levels[i];
+ current_run_start = i;
+ }
+ }
+ }
+
+ // Append the trailing level run, if non-empty.
+ if levels.len() > current_run_start {
+ runs.push(current_run_start..levels.len());
}
}
@@ -188,23 +227,3 @@ enum OverrideStatus {
LTR,
Isolate,
}
-
-struct DirectionalStatusStack {
- vec: Vec<Status>,
-}
-
-impl DirectionalStatusStack {
- fn new() -> Self {
- DirectionalStatusStack {
- vec: Vec::with_capacity(Level::max_explicit_depth() as usize + 2),
- }
- }
-
- fn push(&mut self, level: Level, status: OverrideStatus) {
- self.vec.push(Status { level, status });
- }
-
- fn last(&self) -> &Status {
- self.vec.last().unwrap()
- }
-}
diff --git a/third_party/rust/unicode-bidi/src/implicit.rs b/third_party/rust/unicode-bidi/src/implicit.rs
index 0311053c0a..334afec049 100644
--- a/third_party/rust/unicode-bidi/src/implicit.rs
+++ b/third_party/rust/unicode-bidi/src/implicit.rs
@@ -9,8 +9,11 @@
//! 3.3.4 - 3.3.6. Resolve implicit levels and types.
+#[cfg(not(feature = "smallvec"))]
use alloc::vec::Vec;
use core::cmp::max;
+#[cfg(feature = "smallvec")]
+use smallvec::SmallVec;
use super::char_data::BidiClass::{self, *};
use super::level::Level;
@@ -39,7 +42,13 @@ pub fn resolve_weak<'a, T: TextSource<'a> + ?Sized>(
// The previous class for the purposes of rule W1, not tracking changes from any other rules.
let mut prev_class_before_w1 = sequence.sos;
let mut last_strong_is_al = false;
+ #[cfg(feature = "smallvec")]
+ let mut et_run_indices = SmallVec::<[usize; 8]>::new(); // for W5
+ #[cfg(not(feature = "smallvec"))]
let mut et_run_indices = Vec::new(); // for W5
+ #[cfg(feature = "smallvec")]
+ let mut bn_run_indices = SmallVec::<[usize; 8]>::new(); // for W5 + <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
+ #[cfg(not(feature = "smallvec"))]
let mut bn_run_indices = Vec::new(); // for W5 + <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
for (run_index, level_run) in sequence.runs.iter().enumerate() {
@@ -177,7 +186,7 @@ pub fn resolve_weak<'a, T: TextSource<'a> + ?Sized>(
_ => {
// <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
// If there was a BN run before this, that's now a part of this ET run.
- et_run_indices.extend(&bn_run_indices);
+ et_run_indices.extend(bn_run_indices.clone());
// In case this is followed by an EN.
et_run_indices.push(i);
@@ -224,26 +233,29 @@ pub fn resolve_weak<'a, T: TextSource<'a> + ?Sized>(
// W7. If the previous strong char was L, change EN to L.
let mut last_strong_is_l = sequence.sos == L;
- for run in &sequence.runs {
- for i in run.clone() {
- match processing_classes[i] {
- EN if last_strong_is_l => {
- processing_classes[i] = L;
- }
- L => {
- last_strong_is_l = true;
- }
- R | AL => {
- last_strong_is_l = false;
- }
- // <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
- // Already scanning past BN here.
- _ => {}
+ for i in sequence.runs.iter().cloned().flatten() {
+ match processing_classes[i] {
+ EN if last_strong_is_l => {
+ processing_classes[i] = L;
}
+ L => {
+ last_strong_is_l = true;
+ }
+ R | AL => {
+ last_strong_is_l = false;
+ }
+ // <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
+ // Already scanning past BN here.
+ _ => {}
}
}
}
+#[cfg(feature = "smallvec")]
+type BracketPairVec = SmallVec<[BracketPair; 8]>;
+#[cfg(not(feature = "smallvec"))]
+type BracketPairVec = Vec<BracketPair>;
+
/// 3.3.5 Resolving Neutral Types
///
/// <http://www.unicode.org/reports/tr9/#Resolving_Neutral_Types>
@@ -267,7 +279,14 @@ pub fn resolve_neutral<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
// > Identify the bracket pairs in the current isolating run sequence according to BD16.
// We use processing_classes, not original_classes, due to BD14/BD15
- let bracket_pairs = identify_bracket_pairs(text, data_source, sequence, processing_classes);
+ let mut bracket_pairs = BracketPairVec::new();
+ identify_bracket_pairs(
+ text,
+ data_source,
+ sequence,
+ processing_classes,
+ &mut bracket_pairs,
+ );
// > For each bracket-pair element in the list of pairs of text positions
//
@@ -308,7 +327,7 @@ pub fn resolve_neutral<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
found_e = true;
} else if class == not_e {
found_not_e = true;
- } else if class == BidiClass::EN || class == BidiClass::AN {
+ } else if matches!(class, BidiClass::EN | BidiClass::AN) {
// > Within this scope, bidirectional types EN and AN are treated as R.
if e == BidiClass::L {
found_not_e = true;
@@ -337,15 +356,15 @@ pub fn resolve_neutral<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
.iter_backwards_from(pair.start, pair.start_run)
.map(|i| processing_classes[i])
.find(|class| {
- *class == BidiClass::L
- || *class == BidiClass::R
- || *class == BidiClass::EN
- || *class == BidiClass::AN
+ matches!(
+ class,
+ BidiClass::L | BidiClass::R | BidiClass::EN | BidiClass::AN
+ )
})
.unwrap_or(sequence.sos);
// > Within this scope, bidirectional types EN and AN are treated as R.
- if previous_strong == BidiClass::EN || previous_strong == BidiClass::AN {
+ if matches!(previous_strong, BidiClass::EN | BidiClass::AN) {
previous_strong = BidiClass::R;
}
@@ -413,6 +432,9 @@ pub fn resolve_neutral<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
let mut prev_class = sequence.sos;
while let Some(mut i) = indices.next() {
// Process sequences of NI characters.
+ #[cfg(feature = "smallvec")]
+ let mut ni_run = SmallVec::<[usize; 8]>::new();
+ #[cfg(not(feature = "smallvec"))]
let mut ni_run = Vec::new();
// The BN is for <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
if is_NI(processing_classes[i]) || processing_classes[i] == BN {
@@ -484,9 +506,12 @@ fn identify_bracket_pairs<'a, T: TextSource<'a> + ?Sized, D: BidiDataSource>(
data_source: &D,
run_sequence: &IsolatingRunSequence,
original_classes: &[BidiClass],
-) -> Vec<BracketPair> {
- let mut ret = vec![];
- let mut stack = vec![];
+ bracket_pairs: &mut BracketPairVec,
+) {
+ #[cfg(feature = "smallvec")]
+ let mut stack = SmallVec::<[(char, usize, usize); 8]>::new();
+ #[cfg(not(feature = "smallvec"))]
+ let mut stack = Vec::new();
for (run_index, level_run) in run_sequence.runs.iter().enumerate() {
for (i, ch) in text.subrange(level_run.clone()).char_indices() {
@@ -532,7 +557,7 @@ fn identify_bracket_pairs<'a, T: TextSource<'a> + ?Sized, D: BidiDataSource>(
start_run: element.2,
end_run: run_index,
};
- ret.push(pair);
+ bracket_pairs.push(pair);
// > Pop the stack through the current stack element inclusively.
stack.truncate(stack_index);
@@ -545,8 +570,7 @@ fn identify_bracket_pairs<'a, T: TextSource<'a> + ?Sized, D: BidiDataSource>(
}
// > Sort the list of pairs of text positions in ascending order based on
// > the text position of the opening paired bracket.
- ret.sort_by_key(|r| r.start);
- ret
+ bracket_pairs.sort_by_key(|r| r.start);
}
/// 3.3.6 Resolving Implicit Levels
@@ -555,11 +579,11 @@ fn identify_bracket_pairs<'a, T: TextSource<'a> + ?Sized, D: BidiDataSource>(
///
/// <http://www.unicode.org/reports/tr9/#Resolving_Implicit_Levels>
#[cfg_attr(feature = "flame_it", flamer::flame)]
-pub fn resolve_levels(original_classes: &[BidiClass], levels: &mut [Level]) -> Level {
+pub fn resolve_levels(processing_classes: &[BidiClass], levels: &mut [Level]) -> Level {
let mut max_level = Level::ltr();
- assert_eq!(original_classes.len(), levels.len());
+ assert_eq!(processing_classes.len(), levels.len());
for i in 0..levels.len() {
- match (levels[i].is_rtl(), original_classes[i]) {
+ match (levels[i].is_rtl(), processing_classes[i]) {
(false, AN) | (false, EN) => levels[i].raise(2).expect("Level number error"),
(false, R) | (true, L) | (true, EN) | (true, AN) => {
levels[i].raise(1).expect("Level number error")
@@ -578,8 +602,5 @@ pub fn resolve_levels(original_classes: &[BidiClass], levels: &mut [Level]) -> L
/// <http://www.unicode.org/reports/tr9/#NI>
#[allow(non_snake_case)]
fn is_NI(class: BidiClass) -> bool {
- match class {
- B | S | WS | ON | FSI | LRI | RLI | PDI => true,
- _ => false,
- }
+ matches!(class, B | S | WS | ON | FSI | LRI | RLI | PDI)
}
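
The implicit.rs hunks above repeat one cfg-gated alias trick: scratch buffers become SmallVec when the optional smallvec feature is enabled and stay Vec otherwise. A hedged sketch of the pattern (IndexVec and collect_even are illustrative names, not from the crate; the smallvec crate is assumed to be available as an optional dependency):

#[cfg(feature = "smallvec")]
type IndexVec = smallvec::SmallVec<[usize; 8]>;
#[cfg(not(feature = "smallvec"))]
type IndexVec = Vec<usize>;

fn collect_even(upto: usize) -> IndexVec {
    // SmallVec and Vec share the push/as_slice surface used here, so the body
    // compiles unchanged either way; with smallvec enabled the first 8 entries
    // live inline and short runs avoid a heap allocation.
    let mut v = IndexVec::new();
    for i in (0..upto).step_by(2) {
        v.push(i);
    }
    v
}

fn main() {
    assert_eq!(collect_even(10).as_slice(), &[0, 2, 4, 6, 8]);
}
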
diff --git a/third_party/rust/unicode-bidi/src/level.rs b/third_party/rust/unicode-bidi/src/level.rs
index ef4f6d9e40..5ece0251a5 100644
--- a/third_party/rust/unicode-bidi/src/level.rs
+++ b/third_party/rust/unicode-bidi/src/level.rs
@@ -13,9 +13,10 @@
//!
//! <http://www.unicode.org/reports/tr9/#BD2>
-use alloc::string::{String, ToString};
-use alloc::vec::Vec;
-use core::convert::{From, Into};
+use alloc::{
+ string::{String, ToString},
+ vec::Vec,
+};
use core::slice;
use super::char_data::BidiClass;
@@ -219,11 +220,11 @@ pub fn has_rtl(levels: &[Level]) -> bool {
levels.iter().any(|&lvl| lvl.is_rtl())
}
-impl Into<u8> for Level {
+impl From<Level> for u8 {
/// Convert to the level number
#[inline]
- fn into(self) -> u8 {
- self.number()
+ fn from(val: Level) -> Self {
+ val.number()
}
}
@@ -244,7 +245,7 @@ impl<'a> PartialEq<&'a str> for Level {
}
/// Used for matching levels in conformance tests
-impl<'a> PartialEq<String> for Level {
+impl PartialEq<String> for Level {
#[inline]
fn eq(&self, s: &String) -> bool {
self == &s.as_str()
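
The level.rs change above swaps a hand-written `impl Into<u8> for Level` for `impl From<Level> for u8`. A small sketch of why that direction is preferred (the Level here is a stand-in type, not the crate's):

// From gives Into for free through the standard blanket impl, so callers can use
// either `u8::from(level)` or `level.into()`; a manual Into impl only provides the latter.
#[derive(Copy, Clone)]
struct Level(u8);

impl From<Level> for u8 {
    fn from(level: Level) -> u8 {
        level.0
    }
}

fn main() {
    let level = Level(3);
    let via_into: u8 = level.into();
    let via_from = u8::from(level);
    assert_eq!(via_into, via_from);
}
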
diff --git a/third_party/rust/unicode-bidi/src/lib.rs b/third_party/rust/unicode-bidi/src/lib.rs
index 1072b67fe0..489927588a 100644
--- a/third_party/rust/unicode-bidi/src/lib.rs
+++ b/third_party/rust/unicode-bidi/src/lib.rs
@@ -71,6 +71,8 @@
extern crate std;
#[macro_use]
extern crate alloc;
+#[cfg(feature = "smallvec")]
+extern crate smallvec;
pub mod data_source;
pub mod deprecated;
@@ -86,7 +88,7 @@ mod prepare;
pub use crate::char_data::{BidiClass, UNICODE_VERSION};
pub use crate::data_source::BidiDataSource;
pub use crate::level::{Level, LTR_LEVEL, RTL_LEVEL};
-pub use crate::prepare::LevelRun;
+pub use crate::prepare::{LevelRun, LevelRunVec};
#[cfg(feature = "hardcoded-data")]
pub use crate::char_data::{bidi_class, HardcodedBidiData};
@@ -99,6 +101,8 @@ use core::cmp;
use core::iter::repeat;
use core::ops::Range;
use core::str::CharIndices;
+#[cfg(feature = "smallvec")]
+use smallvec::SmallVec;
use crate::format_chars as chars;
use crate::BidiClass::*;
@@ -244,8 +248,14 @@ struct InitialInfoExt<'text> {
/// Parallel to base.paragraphs, records whether each paragraph is "pure LTR" that
/// requires no further bidi processing (i.e. there are no RTL characters or bidi
- /// control codes present).
- pure_ltr: Vec<bool>,
+ /// control codes present), and whether any bidi isolation controls are present.
+ flags: Vec<ParagraphInfoFlags>,
+}
+
+#[derive(PartialEq, Debug)]
+struct ParagraphInfoFlags {
+ is_pure_ltr: bool,
+ has_isolate_controls: bool,
}
impl<'text> InitialInfoExt<'text> {
@@ -265,12 +275,12 @@ impl<'text> InitialInfoExt<'text> {
default_para_level: Option<Level>,
) -> InitialInfoExt<'a> {
let mut paragraphs = Vec::<ParagraphInfo>::new();
- let mut pure_ltr = Vec::<bool>::new();
- let (original_classes, _, _) = compute_initial_info(
+ let mut flags = Vec::<ParagraphInfoFlags>::new();
+ let (original_classes, _, _, _) = compute_initial_info(
data_source,
text,
default_para_level,
- Some((&mut paragraphs, &mut pure_ltr)),
+ Some((&mut paragraphs, &mut flags)),
);
InitialInfoExt {
@@ -279,7 +289,7 @@ impl<'text> InitialInfoExt<'text> {
original_classes,
paragraphs,
},
- pure_ltr,
+ flags,
}
}
}
@@ -295,16 +305,19 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
data_source: &D,
text: &'a T,
default_para_level: Option<Level>,
- mut split_paragraphs: Option<(&mut Vec<ParagraphInfo>, &mut Vec<bool>)>,
-) -> (Vec<BidiClass>, Level, bool) {
+ mut split_paragraphs: Option<(&mut Vec<ParagraphInfo>, &mut Vec<ParagraphInfoFlags>)>,
+) -> (Vec<BidiClass>, Level, bool, bool) {
let mut original_classes = Vec::with_capacity(text.len());
// The stack contains the starting code unit index for each nested isolate we're inside.
+ #[cfg(feature = "smallvec")]
+ let mut isolate_stack = SmallVec::<[usize; 8]>::new();
+ #[cfg(not(feature = "smallvec"))]
let mut isolate_stack = Vec::new();
debug_assert!(
- if let Some((ref paragraphs, ref pure_ltr)) = split_paragraphs {
- paragraphs.is_empty() && pure_ltr.is_empty()
+ if let Some((ref paragraphs, ref flags)) = split_paragraphs {
+ paragraphs.is_empty() && flags.is_empty()
} else {
true
}
@@ -316,6 +329,8 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
// Per-paragraph flag: can subsequent processing be skipped? Set to false if any
// RTL characters or bidi control characters are encountered in the paragraph.
let mut is_pure_ltr = true;
+ // Set to true if any bidi isolation controls are present in the paragraph.
+ let mut has_isolate_controls = false;
#[cfg(feature = "flame_it")]
flame::start("compute_initial_info(): iter text.char_indices()");
@@ -334,7 +349,7 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
match class {
B => {
- if let Some((ref mut paragraphs, ref mut pure_ltr)) = split_paragraphs {
+ if let Some((ref mut paragraphs, ref mut flags)) = split_paragraphs {
// P1. Split the text into separate paragraphs. The paragraph separator is kept
// with the previous paragraph.
let para_end = i + len;
@@ -343,7 +358,10 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
// P3. If no character is found in p2, set the paragraph level to zero.
level: para_level.unwrap_or(LTR_LEVEL),
});
- pure_ltr.push(is_pure_ltr);
+ flags.push(ParagraphInfoFlags {
+ is_pure_ltr,
+ has_isolate_controls,
+ });
// Reset state for the start of the next paragraph.
para_start = para_end;
// TODO: Support defaulting to direction of previous paragraph
@@ -351,6 +369,7 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
// <http://www.unicode.org/reports/tr9/#HL1>
para_level = default_para_level;
is_pure_ltr = true;
+ has_isolate_controls = false;
isolate_stack.clear();
}
}
@@ -387,6 +406,7 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
RLI | LRI | FSI => {
is_pure_ltr = false;
+ has_isolate_controls = true;
isolate_stack.push(i);
}
@@ -398,15 +418,18 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
}
}
- if let Some((paragraphs, pure_ltr)) = split_paragraphs {
+ if let Some((paragraphs, flags)) = split_paragraphs {
if para_start < text.len() {
paragraphs.push(ParagraphInfo {
range: para_start..text.len(),
level: para_level.unwrap_or(LTR_LEVEL),
});
- pure_ltr.push(is_pure_ltr);
+ flags.push(ParagraphInfoFlags {
+ is_pure_ltr,
+ has_isolate_controls,
+ });
}
- debug_assert_eq!(paragraphs.len(), pure_ltr.len());
+ debug_assert_eq!(paragraphs.len(), flags.len());
}
debug_assert_eq!(original_classes.len(), text.len());
@@ -417,6 +440,7 @@ fn compute_initial_info<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
original_classes,
para_level.unwrap_or(LTR_LEVEL),
is_pure_ltr,
+ has_isolate_controls,
)
}
@@ -475,20 +499,21 @@ impl<'text> BidiInfo<'text> {
text: &'a str,
default_para_level: Option<Level>,
) -> BidiInfo<'a> {
- let InitialInfoExt { base, pure_ltr, .. } =
+ let InitialInfoExt { base, flags, .. } =
InitialInfoExt::new_with_data_source(data_source, text, default_para_level);
let mut levels = Vec::<Level>::with_capacity(text.len());
let mut processing_classes = base.original_classes.clone();
- for (para, is_pure_ltr) in base.paragraphs.iter().zip(pure_ltr.iter()) {
+ for (para, flags) in base.paragraphs.iter().zip(flags.iter()) {
let text = &text[para.range.clone()];
let original_classes = &base.original_classes[para.range.clone()];
compute_bidi_info_for_para(
data_source,
para,
- *is_pure_ltr,
+ flags.is_pure_ltr,
+ flags.has_isolate_controls,
text,
original_classes,
&mut processing_classes,
@@ -713,7 +738,7 @@ impl<'text> ParagraphBidiInfo<'text> {
) -> ParagraphBidiInfo<'a> {
// Here we could create a ParagraphInitialInfo struct to parallel the one
// used by BidiInfo, but there doesn't seem any compelling reason for it.
- let (original_classes, paragraph_level, is_pure_ltr) =
+ let (original_classes, paragraph_level, is_pure_ltr, has_isolate_controls) =
compute_initial_info(data_source, text, default_para_level, None);
let mut levels = Vec::<Level>::with_capacity(text.len());
@@ -731,6 +756,7 @@ impl<'text> ParagraphBidiInfo<'text> {
data_source,
&para_info,
is_pure_ltr,
+ has_isolate_controls,
text,
&original_classes,
&mut processing_classes,
@@ -855,12 +881,12 @@ impl<'text> ParagraphBidiInfo<'text> {
///
/// [Rule L3]: https://www.unicode.org/reports/tr9/#L3
/// [Rule L4]: https://www.unicode.org/reports/tr9/#L4
-fn reorder_line<'text>(
- text: &'text str,
+fn reorder_line(
+ text: &str,
line: Range<usize>,
levels: Vec<Level>,
runs: Vec<LevelRun>,
-) -> Cow<'text, str> {
+) -> Cow<'_, str> {
// If all isolating run sequences are LTR, no reordering is needed
if runs.iter().all(|run| levels[run.start].is_ltr()) {
return text[line].into();
@@ -1059,6 +1085,7 @@ fn compute_bidi_info_for_para<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>
data_source: &D,
para: &ParagraphInfo,
is_pure_ltr: bool,
+ has_isolate_controls: bool,
text: &'a T,
original_classes: &[BidiClass],
processing_classes: &mut [BidiClass],
@@ -1072,6 +1099,7 @@ fn compute_bidi_info_for_para<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>
let processing_classes = &mut processing_classes[para.range.clone()];
let levels = &mut levels[para.range.clone()];
+ let mut level_runs = LevelRunVec::new();
explicit::compute(
text,
@@ -1079,9 +1107,18 @@ fn compute_bidi_info_for_para<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>
original_classes,
levels,
processing_classes,
+ &mut level_runs,
);
- let sequences = prepare::isolating_run_sequences(para.level, original_classes, levels);
+ let mut sequences = prepare::IsolatingRunSequenceVec::new();
+ prepare::isolating_run_sequences(
+ para.level,
+ original_classes,
+ levels,
+ level_runs,
+ has_isolate_controls,
+ &mut sequences,
+ );
for sequence in &sequences {
implicit::resolve_weak(text, sequence, processing_classes);
implicit::resolve_neutral(
@@ -1093,6 +1130,7 @@ fn compute_bidi_info_for_para<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>
processing_classes,
);
}
+
implicit::resolve_levels(processing_classes, levels);
assign_levels_to_removed_chars(para.level, original_classes, levels);
@@ -1122,20 +1160,20 @@ fn reorder_levels<'a, T: TextSource<'a> + ?Sized>(
B | S => {
assert_eq!(reset_to, None);
reset_to = Some(i + T::char_len(c));
- if reset_from == None {
+ if reset_from.is_none() {
reset_from = Some(i);
}
}
// Whitespace, isolate formatting
WS | FSI | LRI | RLI | PDI => {
- if reset_from == None {
+ if reset_from.is_none() {
reset_from = Some(i);
}
}
// <https://www.unicode.org/reports/tr9/#Retaining_Explicit_Formatting_Characters>
// same as above + set the level
RLE | LRE | RLO | LRO | PDF | BN => {
- if reset_from == None {
+ if reset_from.is_none() {
reset_from = Some(i);
}
// also set the level to previous
@@ -1294,8 +1332,8 @@ fn get_base_direction_impl<'a, D: BidiDataSource, T: TextSource<'a> + ?Sized>(
let mut isolate_level = 0;
for c in text.chars() {
match data_source.bidi_class(c) {
- LRI | RLI | FSI => isolate_level = isolate_level + 1,
- PDI if isolate_level > 0 => isolate_level = isolate_level - 1,
+ LRI | RLI | FSI => isolate_level += 1,
+ PDI if isolate_level > 0 => isolate_level -= 1,
L if isolate_level == 0 => return Direction::Ltr,
R | AL if isolate_level == 0 => return Direction::Rtl,
B if !use_full_text => break,
@@ -1342,7 +1380,7 @@ impl<'text> TextSource<'text> for str {
}
#[inline]
fn indices_lengths(&'text self) -> Self::IndexLenIter {
- Utf8IndexLenIter::new(&self)
+ Utf8IndexLenIter::new(self)
}
#[inline]
fn char_len(ch: char) -> usize {
@@ -1544,6 +1582,24 @@ mod tests {
let tests = vec![
(
// text
+ "",
+ // base level
+ Some(RTL_LEVEL),
+ // levels
+ Level::vec(&[]),
+ // original_classes
+ vec![],
+ // paragraphs
+ vec![],
+ // levels_u16
+ Level::vec(&[]),
+ // original_classes_u16
+ vec![],
+ // paragraphs_u16
+ vec![],
+ ),
+ (
+ // text
"abc123",
// base level
Some(LTR_LEVEL),
@@ -1703,6 +1759,19 @@ mod tests {
paragraphs: t.4.clone(),
}
);
+ // If it was empty, also test that ParagraphBidiInfo handles it safely.
+ if t.4.len() == 0 {
+ assert_eq!(
+ ParagraphBidiInfo::new(t.0, t.1),
+ ParagraphBidiInfo {
+ text: t.0,
+ original_classes: t.3.clone(),
+ levels: t.2.clone(),
+ paragraph_level: RTL_LEVEL,
+ is_pure_ltr: true,
+ }
+ )
+ }
// If it was a single paragraph, also test ParagraphBidiInfo.
if t.4.len() == 1 {
assert_eq!(
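
The lib.rs changes above replace the parallel `pure_ltr: Vec<bool>` with one ParagraphInfoFlags entry per paragraph so a second flag, has_isolate_controls, can ride along. A rough sketch of that shape (ParaFlags, gather_flags, and the character checks below are illustrative stand-ins; the real code classifies every character through a BidiDataSource):

#[derive(Debug)]
struct ParaFlags {
    is_pure_ltr: bool,
    has_isolate_controls: bool,
}

fn gather_flags(paragraphs: &[&str]) -> Vec<ParaFlags> {
    paragraphs
        .iter()
        .map(|p| ParaFlags {
            // Stand-in check: ASCII text contains no RTL characters or bidi controls.
            is_pure_ltr: p.chars().all(|c| c.is_ascii()),
            // U+2066..U+2068 are the LRI/RLI/FSI isolate initiators.
            has_isolate_controls: p.chars().any(|c| matches!(c, '\u{2066}'..='\u{2068}')),
        })
        .collect()
}

fn main() {
    let flags = gather_flags(&["hello", "a\u{2066}b\u{2069}c"]);
    assert!(flags[0].is_pure_ltr && !flags[0].has_isolate_controls);
    assert!(flags[1].has_isolate_controls);
}
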
diff --git a/third_party/rust/unicode-bidi/src/prepare.rs b/third_party/rust/unicode-bidi/src/prepare.rs
index 9234e1aa61..f7b35ad689 100644
--- a/third_party/rust/unicode-bidi/src/prepare.rs
+++ b/third_party/rust/unicode-bidi/src/prepare.rs
@@ -14,6 +14,8 @@
use alloc::vec::Vec;
use core::cmp::max;
use core::ops::Range;
+#[cfg(feature = "smallvec")]
+use smallvec::{smallvec, SmallVec};
use super::level::Level;
use super::BidiClass::{self, *};
@@ -23,6 +25,11 @@ use super::BidiClass::{self, *};
/// Represented as a range of byte indices.
pub type LevelRun = Range<usize>;
+#[cfg(feature = "smallvec")]
+pub type LevelRunVec = SmallVec<[LevelRun; 8]>;
+#[cfg(not(feature = "smallvec"))]
+pub type LevelRunVec = Vec<LevelRun>;
+
/// Output of `isolating_run_sequences` (steps X9-X10)
#[derive(Debug, PartialEq)]
pub struct IsolatingRunSequence {
@@ -31,6 +38,11 @@ pub struct IsolatingRunSequence {
pub eos: BidiClass, // End-of-sequence type.
}
+#[cfg(feature = "smallvec")]
+pub type IsolatingRunSequenceVec = SmallVec<[IsolatingRunSequence; 8]>;
+#[cfg(not(feature = "smallvec"))]
+pub type IsolatingRunSequenceVec = Vec<IsolatingRunSequence>;
+
/// Compute the set of isolating run sequences.
///
/// An isolating run sequence is a maximal sequence of level runs such that for all level runs
@@ -43,8 +55,59 @@ pub fn isolating_run_sequences(
para_level: Level,
original_classes: &[BidiClass],
levels: &[Level],
-) -> Vec<IsolatingRunSequence> {
- let runs = level_runs(levels, original_classes);
+ runs: LevelRunVec,
+ has_isolate_controls: bool,
+ isolating_run_sequences: &mut IsolatingRunSequenceVec,
+) {
+ // Per http://www.unicode.org/reports/tr9/#BD13:
+ // "In the absence of isolate initiators, each isolating run sequence in a paragraph
+ // consists of exactly one level run, and each level run constitutes a separate
+ // isolating run sequence."
+ // We can take a simplified path to handle this case.
+ if !has_isolate_controls {
+ isolating_run_sequences.reserve_exact(runs.len());
+ for run in runs {
+ // Determine the `sos` and `eos` class for the sequence.
+ // <http://www.unicode.org/reports/tr9/#X10>
+
+ let run_levels = &levels[run.clone()];
+ let run_classes = &original_classes[run.clone()];
+ let seq_level = run_levels[run_classes
+ .iter()
+ .position(|c| not_removed_by_x9(c))
+ .unwrap_or(0)];
+
+ let end_level = run_levels[run_classes
+ .iter()
+ .rposition(|c| not_removed_by_x9(c))
+ .unwrap_or(run.end - run.start - 1)];
+
+ // Get the level of the last non-removed char before the run.
+ let pred_level = match original_classes[..run.start]
+ .iter()
+ .rposition(not_removed_by_x9)
+ {
+ Some(idx) => levels[idx],
+ None => para_level,
+ };
+
+ // Get the level of the next non-removed char after the run.
+ let succ_level = match original_classes[run.end..]
+ .iter()
+ .position(not_removed_by_x9)
+ {
+ Some(idx) => levels[run.end + idx],
+ None => para_level,
+ };
+
+ isolating_run_sequences.push(IsolatingRunSequence {
+ runs: vec![run],
+ sos: max(seq_level, pred_level).bidi_class(),
+ eos: max(end_level, succ_level).bidi_class(),
+ });
+ }
+ return;
+ }
// Compute the set of isolating run sequences.
// <http://www.unicode.org/reports/tr9/#BD13>
@@ -52,10 +115,13 @@ pub fn isolating_run_sequences(
// When we encounter an isolate initiator, we push the current sequence onto the
// stack so we can resume it after the matching PDI.
- let mut stack = vec![Vec::new()];
+ #[cfg(feature = "smallvec")]
+ let mut stack: SmallVec<[Vec<Range<usize>>; 8]> = smallvec![vec![]];
+ #[cfg(not(feature = "smallvec"))]
+ let mut stack = vec![vec![]];
for run in runs {
- assert!(run.len() > 0);
+ assert!(!run.is_empty());
assert!(!stack.is_empty());
let start_class = original_classes[run.start];
@@ -67,8 +133,7 @@ pub fn isolating_run_sequences(
.iter()
.copied()
.rev()
- .filter(not_removed_by_x9)
- .next()
+ .find(not_removed_by_x9)
.unwrap_or(start_class);
let mut sequence = if start_class == PDI && stack.len() > 1 {
@@ -81,7 +146,7 @@ pub fn isolating_run_sequences(
sequence.push(run);
- if let RLI | LRI | FSI = end_class {
+ if matches!(end_class, RLI | LRI | FSI) {
// Resume this sequence after the isolate.
stack.push(sequence);
} else {
@@ -89,90 +154,82 @@ pub fn isolating_run_sequences(
sequences.push(sequence);
}
}
- // Pop any remaning sequences off the stack.
+ // Pop any remaining sequences off the stack.
sequences.extend(stack.into_iter().rev().filter(|seq| !seq.is_empty()));
// Determine the `sos` and `eos` class for each sequence.
// <http://www.unicode.org/reports/tr9/#X10>
- sequences
- .into_iter()
- .map(|sequence: Vec<LevelRun>| {
- assert!(!sequence.is_empty());
+ for sequence in sequences {
+ assert!(!sequence.is_empty());
- let mut result = IsolatingRunSequence {
- runs: sequence,
- sos: L,
- eos: L,
- };
+ let start_of_seq = sequence[0].start;
+ let runs_len = sequence.len();
+ let end_of_seq = sequence[runs_len - 1].end;
- let start_of_seq = result.runs[0].start;
- let runs_len = result.runs.len();
- let end_of_seq = result.runs[runs_len - 1].end;
-
- // > (not counting characters removed by X9)
- let seq_level = result
- .iter_forwards_from(start_of_seq, 0)
- .filter(|i| not_removed_by_x9(&original_classes[*i]))
- .map(|i| levels[i])
- .next()
- .unwrap_or(levels[start_of_seq]);
-
- // XXXManishearth the spec talks of a start and end level,
- // but for a given IRS the two should be equivalent, yes?
- let end_level = result
- .iter_backwards_from(end_of_seq, runs_len - 1)
- .filter(|i| not_removed_by_x9(&original_classes[*i]))
- .map(|i| levels[i])
- .next()
- .unwrap_or(levels[end_of_seq - 1]);
-
- #[cfg(test)]
- for run in result.runs.clone() {
- for idx in run {
- if not_removed_by_x9(&original_classes[idx]) {
- assert_eq!(seq_level, levels[idx]);
- }
- }
+ let mut result = IsolatingRunSequence {
+ runs: sequence,
+ sos: L,
+ eos: L,
+ };
+
+ // > (not counting characters removed by X9)
+ let seq_level = levels[result
+ .iter_forwards_from(start_of_seq, 0)
+ .find(|i| not_removed_by_x9(&original_classes[*i]))
+ .unwrap_or(start_of_seq)];
+
+ // XXXManishearth the spec talks of a start and end level,
+ // but for a given IRS the two should be equivalent, yes?
+ let end_level = levels[result
+ .iter_backwards_from(end_of_seq, runs_len - 1)
+ .find(|i| not_removed_by_x9(&original_classes[*i]))
+ .unwrap_or(end_of_seq - 1)];
+
+ #[cfg(test)]
+ for idx in result.runs.clone().into_iter().flatten() {
+ if not_removed_by_x9(&original_classes[idx]) {
+ assert_eq!(seq_level, levels[idx]);
}
+ }
+
+ // Get the level of the last non-removed char before the runs.
+ let pred_level = match original_classes[..start_of_seq]
+ .iter()
+ .rposition(not_removed_by_x9)
+ {
+ Some(idx) => levels[idx],
+ None => para_level,
+ };
- // Get the level of the last non-removed char before the runs.
- let pred_level = match original_classes[..start_of_seq]
+ // Get the last non-removed character to check if it is an isolate initiator.
+ // The spec calls for an unmatched one, but matched isolate initiators
+ // will never be at the end of a level run (otherwise there would be more to the run).
+ // We unwrap_or(BN) because BN marks removed classes and it won't matter for the check.
+ let last_non_removed = original_classes[..end_of_seq]
+ .iter()
+ .copied()
+ .rev()
+ .find(not_removed_by_x9)
+ .unwrap_or(BN);
+
+ // Get the level of the next non-removed char after the runs.
+ let succ_level = if matches!(last_non_removed, RLI | LRI | FSI) {
+ para_level
+ } else {
+ match original_classes[end_of_seq..]
.iter()
- .rposition(not_removed_by_x9)
+ .position(not_removed_by_x9)
{
- Some(idx) => levels[idx],
+ Some(idx) => levels[end_of_seq + idx],
None => para_level,
- };
+ }
+ };
- // Get the last non-removed character to check if it is an isolate initiator.
- // The spec calls for an unmatched one, but matched isolate initiators
- // will never be at the end of a level run (otherwise there would be more to the run).
- // We unwrap_or(BN) because BN marks removed classes and it won't matter for the check.
- let last_non_removed = original_classes[..end_of_seq]
- .iter()
- .copied()
- .rev()
- .find(not_removed_by_x9)
- .unwrap_or(BN);
-
- // Get the level of the next non-removed char after the runs.
- let succ_level = if let RLI | LRI | FSI = last_non_removed {
- para_level
- } else {
- match original_classes[end_of_seq..]
- .iter()
- .position(not_removed_by_x9)
- {
- Some(idx) => levels[end_of_seq + idx],
- None => para_level,
- }
- };
+ result.sos = max(seq_level, pred_level).bidi_class();
+ result.eos = max(end_level, succ_level).bidi_class();
- result.sos = max(seq_level, pred_level).bidi_class();
- result.eos = max(end_level, succ_level).bidi_class();
- result
- })
- .collect()
+ isolating_run_sequences.push(result);
+ }
}
impl IsolatingRunSequence {
@@ -219,6 +276,9 @@ impl IsolatingRunSequence {
/// Finds the level runs in a paragraph.
///
/// <http://www.unicode.org/reports/tr9/#BD7>
+///
+/// This is only used by tests; normally level runs are identified during explicit::compute.
+#[cfg(test)]
fn level_runs(levels: &[Level], original_classes: &[BidiClass]) -> Vec<LevelRun> {
assert_eq!(levels.len(), original_classes.len());
@@ -246,10 +306,7 @@ fn level_runs(levels: &[Level], original_classes: &[BidiClass]) -> Vec<LevelRun>
///
/// <http://www.unicode.org/reports/tr9/#X9>
pub fn removed_by_x9(class: BidiClass) -> bool {
- match class {
- RLE | LRE | RLO | LRO | PDF | BN => true,
- _ => false,
- }
+ matches!(class, RLE | LRE | RLO | LRO | PDF | BN)
}
// For use as a predicate for `position` / `rposition`
@@ -281,7 +338,14 @@ mod tests {
let classes = &[L, RLE, L, PDF, RLE, L, PDF, L];
let levels = &[0, 1, 1, 1, 1, 1, 1, 0];
let para_level = Level::ltr();
- let mut sequences = isolating_run_sequences(para_level, classes, &Level::vec(levels));
+ let mut sequences = IsolatingRunSequenceVec::new();
+ isolating_run_sequences(
+ para_level,
+ classes,
+ &Level::vec(levels),
+ level_runs(&Level::vec(levels), classes).into(),
+ false,
+ &mut sequences);
sequences.sort_by(|a, b| a.runs[0].clone().cmp(b.runs[0].clone()));
assert_eq!(
sequences.iter().map(|s| s.runs.clone()).collect::<Vec<_>>(),
@@ -294,7 +358,14 @@ mod tests {
let classes = &[L, RLI, L, PDI, RLI, L, PDI, L];
let levels = &[0, 0, 1, 0, 0, 1, 0, 0];
let para_level = Level::ltr();
- let mut sequences = isolating_run_sequences(para_level, classes, &Level::vec(levels));
+ let mut sequences = IsolatingRunSequenceVec::new();
+ isolating_run_sequences(
+ para_level,
+ classes,
+ &Level::vec(levels),
+ level_runs(&Level::vec(levels), classes).into(),
+ true,
+ &mut sequences);
sequences.sort_by(|a, b| a.runs[0].clone().cmp(b.runs[0].clone()));
assert_eq!(
sequences.iter().map(|s| s.runs.clone()).collect::<Vec<_>>(),
@@ -307,7 +378,14 @@ mod tests {
let classes = &[L, RLI, L, LRI, L, RLE, L, PDF, L, PDI, L, PDI, L];
let levels = &[0, 0, 1, 1, 2, 3, 3, 3, 2, 1, 1, 0, 0];
let para_level = Level::ltr();
- let mut sequences = isolating_run_sequences(para_level, classes, &Level::vec(levels));
+ let mut sequences = IsolatingRunSequenceVec::new();
+ isolating_run_sequences(
+ para_level,
+ classes,
+ &Level::vec(levels),
+ level_runs(&Level::vec(levels), classes).into(),
+ true,
+ &mut sequences);
sequences.sort_by(|a, b| a.runs[0].clone().cmp(b.runs[0].clone()));
assert_eq!(
sequences.iter().map(|s| s.runs.clone()).collect::<Vec<_>>(),
@@ -326,7 +404,14 @@ mod tests {
let classes = &[L, RLE, L, LRE, L, PDF, L, PDF, RLE, L, PDF, L];
let levels = &[0, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 0];
let para_level = Level::ltr();
- let mut sequences = isolating_run_sequences(para_level, classes, &Level::vec(levels));
+ let mut sequences = IsolatingRunSequenceVec::new();
+ isolating_run_sequences(
+ para_level,
+ classes,
+ &Level::vec(levels),
+ level_runs(&Level::vec(levels), classes).into(),
+ false,
+ &mut sequences);
sequences.sort_by(|a, b| a.runs[0].clone().cmp(b.runs[0].clone()));
// text1
@@ -385,7 +470,14 @@ mod tests {
let classes = &[L, RLI, L, LRI, L, PDI, L, PDI, RLI, L, PDI, L];
let levels = &[0, 0, 1, 1, 2, 1, 1, 0, 0, 1, 0, 0];
let para_level = Level::ltr();
- let mut sequences = isolating_run_sequences(para_level, classes, &Level::vec(levels));
+ let mut sequences = IsolatingRunSequenceVec::new();
+ isolating_run_sequences(
+ para_level,
+ classes,
+ &Level::vec(levels),
+ level_runs(&Level::vec(levels), classes).into(),
+ true,
+ &mut sequences);
sequences.sort_by(|a, b| a.runs[0].clone().cmp(b.runs[0].clone()));
// text1·RLI·PDI·RLI·PDI·text6
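
The prepare.rs fast path above leans on BD13: when a paragraph contains no isolate initiators, every level run is its own isolating run sequence, so sos and eos can be assigned per run straight from rule X10. A simplified sketch of that sos/eos rule (u8 levels and a two-value Dir enum stand in for Level and BidiClass; the real code also skips X9-removed positions and tracks the first and last non-removed levels of the run separately):

#[derive(Copy, Clone, Debug, PartialEq)]
enum Dir { L, R }

fn dir_of(level: u8) -> Dir {
    if level % 2 == 0 { Dir::L } else { Dir::R }
}

/// X10 sketch: sos/eos come from the higher of the run's level and the level of the
/// adjacent text, falling back to the paragraph level at the paragraph edges.
fn sos_eos(levels: &[u8], run: std::ops::Range<usize>, para_level: u8) -> (Dir, Dir) {
    let run_level = levels[run.start];
    let pred = if run.start > 0 { levels[run.start - 1] } else { para_level };
    let succ = if run.end < levels.len() { levels[run.end] } else { para_level };
    (dir_of(run_level.max(pred)), dir_of(run_level.max(succ)))
}

fn main() {
    // An RTL run embedded in an LTR paragraph gets R as both sos and eos.
    assert_eq!(sos_eos(&[0, 1, 1, 0], 1..3, 0), (Dir::R, Dir::R));
}
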
diff --git a/third_party/rust/unicode-bidi/src/utf16.rs b/third_party/rust/unicode-bidi/src/utf16.rs
index dcd9baf2be..11b386f91e 100644
--- a/third_party/rust/unicode-bidi/src/utf16.rs
+++ b/third_party/rust/unicode-bidi/src/utf16.rs
@@ -18,7 +18,9 @@ use crate::{
compute_bidi_info_for_para, compute_initial_info, level, para_direction, reorder_levels,
reorder_visual, visual_runs_for_line,
};
-use crate::{BidiClass, BidiDataSource, Direction, Level, LevelRun, ParagraphInfo};
+use crate::{
+ BidiClass, BidiDataSource, Direction, Level, LevelRun, ParagraphInfo, ParagraphInfoFlags,
+};
#[cfg(feature = "hardcoded-data")]
use crate::HardcodedBidiData;
@@ -83,7 +85,7 @@ struct InitialInfoExt<'text> {
/// Parallel to base.paragraphs, records whether each paragraph is "pure LTR" that
/// requires no further bidi processing (i.e. there are no RTL characters or bidi
/// control codes present).
- pure_ltr: Vec<bool>,
+ flags: Vec<ParagraphInfoFlags>,
}
impl<'text> InitialInfoExt<'text> {
@@ -103,12 +105,12 @@ impl<'text> InitialInfoExt<'text> {
default_para_level: Option<Level>,
) -> InitialInfoExt<'a> {
let mut paragraphs = Vec::<ParagraphInfo>::new();
- let mut pure_ltr = Vec::<bool>::new();
- let (original_classes, _, _) = compute_initial_info(
+ let mut flags = Vec::<ParagraphInfoFlags>::new();
+ let (original_classes, _, _, _) = compute_initial_info(
data_source,
text,
default_para_level,
- Some((&mut paragraphs, &mut pure_ltr)),
+ Some((&mut paragraphs, &mut flags)),
);
InitialInfoExt {
@@ -117,7 +119,7 @@ impl<'text> InitialInfoExt<'text> {
original_classes,
paragraphs,
},
- pure_ltr,
+ flags,
}
}
}
@@ -177,20 +179,21 @@ impl<'text> BidiInfo<'text> {
text: &'a [u16],
default_para_level: Option<Level>,
) -> BidiInfo<'a> {
- let InitialInfoExt { base, pure_ltr, .. } =
+ let InitialInfoExt { base, flags, .. } =
InitialInfoExt::new_with_data_source(data_source, text, default_para_level);
let mut levels = Vec::<Level>::with_capacity(text.len());
let mut processing_classes = base.original_classes.clone();
- for (para, is_pure_ltr) in base.paragraphs.iter().zip(pure_ltr.iter()) {
+ for (para, flags) in base.paragraphs.iter().zip(flags.iter()) {
let text = &text[para.range.clone()];
let original_classes = &base.original_classes[para.range.clone()];
compute_bidi_info_for_para(
data_source,
para,
- *is_pure_ltr,
+ flags.is_pure_ltr,
+ flags.has_isolate_controls,
text,
original_classes,
&mut processing_classes,
@@ -411,7 +414,7 @@ impl<'text> ParagraphBidiInfo<'text> {
) -> ParagraphBidiInfo<'a> {
// Here we could create a ParagraphInitialInfo struct to parallel the one
// used by BidiInfo, but there doesn't seem any compelling reason for it.
- let (original_classes, paragraph_level, is_pure_ltr) =
+ let (original_classes, paragraph_level, is_pure_ltr, has_isolate_controls) =
compute_initial_info(data_source, text, default_para_level, None);
let mut levels = Vec::<Level>::with_capacity(text.len());
@@ -429,6 +432,7 @@ impl<'text> ParagraphBidiInfo<'text> {
data_source,
&para_info,
is_pure_ltr,
+ has_isolate_controls,
text,
&original_classes,
&mut processing_classes,
@@ -551,12 +555,12 @@ impl<'text> ParagraphBidiInfo<'text> {
///
/// [Rule L3]: https://www.unicode.org/reports/tr9/#L3
/// [Rule L4]: https://www.unicode.org/reports/tr9/#L4
-fn reorder_line<'text>(
- text: &'text [u16],
+fn reorder_line(
+ text: &[u16],
line: Range<usize>,
levels: Vec<Level>,
runs: Vec<LevelRun>,
-) -> Cow<'text, [u16]> {
+) -> Cow<'_, [u16]> {
// If all isolating run sequences are LTR, no reordering is needed
if runs.iter().all(|run| levels[run.start].is_ltr()) {
return text[line].into();
@@ -668,15 +672,15 @@ impl<'text> TextSource<'text> for [u16] {
}
#[inline]
fn chars(&'text self) -> Self::CharIter {
- Utf16CharIter::new(&self)
+ Utf16CharIter::new(self)
}
#[inline]
fn char_indices(&'text self) -> Self::CharIndexIter {
- Utf16CharIndexIter::new(&self)
+ Utf16CharIndexIter::new(self)
}
#[inline]
fn indices_lengths(&'text self) -> Self::IndexLenIter {
- Utf16IndexLenIter::new(&self)
+ Utf16IndexLenIter::new(self)
}
#[inline]
fn char_len(ch: char) -> usize {
diff --git a/third_party/rust/wgpu-core/.cargo-checksum.json b/third_party/rust/wgpu-core/.cargo-checksum.json
index bf0a48e81a..d22e0914d7 100644
--- a/third_party/rust/wgpu-core/.cargo-checksum.json
+++ b/third_party/rust/wgpu-core/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"92c0bcfb5bf68fb55acb6e7b826ec07c1cfdd6d53b057c16a5c698e044ea228e","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/any_surface.rs":"1c032bc1894a222a47f0116b976f1543c1140c0534678502ee1172d4f77fc515","src/binding_model.rs":"2bd4e4a36742ccf0cab0afa039411a791e2a6e9ea3909d0b85cc9a84cc151c6b","src/command/bind.rs":"a37f042484b65d9fdea4cdab3667381623ee9a8943a6d32683d410b92736d306","src/command/bundle.rs":"91513a3be0adf46a9f3454b6a3d00ff6686729eb91fe9dd6d732cbfa1ff6d1d8","src/command/clear.rs":"b20e93c4b8cb47062b38e472f78d28d9ec00fd1169b17a87094be7f9d1c995e1","src/command/compute.rs":"eb60f0e2842dd20b366905225af24f4ca2a1b0c67914b86009c5b870b26f747f","src/command/draw.rs":"e8a664fc248e273e8c0e4aaeb645010b3f4ec61d29d858137f31f6f653c86542","src/command/memory_init.rs":"6ec93b9e2eb21edaa534e60770b4ba95735e9de61e74d827bc492df8e3639449","src/command/mod.rs":"d6a66a5796bd824be72af2c8d3ece59a507090c61cb50e9856eb4c70a28945e2","src/command/query.rs":"dffc843746b10ba9a50b8a2b92a59b407b56a845619a96d72a5883588fcb50f0","src/command/render.rs":"c3783b4f19b4eafb33f94554aea69408d42e40b5e98da22aa804a0931430ea6f","src/command/transfer.rs":"bf1077d1a99a258bad46087ae7234703627e7f4d30b38e6142d016c02deaad3a","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"65f47b58939b60f88f47861e65d5d45209492df8e73e7c1b60b3b459f510c09e","src/device/bgl.rs":"ec8bdd6e9b4cd50c25bed317275863d0c16bb6619f62ed85bf0464948010dfc1","src/device/global.rs":"7d70a45bd39e251c6945fc475883c4e69632f92a7abe263adab6e47a248de5a4","src/device/life.rs":"cd12343d5a14d82b18b787991811b36f420719776336f8a65b45c32fd47a77d4","src/device/mod.rs":"fff41f92e1a9f6660e18dc30452d9911ca827701bb8303af2ae06f1c1e1a795f","src/device/queue.rs":"2ffc477d1bebb35a1fc8e46f4ca2c5ef50a4eb6034968f076062461b2e678699","src/device/resource.rs":"4f22cf27da8d829b624877d7d3bb10971a0e8fb7c4f95d85d5011049a010684a","src/device/trace.rs":"9deb1b083165e07253b4928ac2f564aba06f9089c3aca1c0a1d438d87d981542","src/error.rs":"e3b6b7a69877437f4e46af7f0e8ca1db1822beae7c8448db41c2bae0f64b2bb4","src/global.rs":"0966475959706650fd036a18d51441a8e14c3ef10107db617f597614ca47e50a","src/hal_api.rs":"1cd9c3fe1c9d8c3a24e3e7f963a2ef26e056a2b26d529b840dbc969090aaf201","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"352a1b75d4535f24b06d16134421db98f910e6e719f50f863a204df6768e3369","src/id.rs":"c736c0b3d35cf620e2c01322d57c4938b42828b39948ecad82d39fc39c1093c1","src/identity.rs":"c6a719389d71bb11c9ceaeadb0496f8b4c6ad24e35597e12b40980ad7ad72f10","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"c9b5b53a0aeac8e117d49a3a007fab001cd5737e29dd75388cdbfc24f3d8df08","src/lib.rs":"49174591f8116c3b8fadb185f89ce69ae931ee6e9f639d2558848db82ea1651f","src/pipeline.rs":"300f58afc16c454ce52aabff6debd7a7db85ed627b111a8801bcb201827f110c","src/pool.rs":"778ea1c23fcfaaa5001606e686f712f606826039d60dd5a3cd26e7de91ac057a","src/present.rs":"86b1e8bd7314f77f083be6d89a2f734e92f2ed11c86eb4c912c754fcdaa2e597","src/registry.rs":"dbc9310a24a843cf6b94a4bab78b0bb5f325e
18c1f3c19c94d4f12b4f29e8598","src/resource.rs":"cd568c9d1abd4bf740cb86efae7862b5478518f3b1cdaf792ae05b3c0920c8e0","src/snatch.rs":"29a1135ee09c06883eac4df6f45b7220c2ba8f89f34232ea1d270d6e7b05c7a8","src/storage.rs":"f0c41461b8f9cdc862dbd3de04c8e720ee416c7c57310696f6f4fd22183fcc85","src/track/buffer.rs":"65c27dfabe7a1c3e4ddbde7189e53b2e95f3f3663aa82b121801a2fd0dcbd304","src/track/metadata.rs":"ac82a9c69b0a141b5c3ca69b203c5aa2a17578b598cab3ae156b917cef734b97","src/track/mod.rs":"8f03955447544f3ebcb48547440a48d321ad1ff0e0c601a62623b5457763b8de","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"2da10160c46d07ad15986ba6f1356b7933806fc5c3fa5a9d8deea44d9a3c93a7","src/track/texture.rs":"15892e639f2ecbb13c8d34c29e3fd6ad719cb71e2d40c64910b552b8985ddab0","src/validation.rs":"613c58c3601f36d6aa5986cea01f30497c6bd4ceb990824904d101b2327941a9"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"4880d66b004519ca6e424fc9e2e6ac065536d36334a2e327b90422e97f2a2a35","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","build.rs":"a99478d7f63fb41429e3834f4d0e5cd333f94ba1834c68295f929170e16987de","src/any_surface.rs":"1c032bc1894a222a47f0116b976f1543c1140c0534678502ee1172d4f77fc515","src/binding_model.rs":"bb4aefad17957e770a5f70f00bf5853dc13da1d9f836493c9aa9adbbe7bb8147","src/command/bind.rs":"a37f042484b65d9fdea4cdab3667381623ee9a8943a6d32683d410b92736d306","src/command/bundle.rs":"fea00382acdf204bcb58522953335dd8f0092565693fa65d0c008e2698e39445","src/command/clear.rs":"03cfc0d4c689d56010391440ab279e615ef1d3235eb1f9f9df0323682d275109","src/command/compute.rs":"2b6beed328ed351ad6fe7088cfa1824c1bf4be50deaeab971cdcb09914d791de","src/command/draw.rs":"15f9ad857504d8098279f9c789317feba321c9b6b8f0de20b8ba98f358c99d89","src/command/memory_init.rs":"6ec93b9e2eb21edaa534e60770b4ba95735e9de61e74d827bc492df8e3639449","src/command/mod.rs":"1d347e1746194f7a07d1f75bd3a9d3cbe121fbaa479c25ba6b8c16e9d699e06b","src/command/query.rs":"43b78a163eb0eb5f1427b7a57b6d39a2748c25f880ba024c91e2f71e2a6a817d","src/command/render.rs":"808dc8106811b32877637851e63baeba7c7438748dec67cbb17ea93c58dc61bd","src/command/transfer.rs":"bf1077d1a99a258bad46087ae7234703627e7f4d30b38e6142d016c02deaad3a","src/conv.rs":"7e3ffe33b47a6fd3617aabf9f11cc68f1ccbee2c7343b8dbbcd0e8f3447e1ad8","src/device/any_device.rs":"65f47b58939b60f88f47861e65d5d45209492df8e73e7c1b60b3b459f510c09e","src/device/bgl.rs":"ec8bdd6e9b4cd50c25bed317275863d0c16bb6619f62ed85bf0464948010dfc1","src/device/global.rs":"ff90a9e3b261bedbec37ab1aed0bf23f1e50c5418da72184e2b175057ed18fce","src/device/life.rs":"3cacaaa74df04bb1285a36d70395b35cfa17059f8d6289b41e665ecbc64cb66a","src/device/mod.rs":"fff41f92e1a9f6660e18dc30452d9911ca827701bb8303af2ae06f1c1e1a795f","src/device/queue.rs":"da0aeebfd1d1c6e155dc89cebf75dfdb6ec18062f9512044ed7e0fef0bda2f74","src/device/resource.rs":"74d3180c12602133bee46925d3788ac510d2ad5ea141a2b46f6904f38549053b","src/device/trace.rs":"9deb1b083165e07253b4928ac2f564aba06f9089c3aca1c0a1d438d87d981542","src/error.rs":"e3b6b7a69877437f4e46af7f0e8ca1db1822beae7c8448db41c2bae0f64b2bb4","src/global.rs":"0966475959706650fd036a18d51441a8e14c3ef10107db617f597614ca47e50a","src/hal_api.rs":"1cd9c3fe1c9d8c3a24e3e7f963a2ef26e056a2b26d529b840dbc969090aaf201","src/hash_utils.rs":"e8d484027c7ce81978e4679a5e20af9416ab7d2fa595f1ca95992b29d625b0ca","src/hub.rs":"352a1b75d4535f24b06d16134421db98f910e6e719f50f863a204df6768e3369","src/id.rs":"9f67dbef5d7a416eb740281ecf8a94673f624da16f21ec33c425c11d9ed01e90","src/identity.rs":"12b820eb4b8bd7b226e15eec97d0f100a695f6b9be7acd79ad2421f2d0fe1985","src/init_tracker/buffer.rs":"61eb9cfaa312135b7a937ff6a3117f531b5b7323fae6553a41d6de9bc106d7e0","src/init_tracker/mod.rs":"a0f64730cc025113b656b4690f9dcb0ec18b8770bc7ef24c7b4ad8bebae03d24","src/init_tracker/texture.rs":"030fd594bf9948fad391390d85c5e1fec7eaf67b6e812c60f2dd59bc4fda8fd5","src/instance.rs":"b6de2a371ef3b43d3217102fe87e423dd1eb12da86b65f54b902d9eaa38b6b9f","src/lib.rs":"4ad9979442cf88557fb3b9f8d3b26c7b929a710c60cabcd1f51788917c95aecb","src/pipeline.rs":"89d88de4b8b8e1dd2bc834d101a1bdf34816ebcaa616dc795f604e9183a21cd0","src/pool.rs":"778ea1c23fcfaaa5001606e686f712f606826039d60dd5a3cd26e7de91ac057a","src/present.rs":"f69580ee0baf181162f9dd82b159596c738558d8abb60db93047effbe1436b2f","src/registry.rs":"913e651dc585ff12fe7659443c38d635a2904
881e56cb7159c5ca72d45ae5800","src/resource.rs":"59731bc9a207d87b07b6db9c897e20d64be27c144bb8eb8ab2505807163acfc4","src/snatch.rs":"29a1135ee09c06883eac4df6f45b7220c2ba8f89f34232ea1d270d6e7b05c7a8","src/storage.rs":"f0c41461b8f9cdc862dbd3de04c8e720ee416c7c57310696f6f4fd22183fcc85","src/track/buffer.rs":"83a0cbb8026dbd651d32ea5a47f332f691afed1c5e6f14e78a4fe8aa25e2ad12","src/track/metadata.rs":"655985fdfdd1c7fe8220af98abadf33de7e8920b485e3dd27c28688c3dd2e47d","src/track/mod.rs":"52470a48de6b5dce55385e23ba7a3cbf512cc10cdf431a35aa42190e2fc4306d","src/track/range.rs":"2a15794e79b0470d5ba6b3267173a42f34312878e1cb288f198d2854a7888e53","src/track/stateless.rs":"305e0a493fb1cd0a325274c0757e99c19f9d14deaa8ca11ada41c1399a4ae5c4","src/track/texture.rs":"ba3e3814b341b5242548b55d77bef1d1d9e7d52d63784be98c51e342da7fefff","src/validation.rs":"026168ac4f23bc6a58a90c78fd3eb73485b3c1aad630ef43755604d1babade79"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/wgpu-core/Cargo.toml b/third_party/rust/wgpu-core/Cargo.toml
index f9692cf607..3d3b4dc80c 100644
--- a/third_party/rust/wgpu-core/Cargo.toml
+++ b/third_party/rust/wgpu-core/Cargo.toml
@@ -41,6 +41,7 @@ arrayvec = "0.7"
bit-vec = "0.6"
bitflags = "2"
codespan-reporting = "0.11"
+document-features = "0.2.8"
indexmap = "2"
log = "0.4"
once_cell = "1"
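
For context on the new document-features dependency above: that crate turns `##` comments on Cargo.toml feature entries into a feature list in the rustdoc output. This diff does not show how wgpu-core invokes it, so the fragment below is a hedged illustration of the crate's typical wiring, with a made-up feature name:

// In Cargo.toml (illustrative entry, not from this patch):
//
//   [features]
//   ## Enables the hypothetical foo backend.
//   foo = []
//
// At the top of lib.rs, the macro expands those `##` comments into the crate docs:
#![doc = document_features::document_features!()]
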
diff --git a/third_party/rust/wgpu-core/src/binding_model.rs b/third_party/rust/wgpu-core/src/binding_model.rs
index d7b54ad5a5..8689af2ac1 100644
--- a/third_party/rust/wgpu-core/src/binding_model.rs
+++ b/third_party/rust/wgpu-core/src/binding_model.rs
@@ -38,6 +38,8 @@ pub enum BindGroupLayoutEntryError {
ArrayUnsupported,
#[error("Multisampled binding with sample type `TextureSampleType::Float` must have filterable set to false.")]
SampleTypeFloatFilterableBindingMultisampled,
+ #[error("Multisampled texture binding view dimension must be 2d, got {0:?}")]
+ Non2DMultisampled(wgt::TextureViewDimension),
#[error(transparent)]
MissingFeatures(#[from] MissingFeatures),
#[error(transparent)]
@@ -219,7 +221,7 @@ pub enum BindingZone {
}
#[derive(Clone, Debug, Error)]
-#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}")]
+#[error("Too many bindings of type {kind:?} in {zone}, limit is {limit}, count was {count}. Check the limit `{}` passed to `Adapter::request_device`", .kind.to_config_str())]
pub struct BindingTypeMaxCountError {
pub kind: BindingTypeMaxCountErrorKind,
pub zone: BindingZone,
@@ -238,6 +240,28 @@ pub enum BindingTypeMaxCountErrorKind {
UniformBuffers,
}
+impl BindingTypeMaxCountErrorKind {
+ fn to_config_str(&self) -> &'static str {
+ match self {
+ BindingTypeMaxCountErrorKind::DynamicUniformBuffers => {
+ "max_dynamic_uniform_buffers_per_pipeline_layout"
+ }
+ BindingTypeMaxCountErrorKind::DynamicStorageBuffers => {
+ "max_dynamic_storage_buffers_per_pipeline_layout"
+ }
+ BindingTypeMaxCountErrorKind::SampledTextures => {
+ "max_sampled_textures_per_shader_stage"
+ }
+ BindingTypeMaxCountErrorKind::Samplers => "max_samplers_per_shader_stage",
+ BindingTypeMaxCountErrorKind::StorageBuffers => "max_storage_buffers_per_shader_stage",
+ BindingTypeMaxCountErrorKind::StorageTextures => {
+ "max_storage_textures_per_shader_stage"
+ }
+ BindingTypeMaxCountErrorKind::UniformBuffers => "max_uniform_buffers_per_shader_stage",
+ }
+ }
+}
+
#[derive(Debug, Default)]
pub(crate) struct PerStageBindingTypeCounter {
vertex: u32,
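
The binding_model.rs change above makes BindingTypeMaxCountError name the limit the caller should raise, using thiserror's support for extra arguments after the format string. A self-contained sketch of that pattern (Kind and TooMany are made-up stand-ins for the real error types):

use thiserror::Error;

#[derive(Debug, Clone, Copy)]
enum Kind {
    Samplers,
    UniformBuffers,
}

impl Kind {
    fn to_config_str(&self) -> &'static str {
        match self {
            Kind::Samplers => "max_samplers_per_shader_stage",
            Kind::UniformBuffers => "max_uniform_buffers_per_shader_stage",
        }
    }
}

// Field shorthand like `{kind:?}` and a trailing expression argument for the bare `{}`
// can be mixed, exactly as the patched error message does.
#[derive(Debug, Error)]
#[error("Too many bindings of type {kind:?}, limit is {limit}. Check the limit `{}`", .kind.to_config_str())]
struct TooMany {
    kind: Kind,
    limit: u32,
}

fn main() {
    let err = TooMany { kind: Kind::Samplers, limit: 16 };
    println!("{err}");
}
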
diff --git a/third_party/rust/wgpu-core/src/command/bundle.rs b/third_party/rust/wgpu-core/src/command/bundle.rs
index 9d80c62f85..ab2d18bc59 100644
--- a/third_party/rust/wgpu-core/src/command/bundle.rs
+++ b/third_party/rust/wgpu-core/src/command/bundle.rs
@@ -97,7 +97,7 @@ use crate::{
id,
init_tracker::{BufferInitTrackerAction, MemoryInitKind, TextureInitTrackerAction},
pipeline::{PipelineFlags, RenderPipeline, VertexStep},
- resource::{Resource, ResourceInfo, ResourceType},
+ resource::{Buffer, Resource, ResourceInfo, ResourceType},
resource_log,
track::RenderBundleScope,
validation::check_buffer_usage,
@@ -110,9 +110,11 @@ use thiserror::Error;
use hal::CommandEncoder as _;
+use super::ArcRenderCommand;
+
/// https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-draw
-fn validate_draw(
- vertex: &[Option<VertexState>],
+fn validate_draw<A: HalApi>(
+ vertex: &[Option<VertexState<A>>],
step: &[VertexStep],
first_vertex: u32,
vertex_count: u32,
@@ -152,10 +154,10 @@ fn validate_draw(
}
// See https://gpuweb.github.io/gpuweb/#dom-gpurendercommandsmixin-drawindexed
-fn validate_indexed_draw(
- vertex: &[Option<VertexState>],
+fn validate_indexed_draw<A: HalApi>(
+ vertex: &[Option<VertexState<A>>],
step: &[VertexStep],
- index_state: &IndexState,
+ index_state: &IndexState<A>,
first_index: u32,
index_count: u32,
first_instance: u32,
@@ -260,6 +262,9 @@ impl RenderBundleEncoder {
None => (true, true),
};
+ // TODO: should be device.limits.max_color_attachments
+ let max_color_attachments = hal::MAX_COLOR_ATTACHMENTS;
+
//TODO: validate that attachment formats are renderable,
// have expected aspects, support multisampling.
Ok(Self {
@@ -267,11 +272,11 @@ impl RenderBundleEncoder {
parent_id,
context: RenderPassContext {
attachments: AttachmentData {
- colors: if desc.color_formats.len() > hal::MAX_COLOR_ATTACHMENTS {
+ colors: if desc.color_formats.len() > max_color_attachments {
return Err(CreateRenderBundleError::ColorAttachment(
ColorAttachmentError::TooMany {
given: desc.color_formats.len(),
- limit: hal::MAX_COLOR_ATTACHMENTS,
+ limit: max_color_attachments,
},
));
} else {
@@ -345,24 +350,44 @@ impl RenderBundleEncoder {
) -> Result<RenderBundle<A>, RenderBundleError> {
let bind_group_guard = hub.bind_groups.read();
let pipeline_guard = hub.render_pipelines.read();
- let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
- let texture_guard = hub.textures.read();
let mut state = State {
- trackers: RenderBundleScope::new(
- &*buffer_guard,
- &*texture_guard,
- &*bind_group_guard,
- &*pipeline_guard,
- &*query_set_guard,
- ),
+ trackers: RenderBundleScope::new(),
pipeline: None,
bind: (0..hal::MAX_BIND_GROUPS).map(|_| None).collect(),
vertex: (0..hal::MAX_VERTEX_BUFFERS).map(|_| None).collect(),
index: None,
flat_dynamic_offsets: Vec::new(),
};
+
+ let indices = &device.tracker_indices;
+ state
+ .trackers
+ .buffers
+ .write()
+ .set_size(indices.buffers.size());
+ state
+ .trackers
+ .textures
+ .write()
+ .set_size(indices.textures.size());
+ state
+ .trackers
+ .bind_groups
+ .write()
+ .set_size(indices.bind_groups.size());
+ state
+ .trackers
+ .render_pipelines
+ .write()
+ .set_size(indices.render_pipelines.size());
+ state
+ .trackers
+ .query_sets
+ .write()
+ .set_size(indices.query_sets.size());
+
let mut commands = Vec::new();
let mut buffer_memory_init_actions = Vec::new();
let mut texture_memory_init_actions = Vec::new();
@@ -399,7 +424,6 @@ impl RenderBundleEncoder {
}
// Identify the next `num_dynamic_offsets` entries from `base.dynamic_offsets`.
- let num_dynamic_offsets = num_dynamic_offsets;
let offsets_range =
next_dynamic_offset..next_dynamic_offset + num_dynamic_offsets;
next_dynamic_offset = offsets_range.end;
@@ -471,7 +495,7 @@ impl RenderBundleEncoder {
let pipeline_state = PipelineState::new(pipeline);
- commands.push(command);
+ commands.push(ArcRenderCommand::SetPipeline(pipeline.clone()));
// If this pipeline uses push constants, zero out their values.
if let Some(iter) = pipeline_state.zero_push_constants() {
@@ -496,7 +520,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::INDEX)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDEX)
.map_pass_err(scope)?;
let end = match size {
@@ -508,7 +532,7 @@ impl RenderBundleEncoder {
offset..end,
MemoryInitKind::NeedsInitializedMemory,
));
- state.set_index_buffer(buffer_id, index_format, offset..end);
+ state.set_index_buffer(buffer.clone(), index_format, offset..end);
}
RenderCommand::SetVertexBuffer {
slot,
@@ -535,7 +559,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::VERTEX)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::VERTEX)
.map_pass_err(scope)?;
let end = match size {
@@ -547,13 +571,13 @@ impl RenderBundleEncoder {
offset..end,
MemoryInitKind::NeedsInitializedMemory,
));
- state.vertex[slot as usize] = Some(VertexState::new(buffer_id, offset..end));
+ state.vertex[slot as usize] = Some(VertexState::new(buffer.clone(), offset..end));
}
RenderCommand::SetPushConstant {
stages,
offset,
size_bytes,
- values_offset: _,
+ values_offset,
} => {
let scope = PassErrorScope::SetPushConstant;
let end_offset = offset + size_bytes;
@@ -564,7 +588,7 @@ impl RenderBundleEncoder {
.validate_push_constant_ranges(stages, offset, end_offset)
.map_pass_err(scope)?;
- commands.push(command);
+ commands.push(ArcRenderCommand::SetPushConstant { stages, offset, size_bytes, values_offset });
}
RenderCommand::Draw {
vertex_count,
@@ -592,14 +616,19 @@ impl RenderBundleEncoder {
if instance_count > 0 && vertex_count > 0 {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::Draw {
+ vertex_count,
+ instance_count,
+ first_vertex,
+ first_instance,
+ });
}
}
RenderCommand::DrawIndexed {
index_count,
instance_count,
first_index,
- base_vertex: _,
+ base_vertex,
first_instance,
} => {
let scope = PassErrorScope::Draw {
@@ -628,7 +657,7 @@ impl RenderBundleEncoder {
commands.extend(state.flush_index());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::DrawIndexed { index_count, instance_count, first_index, base_vertex, first_instance });
}
}
RenderCommand::MultiDrawIndirect {
@@ -657,7 +686,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
@@ -668,7 +697,7 @@ impl RenderBundleEncoder {
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: false });
}
RenderCommand::MultiDrawIndirect {
buffer_id,
@@ -696,7 +725,7 @@ impl RenderBundleEncoder {
.map_pass_err(scope)?;
self.check_valid_to_use(buffer.device.info.id())
.map_pass_err(scope)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::INDIRECT)
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::INDIRECT)
.map_pass_err(scope)?;
buffer_memory_init_actions.extend(buffer.initialization_status.read().create_action(
@@ -713,7 +742,7 @@ impl RenderBundleEncoder {
commands.extend(index.flush());
commands.extend(state.flush_vertices());
commands.extend(state.flush_binds(used_bind_groups, base.dynamic_offsets));
- commands.push(command);
+ commands.push(ArcRenderCommand::MultiDrawIndirect { buffer: buffer.clone(), offset, count: None, indexed: true });
}
RenderCommand::MultiDrawIndirect { .. }
| RenderCommand::MultiDrawIndirectCount { .. } => unimplemented!(),
@@ -748,7 +777,10 @@ impl RenderBundleEncoder {
buffer_memory_init_actions,
texture_memory_init_actions,
context: self.context,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(device.tracker_indices.bundles.clone()),
+ ),
discard_hal_labels: device
.instance_flags
.contains(wgt::InstanceFlags::DISCARD_HAL_LABELS),
@@ -824,7 +856,7 @@ pub type RenderBundleDescriptor<'a> = wgt::RenderBundleDescriptor<Label<'a>>;
pub struct RenderBundle<A: HalApi> {
// Normalized command stream. It can be executed verbatim,
// without re-binding anything on the pipeline change.
- base: BasePass<RenderCommand>,
+ base: BasePass<ArcRenderCommand<A>>,
pub(super) is_depth_read_only: bool,
pub(super) is_stencil_read_only: bool,
pub(crate) device: Arc<Device<A>>,
@@ -863,7 +895,6 @@ impl<A: HalApi> RenderBundle<A> {
/// All the validation has already been done by this point.
/// The only failure condition is if some of the used buffers are destroyed.
pub(super) unsafe fn execute(&self, raw: &mut A::CommandEncoder) -> Result<(), ExecutionError> {
- let trackers = &self.used;
let mut offsets = self.base.dynamic_offsets.as_slice();
let mut pipeline_layout = None::<Arc<PipelineLayout<A>>>;
if !self.discard_hal_labels {
@@ -874,74 +905,65 @@ impl<A: HalApi> RenderBundle<A> {
let snatch_guard = self.device.snatchable_lock.read();
+ use ArcRenderCommand as Cmd;
for command in self.base.commands.iter() {
- match *command {
- RenderCommand::SetBindGroup {
+ match command {
+ Cmd::SetBindGroup {
index,
num_dynamic_offsets,
- bind_group_id,
+ bind_group,
} => {
- let bind_groups = trackers.bind_groups.read();
- let bind_group = bind_groups.get(bind_group_id).unwrap();
let raw_bg = bind_group
.raw(&snatch_guard)
- .ok_or(ExecutionError::InvalidBindGroup(bind_group_id))?;
+ .ok_or(ExecutionError::InvalidBindGroup(bind_group.info.id()))?;
unsafe {
raw.set_bind_group(
pipeline_layout.as_ref().unwrap().raw(),
- index,
+ *index,
raw_bg,
- &offsets[..num_dynamic_offsets],
+ &offsets[..*num_dynamic_offsets],
)
};
- offsets = &offsets[num_dynamic_offsets..];
+ offsets = &offsets[*num_dynamic_offsets..];
}
- RenderCommand::SetPipeline(pipeline_id) => {
- let render_pipelines = trackers.render_pipelines.read();
- let pipeline = render_pipelines.get(pipeline_id).unwrap();
+ Cmd::SetPipeline(pipeline) => {
unsafe { raw.set_render_pipeline(pipeline.raw()) };
pipeline_layout = Some(pipeline.layout.clone());
}
- RenderCommand::SetIndexBuffer {
- buffer_id,
+ Cmd::SetIndexBuffer {
+ buffer,
index_format,
offset,
size,
} => {
- let buffers = trackers.buffers.read();
- let buffer: &A::Buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer: &A::Buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let bb = hal::BufferBinding {
buffer,
- offset,
- size,
+ offset: *offset,
+ size: *size,
};
- unsafe { raw.set_index_buffer(bb, index_format) };
+ unsafe { raw.set_index_buffer(bb, *index_format) };
}
- RenderCommand::SetVertexBuffer {
+ Cmd::SetVertexBuffer {
slot,
- buffer_id,
+ buffer,
offset,
size,
} => {
- let buffers = trackers.buffers.read();
- let buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
let bb = hal::BufferBinding {
buffer,
- offset,
- size,
+ offset: *offset,
+ size: *size,
};
- unsafe { raw.set_vertex_buffer(slot, bb) };
+ unsafe { raw.set_vertex_buffer(*slot, bb) };
}
- RenderCommand::SetPushConstant {
+ Cmd::SetPushConstant {
stages,
offset,
size_bytes,
@@ -949,7 +971,7 @@ impl<A: HalApi> RenderBundle<A> {
} => {
let pipeline_layout = pipeline_layout.as_ref().unwrap();
- if let Some(values_offset) = values_offset {
+ if let Some(values_offset) = *values_offset {
let values_end_offset =
(values_offset + size_bytes / wgt::PUSH_CONSTANT_ALIGNMENT) as usize;
let data_slice = &self.base.push_constant_data
@@ -958,20 +980,20 @@ impl<A: HalApi> RenderBundle<A> {
unsafe {
raw.set_push_constants(
pipeline_layout.raw(),
- stages,
- offset,
+ *stages,
+ *offset,
data_slice,
)
}
} else {
super::push_constant_clear(
- offset,
- size_bytes,
+ *offset,
+ *size_bytes,
|clear_offset, clear_data| {
unsafe {
raw.set_push_constants(
pipeline_layout.raw(),
- stages,
+ *stages,
clear_offset,
clear_data,
)
@@ -980,15 +1002,22 @@ impl<A: HalApi> RenderBundle<A> {
);
}
}
- RenderCommand::Draw {
+ Cmd::Draw {
vertex_count,
instance_count,
first_vertex,
first_instance,
} => {
- unsafe { raw.draw(first_vertex, vertex_count, first_instance, instance_count) };
+ unsafe {
+ raw.draw(
+ *first_vertex,
+ *vertex_count,
+ *first_instance,
+ *instance_count,
+ )
+ };
}
- RenderCommand::DrawIndexed {
+ Cmd::DrawIndexed {
index_count,
instance_count,
first_index,
@@ -997,63 +1026,54 @@ impl<A: HalApi> RenderBundle<A> {
} => {
unsafe {
raw.draw_indexed(
- first_index,
- index_count,
- base_vertex,
- first_instance,
- instance_count,
+ *first_index,
+ *index_count,
+ *base_vertex,
+ *first_instance,
+ *instance_count,
)
};
}
- RenderCommand::MultiDrawIndirect {
- buffer_id,
+ Cmd::MultiDrawIndirect {
+ buffer,
offset,
count: None,
indexed: false,
} => {
- let buffers = trackers.buffers.read();
- let buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
- unsafe { raw.draw_indirect(buffer, offset, 1) };
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
+ unsafe { raw.draw_indirect(buffer, *offset, 1) };
}
- RenderCommand::MultiDrawIndirect {
- buffer_id,
+ Cmd::MultiDrawIndirect {
+ buffer,
offset,
count: None,
indexed: true,
} => {
- let buffers = trackers.buffers.read();
- let buffer = buffers
- .get(buffer_id)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?
+ let buffer = buffer
.raw(&snatch_guard)
- .ok_or(ExecutionError::DestroyedBuffer(buffer_id))?;
- unsafe { raw.draw_indexed_indirect(buffer, offset, 1) };
+ .ok_or(ExecutionError::DestroyedBuffer(buffer.info.id()))?;
+ unsafe { raw.draw_indexed_indirect(buffer, *offset, 1) };
}
- RenderCommand::MultiDrawIndirect { .. }
- | RenderCommand::MultiDrawIndirectCount { .. } => {
+ Cmd::MultiDrawIndirect { .. } | Cmd::MultiDrawIndirectCount { .. } => {
return Err(ExecutionError::Unimplemented("multi-draw-indirect"))
}
- RenderCommand::PushDebugGroup { .. }
- | RenderCommand::InsertDebugMarker { .. }
- | RenderCommand::PopDebugGroup => {
+ Cmd::PushDebugGroup { .. } | Cmd::InsertDebugMarker { .. } | Cmd::PopDebugGroup => {
return Err(ExecutionError::Unimplemented("debug-markers"))
}
- RenderCommand::WriteTimestamp { .. }
- | RenderCommand::BeginOcclusionQuery { .. }
- | RenderCommand::EndOcclusionQuery
- | RenderCommand::BeginPipelineStatisticsQuery { .. }
- | RenderCommand::EndPipelineStatisticsQuery => {
+ Cmd::WriteTimestamp { .. }
+ | Cmd::BeginOcclusionQuery { .. }
+ | Cmd::EndOcclusionQuery
+ | Cmd::BeginPipelineStatisticsQuery { .. }
+ | Cmd::EndPipelineStatisticsQuery => {
return Err(ExecutionError::Unimplemented("queries"))
}
- RenderCommand::ExecuteBundle(_)
- | RenderCommand::SetBlendConstant(_)
- | RenderCommand::SetStencilReference(_)
- | RenderCommand::SetViewport { .. }
- | RenderCommand::SetScissor(_) => unreachable!(),
+ Cmd::ExecuteBundle(_)
+ | Cmd::SetBlendConstant(_)
+ | Cmd::SetStencilReference(_)
+ | Cmd::SetViewport { .. }
+ | Cmd::SetScissor(_) => unreachable!(),
}
}
@@ -1087,14 +1107,14 @@ impl<A: HalApi> Resource for RenderBundle<A> {
/// and calls [`State::flush_index`] before any indexed draw command to produce
/// a `SetIndexBuffer` command if one is necessary.
#[derive(Debug)]
-struct IndexState {
- buffer: id::BufferId,
+struct IndexState<A: HalApi> {
+ buffer: Arc<Buffer<A>>,
format: wgt::IndexFormat,
range: Range<wgt::BufferAddress>,
is_dirty: bool,
}
-impl IndexState {
+impl<A: HalApi> IndexState<A> {
/// Return the number of entries in the current index buffer.
///
/// Panic if no index buffer has been set.
@@ -1109,11 +1129,11 @@ impl IndexState {
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
- fn flush(&mut self) -> Option<RenderCommand> {
+ fn flush(&mut self) -> Option<ArcRenderCommand<A>> {
if self.is_dirty {
self.is_dirty = false;
- Some(RenderCommand::SetIndexBuffer {
- buffer_id: self.buffer,
+ Some(ArcRenderCommand::SetIndexBuffer {
+ buffer: self.buffer.clone(),
index_format: self.format,
offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start),
@@ -1134,14 +1154,14 @@ impl IndexState {
///
/// [`flush`]: IndexState::flush
#[derive(Debug)]
-struct VertexState {
- buffer: id::BufferId,
+struct VertexState<A: HalApi> {
+ buffer: Arc<Buffer<A>>,
range: Range<wgt::BufferAddress>,
is_dirty: bool,
}
-impl VertexState {
- fn new(buffer: id::BufferId, range: Range<wgt::BufferAddress>) -> Self {
+impl<A: HalApi> VertexState<A> {
+ fn new(buffer: Arc<Buffer<A>>, range: Range<wgt::BufferAddress>) -> Self {
Self {
buffer,
range,
@@ -1152,12 +1172,12 @@ impl VertexState {
/// Generate a `SetVertexBuffer` command for this slot, if necessary.
///
/// `slot` is the index of the vertex buffer slot that `self` tracks.
- fn flush(&mut self, slot: u32) -> Option<RenderCommand> {
+ fn flush(&mut self, slot: u32) -> Option<ArcRenderCommand<A>> {
if self.is_dirty {
self.is_dirty = false;
- Some(RenderCommand::SetVertexBuffer {
+ Some(ArcRenderCommand::SetVertexBuffer {
slot,
- buffer_id: self.buffer,
+ buffer: self.buffer.clone(),
offset: self.range.start,
size: wgt::BufferSize::new(self.range.end - self.range.start),
})
@@ -1219,7 +1239,7 @@ impl<A: HalApi> PipelineState<A> {
/// Return a sequence of commands to zero the push constant ranges this
/// pipeline uses. If no initialization is necessary, return `None`.
- fn zero_push_constants(&self) -> Option<impl Iterator<Item = RenderCommand>> {
+ fn zero_push_constants(&self) -> Option<impl Iterator<Item = ArcRenderCommand<A>>> {
if !self.push_constant_ranges.is_empty() {
let nonoverlapping_ranges =
super::bind::compute_nonoverlapping_ranges(&self.push_constant_ranges);
@@ -1227,7 +1247,7 @@ impl<A: HalApi> PipelineState<A> {
Some(
nonoverlapping_ranges
.into_iter()
- .map(|range| RenderCommand::SetPushConstant {
+ .map(|range| ArcRenderCommand::SetPushConstant {
stages: range.stages,
offset: range.range.start,
size_bytes: range.range.end - range.range.start,
@@ -1261,11 +1281,11 @@ struct State<A: HalApi> {
bind: ArrayVec<Option<BindState<A>>, { hal::MAX_BIND_GROUPS }>,
/// The state of each vertex buffer slot.
- vertex: ArrayVec<Option<VertexState>, { hal::MAX_VERTEX_BUFFERS }>,
+ vertex: ArrayVec<Option<VertexState<A>>, { hal::MAX_VERTEX_BUFFERS }>,
/// The current index buffer, if one has been set. We flush this state
/// before indexed draw commands.
- index: Option<IndexState>,
+ index: Option<IndexState<A>>,
/// Dynamic offset values used by the cleaned-up command sequence.
///
@@ -1375,13 +1395,13 @@ impl<A: HalApi> State<A> {
/// Set the bundle's current index buffer and its associated parameters.
fn set_index_buffer(
&mut self,
- buffer: id::BufferId,
+ buffer: Arc<Buffer<A>>,
format: wgt::IndexFormat,
range: Range<wgt::BufferAddress>,
) {
match self.index {
Some(ref current)
- if current.buffer == buffer
+ if Arc::ptr_eq(&current.buffer, &buffer)
&& current.format == format
&& current.range == range =>
{
@@ -1400,11 +1420,11 @@ impl<A: HalApi> State<A> {
/// Generate a `SetIndexBuffer` command to prepare for an indexed draw
/// command, if needed.
- fn flush_index(&mut self) -> Option<RenderCommand> {
+ fn flush_index(&mut self) -> Option<ArcRenderCommand<A>> {
self.index.as_mut().and_then(|index| index.flush())
}
- fn flush_vertices(&mut self) -> impl Iterator<Item = RenderCommand> + '_ {
+ fn flush_vertices(&mut self) -> impl Iterator<Item = ArcRenderCommand<A>> + '_ {
self.vertex
.iter_mut()
.enumerate()
@@ -1416,7 +1436,7 @@ impl<A: HalApi> State<A> {
&mut self,
used_bind_groups: usize,
dynamic_offsets: &[wgt::DynamicOffset],
- ) -> impl Iterator<Item = RenderCommand> + '_ {
+ ) -> impl Iterator<Item = ArcRenderCommand<A>> + '_ {
// Append each dirty bind group's dynamic offsets to `flat_dynamic_offsets`.
for contents in self.bind[..used_bind_groups].iter().flatten() {
if contents.is_dirty {
@@ -1435,9 +1455,9 @@ impl<A: HalApi> State<A> {
if contents.is_dirty {
contents.is_dirty = false;
let offsets = &contents.dynamic_offsets;
- return Some(RenderCommand::SetBindGroup {
+ return Some(ArcRenderCommand::SetBindGroup {
index: i.try_into().unwrap(),
- bind_group_id: contents.bind_group.as_info().id(),
+ bind_group: contents.bind_group.clone(),
num_dynamic_offsets: offsets.end - offsets.start,
});
}
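
With the bundle's internal state now holding Arc<Buffer> instead of buffer ids, redundant index-buffer binds are detected by pointer identity (Arc::ptr_eq) plus format and range equality, as in set_index_buffer above; only dirty slots get flushed into the command stream. A minimal standalone sketch of that dedup pattern follows; the IndexSlot name and bare u64 addresses are illustrative, not wgpu-core types.

    use std::ops::Range;
    use std::sync::Arc;

    struct IndexSlot<B> {
        buffer: Arc<B>,
        range: Range<u64>,
        is_dirty: bool,
    }

    impl<B> IndexSlot<B> {
        // Record a new binding, but leave `is_dirty` untouched when the same
        // buffer (by pointer identity) is bound over the same range, so no
        // redundant SetIndexBuffer command is emitted later.
        fn set(&mut self, buffer: Arc<B>, range: Range<u64>) {
            if Arc::ptr_eq(&self.buffer, &buffer) && self.range == range {
                return;
            }
            self.buffer = buffer;
            self.range = range;
            self.is_dirty = true;
        }
    }
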
diff --git a/third_party/rust/wgpu-core/src/command/clear.rs b/third_party/rust/wgpu-core/src/command/clear.rs
index 2569fea1a4..e404fabb14 100644
--- a/third_party/rust/wgpu-core/src/command/clear.rs
+++ b/third_party/rust/wgpu-core/src/command/clear.rs
@@ -39,6 +39,11 @@ pub enum ClearError {
UnalignedFillSize(BufferAddress),
#[error("Buffer offset {0:?} is not a multiple of `COPY_BUFFER_ALIGNMENT`")]
UnalignedBufferOffset(BufferAddress),
+ #[error("Clear starts at offset {start_offset} with size of {requested_size}, but these added together exceed `u64::MAX`")]
+ OffsetPlusSizeExceeds64BitBounds {
+ start_offset: BufferAddress,
+ requested_size: BufferAddress,
+ },
#[error("Clear of {start_offset}..{end_offset} would end up overrunning the bounds of the buffer of size {buffer_size}")]
BufferOverrun {
start_offset: BufferAddress,
@@ -117,25 +122,27 @@ impl Global {
if offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
return Err(ClearError::UnalignedBufferOffset(offset));
}
- if let Some(size) = size {
- if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
- return Err(ClearError::UnalignedFillSize(size));
- }
- let destination_end_offset = offset + size;
- if destination_end_offset > dst_buffer.size {
- return Err(ClearError::BufferOverrun {
+
+ let size = size.unwrap_or(dst_buffer.size.saturating_sub(offset));
+ if size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ return Err(ClearError::UnalignedFillSize(size));
+ }
+ let end_offset =
+ offset
+ .checked_add(size)
+ .ok_or(ClearError::OffsetPlusSizeExceeds64BitBounds {
start_offset: offset,
- end_offset: destination_end_offset,
- buffer_size: dst_buffer.size,
- });
- }
+ requested_size: size,
+ })?;
+ if end_offset > dst_buffer.size {
+ return Err(ClearError::BufferOverrun {
+ start_offset: offset,
+ end_offset,
+ buffer_size: dst_buffer.size,
+ });
}
- let end = match size {
- Some(size) => offset + size,
- None => dst_buffer.size,
- };
- if offset == end {
+ if offset == end_offset {
log::trace!("Ignoring fill_buffer of size 0");
return Ok(());
}
@@ -144,7 +151,7 @@ impl Global {
cmd_buf_data.buffer_memory_init_actions.extend(
dst_buffer.initialization_status.read().create_action(
&dst_buffer,
- offset..end,
+ offset..end_offset,
MemoryInitKind::ImplicitlyInitialized,
),
);
@@ -154,7 +161,7 @@ impl Global {
let cmd_buf_raw = cmd_buf_data.encoder.open()?;
unsafe {
cmd_buf_raw.transition_buffers(dst_barrier.into_iter());
- cmd_buf_raw.clear_buffer(dst_raw, offset..end);
+ cmd_buf_raw.clear_buffer(dst_raw, offset..end_offset);
}
Ok(())
}
@@ -366,7 +373,7 @@ fn clear_texture_via_buffer_copies<A: HalApi>(
assert!(
max_rows_per_copy > 0,
"Zero buffer size is too small to fill a single row \
- of a texture with format {:?} and desc {:?}",
+ of a texture with format {:?} and desc {:?}",
texture_desc.format,
texture_desc.size
);
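
The rewritten fill path above folds the Some/None size handling into one sequence: a missing size defaults to the rest of the buffer, alignment is checked once, and offset + size goes through checked_add so an overflowing request surfaces as the new OffsetPlusSizeExceeds64BitBounds error instead of wrapping. A standalone sketch of that validation, assuming COPY_BUFFER_ALIGNMENT is 4 as in wgpu's wgt constants:

    const COPY_BUFFER_ALIGNMENT: u64 = 4; // stand-in for wgt::COPY_BUFFER_ALIGNMENT

    #[derive(Debug, PartialEq)]
    enum ClearError {
        UnalignedBufferOffset(u64),
        UnalignedFillSize(u64),
        OffsetPlusSizeExceeds64BitBounds { start_offset: u64, requested_size: u64 },
        BufferOverrun { start_offset: u64, end_offset: u64, buffer_size: u64 },
    }

    /// Returns the validated `offset..end` range to clear, or `None` for a zero-sized fill.
    fn validate_fill(
        buffer_size: u64,
        offset: u64,
        size: Option<u64>,
    ) -> Result<Option<std::ops::Range<u64>>, ClearError> {
        if offset % COPY_BUFFER_ALIGNMENT != 0 {
            return Err(ClearError::UnalignedBufferOffset(offset));
        }
        // `None` means "fill to the end of the buffer".
        let size = size.unwrap_or(buffer_size.saturating_sub(offset));
        if size % COPY_BUFFER_ALIGNMENT != 0 {
            return Err(ClearError::UnalignedFillSize(size));
        }
        let end_offset = offset.checked_add(size).ok_or(
            ClearError::OffsetPlusSizeExceeds64BitBounds {
                start_offset: offset,
                requested_size: size,
            },
        )?;
        if end_offset > buffer_size {
            return Err(ClearError::BufferOverrun { start_offset: offset, end_offset, buffer_size });
        }
        Ok((offset != end_offset).then(|| offset..end_offset))
    }
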
diff --git a/third_party/rust/wgpu-core/src/command/compute.rs b/third_party/rust/wgpu-core/src/command/compute.rs
index 804186a01e..c2fd3ab397 100644
--- a/third_party/rust/wgpu-core/src/command/compute.rs
+++ b/third_party/rust/wgpu-core/src/command/compute.rs
@@ -1,6 +1,7 @@
use crate::device::DeviceError;
use crate::resource::Resource;
use crate::snatch::SnatchGuard;
+use crate::track::TrackerIndex;
use crate::{
binding_model::{
BindError, BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError,
@@ -305,7 +306,7 @@ impl<A: HalApi> State<A> {
raw_encoder: &mut A::CommandEncoder,
base_trackers: &mut Tracker<A>,
bind_group_guard: &Storage<BindGroup<A>>,
- indirect_buffer: Option<id::BufferId>,
+ indirect_buffer: Option<TrackerIndex>,
snatch_guard: &SnatchGuard,
) -> Result<(), UsageConflict> {
for id in self.binder.list_active() {
@@ -402,12 +403,11 @@ impl Global {
let pipeline_guard = hub.compute_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
- let texture_guard = hub.textures.read();
let mut state = State {
binder: Binder::new(),
pipeline: None,
- scope: UsageScope::new(&*buffer_guard, &*texture_guard),
+ scope: UsageScope::new(&device.tracker_indices),
debug_scope_depth: 0,
};
let mut temp_offsets = Vec::new();
@@ -452,17 +452,14 @@ impl Global {
let snatch_guard = device.snatchable_lock.read();
- tracker.set_size(
- Some(&*buffer_guard),
- Some(&*texture_guard),
- None,
- None,
- Some(&*bind_group_guard),
- Some(&*pipeline_guard),
- None,
- None,
- Some(&*query_set_guard),
- );
+ let indices = &device.tracker_indices;
+ tracker.buffers.set_size(indices.buffers.size());
+ tracker.textures.set_size(indices.textures.size());
+ tracker.bind_groups.set_size(indices.bind_groups.size());
+ tracker
+ .compute_pipelines
+ .set_size(indices.compute_pipelines.size());
+ tracker.query_sets.set_size(indices.query_sets.size());
let discard_hal_labels = self
.instance
@@ -719,8 +716,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
- check_buffer_usage(indirect_buffer.usage, wgt::BufferUsages::INDIRECT)
- .map_pass_err(scope)?;
+ check_buffer_usage(
+ buffer_id,
+ indirect_buffer.usage,
+ wgt::BufferUsages::INDIRECT,
+ )
+ .map_pass_err(scope)?;
let end_offset = offset + mem::size_of::<wgt::DispatchIndirectArgs>() as u64;
if end_offset > indirect_buffer.size {
@@ -753,7 +754,7 @@ impl Global {
raw,
&mut intermediate_trackers,
&*bind_group_guard,
- Some(buffer_id),
+ Some(indirect_buffer.as_info().tracker_index()),
&snatch_guard,
)
.map_pass_err(scope)?;
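
Throughout this patch, check_buffer_usage gains a leading buffer-id argument so the resulting usage error can name the offending buffer. The helper's definition is not part of these hunks, so the following is only an assumed sketch of its shape, with plain integers standing in for BufferId and wgt::BufferUsages:

    // Illustrative stand-ins; not the wgpu-core definitions.
    type BufferId = usize;
    type BufferUsages = u32;

    #[derive(Debug)]
    struct MissingBufferUsageError {
        buffer: BufferId,       // newly threaded through for diagnostics
        actual: BufferUsages,
        expected: BufferUsages,
    }

    // Same containment check as before; the id is captured only so the
    // error message can point at a specific buffer.
    fn check_buffer_usage(
        buffer: BufferId,
        actual: BufferUsages,
        expected: BufferUsages,
    ) -> Result<(), MissingBufferUsageError> {
        if (actual & expected) == expected {
            Ok(())
        } else {
            Err(MissingBufferUsageError { buffer, actual, expected })
        }
    }
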
diff --git a/third_party/rust/wgpu-core/src/command/draw.rs b/third_party/rust/wgpu-core/src/command/draw.rs
index e03a78ee93..98aa689b78 100644
--- a/third_party/rust/wgpu-core/src/command/draw.rs
+++ b/third_party/rust/wgpu-core/src/command/draw.rs
@@ -2,17 +2,22 @@
!*/
use crate::{
- binding_model::{LateMinBufferBindingSizeMismatch, PushConstantUploadError},
+ binding_model::{BindGroup, LateMinBufferBindingSizeMismatch, PushConstantUploadError},
error::ErrorFormatter,
+ hal_api::HalApi,
id,
+ pipeline::RenderPipeline,
+ resource::{Buffer, QuerySet},
track::UsageConflict,
validation::{MissingBufferUsageError, MissingTextureUsageError},
};
use wgt::{BufferAddress, BufferSize, Color, VertexStepMode};
-use std::num::NonZeroU32;
+use std::{num::NonZeroU32, sync::Arc};
use thiserror::Error;
+use super::RenderBundle;
+
/// Error validating a draw call.
#[derive(Clone, Debug, Error, Eq, PartialEq)]
#[non_exhaustive]
@@ -245,3 +250,114 @@ pub enum RenderCommand {
EndPipelineStatisticsQuery,
ExecuteBundle(id::RenderBundleId),
}
+
+/// Equivalent to `RenderCommand` with the Ids resolved into resource Arcs.
+#[doc(hidden)]
+#[derive(Clone, Debug)]
+pub enum ArcRenderCommand<A: HalApi> {
+ SetBindGroup {
+ index: u32,
+ num_dynamic_offsets: usize,
+ bind_group: Arc<BindGroup<A>>,
+ },
+ SetPipeline(Arc<RenderPipeline<A>>),
+ SetIndexBuffer {
+ buffer: Arc<Buffer<A>>,
+ index_format: wgt::IndexFormat,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ },
+ SetVertexBuffer {
+ slot: u32,
+ buffer: Arc<Buffer<A>>,
+ offset: BufferAddress,
+ size: Option<BufferSize>,
+ },
+ SetBlendConstant(Color),
+ SetStencilReference(u32),
+ SetViewport {
+ rect: Rect<f32>,
+ depth_min: f32,
+ depth_max: f32,
+ },
+ SetScissor(Rect<u32>),
+
+ /// Set a range of push constants to values stored in [`BasePass::push_constant_data`].
+ ///
+ /// See [`wgpu::RenderPass::set_push_constants`] for a detailed explanation
+ /// of the restrictions these commands must satisfy.
+ SetPushConstant {
+ /// Which stages we are setting push constant values for.
+ stages: wgt::ShaderStages,
+
+ /// The byte offset within the push constant storage to write to. This
+ /// must be a multiple of four.
+ offset: u32,
+
+ /// The number of bytes to write. This must be a multiple of four.
+ size_bytes: u32,
+
+ /// Index in [`BasePass::push_constant_data`] of the start of the data
+ /// to be written.
+ ///
+ /// Note: this is not a byte offset like `offset`. Rather, it is the
+ /// index of the first `u32` element in `push_constant_data` to read.
+ ///
+ /// `None` means zeros should be written to the destination range, and
+ /// there is no corresponding data in `push_constant_data`. This is used
+ /// by render bundles, which explicitly clear out any state that
+ /// post-bundle code might see.
+ values_offset: Option<u32>,
+ },
+ Draw {
+ vertex_count: u32,
+ instance_count: u32,
+ first_vertex: u32,
+ first_instance: u32,
+ },
+ DrawIndexed {
+ index_count: u32,
+ instance_count: u32,
+ first_index: u32,
+ base_vertex: i32,
+ first_instance: u32,
+ },
+ MultiDrawIndirect {
+ buffer: Arc<Buffer<A>>,
+ offset: BufferAddress,
+ /// Count of `None` represents a non-multi call.
+ count: Option<NonZeroU32>,
+ indexed: bool,
+ },
+ MultiDrawIndirectCount {
+ buffer: Arc<Buffer<A>>,
+ offset: BufferAddress,
+ count_buffer: Arc<Buffer<A>>,
+ count_buffer_offset: BufferAddress,
+ max_count: u32,
+ indexed: bool,
+ },
+ PushDebugGroup {
+ color: u32,
+ len: usize,
+ },
+ PopDebugGroup,
+ InsertDebugMarker {
+ color: u32,
+ len: usize,
+ },
+ WriteTimestamp {
+ query_set: Arc<QuerySet<A>>,
+ query_index: u32,
+ },
+ BeginOcclusionQuery {
+ query_index: u32,
+ },
+ EndOcclusionQuery,
+ BeginPipelineStatisticsQuery {
+ query_set: Arc<QuerySet<A>>,
+ query_index: u32,
+ },
+ EndPipelineStatisticsQuery,
+ ExecuteBundle(Arc<RenderBundle<A>>),
+}
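
ArcRenderCommand mirrors RenderCommand with every id replaced by an Arc to the resolved resource, which is what lets RenderBundle::execute replay the stream without any tracker lookups. The actual resolution happens arm by arm in RenderBundleEncoder::finish, as in the hunks above; below is a reduced standalone sketch of the id-to-Arc pattern, with toy Command/Buffer types rather than the wgpu-core ones:

    use std::collections::HashMap;
    use std::sync::Arc;

    struct Buffer { /* raw handle, usage, ... */ }

    // Id-based command, as recorded by the user.
    enum Command {
        SetVertexBuffer { slot: u32, buffer_id: u64, offset: u64 },
    }

    // Arc-based command, as stored in the finished bundle.
    enum ArcCommand {
        SetVertexBuffer { slot: u32, buffer: Arc<Buffer>, offset: u64 },
    }

    // Resolve ids once at finish time; replay later never touches the registry.
    fn resolve(cmd: Command, buffers: &HashMap<u64, Arc<Buffer>>) -> Option<ArcCommand> {
        match cmd {
            Command::SetVertexBuffer { slot, buffer_id, offset } => {
                let buffer = buffers.get(&buffer_id)?.clone();
                Some(ArcCommand::SetVertexBuffer { slot, buffer, offset })
            }
        }
    }
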
diff --git a/third_party/rust/wgpu-core/src/command/mod.rs b/third_party/rust/wgpu-core/src/command/mod.rs
index 2d5fca200a..febed4fc97 100644
--- a/third_party/rust/wgpu-core/src/command/mod.rs
+++ b/third_party/rust/wgpu-core/src/command/mod.rs
@@ -75,7 +75,7 @@ impl<A: HalApi> CommandEncoder<A> {
Ok(())
}
- fn discard(&mut self) {
+ pub(crate) fn discard(&mut self) {
if self.is_open {
self.is_open = false;
unsafe { self.raw.discard_encoding() };
@@ -112,7 +112,7 @@ pub(crate) struct DestroyedBufferError(pub id::BufferId);
pub(crate) struct DestroyedTextureError(pub id::TextureId);
pub struct CommandBufferMutable<A: HalApi> {
- encoder: CommandEncoder<A>,
+ pub(crate) encoder: CommandEncoder<A>,
status: CommandEncoderStatus,
pub(crate) trackers: Tracker<A>,
buffer_memory_init_actions: Vec<BufferInitTrackerAction<A>>,
@@ -174,6 +174,7 @@ impl<A: HalApi> CommandBuffer<A> {
.as_ref()
.unwrap_or(&String::from("<CommandBuffer>"))
.as_str(),
+ None,
),
data: Mutex::new(Some(CommandBufferMutable {
encoder: CommandEncoder {
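
ResourceInfo::new now takes a second argument: None here for command buffers, and Some(device.tracker_indices.<kind>.clone()) for tracked resources elsewhere in this patch. The allocator type itself is not shown in these hunks, so the sketch below is only an assumption about its shape: an optional shared counter that hands out the TrackerIndex now used as the tracking key in place of ids.

    use std::sync::{
        atomic::{AtomicUsize, Ordering},
        Arc,
    };

    // Assumed stand-in for the shared per-device tracker-index allocator.
    #[derive(Default)]
    struct TrackerIndexAllocator {
        next: AtomicUsize,
    }

    impl TrackerIndexAllocator {
        fn alloc(&self) -> usize {
            self.next.fetch_add(1, Ordering::Relaxed)
        }
    }

    struct ResourceInfo {
        label: String,
        // Some resources (command buffers above) are never tracked, hence Option.
        tracker_index: Option<usize>,
    }

    impl ResourceInfo {
        fn new(label: &str, tracker_indices: Option<Arc<TrackerIndexAllocator>>) -> Self {
            ResourceInfo {
                label: label.to_string(),
                tracker_index: tracker_indices.map(|alloc| alloc.alloc()),
            }
        }
    }
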
diff --git a/third_party/rust/wgpu-core/src/command/query.rs b/third_party/rust/wgpu-core/src/command/query.rs
index 39d7a9cc93..89cba6fbf3 100644
--- a/third_party/rust/wgpu-core/src/command/query.rs
+++ b/third_party/rust/wgpu-core/src/command/query.rs
@@ -4,7 +4,7 @@ use hal::CommandEncoder as _;
use crate::device::trace::Command as TraceCommand;
use crate::{
command::{CommandBuffer, CommandEncoderError},
- device::DeviceError,
+ device::{DeviceError, MissingFeatures},
global::Global,
hal_api::HalApi,
id::{self, Id},
@@ -108,6 +108,8 @@ pub enum QueryError {
Device(#[from] DeviceError),
#[error(transparent)]
Encoder(#[from] CommandEncoderError),
+ #[error(transparent)]
+ MissingFeature(#[from] MissingFeatures),
#[error("Error encountered while trying to use queries")]
Use(#[from] QueryUseError),
#[error("Error encountered while trying to resolve a query")]
@@ -355,6 +357,11 @@ impl Global {
let hub = A::hub(self);
let cmd_buf = CommandBuffer::get_encoder(hub, command_encoder_id)?;
+
+ cmd_buf
+ .device
+ .require_features(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS)?;
+
let mut cmd_buf_data = cmd_buf.data.lock();
let cmd_buf_data = cmd_buf_data.as_mut().unwrap();
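
command_encoder_write_timestamp now requires the TIMESTAMP_QUERY_INSIDE_ENCODERS feature, and the new MissingFeatures variant on QueryError lets the require_features call propagate with `?` through thiserror's #[from]. A reduced sketch of that propagation pattern, using the same thiserror crate the diff already depends on; the Device field and method names here are illustrative:

    use thiserror::Error;

    #[derive(Debug, Error)]
    #[error("missing required features: {0}")]
    struct MissingFeatures(&'static str);

    #[derive(Debug, Error)]
    enum QueryError {
        // `#[from]` is what lets `require_timestamp_inside_encoders()?` below
        // convert automatically into a QueryError.
        #[error(transparent)]
        MissingFeature(#[from] MissingFeatures),
    }

    struct Device {
        timestamp_queries_inside_encoders: bool,
    }

    impl Device {
        fn require_timestamp_inside_encoders(&self) -> Result<(), MissingFeatures> {
            if self.timestamp_queries_inside_encoders {
                Ok(())
            } else {
                Err(MissingFeatures("TIMESTAMP_QUERY_INSIDE_ENCODERS"))
            }
        }
    }

    fn write_timestamp(device: &Device) -> Result<(), QueryError> {
        device.require_timestamp_inside_encoders()?;
        // ... record the timestamp command ...
        Ok(())
    }
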
diff --git a/third_party/rust/wgpu-core/src/command/render.rs b/third_party/rust/wgpu-core/src/command/render.rs
index d3de3e26e1..9141ddb021 100644
--- a/third_party/rust/wgpu-core/src/command/render.rs
+++ b/third_party/rust/wgpu-core/src/command/render.rs
@@ -22,7 +22,7 @@ use crate::{
hal_label, id,
init_tracker::{MemoryInitKind, TextureInitRange, TextureInitTrackerAction},
pipeline::{self, PipelineFlags},
- resource::{Buffer, QuerySet, Texture, TextureView, TextureViewNotRenderableReason},
+ resource::{QuerySet, Texture, TextureView, TextureViewNotRenderableReason},
storage::Storage,
track::{TextureSelector, Tracker, UsageConflict, UsageScope},
validation::{
@@ -531,6 +531,8 @@ pub enum ColorAttachmentError {
InvalidFormat(wgt::TextureFormat),
#[error("The number of color attachments {given} exceeds the limit {limit}")]
TooMany { given: usize, limit: usize },
+ #[error("The total number of bytes per sample in color attachments {total} exceeds the limit {limit}")]
+ TooManyBytesPerSample { total: u32, limit: u32 },
}
/// Error encountered when performing a render pass.
@@ -799,8 +801,6 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
texture_memory_actions: &mut CommandBufferTextureMemoryActions<A>,
pending_query_resets: &mut QueryResetMap<A>,
view_guard: &'a Storage<TextureView<A>>,
- buffer_guard: &'a Storage<Buffer<A>>,
- texture_guard: &'a Storage<Texture<A>>,
query_set_guard: &'a Storage<QuerySet<A>>,
snatch_guard: &SnatchGuard<'a>,
) -> Result<Self, RenderPassErrorInner> {
@@ -1214,7 +1214,7 @@ impl<'a, A: HalApi> RenderPassInfo<'a, A> {
Ok(Self {
context,
- usage_scope: UsageScope::new(buffer_guard, texture_guard),
+ usage_scope: UsageScope::new(&device.tracker_indices),
render_attachments,
is_depth_read_only,
is_stencil_read_only,
@@ -1386,7 +1386,6 @@ impl Global {
let render_pipeline_guard = hub.render_pipelines.read();
let query_set_guard = hub.query_sets.read();
let buffer_guard = hub.buffers.read();
- let texture_guard = hub.textures.read();
let view_guard = hub.texture_views.read();
log::trace!(
@@ -1406,24 +1405,21 @@ impl Global {
texture_memory_actions,
pending_query_resets,
&*view_guard,
- &*buffer_guard,
- &*texture_guard,
&*query_set_guard,
&snatch_guard,
)
.map_pass_err(pass_scope)?;
- tracker.set_size(
- Some(&*buffer_guard),
- Some(&*texture_guard),
- Some(&*view_guard),
- None,
- Some(&*bind_group_guard),
- None,
- Some(&*render_pipeline_guard),
- Some(&*bundle_guard),
- Some(&*query_set_guard),
- );
+ let indices = &device.tracker_indices;
+ tracker.buffers.set_size(indices.buffers.size());
+ tracker.textures.set_size(indices.textures.size());
+ tracker.views.set_size(indices.texture_views.size());
+ tracker.bind_groups.set_size(indices.bind_groups.size());
+ tracker
+ .render_pipelines
+ .set_size(indices.render_pipelines.size());
+ tracker.bundles.set_size(indices.bundles.size());
+ tracker.query_sets.set_size(indices.query_sets.size());
let raw = &mut encoder.raw;
@@ -1675,7 +1671,7 @@ impl Global {
return Err(DeviceError::WrongDevice).map_pass_err(scope);
}
- check_buffer_usage(buffer.usage, BufferUsages::INDEX)
+ check_buffer_usage(buffer_id, buffer.usage, BufferUsages::INDEX)
.map_pass_err(scope)?;
let buf_raw = buffer
.raw
@@ -1737,7 +1733,7 @@ impl Global {
.map_pass_err(scope);
}
- check_buffer_usage(buffer.usage, BufferUsages::VERTEX)
+ check_buffer_usage(buffer_id, buffer.usage, BufferUsages::VERTEX)
.map_pass_err(scope)?;
let buf_raw = buffer
.raw
@@ -2034,8 +2030,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
- check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
- .map_pass_err(scope)?;
+ check_buffer_usage(
+ buffer_id,
+ indirect_buffer.usage,
+ BufferUsages::INDIRECT,
+ )
+ .map_pass_err(scope)?;
let indirect_raw = indirect_buffer
.raw
.get(&snatch_guard)
@@ -2106,8 +2106,12 @@ impl Global {
.buffers
.merge_single(&*buffer_guard, buffer_id, hal::BufferUses::INDIRECT)
.map_pass_err(scope)?;
- check_buffer_usage(indirect_buffer.usage, BufferUsages::INDIRECT)
- .map_pass_err(scope)?;
+ check_buffer_usage(
+ buffer_id,
+ indirect_buffer.usage,
+ BufferUsages::INDIRECT,
+ )
+ .map_pass_err(scope)?;
let indirect_raw = indirect_buffer
.raw
.get(&snatch_guard)
@@ -2123,7 +2127,7 @@ impl Global {
hal::BufferUses::INDIRECT,
)
.map_pass_err(scope)?;
- check_buffer_usage(count_buffer.usage, BufferUsages::INDIRECT)
+ check_buffer_usage(buffer_id, count_buffer.usage, BufferUsages::INDIRECT)
.map_pass_err(scope)?;
let count_raw = count_buffer
.raw
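
The new TooManyBytesPerSample variant rejects render passes whose color attachments together cost more bytes per sample than the device allows. The accumulation over attachment formats is not part of this hunk, so the sketch below only illustrates the final check, assuming a per-attachment byte cost has already been computed:

    #[derive(Debug)]
    enum ColorAttachmentError {
        TooManyBytesPerSample { total: u32, limit: u32 },
    }

    /// Sum each attachment's per-sample byte cost and compare against the device limit.
    /// The per-format cost lookup is assumed to happen elsewhere.
    fn validate_color_attachment_bytes_per_sample(
        attachment_costs: impl IntoIterator<Item = u32>,
        limit: u32,
    ) -> Result<(), ColorAttachmentError> {
        let total: u32 = attachment_costs.into_iter().sum();
        if total > limit {
            return Err(ColorAttachmentError::TooManyBytesPerSample { total, limit });
        }
        Ok(())
    }
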
diff --git a/third_party/rust/wgpu-core/src/device/global.rs b/third_party/rust/wgpu-core/src/device/global.rs
index 64fd6d4de7..539b92e0f3 100644
--- a/third_party/rust/wgpu-core/src/device/global.rs
+++ b/third_party/rust/wgpu-core/src/device/global.rs
@@ -26,9 +26,7 @@ use wgt::{BufferAddress, TextureFormat};
use std::{
borrow::Cow,
- iter,
- ops::Range,
- ptr,
+ iter, ptr,
sync::{atomic::Ordering, Arc},
};
@@ -219,7 +217,7 @@ impl Global {
mapped_at_creation: false,
};
let stage = match device.create_buffer(&stage_desc, true) {
- Ok(stage) => stage,
+ Ok(stage) => Arc::new(stage),
Err(e) => {
to_destroy.push(buffer);
break e;
@@ -232,14 +230,10 @@ impl Global {
Ok(mapping) => mapping,
Err(e) => {
to_destroy.push(buffer);
- to_destroy.push(stage);
break CreateBufferError::Device(e.into());
}
};
- let stage_fid = hub.buffers.request();
- let stage = stage_fid.init(stage);
-
assert_eq!(buffer.size % wgt::COPY_BUFFER_ALIGNMENT, 0);
// Zero initialize memory and then mark both staging and buffer as initialized
// (it's guaranteed that this is the case by the time the buffer is usable)
@@ -262,7 +256,7 @@ impl Global {
.trackers
.lock()
.buffers
- .insert_single(id, resource, buffer_use);
+ .insert_single(resource, buffer_use);
return (id, None);
};
@@ -383,7 +377,7 @@ impl Global {
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_WRITE)?;
//assert!(buffer isn't used by the GPU);
#[cfg(feature = "trace")]
@@ -446,7 +440,7 @@ impl Global {
.buffers
.get(buffer_id)
.map_err(|_| BufferAccessError::Invalid)?;
- check_buffer_usage(buffer.usage, wgt::BufferUsages::MAP_READ)?;
+ check_buffer_usage(buffer_id, buffer.usage, wgt::BufferUsages::MAP_READ)?;
//assert!(buffer isn't used by the GPU);
let raw_buf = buffer
@@ -529,7 +523,7 @@ impl Global {
.lock_life()
.suspected_resources
.buffers
- .insert(buffer_id, buffer);
+ .insert(buffer.info.tracker_index(), buffer);
}
if wait {
@@ -573,11 +567,11 @@ impl Global {
let (id, resource) = fid.assign(texture);
api_log!("Device::create_texture({desc:?}) -> {id:?}");
- device.trackers.lock().textures.insert_single(
- id,
- resource,
- hal::TextureUses::UNINITIALIZED,
- );
+ device
+ .trackers
+ .lock()
+ .textures
+ .insert_single(resource, hal::TextureUses::UNINITIALIZED);
return (id, None);
};
@@ -647,11 +641,11 @@ impl Global {
let (id, resource) = fid.assign(texture);
api_log!("Device::create_texture({desc:?}) -> {id:?}");
- device.trackers.lock().textures.insert_single(
- id,
- resource,
- hal::TextureUses::UNINITIALIZED,
- );
+ device
+ .trackers
+ .lock()
+ .textures
+ .insert_single(resource, hal::TextureUses::UNINITIALIZED);
return (id, None);
};
@@ -704,7 +698,7 @@ impl Global {
.trackers
.lock()
.buffers
- .insert_single(id, buffer, hal::BufferUses::empty());
+ .insert_single(buffer, hal::BufferUses::empty());
return (id, None);
};
@@ -764,7 +758,7 @@ impl Global {
.lock_life()
.suspected_resources
.textures
- .insert(texture_id, texture.clone());
+ .insert(texture.info.tracker_index(), texture.clone());
}
}
@@ -824,7 +818,7 @@ impl Global {
}
api_log!("Texture::create_view({texture_id:?}) -> {id:?}");
- device.trackers.lock().views.insert_single(id, resource);
+ device.trackers.lock().views.insert_single(resource);
return (id, None);
};
@@ -854,7 +848,7 @@ impl Global {
.lock_life()
.suspected_resources
.texture_views
- .insert(texture_view_id, view.clone());
+ .insert(view.info.tracker_index(), view.clone());
if wait {
match view.device.wait_for_submit(last_submit_index) {
@@ -900,7 +894,7 @@ impl Global {
let (id, resource) = fid.assign(sampler);
api_log!("Device::create_sampler -> {id:?}");
- device.trackers.lock().samplers.insert_single(id, resource);
+ device.trackers.lock().samplers.insert_single(resource);
return (id, None);
};
@@ -925,7 +919,7 @@ impl Global {
.lock_life()
.suspected_resources
.samplers
- .insert(sampler_id, sampler.clone());
+ .insert(sampler.info.tracker_index(), sampler.clone());
}
}
@@ -1024,7 +1018,7 @@ impl Global {
.lock_life()
.suspected_resources
.bind_group_layouts
- .insert(bind_group_layout_id, layout.clone());
+ .insert(layout.info.tracker_index(), layout.clone());
}
}
@@ -1085,7 +1079,7 @@ impl Global {
.lock_life()
.suspected_resources
.pipeline_layouts
- .insert(pipeline_layout_id, layout.clone());
+ .insert(layout.info.tracker_index(), layout.clone());
}
}
@@ -1140,11 +1134,7 @@ impl Global {
api_log!("Device::create_bind_group -> {id:?}");
- device
- .trackers
- .lock()
- .bind_groups
- .insert_single(id, resource);
+ device.trackers.lock().bind_groups.insert_single(resource);
return (id, None);
};
@@ -1168,7 +1158,7 @@ impl Global {
.lock_life()
.suspected_resources
.bind_groups
- .insert(bind_group_id, bind_group.clone());
+ .insert(bind_group.info.tracker_index(), bind_group.clone());
}
}
@@ -1332,9 +1322,8 @@ impl Global {
if !device.is_valid() {
break DeviceError::Lost;
}
- let queue = match hub.queues.get(device.queue_id.read().unwrap()) {
- Ok(queue) => queue,
- Err(_) => break DeviceError::InvalidQueueId,
+ let Some(queue) = device.get_queue() else {
+ break DeviceError::InvalidQueueId;
};
let encoder = match device
.command_allocator
@@ -1379,6 +1368,7 @@ impl Global {
.command_buffers
.unregister(command_encoder_id.transmute())
{
+ cmd_buf.data.lock().as_mut().unwrap().encoder.discard();
cmd_buf
.device
.untrack(&cmd_buf.data.lock().as_ref().unwrap().trackers);
@@ -1450,7 +1440,7 @@ impl Global {
let (id, resource) = fid.assign(render_bundle);
api_log!("RenderBundleEncoder::finish -> {id:?}");
- device.trackers.lock().bundles.insert_single(id, resource);
+ device.trackers.lock().bundles.insert_single(resource);
return (id, None);
};
@@ -1474,7 +1464,7 @@ impl Global {
.lock_life()
.suspected_resources
.render_bundles
- .insert(render_bundle_id, bundle.clone());
+ .insert(bundle.info.tracker_index(), bundle.clone());
}
}
@@ -1513,11 +1503,7 @@ impl Global {
let (id, resource) = fid.assign(query_set);
api_log!("Device::create_query_set -> {id:?}");
- device
- .trackers
- .lock()
- .query_sets
- .insert_single(id, resource);
+ device.trackers.lock().query_sets.insert_single(resource);
return (id, None);
};
@@ -1544,7 +1530,7 @@ impl Global {
.lock_life()
.suspected_resources
.query_sets
- .insert(query_set_id, query_set.clone());
+ .insert(query_set.info.tracker_index(), query_set.clone());
}
}
@@ -1600,7 +1586,7 @@ impl Global {
.trackers
.lock()
.render_pipelines
- .insert_single(id, resource);
+ .insert_single(resource);
return (id, None);
};
@@ -1672,18 +1658,17 @@ impl Global {
let hub = A::hub(self);
if let Some(pipeline) = hub.render_pipelines.unregister(render_pipeline_id) {
- let layout_id = pipeline.layout.as_info().id();
let device = &pipeline.device;
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
.render_pipelines
- .insert(render_pipeline_id, pipeline.clone());
+ .insert(pipeline.info.tracker_index(), pipeline.clone());
- life_lock
- .suspected_resources
- .pipeline_layouts
- .insert(layout_id, pipeline.layout.clone());
+ life_lock.suspected_resources.pipeline_layouts.insert(
+ pipeline.layout.info.tracker_index(),
+ pipeline.layout.clone(),
+ );
}
}
@@ -1734,7 +1719,7 @@ impl Global {
.trackers
.lock()
.compute_pipelines
- .insert_single(id, resource);
+ .insert_single(resource);
return (id, None);
};
@@ -1804,17 +1789,16 @@ impl Global {
let hub = A::hub(self);
if let Some(pipeline) = hub.compute_pipelines.unregister(compute_pipeline_id) {
- let layout_id = pipeline.layout.as_info().id();
let device = &pipeline.device;
let mut life_lock = device.lock_life();
life_lock
.suspected_resources
.compute_pipelines
- .insert(compute_pipeline_id, pipeline.clone());
- life_lock
- .suspected_resources
- .pipeline_layouts
- .insert(layout_id, pipeline.layout.clone());
+ .insert(pipeline.info.tracker_index(), pipeline.clone());
+ life_lock.suspected_resources.pipeline_layouts.insert(
+ pipeline.layout.info.tracker_index(),
+ pipeline.layout.clone(),
+ );
}
}
@@ -2113,28 +2097,41 @@ impl Global {
.get(device_id)
.map_err(|_| DeviceError::Invalid)?;
- let (closures, queue_empty) = {
- if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
- if submission_index.queue_id != device_id.transmute() {
- return Err(WaitIdleError::WrongSubmissionIndex(
- submission_index.queue_id,
- device_id,
- ));
- }
+ if let wgt::Maintain::WaitForSubmissionIndex(submission_index) = maintain {
+ if submission_index.queue_id != device_id.transmute() {
+ return Err(WaitIdleError::WrongSubmissionIndex(
+ submission_index.queue_id,
+ device_id,
+ ));
}
+ }
- let fence = device.fence.read();
- let fence = fence.as_ref().unwrap();
- device.maintain(fence, maintain)?
- };
+ let DevicePoll {
+ closures,
+ queue_empty,
+ } = Self::poll_single_device(&device, maintain)?;
+
+ closures.fire();
+
+ Ok(queue_empty)
+ }
+
+ fn poll_single_device<A: HalApi>(
+ device: &crate::device::Device<A>,
+ maintain: wgt::Maintain<queue::WrappedSubmissionIndex>,
+ ) -> Result<DevicePoll, WaitIdleError> {
+ let fence = device.fence.read();
+ let fence = fence.as_ref().unwrap();
+ let (closures, queue_empty) = device.maintain(fence, maintain)?;
// Some deferred destroys are scheduled in maintain so run this right after
// to avoid holding on to them until the next device poll.
device.deferred_resource_destruction();
- closures.fire();
-
- Ok(queue_empty)
+ Ok(DevicePoll {
+ closures,
+ queue_empty,
+ })
}
/// Poll all devices belonging to the backend `A`.
@@ -2143,7 +2140,7 @@ impl Global {
///
/// Return `all_queue_empty` indicating whether there are more queue
/// submissions still in flight.
- fn poll_device<A: HalApi>(
+ fn poll_all_devices_of_api<A: HalApi>(
&self,
force_wait: bool,
closures: &mut UserClosures,
@@ -2161,10 +2158,13 @@ impl Global {
} else {
wgt::Maintain::Poll
};
- let fence = device.fence.read();
- let fence = fence.as_ref().unwrap();
- let (cbs, queue_empty) = device.maintain(fence, maintain)?;
- all_queue_empty = all_queue_empty && queue_empty;
+
+ let DevicePoll {
+ closures: cbs,
+ queue_empty,
+ } = Self::poll_single_device(device, maintain)?;
+
+ all_queue_empty &= queue_empty;
closures.extend(cbs);
}
@@ -2186,23 +2186,23 @@ impl Global {
#[cfg(vulkan)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Vulkan>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Vulkan>(force_wait, &mut closures)?;
}
#[cfg(metal)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Metal>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Metal>(force_wait, &mut closures)?;
}
#[cfg(dx12)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Dx12>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Dx12>(force_wait, &mut closures)?;
}
#[cfg(gles)]
{
- all_queue_empty =
- self.poll_device::<hal::api::Gles>(force_wait, &mut closures)? && all_queue_empty;
+ all_queue_empty &=
+ self.poll_all_devices_of_api::<hal::api::Gles>(force_wait, &mut closures)?;
}
closures.fire();
@@ -2336,15 +2336,18 @@ impl Global {
pub fn buffer_map_async<A: HalApi>(
&self,
buffer_id: id::BufferId,
- range: Range<BufferAddress>,
+ offset: BufferAddress,
+ size: Option<BufferAddress>,
op: BufferMapOperation,
) -> BufferAccessResult {
- api_log!("Buffer::map_async {buffer_id:?} range {range:?} op: {op:?}");
+ api_log!("Buffer::map_async {buffer_id:?} offset {offset:?} size {size:?} op: {op:?}");
// User callbacks must not be called while holding buffer_map_async_inner's locks, so we
// defer the error callback if it needs to be called immediately (typically when running
// into errors).
- if let Err((mut operation, err)) = self.buffer_map_async_inner::<A>(buffer_id, range, op) {
+ if let Err((mut operation, err)) =
+ self.buffer_map_async_inner::<A>(buffer_id, offset, size, op)
+ {
if let Some(callback) = operation.callback.take() {
callback.call(Err(err.clone()));
}
@@ -2360,7 +2363,8 @@ impl Global {
fn buffer_map_async_inner<A: HalApi>(
&self,
buffer_id: id::BufferId,
- range: Range<BufferAddress>,
+ offset: BufferAddress,
+ size: Option<BufferAddress>,
op: BufferMapOperation,
) -> Result<(), (BufferMapOperation, BufferAccessError)> {
profiling::scope!("Buffer::map_async");
@@ -2372,29 +2376,50 @@ impl Global {
HostMap::Write => (wgt::BufferUsages::MAP_WRITE, hal::BufferUses::MAP_WRITE),
};
- if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0 {
- return Err((op, BufferAccessError::UnalignedRange));
- }
-
let buffer = {
- let buffer = hub
- .buffers
- .get(buffer_id)
- .map_err(|_| BufferAccessError::Invalid);
+ let buffer = hub.buffers.get(buffer_id);
let buffer = match buffer {
Ok(b) => b,
- Err(e) => {
- return Err((op, e));
+ Err(_) => {
+ return Err((op, BufferAccessError::Invalid));
}
};
+ {
+ let snatch_guard = buffer.device.snatchable_lock.read();
+ if buffer.is_destroyed(&snatch_guard) {
+ return Err((op, BufferAccessError::Destroyed));
+ }
+ }
+
+ let range_size = if let Some(size) = size {
+ size
+ } else if offset > buffer.size {
+ 0
+ } else {
+ buffer.size - offset
+ };
+
+ if offset % wgt::MAP_ALIGNMENT != 0 {
+ return Err((op, BufferAccessError::UnalignedOffset { offset }));
+ }
+ if range_size % wgt::COPY_BUFFER_ALIGNMENT != 0 {
+ return Err((op, BufferAccessError::UnalignedRangeSize { range_size }));
+ }
+
+ let range = offset..(offset + range_size);
+
+ if range.start % wgt::MAP_ALIGNMENT != 0 || range.end % wgt::COPY_BUFFER_ALIGNMENT != 0
+ {
+ return Err((op, BufferAccessError::UnalignedRange));
+ }
let device = &buffer.device;
if !device.is_valid() {
return Err((op, DeviceError::Lost.into()));
}
- if let Err(e) = check_buffer_usage(buffer.usage, pub_usage) {
+ if let Err(e) = check_buffer_usage(buffer.info.id(), buffer.usage, pub_usage) {
return Err((op, e.into()));
}
@@ -2417,11 +2442,6 @@ impl Global {
));
}
- let snatch_guard = device.snatchable_lock.read();
- if buffer.is_destroyed(&snatch_guard) {
- return Err((op, BufferAccessError::Destroyed));
- }
-
{
let map_state = &mut *buffer.map_state.lock();
*map_state = match *map_state {
@@ -2442,6 +2462,8 @@ impl Global {
};
}
+ let snatch_guard = buffer.device.snatchable_lock.read();
+
{
let mut trackers = buffer.device.as_ref().trackers.lock();
trackers.buffers.set_single(&buffer, internal_use);
@@ -2557,3 +2579,8 @@ impl Global {
buffer.unmap()
}
}
+
+struct DevicePoll {
+ closures: UserClosures,
+ queue_empty: bool,
+}
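
buffer_map_async now takes an offset plus an optional size instead of a Range: a missing size means "to the end of the buffer", the offset is validated against MAP_ALIGNMENT, and the resolved size against COPY_BUFFER_ALIGNMENT, each with its own error variant. A standalone sketch of that range computation, assuming wgpu's usual alignment constants (MAP_ALIGNMENT = 8, COPY_BUFFER_ALIGNMENT = 4):

    const MAP_ALIGNMENT: u64 = 8;          // stand-in for wgt::MAP_ALIGNMENT
    const COPY_BUFFER_ALIGNMENT: u64 = 4;  // stand-in for wgt::COPY_BUFFER_ALIGNMENT

    #[derive(Debug, PartialEq)]
    enum BufferAccessError {
        UnalignedOffset { offset: u64 },
        UnalignedRangeSize { range_size: u64 },
    }

    fn resolve_map_range(
        buffer_size: u64,
        offset: u64,
        size: Option<u64>,
    ) -> Result<std::ops::Range<u64>, BufferAccessError> {
        // `None` maps the rest of the buffer; an offset past the end yields an empty range.
        let range_size = match size {
            Some(size) => size,
            None if offset > buffer_size => 0,
            None => buffer_size - offset,
        };
        if offset % MAP_ALIGNMENT != 0 {
            return Err(BufferAccessError::UnalignedOffset { offset });
        }
        if range_size % COPY_BUFFER_ALIGNMENT != 0 {
            return Err(BufferAccessError::UnalignedRangeSize { range_size });
        }
        Ok(offset..offset + range_size)
    }
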
diff --git a/third_party/rust/wgpu-core/src/device/life.rs b/third_party/rust/wgpu-core/src/device/life.rs
index 86c5d027c7..7b06a4a30b 100644
--- a/third_party/rust/wgpu-core/src/device/life.rs
+++ b/third_party/rust/wgpu-core/src/device/life.rs
@@ -6,17 +6,13 @@ use crate::{
DeviceError, DeviceLostClosure,
},
hal_api::HalApi,
- id::{
- self, BindGroupId, BindGroupLayoutId, BufferId, ComputePipelineId, Id, PipelineLayoutId,
- QuerySetId, RenderBundleId, RenderPipelineId, SamplerId, StagingBufferId, TextureId,
- TextureViewId,
- },
+ id,
pipeline::{ComputePipeline, RenderPipeline},
resource::{
self, Buffer, DestroyedBuffer, DestroyedTexture, QuerySet, Resource, Sampler,
StagingBuffer, Texture, TextureView,
},
- track::{ResourceTracker, Tracker},
+ track::{ResourceTracker, Tracker, TrackerIndex},
FastHashMap, SubmissionIndex,
};
use smallvec::SmallVec;
@@ -28,20 +24,20 @@ use thiserror::Error;
/// A struct that keeps lists of resources that are no longer needed by the user.
#[derive(Default)]
pub(crate) struct ResourceMaps<A: HalApi> {
- pub buffers: FastHashMap<BufferId, Arc<Buffer<A>>>,
- pub staging_buffers: FastHashMap<StagingBufferId, Arc<StagingBuffer<A>>>,
- pub textures: FastHashMap<TextureId, Arc<Texture<A>>>,
- pub texture_views: FastHashMap<TextureViewId, Arc<TextureView<A>>>,
- pub samplers: FastHashMap<SamplerId, Arc<Sampler<A>>>,
- pub bind_groups: FastHashMap<BindGroupId, Arc<BindGroup<A>>>,
- pub bind_group_layouts: FastHashMap<BindGroupLayoutId, Arc<BindGroupLayout<A>>>,
- pub render_pipelines: FastHashMap<RenderPipelineId, Arc<RenderPipeline<A>>>,
- pub compute_pipelines: FastHashMap<ComputePipelineId, Arc<ComputePipeline<A>>>,
- pub pipeline_layouts: FastHashMap<PipelineLayoutId, Arc<PipelineLayout<A>>>,
- pub render_bundles: FastHashMap<RenderBundleId, Arc<RenderBundle<A>>>,
- pub query_sets: FastHashMap<QuerySetId, Arc<QuerySet<A>>>,
- pub destroyed_buffers: FastHashMap<BufferId, Arc<DestroyedBuffer<A>>>,
- pub destroyed_textures: FastHashMap<TextureId, Arc<DestroyedTexture<A>>>,
+ pub buffers: FastHashMap<TrackerIndex, Arc<Buffer<A>>>,
+ pub staging_buffers: FastHashMap<TrackerIndex, Arc<StagingBuffer<A>>>,
+ pub textures: FastHashMap<TrackerIndex, Arc<Texture<A>>>,
+ pub texture_views: FastHashMap<TrackerIndex, Arc<TextureView<A>>>,
+ pub samplers: FastHashMap<TrackerIndex, Arc<Sampler<A>>>,
+ pub bind_groups: FastHashMap<TrackerIndex, Arc<BindGroup<A>>>,
+ pub bind_group_layouts: FastHashMap<TrackerIndex, Arc<BindGroupLayout<A>>>,
+ pub render_pipelines: FastHashMap<TrackerIndex, Arc<RenderPipeline<A>>>,
+ pub compute_pipelines: FastHashMap<TrackerIndex, Arc<ComputePipeline<A>>>,
+ pub pipeline_layouts: FastHashMap<TrackerIndex, Arc<PipelineLayout<A>>>,
+ pub render_bundles: FastHashMap<TrackerIndex, Arc<RenderBundle<A>>>,
+ pub query_sets: FastHashMap<TrackerIndex, Arc<QuerySet<A>>>,
+ pub destroyed_buffers: FastHashMap<TrackerIndex, Arc<DestroyedBuffer<A>>>,
+ pub destroyed_textures: FastHashMap<TrackerIndex, Arc<DestroyedTexture<A>>>,
}
impl<A: HalApi> ResourceMaps<A> {
@@ -276,25 +272,29 @@ impl<A: HalApi> LifetimeTracker<A> {
for res in temp_resources {
match res {
TempResource::Buffer(raw) => {
- last_resources.buffers.insert(raw.as_info().id(), raw);
+ last_resources
+ .buffers
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::StagingBuffer(raw) => {
last_resources
.staging_buffers
- .insert(raw.as_info().id(), raw);
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedBuffer(destroyed) => {
last_resources
.destroyed_buffers
- .insert(destroyed.id, destroyed);
+ .insert(destroyed.tracker_index, destroyed);
}
TempResource::Texture(raw) => {
- last_resources.textures.insert(raw.as_info().id(), raw);
+ last_resources
+ .textures
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedTexture(destroyed) => {
last_resources
.destroyed_textures
- .insert(destroyed.id, destroyed);
+ .insert(destroyed.tracker_index, destroyed);
}
}
}
@@ -310,12 +310,14 @@ impl<A: HalApi> LifetimeTracker<A> {
pub fn post_submit(&mut self) {
for v in self.future_suspected_buffers.drain(..).take(1) {
- self.suspected_resources.buffers.insert(v.as_info().id(), v);
+ self.suspected_resources
+ .buffers
+ .insert(v.as_info().tracker_index(), v);
}
for v in self.future_suspected_textures.drain(..).take(1) {
self.suspected_resources
.textures
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
}
@@ -386,19 +388,27 @@ impl<A: HalApi> LifetimeTracker<A> {
if let Some(resources) = resources {
match temp_resource {
TempResource::Buffer(raw) => {
- resources.buffers.insert(raw.as_info().id(), raw);
+ resources.buffers.insert(raw.as_info().tracker_index(), raw);
}
TempResource::StagingBuffer(raw) => {
- resources.staging_buffers.insert(raw.as_info().id(), raw);
+ resources
+ .staging_buffers
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedBuffer(destroyed) => {
- resources.destroyed_buffers.insert(destroyed.id, destroyed);
+ resources
+ .destroyed_buffers
+ .insert(destroyed.tracker_index, destroyed);
}
TempResource::Texture(raw) => {
- resources.textures.insert(raw.as_info().id(), raw);
+ resources
+ .textures
+ .insert(raw.as_info().tracker_index(), raw);
}
TempResource::DestroyedTexture(destroyed) => {
- resources.destroyed_textures.insert(destroyed.id, destroyed);
+ resources
+ .destroyed_textures
+ .insert(destroyed.tracker_index, destroyed);
}
}
}
@@ -420,27 +430,27 @@ impl<A: HalApi> LifetimeTracker<A> {
impl<A: HalApi> LifetimeTracker<A> {
fn triage_resources<R>(
- resources_map: &mut FastHashMap<Id<R::Marker>, Arc<R>>,
+ resources_map: &mut FastHashMap<TrackerIndex, Arc<R>>,
active: &mut [ActiveSubmission<A>],
- trackers: &mut impl ResourceTracker<R>,
- get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<Id<R::Marker>, Arc<R>>,
+ trackers: &mut impl ResourceTracker,
+ get_resource_map: impl Fn(&mut ResourceMaps<A>) -> &mut FastHashMap<TrackerIndex, Arc<R>>,
) -> Vec<Arc<R>>
where
R: Resource,
{
let mut removed_resources = Vec::new();
- resources_map.retain(|&id, resource| {
+ resources_map.retain(|&index, resource| {
let submit_index = resource.as_info().submission_index();
let non_referenced_resources = active
.iter_mut()
.find(|a| a.index == submit_index)
.map(|a| &mut a.last_resources);
- let is_removed = trackers.remove_abandoned(id);
+ let is_removed = trackers.remove_abandoned(index);
if is_removed {
removed_resources.push(resource.clone());
if let Some(resources) = non_referenced_resources {
- get_resource_map(resources).insert(id, resource.clone());
+ get_resource_map(resources).insert(index, resource.clone());
}
}
!is_removed
@@ -459,27 +469,29 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|bundle| {
for v in bundle.used.buffers.write().drain_resources() {
- self.suspected_resources.buffers.insert(v.as_info().id(), v);
+ self.suspected_resources
+ .buffers
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.textures.write().drain_resources() {
self.suspected_resources
.textures
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.bind_groups.write().drain_resources() {
self.suspected_resources
.bind_groups
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.render_pipelines.write().drain_resources() {
self.suspected_resources
.render_pipelines
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bundle.used.query_sets.write().drain_resources() {
self.suspected_resources
.query_sets
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
});
self
@@ -496,27 +508,30 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resource.drain(..).for_each(|bind_group| {
for v in bind_group.used.buffers.drain_resources() {
- self.suspected_resources.buffers.insert(v.as_info().id(), v);
+ self.suspected_resources
+ .buffers
+ .insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.textures.drain_resources() {
self.suspected_resources
.textures
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.views.drain_resources() {
self.suspected_resources
.texture_views
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
for v in bind_group.used.samplers.drain_resources() {
self.suspected_resources
.samplers
- .insert(v.as_info().id(), v);
+ .insert(v.as_info().tracker_index(), v);
}
- self.suspected_resources
- .bind_group_layouts
- .insert(bind_group.layout.as_info().id(), bind_group.layout.clone());
+ self.suspected_resources.bind_group_layouts.insert(
+ bind_group.layout.as_info().tracker_index(),
+ bind_group.layout.clone(),
+ );
});
self
}
@@ -605,7 +620,7 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|compute_pipeline| {
self.suspected_resources.pipeline_layouts.insert(
- compute_pipeline.layout.as_info().id(),
+ compute_pipeline.layout.as_info().tracker_index(),
compute_pipeline.layout.clone(),
);
});
@@ -623,7 +638,7 @@ impl<A: HalApi> LifetimeTracker<A> {
);
removed_resources.drain(..).for_each(|render_pipeline| {
self.suspected_resources.pipeline_layouts.insert(
- render_pipeline.layout.as_info().id(),
+ render_pipeline.layout.as_info().tracker_index(),
render_pipeline.layout.clone(),
);
});
@@ -642,7 +657,7 @@ impl<A: HalApi> LifetimeTracker<A> {
for bgl in &pipeline_layout.bind_group_layouts {
self.suspected_resources
.bind_group_layouts
- .insert(bgl.as_info().id(), bgl.clone());
+ .insert(bgl.as_info().tracker_index(), bgl.clone());
}
});
self
@@ -773,14 +788,14 @@ impl<A: HalApi> LifetimeTracker<A> {
Vec::with_capacity(self.ready_to_map.len());
for buffer in self.ready_to_map.drain(..) {
- let buffer_id = buffer.info.id();
+ let tracker_index = buffer.info.tracker_index();
let is_removed = {
let mut trackers = trackers.lock();
- trackers.buffers.remove_abandoned(buffer_id)
+ trackers.buffers.remove_abandoned(tracker_index)
};
if is_removed {
*buffer.map_state.lock() = resource::BufferMapState::Idle;
- log::trace!("Buffer ready to map {:?} is not tracked anymore", buffer_id);
+ log::trace!("Buffer ready to map {tracker_index:?} is not tracked anymore");
} else {
let mapping = match std::mem::replace(
&mut *buffer.map_state.lock(),
@@ -798,7 +813,7 @@ impl<A: HalApi> LifetimeTracker<A> {
_ => panic!("No pending mapping."),
};
let status = if mapping.range.start != mapping.range.end {
- log::debug!("Buffer {:?} map state -> Active", buffer_id);
+ log::debug!("Buffer {tracker_index:?} map state -> Active");
let host = mapping.op.host;
let size = mapping.range.end - mapping.range.start;
match super::map_buffer(raw, &buffer, mapping.range.start, size, host) {
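
ResourceMaps and triage_resources are now keyed by TrackerIndex rather than per-kind ids, so ResourceTracker no longer needs to be generic over the resource type to answer remove_abandoned. A reduced standalone sketch of the retain-and-collect loop, with toy types in place of wgpu-core's and the per-submission re-filing elided:

    use std::collections::HashMap;
    use std::sync::Arc;

    type TrackerIndex = usize;

    trait ResourceTracker {
        /// Returns true when the tracker held the last reference and has dropped it.
        fn remove_abandoned(&mut self, index: TrackerIndex) -> bool;
    }

    fn triage_resources<R>(
        resources_map: &mut HashMap<TrackerIndex, Arc<R>>,
        trackers: &mut impl ResourceTracker,
    ) -> Vec<Arc<R>> {
        let mut removed_resources = Vec::new();
        resources_map.retain(|&index, resource| {
            let is_removed = trackers.remove_abandoned(index);
            if is_removed {
                // The caller re-files these under the submission that still uses them.
                removed_resources.push(resource.clone());
            }
            !is_removed
        });
        removed_resources
    }
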
diff --git a/third_party/rust/wgpu-core/src/device/queue.rs b/third_party/rust/wgpu-core/src/device/queue.rs
index 08c5b767b6..6ebb9eb09b 100644
--- a/third_party/rust/wgpu-core/src/device/queue.rs
+++ b/third_party/rust/wgpu-core/src/device/queue.rs
@@ -12,7 +12,7 @@ use crate::{
global::Global,
hal_api::HalApi,
hal_label,
- id::{self, QueueId},
+ id::{self, DeviceId, QueueId},
init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
resource::{
Buffer, BufferAccessError, BufferMapState, DestroyedBuffer, DestroyedTexture, Resource,
@@ -188,10 +188,17 @@ impl<A: HalApi> EncoderInFlight<A> {
#[derive(Debug)]
pub(crate) struct PendingWrites<A: HalApi> {
pub command_encoder: A::CommandEncoder,
- pub is_active: bool,
+
+ /// True if `command_encoder` is in the "recording" state, as
+ /// described in the docs for the [`wgpu_hal::CommandEncoder`]
+ /// trait.
+ pub is_recording: bool,
+
pub temp_resources: Vec<TempResource<A>>,
pub dst_buffers: FastHashMap<id::BufferId, Arc<Buffer<A>>>,
pub dst_textures: FastHashMap<id::TextureId, Arc<Texture<A>>>,
+
+ /// All command buffers allocated from `command_encoder`.
pub executing_command_buffers: Vec<A::CommandBuffer>,
}
@@ -199,7 +206,7 @@ impl<A: HalApi> PendingWrites<A> {
pub fn new(command_encoder: A::CommandEncoder) -> Self {
Self {
command_encoder,
- is_active: false,
+ is_recording: false,
temp_resources: Vec::new(),
dst_buffers: FastHashMap::default(),
dst_textures: FastHashMap::default(),
@@ -209,7 +216,7 @@ impl<A: HalApi> PendingWrites<A> {
pub fn dispose(mut self, device: &A::Device) {
unsafe {
- if self.is_active {
+ if self.is_recording {
self.command_encoder.discard_encoding();
}
self.command_encoder
@@ -232,9 +239,9 @@ impl<A: HalApi> PendingWrites<A> {
fn pre_submit(&mut self) -> Result<Option<&A::CommandBuffer>, DeviceError> {
self.dst_buffers.clear();
self.dst_textures.clear();
- if self.is_active {
+ if self.is_recording {
let cmd_buf = unsafe { self.command_encoder.end_encoding()? };
- self.is_active = false;
+ self.is_recording = false;
self.executing_command_buffers.push(cmd_buf);
return Ok(self.executing_command_buffers.last());
@@ -262,23 +269,23 @@ impl<A: HalApi> PendingWrites<A> {
}
pub fn activate(&mut self) -> &mut A::CommandEncoder {
- if !self.is_active {
+ if !self.is_recording {
unsafe {
self.command_encoder
.begin_encoding(Some("(wgpu internal) PendingWrites"))
.unwrap();
}
- self.is_active = true;
+ self.is_recording = true;
}
&mut self.command_encoder
}
pub fn deactivate(&mut self) {
- if self.is_active {
+ if self.is_recording {
unsafe {
self.command_encoder.discard_encoding();
}
- self.is_active = false;
+ self.is_recording = false;
}
}
}
@@ -303,7 +310,10 @@ fn prepare_staging_buffer<A: HalApi>(
raw: Mutex::new(Some(buffer)),
device: device.clone(),
size,
- info: ResourceInfo::new("<StagingBuffer>"),
+ info: ResourceInfo::new(
+ "<StagingBuffer>",
+ Some(device.tracker_indices.staging_buffers.clone()),
+ ),
is_coherent: mapping.is_coherent,
};
@@ -332,6 +342,15 @@ pub struct InvalidQueue;
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
+ #[error(
+ "Device of queue ({:?}) does not match device of write recipient ({:?})",
+ queue_device_id,
+ target_device_id
+ )]
+ DeviceMismatch {
+ queue_device_id: DeviceId,
+ target_device_id: DeviceId,
+ },
#[error(transparent)]
Queue(#[from] DeviceError),
#[error(transparent)]
@@ -376,6 +395,14 @@ impl Global {
let hub = A::hub(self);
+ let buffer_device_id = hub
+ .buffers
+ .get(buffer_id)
+ .map_err(|_| TransferError::InvalidBuffer(buffer_id))?
+ .device
+ .as_info()
+ .id();
+
let queue = hub
.queues
.get(queue_id)
@@ -383,6 +410,16 @@ impl Global {
let device = queue.device.as_ref().unwrap();
+ {
+ let queue_device_id = device.as_info().id();
+ if buffer_device_id != queue_device_id {
+ return Err(QueueWriteError::DeviceMismatch {
+ queue_device_id,
+ target_device_id: buffer_device_id,
+ });
+ }
+ }
+
let data_size = data.len() as wgt::BufferAddress;
#[cfg(feature = "trace")]
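// The added check above rejects cross-device writes before any work is encoded: the buffer's
// owning device must be the very device the queue belongs to. A self-contained sketch of the
// same comparison, with a plain integer standing in for wgpu-core's DeviceId:
type FakeDeviceId = u32;

#[derive(Debug)]
enum WriteError {
    DeviceMismatch {
        queue_device_id: FakeDeviceId,
        target_device_id: FakeDeviceId,
    },
}

fn check_same_device(
    queue_device_id: FakeDeviceId,
    buffer_device_id: FakeDeviceId,
) -> Result<(), WriteError> {
    if buffer_device_id != queue_device_id {
        return Err(WriteError::DeviceMismatch {
            queue_device_id,
            target_device_id: buffer_device_id,
        });
    }
    Ok(())
}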
@@ -1143,7 +1180,7 @@ impl Global {
for &cmb_id in command_buffer_ids {
// we reset the used surface textures every time we use
// it, so make sure to set_size on it.
- used_surface_textures.set_size(hub.textures.read().len());
+ used_surface_textures.set_size(device.tracker_indices.textures.size());
#[allow(unused_mut)]
let mut cmdbuf = match command_buffer_guard.replace_with_error(cmb_id) {
@@ -1188,11 +1225,13 @@ impl Global {
// update submission IDs
for buffer in cmd_buf_trackers.buffers.used_resources() {
- let id = buffer.info.id();
+ let tracker_index = buffer.info.tracker_index();
let raw_buf = match buffer.raw.get(&snatch_guard) {
Some(raw) => raw,
None => {
- return Err(QueueSubmitError::DestroyedBuffer(id));
+ return Err(QueueSubmitError::DestroyedBuffer(
+ buffer.info.id(),
+ ));
}
};
buffer.info.use_at(submit_index);
@@ -1207,28 +1246,28 @@ impl Global {
.as_mut()
.unwrap()
.buffers
- .insert(id, buffer.clone());
+ .insert(tracker_index, buffer.clone());
} else {
match *buffer.map_state.lock() {
BufferMapState::Idle => (),
- _ => return Err(QueueSubmitError::BufferStillMapped(id)),
+ _ => {
+ return Err(QueueSubmitError::BufferStillMapped(
+ buffer.info.id(),
+ ))
+ }
}
}
}
for texture in cmd_buf_trackers.textures.used_resources() {
- let id = texture.info.id();
+ let tracker_index = texture.info.tracker_index();
let should_extend = match texture.inner.get(&snatch_guard) {
None => {
- return Err(QueueSubmitError::DestroyedTexture(id));
+ return Err(QueueSubmitError::DestroyedTexture(
+ texture.info.id(),
+ ));
}
Some(TextureInner::Native { .. }) => false,
- Some(TextureInner::Surface {
- ref has_work,
- ref raw,
- ..
- }) => {
- has_work.store(true, Ordering::Relaxed);
-
+ Some(TextureInner::Surface { ref raw, .. }) => {
if raw.is_some() {
submit_surface_textures_owned.push(texture.clone());
}
@@ -1242,7 +1281,7 @@ impl Global {
.as_mut()
.unwrap()
.textures
- .insert(id, texture.clone());
+ .insert(tracker_index, texture.clone());
}
if should_extend {
unsafe {
@@ -1255,11 +1294,10 @@ impl Global {
for texture_view in cmd_buf_trackers.views.used_resources() {
texture_view.info.use_at(submit_index);
if texture_view.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .texture_views
- .insert(texture_view.as_info().id(), texture_view.clone());
+ temp_suspected.as_mut().unwrap().texture_views.insert(
+ texture_view.as_info().tracker_index(),
+ texture_view.clone(),
+ );
}
}
{
@@ -1279,7 +1317,7 @@ impl Global {
.as_mut()
.unwrap()
.bind_groups
- .insert(bg.as_info().id(), bg.clone());
+ .insert(bg.as_info().tracker_index(), bg.clone());
}
}
}
@@ -1290,7 +1328,7 @@ impl Global {
compute_pipeline.info.use_at(submit_index);
if compute_pipeline.is_unique() {
temp_suspected.as_mut().unwrap().compute_pipelines.insert(
- compute_pipeline.as_info().id(),
+ compute_pipeline.as_info().tracker_index(),
compute_pipeline.clone(),
);
}
@@ -1301,7 +1339,7 @@ impl Global {
render_pipeline.info.use_at(submit_index);
if render_pipeline.is_unique() {
temp_suspected.as_mut().unwrap().render_pipelines.insert(
- render_pipeline.as_info().id(),
+ render_pipeline.as_info().tracker_index(),
render_pipeline.clone(),
);
}
@@ -1309,11 +1347,10 @@ impl Global {
for query_set in cmd_buf_trackers.query_sets.used_resources() {
query_set.info.use_at(submit_index);
if query_set.is_unique() {
- temp_suspected
- .as_mut()
- .unwrap()
- .query_sets
- .insert(query_set.as_info().id(), query_set.clone());
+ temp_suspected.as_mut().unwrap().query_sets.insert(
+ query_set.as_info().tracker_index(),
+ query_set.clone(),
+ );
}
}
for bundle in cmd_buf_trackers.bundles.used_resources() {
@@ -1334,7 +1371,7 @@ impl Global {
.as_mut()
.unwrap()
.render_bundles
- .insert(bundle.as_info().id(), bundle.clone());
+ .insert(bundle.as_info().tracker_index(), bundle.clone());
}
}
}
@@ -1423,13 +1460,7 @@ impl Global {
return Err(QueueSubmitError::DestroyedTexture(id));
}
Some(TextureInner::Native { .. }) => {}
- Some(TextureInner::Surface {
- ref has_work,
- ref raw,
- ..
- }) => {
- has_work.store(true, Ordering::Relaxed);
-
+ Some(TextureInner::Surface { ref raw, .. }) => {
if raw.is_some() {
submit_surface_textures_owned.push(texture.clone());
}
diff --git a/third_party/rust/wgpu-core/src/device/resource.rs b/third_party/rust/wgpu-core/src/device/resource.rs
index b2c85a056a..28ba0eafb1 100644
--- a/third_party/rust/wgpu-core/src/device/resource.rs
+++ b/third_party/rust/wgpu-core/src/device/resource.rs
@@ -13,7 +13,6 @@ use crate::{
hal_api::HalApi,
hal_label,
hub::Hub,
- id::QueueId,
init_tracker::{
BufferInitTracker, BufferInitTrackerAction, MemoryInitKind, TextureInitRange,
TextureInitTracker, TextureInitTrackerAction,
@@ -29,13 +28,16 @@ use crate::{
resource_log,
snatch::{SnatchGuard, SnatchLock, Snatchable},
storage::Storage,
- track::{BindGroupStates, TextureSelector, Tracker},
- validation::{self, check_buffer_usage, check_texture_usage},
+ track::{BindGroupStates, TextureSelector, Tracker, TrackerIndexAllocators},
+ validation::{
+ self, check_buffer_usage, check_texture_usage, validate_color_attachment_bytes_per_sample,
+ },
FastHashMap, LabelHelpers as _, SubmissionIndex,
};
use arrayvec::ArrayVec;
use hal::{CommandEncoder as _, Device as _};
+use once_cell::sync::OnceCell;
use parking_lot::{Mutex, MutexGuard, RwLock};
use smallvec::SmallVec;
@@ -54,7 +56,7 @@ use std::{
use super::{
life::{self, ResourceMaps},
- queue::{self},
+ queue::{self, Queue},
DeviceDescriptor, DeviceError, ImplicitPipelineContext, UserClosures, ENTRYPOINT_FAILURE_ERROR,
IMPLICIT_BIND_GROUP_LAYOUT_ERROR_LABEL, ZERO_BUFFER_SIZE,
};
@@ -87,8 +89,8 @@ use super::{
pub struct Device<A: HalApi> {
raw: Option<A::Device>,
pub(crate) adapter: Arc<Adapter<A>>,
- pub(crate) queue_id: RwLock<Option<QueueId>>,
- queue_to_drop: RwLock<Option<A::Queue>>,
+ pub(crate) queue: OnceCell<Weak<Queue<A>>>,
+ queue_to_drop: OnceCell<A::Queue>,
pub(crate) zero_buffer: Option<A::Buffer>,
pub(crate) info: ResourceInfo<Device<A>>,
@@ -116,6 +118,7 @@ pub struct Device<A: HalApi> {
/// Has to be locked temporarily only (locked last)
/// and never before pending_writes
pub(crate) trackers: Mutex<Tracker<A>>,
+ pub(crate) tracker_indices: TrackerIndexAllocators,
// Life tracker should be locked right after the device and before anything else.
life_tracker: Mutex<LifetimeTracker<A>>,
/// Temporary storage for resource management functions. Cleared at the end
@@ -160,7 +163,7 @@ impl<A: HalApi> Drop for Device<A> {
unsafe {
raw.destroy_buffer(self.zero_buffer.take().unwrap());
raw.destroy_fence(self.fence.write().take().unwrap());
- let queue = self.queue_to_drop.write().take().unwrap();
+ let queue = self.queue_to_drop.take().unwrap();
raw.exit(queue);
}
}
@@ -258,16 +261,17 @@ impl<A: HalApi> Device<A> {
Ok(Self {
raw: Some(raw_device),
adapter: adapter.clone(),
- queue_id: RwLock::new(None),
- queue_to_drop: RwLock::new(None),
+ queue: OnceCell::new(),
+ queue_to_drop: OnceCell::new(),
zero_buffer: Some(zero_buffer),
- info: ResourceInfo::new("<device>"),
+ info: ResourceInfo::new("<device>", None),
command_allocator: Mutex::new(Some(com_alloc)),
active_submission_index: AtomicU64::new(0),
fence: RwLock::new(Some(fence)),
snatchable_lock: unsafe { SnatchLock::new() },
valid: AtomicBool::new(true),
trackers: Mutex::new(Tracker::new()),
+ tracker_indices: TrackerIndexAllocators::new(),
life_tracker: Mutex::new(life::LifetimeTracker::new()),
temp_suspected: Mutex::new(Some(life::ResourceMaps::new())),
bgl_pool: ResourcePool::new(),
@@ -300,7 +304,7 @@ impl<A: HalApi> Device<A> {
}
pub(crate) fn release_queue(&self, queue: A::Queue) {
- self.queue_to_drop.write().replace(queue);
+ assert!(self.queue_to_drop.set(queue).is_ok());
}
pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker<A>> {
@@ -339,7 +343,8 @@ impl<A: HalApi> Device<A> {
let Some(bind_group) = bind_group.upgrade() else {
continue;
};
- let Some(raw_bind_group) = bind_group.raw.snatch(self.snatchable_lock.write()) else {
+ let Some(raw_bind_group) = bind_group.raw.snatch(self.snatchable_lock.write())
+ else {
continue;
};
@@ -357,6 +362,14 @@ impl<A: HalApi> Device<A> {
}
}
+ pub fn get_queue(&self) -> Option<Arc<Queue<A>>> {
+ self.queue.get().as_ref()?.upgrade()
+ }
+
+ pub fn set_queue(&self, queue: Arc<Queue<A>>) {
+ assert!(self.queue.set(Arc::downgrade(&queue)).is_ok());
+ }
+
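// `Device::queue` above is a set-once, weakly-held back-reference: the device can reach its
// queue without keeping it alive. A minimal sketch of the pattern with placeholder types
// (only `once_cell::sync::OnceCell` is the real dependency used by the patch):
use std::sync::{Arc, Weak};

use once_cell::sync::OnceCell;

struct FakeQueue;

struct FakeDevice {
    queue: OnceCell<Weak<FakeQueue>>,
}

impl FakeDevice {
    fn set_queue(&self, queue: Arc<FakeQueue>) {
        // Setting twice would be a logic error, mirroring the assert in the patch.
        assert!(self.queue.set(Arc::downgrade(&queue)).is_ok());
    }

    fn get_queue(&self) -> Option<Arc<FakeQueue>> {
        // None both before `set_queue` and after the queue has been dropped.
        self.queue.get()?.upgrade()
    }
}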
/// Check this device for completed commands.
///
/// The `maintain` argument tells how the maintenance function should behave, either
@@ -483,56 +496,56 @@ impl<A: HalApi> Device<A> {
if resource.is_unique() {
temp_suspected
.buffers
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.textures.used_resources() {
if resource.is_unique() {
temp_suspected
.textures
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.views.used_resources() {
if resource.is_unique() {
temp_suspected
.texture_views
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.bind_groups.used_resources() {
if resource.is_unique() {
temp_suspected
.bind_groups
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.samplers.used_resources() {
if resource.is_unique() {
temp_suspected
.samplers
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.compute_pipelines.used_resources() {
if resource.is_unique() {
temp_suspected
.compute_pipelines
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.render_pipelines.used_resources() {
if resource.is_unique() {
temp_suspected
.render_pipelines
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
for resource in trackers.query_sets.used_resources() {
if resource.is_unique() {
temp_suspected
.query_sets
- .insert(resource.as_info().id(), resource.clone());
+ .insert(resource.as_info().tracker_index(), resource.clone());
}
}
}
@@ -633,7 +646,10 @@ impl<A: HalApi> Device<A> {
initialization_status: RwLock::new(BufferInitTracker::new(aligned_size)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.buffers.clone()),
+ ),
bind_groups: Mutex::new(Vec::new()),
})
}
@@ -662,7 +678,10 @@ impl<A: HalApi> Device<A> {
mips: 0..desc.mip_level_count,
layers: 0..desc.array_layer_count(),
},
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.textures.clone()),
+ ),
clear_mode: RwLock::new(clear_mode),
views: Mutex::new(Vec::new()),
bind_groups: Mutex::new(Vec::new()),
@@ -684,7 +703,10 @@ impl<A: HalApi> Device<A> {
initialization_status: RwLock::new(BufferInitTracker::new(0)),
sync_mapped_writes: Mutex::new(None),
map_state: Mutex::new(resource::BufferMapState::Idle),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.buffers.clone()),
+ ),
bind_groups: Mutex::new(Vec::new()),
}
}
@@ -1262,7 +1284,10 @@ impl<A: HalApi> Device<A> {
render_extent,
samples: texture.desc.sample_count,
selector,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.texture_views.clone()),
+ ),
})
}
@@ -1366,7 +1391,10 @@ impl<A: HalApi> Device<A> {
Ok(Sampler {
raw: Some(raw),
device: self.clone(),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.samplers.clone()),
+ ),
comparison: desc.compare.is_some(),
filtering: desc.min_filter == wgt::FilterMode::Linear
|| desc.mag_filter == wgt::FilterMode::Linear,
@@ -1484,6 +1512,10 @@ impl<A: HalApi> Device<A> {
.contains(wgt::Features::SHADER_EARLY_DEPTH_TEST),
);
caps.set(
+ Caps::SHADER_INT64,
+ self.features.contains(wgt::Features::SHADER_INT64),
+ );
+ caps.set(
Caps::MULTISAMPLED_SHADING,
self.downlevel
.flags
@@ -1559,7 +1591,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: Some(interface),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
})
}
@@ -1600,7 +1632,7 @@ impl<A: HalApi> Device<A> {
raw: Some(raw),
device: self.clone(),
interface: None,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(desc.label.borrow_or_default(), None),
label: desc.label.borrow_or_default().to_string(),
})
}
@@ -1704,10 +1736,23 @@ impl<A: HalApi> Device<A> {
BindGroupLayoutEntryError::SampleTypeFloatFilterableBindingMultisampled,
});
}
- Bt::Texture { .. } => (
- Some(wgt::Features::TEXTURE_BINDING_ARRAY),
- WritableStorage::No,
- ),
+ Bt::Texture {
+ multisampled,
+ view_dimension,
+ ..
+ } => {
+ if multisampled && view_dimension != TextureViewDimension::D2 {
+ return Err(binding_model::CreateBindGroupLayoutError::Entry {
+ binding: entry.binding,
+ error: BindGroupLayoutEntryError::Non2DMultisampled(view_dimension),
+ });
+ }
+
+ (
+ Some(wgt::Features::TEXTURE_BINDING_ARRAY),
+ WritableStorage::No,
+ )
+ }
Bt::StorageTexture {
access,
view_dimension,
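// The new `Bt::Texture` arm above adds one rule: a multisampled texture binding must use a 2D
// view dimension. A hedged sketch of that check, with a local enum standing in for
// wgt::TextureViewDimension:
#[derive(Debug, PartialEq)]
enum Dim {
    D1,
    D2,
    D2Array,
    Cube,
    CubeArray,
    D3,
}

// Returns the offending dimension so a caller could build an error like Non2DMultisampled.
fn validate_multisampled_binding(multisampled: bool, view_dimension: Dim) -> Result<(), Dim> {
    if multisampled && view_dimension != Dim::D2 {
        return Err(view_dimension);
    }
    Ok(())
}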
@@ -1840,7 +1885,10 @@ impl<A: HalApi> Device<A> {
entries: entry_map,
origin,
binding_count_validator: count_validator,
- info: ResourceInfo::new(label.unwrap_or("<BindGroupLayout>")),
+ info: ResourceInfo::new(
+ label.unwrap_or("<BindGroupLayout>"),
+ Some(self.tracker_indices.bind_group_layouts.clone()),
+ ),
label: label.unwrap_or_default().to_string(),
})
}
@@ -1905,7 +1953,7 @@ impl<A: HalApi> Device<A> {
.add_single(storage, bb.buffer_id, internal_use)
.ok_or(Error::InvalidBuffer(bb.buffer_id))?;
- check_buffer_usage(buffer.usage, pub_usage)?;
+ check_buffer_usage(bb.buffer_id, buffer.usage, pub_usage)?;
let raw_buffer = buffer
.raw
.get(snatch_guard)
@@ -2273,7 +2321,10 @@ impl<A: HalApi> Device<A> {
raw: Snatchable::new(raw),
device: self.clone(),
layout: layout.clone(),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.bind_groups.clone()),
+ ),
used,
used_buffer_ranges,
used_texture_ranges,
@@ -2555,7 +2606,10 @@ impl<A: HalApi> Device<A> {
Ok(binding_model::PipelineLayout {
raw: Some(raw),
device: self.clone(),
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.pipeline_layouts.clone()),
+ ),
bind_group_layouts,
push_constant_ranges: desc.push_constant_ranges.iter().cloned().collect(),
})
@@ -2656,14 +2710,21 @@ impl<A: HalApi> Device<A> {
let mut shader_binding_sizes = FastHashMap::default();
let io = validation::StageIo::default();
+ let final_entry_point_name;
+
{
let stage = wgt::ShaderStages::COMPUTE;
+ final_entry_point_name = shader_module.finalize_entry_point_name(
+ stage,
+ desc.stage.entry_point.as_ref().map(|ep| ep.as_ref()),
+ )?;
+
if let Some(ref interface) = shader_module.interface {
let _ = interface.check_stage(
&mut binding_layout_source,
&mut shader_binding_sizes,
- &desc.stage.entry_point,
+ &final_entry_point_name,
stage,
io,
None,
@@ -2691,7 +2752,7 @@ impl<A: HalApi> Device<A> {
label: desc.label.to_hal(self.instance_flags),
layout: pipeline_layout.raw(),
stage: hal::ProgrammableStage {
- entry_point: desc.stage.entry_point.as_ref(),
+ entry_point: final_entry_point_name.as_ref(),
module: shader_module.raw(),
},
};
@@ -2720,7 +2781,10 @@ impl<A: HalApi> Device<A> {
device: self.clone(),
_shader_module: shader_module,
late_sized_buffer_groups,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.compute_pipelines.clone()),
+ ),
};
Ok(pipeline)
}
@@ -2749,11 +2813,12 @@ impl<A: HalApi> Device<A> {
let mut shader_binding_sizes = FastHashMap::default();
let num_attachments = desc.fragment.as_ref().map(|f| f.targets.len()).unwrap_or(0);
- if num_attachments > hal::MAX_COLOR_ATTACHMENTS {
+ let max_attachments = self.limits.max_color_attachments as usize;
+ if num_attachments > max_attachments {
return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
command::ColorAttachmentError::TooMany {
given: num_attachments,
- limit: hal::MAX_COLOR_ATTACHMENTS,
+ limit: max_attachments,
},
));
}
@@ -2959,6 +3024,7 @@ impl<A: HalApi> Device<A> {
}
}
}
+
break None;
};
if let Some(e) = error {
@@ -2967,6 +3033,16 @@ impl<A: HalApi> Device<A> {
}
}
+ let limit = self.limits.max_color_attachment_bytes_per_sample;
+ let formats = color_targets
+ .iter()
+ .map(|cs| cs.as_ref().map(|cs| cs.format));
+ if let Err(total) = validate_color_attachment_bytes_per_sample(formats, limit) {
+ return Err(pipeline::CreateRenderPipelineError::ColorAttachment(
+ command::ColorAttachmentError::TooManyBytesPerSample { total, limit },
+ ));
+ }
+
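// The block above enforces `max_color_attachment_bytes_per_sample`: the per-sample byte cost of
// all color targets, summed, may not exceed the device limit. A sketch of the arithmetic; the
// byte costs in the usage lines are illustrative values, not wgpu's per-format table:
fn validate_bytes_per_sample(
    per_target_bytes: impl IntoIterator<Item = Option<u32>>,
    limit: u32,
) -> Result<(), u32> {
    let total: u32 = per_target_bytes.into_iter().flatten().sum();
    if total > limit {
        // The patch reports this as `TooManyBytesPerSample { total, limit }`.
        return Err(total);
    }
    Ok(())
}

fn bytes_per_sample_examples() {
    // Two 8-byte targets plus one 16-byte target fit a 32-byte budget...
    assert!(validate_bytes_per_sample([Some(8), Some(8), None, Some(16)], 32).is_ok());
    // ...but three more expensive targets do not.
    assert!(validate_bytes_per_sample([Some(16), Some(16), Some(8)], 32).is_err());
}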
if let Some(ds) = depth_stencil_state {
let error = loop {
let format_features = self.describe_format_features(adapter, ds.format)?;
@@ -3051,6 +3127,7 @@ impl<A: HalApi> Device<A> {
};
let vertex_shader_module;
+ let vertex_entry_point_name;
let vertex_stage = {
let stage_desc = &desc.vertex.stage;
let stage = wgt::ShaderStages::VERTEX;
@@ -3065,27 +3142,37 @@ impl<A: HalApi> Device<A> {
return Err(DeviceError::WrongDevice.into());
}
+ let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
+
+ vertex_entry_point_name = vertex_shader_module
+ .finalize_entry_point_name(
+ stage,
+ stage_desc.entry_point.as_ref().map(|ep| ep.as_ref()),
+ )
+ .map_err(stage_err)?;
+
if let Some(ref interface) = vertex_shader_module.interface {
io = interface
.check_stage(
&mut binding_layout_source,
&mut shader_binding_sizes,
- &stage_desc.entry_point,
+ &vertex_entry_point_name,
stage,
io,
desc.depth_stencil.as_ref().map(|d| d.depth_compare),
)
- .map_err(|error| pipeline::CreateRenderPipelineError::Stage { stage, error })?;
+ .map_err(stage_err)?;
validated_stages |= stage;
}
hal::ProgrammableStage {
module: vertex_shader_module.raw(),
- entry_point: stage_desc.entry_point.as_ref(),
+ entry_point: &vertex_entry_point_name,
}
};
let mut fragment_shader_module = None;
+ let fragment_entry_point_name;
let fragment_stage = match desc.fragment {
Some(ref fragment_state) => {
let stage = wgt::ShaderStages::FRAGMENT;
@@ -3099,28 +3186,38 @@ impl<A: HalApi> Device<A> {
})?,
);
+ let stage_err = |error| pipeline::CreateRenderPipelineError::Stage { stage, error };
+
+ fragment_entry_point_name = shader_module
+ .finalize_entry_point_name(
+ stage,
+ fragment_state
+ .stage
+ .entry_point
+ .as_ref()
+ .map(|ep| ep.as_ref()),
+ )
+ .map_err(stage_err)?;
+
if validated_stages == wgt::ShaderStages::VERTEX {
if let Some(ref interface) = shader_module.interface {
io = interface
.check_stage(
&mut binding_layout_source,
&mut shader_binding_sizes,
- &fragment_state.stage.entry_point,
+ &fragment_entry_point_name,
stage,
io,
desc.depth_stencil.as_ref().map(|d| d.depth_compare),
)
- .map_err(|error| pipeline::CreateRenderPipelineError::Stage {
- stage,
- error,
- })?;
+ .map_err(stage_err)?;
validated_stages |= stage;
}
}
if let Some(ref interface) = shader_module.interface {
shader_expects_dual_source_blending = interface
- .fragment_uses_dual_source_blending(&fragment_state.stage.entry_point)
+ .fragment_uses_dual_source_blending(&fragment_entry_point_name)
.map_err(|error| pipeline::CreateRenderPipelineError::Stage {
stage,
error,
@@ -3129,7 +3226,7 @@ impl<A: HalApi> Device<A> {
Some(hal::ProgrammableStage {
module: shader_module.raw(),
- entry_point: fragment_state.stage.entry_point.as_ref(),
+ entry_point: &fragment_entry_point_name,
})
}
None => None,
@@ -3302,7 +3399,10 @@ impl<A: HalApi> Device<A> {
strip_index_format: desc.primitive.strip_index_format,
vertex_steps,
late_sized_buffer_groups,
- info: ResourceInfo::new(desc.label.borrow_or_default()),
+ info: ResourceInfo::new(
+ desc.label.borrow_or_default(),
+ Some(self.tracker_indices.render_pipelines.clone()),
+ ),
};
Ok(pipeline)
}
@@ -3415,7 +3515,7 @@ impl<A: HalApi> Device<A> {
Ok(QuerySet {
raw: Some(unsafe { self.raw().create_query_set(&hal_desc).unwrap() }),
device: self.clone(),
- info: ResourceInfo::new(""),
+ info: ResourceInfo::new("", Some(self.tracker_indices.query_sets.clone())),
desc: desc.map_label(|_| ()),
})
}
diff --git a/third_party/rust/wgpu-core/src/id.rs b/third_party/rust/wgpu-core/src/id.rs
index 1dbb491e60..72b74218d0 100644
--- a/third_party/rust/wgpu-core/src/id.rs
+++ b/third_party/rust/wgpu-core/src/id.rs
@@ -275,7 +275,7 @@ where
{
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.0.partial_cmp(&other.0)
+ Some(self.cmp(other))
}
}
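// The one-line change above is the usual idiom for types that also implement `Ord`: derive
// `partial_cmp` from `cmp` so the two orderings can never disagree (it is also what clippy's
// non_canonical_partial_ord_impl lint suggests). A minimal local illustration:
use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
struct RawId(u64);

impl Ord for RawId {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}

impl PartialOrd for RawId {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}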
diff --git a/third_party/rust/wgpu-core/src/identity.rs b/third_party/rust/wgpu-core/src/identity.rs
index 0e34055c74..d76d29341a 100644
--- a/third_party/rust/wgpu-core/src/identity.rs
+++ b/third_party/rust/wgpu-core/src/identity.rs
@@ -3,10 +3,17 @@ use wgt::Backend;
use crate::{
id::{Id, Marker},
- Epoch, FastHashMap, Index,
+ Epoch, Index,
};
use std::{fmt::Debug, marker::PhantomData};
+#[derive(Copy, Clone, Debug, PartialEq)]
+enum IdSource {
+ External,
+ Allocated,
+ None,
+}
+
/// A simple structure to allocate [`Id`] identifiers.
///
/// Calling [`alloc`] returns a fresh, never-before-seen id. Calling [`free`]
@@ -34,12 +41,15 @@ use std::{fmt::Debug, marker::PhantomData};
/// [`Backend`]: wgt::Backend;
/// [`alloc`]: IdentityManager::alloc
/// [`free`]: IdentityManager::free
-#[derive(Debug, Default)]
+#[derive(Debug)]
pub(super) struct IdentityValues {
free: Vec<(Index, Epoch)>,
- //sorted by Index
- used: FastHashMap<Epoch, Vec<Index>>,
+ next_index: Index,
count: usize,
+ // Sanity check: The allocation logic works under the assumption that we don't
+ // do a mix of allocating ids from here and providing ids manually for the same
+ // storage container.
+ id_source: IdSource,
}
impl IdentityValues {
@@ -48,35 +58,41 @@ impl IdentityValues {
/// The backend is incorporated into the id, so that ids allocated with
/// different `backend` values are always distinct.
pub fn alloc<T: Marker>(&mut self, backend: Backend) -> Id<T> {
+ assert!(
+ self.id_source != IdSource::External,
+ "Mix of internally allocated and externally provided IDs"
+ );
+ self.id_source = IdSource::Allocated;
+
self.count += 1;
match self.free.pop() {
Some((index, epoch)) => Id::zip(index, epoch + 1, backend),
None => {
+ let index = self.next_index;
+ self.next_index += 1;
let epoch = 1;
- let used = self.used.entry(epoch).or_insert_with(Default::default);
- let index = if let Some(i) = used.iter().max_by_key(|v| *v) {
- i + 1
- } else {
- 0
- };
- used.push(index);
Id::zip(index, epoch, backend)
}
}
}
pub fn mark_as_used<T: Marker>(&mut self, id: Id<T>) -> Id<T> {
+ assert!(
+ self.id_source != IdSource::Allocated,
+ "Mix of internally allocated and externally provided IDs"
+ );
+ self.id_source = IdSource::External;
+
self.count += 1;
- let (index, epoch, _backend) = id.unzip();
- let used = self.used.entry(epoch).or_insert_with(Default::default);
- used.push(index);
id
}
/// Free `id`. It will never be returned from `alloc` again.
pub fn release<T: Marker>(&mut self, id: Id<T>) {
- let (index, epoch, _backend) = id.unzip();
- self.free.push((index, epoch));
+ if let IdSource::Allocated = self.id_source {
+ let (index, epoch, _backend) = id.unzip();
+ self.free.push((index, epoch));
+ }
self.count -= 1;
}
@@ -106,7 +122,12 @@ impl<T: Marker> IdentityManager<T> {
impl<T: Marker> IdentityManager<T> {
pub fn new() -> Self {
Self {
- values: Mutex::new(IdentityValues::default()),
+ values: Mutex::new(IdentityValues {
+ free: Vec::new(),
+ next_index: 0,
+ count: 0,
+ id_source: IdSource::None,
+ }),
_phantom: PhantomData,
}
}
@@ -115,15 +136,11 @@ impl<T: Marker> IdentityManager<T> {
#[test]
fn test_epoch_end_of_life() {
use crate::id;
-
let man = IdentityManager::<id::markers::Buffer>::new();
- let forced_id = man.mark_as_used(id::BufferId::zip(0, 1, Backend::Empty));
- assert_eq!(forced_id.unzip().0, 0);
let id1 = man.process(Backend::Empty);
- assert_eq!(id1.unzip().0, 1);
+ assert_eq!(id1.unzip(), (0, 1, Backend::Empty));
man.free(id1);
let id2 = man.process(Backend::Empty);
// confirm that the epoch 1 is no longer re-used
- assert_eq!(id2.unzip().0, 1);
- assert_eq!(id2.unzip().1, 2);
+ assert_eq!(id2.unzip(), (0, 2, Backend::Empty));
}
diff --git a/third_party/rust/wgpu-core/src/instance.rs b/third_party/rust/wgpu-core/src/instance.rs
index 582571c2b8..b909245fac 100644
--- a/third_party/rust/wgpu-core/src/instance.rs
+++ b/third_party/rust/wgpu-core/src/instance.rs
@@ -198,7 +198,7 @@ impl<A: HalApi> Adapter<A> {
Self {
raw,
- info: ResourceInfo::new("<Adapter>"),
+ info: ResourceInfo::new("<Adapter>", None),
}
}
@@ -303,7 +303,7 @@ impl<A: HalApi> Adapter<A> {
let queue = Queue {
device: None,
raw: Some(hal_device.queue),
- info: ResourceInfo::new("<Queue>"),
+ info: ResourceInfo::new("<Queue>", None),
};
return Ok((device, queue));
}
@@ -521,7 +521,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: hal_surface,
};
@@ -542,7 +542,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -575,7 +575,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -604,7 +604,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -633,7 +633,7 @@ impl Global {
let surface = Surface {
presentation: Mutex::new(None),
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new("<Surface>", None),
raw: {
let hal_surface = self
.instance
@@ -1072,10 +1072,10 @@ impl Global {
let device = hub.devices.get(device_id).unwrap();
queue.device = Some(device.clone());
- let (queue_id, _) = queue_fid.assign(queue);
+ let (queue_id, queue) = queue_fid.assign(queue);
resource_log!("Created Queue {:?}", queue_id);
- device.queue_id.write().replace(queue_id);
+ device.set_queue(queue);
return (device_id, queue_id, None);
};
@@ -1124,10 +1124,10 @@ impl Global {
let device = hub.devices.get(device_id).unwrap();
queue.device = Some(device.clone());
- let (queue_id, _) = queues_fid.assign(queue);
+ let (queue_id, queue) = queues_fid.assign(queue);
resource_log!("Created Queue {:?}", queue_id);
- device.queue_id.write().replace(queue_id);
+ device.set_queue(queue);
return (device_id, queue_id, None);
};
diff --git a/third_party/rust/wgpu-core/src/lib.rs b/third_party/rust/wgpu-core/src/lib.rs
index 9f6526fc11..5454f0d682 100644
--- a/third_party/rust/wgpu-core/src/lib.rs
+++ b/third_party/rust/wgpu-core/src/lib.rs
@@ -3,36 +3,8 @@
//! into other language-specific user-friendly libraries.
//!
//! ## Feature flags
-// NOTE: feature docs. below should be kept in sync. with `Cargo.toml`!
+#![doc = document_features::document_features!()]
//!
-//! - **`api_log_info`** --- Log all API entry points at info instead of trace level.
-//! - **`resource_log_info`** --- Log resource lifecycle management at info instead of trace level.
-//! - **`link`** _(enabled by default)_ --- Use static linking for libraries. Disable to manually
-//! link. Enabled by default.
-//! - **`renderdoc`** --- Support the Renderdoc graphics debugger:
-//! [https://renderdoc.org/](https://renderdoc.org/)
-//! - **`strict_asserts`** --- Apply run-time checks, even in release builds. These are in addition
-//! to the validation carried out at public APIs in all builds.
-//! - **`serde`** --- Enables serialization via `serde` on common wgpu types.
-//! - **`trace`** --- Enable API tracing.
-//! - **`replay`** --- Enable API replaying
-//! - **`wgsl`** --- Enable `ShaderModuleSource::Wgsl`
-//! - **`fragile-send-sync-non-atomic-wasm`** --- Implement `Send` and `Sync` on Wasm, but only if
-//! atomics are not enabled.
-//!
-//! WebGL/WebGPU objects can not be shared between threads. However, it can be useful to
-//! artificially mark them as `Send` and `Sync` anyways to make it easier to write cross-platform
-//! code. This is technically _very_ unsafe in a multithreaded environment, but on a wasm binary
-//! compiled without atomics we know we are definitely not in a multithreaded environment.
-//!
-//! ### Backends, passed through to wgpu-hal
-//!
-//! - **`metal`** --- Enable the `metal` backend.
-//! - **`vulkan`** --- Enable the `vulkan` backend.
-//! - **`gles`** --- Enable the `GLES` backend.
-//!
-//! This is used for all of GLES, OpenGL, and WebGL.
-//! - **`dx12`** --- Enable the `dx12` backend.
// When we have no backends, we end up with a lot of dead or otherwise unreachable code.
#![cfg_attr(
diff --git a/third_party/rust/wgpu-core/src/pipeline.rs b/third_party/rust/wgpu-core/src/pipeline.rs
index acc1b24b0c..4a7651b327 100644
--- a/third_party/rust/wgpu-core/src/pipeline.rs
+++ b/third_party/rust/wgpu-core/src/pipeline.rs
@@ -92,6 +92,19 @@ impl<A: HalApi> ShaderModule<A> {
pub(crate) fn raw(&self) -> &A::ShaderModule {
self.raw.as_ref().unwrap()
}
+
+ pub(crate) fn finalize_entry_point_name(
+ &self,
+ stage_bit: wgt::ShaderStages,
+ entry_point: Option<&str>,
+ ) -> Result<String, validation::StageError> {
+ match &self.interface {
+ Some(interface) => interface.finalize_entry_point_name(stage_bit, entry_point),
+ None => entry_point
+ .map(|ep| ep.to_string())
+ .ok_or(validation::StageError::NoEntryPointFound),
+ }
+ }
}
#[derive(Clone, Debug)]
@@ -213,9 +226,13 @@ impl CreateShaderModuleError {
pub struct ProgrammableStageDescriptor<'a> {
/// The compiled shader module for this stage.
pub module: ShaderModuleId,
- /// The name of the entry point in the compiled shader. There must be a function with this name
- /// in the shader.
- pub entry_point: Cow<'a, str>,
+ /// The name of the entry point in the compiled shader. The name is selected using the
+ /// following logic:
+ ///
+ /// * If `Some(name)` is specified, there must be a function with this name in the shader.
+    /// * If `None` is specified, the shader must contain exactly one entry point associated with
+    ///   this stage; proceed as if `Some(…)` had been given with that entry point's name.
+ pub entry_point: Option<Cow<'a, str>>,
}
/// Number of implicit bind groups derived at pipeline creation.
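// The widened `entry_point: Option<Cow<str>>` above is resolved by `finalize_entry_point_name`:
// an explicit name must exist in the module, and `None` is only accepted when the stage has
// exactly one candidate. A hedged sketch of that selection, with a plain slice standing in for
// the shader interface and local error variants rather than validation::StageError:
#[derive(Debug)]
enum EntryPointError {
    MissingEntryPoint(String),
    NoEntryPointFound,
    MultipleEntryPointsFound,
}

fn select_entry_point(
    stage_entry_points: &[&str],
    requested: Option<&str>,
) -> Result<String, EntryPointError> {
    match requested {
        Some(name) if stage_entry_points.contains(&name) => Ok(name.to_string()),
        Some(name) => Err(EntryPointError::MissingEntryPoint(name.to_string())),
        None => match stage_entry_points {
            [] => Err(EntryPointError::NoEntryPointFound),
            [only] => Ok(only.to_string()),
            _ => Err(EntryPointError::MultipleEntryPointsFound),
        },
    }
}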
diff --git a/third_party/rust/wgpu-core/src/present.rs b/third_party/rust/wgpu-core/src/present.rs
index 4d8e1df73e..cb4e17798f 100644
--- a/third_party/rust/wgpu-core/src/present.rs
+++ b/third_party/rust/wgpu-core/src/present.rs
@@ -9,10 +9,7 @@ When this texture is presented, we remove it from the device tracker as well as
extract it from the hub.
!*/
-use std::{
- borrow::Borrow,
- sync::atomic::{AtomicBool, Ordering},
-};
+use std::borrow::Borrow;
#[cfg(feature = "trace")]
use crate::device::trace::Action;
@@ -73,7 +70,7 @@ pub enum ConfigureSurfaceError {
PreviousOutputExists,
#[error("Both `Surface` width and height must be non-zero. Wait to recreate the `Surface` until the window has non-zero area.")]
ZeroArea,
- #[error("`Surface` width and height must be within the maximum supported texture size. Requested was ({width}, {height}), maximum extent is {max_texture_dimension_2d}.")]
+ #[error("`Surface` width and height must be within the maximum supported texture size. Requested was ({width}, {height}), maximum extent for either dimension is {max_texture_dimension_2d}.")]
TooLarge {
width: u32,
height: u32,
@@ -213,7 +210,6 @@ impl Global {
inner: Snatchable::new(resource::TextureInner::Surface {
raw: Some(ast.texture),
parent_id: surface_id,
- has_work: AtomicBool::new(false),
}),
device: device.clone(),
desc: texture_desc,
@@ -224,7 +220,10 @@ impl Global {
layers: 0..1,
mips: 0..1,
},
- info: ResourceInfo::new("<Surface>"),
+ info: ResourceInfo::new(
+ "<Surface Texture>",
+ Some(device.tracker_indices.textures.clone()),
+ ),
clear_mode: RwLock::new(resource::TextureClearMode::Surface {
clear_view: Some(clear_view),
}),
@@ -240,7 +239,7 @@ impl Global {
let mut trackers = device.trackers.lock();
trackers
.textures
- .insert_single(id, resource, hal::TextureUses::UNINITIALIZED);
+ .insert_single(resource, hal::TextureUses::UNINITIALIZED);
}
if present.acquired_texture.is_some() {
@@ -298,8 +297,7 @@ impl Global {
if !device.is_valid() {
return Err(DeviceError::Lost.into());
}
- let queue_id = device.queue_id.read().unwrap();
- let queue = hub.queues.get(queue_id).unwrap();
+ let queue = device.get_queue().unwrap();
#[cfg(feature = "trace")]
if let Some(ref mut trace) = *device.trace.lock() {
@@ -318,10 +316,13 @@ impl Global {
"Removing swapchain texture {:?} from the device tracker",
texture_id
);
- device.trackers.lock().textures.remove(texture_id);
-
let texture = hub.textures.unregister(texture_id);
if let Some(texture) = texture {
+ device
+ .trackers
+ .lock()
+ .textures
+ .remove(texture.info.tracker_index());
let mut exclusive_snatch_guard = device.snatchable_lock.write();
let suf = A::get_surface(&surface);
let mut inner = texture.inner_mut(&mut exclusive_snatch_guard);
@@ -331,15 +332,10 @@ impl Global {
resource::TextureInner::Surface {
ref mut raw,
ref parent_id,
- ref has_work,
} => {
if surface_id != *parent_id {
log::error!("Presented frame is from a different surface");
Err(hal::SurfaceError::Lost)
- } else if !has_work.load(Ordering::Relaxed) {
- log::error!("No work has been submitted for this frame");
- unsafe { suf.unwrap().discard_texture(raw.take().unwrap()) };
- Err(hal::SurfaceError::Outdated)
} else {
unsafe {
queue
@@ -413,18 +409,19 @@ impl Global {
"Removing swapchain texture {:?} from the device tracker",
texture_id
);
- device.trackers.lock().textures.remove(texture_id);
let texture = hub.textures.unregister(texture_id);
+
if let Some(texture) = texture {
+ device
+ .trackers
+ .lock()
+ .textures
+ .remove(texture.info.tracker_index());
let suf = A::get_surface(&surface);
let exclusive_snatch_guard = device.snatchable_lock.write();
match texture.inner.snatch(exclusive_snatch_guard).unwrap() {
- resource::TextureInner::Surface {
- mut raw,
- parent_id,
- has_work: _,
- } => {
+ resource::TextureInner::Surface { mut raw, parent_id } => {
if surface_id == parent_id {
unsafe { suf.unwrap().discard_texture(raw.take().unwrap()) };
} else {
diff --git a/third_party/rust/wgpu-core/src/registry.rs b/third_party/rust/wgpu-core/src/registry.rs
index f55809770b..80394351af 100644
--- a/third_party/rust/wgpu-core/src/registry.rs
+++ b/third_party/rust/wgpu-core/src/registry.rs
@@ -60,7 +60,6 @@ impl<T: Resource> Registry<T> {
#[must_use]
pub(crate) struct FutureId<'a, T: Resource> {
id: Id<T::Marker>,
- identity: Arc<IdentityManager<T::Marker>>,
data: &'a RwLock<Storage<T>>,
}
@@ -75,7 +74,7 @@ impl<T: Resource> FutureId<'_, T> {
}
pub fn init(&self, mut value: T) -> Arc<T> {
- value.as_info_mut().set_id(self.id, &self.identity);
+ value.as_info_mut().set_id(self.id);
Arc::new(value)
}
@@ -117,7 +116,6 @@ impl<T: Resource> Registry<T> {
}
None => self.identity.process(self.backend),
},
- identity: self.identity.clone(),
data: &self.storage,
}
}
@@ -125,7 +123,6 @@ impl<T: Resource> Registry<T> {
pub(crate) fn request(&self) -> FutureId<T> {
FutureId {
id: self.identity.process(self.backend),
- identity: self.identity.clone(),
data: &self.storage,
}
}
@@ -142,11 +139,12 @@ impl<T: Resource> Registry<T> {
self.storage.write()
}
pub fn unregister_locked(&self, id: Id<T::Marker>, storage: &mut Storage<T>) -> Option<Arc<T>> {
+ self.identity.free(id);
storage.remove(id)
}
pub fn force_replace(&self, id: Id<T::Marker>, mut value: T) {
let mut storage = self.storage.write();
- value.as_info_mut().set_id(id, &self.identity);
+ value.as_info_mut().set_id(id);
storage.force_replace(id, value)
}
pub fn force_replace_with_error(&self, id: Id<T::Marker>, label: &str) {
@@ -155,6 +153,7 @@ impl<T: Resource> Registry<T> {
storage.insert_error(id, label);
}
pub(crate) fn unregister(&self, id: Id<T::Marker>) -> Option<Arc<T>> {
+ self.identity.free(id);
let value = self.storage.write().remove(id);
//Returning None is legal if it's an error ID
value
diff --git a/third_party/rust/wgpu-core/src/resource.rs b/third_party/rust/wgpu-core/src/resource.rs
index de5d1868a3..aca077caab 100644
--- a/third_party/rust/wgpu-core/src/resource.rs
+++ b/third_party/rust/wgpu-core/src/resource.rs
@@ -9,11 +9,10 @@ use crate::{
global::Global,
hal_api::HalApi,
id::{AdapterId, BufferId, DeviceId, Id, Marker, SurfaceId, TextureId},
- identity::IdentityManager,
init_tracker::{BufferInitTracker, TextureInitTracker},
resource, resource_log,
snatch::{ExclusiveSnatchGuard, SnatchGuard, Snatchable},
- track::TextureSelector,
+ track::{SharedTrackerIndexAllocator, TextureSelector, TrackerIndex},
validation::MissingBufferUsageError,
Label, SubmissionIndex,
};
@@ -31,7 +30,7 @@ use std::{
ops::Range,
ptr::NonNull,
sync::{
- atomic::{AtomicBool, AtomicUsize, Ordering},
+ atomic::{AtomicUsize, Ordering},
Arc, Weak,
},
};
@@ -58,7 +57,8 @@ use std::{
#[derive(Debug)]
pub struct ResourceInfo<T: Resource> {
id: Option<Id<T::Marker>>,
- identity: Option<Arc<IdentityManager<T::Marker>>>,
+ tracker_index: TrackerIndex,
+ tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
/// The index of the last queue submission in which the resource
/// was used.
///
@@ -74,19 +74,26 @@ pub struct ResourceInfo<T: Resource> {
impl<T: Resource> Drop for ResourceInfo<T> {
fn drop(&mut self) {
- if let Some(identity) = self.identity.as_ref() {
- let id = self.id.as_ref().unwrap();
- identity.free(*id);
+ if let Some(indices) = &self.tracker_indices {
+ indices.free(self.tracker_index);
}
}
}
impl<T: Resource> ResourceInfo<T> {
#[allow(unused_variables)]
- pub(crate) fn new(label: &str) -> Self {
+ pub(crate) fn new(
+ label: &str,
+ tracker_indices: Option<Arc<SharedTrackerIndexAllocator>>,
+ ) -> Self {
+ let tracker_index = tracker_indices
+ .as_ref()
+ .map(|indices| indices.alloc())
+ .unwrap_or(TrackerIndex::INVALID);
Self {
id: None,
- identity: None,
+ tracker_index,
+ tracker_indices,
submission_index: AtomicUsize::new(0),
label: label.to_string(),
}
@@ -111,9 +118,13 @@ impl<T: Resource> ResourceInfo<T> {
self.id.unwrap()
}
- pub(crate) fn set_id(&mut self, id: Id<T::Marker>, identity: &Arc<IdentityManager<T::Marker>>) {
+ pub(crate) fn tracker_index(&self) -> TrackerIndex {
+ debug_assert!(self.tracker_index != TrackerIndex::INVALID);
+ self.tracker_index
+ }
+
+ pub(crate) fn set_id(&mut self, id: Id<T::Marker>) {
self.id = Some(id);
- self.identity = Some(identity.clone());
}
/// Record that this resource will be used by the queue submission with the
@@ -551,6 +562,7 @@ impl<A: HalApi> Buffer<A> {
device: Arc::clone(&self.device),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
+ tracker_index: self.info.tracker_index(),
label: self.info.label.clone(),
bind_groups,
}))
@@ -611,6 +623,7 @@ pub struct DestroyedBuffer<A: HalApi> {
device: Arc<Device<A>>,
label: String,
pub(crate) id: BufferId,
+ pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
bind_groups: Vec<Weak<BindGroup<A>>>,
}
@@ -717,7 +730,6 @@ pub(crate) enum TextureInner<A: HalApi> {
Surface {
raw: Option<A::SurfaceTexture>,
parent_id: SurfaceId,
- has_work: AtomicBool,
},
}
@@ -886,6 +898,7 @@ impl<A: HalApi> Texture<A> {
views,
bind_groups,
device: Arc::clone(&self.device),
+ tracker_index: self.info.tracker_index(),
submission_index: self.info.submission_index(),
id: self.info.id.unwrap(),
label: self.info.label.clone(),
@@ -1003,6 +1016,7 @@ pub struct DestroyedTexture<A: HalApi> {
device: Arc<Device<A>>,
label: String,
pub(crate) id: TextureId,
+ pub(crate) tracker_index: TrackerIndex,
pub(crate) submission_index: u64,
}
diff --git a/third_party/rust/wgpu-core/src/track/buffer.rs b/third_party/rust/wgpu-core/src/track/buffer.rs
index 323d2dab9d..a30ac2a225 100644
--- a/third_party/rust/wgpu-core/src/track/buffer.rs
+++ b/third_party/rust/wgpu-core/src/track/buffer.rs
@@ -7,7 +7,7 @@
use std::{borrow::Cow, marker::PhantomData, sync::Arc};
-use super::{PendingTransition, ResourceTracker};
+use super::{PendingTransition, ResourceTracker, TrackerIndex};
use crate::{
hal_api::HalApi,
id::BufferId,
@@ -64,16 +64,16 @@ impl<A: HalApi> BufferBindGroupState<A> {
#[allow(clippy::pattern_type_mismatch)]
pub(crate) fn optimize(&self) {
let mut buffers = self.buffers.lock();
- buffers.sort_unstable_by_key(|(b, _)| b.as_info().id().unzip().0);
+ buffers.sort_unstable_by_key(|(b, _)| b.as_info().tracker_index());
}
/// Returns a list of all buffers tracked. May contain duplicates.
#[allow(clippy::pattern_type_mismatch)]
- pub fn used_ids(&self) -> impl Iterator<Item = BufferId> + '_ {
+ pub fn used_tracker_indices(&self) -> impl Iterator<Item = TrackerIndex> + '_ {
let buffers = self.buffers.lock();
buffers
.iter()
- .map(|(ref b, _)| b.as_info().id())
+ .map(|(ref b, _)| b.as_info().tracker_index())
.collect::<Vec<_>>()
.into_iter()
}
@@ -149,20 +149,6 @@ impl<A: HalApi> BufferUsageScope<A> {
resources.into_iter()
}
- pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
- let index = id.unzip().0 as usize;
- if index > self.metadata.size() {
- return None;
- }
- self.tracker_assert_in_bounds(index);
- unsafe {
- if self.metadata.contains_unchecked(index) {
- return Some(self.metadata.get_resource_unchecked(index));
- }
- }
- None
- }
-
/// Merge the list of buffer states in the given bind group into this usage scope.
///
/// If any of the resulting states is invalid, stops the merge and returns a usage
@@ -181,7 +167,7 @@ impl<A: HalApi> BufferUsageScope<A> {
) -> Result<(), UsageConflict> {
let buffers = bind_group.buffers.lock();
for &(ref resource, state) in &*buffers {
- let index = resource.as_info().id().unzip().0 as usize;
+ let index = resource.as_info().tracker_index().as_usize();
unsafe {
insert_or_merge(
@@ -255,7 +241,7 @@ impl<A: HalApi> BufferUsageScope<A> {
.get(id)
.map_err(|_| UsageConflict::BufferInvalid { id })?;
- let index = id.unzip().0 as usize;
+ let index = buffer.info.tracker_index().as_usize();
self.allow_index(index);
@@ -292,7 +278,7 @@ pub(crate) struct BufferTracker<A: HalApi> {
temp: Vec<PendingTransition<BufferUses>>,
}
-impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
+impl<A: HalApi> ResourceTracker for BufferTracker<A> {
/// Try to remove the buffer `id` from this tracker if it is otherwise unused.
///
/// A buffer is 'otherwise unused' when the only references to it are:
@@ -313,8 +299,8 @@ impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
/// [`Device::trackers`]: crate::device::Device
/// [`self.metadata`]: BufferTracker::metadata
/// [`Hub::buffers`]: crate::hub::Hub::buffers
- fn remove_abandoned(&mut self, id: BufferId) -> bool {
- let index = id.unzip().0 as usize;
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
if index > self.metadata.size() {
return false;
@@ -329,16 +315,10 @@ impl<A: HalApi> ResourceTracker<Buffer<A>> for BufferTracker<A> {
//so it has already been released by the user and is no longer inside Registry/Storage
if existing_ref_count <= 2 {
self.metadata.remove(index);
- log::trace!("Buffer {:?} is not tracked anymore", id,);
return true;
- } else {
- log::trace!(
- "Buffer {:?} is still referenced from {}",
- id,
- existing_ref_count
- );
- return false;
}
+
+ return false;
}
}
true
@@ -404,8 +384,8 @@ impl<A: HalApi> BufferTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
- pub fn insert_single(&mut self, id: BufferId, resource: Arc<Buffer<A>>, state: BufferUses) {
- let index = id.unzip().0 as usize;
+ pub fn insert_single(&mut self, resource: Arc<Buffer<A>>, state: BufferUses) {
+ let index = resource.info.tracker_index().as_usize();
self.allow_index(index);
@@ -440,7 +420,7 @@ impl<A: HalApi> BufferTracker<A> {
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
pub fn set_single(&mut self, buffer: &Arc<Buffer<A>>, state: BufferUses) -> SetSingleResult<A> {
- let index: usize = buffer.as_info().id().unzip().0 as usize;
+ let index: usize = buffer.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -561,16 +541,15 @@ impl<A: HalApi> BufferTracker<A> {
pub unsafe fn set_and_remove_from_usage_scope_sparse(
&mut self,
scope: &mut BufferUsageScope<A>,
- id_source: impl IntoIterator<Item = BufferId>,
+ index_source: impl IntoIterator<Item = TrackerIndex>,
) {
let incoming_size = scope.state.len();
if incoming_size > self.start.len() {
self.set_size(incoming_size);
}
- for id in id_source {
- let (index32, _, _) = id.unzip();
- let index = index32 as usize;
+ for index in index_source {
+ let index = index.as_usize();
scope.tracker_assert_in_bounds(index);
@@ -599,8 +578,8 @@ impl<A: HalApi> BufferTracker<A> {
}
#[allow(dead_code)]
- pub fn get(&self, id: BufferId) -> Option<&Arc<Buffer<A>>> {
- let index = id.unzip().0 as usize;
+ pub fn get(&self, index: TrackerIndex) -> Option<&Arc<Buffer<A>>> {
+ let index = index.as_usize();
if index > self.metadata.size() {
return None;
}
@@ -785,11 +764,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_buffer(
- BufferId::zip(
- index32,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
*current_state,
new_state,
));
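// `remove_abandoned` above treats a strong count of 2 or less as "no user-visible handle left":
// roughly, one reference is the tracker's own Arc and one belongs to the suspected-resources
// sweep that is calling in. A simplified single-slot model of that test (the real tracker
// indexes its metadata by TrackerIndex):
use std::sync::Arc;

struct TrackerSlot<T> {
    resource: Option<Arc<T>>,
}

impl<T> TrackerSlot<T> {
    fn remove_abandoned(&mut self) -> bool {
        let still_referenced = self
            .resource
            .as_ref()
            .map(|resource| Arc::strong_count(resource) > 2)
            .unwrap_or(false);
        if still_referenced {
            false
        } else {
            // Drop the tracker's Arc; whatever the caller still holds is now the last reference.
            self.resource = None;
            true
        }
    }
}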
diff --git a/third_party/rust/wgpu-core/src/track/metadata.rs b/third_party/rust/wgpu-core/src/track/metadata.rs
index e5f4d5e969..744783a7fa 100644
--- a/third_party/rust/wgpu-core/src/track/metadata.rs
+++ b/third_party/rust/wgpu-core/src/track/metadata.rs
@@ -1,6 +1,6 @@
//! The `ResourceMetadata` type.
-use crate::{resource::Resource, Epoch};
+use crate::resource::Resource;
use bit_vec::BitVec;
use std::{borrow::Cow, mem, sync::Arc};
use wgt::strict_assert;
@@ -194,15 +194,6 @@ impl<T: Resource> ResourceMetadataProvider<'_, T> {
}
}
}
- /// Get the epoch from this.
- ///
- /// # Safety
- ///
- /// - The index must be in bounds of the metadata tracker if this uses an indirect source.
- #[inline(always)]
- pub(super) unsafe fn get_epoch(self, index: usize) -> Epoch {
- unsafe { self.get_own(index).as_info().id().unzip().1 }
- }
}
/// Resizes the given bitvec to the given size. I'm not sure why this is hard to do but it is.
diff --git a/third_party/rust/wgpu-core/src/track/mod.rs b/third_party/rust/wgpu-core/src/track/mod.rs
index a36280d03b..9ca37ebadc 100644
--- a/third_party/rust/wgpu-core/src/track/mod.rs
+++ b/third_party/rust/wgpu-core/src/track/mod.rs
@@ -102,16 +102,11 @@ mod stateless;
mod texture;
use crate::{
- binding_model, command, conv,
- hal_api::HalApi,
- id::{self, Id},
- pipeline, resource,
- snatch::SnatchGuard,
- storage::Storage,
+ binding_model, command, conv, hal_api::HalApi, id, pipeline, resource, snatch::SnatchGuard,
};
-use parking_lot::RwLock;
-use std::{fmt, ops};
+use parking_lot::{Mutex, RwLock};
+use std::{fmt, ops, sync::Arc};
use thiserror::Error;
pub(crate) use buffer::{BufferBindGroupState, BufferTracker, BufferUsageScope};
@@ -122,6 +117,130 @@ pub(crate) use texture::{
};
use wgt::strict_assert_ne;
+#[repr(transparent)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub(crate) struct TrackerIndex(u32);
+
+impl TrackerIndex {
+ /// A dummy value to place in ResourceInfo for resources that are never tracked.
+ pub const INVALID: Self = TrackerIndex(u32::MAX);
+
+ pub fn as_usize(self) -> usize {
+ debug_assert!(self != Self::INVALID);
+ self.0 as usize
+ }
+}
+
+/// wgpu-core internally uses some array-like storage for tracking resources.
+/// To that end, there needs to be a uniquely assigned index for each live resource
+/// of a certain type. This index is separate from the resource ID for various reasons:
+/// - There can be multiple resource IDs pointing to the same resource.
+/// - IDs of dead handles can be recycled while resources are internally held alive (and tracked).
+/// - The plan is to remove IDs in the long run (https://github.com/gfx-rs/wgpu/issues/5121).
+/// In order to produce these tracker indices, there is a shared TrackerIndexAllocator
+/// per resource type. Indices have the same lifetime as the internal resource they
+/// are associated with (alloc happens when creating the resource and free is called when
+/// the resource is dropped).
+struct TrackerIndexAllocator {
+ unused: Vec<TrackerIndex>,
+ next_index: TrackerIndex,
+}
+
+impl TrackerIndexAllocator {
+ pub fn new() -> Self {
+ TrackerIndexAllocator {
+ unused: Vec::new(),
+ next_index: TrackerIndex(0),
+ }
+ }
+
+ pub fn alloc(&mut self) -> TrackerIndex {
+ if let Some(index) = self.unused.pop() {
+ return index;
+ }
+
+ let index = self.next_index;
+ self.next_index.0 += 1;
+
+ index
+ }
+
+ pub fn free(&mut self, index: TrackerIndex) {
+ self.unused.push(index);
+ }
+
+ // This is used to pre-allocate the tracker storage.
+ pub fn size(&self) -> usize {
+ self.next_index.0 as usize
+ }
+}
+
+impl std::fmt::Debug for TrackerIndexAllocator {
+ fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> Result<(), std::fmt::Error> {
+ Ok(())
+ }
+}
+
+/// See TrackerIndexAllocator.
+#[derive(Debug)]
+pub(crate) struct SharedTrackerIndexAllocator {
+ inner: Mutex<TrackerIndexAllocator>,
+}
+
+impl SharedTrackerIndexAllocator {
+ pub fn new() -> Self {
+ SharedTrackerIndexAllocator {
+ inner: Mutex::new(TrackerIndexAllocator::new()),
+ }
+ }
+
+ pub fn alloc(&self) -> TrackerIndex {
+ self.inner.lock().alloc()
+ }
+
+ pub fn free(&self, index: TrackerIndex) {
+ self.inner.lock().free(index);
+ }
+
+ pub fn size(&self) -> usize {
+ self.inner.lock().size()
+ }
+}
+
+pub(crate) struct TrackerIndexAllocators {
+ pub buffers: Arc<SharedTrackerIndexAllocator>,
+ pub staging_buffers: Arc<SharedTrackerIndexAllocator>,
+ pub textures: Arc<SharedTrackerIndexAllocator>,
+ pub texture_views: Arc<SharedTrackerIndexAllocator>,
+ pub samplers: Arc<SharedTrackerIndexAllocator>,
+ pub bind_groups: Arc<SharedTrackerIndexAllocator>,
+ pub bind_group_layouts: Arc<SharedTrackerIndexAllocator>,
+ pub compute_pipelines: Arc<SharedTrackerIndexAllocator>,
+ pub render_pipelines: Arc<SharedTrackerIndexAllocator>,
+ pub pipeline_layouts: Arc<SharedTrackerIndexAllocator>,
+ pub bundles: Arc<SharedTrackerIndexAllocator>,
+ pub query_sets: Arc<SharedTrackerIndexAllocator>,
+}
+
+impl TrackerIndexAllocators {
+ pub fn new() -> Self {
+ TrackerIndexAllocators {
+ buffers: Arc::new(SharedTrackerIndexAllocator::new()),
+ staging_buffers: Arc::new(SharedTrackerIndexAllocator::new()),
+ textures: Arc::new(SharedTrackerIndexAllocator::new()),
+ texture_views: Arc::new(SharedTrackerIndexAllocator::new()),
+ samplers: Arc::new(SharedTrackerIndexAllocator::new()),
+ bind_groups: Arc::new(SharedTrackerIndexAllocator::new()),
+ bind_group_layouts: Arc::new(SharedTrackerIndexAllocator::new()),
+ compute_pipelines: Arc::new(SharedTrackerIndexAllocator::new()),
+ render_pipelines: Arc::new(SharedTrackerIndexAllocator::new()),
+ pipeline_layouts: Arc::new(SharedTrackerIndexAllocator::new()),
+ bundles: Arc::new(SharedTrackerIndexAllocator::new()),
+ query_sets: Arc::new(SharedTrackerIndexAllocator::new()),
+ }
+ }
+}
+
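// A small usage sketch for the allocators defined above (assuming the types are in scope as
// written): indices are dense, recycled after `free`, and `size()` is a high-water mark used to
// pre-size tracker storage rather than a live count.
fn tracker_index_allocator_example() {
    let allocator = SharedTrackerIndexAllocator::new();

    let a = allocator.alloc(); // TrackerIndex(0)
    let b = allocator.alloc(); // TrackerIndex(1)
    assert_ne!(a, b);
    assert_eq!(allocator.size(), 2);

    allocator.free(a);
    let c = allocator.alloc(); // recycles TrackerIndex(0)
    assert_eq!(a, c);
    assert_eq!(allocator.size(), 2); // still the high-water mark
}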
/// A structure containing all the information about a particular resource
/// transition. User code should be able to generate a pipeline barrier
/// based on the contents.
@@ -359,31 +478,14 @@ pub(crate) struct RenderBundleScope<A: HalApi> {
impl<A: HalApi> RenderBundleScope<A> {
/// Create the render bundle scope and pull the maximum IDs from the hubs.
- pub fn new(
- buffers: &Storage<resource::Buffer<A>>,
- textures: &Storage<resource::Texture<A>>,
- bind_groups: &Storage<binding_model::BindGroup<A>>,
- render_pipelines: &Storage<pipeline::RenderPipeline<A>>,
- query_sets: &Storage<resource::QuerySet<A>>,
- ) -> Self {
- let value = Self {
+ pub fn new() -> Self {
+ Self {
buffers: RwLock::new(BufferUsageScope::new()),
textures: RwLock::new(TextureUsageScope::new()),
bind_groups: RwLock::new(StatelessTracker::new()),
render_pipelines: RwLock::new(StatelessTracker::new()),
query_sets: RwLock::new(StatelessTracker::new()),
- };
-
- value.buffers.write().set_size(buffers.len());
- value.textures.write().set_size(textures.len());
- value.bind_groups.write().set_size(bind_groups.len());
- value
- .render_pipelines
- .write()
- .set_size(render_pipelines.len());
- value.query_sets.write().set_size(query_sets.len());
-
- value
+ }
}
/// Merge the inner contents of a bind group into the render bundle tracker.
@@ -420,17 +522,14 @@ pub(crate) struct UsageScope<A: HalApi> {
impl<A: HalApi> UsageScope<A> {
/// Create the render bundle scope and pull the maximum IDs from the hubs.
- pub fn new(
- buffers: &Storage<resource::Buffer<A>>,
- textures: &Storage<resource::Texture<A>>,
- ) -> Self {
+ pub fn new(tracker_indices: &TrackerIndexAllocators) -> Self {
let mut value = Self {
buffers: BufferUsageScope::new(),
textures: TextureUsageScope::new(),
};
- value.buffers.set_size(buffers.len());
- value.textures.set_size(textures.len());
+ value.buffers.set_size(tracker_indices.buffers.size());
+ value.textures.set_size(tracker_indices.textures.size());
value
}
@@ -478,11 +577,8 @@ impl<A: HalApi> UsageScope<A> {
}
}
-pub(crate) trait ResourceTracker<R>
-where
- R: resource::Resource,
-{
- fn remove_abandoned(&mut self, id: Id<R::Marker>) -> bool;
+pub(crate) trait ResourceTracker {
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool;
}
/// A full double sided tracker used by CommandBuffers and the Device.
@@ -513,48 +609,6 @@ impl<A: HalApi> Tracker<A> {
}
}
- /// Pull the maximum IDs from the hubs.
- pub fn set_size(
- &mut self,
- buffers: Option<&Storage<resource::Buffer<A>>>,
- textures: Option<&Storage<resource::Texture<A>>>,
- views: Option<&Storage<resource::TextureView<A>>>,
- samplers: Option<&Storage<resource::Sampler<A>>>,
- bind_groups: Option<&Storage<binding_model::BindGroup<A>>>,
- compute_pipelines: Option<&Storage<pipeline::ComputePipeline<A>>>,
- render_pipelines: Option<&Storage<pipeline::RenderPipeline<A>>>,
- bundles: Option<&Storage<command::RenderBundle<A>>>,
- query_sets: Option<&Storage<resource::QuerySet<A>>>,
- ) {
- if let Some(buffers) = buffers {
- self.buffers.set_size(buffers.len());
- };
- if let Some(textures) = textures {
- self.textures.set_size(textures.len());
- };
- if let Some(views) = views {
- self.views.set_size(views.len());
- };
- if let Some(samplers) = samplers {
- self.samplers.set_size(samplers.len());
- };
- if let Some(bind_groups) = bind_groups {
- self.bind_groups.set_size(bind_groups.len());
- };
- if let Some(compute_pipelines) = compute_pipelines {
- self.compute_pipelines.set_size(compute_pipelines.len());
- }
- if let Some(render_pipelines) = render_pipelines {
- self.render_pipelines.set_size(render_pipelines.len());
- };
- if let Some(bundles) = bundles {
- self.bundles.set_size(bundles.len());
- };
- if let Some(query_sets) = query_sets {
- self.query_sets.set_size(query_sets.len());
- };
- }
-
/// Iterates through all resources in the given bind group and adopts
/// the state given for those resources in the UsageScope. It also
/// removes all touched resources from the usage scope.
@@ -585,7 +639,7 @@ impl<A: HalApi> Tracker<A> {
unsafe {
self.buffers.set_and_remove_from_usage_scope_sparse(
&mut scope.buffers,
- bind_group.buffers.used_ids(),
+ bind_group.buffers.used_tracker_indices(),
)
};
unsafe {
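
The hunks above stop sizing scopes from per-Storage lengths and instead take sizes from the device's tracker index allocators. A minimal sketch of that idea follows; the names SharedTrackerIndexAllocator and TrackerIndex and the internal layout are assumptions for illustration, not the wgpu-core implementation. A dense allocator recycles freed slots, so a scope can pre-size its state vectors from size() without walking every hub.

    // Sketch only: dense, recycling index allocation in the spirit of TrackerIndexAllocators.
    use std::sync::Mutex;

    #[derive(Debug, Clone, Copy, PartialEq, Eq)]
    pub struct TrackerIndex(usize);

    impl TrackerIndex {
        pub fn as_usize(self) -> usize {
            self.0
        }
    }

    #[derive(Default)]
    pub struct SharedTrackerIndexAllocator {
        inner: Mutex<AllocatorState>,
    }

    #[derive(Default)]
    struct AllocatorState {
        next: usize,
        free: Vec<usize>,
    }

    impl SharedTrackerIndexAllocator {
        /// Hand out the lowest free index, reusing indices returned via `free`.
        pub fn alloc(&self) -> TrackerIndex {
            let mut state = self.inner.lock().unwrap();
            match state.free.pop() {
                Some(i) => TrackerIndex(i),
                None => {
                    let i = state.next;
                    state.next += 1;
                    TrackerIndex(i)
                }
            }
        }

        /// Return an index so a later resource can reuse it.
        pub fn free(&self, index: TrackerIndex) {
            self.inner.lock().unwrap().free.push(index.0);
        }

        /// Upper bound on live indices; scopes use this to pre-size their vectors.
        pub fn size(&self) -> usize {
            self.inner.lock().unwrap().next
        }
    }
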
diff --git a/third_party/rust/wgpu-core/src/track/stateless.rs b/third_party/rust/wgpu-core/src/track/stateless.rs
index 4111a90f79..00225f2305 100644
--- a/third_party/rust/wgpu-core/src/track/stateless.rs
+++ b/third_party/rust/wgpu-core/src/track/stateless.rs
@@ -10,7 +10,7 @@ use parking_lot::Mutex;
use crate::{id::Id, resource::Resource, resource_log, storage::Storage, track::ResourceMetadata};
-use super::ResourceTracker;
+use super::{ResourceTracker, TrackerIndex};
/// Satisfy clippy.
type Pair<T> = (Id<<T as Resource>::Marker>, Arc<T>);
@@ -74,7 +74,7 @@ pub(crate) struct StatelessTracker<T: Resource> {
metadata: ResourceMetadata<T>,
}
-impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
+impl<T: Resource> ResourceTracker for StatelessTracker<T> {
/// Try to remove the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
@@ -82,14 +82,14 @@ impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
- fn remove_abandoned(&mut self, id: Id<T::Marker>) -> bool {
- let index = id.unzip().0 as usize;
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
if index >= self.metadata.size() {
return false;
}
- resource_log!("StatelessTracker::remove_abandoned {id:?}");
+ resource_log!("StatelessTracker::remove_abandoned {index:?}");
self.tracker_assert_in_bounds(index);
@@ -100,17 +100,10 @@ impl<T: Resource> ResourceTracker<T> for StatelessTracker<T> {
// so it has already been released by the user and is no longer inside the Registry/Storage
if existing_ref_count <= 2 {
self.metadata.remove(index);
- log::trace!("{} {:?} is not tracked anymore", T::TYPE, id,);
return true;
- } else {
- log::trace!(
- "{} {:?} is still referenced from {}",
- T::TYPE,
- id,
- existing_ref_count
- );
- return false;
}
+
+ return false;
}
}
true
@@ -160,9 +153,8 @@ impl<T: Resource> StatelessTracker<T> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
- pub fn insert_single(&mut self, id: Id<T::Marker>, resource: Arc<T>) {
- let (index32, _epoch, _) = id.unzip();
- let index = index32 as usize;
+ pub fn insert_single(&mut self, resource: Arc<T>) {
+ let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -184,8 +176,7 @@ impl<T: Resource> StatelessTracker<T> {
) -> Option<&'a Arc<T>> {
let resource = storage.get(id).ok()?;
- let (index32, _epoch, _) = id.unzip();
- let index = index32 as usize;
+ let index = resource.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -221,18 +212,4 @@ impl<T: Resource> StatelessTracker<T> {
}
}
}
-
- pub fn get(&self, id: Id<T::Marker>) -> Option<&Arc<T>> {
- let index = id.unzip().0 as usize;
- if index > self.metadata.size() {
- return None;
- }
- self.tracker_assert_in_bounds(index);
- unsafe {
- if self.metadata.contains_unchecked(index) {
- return Some(self.metadata.get_resource_unchecked(index));
- }
- }
- None
- }
}
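
remove_abandoned now takes a TrackerIndex rather than an Id and decides from the reference count alone whether only the tracker and registry still hold the resource. A simplified model of that check, assuming a plain Vec<Option<Arc<T>>> in place of ResourceMetadata (illustration only, not the wgpu-core type):

    use std::sync::Arc;

    struct SimpleTracker<T> {
        slots: Vec<Option<Arc<T>>>,
    }

    impl<T> SimpleTracker<T> {
        /// Drop the tracked resource at `index` if the user no longer holds it.
        /// Mirrors the "ref count <= 2" rule: one reference belongs to the tracker,
        /// a second may belong to the registry, anything more is still user-owned.
        fn remove_abandoned(&mut self, index: usize) -> bool {
            if index >= self.slots.len() {
                return false;
            }
            let abandoned = match &self.slots[index] {
                Some(resource) => Arc::strong_count(resource) <= 2,
                // Nothing tracked at this index, so there is nothing left to keep alive.
                None => return true,
            };
            if abandoned {
                self.slots[index] = None;
            }
            abandoned
        }
    }
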
diff --git a/third_party/rust/wgpu-core/src/track/texture.rs b/third_party/rust/wgpu-core/src/track/texture.rs
index 601df11e1b..e7c4707c93 100644
--- a/third_party/rust/wgpu-core/src/track/texture.rs
+++ b/third_party/rust/wgpu-core/src/track/texture.rs
@@ -19,10 +19,11 @@
* will treat the contents as junk.
!*/
-use super::{range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker};
+use super::{
+ range::RangedStates, PendingTransition, PendingTransitionList, ResourceTracker, TrackerIndex,
+};
use crate::{
hal_api::HalApi,
- id::TextureId,
resource::{Resource, Texture, TextureInner},
snatch::SnatchGuard,
track::{
@@ -173,7 +174,7 @@ impl<A: HalApi> TextureBindGroupState<A> {
/// accesses will be in a constant ascending order.
pub(crate) fn optimize(&self) {
let mut textures = self.textures.lock();
- textures.sort_unstable_by_key(|v| v.texture.as_info().id().unzip().0);
+ textures.sort_unstable_by_key(|v| v.texture.as_info().tracker_index());
}
/// Returns a list of all textures tracked. May contain duplicates.
@@ -359,7 +360,7 @@ impl<A: HalApi> TextureUsageScope<A> {
selector: Option<TextureSelector>,
new_state: TextureUses,
) -> Result<(), UsageConflict> {
- let index = texture.as_info().id().unzip().0 as usize;
+ let index = texture.as_info().tracker_index().as_usize();
self.tracker_assert_in_bounds(index);
@@ -393,7 +394,7 @@ pub(crate) struct TextureTracker<A: HalApi> {
_phantom: PhantomData<A>,
}
-impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
+impl<A: HalApi> ResourceTracker for TextureTracker<A> {
/// Try to remove the given resource from the tracker iff we have the last reference to the
/// resource and the epoch matches.
///
@@ -401,10 +402,10 @@ impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
- fn remove_abandoned(&mut self, id: TextureId) -> bool {
- let index = id.unzip().0 as usize;
+ fn remove_abandoned(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
- if index > self.metadata.size() {
+ if index >= self.metadata.size() {
return false;
}
@@ -419,16 +420,10 @@ impl<A: HalApi> ResourceTracker<Texture<A>> for TextureTracker<A> {
self.start_set.complex.remove(&index);
self.end_set.complex.remove(&index);
self.metadata.remove(index);
- log::trace!("Texture {:?} is not tracked anymore", id,);
return true;
- } else {
- log::trace!(
- "Texture {:?} is still referenced from {}",
- id,
- existing_ref_count
- );
- return false;
}
+
+ return false;
}
}
true
@@ -518,8 +513,8 @@ impl<A: HalApi> TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// the vectors will be extended. A call to set_size is not needed.
- pub fn insert_single(&mut self, id: TextureId, resource: Arc<Texture<A>>, usage: TextureUses) {
- let index = id.unzip().0 as usize;
+ pub fn insert_single(&mut self, resource: Arc<Texture<A>>, usage: TextureUses) {
+ let index = resource.info.tracker_index().as_usize();
self.allow_index(index);
@@ -560,7 +555,7 @@ impl<A: HalApi> TextureTracker<A> {
selector: TextureSelector,
new_state: TextureUses,
) -> Option<Drain<'_, PendingTransition<TextureUses>>> {
- let index = texture.as_info().id().unzip().0 as usize;
+ let index = texture.as_info().tracker_index().as_usize();
self.allow_index(index);
@@ -694,7 +689,7 @@ impl<A: HalApi> TextureTracker<A> {
let textures = bind_group_state.textures.lock();
for t in textures.iter() {
- let index = t.texture.as_info().id().unzip().0 as usize;
+ let index = t.texture.as_info().tracker_index().as_usize();
scope.tracker_assert_in_bounds(index);
if unsafe { !scope.metadata.contains_unchecked(index) } {
@@ -727,10 +722,10 @@ impl<A: HalApi> TextureTracker<A> {
///
/// If the ID is higher than the length of internal vectors,
/// false will be returned.
- pub fn remove(&mut self, id: TextureId) -> bool {
- let index = id.unzip().0 as usize;
+ pub fn remove(&mut self, index: TrackerIndex) -> bool {
+ let index = index.as_usize();
- if index > self.metadata.size() {
+ if index >= self.metadata.size() {
return false;
}
@@ -1080,11 +1075,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
texture_selector.clone(),
*current_simple,
new_simple,
@@ -1111,11 +1102,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
selector,
*current_simple,
new_state,
@@ -1156,11 +1143,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),
@@ -1201,11 +1184,7 @@ unsafe fn merge<A: HalApi>(
if invalid_resource_state(merged_state) {
return Err(UsageConflict::from_texture(
- TextureId::zip(
- index as _,
- unsafe { metadata_provider.get_epoch(index) },
- A::VARIANT,
- ),
+ unsafe { metadata_provider.get_own(index).info.id() },
TextureSelector {
mips: mip_id..mip_id + 1,
layers: layers.clone(),
diff --git a/third_party/rust/wgpu-core/src/validation.rs b/third_party/rust/wgpu-core/src/validation.rs
index a0947ae83f..e4846c4000 100644
--- a/third_party/rust/wgpu-core/src/validation.rs
+++ b/third_party/rust/wgpu-core/src/validation.rs
@@ -1,4 +1,8 @@
-use crate::{device::bgl, FastHashMap, FastHashSet};
+use crate::{
+ device::bgl,
+ id::{markers::Buffer, Id},
+ FastHashMap, FastHashSet,
+};
use arrayvec::ArrayVec;
use std::{collections::hash_map::Entry, fmt};
use thiserror::Error;
@@ -134,8 +138,11 @@ pub struct Interface {
}
#[derive(Clone, Debug, Error)]
-#[error("Buffer usage is {actual:?} which does not contain required usage {expected:?}")]
+#[error(
+ "Usage flags {actual:?} for buffer {id:?} do not contain required usage flags {expected:?}"
+)]
pub struct MissingBufferUsageError {
+ pub(crate) id: Id<Buffer>,
pub(crate) actual: wgt::BufferUsages,
pub(crate) expected: wgt::BufferUsages,
}
@@ -143,11 +150,16 @@ pub struct MissingBufferUsageError {
/// Checks that the given buffer usage contains the required buffer usage,
/// returns an error otherwise.
pub fn check_buffer_usage(
+ id: Id<Buffer>,
actual: wgt::BufferUsages,
expected: wgt::BufferUsages,
) -> Result<(), MissingBufferUsageError> {
if !actual.contains(expected) {
- Err(MissingBufferUsageError { actual, expected })
+ Err(MissingBufferUsageError {
+ id,
+ actual,
+ expected,
+ })
} else {
Ok(())
}
@@ -271,6 +283,16 @@ pub enum StageError {
},
#[error("Location[{location}] is provided by the previous stage output but is not consumed as input by this stage.")]
InputNotConsumed { location: wgt::ShaderLocation },
+ #[error(
+ "Unable to select an entry point: no entry point was found in the provided shader module"
+ )]
+ NoEntryPointFound,
+ #[error(
+ "Unable to select an entry point: \
+ multiple entry points were found in the provided shader module, \
+ but no entry point was specified"
+ )]
+ MultipleEntryPointsFound,
}
fn map_storage_format_to_naga(format: wgt::TextureFormat) -> Option<naga::StorageFormat> {
@@ -892,9 +914,15 @@ impl Interface {
class,
},
naga::TypeInner::Sampler { comparison } => ResourceType::Sampler { comparison },
- naga::TypeInner::Array { stride, .. } => ResourceType::Buffer {
- size: wgt::BufferSize::new(stride as u64).unwrap(),
- },
+ naga::TypeInner::Array { stride, size, .. } => {
+ let size = match size {
+ naga::ArraySize::Constant(size) => size.get() * stride,
+ naga::ArraySize::Dynamic => stride,
+ };
+ ResourceType::Buffer {
+ size: wgt::BufferSize::new(size as u64).unwrap(),
+ }
+ }
ref other => ResourceType::Buffer {
size: wgt::BufferSize::new(other.size(module.to_ctx()) as u64).unwrap(),
},
@@ -953,6 +981,37 @@ impl Interface {
}
}
+ pub fn finalize_entry_point_name(
+ &self,
+ stage_bit: wgt::ShaderStages,
+ entry_point_name: Option<&str>,
+ ) -> Result<String, StageError> {
+ let stage = Self::shader_stage_from_stage_bit(stage_bit);
+ entry_point_name
+ .map(|ep| ep.to_string())
+ .map(Ok)
+ .unwrap_or_else(|| {
+ let mut entry_points = self
+ .entry_points
+ .keys()
+ .filter_map(|(ep_stage, name)| (ep_stage == &stage).then_some(name));
+ let first = entry_points.next().ok_or(StageError::NoEntryPointFound)?;
+ if entry_points.next().is_some() {
+ return Err(StageError::MultipleEntryPointsFound);
+ }
+ Ok(first.clone())
+ })
+ }
+
+ pub(crate) fn shader_stage_from_stage_bit(stage_bit: wgt::ShaderStages) -> naga::ShaderStage {
+ match stage_bit {
+ wgt::ShaderStages::VERTEX => naga::ShaderStage::Vertex,
+ wgt::ShaderStages::FRAGMENT => naga::ShaderStage::Fragment,
+ wgt::ShaderStages::COMPUTE => naga::ShaderStage::Compute,
+ _ => unreachable!(),
+ }
+ }
+
pub fn check_stage(
&self,
layouts: &mut BindingLayoutSource<'_>,
@@ -964,17 +1023,13 @@ impl Interface {
) -> Result<StageIo, StageError> {
// Since a shader module can have multiple entry points with the same name,
// we need to look for one with the right execution model.
- let shader_stage = match stage_bit {
- wgt::ShaderStages::VERTEX => naga::ShaderStage::Vertex,
- wgt::ShaderStages::FRAGMENT => naga::ShaderStage::Fragment,
- wgt::ShaderStages::COMPUTE => naga::ShaderStage::Compute,
- _ => unreachable!(),
- };
+ let shader_stage = Self::shader_stage_from_stage_bit(stage_bit);
let pair = (shader_stage, entry_point_name.to_string());
- let entry_point = self
- .entry_points
- .get(&pair)
- .ok_or(StageError::MissingEntryPoint(pair.1))?;
+ let entry_point = match self.entry_points.get(&pair) {
+ Some(some) => some,
+ None => return Err(StageError::MissingEntryPoint(pair.1)),
+ };
+ let (_stage, entry_point_name) = pair;
// check resources visibility
for &handle in entry_point.resources.iter() {
@@ -1246,3 +1301,31 @@ impl Interface {
.map(|ep| ep.dual_source_blending)
}
}
+
+// https://gpuweb.github.io/gpuweb/#abstract-opdef-calculating-color-attachment-bytes-per-sample
+pub fn validate_color_attachment_bytes_per_sample(
+ attachment_formats: impl Iterator<Item = Option<wgt::TextureFormat>>,
+ limit: u32,
+) -> Result<(), u32> {
+ let mut total_bytes_per_sample = 0;
+ for format in attachment_formats {
+ let Some(format) = format else {
+ continue;
+ };
+
+ let byte_cost = format.target_pixel_byte_cost().unwrap();
+ let alignment = format.target_component_alignment().unwrap();
+
+ let rem = total_bytes_per_sample % alignment;
+ if rem != 0 {
+ total_bytes_per_sample += alignment - rem;
+ }
+ total_bytes_per_sample += byte_cost;
+ }
+
+ if total_bytes_per_sample > limit {
+ return Err(total_bytes_per_sample);
+ }
+
+ Ok(())
+}
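
validate_color_attachment_bytes_per_sample implements the WebGPU rule of rounding the running total up to each attachment's component alignment before adding its byte cost. Below is a standalone sketch with formats reduced to plain (byte_cost, alignment) pairs; the numbers in the test are taken from the WebGPU table for R8Unorm and Rgba16Float (1 byte/align 1 and 8 bytes/align 2) purely for illustration.

    /// Sketch of the bytes-per-sample accumulation with the format lookup factored out.
    fn bytes_per_sample(attachments: &[(u32, u32)], limit: u32) -> Result<(), u32> {
        let mut total = 0u32;
        for &(byte_cost, alignment) in attachments {
            // Round the running total up to this attachment's alignment before adding it.
            let rem = total % alignment;
            if rem != 0 {
                total += alignment - rem;
            }
            total += byte_cost;
        }
        if total > limit {
            return Err(total);
        }
        Ok(())
    }

    #[test]
    fn padding_is_applied_between_attachments() {
        // R8Unorm (1 byte, align 1) then Rgba16Float (8 bytes, align 2):
        // 1 -> padded to 2 -> plus 8 = 10 bytes per sample, inside a 32 byte limit.
        assert_eq!(bytes_per_sample(&[(1, 1), (8, 2)], 32), Ok(()));
    }
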
diff --git a/third_party/rust/wgpu-hal/.cargo-checksum.json b/third_party/rust/wgpu-hal/.cargo-checksum.json
index 65fb8499cb..de9bc38719 100644
--- a/third_party/rust/wgpu-hal/.cargo-checksum.json
+++ b/third_party/rust/wgpu-hal/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"24691ea259ad568f29d8b25dff9720f0f30dbaaf47df1b7c223bb2e1dc2a943f","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"099ee611a911dc19330a61bffcde13663929a51b25ac528ee33ea796d695491e","build.rs":"c80bdc0152a00471eec6ed0dd0f7d55d0b975498a00ba05e94100c84ad639a49","examples/halmark/main.rs":"4604737f714943383c57feac2b8468ecf15e9e60c54a5303455e9953ec5c79fb","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"095113a1ba0851652a77aabfc8fa6ea7edcc2d09e91fd1e5009ead87d5998ea9","examples/ray-traced-triangle/main.rs":"955c2b8700c3b2daf14e9ef963ff499ed185b6f349dbc63caa422b2cf4942a1f","examples/ray-traced-triangle/shader.wgsl":"cc10caf92746724a71f6dd0dbc3a71e57b37c7d1d83278556805a535c0728a9d","src/auxil/dxgi/conv.rs":"760cd4eaa79b530368a30140b96bf73ac4fbdb4025eb95f0bed581638c8bb1cb","src/auxil/dxgi/exception.rs":"f0cfb5a0adcdc3b6db909601fee51ad51368f5da269bcd46e4dbea45a3bec4b1","src/auxil/dxgi/factory.rs":"5f861fbfe2f4cce08722a95283549b8f62b96f24a306d080d9f1730ae53501d8","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"20c8eb03d738062dff198feca6327addb9882ed0462be842c789eadf7dca0573","src/auxil/dxgi/time.rs":"b6f966b250e9424d5d7e4065f2108cba87197c1e30baae6d87083055d1bc5a4b","src/auxil/mod.rs":"720ef2aae258733322a3274fd858f91effb8951dabaf7bbfd8a9a0be2d2dba97","src/auxil/renderdoc.rs":"c2f849f70f576b0c9b0d32dd155b6a6353f74dff59cbeeaa994a12789d047c0f","src/dx12/adapter.rs":"5143d009ab75950df6f6e311ca07108dedd373b99029d0eac9b882e4880893ea","src/dx12/command.rs":"bb3cc2ff1e77c0e4434eef8cab57e9018a1d00738fda71b860cdfc4fe802c0a4","src/dx12/conv.rs":"94d35f117ae003b07049f3a0bc6c45a0ffda9fb8053233d39c173cfb1b644403","src/dx12/descriptor.rs":"e06eb08bee4c805fa76b6ab791893b5b563ee60de9c8f8d8e0e21ab97ade5664","src/dx12/device.rs":"2a72beac1496b1682700e07923e9ad6ce7271e5a88641bf6c6d0b9b893b46cd9","src/dx12/instance.rs":"351a4e0d526de8eafc74bf5f01a41da48efa39e0c66704a85da72e1140b159d4","src/dx12/mod.rs":"4ec20d1082f10c7429db0fcdc6261210a0ff1565e87f4ab799719dc00aa636e0","src/dx12/shader_compilation.rs":"419ce7fe4df2973845851fac045dab21157eec6b26a573012f22fa41fc130b5b","src/dx12/suballocation.rs":"6939fc36223a15cc070c744d0418f9ac6fa2829d794af17cdea7c61eb5f8d2c0","src/dx12/types.rs":"9573736baaa0ef607367c3b72144556d24faf677a26bb8df49a4372a1348e06b","src/dx12/view.rs":"792772e9c87840dcd045b7381a03162eb4a501492a95ca586e77e81aed621c67","src/empty.rs":"5c3a5e39d45b4522ff3496fe6ec3b4a7afd906b6095dff1cad113c826aa9ea62","src/gles/adapter.rs":"05dd64c42b8b8265cfa1913dfdb9d1d7730abc05d189ed48bb0aa190debd90f6","src/gles/command.rs":"7118e42376e403e0d13db007534529d0e0650ff938a327cbdb0d6c90bee876de","src/gles/conv.rs":"5d15d3a33032d32ff99bc338fba0689fa54c76d0714e335fe48523d841df386f","src/gles/device.rs":"087fcfaf796b3fba2e6d638bb9840df941dd89aae43fcd8f528baf7b9ad9bd05","src/gles/egl.rs":"5ae9499e56f48ebe1797533c091529e77494ef69e32ea23e08e9135ba63188d1","src/gles/emscripten.rs":"19bb73a9d140645f3f32cd48b002151711a9b8456e213eab5f3a2be79239e147","src/gles/mod.rs":"772cf714874d12d815f2b0cf3309fd970545c582e8c2bc56eb1b266b013f5afb","src/gles/queue.rs":"9159af1636e838462ec562f25bbcacd15bc0a7e63606a3352e04f1f39818c61b","src/gles/shaders/clear.frag":"9133ed8ed
97d3641fbb6b5f5ea894a3554c629ccc1b80a5fc9221d7293aa1954","src/gles/shaders/clear.vert":"a543768725f4121ff2e9e1fb5b00644931e9d6f2f946c0ef01968afb5a135abd","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"d263695d45736d3c6ec3528c8c33fe6cf3767d3429a13a92d88b4fdc7b6340fb","src/gles/wgl.rs":"80351e261e2eaa47fff3ec4118d4ce781b24ab9a40072c8b3525baf09f041aca","src/lib.rs":"93873ebd663ed115a4bdd554eb5e33658658c89dd2fd4a5b33eda57417ab8d7d","src/metal/adapter.rs":"48747609f839dd9dbb5f6bc0a89f7f7017458e40dabc375efb07fbc93e36dfaa","src/metal/command.rs":"661b38a75d4f4cd1b0d6957f1f09db0743ec3a13bbafba9baa931894ee193f48","src/metal/conv.rs":"0bce6a8d0ccef16783475803d70d35e03ab7938c19374e22c9d253abe1f8b111","src/metal/device.rs":"c5deeecf475e0aa4b2027c656ea19207716f84b56cfa7c9132dca504d1abebfb","src/metal/mod.rs":"17665544754102ccf5f4bb1ccc0493ee8d2dbe45b22470bddaf9e609c24c0774","src/metal/surface.rs":"f2b9b65d4117db2b16c04469c573358eb65de104d5a72aa02da8483ee243cbd3","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"a0f365b9d4fea8ec81b8f6211648a78ecf1e8442aaed3f41819b59ce1c66f05d","src/vulkan/command.rs":"e5a88eab59b3864cdf44ba2231270e16045505dc549b8b90251031de452ba826","src/vulkan/conv.rs":"7e6266e3a0b7d0b8d5d51362a0386a84bc047350eeac663b6352a94d5e5c0a87","src/vulkan/device.rs":"9824d597dbb51030bd337e80bb0f1eab6fdb6935fc87dfd8beae2c1f1048fbcf","src/vulkan/instance.rs":"fb583496865eb67b3997503ec58e8e2518fc88175aa3cc4c19b8022be267f1ec","src/vulkan/mod.rs":"5c873db859e740876e072bed752e76940dd97a35f3d532509a6357cb0fb9119b"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"eaa7b2b51fbe98c0721dc52d94c64b48d2d6e351bf36da3e756378a8d8ebc1de","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","README.md":"099ee611a911dc19330a61bffcde13663929a51b25ac528ee33ea796d695491e","build.rs":"c80bdc0152a00471eec6ed0dd0f7d55d0b975498a00ba05e94100c84ad639a49","examples/halmark/main.rs":"4604737f714943383c57feac2b8468ecf15e9e60c54a5303455e9953ec5c79fb","examples/halmark/shader.wgsl":"26c256ec36d6f0e9a1647431ca772766bee4382d64eaa718ba7b488dcfb6bcca","examples/raw-gles.em.html":"70fbe68394a1a4522192de1dcfaf7d399f60d7bdf5de70b708f9bb0417427546","examples/raw-gles.rs":"095113a1ba0851652a77aabfc8fa6ea7edcc2d09e91fd1e5009ead87d5998ea9","examples/ray-traced-triangle/main.rs":"955c2b8700c3b2daf14e9ef963ff499ed185b6f349dbc63caa422b2cf4942a1f","examples/ray-traced-triangle/shader.wgsl":"cc10caf92746724a71f6dd0dbc3a71e57b37c7d1d83278556805a535c0728a9d","src/auxil/dxgi/conv.rs":"760cd4eaa79b530368a30140b96bf73ac4fbdb4025eb95f0bed581638c8bb1cb","src/auxil/dxgi/exception.rs":"f0cfb5a0adcdc3b6db909601fee51ad51368f5da269bcd46e4dbea45a3bec4b1","src/auxil/dxgi/factory.rs":"5f861fbfe2f4cce08722a95283549b8f62b96f24a306d080d9f1730ae53501d8","src/auxil/dxgi/mod.rs":"a202564d9ac97530b16a234b87d180cd345aae705e082a9b1177dcde813645f9","src/auxil/dxgi/result.rs":"79fe5aa17a2b21a7f06b1b604200c3c3e73fca31e8193aab80b5b15e7e9818a0","src/auxil/dxgi/time.rs":"b6f966b250e9424d5d7e4065f2108cba87197c1e30baae6d87083055d1bc5a4b","src/auxil/mod.rs":"720ef2aae258733322a3274fd858f91effb8951dabaf7bbfd8a9a0be2d2dba97","src/auxil/renderdoc.rs":"c2f849f70f576b0c9b0d32dd155b6a6353f74dff59cbeeaa994a12789d047c0f","src/dx12/adapter.rs":"7d647c9a1211e564fb1220c65df26fe2c519e5eddfa89291eaea45be4b60746a","src/dx12/command.rs":"6fe77b8b27c6428128ed0c3bcf7517e511c3c1eec8491a08936a696d5cb30751","src/dx12/conv.rs":"94d35f117ae003b07049f3a0bc6c45a0ffda9fb8053233d39c173cfb1b644403","src/dx12/descriptor.rs":"e06eb08bee4c805fa76b6ab791893b5b563ee60de9c8f8d8e0e21ab97ade5664","src/dx12/device.rs":"f7ca4a30085fdaecc321a01344f9d8cd907b7ba5a1b92f13a3bd9faad1934ed8","src/dx12/instance.rs":"351a4e0d526de8eafc74bf5f01a41da48efa39e0c66704a85da72e1140b159d4","src/dx12/mod.rs":"4b9d5e2414d628ed537f32f46604eeb95912ad9d5ee61cf4ce11c8dd6a88c8ab","src/dx12/shader_compilation.rs":"5087adb8576e2d7751619dfdf8b37c573bb4e494290c594077ca3208cce1e746","src/dx12/suballocation.rs":"6939fc36223a15cc070c744d0418f9ac6fa2829d794af17cdea7c61eb5f8d2c0","src/dx12/types.rs":"9573736baaa0ef607367c3b72144556d24faf677a26bb8df49a4372a1348e06b","src/dx12/view.rs":"792772e9c87840dcd045b7381a03162eb4a501492a95ca586e77e81aed621c67","src/empty.rs":"5c3a5e39d45b4522ff3496fe6ec3b4a7afd906b6095dff1cad113c826aa9ea62","src/gles/adapter.rs":"3175c86212b6c8caa099a3e34750c18251107461314c02f77c984e5b8301051a","src/gles/command.rs":"9f9ef3d97fcb2bc521b85141dee1ca9e8fe06b08d861766c3b3e9a2f3a53b494","src/gles/conv.rs":"5d15d3a33032d32ff99bc338fba0689fa54c76d0714e335fe48523d841df386f","src/gles/device.rs":"7ccd7aa3b878159190092bf279158289d754cc695bd27b9ec7177cd9b86b37c5","src/gles/egl.rs":"ad9b0ddc66877ae4088511283b8c860dd09b0b4d2c1fc51246c6935aa16703eb","src/gles/emscripten.rs":"19bb73a9d140645f3f32cd48b002151711a9b8456e213eab5f3a2be79239e147","src/gles/mod.rs":"b8999f76ad45e07312b291457100f12699ba6a2635c1f1913b0648e9a9394015","src/gles/queue.rs":"3ead252c54c673da6736a0c0c6b63c848791bc78042def3f3ffff8ffce2c6e64","src/gles/shaders/clear.frag":"9133ed8ed
97d3641fbb6b5f5ea894a3554c629ccc1b80a5fc9221d7293aa1954","src/gles/shaders/clear.vert":"a543768725f4121ff2e9e1fb5b00644931e9d6f2f946c0ef01968afb5a135abd","src/gles/shaders/srgb_present.frag":"dd9a43c339a2fa4ccf7f6a1854c6f400cabf271a7d5e9230768e9f39d47f3ff5","src/gles/shaders/srgb_present.vert":"6e85d489403d80b81cc94790730bb53b309dfc5eeede8f1ea3412a660f31d357","src/gles/web.rs":"d263695d45736d3c6ec3528c8c33fe6cf3767d3429a13a92d88b4fdc7b6340fb","src/gles/wgl.rs":"06e947912c357c5275090b12b7e31e596ff264fd460e2449b6db4b79284eb74d","src/lib.rs":"c8b8a95f5bfd58eaada0af2cd0abc80f888aeea85969a1363f4061cc9b542ca4","src/metal/adapter.rs":"bb5d0ca1cecbd914cbb29487303be4ed69035469a8bc137784d5bbb6ab36cec7","src/metal/command.rs":"661b38a75d4f4cd1b0d6957f1f09db0743ec3a13bbafba9baa931894ee193f48","src/metal/conv.rs":"0bce6a8d0ccef16783475803d70d35e03ab7938c19374e22c9d253abe1f8b111","src/metal/device.rs":"c5deeecf475e0aa4b2027c656ea19207716f84b56cfa7c9132dca504d1abebfb","src/metal/mod.rs":"f6d12246a6c7e6d998db796a009702f289b5f56bd35f01c0a619f5345fb363c9","src/metal/surface.rs":"f2b9b65d4117db2b16c04469c573358eb65de104d5a72aa02da8483ee243cbd3","src/metal/time.rs":"c32d69f30e846dfcc0e39e01097fb80df63b2bebb6586143bb62494999850246","src/vulkan/adapter.rs":"ed980734c8239bad7f3371e0e778ec63ecea5fe971f04c3dcdd3fe55c359f63b","src/vulkan/command.rs":"e5a88eab59b3864cdf44ba2231270e16045505dc549b8b90251031de452ba826","src/vulkan/conv.rs":"7e6266e3a0b7d0b8d5d51362a0386a84bc047350eeac663b6352a94d5e5c0a87","src/vulkan/device.rs":"9824d597dbb51030bd337e80bb0f1eab6fdb6935fc87dfd8beae2c1f1048fbcf","src/vulkan/instance.rs":"cd4aa3a8ed343076446117bae21fc438fe8761054489ec7d1ed7c31512c2e5ec","src/vulkan/mod.rs":"0c6bfb321b693930bcae3e61d06ff7b71965a64761ce39d757fc609d4b46a03e"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/wgpu-hal/Cargo.toml b/third_party/rust/wgpu-hal/Cargo.toml
index 88f96fb59b..47195f996d 100644
--- a/third_party/rust/wgpu-hal/Cargo.toml
+++ b/third_party/rust/wgpu-hal/Cargo.toml
@@ -95,6 +95,7 @@ cfg_aliases = "0.1"
[features]
default = ["link"]
+device_lost_panic = []
dx12 = [
"naga/hlsl-out",
"d3d12",
@@ -116,12 +117,15 @@ gles = [
"glutin_wgl_sys",
"khronos-egl",
"libloading",
+ "ndk-sys",
]
+internal_error_panic = []
link = ["metal/link"]
metal = [
"naga/msl-out",
"block",
]
+oom_panic = []
renderdoc = [
"libloading",
"renderdoc-sys",
@@ -133,6 +137,7 @@ vulkan = [
"gpu-descriptor",
"libloading",
"smallvec",
+ "android_system_properties",
]
windows_rs = ["gpu-allocator"]
@@ -191,8 +196,13 @@ optional = true
[target."cfg(not(target_arch = \"wasm32\"))".dev-dependencies]
glutin = "0.29.1"
-[target."cfg(target_os = \"android\")".dependencies]
-android_system_properties = "0.1.1"
+[target."cfg(target_os = \"android\")".dependencies.android_system_properties]
+version = "0.1.1"
+optional = true
+
+[target."cfg(target_os = \"android\")".dependencies.ndk-sys]
+version = "0.5.0"
+optional = true
[target."cfg(target_os = \"emscripten\")".dependencies.khronos-egl]
version = "6"
diff --git a/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs b/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs
index db013d2dec..2ac4464568 100644
--- a/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs
+++ b/third_party/rust/wgpu-hal/src/auxil/dxgi/result.rs
@@ -21,8 +21,26 @@ impl HResult<()> for i32 {
Err(Cow::Borrowed(description))
}
fn into_device_result(self, description: &str) -> Result<(), crate::DeviceError> {
+ #![allow(unreachable_code)]
+
self.into_result().map_err(|err| {
log::error!("{} failed: {}", description, err);
+
+ match self {
+ winerror::E_OUTOFMEMORY => {
+ #[cfg(feature = "oom_panic")]
+ panic!("{description} failed: Out of memory");
+ }
+ winerror::DXGI_ERROR_DEVICE_RESET | winerror::DXGI_ERROR_DEVICE_REMOVED => {
+ #[cfg(feature = "device_lost_panic")]
+ panic!("{description} failed: Device lost ({err})");
+ }
+ _ => {
+ #[cfg(feature = "internal_error_panic")]
+ panic!("{description} failed: {err}");
+ }
+ }
+
if self == winerror::E_OUTOFMEMORY {
crate::DeviceError::OutOfMemory
} else {
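
The new oom_panic, device_lost_panic and internal_error_panic cargo features (added to wgpu-hal's Cargo.toml above) turn particular HRESULT classes into immediate panics before the usual DeviceError mapping. A rough sketch of the same pattern with a stand-in error type; the constants are spelled out locally for the sketch, whereas the real code uses winapi's winerror values.

    #[derive(Debug)]
    enum DeviceError {
        OutOfMemory,
        Lost,
    }

    fn into_device_result(code: i32, description: &str) -> Result<(), DeviceError> {
        // With every panic feature enabled the trailing Err would be unreachable.
        #![allow(unreachable_code)]

        const E_OUTOFMEMORY: i32 = 0x8007_000E_u32 as i32;
        const DXGI_ERROR_DEVICE_REMOVED: i32 = 0x887A_0005_u32 as i32;

        if code >= 0 {
            return Ok(());
        }

        match code {
            E_OUTOFMEMORY => {
                #[cfg(feature = "oom_panic")]
                panic!("{description} failed: Out of memory");
            }
            DXGI_ERROR_DEVICE_REMOVED => {
                #[cfg(feature = "device_lost_panic")]
                panic!("{description} failed: Device lost ({code:#x})");
            }
            _ => {
                #[cfg(feature = "internal_error_panic")]
                panic!("{description} failed: {code:#x}");
            }
        }

        Err(if code == E_OUTOFMEMORY {
            DeviceError::OutOfMemory
        } else {
            DeviceError::Lost
        })
    }
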
diff --git a/third_party/rust/wgpu-hal/src/dx12/adapter.rs b/third_party/rust/wgpu-hal/src/dx12/adapter.rs
index f6027014d2..960e1790a9 100644
--- a/third_party/rust/wgpu-hal/src/dx12/adapter.rs
+++ b/third_party/rust/wgpu-hal/src/dx12/adapter.rs
@@ -242,6 +242,7 @@ impl super::Adapter {
| wgt::Features::POLYGON_MODE_LINE
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| wgt::Features::TIMESTAMP_QUERY
+ | wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS
| wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES
| wgt::Features::TEXTURE_COMPRESSION_BC
| wgt::Features::CLEAR_TEXTURE
@@ -294,6 +295,22 @@ impl super::Adapter {
bgra8unorm_storage_supported,
);
+ // we must be using DXC because uint64_t was added with Shader Model 6
+ // and FXC only supports up to 5.1
+ let int64_shader_ops_supported = dxc_container.is_some() && {
+ let mut features1: d3d12_ty::D3D12_FEATURE_DATA_D3D12_OPTIONS1 =
+ unsafe { mem::zeroed() };
+ let hr = unsafe {
+ device.CheckFeatureSupport(
+ d3d12_ty::D3D12_FEATURE_D3D12_OPTIONS1,
+ &mut features1 as *mut _ as *mut _,
+ mem::size_of::<d3d12_ty::D3D12_FEATURE_DATA_D3D12_OPTIONS1>() as _,
+ )
+ };
+ hr == 0 && features1.Int64ShaderOps != 0
+ };
+ features.set(wgt::Features::SHADER_INT64, int64_shader_ops_supported);
+
// float32-filterable should always be available on d3d12
features.set(wgt::Features::FLOAT32_FILTERABLE, true);
@@ -307,6 +324,12 @@ impl super::Adapter {
downlevel.flags -=
wgt::DownlevelFlags::VERTEX_AND_INSTANCE_INDEX_RESPECTS_RESPECTIVE_FIRST_VALUE_IN_INDIRECT_DRAW;
+ // See https://learn.microsoft.com/en-us/windows/win32/direct3d12/hardware-feature-levels#feature-level-support
+ let max_color_attachments = 8;
+ // TODO: determine this programmatically if possible.
+ // https://github.com/gpuweb/gpuweb/issues/2965#issuecomment-1361315447
+ let max_color_attachment_bytes_per_sample = 64;
+
Some(crate::ExposedAdapter {
adapter: super::Adapter {
raw: adapter,
@@ -377,6 +400,8 @@ impl super::Adapter {
d3d12_ty::D3D12_CONSTANT_BUFFER_DATA_PLACEMENT_ALIGNMENT,
min_storage_buffer_offset_alignment: 4,
max_inter_stage_shader_components: base.max_inter_stage_shader_components,
+ max_color_attachments,
+ max_color_attachment_bytes_per_sample,
max_compute_workgroup_storage_size: base.max_compute_workgroup_storage_size, //TODO?
max_compute_invocations_per_workgroup:
d3d12_ty::D3D12_CS_4_X_THREAD_GROUP_MAX_THREADS_PER_GROUP,
diff --git a/third_party/rust/wgpu-hal/src/dx12/command.rs b/third_party/rust/wgpu-hal/src/dx12/command.rs
index f527898d90..9d96d29cae 100644
--- a/third_party/rust/wgpu-hal/src/dx12/command.rs
+++ b/third_party/rust/wgpu-hal/src/dx12/command.rs
@@ -56,6 +56,13 @@ impl super::Temp {
}
}
+impl Drop for super::CommandEncoder {
+ fn drop(&mut self) {
+ use crate::CommandEncoder;
+ unsafe { self.discard_encoding() }
+ }
+}
+
impl super::CommandEncoder {
unsafe fn begin_pass(&mut self, kind: super::PassKind, label: crate::Label) {
let list = self.list.as_ref().unwrap();
diff --git a/third_party/rust/wgpu-hal/src/dx12/device.rs b/third_party/rust/wgpu-hal/src/dx12/device.rs
index 2507c125f8..3603b033b8 100644
--- a/third_party/rust/wgpu-hal/src/dx12/device.rs
+++ b/third_party/rust/wgpu-hal/src/dx12/device.rs
@@ -663,11 +663,7 @@ impl crate::Device<super::Api> for super::Device {
end_of_pass_timer_query: None,
})
}
- unsafe fn destroy_command_encoder(&self, encoder: super::CommandEncoder) {
- if let Some(list) = encoder.list {
- list.close();
- }
- }
+ unsafe fn destroy_command_encoder(&self, _encoder: super::CommandEncoder) {}
unsafe fn create_bind_group_layout(
&self,
diff --git a/third_party/rust/wgpu-hal/src/dx12/mod.rs b/third_party/rust/wgpu-hal/src/dx12/mod.rs
index 053b880689..13b43f8aca 100644
--- a/third_party/rust/wgpu-hal/src/dx12/mod.rs
+++ b/third_party/rust/wgpu-hal/src/dx12/mod.rs
@@ -238,6 +238,9 @@ struct DeviceShared {
heap_samplers: descriptor::GeneralHeap,
}
+unsafe impl Send for DeviceShared {}
+unsafe impl Sync for DeviceShared {}
+
pub struct Device {
raw: d3d12::Device,
present_queue: d3d12::CommandQueue,
diff --git a/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs b/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs
index df040dba15..288fc24745 100644
--- a/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs
+++ b/third_party/rust/wgpu-hal/src/dx12/shader_compilation.rs
@@ -13,7 +13,7 @@ use crate::auxil::dxgi::result::HResult;
pub(super) fn compile_fxc(
device: &super::Device,
- source: &String,
+ source: &str,
source_name: &str,
raw_ep: &std::ffi::CString,
stage_bit: wgt::ShaderStages,
@@ -211,7 +211,7 @@ mod dxc {
Err(crate::PipelineError::Linkage(
stage_bit,
format!(
- "DXC compile error: {:?}",
+ "DXC compile error: {}",
get_error_string_from_dxc_result(&dxc_container.library, &e.0)
.unwrap_or_default()
),
diff --git a/third_party/rust/wgpu-hal/src/gles/adapter.rs b/third_party/rust/wgpu-hal/src/gles/adapter.rs
index afa4023797..c09725e85f 100644
--- a/third_party/rust/wgpu-hal/src/gles/adapter.rs
+++ b/third_party/rust/wgpu-hal/src/gles/adapter.rs
@@ -4,6 +4,7 @@ use std::sync::{atomic::AtomicU8, Arc};
use wgt::AstcChannel;
use crate::auxil::db;
+use crate::gles::ShaderClearProgram;
// https://webgl2fundamentals.org/webgl/lessons/webgl-data-textures.html
@@ -435,7 +436,8 @@ impl super::Adapter {
let mut features = wgt::Features::empty()
| wgt::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| wgt::Features::CLEAR_TEXTURE
- | wgt::Features::PUSH_CONSTANTS;
+ | wgt::Features::PUSH_CONSTANTS
+ | wgt::Features::DEPTH32FLOAT_STENCIL8;
features.set(
wgt::Features::ADDRESS_MODE_CLAMP_TO_BORDER | wgt::Features::ADDRESS_MODE_CLAMP_TO_ZERO,
extensions.contains("GL_EXT_texture_border_clamp")
@@ -472,6 +474,7 @@ impl super::Adapter {
features.set(wgt::Features::SHADER_UNUSED_VERTEX_OUTPUT, true);
if extensions.contains("GL_ARB_timer_query") {
features.set(wgt::Features::TIMESTAMP_QUERY, true);
+ features.set(wgt::Features::TIMESTAMP_QUERY_INSIDE_ENCODERS, true);
features.set(wgt::Features::TIMESTAMP_QUERY_INSIDE_PASSES, true);
}
let gl_bcn_exts = [
@@ -652,6 +655,15 @@ impl super::Adapter {
0
};
+ let max_color_attachments = unsafe {
+ gl.get_parameter_i32(glow::MAX_COLOR_ATTACHMENTS)
+ .min(gl.get_parameter_i32(glow::MAX_DRAW_BUFFERS))
+ .min(crate::MAX_COLOR_ATTACHMENTS as i32) as u32
+ };
+
+ // TODO: programmatically determine this.
+ let max_color_attachment_bytes_per_sample = 32;
+
let limits = wgt::Limits {
max_texture_dimension_1d: max_texture_size,
max_texture_dimension_2d: max_texture_size,
@@ -719,9 +731,21 @@ impl super::Adapter {
max_push_constant_size: super::MAX_PUSH_CONSTANTS as u32 * 4,
min_uniform_buffer_offset_alignment,
min_storage_buffer_offset_alignment,
- max_inter_stage_shader_components: unsafe {
- gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS)
- } as u32,
+ max_inter_stage_shader_components: {
+                // MAX_VARYING_COMPONENTS may return 0, because it has been deprecated since OpenGL 3.2 core;
+                // an OpenGL context created with the core profile and forward-compatibility=true
+                // makes deprecated constants unavailable.
+ let max_varying_components =
+ unsafe { gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS) } as u32;
+ if max_varying_components == 0 {
+ // default value for max_inter_stage_shader_components
+ 60
+ } else {
+ max_varying_components
+ }
+ },
+ max_color_attachments,
+ max_color_attachment_bytes_per_sample,
max_compute_workgroup_storage_size: if supports_work_group_params {
(unsafe { gl.get_parameter_i32(glow::MAX_COMPUTE_SHARED_MEMORY_SIZE) } as u32)
} else {
@@ -779,6 +803,7 @@ impl super::Adapter {
}
let downlevel_defaults = wgt::DownlevelLimits {};
+ let max_samples = unsafe { gl.get_parameter_i32(glow::MAX_SAMPLES) };
// Drop the GL guard so we can move the context into AdapterShared
// ( on Wasm the gl handle is just a ref so we tell clippy to allow
@@ -797,6 +822,7 @@ impl super::Adapter {
next_shader_id: Default::default(),
program_cache: Default::default(),
es: es_ver.is_some(),
+ max_msaa_samples: max_samples,
}),
},
info: Self::make_info(vendor, renderer),
@@ -825,7 +851,14 @@ impl super::Adapter {
let source = if es {
format!("#version 300 es\nprecision lowp float;\n{source}")
} else {
- format!("#version 130\n{source}")
+ let version = gl.version();
+ if version.major == 3 && version.minor == 0 {
+ // OpenGL 3.0 only supports this format
+ format!("#version 130\n{source}")
+ } else {
+ // OpenGL 3.1+ support this format
+ format!("#version 140\n{source}")
+ }
};
let shader = unsafe { gl.create_shader(shader_type) }.expect("Could not create shader");
unsafe { gl.shader_source(shader, &source) };
@@ -846,7 +879,7 @@ impl super::Adapter {
unsafe fn create_shader_clear_program(
gl: &glow::Context,
es: bool,
- ) -> Option<(glow::Program, glow::UniformLocation)> {
+ ) -> Option<ShaderClearProgram> {
let program = unsafe { gl.create_program() }.expect("Could not create shader program");
let vertex = unsafe {
Self::compile_shader(
@@ -882,7 +915,10 @@ impl super::Adapter {
unsafe { gl.delete_shader(vertex) };
unsafe { gl.delete_shader(fragment) };
- Some((program, color_uniform_location))
+ Some(ShaderClearProgram {
+ program,
+ color_uniform_location,
+ })
}
}
@@ -908,9 +944,18 @@ impl crate::Adapter<super::Api> for super::Adapter {
// Compile the shader program we use for doing manual clears to work around Mesa fastclear
// bug.
- let (shader_clear_program, shader_clear_program_color_uniform_location) = unsafe {
- Self::create_shader_clear_program(gl, self.shared.es)
- .ok_or(crate::DeviceError::ResourceCreationFailed)?
+ let shader_clear_program = if self
+ .shared
+ .workarounds
+ .contains(super::Workarounds::MESA_I915_SRGB_SHADER_CLEAR)
+ {
+ Some(unsafe {
+ Self::create_shader_clear_program(gl, self.shared.es)
+ .ok_or(crate::DeviceError::ResourceCreationFailed)?
+ })
+ } else {
+ // If we don't need the workaround, don't waste time and resources compiling the clear program
+ None
};
Ok(crate::OpenDevice {
@@ -928,7 +973,6 @@ impl crate::Adapter<super::Api> for super::Adapter {
copy_fbo: unsafe { gl.create_framebuffer() }
.map_err(|_| crate::DeviceError::OutOfMemory)?,
shader_clear_program,
- shader_clear_program_color_uniform_location,
zero_buffer,
temp_query_results: Mutex::new(Vec::new()),
draw_buffer_count: AtomicU8::new(1),
@@ -945,12 +989,7 @@ impl crate::Adapter<super::Api> for super::Adapter {
use wgt::TextureFormat as Tf;
let sample_count = {
- let max_samples = unsafe {
- self.shared
- .context
- .lock()
- .get_parameter_i32(glow::MAX_SAMPLES)
- };
+ let max_samples = self.shared.max_msaa_samples;
if max_samples >= 16 {
Tfc::MULTISAMPLE_X2
| Tfc::MULTISAMPLE_X4
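
The GLES adapter now tolerates MAX_VARYING_COMPONENTS reporting 0 on forward-compatible core contexts and falls back to the default limit of 60. The shape of that fallback, sketched with the GL query hidden behind a closure (the function and closure names are illustrative, not the wgpu-hal code):

    /// Query a possibly deprecated GL parameter and substitute a default when the
    /// driver reports 0 because the constant is unavailable in this profile.
    fn max_inter_stage_components(query: impl Fn() -> i32) -> u32 {
        const DEFAULT: u32 = 60; // default value for max_inter_stage_shader_components
        match query() {
            v if v > 0 => v as u32,
            _ => DEFAULT,
        }
    }

    // Usage in the adapter would look roughly like:
    //     let limit = max_inter_stage_components(|| unsafe {
    //         gl.get_parameter_i32(glow::MAX_VARYING_COMPONENTS)
    //     });
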
diff --git a/third_party/rust/wgpu-hal/src/gles/command.rs b/third_party/rust/wgpu-hal/src/gles/command.rs
index 926122e4ad..4385e2a31e 100644
--- a/third_party/rust/wgpu-hal/src/gles/command.rs
+++ b/third_party/rust/wgpu-hal/src/gles/command.rs
@@ -93,6 +93,13 @@ impl super::CommandBuffer {
}
}
+impl Drop for super::CommandEncoder {
+ fn drop(&mut self) {
+ use crate::CommandEncoder;
+ unsafe { self.discard_encoding() }
+ }
+}
+
impl super::CommandEncoder {
fn rebind_stencil_func(&mut self) {
fn make(s: &super::StencilSide, face: u32) -> C {
diff --git a/third_party/rust/wgpu-hal/src/gles/device.rs b/third_party/rust/wgpu-hal/src/gles/device.rs
index d0abe2c169..2678488cf8 100644
--- a/third_party/rust/wgpu-hal/src/gles/device.rs
+++ b/third_party/rust/wgpu-hal/src/gles/device.rs
@@ -1194,13 +1194,16 @@ impl crate::Device<super::Api> for super::Device {
let sampler = desc.samplers[entry.resource_index as usize];
super::RawBinding::Sampler(sampler.raw)
}
- wgt::BindingType::Texture { .. } => {
+ wgt::BindingType::Texture { view_dimension, .. } => {
let view = desc.textures[entry.resource_index as usize].view;
if view.array_layers.start != 0 {
log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
"This is an implementation problem of wgpu-hal/gles backend.")
}
let (raw, target) = view.inner.as_native();
+
+ super::Texture::log_failing_target_heuristics(view_dimension, target);
+
super::RawBinding::Texture {
raw,
target,
diff --git a/third_party/rust/wgpu-hal/src/gles/egl.rs b/third_party/rust/wgpu-hal/src/gles/egl.rs
index aa985d8121..f4bfcf5487 100644
--- a/third_party/rust/wgpu-hal/src/gles/egl.rs
+++ b/third_party/rust/wgpu-hal/src/gles/egl.rs
@@ -1,7 +1,8 @@
use glow::HasContext;
+use once_cell::sync::Lazy;
use parking_lot::{Mutex, MutexGuard, RwLock};
-use std::{ffi, os::raw, ptr, rc::Rc, sync::Arc, time::Duration};
+use std::{collections::HashMap, ffi, os::raw, ptr, rc::Rc, sync::Arc, time::Duration};
/// The amount of time to wait while trying to obtain a lock to the adapter context
const CONTEXT_LOCK_TIMEOUT_SECS: u64 = 1;
@@ -50,16 +51,6 @@ type WlEglWindowResizeFun = unsafe extern "system" fn(
type WlEglWindowDestroyFun = unsafe extern "system" fn(window: *const raw::c_void);
-#[cfg(target_os = "android")]
-extern "C" {
- pub fn ANativeWindow_setBuffersGeometry(
- window: *mut raw::c_void,
- width: i32,
- height: i32,
- format: i32,
- ) -> i32;
-}
-
type EglLabel = *const raw::c_void;
#[allow(clippy::upper_case_acronyms)]
@@ -161,7 +152,7 @@ impl Drop for DisplayOwner {
fn open_x_display() -> Option<DisplayOwner> {
log::debug!("Loading X11 library to get the current display");
unsafe {
- let library = libloading::Library::new("libX11.so").ok()?;
+ let library = find_library(&["libX11.so.6", "libX11.so"])?;
let func: libloading::Symbol<XOpenDisplayFun> = library.get(b"XOpenDisplay").unwrap();
let result = func(ptr::null());
ptr::NonNull::new(result).map(|ptr| DisplayOwner {
@@ -442,6 +433,45 @@ struct Inner {
srgb_kind: SrgbFrameBufferKind,
}
+// Different calls to `eglGetPlatformDisplay` may return the same `Display`, making it a global
+// state of all our `EglContext`s. This forces us to track the number of such contexts to prevent
+// terminating the display if it's currently used by another `EglContext`.
+static DISPLAYS_REFERENCE_COUNT: Lazy<Mutex<HashMap<usize, usize>>> = Lazy::new(Default::default);
+
+fn initialize_display(
+ egl: &EglInstance,
+ display: khronos_egl::Display,
+) -> Result<(i32, i32), khronos_egl::Error> {
+ let mut guard = DISPLAYS_REFERENCE_COUNT.lock();
+ *guard.entry(display.as_ptr() as usize).or_default() += 1;
+
+ // We don't need to check the reference count here since according to the `eglInitialize`
+ // documentation, initializing an already initialized EGL display connection has no effect
+ // besides returning the version numbers.
+ egl.initialize(display)
+}
+
+fn terminate_display(
+ egl: &EglInstance,
+ display: khronos_egl::Display,
+) -> Result<(), khronos_egl::Error> {
+ let key = &(display.as_ptr() as usize);
+ let mut guard = DISPLAYS_REFERENCE_COUNT.lock();
+ let count_ref = guard
+ .get_mut(key)
+ .expect("Attempted to decref a display before incref was called");
+
+ if *count_ref > 1 {
+ *count_ref -= 1;
+
+ Ok(())
+ } else {
+ guard.remove(key);
+
+ egl.terminate(display)
+ }
+}
+
impl Inner {
fn create(
flags: wgt::InstanceFlags,
@@ -449,7 +479,7 @@ impl Inner {
display: khronos_egl::Display,
force_gles_minor_version: wgt::Gles3MinorVersion,
) -> Result<Self, crate::InstanceError> {
- let version = egl.initialize(display).map_err(|e| {
+ let version = initialize_display(&egl, display).map_err(|e| {
crate::InstanceError::with_source(
String::from("failed to initialize EGL display connection"),
e,
@@ -618,7 +648,8 @@ impl Drop for Inner {
{
log::warn!("Error in destroy_context: {:?}", e);
}
- if let Err(e) = self.egl.instance.terminate(self.egl.display) {
+
+ if let Err(e) = terminate_display(&self.egl.instance, self.egl.display) {
log::warn!("Error in terminate: {:?}", e);
}
}
@@ -783,11 +814,12 @@ impl crate::Instance<super::Api> for Instance {
(display, Some(Rc::new(display_owner)), WindowKind::AngleX11)
} else if client_ext_str.contains("EGL_MESA_platform_surfaceless") {
log::warn!("No windowing system present. Using surfaceless platform");
+ #[allow(clippy::unnecessary_literal_unwrap)] // This is only a literal on Emscripten
let egl = egl1_5.expect("Failed to get EGL 1.5 for surfaceless");
let display = unsafe {
egl.get_platform_display(
EGL_PLATFORM_SURFACELESS_MESA,
- std::ptr::null_mut(),
+ khronos_egl::DEFAULT_DISPLAY,
&[khronos_egl::ATTRIB_NONE],
)
}
@@ -863,7 +895,12 @@ impl crate::Instance<super::Api> for Instance {
.unwrap();
let ret = unsafe {
- ANativeWindow_setBuffersGeometry(handle.a_native_window.as_ptr(), 0, 0, format)
+ ndk_sys::ANativeWindow_setBuffersGeometry(
+ handle.a_native_window.as_ptr() as *mut ndk_sys::ANativeWindow,
+ 0,
+ 0,
+ format,
+ )
};
if ret != 0 {
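
Because separate calls to eglGetPlatformDisplay can hand back the same display, the patch reference-counts initialization per display pointer and only terminates the display once the last EglContext using it goes away. A rough standalone model of that bookkeeping, keyed on a plain usize instead of khronos_egl::Display and using std::sync::Mutex rather than once_cell (incref/decref are names invented for this sketch):

    use std::collections::HashMap;
    use std::sync::Mutex;

    static DISPLAY_REFCOUNTS: Mutex<Option<HashMap<usize, usize>>> = Mutex::new(None);

    /// Record one more user of `display`. Initialization itself can always be
    /// forwarded to EGL, since initializing an already initialized display only
    /// re-reports the version numbers.
    fn incref(display: usize) {
        let mut guard = DISPLAY_REFCOUNTS.lock().unwrap();
        let map = guard.get_or_insert_with(HashMap::new);
        *map.entry(display).or_default() += 1;
    }

    /// Drop one user of `display`; returns true only when the caller should
    /// actually terminate the display because no other context still uses it.
    fn decref(display: usize) -> bool {
        let mut guard = DISPLAY_REFCOUNTS.lock().unwrap();
        let map = guard.get_or_insert_with(HashMap::new);
        let count = map.entry(display).or_insert(1);
        *count -= 1;
        let last_user = *count == 0;
        if last_user {
            map.remove(&display);
        }
        last_user
    }
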
diff --git a/third_party/rust/wgpu-hal/src/gles/mod.rs b/third_party/rust/wgpu-hal/src/gles/mod.rs
index 646419c7fe..6f41f7c000 100644
--- a/third_party/rust/wgpu-hal/src/gles/mod.rs
+++ b/third_party/rust/wgpu-hal/src/gles/mod.rs
@@ -251,6 +251,11 @@ struct AdapterShared {
next_shader_id: AtomicU32,
program_cache: Mutex<ProgramCache>,
es: bool,
+
+ /// Result of `gl.get_parameter_i32(glow::MAX_SAMPLES)`.
+ /// Cached here so it doesn't need to be queried every time texture format capabilities are requested.
+    /// (repeating the query has been shown to add measurable overhead)
+ max_msaa_samples: i32,
}
pub struct Adapter {
@@ -264,6 +269,11 @@ pub struct Device {
render_doc: crate::auxil::renderdoc::RenderDoc,
}
+pub struct ShaderClearProgram {
+ pub program: glow::Program,
+ pub color_uniform_location: glow::UniformLocation,
+}
+
pub struct Queue {
shared: Arc<AdapterShared>,
features: wgt::Features,
@@ -271,9 +281,7 @@ pub struct Queue {
copy_fbo: glow::Framebuffer,
/// Shader program used to clear the screen for [`Workarounds::MESA_I915_SRGB_SHADER_CLEAR`]
/// devices.
- shader_clear_program: glow::Program,
- /// The uniform location of the color uniform in the shader clear program
- shader_clear_program_color_uniform_location: glow::UniformLocation,
+ shader_clear_program: Option<ShaderClearProgram>,
/// Keep a reasonably large buffer filled with zeroes, so that we can implement `ClearBuffer` of
/// zeroes by copying from it.
zero_buffer: glow::Buffer,
@@ -366,6 +374,8 @@ impl Texture {
/// Returns the `target`, whether the image is 3d and whether the image is a cubemap.
fn get_info_from_desc(desc: &TextureDescriptor) -> u32 {
match desc.dimension {
+            // WebGL (1 and 2) and some GLES versions do not support 1D textures,
+            // so `TEXTURE_2D` is used instead.
wgt::TextureDimension::D1 => glow::TEXTURE_2D,
wgt::TextureDimension::D2 => {
// HACK: detect a cube map; forces cube compatible textures to be cube textures
@@ -379,6 +389,43 @@ impl Texture {
wgt::TextureDimension::D3 => glow::TEXTURE_3D,
}
}
+
+ /// More information can be found in issues #1614 and #1574
+ fn log_failing_target_heuristics(view_dimension: wgt::TextureViewDimension, target: u32) {
+ let expected_target = match view_dimension {
+ wgt::TextureViewDimension::D1 => glow::TEXTURE_2D,
+ wgt::TextureViewDimension::D2 => glow::TEXTURE_2D,
+ wgt::TextureViewDimension::D2Array => glow::TEXTURE_2D_ARRAY,
+ wgt::TextureViewDimension::Cube => glow::TEXTURE_CUBE_MAP,
+ wgt::TextureViewDimension::CubeArray => glow::TEXTURE_CUBE_MAP_ARRAY,
+ wgt::TextureViewDimension::D3 => glow::TEXTURE_3D,
+ };
+
+ if expected_target == target {
+ return;
+ }
+
+ let buffer;
+ let got = match target {
+ glow::TEXTURE_2D => "D2",
+ glow::TEXTURE_2D_ARRAY => "D2Array",
+ glow::TEXTURE_CUBE_MAP => "Cube",
+ glow::TEXTURE_CUBE_MAP_ARRAY => "CubeArray",
+ glow::TEXTURE_3D => "D3",
+ target => {
+ buffer = target.to_string();
+ &buffer
+ }
+ };
+
+ log::error!(
+ "wgpu-hal heuristics assumed that the view dimension will be equal to `{got}` rather than `{view_dimension:?}`.\n{}\n{}\n{}\n{}",
+ "`D2` textures with `depth_or_array_layers == 1` are assumed to have view dimension `D2`",
+ "`D2` textures with `depth_or_array_layers > 1` are assumed to have view dimension `D2Array`",
+ "`D2` textures with `depth_or_array_layers == 6` are assumed to have view dimension `Cube`",
+ "`D2` textures with `depth_or_array_layers > 6 && depth_or_array_layers % 6 == 0` are assumed to have view dimension `CubeArray`",
+ );
+ }
}
#[derive(Clone, Debug)]
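
log_failing_target_heuristics warns when the GL target chosen at texture creation time does not match the view dimension later requested, because the backend has to guess the target from the descriptor alone. The guessing rule spelled out in the error message, restated as a tiny illustrative function keyed on depth_or_array_layers:

    /// Which view dimension a D2 texture is assumed to have, going only by its layer count.
    fn assumed_view_dimension(depth_or_array_layers: u32) -> &'static str {
        match depth_or_array_layers {
            1 => "D2",
            6 => "Cube",
            n if n > 6 && n % 6 == 0 => "CubeArray",
            _ => "D2Array",
        }
    }

    // For example, a 2D texture with 12 layers is assumed to be a cube array, so
    // binding it through a D2Array view triggers the heuristics warning above.
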
diff --git a/third_party/rust/wgpu-hal/src/gles/queue.rs b/third_party/rust/wgpu-hal/src/gles/queue.rs
index 6ec553bd29..5db5af9a16 100644
--- a/third_party/rust/wgpu-hal/src/gles/queue.rs
+++ b/third_party/rust/wgpu-hal/src/gles/queue.rs
@@ -40,10 +40,14 @@ fn get_z_offset(target: u32, base: &crate::TextureCopyBase) -> u32 {
impl super::Queue {
/// Performs a manual shader clear, used as a workaround for a clearing bug on mesa
unsafe fn perform_shader_clear(&self, gl: &glow::Context, draw_buffer: u32, color: [f32; 4]) {
- unsafe { gl.use_program(Some(self.shader_clear_program)) };
+ let shader_clear = self
+ .shader_clear_program
+ .as_ref()
+ .expect("shader_clear_program should always be set if the workaround is enabled");
+ unsafe { gl.use_program(Some(shader_clear.program)) };
unsafe {
gl.uniform_4_f32(
- Some(&self.shader_clear_program_color_uniform_location),
+ Some(&shader_clear.color_uniform_location),
color[0],
color[1],
color[2],
diff --git a/third_party/rust/wgpu-hal/src/gles/wgl.rs b/third_party/rust/wgpu-hal/src/gles/wgl.rs
index 6243430dc2..c9039090b7 100644
--- a/third_party/rust/wgpu-hal/src/gles/wgl.rs
+++ b/third_party/rust/wgpu-hal/src/gles/wgl.rs
@@ -160,6 +160,9 @@ struct Inner {
context: WglContext,
}
+unsafe impl Send for Inner {}
+unsafe impl Sync for Inner {}
+
pub struct Instance {
srgb_capable: bool,
inner: Arc<Mutex<Inner>>,
diff --git a/third_party/rust/wgpu-hal/src/lib.rs b/third_party/rust/wgpu-hal/src/lib.rs
index 5d8c6ddda8..f1794a4a89 100644
--- a/third_party/rust/wgpu-hal/src/lib.rs
+++ b/third_party/rust/wgpu-hal/src/lib.rs
@@ -16,6 +16,8 @@
#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))]
#![allow(
+ // this happens on the GL backend, where it is both thread safe and non-thread safe in the same code.
+ clippy::arc_with_non_send_sync,
// for `if_then_panic` until it reaches stable
unknown_lints,
// We use loops for getting early-out of scope without closures.
@@ -329,6 +331,9 @@ pub trait Device<A: Api>: WasmNotSendSync {
unsafe fn create_sampler(&self, desc: &SamplerDescriptor) -> Result<A::Sampler, DeviceError>;
unsafe fn destroy_sampler(&self, sampler: A::Sampler);
+ /// Create a fresh [`CommandEncoder`].
+ ///
+ /// The new `CommandEncoder` is in the "closed" state.
unsafe fn create_command_encoder(
&self,
desc: &CommandEncoderDescriptor<A>,
@@ -429,19 +434,95 @@ pub trait Queue<A: Api>: WasmNotSendSync {
unsafe fn get_timestamp_period(&self) -> f32;
}
-/// Encoder for commands in command buffers.
-/// Serves as a parent for all the encoded command buffers.
-/// Works in bursts of action: one or more command buffers are recorded,
-/// then submitted to a queue, and then it needs to be `reset_all()`.
+/// Encoder and allocation pool for `CommandBuffer`.
+///
+/// The life cycle of a `CommandBuffer` is as follows:
+///
+/// - Call [`Device::create_command_encoder`] to create a new
+/// `CommandEncoder`, in the "closed" state.
+///
+/// - Call `begin_encoding` on a closed `CommandEncoder` to begin
+/// recording commands. This puts the `CommandEncoder` in the
+/// "recording" state.
+///
+/// - Call methods like `copy_buffer_to_buffer`, `begin_render_pass`,
+/// etc. on a "recording" `CommandEncoder` to add commands to the
+/// list.
+///
+/// - Call `end_encoding` on a recording `CommandEncoder` to close the
+/// encoder and construct a fresh `CommandBuffer` consisting of the
+/// list of commands recorded up to that point.
+///
+/// - Call `discard_encoding` on a recording `CommandEncoder` to drop
+/// the commands recorded thus far and close the encoder.
+///
+/// - Call `reset_all` on a closed `CommandEncoder`, passing all the
+/// live `CommandBuffers` built from it. All the `CommandBuffer`s
+/// are destroyed, and their resources are freed.
+///
+/// # Safety
+///
+/// - The `CommandEncoder` must be in the states described above to
+/// make the given calls.
+///
+/// - A `CommandBuffer` that has been submitted for execution on the
+/// GPU must live until its execution is complete.
+///
+/// - A `CommandBuffer` must not outlive the `CommandEncoder` that
+/// built it.
+///
+/// - A `CommandEncoder` must not outlive its `Device`.
pub trait CommandEncoder<A: Api>: WasmNotSendSync + fmt::Debug {
/// Begin encoding a new command buffer.
+ ///
+ /// This puts this `CommandEncoder` in the "recording" state.
+ ///
+ /// # Safety
+ ///
+ /// This `CommandEncoder` must be in the "closed" state.
unsafe fn begin_encoding(&mut self, label: Label) -> Result<(), DeviceError>;
- /// Discard currently recorded list, if any.
+
+ /// Discard the command list under construction, if any.
+ ///
+ /// This puts this `CommandEncoder` in the "closed" state.
+ ///
+ /// # Safety
+ ///
+ /// This `CommandEncoder` must be in the "recording" state.
unsafe fn discard_encoding(&mut self);
+
+ /// Return a fresh [`CommandBuffer`] holding the recorded commands.
+ ///
+ /// The returned [`CommandBuffer`] holds all the commands recorded
+ /// on this `CommandEncoder` since the last call to
+ /// [`begin_encoding`].
+ ///
+ /// This puts this `CommandEncoder` in the "closed" state.
+ ///
+ /// # Safety
+ ///
+ /// This `CommandEncoder` must be in the "recording" state.
+ ///
+ /// The returned [`CommandBuffer`] must not outlive this
+ /// `CommandEncoder`. Implementations are allowed to build
+ /// `CommandBuffer`s that depend on storage owned by this
+ /// `CommandEncoder`.
+ ///
+ /// [`CommandBuffer`]: Api::CommandBuffer
+ /// [`begin_encoding`]: CommandEncoder::begin_encoding
unsafe fn end_encoding(&mut self) -> Result<A::CommandBuffer, DeviceError>;
- /// Reclaims all resources that are allocated for this encoder.
- /// Must get all of the produced command buffers back,
- /// and they must not be used by GPU at this moment.
+
+ /// Reclaim all resources belonging to this `CommandEncoder`.
+ ///
+ /// # Safety
+ ///
+ /// This `CommandEncoder` must be in the "closed" state.
+ ///
+ /// The `command_buffers` iterator must produce all the live
+ /// [`CommandBuffer`]s built using this `CommandEncoder` --- that
+ /// is, every extant `CommandBuffer` returned from `end_encoding`.
+ ///
+ /// [`CommandBuffer`]: Api::CommandBuffer
unsafe fn reset_all<I>(&mut self, command_buffers: I)
where
I: Iterator<Item = A::CommandBuffer>;
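
The expanded CommandEncoder documentation describes a small state machine: an encoder starts closed, begin_encoding moves it to recording, and end_encoding or discard_encoding returns it to closed. A toy model of that life cycle (names and the Vec<String> command list are placeholders, not the wgpu-hal trait), which also mirrors the new dx12/gles Drop impls that discard any in-progress encoding:

    #[derive(Debug)]
    enum EncoderState {
        Closed,
        Recording(Vec<String>), // pending commands, reduced to labels for the sketch
    }

    #[derive(Debug)]
    struct ToyEncoder {
        state: EncoderState,
    }

    #[derive(Debug)]
    struct ToyCommandBuffer {
        commands: Vec<String>,
    }

    impl ToyEncoder {
        /// Closed -> Recording. Only valid on a closed encoder.
        fn begin_encoding(&mut self) {
            self.state = EncoderState::Recording(Vec::new());
        }

        /// Record a command while in the Recording state.
        fn record(&mut self, command: &str) {
            if let EncoderState::Recording(pending) = &mut self.state {
                pending.push(command.to_string());
            }
        }

        /// Recording -> Closed, handing the recorded commands to a fresh buffer.
        fn end_encoding(&mut self) -> ToyCommandBuffer {
            match std::mem::replace(&mut self.state, EncoderState::Closed) {
                EncoderState::Recording(commands) => ToyCommandBuffer { commands },
                EncoderState::Closed => ToyCommandBuffer { commands: Vec::new() },
            }
        }

        /// Recording -> Closed, throwing the pending commands away; this is what the
        /// new Drop implementations call so an unfinished encoder never leaks work.
        fn discard_encoding(&mut self) {
            self.state = EncoderState::Closed;
        }
    }

    impl Drop for ToyEncoder {
        fn drop(&mut self) {
            self.discard_encoding();
        }
    }
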
diff --git a/third_party/rust/wgpu-hal/src/metal/adapter.rs b/third_party/rust/wgpu-hal/src/metal/adapter.rs
index a946ce5819..9ec777b0f0 100644
--- a/third_party/rust/wgpu-hal/src/metal/adapter.rs
+++ b/third_party/rust/wgpu-hal/src/metal/adapter.rs
@@ -731,6 +731,12 @@ impl super::PrivateCapabilities {
} else {
4
},
+ // Per https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+ max_color_attachment_bytes_per_sample: if device.supports_family(MTLGPUFamily::Apple4) {
+ 64
+ } else {
+ 32
+ },
max_varying_components: if device
.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1)
{
@@ -833,7 +839,7 @@ impl super::PrivateCapabilities {
self.indirect_draw_dispatch,
);
features.set(
- F::TIMESTAMP_QUERY,
+ F::TIMESTAMP_QUERY | F::TIMESTAMP_QUERY_INSIDE_ENCODERS,
self.timestamp_query_support
.contains(TimestampQuerySupport::STAGE_BOUNDARIES),
);
@@ -872,6 +878,10 @@ impl super::PrivateCapabilities {
{
features.insert(F::STORAGE_RESOURCE_BINDING_ARRAY);
}
+ features.set(
+ F::SHADER_INT64,
+ self.msl_version >= MTLLanguageVersion::V2_3,
+ );
features.set(
F::ADDRESS_MODE_CLAMP_TO_BORDER,
@@ -940,6 +950,10 @@ impl super::PrivateCapabilities {
min_uniform_buffer_offset_alignment: self.buffer_alignment as u32,
min_storage_buffer_offset_alignment: self.buffer_alignment as u32,
max_inter_stage_shader_components: self.max_varying_components,
+ max_color_attachments: (self.max_color_render_targets as u32)
+ .min(crate::MAX_COLOR_ATTACHMENTS as u32),
+ max_color_attachment_bytes_per_sample: self.max_color_attachment_bytes_per_sample
+ as u32,
max_compute_workgroup_storage_size: self.max_total_threadgroup_memory,
max_compute_invocations_per_workgroup: self.max_threads_per_group,
max_compute_workgroup_size_x: self.max_threads_per_group,
diff --git a/third_party/rust/wgpu-hal/src/metal/mod.rs b/third_party/rust/wgpu-hal/src/metal/mod.rs
index 298f60faac..62fbf3d49d 100644
--- a/third_party/rust/wgpu-hal/src/metal/mod.rs
+++ b/third_party/rust/wgpu-hal/src/metal/mod.rs
@@ -248,6 +248,7 @@ struct PrivateCapabilities {
max_texture_layers: u64,
max_fragment_input_components: u64,
max_color_render_targets: u8,
+ max_color_attachment_bytes_per_sample: u8,
max_varying_components: u32,
max_threads_per_group: u32,
max_total_threadgroup_memory: u32,
diff --git a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
index 85e620d23c..83b3dfa8e5 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/adapter.rs
@@ -189,7 +189,7 @@ impl PhysicalDeviceFeatures {
//.shader_clip_distance(requested_features.contains(wgt::Features::SHADER_CLIP_DISTANCE))
//.shader_cull_distance(requested_features.contains(wgt::Features::SHADER_CULL_DISTANCE))
.shader_float64(requested_features.contains(wgt::Features::SHADER_F64))
- //.shader_int64(requested_features.contains(wgt::Features::SHADER_INT64))
+ .shader_int64(requested_features.contains(wgt::Features::SHADER_INT64))
.shader_int16(requested_features.contains(wgt::Features::SHADER_I16))
//.shader_resource_residency(requested_features.contains(wgt::Features::SHADER_RESOURCE_RESIDENCY))
.geometry_shader(requested_features.contains(wgt::Features::SHADER_PRIMITIVE_INDEX))
@@ -369,6 +369,7 @@ impl PhysicalDeviceFeatures {
| F::ADDRESS_MODE_CLAMP_TO_BORDER
| F::ADDRESS_MODE_CLAMP_TO_ZERO
| F::TIMESTAMP_QUERY
+ | F::TIMESTAMP_QUERY_INSIDE_ENCODERS
| F::TIMESTAMP_QUERY_INSIDE_PASSES
| F::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES
| F::CLEAR_TEXTURE;
@@ -468,7 +469,7 @@ impl PhysicalDeviceFeatures {
//if self.core.shader_clip_distance != 0 {
//if self.core.shader_cull_distance != 0 {
features.set(F::SHADER_F64, self.core.shader_float64 != 0);
- //if self.core.shader_int64 != 0 {
+ features.set(F::SHADER_INT64, self.core.shader_int64 != 0);
features.set(F::SHADER_I16, self.core.shader_int16 != 0);
//if caps.supports_extension(vk::KhrSamplerMirrorClampToEdgeFn::name()) {
@@ -827,6 +828,11 @@ impl PhysicalDeviceCapabilities {
u64::MAX
};
+ // TODO: programmatically determine this, if possible. It's unclear whether we can
+ // as of https://github.com/gpuweb/gpuweb/issues/2965#issuecomment-1361315447.
+ // We could increase the limit when we aren't on a tiled GPU.
+ let max_color_attachment_bytes_per_sample = 32;
+
wgt::Limits {
max_texture_dimension_1d: limits.max_image_dimension1_d,
max_texture_dimension_2d: limits.max_image_dimension2_d,
@@ -862,6 +868,10 @@ impl PhysicalDeviceCapabilities {
max_inter_stage_shader_components: limits
.max_vertex_output_components
.min(limits.max_fragment_input_components),
+ max_color_attachments: limits
+ .max_color_attachments
+ .min(crate::MAX_COLOR_ATTACHMENTS as u32),
+ max_color_attachment_bytes_per_sample,
max_compute_workgroup_storage_size: limits.max_compute_shared_memory_size,
max_compute_invocations_per_workgroup: limits.max_compute_work_group_invocations,
max_compute_workgroup_size_x: max_compute_workgroup_sizes[0],
@@ -1444,6 +1454,10 @@ impl super::Adapter {
capabilities.push(spv::Capability::RayQueryKHR);
}
+ if features.contains(wgt::Features::SHADER_INT64) {
+ capabilities.push(spv::Capability::Int64);
+ }
+
let mut flags = spv::WriterFlags::empty();
flags.set(
spv::WriterFlags::DEBUG,
diff --git a/third_party/rust/wgpu-hal/src/vulkan/instance.rs b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
index c4ef573461..771938b0b0 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/instance.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/instance.rs
@@ -6,6 +6,7 @@ use std::{
thread,
};
+use arrayvec::ArrayVec;
use ash::{
extensions::{ext, khr},
vk,
@@ -34,11 +35,13 @@ unsafe extern "system" fn debug_utils_messenger_callback(
// the debug range start and end appear in different command buffers.
let khronos_validation_layer =
std::ffi::CStr::from_bytes_with_nul(b"Khronos Validation Layer\0").unwrap();
- if user_data.validation_layer_description.as_ref() == khronos_validation_layer
- && user_data.validation_layer_spec_version >= vk::make_api_version(0, 1, 3, 240)
- && user_data.validation_layer_spec_version <= vk::make_api_version(0, 1, 3, 250)
- {
- return vk::FALSE;
+ if let Some(layer_properties) = user_data.validation_layer_properties.as_ref() {
+ if layer_properties.layer_description.as_ref() == khronos_validation_layer
+ && layer_properties.layer_spec_version >= vk::make_api_version(0, 1, 3, 240)
+ && layer_properties.layer_spec_version <= vk::make_api_version(0, 1, 3, 250)
+ {
+ return vk::FALSE;
+ }
}
}
@@ -211,6 +214,22 @@ impl super::Instance {
&self.shared
}
+ fn enumerate_instance_extension_properties(
+ entry: &ash::Entry,
+ layer_name: Option<&CStr>,
+ ) -> Result<Vec<vk::ExtensionProperties>, crate::InstanceError> {
+ let instance_extensions = {
+ profiling::scope!("vkEnumerateInstanceExtensionProperties");
+ entry.enumerate_instance_extension_properties(layer_name)
+ };
+ instance_extensions.map_err(|e| {
+ crate::InstanceError::with_source(
+ String::from("enumerate_instance_extension_properties() failed"),
+ e,
+ )
+ })
+ }
+
/// Return the instance extension names wgpu would like to enable.
///
/// Return a vector of the names of instance extensions actually available
@@ -229,16 +248,7 @@ impl super::Instance {
_instance_api_version: u32,
flags: wgt::InstanceFlags,
) -> Result<Vec<&'static CStr>, crate::InstanceError> {
- let instance_extensions = {
- profiling::scope!("vkEnumerateInstanceExtensionProperties");
- entry.enumerate_instance_extension_properties(None)
- };
- let instance_extensions = instance_extensions.map_err(|e| {
- crate::InstanceError::with_source(
- String::from("enumerate_instance_extension_properties() failed"),
- e,
- )
- })?;
+ let instance_extensions = Self::enumerate_instance_extension_properties(entry, None)?;
// Check our extensions against the available extensions
let mut extensions: Vec<&'static CStr> = Vec::new();
@@ -643,6 +653,31 @@ impl crate::Instance<super::Api> for super::Instance {
.find(|inst_layer| cstr_from_bytes_until_nul(&inst_layer.layer_name) == Some(name))
}
+ let validation_layer_name =
+ CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap();
+ let validation_layer_properties = find_layer(&instance_layers, validation_layer_name);
+
+ // Determine if VK_EXT_validation_features is available, so we can enable
+ // GPU assisted validation and synchronization validation.
+ let validation_features_are_enabled = if validation_layer_properties.is_some() {
+ // Get all the instance extension properties.
+ let exts =
+ Self::enumerate_instance_extension_properties(&entry, Some(validation_layer_name))?;
+ // Convert all the names of the extensions into an iterator of CStrs.
+ let mut ext_names = exts
+ .iter()
+ .filter_map(|ext| cstr_from_bytes_until_nul(&ext.extension_name));
+ // Find the validation features extension.
+ ext_names.any(|ext_name| ext_name == vk::ExtValidationFeaturesFn::name())
+ } else {
+ false
+ };
+
+ let should_enable_gpu_based_validation = desc
+ .flags
+ .intersects(wgt::InstanceFlags::GPU_BASED_VALIDATION)
+ && validation_features_are_enabled;
+
let nv_optimus_layer = CStr::from_bytes_with_nul(b"VK_LAYER_NV_optimus\0").unwrap();
let has_nv_optimus = find_layer(&instance_layers, nv_optimus_layer).is_some();
@@ -651,52 +686,33 @@ impl crate::Instance<super::Api> for super::Instance {
let mut layers: Vec<&'static CStr> = Vec::new();
+ let has_debug_extension = extensions.contains(&ext::DebugUtils::name());
+ let mut debug_user_data = has_debug_extension.then(|| {
+ // Put the callback data on the heap, to ensure it will never be
+ // moved.
+ Box::new(super::DebugUtilsMessengerUserData {
+ validation_layer_properties: None,
+ has_obs_layer,
+ })
+ });
+
// Request validation layer if asked.
- let mut debug_utils = None;
- if desc.flags.intersects(wgt::InstanceFlags::VALIDATION) {
- let validation_layer_name =
- CStr::from_bytes_with_nul(b"VK_LAYER_KHRONOS_validation\0").unwrap();
- if let Some(layer_properties) = find_layer(&instance_layers, validation_layer_name) {
+ if desc.flags.intersects(wgt::InstanceFlags::VALIDATION)
+ || should_enable_gpu_based_validation
+ {
+ if let Some(layer_properties) = validation_layer_properties {
layers.push(validation_layer_name);
- if extensions.contains(&ext::DebugUtils::name()) {
- // Put the callback data on the heap, to ensure it will never be
- // moved.
- let callback_data = Box::new(super::DebugUtilsMessengerUserData {
- validation_layer_description: cstr_from_bytes_until_nul(
- &layer_properties.description,
- )
- .unwrap()
- .to_owned(),
- validation_layer_spec_version: layer_properties.spec_version,
- has_obs_layer,
- });
-
- // having ERROR unconditionally because Vk doesn't like empty flags
- let mut severity = vk::DebugUtilsMessageSeverityFlagsEXT::ERROR;
- if log::max_level() >= log::LevelFilter::Debug {
- severity |= vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE;
- }
- if log::max_level() >= log::LevelFilter::Info {
- severity |= vk::DebugUtilsMessageSeverityFlagsEXT::INFO;
- }
- if log::max_level() >= log::LevelFilter::Warn {
- severity |= vk::DebugUtilsMessageSeverityFlagsEXT::WARNING;
- }
-
- let message_type = vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
- | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
- | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE;
-
- let create_info = super::DebugUtilsCreateInfo {
- severity,
- message_type,
- callback_data,
- };
-
- let vk_create_info = create_info.to_vk_create_info().build();
-
- debug_utils = Some((create_info, vk_create_info));
+ if let Some(debug_user_data) = debug_user_data.as_mut() {
+ debug_user_data.validation_layer_properties =
+ Some(super::ValidationLayerProperties {
+ layer_description: cstr_from_bytes_until_nul(
+ &layer_properties.description,
+ )
+ .unwrap()
+ .to_owned(),
+ layer_spec_version: layer_properties.spec_version,
+ });
}
} else {
log::warn!(
@@ -705,6 +721,35 @@ impl crate::Instance<super::Api> for super::Instance {
);
}
}
+ let mut debug_utils = if let Some(callback_data) = debug_user_data {
+ // having ERROR unconditionally because Vk doesn't like empty flags
+ let mut severity = vk::DebugUtilsMessageSeverityFlagsEXT::ERROR;
+ if log::max_level() >= log::LevelFilter::Debug {
+ severity |= vk::DebugUtilsMessageSeverityFlagsEXT::VERBOSE;
+ }
+ if log::max_level() >= log::LevelFilter::Info {
+ severity |= vk::DebugUtilsMessageSeverityFlagsEXT::INFO;
+ }
+ if log::max_level() >= log::LevelFilter::Warn {
+ severity |= vk::DebugUtilsMessageSeverityFlagsEXT::WARNING;
+ }
+
+ let message_type = vk::DebugUtilsMessageTypeFlagsEXT::GENERAL
+ | vk::DebugUtilsMessageTypeFlagsEXT::VALIDATION
+ | vk::DebugUtilsMessageTypeFlagsEXT::PERFORMANCE;
+
+ let create_info = super::DebugUtilsCreateInfo {
+ severity,
+ message_type,
+ callback_data,
+ };
+
+ let vk_create_info = create_info.to_vk_create_info().build();
+
+ Some((create_info, vk_create_info))
+ } else {
+ None
+ };
#[cfg(target_os = "android")]
let android_sdk_version = {
@@ -756,6 +801,28 @@ impl crate::Instance<super::Api> for super::Instance {
create_info = create_info.push_next(vk_create_info);
}
+ // Enable explicit validation features if available
+ let mut validation_features;
+ let mut validation_feature_list: ArrayVec<_, 3>;
+ if validation_features_are_enabled {
+ validation_feature_list = ArrayVec::new();
+
+ // Always enable synchronization validation
+ validation_feature_list
+ .push(vk::ValidationFeatureEnableEXT::SYNCHRONIZATION_VALIDATION);
+
+ // Only enable GPU assisted validation if requested.
+ if should_enable_gpu_based_validation {
+ validation_feature_list.push(vk::ValidationFeatureEnableEXT::GPU_ASSISTED);
+ validation_feature_list
+ .push(vk::ValidationFeatureEnableEXT::GPU_ASSISTED_RESERVE_BINDING_SLOT);
+ }
+
+ validation_features = vk::ValidationFeaturesEXT::builder()
+ .enabled_validation_features(&validation_feature_list);
+ create_info = create_info.push_next(&mut validation_features);
+ }
+
unsafe {
profiling::scope!("vkCreateInstance");
entry.create_instance(&create_info, None)
diff --git a/third_party/rust/wgpu-hal/src/vulkan/mod.rs b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
index 787ebd7267..1f922e83da 100644
--- a/third_party/rust/wgpu-hal/src/vulkan/mod.rs
+++ b/third_party/rust/wgpu-hal/src/vulkan/mod.rs
@@ -101,17 +101,25 @@ pub struct DebugUtilsCreateInfo {
callback_data: Box<DebugUtilsMessengerUserData>,
}
+#[derive(Debug)]
+/// The validation layer properties needed by the DebugUtilsMessenger callback
+/// for its workarounds.
+struct ValidationLayerProperties {
+ /// Validation layer description, from `vk::LayerProperties`.
+ layer_description: std::ffi::CString,
+
+ /// Validation layer specification version, from `vk::LayerProperties`.
+ layer_spec_version: u32,
+}
+
/// User data needed by `instance::debug_utils_messenger_callback`.
///
/// When we create the [`vk::DebugUtilsMessengerEXT`], the `pUserData`
/// pointer refers to one of these values.
#[derive(Debug)]
pub struct DebugUtilsMessengerUserData {
- /// Validation layer description, from `vk::LayerProperties`.
- validation_layer_description: std::ffi::CString,
-
- /// Validation layer specification version, from `vk::LayerProperties`.
- validation_layer_spec_version: u32,
+ /// The properties related to the validation layer, if present
+ validation_layer_properties: Option<ValidationLayerProperties>,
/// If the OBS layer is present. OBS never increments the version of their layer,
/// so there's no reason to have the version.
@@ -724,13 +732,25 @@ impl crate::Queue<Api> for Queue {
impl From<vk::Result> for crate::DeviceError {
fn from(result: vk::Result) -> Self {
+ #![allow(unreachable_code)]
match result {
vk::Result::ERROR_OUT_OF_HOST_MEMORY | vk::Result::ERROR_OUT_OF_DEVICE_MEMORY => {
+ #[cfg(feature = "oom_panic")]
+ panic!("Out of memory ({result:?})");
+
Self::OutOfMemory
}
- vk::Result::ERROR_DEVICE_LOST => Self::Lost,
+ vk::Result::ERROR_DEVICE_LOST => {
+ #[cfg(feature = "device_lost_panic")]
+ panic!("Device lost");
+
+ Self::Lost
+ }
_ => {
- log::warn!("Unrecognized device error {:?}", result);
+ #[cfg(feature = "internal_error_panic")]
+ panic!("Internal error: {result:?}");
+
+ log::warn!("Unrecognized device error {result:?}");
Self::Lost
}
}
diff --git a/third_party/rust/wgpu-types/.cargo-checksum.json b/third_party/rust/wgpu-types/.cargo-checksum.json
index f45cbfc589..dea747bd18 100644
--- a/third_party/rust/wgpu-types/.cargo-checksum.json
+++ b/third_party/rust/wgpu-types/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"Cargo.toml":"6b0d7ddecc26e3b72cb6d47793770203147f851f048da8d1f5d8f508e40d4f82","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"33e1cf343a848c5deecbac6949d5a1378a70da0a48b2120fc62d600ce98a2da2","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null} \ No newline at end of file
+{"files":{"Cargo.toml":"c536ec4d70291834fb3e6bcd6f03900bb3a651eda9449e7adf03ef2611be96a9","LICENSE.APACHE":"a6cba85bc92e0cff7a450b1d873c0eaa2e9fc96bf472df0247a26bec77bf3ff9","LICENSE.MIT":"c7fea58d1cfe49634cd92e54fc10a9d871f4b275321a4cd8c09e449122caaeb4","src/assertions.rs":"3fe98027aa73970c8ab7874a3e13dbfd6faa87df2081beb5c83aeec4c60f372f","src/lib.rs":"bafa964caee2fdc6fc2adbb1dea540d5575e29c45946bc51e9912bfbdb13a352","src/math.rs":"4d03039736dd6926feb139bc68734cb59df34ede310427bbf059e5c925e0af3b"},"package":null} \ No newline at end of file
diff --git a/third_party/rust/wgpu-types/Cargo.toml b/third_party/rust/wgpu-types/Cargo.toml
index 9900e77771..792b03dcc8 100644
--- a/third_party/rust/wgpu-types/Cargo.toml
+++ b/third_party/rust/wgpu-types/Cargo.toml
@@ -45,7 +45,7 @@ features = ["serde_derive"]
optional = true
[dev-dependencies]
-serde_json = "1.0.111"
+serde_json = "1.0.113"
[dev-dependencies.serde]
version = "1"
diff --git a/third_party/rust/wgpu-types/src/lib.rs b/third_party/rust/wgpu-types/src/lib.rs
index d2d493a7ca..347aad76f9 100644
--- a/third_party/rust/wgpu-types/src/lib.rs
+++ b/third_party/rust/wgpu-types/src/lib.rs
@@ -267,15 +267,75 @@ bitflags::bitflags! {
///
/// This is a web and native feature.
const DEPTH_CLIP_CONTROL = 1 << 0;
+
+ /// Allows for explicit creation of textures of format [`TextureFormat::Depth32FloatStencil8`]
+ ///
+ /// Supported platforms:
+ /// - Vulkan (mostly)
+ /// - DX12
+ /// - Metal
+ /// - OpenGL
+ ///
+ /// This is a web and native feature.
+ const DEPTH32FLOAT_STENCIL8 = 1 << 1;
+
+ /// Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
+ /// with 8 or 16 bytes per block.
+ ///
+ /// Compressed textures sacrifice some quality in exchange for significantly reduced
+ /// bandwidth usage.
+ ///
+ /// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for BCn formats.
+ /// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
+ ///
+ /// Supported Platforms:
+ /// - desktops
+ ///
+ /// This is a web and native feature.
+ const TEXTURE_COMPRESSION_BC = 1 << 2;
+
+ /// Enables ETC family of compressed textures. All ETC textures use 4x4 pixel blocks.
+ /// ETC2 RGB and RGBA1 are 8 bytes per block. ETC2 RGBA8 and EAC are 16 bytes per block.
+ ///
+ /// Compressed textures sacrifice some quality in exchange for significantly reduced
+ /// bandwidth usage.
+ ///
+ /// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ETC2 formats.
+ /// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
+ ///
+ /// Supported Platforms:
+ /// - Vulkan on Intel
+ /// - Mobile (some)
+ ///
+ /// This is a web and native feature.
+ const TEXTURE_COMPRESSION_ETC2 = 1 << 3;
+
+ /// Enables ASTC family of compressed textures. ASTC textures use pixel blocks varying from 4x4 to 12x12.
+ /// Blocks are always 16 bytes.
+ ///
+ /// Compressed textures sacrifice some quality in exchange for significantly reduced
+ /// bandwidth usage.
+ ///
+ /// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ASTC formats with Unorm/UnormSrgb channel type.
+ /// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
+ ///
+ /// Supported Platforms:
+ /// - Vulkan on Intel
+ /// - Mobile (some)
+ ///
+ /// This is a web and native feature.
+ const TEXTURE_COMPRESSION_ASTC = 1 << 4;
+
/// Enables use of Timestamp Queries. These queries tell the current gpu timestamp when
/// all work before the query is finished.
///
/// This feature allows the use of
- /// - [`CommandEncoder::write_timestamp`]
/// - [`RenderPassDescriptor::timestamp_writes`]
/// - [`ComputePassDescriptor::timestamp_writes`]
/// to write out timestamps.
- /// For timestamps within passes refer to [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`]
+ ///
+ /// For arbitrary timestamp write commands on encoders refer to [`Features::TIMESTAMP_QUERY_INSIDE_ENCODERS`].
+ /// For arbitrary timestamp write commands on passes refer to [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`].
///
/// They must be resolved using [`CommandEncoder::resolve_query_sets`] into a buffer,
/// then the result must be multiplied by the timestamp period [`Queue::get_timestamp_period`]
@@ -288,7 +348,8 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a web and native feature.
- const TIMESTAMP_QUERY = 1 << 1;
+ const TIMESTAMP_QUERY = 1 << 5;
+
/// Allows non-zero value for the `first_instance` member in indirect draw calls.
///
/// If this feature is not enabled, and the `first_instance` member is non-zero, the behavior may be:
@@ -306,33 +367,29 @@ bitflags::bitflags! {
/// - OpenGL ES / WebGL
///
/// This is a web and native feature.
- const INDIRECT_FIRST_INSTANCE = 1 << 2;
-
- // 3..8 available
-
- // Shader:
+ const INDIRECT_FIRST_INSTANCE = 1 << 6;
/// Allows shaders to acquire the FP16 ability
///
- /// Note: this is not supported in `naga` yet,only through `spirv-passthrough` right now.
+ /// Note: this is not supported in `naga` yet, only through `spirv-passthrough` right now.
///
/// Supported Platforms:
/// - Vulkan
/// - Metal
///
/// This is a web and native feature.
- const SHADER_F16 = 1 << 8;
-
- // 9..14 available
+ const SHADER_F16 = 1 << 7;
- // Texture Formats:
- // The features starting with a ? are features that might become part of the spec or
- // at the very least we can implement as native features; since they should cover all
- // possible formats and capabilities across backends.
- //
- // ? const FORMATS_TIER_1 = 1 << 14; (https://github.com/gpuweb/gpuweb/issues/3837)
- // ? const RW_STORAGE_TEXTURE_TIER_1 = 1 << 15; (https://github.com/gpuweb/gpuweb/issues/3838)
+ /// Allows for usage of textures of format [`TextureFormat::Rg11b10Float`] as a render target
+ ///
+ /// Supported platforms:
+ /// - Vulkan
+ /// - DX12
+ /// - Metal
+ ///
+ /// This is a web and native feature.
+ const RG11B10UFLOAT_RENDERABLE = 1 << 8;
/// Allows the [`wgpu::TextureUsages::STORAGE_BINDING`] usage on textures with format [`TextureFormat::Bgra8unorm`]
///
@@ -342,10 +399,8 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a web and native feature.
- const BGRA8UNORM_STORAGE = 1 << 16;
+ const BGRA8UNORM_STORAGE = 1 << 9;
- // ? const NORM16_FILTERABLE = 1 << 17; (https://github.com/gpuweb/gpuweb/issues/3839)
- // ? const NORM16_RESOLVE = 1 << 18; (https://github.com/gpuweb/gpuweb/issues/3839)
/// Allows textures with formats "r32float", "rg32float", and "rgba32float" to be filterable.
///
@@ -356,81 +411,11 @@ bitflags::bitflags! {
/// - GL with one of `GL_ARB_color_buffer_float`/`GL_EXT_color_buffer_float`/`OES_texture_float_linear`
///
/// This is a web and native feature.
- const FLOAT32_FILTERABLE = 1 << 19;
+ const FLOAT32_FILTERABLE = 1 << 10;
- // ? const FLOAT32_BLENDABLE = 1 << 20; (https://github.com/gpuweb/gpuweb/issues/3556)
- // ? const 32BIT_FORMAT_MULTISAMPLE = 1 << 21; (https://github.com/gpuweb/gpuweb/issues/3844)
- // ? const 32BIT_FORMAT_RESOLVE = 1 << 22; (https://github.com/gpuweb/gpuweb/issues/3844)
-
- /// Allows for usage of textures of format [`TextureFormat::Rg11b10Float`] as a render target
- ///
- /// Supported platforms:
- /// - Vulkan
- /// - DX12
- /// - Metal
- ///
- /// This is a web and native feature.
- const RG11B10UFLOAT_RENDERABLE = 1 << 23;
-
- /// Allows for explicit creation of textures of format [`TextureFormat::Depth32FloatStencil8`]
- ///
- /// Supported platforms:
- /// - Vulkan (mostly)
- /// - DX12
- /// - Metal
- ///
- /// This is a web and native feature.
- const DEPTH32FLOAT_STENCIL8 = 1 << 24;
- /// Enables BCn family of compressed textures. All BCn textures use 4x4 pixel blocks
- /// with 8 or 16 bytes per block.
- ///
- /// Compressed textures sacrifice some quality in exchange for significantly reduced
- /// bandwidth usage.
- ///
- /// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for BCn formats.
- /// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
- ///
- /// Supported Platforms:
- /// - desktops
- ///
- /// This is a web and native feature.
- const TEXTURE_COMPRESSION_BC = 1 << 25;
- /// Enables ETC family of compressed textures. All ETC textures use 4x4 pixel blocks.
- /// ETC2 RGB and RGBA1 are 8 bytes per block. RTC2 RGBA8 and EAC are 16 bytes per block.
- ///
- /// Compressed textures sacrifice some quality in exchange for significantly reduced
- /// bandwidth usage.
- ///
- /// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ETC2 formats.
- /// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
- ///
- /// Supported Platforms:
- /// - Vulkan on Intel
- /// - Mobile (some)
- ///
- /// This is a web and native feature.
- const TEXTURE_COMPRESSION_ETC2 = 1 << 26;
- /// Enables ASTC family of compressed textures. ASTC textures use pixel blocks varying from 4x4 to 12x12.
- /// Blocks are always 16 bytes.
- ///
- /// Compressed textures sacrifice some quality in exchange for significantly reduced
- /// bandwidth usage.
- ///
- /// Support for this feature guarantees availability of [`TextureUsages::COPY_SRC | TextureUsages::COPY_DST | TextureUsages::TEXTURE_BINDING`] for ASTC formats with Unorm/UnormSrgb channel type.
- /// [`Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES`] may enable additional usages.
- ///
- /// Supported Platforms:
- /// - Vulkan on Intel
- /// - Mobile (some)
- ///
- /// This is a web and native feature.
- const TEXTURE_COMPRESSION_ASTC = 1 << 27;
-
- // ? const TEXTURE_COMPRESSION_ASTC_HDR = 1 << 28; (https://github.com/gpuweb/gpuweb/issues/3856)
-
- // 29..32 should be available but are for now occupied by native only texture related features
- // TEXTURE_FORMAT_16BIT_NORM & TEXTURE_COMPRESSION_ASTC_HDR will most likely become web features as well
- // TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES might not be necessary if we have all the texture features implemented
+ // Bits 11-19 available for webgpu features. Should you choose to use some of them
+ // for native features, don't forget to update `all_webgpu_mask` and `all_native_mask`
+ // accordingly.
//
// ---- Restart Numbering for Native Features ---
@@ -438,6 +423,21 @@ bitflags::bitflags! {
// Native Features:
//
+ // The features starting with a ? are features that might become part of the spec or
+ // at the very least we can implement as native features; since they should cover all
+ // possible formats and capabilities across backends.
+ //
+ // ? const FORMATS_TIER_1 = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3837)
+ // ? const RW_STORAGE_TEXTURE_TIER_1 = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3838)
+ // ? const NORM16_FILTERABLE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3839)
+ // ? const NORM16_RESOLVE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3839)
+ // ? const FLOAT32_BLENDABLE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3556)
+ // ? const 32BIT_FORMAT_MULTISAMPLE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3844)
+ // ? const 32BIT_FORMAT_RESOLVE = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3844)
+ // ? const TEXTURE_COMPRESSION_ASTC_HDR = 1 << ??; (https://github.com/gpuweb/gpuweb/issues/3856)
+ // TEXTURE_FORMAT_16BIT_NORM & TEXTURE_COMPRESSION_ASTC_HDR will most likely become web features as well
+ // TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES might not be necessary if we have all the texture features implemented
+
// Texture Formats:
/// Enables normalized `16-bit` texture formats.
@@ -448,7 +448,7 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a native only feature.
- const TEXTURE_FORMAT_16BIT_NORM = 1 << 29;
+ const TEXTURE_FORMAT_16BIT_NORM = 1 << 20;
/// Enables ASTC HDR family of compressed textures.
///
/// Compressed textures sacrifice some quality in exchange for significantly reduced
@@ -463,7 +463,7 @@ bitflags::bitflags! {
/// - OpenGL
///
/// This is a native only feature.
- const TEXTURE_COMPRESSION_ASTC_HDR = 1 << 30;
+ const TEXTURE_COMPRESSION_ASTC_HDR = 1 << 21;
/// Enables device specific texture format features.
///
/// See `TextureFormatFeatures` for a listing of the features in question.
@@ -475,7 +475,7 @@ bitflags::bitflags! {
/// This extension does not enable additional formats.
///
/// This is a native only feature.
- const TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES = 1 << 31;
+ const TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES = 1 << 22;
// API:
@@ -491,11 +491,25 @@ bitflags::bitflags! {
/// - DX12
///
/// This is a native only feature with a [proposal](https://github.com/gpuweb/gpuweb/blob/0008bd30da2366af88180b511a5d0d0c1dffbc36/proposals/pipeline-statistics-query.md) for the web.
- const PIPELINE_STATISTICS_QUERY = 1 << 32;
- /// Allows for timestamp queries inside render passes.
+ const PIPELINE_STATISTICS_QUERY = 1 << 23;
+ /// Allows for timestamp queries directly on command encoders.
///
/// Implies [`Features::TIMESTAMP_QUERY`] is supported.
///
+ /// Additionally allows for timestamp writes on command encoders
+ /// using [`CommandEncoder::write_timestamp`].
+ ///
+ /// Supported platforms:
+ /// - Vulkan
+ /// - DX12
+ /// - Metal
+ ///
+ /// This is a native only feature.
+ const TIMESTAMP_QUERY_INSIDE_ENCODERS = 1 << 24;
+ /// Allows for timestamp queries directly inside render & compute passes.
+ ///
+ /// Implies [`Features::TIMESTAMP_QUERY`] & [`Features::TIMESTAMP_QUERY_INSIDE_ENCODERS`] are supported.
+ ///
/// Additionally allows for timestamp queries to be used inside render & compute passes using:
/// - [`RenderPassEncoder::write_timestamp`]
/// - [`ComputePassEncoder::write_timestamp`]
@@ -508,7 +522,7 @@ bitflags::bitflags! {
/// This is generally not available on tile-based rasterization GPUs.
///
/// This is a native only feature with a [proposal](https://github.com/gpuweb/gpuweb/blob/0008bd30da2366af88180b511a5d0d0c1dffbc36/proposals/timestamp-query-inside-passes.md) for the web.
- const TIMESTAMP_QUERY_INSIDE_PASSES = 1 << 33;
+ const TIMESTAMP_QUERY_INSIDE_PASSES = 1 << 25;
/// Webgpu only allows the MAP_READ and MAP_WRITE buffer usage to be matched with
/// COPY_DST and COPY_SRC respectively. This removes this requirement.
///
@@ -522,7 +536,7 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a native only feature.
- const MAPPABLE_PRIMARY_BUFFERS = 1 << 34;
+ const MAPPABLE_PRIMARY_BUFFERS = 1 << 26;
/// Allows the user to create uniform arrays of textures in shaders:
///
/// ex.
@@ -545,7 +559,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const TEXTURE_BINDING_ARRAY = 1 << 35;
+ const TEXTURE_BINDING_ARRAY = 1 << 27;
/// Allows the user to create arrays of buffers in shaders:
///
/// ex.
@@ -567,7 +581,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const BUFFER_BINDING_ARRAY = 1 << 36;
+ const BUFFER_BINDING_ARRAY = 1 << 28;
/// Allows the user to create uniform arrays of storage buffers or textures in shaders,
/// if resp. [`Features::BUFFER_BINDING_ARRAY`] or [`Features::TEXTURE_BINDING_ARRAY`]
/// is supported.
@@ -580,7 +594,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const STORAGE_RESOURCE_BINDING_ARRAY = 1 << 37;
+ const STORAGE_RESOURCE_BINDING_ARRAY = 1 << 29;
/// Allows shaders to index sampled texture and storage buffer resource arrays with dynamically non-uniform values:
///
/// ex. `texture_array[vertex_data]`
@@ -605,7 +619,7 @@ bitflags::bitflags! {
/// - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderSampledImageArrayNonUniformIndexing & shaderStorageBufferArrayNonUniformIndexing feature)
///
/// This is a native only feature.
- const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 1 << 38;
+ const SAMPLED_TEXTURE_AND_STORAGE_BUFFER_ARRAY_NON_UNIFORM_INDEXING = 1 << 30;
/// Allows shaders to index uniform buffer and storage texture resource arrays with dynamically non-uniform values:
///
/// ex. `texture_array[vertex_data]`
@@ -630,11 +644,11 @@ bitflags::bitflags! {
/// - Vulkan 1.2+ (or VK_EXT_descriptor_indexing)'s shaderUniformBufferArrayNonUniformIndexing & shaderStorageTextureArrayNonUniformIndexing feature)
///
/// This is a native only feature.
- const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 1 << 39;
+ const UNIFORM_BUFFER_AND_STORAGE_TEXTURE_ARRAY_NON_UNIFORM_INDEXING = 1 << 31;
/// Allows the user to create bind groups containing arrays with less bindings than the BindGroupLayout.
///
/// This is a native only feature.
- const PARTIALLY_BOUND_BINDING_ARRAY = 1 << 40;
+ const PARTIALLY_BOUND_BINDING_ARRAY = 1 << 32;
/// Allows the user to call [`RenderPass::multi_draw_indirect`] and [`RenderPass::multi_draw_indexed_indirect`].
///
/// Allows multiple indirect calls to be dispatched from a single buffer.
@@ -648,7 +662,7 @@ bitflags::bitflags! {
///
/// [`RenderPass::multi_draw_indirect`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indirect
/// [`RenderPass::multi_draw_indexed_indirect`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indexed_indirect
- const MULTI_DRAW_INDIRECT = 1 << 41;
+ const MULTI_DRAW_INDIRECT = 1 << 33;
/// Allows the user to call [`RenderPass::multi_draw_indirect_count`] and [`RenderPass::multi_draw_indexed_indirect_count`].
///
/// This allows the use of a buffer containing the actual number of draw calls.
@@ -661,7 +675,7 @@ bitflags::bitflags! {
///
/// [`RenderPass::multi_draw_indirect_count`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indirect_count
/// [`RenderPass::multi_draw_indexed_indirect_count`]: ../wgpu/struct.RenderPass.html#method.multi_draw_indexed_indirect_count
- const MULTI_DRAW_INDIRECT_COUNT = 1 << 42;
+ const MULTI_DRAW_INDIRECT_COUNT = 1 << 34;
/// Allows the use of push constants: small, fast bits of memory that can be updated
/// inside a [`RenderPass`].
///
@@ -681,7 +695,7 @@ bitflags::bitflags! {
/// [`RenderPass`]: ../wgpu/struct.RenderPass.html
/// [`PipelineLayoutDescriptor`]: ../wgpu/struct.PipelineLayoutDescriptor.html
/// [`RenderPass::set_push_constants`]: ../wgpu/struct.RenderPass.html#method.set_push_constants
- const PUSH_CONSTANTS = 1 << 43;
+ const PUSH_CONSTANTS = 1 << 35;
/// Allows the use of [`AddressMode::ClampToBorder`] with a border color
/// of [`SamplerBorderColor::Zero`].
///
@@ -692,7 +706,7 @@ bitflags::bitflags! {
/// - OpenGL
///
/// This is a native only feature.
- const ADDRESS_MODE_CLAMP_TO_ZERO = 1 << 44;
+ const ADDRESS_MODE_CLAMP_TO_ZERO = 1 << 36;
/// Allows the use of [`AddressMode::ClampToBorder`] with a border color
/// other than [`SamplerBorderColor::Zero`].
///
@@ -703,7 +717,7 @@ bitflags::bitflags! {
/// - OpenGL
///
/// This is a native only feature.
- const ADDRESS_MODE_CLAMP_TO_BORDER = 1 << 45;
+ const ADDRESS_MODE_CLAMP_TO_BORDER = 1 << 37;
/// Allows the user to set [`PolygonMode::Line`] in [`PrimitiveState::polygon_mode`]
///
/// This allows drawing polygons/triangles as lines (wireframe) instead of filled
@@ -714,7 +728,7 @@ bitflags::bitflags! {
/// - Metal
///
/// This is a native only feature.
- const POLYGON_MODE_LINE = 1 << 46;
+ const POLYGON_MODE_LINE = 1 << 38;
/// Allows the user to set [`PolygonMode::Point`] in [`PrimitiveState::polygon_mode`]
///
/// This allows only drawing the vertices of polygons/triangles instead of filled
@@ -723,7 +737,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const POLYGON_MODE_POINT = 1 << 47;
+ const POLYGON_MODE_POINT = 1 << 39;
/// Allows the user to set a overestimation-conservative-rasterization in [`PrimitiveState::conservative`]
///
/// Processing of degenerate triangles/lines is hardware specific.
@@ -733,7 +747,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const CONSERVATIVE_RASTERIZATION = 1 << 48;
+ const CONSERVATIVE_RASTERIZATION = 1 << 40;
/// Enables bindings of writable storage buffers and textures visible to vertex shaders.
///
/// Note: some (tiled-based) platforms do not support vertex shaders with any side-effects.
@@ -742,14 +756,14 @@ bitflags::bitflags! {
/// - All
///
/// This is a native only feature.
- const VERTEX_WRITABLE_STORAGE = 1 << 49;
+ const VERTEX_WRITABLE_STORAGE = 1 << 41;
/// Enables clear to zero for textures.
///
/// Supported platforms:
/// - All
///
/// This is a native only feature.
- const CLEAR_TEXTURE = 1 << 50;
+ const CLEAR_TEXTURE = 1 << 42;
/// Enables creating shader modules from SPIR-V binary data (unsafe).
///
/// SPIR-V data is not parsed or interpreted in any way; you can use
@@ -761,7 +775,7 @@ bitflags::bitflags! {
/// Vulkan implementation.
///
/// This is a native only feature.
- const SPIRV_SHADER_PASSTHROUGH = 1 << 51;
+ const SPIRV_SHADER_PASSTHROUGH = 1 << 43;
/// Enables multiview render passes and `builtin(view_index)` in vertex shaders.
///
/// Supported platforms:
@@ -769,7 +783,7 @@ bitflags::bitflags! {
/// - OpenGL (web only)
///
/// This is a native only feature.
- const MULTIVIEW = 1 << 52;
+ const MULTIVIEW = 1 << 44;
/// Enables using 64-bit types for vertex attributes.
///
/// Requires SHADER_FLOAT64.
@@ -777,7 +791,7 @@ bitflags::bitflags! {
/// Supported Platforms: N/A
///
/// This is a native only feature.
- const VERTEX_ATTRIBUTE_64BIT = 1 << 53;
+ const VERTEX_ATTRIBUTE_64BIT = 1 << 45;
/// Allows vertex shaders to have outputs which are not consumed
/// by the fragment shader.
///
@@ -785,7 +799,7 @@ bitflags::bitflags! {
/// - Vulkan
/// - Metal
/// - OpenGL
- const SHADER_UNUSED_VERTEX_OUTPUT = 1 << 54;
+ const SHADER_UNUSED_VERTEX_OUTPUT = 1 << 46;
/// Allows for creation of textures of format [`TextureFormat::NV12`]
///
/// Supported platforms:
@@ -793,16 +807,14 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const TEXTURE_FORMAT_NV12 = 1 << 55;
+ const TEXTURE_FORMAT_NV12 = 1 << 47;
/// Allows for the creation of ray-tracing acceleration structures.
///
/// Supported platforms:
/// - Vulkan
///
/// This is a native-only feature.
- const RAY_TRACING_ACCELERATION_STRUCTURE = 1 << 56;
-
- // 57 available
+ const RAY_TRACING_ACCELERATION_STRUCTURE = 1 << 48;
// Shader:
@@ -812,7 +824,7 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native-only feature.
- const RAY_QUERY = 1 << 58;
+ const RAY_QUERY = 1 << 49;
/// Enables 64-bit floating point types in SPIR-V shaders.
///
/// Note: even when supported by GPU hardware, 64-bit floating point operations are
@@ -822,14 +834,14 @@ bitflags::bitflags! {
/// - Vulkan
///
/// This is a native only feature.
- const SHADER_F64 = 1 << 59;
+ const SHADER_F64 = 1 << 50;
/// Allows shaders to use i16. Not currently supported in `naga`, only available through `spirv-passthrough`.
///
/// Supported platforms:
/// - Vulkan
///
/// This is a native only feature.
- const SHADER_I16 = 1 << 60;
+ const SHADER_I16 = 1 << 51;
/// Enables `builtin(primitive_index)` in fragment shaders.
///
/// Note: enables geometry processing for pipelines using the builtin.
@@ -843,14 +855,14 @@ bitflags::bitflags! {
/// - OpenGL (some)
///
/// This is a native only feature.
- const SHADER_PRIMITIVE_INDEX = 1 << 61;
+ const SHADER_PRIMITIVE_INDEX = 1 << 52;
/// Allows shaders to use the `early_depth_test` attribute.
///
/// Supported platforms:
/// - GLES 3.1+
///
/// This is a native only feature.
- const SHADER_EARLY_DEPTH_TEST = 1 << 62;
+ const SHADER_EARLY_DEPTH_TEST = 1 << 53;
/// Allows two outputs from a shader to be used for blending.
/// Note that dual-source blending doesn't support multiple render targets.
///
@@ -861,7 +873,16 @@ bitflags::bitflags! {
/// - Metal (with MSL 1.2+)
/// - Vulkan (with dualSrcBlend)
/// - DX12
- const DUAL_SOURCE_BLENDING = 1 << 63;
+ const DUAL_SOURCE_BLENDING = 1 << 54;
+ /// Allows shaders to use i64 and u64.
+ ///
+ /// Supported platforms:
+ /// - Vulkan
+ /// - DX12 (DXC only)
+ /// - Metal (with MSL 2.3+)
+ ///
+ /// This is a native only feature.
+ const SHADER_INT64 = 1 << 55;
}
}
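A small sketch of how a caller might gate on the new `SHADER_INT64` flag, assuming only the bitflags `contains` API that `Features` already exposes; the function and shader file names are illustrative only:

use wgpu_types as wgt;

/// Pick a shader variant depending on whether 64-bit integers are available.
fn shader_source(adapter_features: wgt::Features) -> &'static str {
    if adapter_features.contains(wgt::Features::SHADER_INT64) {
        // i64/u64 arithmetic can stay in the shader (Vulkan, DX12 via DXC,
        // Metal with MSL 2.3+, per the docs above).
        "sum_u64.wgsl"
    } else {
        // Fall back to emulating 64-bit values as pairs of u32.
        "sum_u32_pair.wgsl"
    }
}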
@@ -870,12 +891,12 @@ impl_bitflags!(Features);
impl Features {
/// Mask of all features which are part of the upstream WebGPU standard.
pub const fn all_webgpu_mask() -> Self {
- Self::from_bits_truncate(0x0000_0000_0000_FFFF)
+ Self::from_bits_truncate(0xFFFFF)
}
/// Mask of all features that are only available when targeting native (not web).
pub const fn all_native_mask() -> Self {
- Self::from_bits_truncate(0xFFFF_FFFF_FFFF_0000)
+ Self::from_bits_truncate(!Self::all_webgpu_mask().bits())
}
}
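A sketch of the invariant the renumbering above has to preserve, assuming the standard bitflags 2.x operations (`intersection`, `union`, `is_empty`, `all`) on `Features`; the check itself is hypothetical, not part of the patch:

use wgpu_types::Features;

/// The two masks should stay disjoint and together cover every defined flag.
fn masks_partition_all_features() {
    let web = Features::all_webgpu_mask();
    let native = Features::all_native_mask();
    assert!(web.intersection(native).is_empty());
    assert!(web.union(native) == Features::all());
}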
@@ -904,13 +925,15 @@ bitflags::bitflags! {
/// This mainly applies to a Vulkan driver's compliance version. If the major compliance version
/// is `0`, then the driver is ignored. This flag allows that driver to be enabled for testing.
const ALLOW_UNDERLYING_NONCOMPLIANT_ADAPTER = 1 << 3;
- /// Enable GPU-based validation. Currently, this only changes behavior on the DX12
- /// backend.
+ /// Enable GPU-based validation. Implies [`Self::VALIDATION`]. Currently, this only changes
+ /// behavior on the DX12 and Vulkan backends.
///
/// Supported platforms:
///
/// - D3D12; called ["GPU-based validation", or
/// "GBV"](https://web.archive.org/web/20230206120404/https://learn.microsoft.com/en-us/windows/win32/direct3d12/using-d3d12-debug-layer-gpu-based-validation)
+ /// - Vulkan, via the `VK_LAYER_KHRONOS_validation` layer; called ["GPU-Assisted
+ /// Validation"](https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/e45aeb85079e0835694cb8f03e6681fd18ae72c9/docs/gpu_validation.md#gpu-assisted-validation)
const GPU_BASED_VALIDATION = 1 << 4;
}
}
@@ -924,7 +947,12 @@ impl Default for InstanceFlags {
impl InstanceFlags {
/// Enable recommended debugging and validation flags.
pub fn debugging() -> Self {
- InstanceFlags::DEBUG | InstanceFlags::VALIDATION | InstanceFlags::GPU_BASED_VALIDATION
+ InstanceFlags::DEBUG | InstanceFlags::VALIDATION
+ }
+
+ /// Enable advanced debugging and validation flags (potentially very slow).
+ pub fn advanced_debugging() -> Self {
+ Self::debugging() | InstanceFlags::GPU_BASED_VALIDATION
}
/// Infer good defaults from the build type
@@ -1078,6 +1106,11 @@ pub struct Limits {
/// inter-stage communication (vertex outputs to fragment inputs). Defaults to 60.
/// Higher is "better".
pub max_inter_stage_shader_components: u32,
+ /// The maximum allowed number of color attachments.
+ pub max_color_attachments: u32,
+ /// The maximum number of bytes necessary to hold one sample (pixel or subpixel) of render
+ /// pipeline output data, across all color attachments.
+ pub max_color_attachment_bytes_per_sample: u32,
/// Maximum number of bytes used for workgroup memory in a compute entry point. Defaults to
/// 16352. Higher is "better".
pub max_compute_workgroup_storage_size: u32,
@@ -1139,6 +1172,8 @@ impl Default for Limits {
min_uniform_buffer_offset_alignment: 256,
min_storage_buffer_offset_alignment: 256,
max_inter_stage_shader_components: 60,
+ max_color_attachments: 8,
+ max_color_attachment_bytes_per_sample: 32,
max_compute_workgroup_storage_size: 16384,
max_compute_invocations_per_workgroup: 256,
max_compute_workgroup_size_x: 256,
@@ -1180,6 +1215,8 @@ impl Limits {
/// min_uniform_buffer_offset_alignment: 256,
/// min_storage_buffer_offset_alignment: 256,
/// max_inter_stage_shader_components: 60,
+ /// max_color_attachments: 8,
+ /// max_color_attachment_bytes_per_sample: 32,
/// max_compute_workgroup_storage_size: 16352,
/// max_compute_invocations_per_workgroup: 256,
/// max_compute_workgroup_size_x: 256,
@@ -1190,7 +1227,7 @@ impl Limits {
/// max_non_sampler_bindings: 1_000_000,
/// });
/// ```
- pub fn downlevel_defaults() -> Self {
+ pub const fn downlevel_defaults() -> Self {
Self {
max_texture_dimension_1d: 2048,
max_texture_dimension_2d: 2048,
@@ -1214,6 +1251,8 @@ impl Limits {
min_uniform_buffer_offset_alignment: 256,
min_storage_buffer_offset_alignment: 256,
max_inter_stage_shader_components: 60,
+ max_color_attachments: 8,
+ max_color_attachment_bytes_per_sample: 32,
max_compute_workgroup_storage_size: 16352,
max_compute_invocations_per_workgroup: 256,
max_compute_workgroup_size_x: 256,
@@ -1254,6 +1293,8 @@ impl Limits {
/// min_uniform_buffer_offset_alignment: 256,
/// min_storage_buffer_offset_alignment: 256,
/// max_inter_stage_shader_components: 31,
+ /// max_color_attachments: 8,
+ /// max_color_attachment_bytes_per_sample: 32,
/// max_compute_workgroup_storage_size: 0, // +
/// max_compute_invocations_per_workgroup: 0, // +
/// max_compute_workgroup_size_x: 0, // +
@@ -1264,7 +1305,7 @@ impl Limits {
/// max_non_sampler_bindings: 1_000_000,
/// });
/// ```
- pub fn downlevel_webgl2_defaults() -> Self {
+ pub const fn downlevel_webgl2_defaults() -> Self {
Self {
max_uniform_buffers_per_shader_stage: 11,
max_storage_buffers_per_shader_stage: 0,
@@ -1292,7 +1333,7 @@ impl Limits {
/// This is useful because the swapchain might need to be larger than any other image in the application.
///
/// If your application only needs 512x512, you might be running on a 4k display and need extremely high resolution limits.
- pub fn using_resolution(self, other: Self) -> Self {
+ pub const fn using_resolution(self, other: Self) -> Self {
Self {
max_texture_dimension_1d: other.max_texture_dimension_1d,
max_texture_dimension_2d: other.max_texture_dimension_2d,
@@ -1304,7 +1345,7 @@ impl Limits {
/// Modify the current limits to use the buffer alignment limits of the adapter.
///
/// This is useful for when you'd like to dynamically use the "best" supported buffer alignments.
- pub fn using_alignment(self, other: Self) -> Self {
+ pub const fn using_alignment(self, other: Self) -> Self {
Self {
min_uniform_buffer_offset_alignment: other.min_uniform_buffer_offset_alignment,
min_storage_buffer_offset_alignment: other.min_storage_buffer_offset_alignment,
@@ -2098,6 +2139,9 @@ pub struct PrimitiveState {
pub topology: PrimitiveTopology,
/// When drawing strip topologies with indices, this is the required format for the index buffer.
/// This has no effect on non-indexed or non-strip draws.
+ ///
+ /// Specifying this value enables primitive restart, allowing individual strips to be separated
+ /// with the index value `0xFFFF` when using `Uint16`, or `0xFFFFFFFF` when using `Uint32`.
#[cfg_attr(feature = "serde", serde(default))]
pub strip_index_format: Option<IndexFormat>,
/// The face to consider the front for the purpose of culling and stencil operations.
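A short illustration of the primitive-restart behaviour described for `strip_index_format` above, assuming `Uint16` indices; the index data and strip layout are made up for the example:

use wgpu_types::{IndexFormat, PrimitiveState, PrimitiveTopology};

fn triangle_strip_state() -> PrimitiveState {
    PrimitiveState {
        topology: PrimitiveTopology::TriangleStrip,
        // Required for indexed strip draws; also selects the restart value.
        strip_index_format: Some(IndexFormat::Uint16),
        ..Default::default()
    }
}

// With Uint16 indices, 0xFFFF splits one index buffer into two strips:
// [0, 1, 2, 3] and [4, 5, 6, 7].
const STRIP_INDICES: &[u16] = &[0, 1, 2, 3, 0xFFFF, 4, 5, 6, 7];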
@@ -3522,6 +3566,87 @@ impl TextureFormat {
}
}
+ /// The number of bytes occupied per pixel in a color attachment
+ /// <https://gpuweb.github.io/gpuweb/#render-target-pixel-byte-cost>
+ pub fn target_pixel_byte_cost(&self) -> Option<u32> {
+ match *self {
+ Self::R8Unorm | Self::R8Uint | Self::R8Sint => Some(1),
+ Self::Rg8Unorm
+ | Self::Rg8Uint
+ | Self::Rg8Sint
+ | Self::R16Uint
+ | Self::R16Sint
+ | Self::R16Float => Some(2),
+ Self::Rgba8Uint
+ | Self::Rgba8Sint
+ | Self::Rg16Uint
+ | Self::Rg16Sint
+ | Self::Rg16Float
+ | Self::R32Uint
+ | Self::R32Sint
+ | Self::R32Float => Some(4),
+ Self::Rgba8Unorm
+ | Self::Rgba8UnormSrgb
+ | Self::Bgra8Unorm
+ | Self::Bgra8UnormSrgb
+ | Self::Rgba16Uint
+ | Self::Rgba16Sint
+ | Self::Rgba16Float
+ | Self::Rg32Uint
+ | Self::Rg32Sint
+ | Self::Rg32Float
+ | Self::Rgb10a2Uint
+ | Self::Rgb10a2Unorm
+ | Self::Rg11b10Float => Some(8),
+ Self::Rgba32Uint | Self::Rgba32Sint | Self::Rgba32Float => Some(16),
+ Self::Rgba8Snorm | Self::Rg8Snorm | Self::R8Snorm => None,
+ _ => None,
+ }
+ }
+
+ /// See <https://gpuweb.github.io/gpuweb/#render-target-component-alignment>
+ pub fn target_component_alignment(&self) -> Option<u32> {
+ match self {
+ Self::R8Unorm
+ | Self::R8Snorm
+ | Self::R8Uint
+ | Self::R8Sint
+ | Self::Rg8Unorm
+ | Self::Rg8Snorm
+ | Self::Rg8Uint
+ | Self::Rg8Sint
+ | Self::Rgba8Unorm
+ | Self::Rgba8UnormSrgb
+ | Self::Rgba8Snorm
+ | Self::Rgba8Uint
+ | Self::Rgba8Sint
+ | Self::Bgra8Unorm
+ | Self::Bgra8UnormSrgb => Some(1),
+ Self::R16Uint
+ | Self::R16Sint
+ | Self::R16Float
+ | Self::Rg16Uint
+ | Self::Rg16Sint
+ | Self::Rg16Float
+ | Self::Rgba16Uint
+ | Self::Rgba16Sint
+ | Self::Rgba16Float => Some(2),
+ Self::R32Uint
+ | Self::R32Sint
+ | Self::R32Float
+ | Self::Rg32Uint
+ | Self::Rg32Sint
+ | Self::Rg32Float
+ | Self::Rgba32Uint
+ | Self::Rgba32Sint
+ | Self::Rgba32Float
+ | Self::Rgb10a2Uint
+ | Self::Rgb10a2Unorm
+ | Self::Rg11b10Float => Some(4),
+ _ => None,
+ }
+ }
+
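A sketch of how the two helpers above combine to compute the quantity bounded by the new `max_color_attachment_bytes_per_sample` limit, following the algorithm linked from their doc comments (align the running total to each format's component alignment, then add its byte cost); the free function is illustrative, not part of this patch:

use wgpu_types::TextureFormat;

fn color_attachment_bytes_per_sample<I>(formats: I) -> u32
where
    I: IntoIterator<Item = Option<TextureFormat>>,
{
    let mut total = 0u32;
    for format in formats.into_iter().flatten() {
        // Non-renderable formats return None from both helpers; a real
        // validator would have rejected them before reaching this point.
        let alignment = format.target_component_alignment().unwrap_or(1);
        let cost = format.target_pixel_byte_cost().unwrap_or(0);
        // Round the running total up to this attachment's alignment,
        // then add its byte cost.
        total = (total + alignment - 1) / alignment * alignment;
        total += cost;
    }
    total
}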
/// Returns the number of components this format has.
pub fn components(&self) -> u8 {
self.components_with_aspect(TextureAspect::All)