summaryrefslogtreecommitdiffstats
path: root/vendor/gix-pack
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/gix-pack')
-rw-r--r--vendor/gix-pack/.cargo-checksum.json2
-rw-r--r--vendor/gix-pack/CHANGELOG.md355
-rw-r--r--vendor/gix-pack/Cargo.toml40
-rw-r--r--vendor/gix-pack/src/bundle/find.rs23
-rw-r--r--vendor/gix-pack/src/bundle/init.rs2
-rw-r--r--vendor/gix-pack/src/bundle/mod.rs16
-rw-r--r--vendor/gix-pack/src/bundle/write/mod.rs48
-rw-r--r--vendor/gix-pack/src/cache/delta/from_offsets.rs8
-rw-r--r--vendor/gix-pack/src/cache/delta/mod.rs8
-rw-r--r--vendor/gix-pack/src/cache/delta/traverse/mod.rs21
-rw-r--r--vendor/gix-pack/src/cache/delta/traverse/resolve.rs117
-rw-r--r--vendor/gix-pack/src/cache/object.rs2
-rw-r--r--vendor/gix-pack/src/data/entry/decode.rs8
-rw-r--r--vendor/gix-pack/src/data/entry/header.rs4
-rw-r--r--vendor/gix-pack/src/data/file/decode/entry.rs76
-rw-r--r--vendor/gix-pack/src/data/file/decode/header.rs15
-rw-r--r--vendor/gix-pack/src/data/file/verify.rs5
-rw-r--r--vendor/gix-pack/src/data/input/bytes_to_entries.rs71
-rw-r--r--vendor/gix-pack/src/data/input/entries_to_bytes.rs5
-rw-r--r--vendor/gix-pack/src/data/input/entry.rs2
-rw-r--r--vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs12
-rw-r--r--vendor/gix-pack/src/data/mod.rs2
-rw-r--r--vendor/gix-pack/src/data/output/count/mod.rs2
-rw-r--r--vendor/gix-pack/src/data/output/count/objects/mod.rs138
-rw-r--r--vendor/gix-pack/src/data/output/count/objects/reduce.rs23
-rw-r--r--vendor/gix-pack/src/data/output/count/objects/types.rs10
-rw-r--r--vendor/gix-pack/src/data/output/entry/iter_from_counts.rs27
-rw-r--r--vendor/gix-pack/src/data/output/entry/mod.rs9
-rw-r--r--vendor/gix-pack/src/find.rs21
-rw-r--r--vendor/gix-pack/src/find_traits.rs82
-rw-r--r--vendor/gix-pack/src/index/access.rs11
-rw-r--r--vendor/gix-pack/src/index/encode.rs158
-rw-r--r--vendor/gix-pack/src/index/mod.rs2
-rw-r--r--vendor/gix-pack/src/index/traverse/mod.rs35
-rw-r--r--vendor/gix-pack/src/index/traverse/with_index.rs38
-rw-r--r--vendor/gix-pack/src/index/traverse/with_lookup.rs32
-rw-r--r--vendor/gix-pack/src/index/util.rs30
-rw-r--r--vendor/gix-pack/src/index/verify.rs28
-rw-r--r--vendor/gix-pack/src/index/write/encode.rs124
-rw-r--r--vendor/gix-pack/src/index/write/mod.rs31
-rw-r--r--vendor/gix-pack/src/multi_index/access.rs4
-rw-r--r--vendor/gix-pack/src/multi_index/chunk.rs16
-rw-r--r--vendor/gix-pack/src/multi_index/verify.rs48
-rw-r--r--vendor/gix-pack/src/multi_index/write.rs37
-rw-r--r--vendor/gix-pack/src/verify.rs4
45 files changed, 1034 insertions, 718 deletions
diff --git a/vendor/gix-pack/.cargo-checksum.json b/vendor/gix-pack/.cargo-checksum.json
index 9fa31bc91..7598cab91 100644
--- a/vendor/gix-pack/.cargo-checksum.json
+++ b/vendor/gix-pack/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"03315402ee906c24a2e0c846a7246c36fe29f37119f447f149d6c4d5030e54f1","Cargo.toml":"75f30d0efeddf596a1145e39ad38a21a3b207b28ce062691e7edfedcf7aae447","LICENSE-APACHE":"cb4780590812826851ba250f90bed0ed19506ec98f6865a0e2e20bbf62391ff9","LICENSE-MIT":"49df47913ab2beafe8dc45607877ae64198bf0eee64aaad3e82ed9e4d27424e8","src/bundle/find.rs":"f8b2fb56e6b898b7da15cfb278bf943641244eb0e5a630d56b7146eb7029911f","src/bundle/init.rs":"6067e968404f52dcd38ed5c6c0114f880e9ff7bd0f883a07b7546e22678aabdc","src/bundle/mod.rs":"e4ee9d88ba45da113f0d9081535a3218bb3a3ebdb51ca7af52fe92fd33021b00","src/bundle/write/error.rs":"f978b1622ac1354f44a7160d2439d5ac697dd92b67631d8c846ae2621be489ae","src/bundle/write/mod.rs":"1d191d9420a9f0fc59918fbf451521677da94680357dcb78bb61f4cd05dfdb0b","src/bundle/write/types.rs":"faef010d8680b85b08ad2aaca291fed1e07ddf00a5f407f1c60d657b0ef6ed5b","src/cache/delta/from_offsets.rs":"7e85dc125810ab0c01a2aac0f846b540377e0f47fe62d31c2277410264fb6468","src/cache/delta/mod.rs":"d2d67cc96fb8e0fe48bc6eabdc2846c448f8bb297ad0512e0dfc40d54205b3cb","src/cache/delta/traverse/mod.rs":"dff2a3fe900e9f8b5dd5d49e461e48eed7d73331eaa22bdc199ee6faa20fd599","src/cache/delta/traverse/resolve.rs":"0748b96962ba415679808597a9e0cc0a2552a756e5785895c5903378d4bb6690","src/cache/delta/traverse/util.rs":"eced32d724ad66766b5c9933d0324ef351436740266a85491238ec8d26010b7d","src/cache/lru.rs":"557043f806f754fadae96199d2261a8df184f86bf0dd525efe091c5fa3a793da","src/cache/mod.rs":"1fd545e5944ec378f5a89825840fc869684ca571ea7859213ea38d674e991fb1","src/cache/object.rs":"5b796f4b1776290a56f258a99132d2aad2fd65851de0c2476fb7766a2325931e","src/data/delta.rs":"915c10207f5897d473cc806ae31de98e90f41e5e99361e18cb1e42177137c024","src/data/entry/decode.rs":"629abef1cd8352bb57c34ff1b6282e6387a12ec67372b823e0d0dda4caafd901","src/data/entry/header.rs":"55ca90e93c4b39d09b19bfe10dbb74792258cd5f73df29005b60dfc671669024","src/data/entry/mod.rs":"543c2ed9985bdc7077655deaaeb4d1745d03ae74db03842ae97e2c
0fb4880db7","src/data/file/decode/entry.rs":"21a52cd2e12c8464e1b53f92a8c4724215ab90aacb29ec6ca7a98da74048c309","src/data/file/decode/header.rs":"374bfe93eec705204eb5cb1f3cc97736160f4f05eb629928768af73b058c9361","src/data/file/decode/mod.rs":"bed7398ba3c6a48d2c7af96a2044ee56ba3d9e757995e06493517db27c743712","src/data/file/init.rs":"16a594f4245be493e3ca61fabe730d796e1d2235de7c0b38609e394df9737c86","src/data/file/mod.rs":"4b6a8a99a4864365592733836d654cc30ab717bf8a69569d02fac3ef0b88fac2","src/data/file/verify.rs":"20aea340799f68087aa196be574fe48cff25047cd1dfdaa99b1fb2f60f4319d9","src/data/header.rs":"cc86b9b6d45c7764217bcea615e5640fbbb4af0d17d25cc24073d48d2b9fd206","src/data/input/bytes_to_entries.rs":"d60b835cb1c4e25b98faac8c1e48d1830352fca81e1b019b6c7ee784b8ab4f7b","src/data/input/entries_to_bytes.rs":"01f23c0cf5822c1f3f5888d287a0f03f1e67dc519d83061ccbca9c5a38adfff0","src/data/input/entry.rs":"9e9d9f2a696e084e71a7a50d85b85003647e4a761f281b3f0623333454d5352a","src/data/input/lookup_ref_delta_objects.rs":"f410de41c7031c5d65b7300ef61f44555b114acf0887432f2f25c64ea320749a","src/data/input/mod.rs":"386167c1f33fad1b20718c8664a7bd2984c498affaffa29cc54dff13a5d9f3b8","src/data/input/types.rs":"b48be6950d83ebb4019a5a53ced7fa37b6763f4f50510230fce90761ca85d7ae","src/data/mod.rs":"2e65995227c8745b4a62695cf434f0efbf5e136810cf4ac3c1ee09c9e8e210f0","src/data/output/bytes.rs":"3fa26f3197bc8cee5fb141cd67c3f233481350422f9acfcb3138f97d58792238","src/data/output/count/mod.rs":"c836960b9952d3982cf575cfbf99035d28625d9802e397906ecc8e0935df0f8d","src/data/output/count/objects/mod.rs":"0477195322c61b65477602e2a29e38c696d7ab6c15c5ea22412dac55c9d6c693","src/data/output/count/objects/reduce.rs":"22371344975483bfd8b3a3dec67cd290a1cb526967b0c52032f817bcdba96014","src/data/output/count/objects/tree.rs":"7d6bfbe55d32c3c13fe1c9004e0671a8fc7379d73985ef69da0d1d2d153315e3","src/data/output/count/objects/types.rs":"b53d36dae89d094d66c622f0f816c412f6401126f7e940caf34856b48e776a4e","src/data/output/count/obj
ects/util.rs":"f018375fa5d1dbf48a714f99768b6ab45e5403a2ed59b70829dae172c65c187b","src/data/output/entry/iter_from_counts.rs":"58c9cc17b7ea728b781c4f30a0a5ebc72601ccf1cc4271492584bfce015db032","src/data/output/entry/mod.rs":"d1b9b4480bdfb1821c3a3153a5f3c880584a2f0a46a65a187ce676406b6b03fa","src/data/output/mod.rs":"ac037fc6111d8f32f1d807bb3d34c2af351b99bdc97c600992d64c90fdb4fa1e","src/find.rs":"9a5025b69728b6ce2358a9234bee46f4d20dcc2dcd0925faaada8c71d0c13741","src/find_traits.rs":"04cf9445ff46a29cb4f9c91db846cf4288b341671d60f3362bdd0e4f36c87a01","src/index/access.rs":"7725c5c810ebc9d93f019913829b02920bbfa1db730e04ad66381280463f63d8","src/index/init.rs":"d25b0865859f505fba4b02437faad6f02b264585906c9cdc1743b64f7f238148","src/index/mod.rs":"5b4539665da73770f76628863ec3c1d4532f6033cd272a6c0fb7ea88856d2570","src/index/traverse/error.rs":"d520a384e3737ac973a9d84cf6dbd7ebda6f459d26560e6e40555eacede8c7f6","src/index/traverse/mod.rs":"c40f4e7f7d65054b6bc7c48769b9a6bae00b8710732cf45a7fa36c445a248509","src/index/traverse/reduce.rs":"0f3614232888f66c5ad13552875ced211c79dad656093e080b16bfc25ff5d7b1","src/index/traverse/types.rs":"a9ad1e3b6944d62c38fcde4823a46809736516e9753dc16d15f38ac2bb74041e","src/index/traverse/with_index.rs":"d9c8d4deecdc56a76deee808981e3e12d7ad37c23e327a9ca5f5b777f4da18b8","src/index/traverse/with_lookup.rs":"39a14d6f0e8235730d489365ff1acf784fad99a779326bab1215229f03b8e2e5","src/index/util.rs":"546454f39d469b2b1cca384822e3004a48b9c6a91b899cce83b5759026911419","src/index/verify.rs":"42ec54de729f176e22bb891950d0ce0e96880730b22e3bcb2a57e0e37f4c2b0e","src/index/write/encode.rs":"250c29516aad527156951280c8fbe263f6c0da4d98899a88a20f8e0187fd11f8","src/index/write/error.rs":"5294efe0508252d5942183fa5ab5139dc10e33ccfb28698a6101fc360990d688","src/index/write/mod.rs":"471f5a1a91b3d4cc6856675f96933eaf5781b3c79b23aaadb01925bdd8b239fa","src/lib.rs":"7b72df6596c2501d8bb9c0bde09620e6b497ce4561191c59eae0d4229356d97b","src/multi_index/access.rs":"5c6b309147423c27defc2f7fbd194
a4c2d60e7c9d9f77b2fccfcb647ef426140","src/multi_index/chunk.rs":"7f5506391c4ed94edb83844de67bdbf98a12b28f1ae4e0947e6ad2f6488e3fa3","src/multi_index/init.rs":"290daf86cfe21127a27c5bea49d3b1ef4812bde968ff30b36e4cef278bc513c9","src/multi_index/mod.rs":"38ac324b03c7ed2a7a3d169ff1997a7799f343c86bf9b5b026be00def7685fc9","src/multi_index/verify.rs":"b165566234f53abde696b741843000f55a5699c90d38e03173fa6f58279c4b3f","src/multi_index/write.rs":"133044bad102aceaf8d1ab2e2af239b6466a74011904a74cefec547942b71f1f","src/verify.rs":"5e5d9decdbfb46963b5589dd49d76079e28a8aa6575d20d078492a7f2d50bad9"},"package":"7d2a14cb3156037eedb17d6cb7209b7180522b8949b21fd0fe3184c0a1d0af88"} \ No newline at end of file
+{"files":{"CHANGELOG.md":"f0292e4c78da3bdeb48d06f5b481618fa52c3028682e4173e8c358a95c65c4a2","Cargo.toml":"121d2cc62e872849df3fb6ac3af2982417ce56f01ba31157010d16c9f8c38c6f","LICENSE-APACHE":"cb4780590812826851ba250f90bed0ed19506ec98f6865a0e2e20bbf62391ff9","LICENSE-MIT":"49df47913ab2beafe8dc45607877ae64198bf0eee64aaad3e82ed9e4d27424e8","src/bundle/find.rs":"f6ac1a7b11c13ee8eb1eda5a3a967f99bdc49af9d0869354b6fa617d0c749837","src/bundle/init.rs":"78582570130e4b7e67bbf01395317ec4727be7cee01d310d07180bb79f61ea1d","src/bundle/mod.rs":"cc6b3794389ce9ed3b99a4da20e7cd1ee0e027ac2032b6da08f829c185608bc6","src/bundle/write/error.rs":"f978b1622ac1354f44a7160d2439d5ac697dd92b67631d8c846ae2621be489ae","src/bundle/write/mod.rs":"9e0a3cae664583dfed7c93606d72e9fbb25637cdd99f8476a04a5e7cb558fd29","src/bundle/write/types.rs":"faef010d8680b85b08ad2aaca291fed1e07ddf00a5f407f1c60d657b0ef6ed5b","src/cache/delta/from_offsets.rs":"21b6ae005f11441a6be09c5939db4c047e142d8369167d2b775ef084172a968a","src/cache/delta/mod.rs":"52e7aa488fc2eb478ac91be0c6f4179e78c9620094b3a0d9adee03b776b1636e","src/cache/delta/traverse/mod.rs":"43a7854d1bc10cfb5fccedc9686d6cdfcbe1aae2dd712731bee6b701fc0a3a2a","src/cache/delta/traverse/resolve.rs":"b432abdafb1a131413dcd2688cd57549ffcec99c13933c057fe354c776226186","src/cache/delta/traverse/util.rs":"eced32d724ad66766b5c9933d0324ef351436740266a85491238ec8d26010b7d","src/cache/lru.rs":"557043f806f754fadae96199d2261a8df184f86bf0dd525efe091c5fa3a793da","src/cache/mod.rs":"1fd545e5944ec378f5a89825840fc869684ca571ea7859213ea38d674e991fb1","src/cache/object.rs":"abbdd67a9f101d9df707057fcbd5fe23e1464d0e312807348bfe610c64a410c5","src/data/delta.rs":"915c10207f5897d473cc806ae31de98e90f41e5e99361e18cb1e42177137c024","src/data/entry/decode.rs":"c3fa9509fcb93b455d679366d390b660b2807a1a7aa743b3882acd22d5bdeb8f","src/data/entry/header.rs":"5b1e5485215f203031d2762b13ed7915367f4c2f86fd1cdb5adfa91a06b62198","src/data/entry/mod.rs":"543c2ed9985bdc7077655deaaeb4d1745d03ae74db03842ae97e2c
0fb4880db7","src/data/file/decode/entry.rs":"6399d1d9a4d71180d0216af1e9d4efcc3913a62736588ff6274ed69d191decff","src/data/file/decode/header.rs":"2e1bf2bd6191d16ecb2b835386a43f2d79a8ebbf9c1ed3d8a57d7845fdeaa2bc","src/data/file/decode/mod.rs":"bed7398ba3c6a48d2c7af96a2044ee56ba3d9e757995e06493517db27c743712","src/data/file/init.rs":"16a594f4245be493e3ca61fabe730d796e1d2235de7c0b38609e394df9737c86","src/data/file/mod.rs":"4b6a8a99a4864365592733836d654cc30ab717bf8a69569d02fac3ef0b88fac2","src/data/file/verify.rs":"8deb230289e2cd29b4af49d777ce84731fcb216194df157ab1fc76c9d60bfd84","src/data/header.rs":"cc86b9b6d45c7764217bcea615e5640fbbb4af0d17d25cc24073d48d2b9fd206","src/data/input/bytes_to_entries.rs":"97a4ccc355a50603c8e301edbdeeb33829438b82bc4e68925fae1691994f2c00","src/data/input/entries_to_bytes.rs":"f13bc7a0637182a4c9588dcfa4ab8bb9d88c64f037c33991fd0c9dc36bd324c7","src/data/input/entry.rs":"84c2278258981685bb927d5dbd2cc7b981c371a66f22a86f22772a5c11535bc4","src/data/input/lookup_ref_delta_objects.rs":"12aca3bfe7546f65e6ce9dff454c69e3c55bc6bd23df65bfb469f088a109d215","src/data/input/mod.rs":"386167c1f33fad1b20718c8664a7bd2984c498affaffa29cc54dff13a5d9f3b8","src/data/input/types.rs":"b48be6950d83ebb4019a5a53ced7fa37b6763f4f50510230fce90761ca85d7ae","src/data/mod.rs":"09e4550448b99015accdab97926c7672d5a0c84caee349b105d5be6c7fd7597d","src/data/output/bytes.rs":"3fa26f3197bc8cee5fb141cd67c3f233481350422f9acfcb3138f97d58792238","src/data/output/count/mod.rs":"12fd6fb32b1467467b4d92a458d6b5017644f4c7e35098553f66c618b8a71a1d","src/data/output/count/objects/mod.rs":"70ac448eae2d3b0d4491922bbb3ad25813d404441a41e7617c3464d918dc83e3","src/data/output/count/objects/reduce.rs":"a02c32cfc1b62b07072123c922ff5790b293a5e2e87f6f1c12db806521418ecb","src/data/output/count/objects/tree.rs":"7d6bfbe55d32c3c13fe1c9004e0671a8fc7379d73985ef69da0d1d2d153315e3","src/data/output/count/objects/types.rs":"00864c5daec66e5b58d814b5fc2151e1d046a6eb84a5d175f84c165ef93cb6ba","src/data/output/count/obj
ects/util.rs":"f018375fa5d1dbf48a714f99768b6ab45e5403a2ed59b70829dae172c65c187b","src/data/output/entry/iter_from_counts.rs":"bf5d4bb30673dd0729a8057ff6cf99e9dc606ea054e4ff9cd271b0dfeddec8c7","src/data/output/entry/mod.rs":"0352e7acbd09ca868bc7dd51b1bbb12d5417d5fa8282cece52d6f77174979678","src/data/output/mod.rs":"ac037fc6111d8f32f1d807bb3d34c2af351b99bdc97c600992d64c90fdb4fa1e","src/find.rs":"380c4d0e77c73486650e0c8b1b661df9e810cd356fbf71efaf7ba4b0c518304c","src/find_traits.rs":"2b978fab38dda58f3e32dd0e352a7cb59fbf282ab22110a37cc02e76ea2302d0","src/index/access.rs":"28605818e941df33ec7835759a09146e8fc5a05027521f81e1612d35e04b8d10","src/index/encode.rs":"91dfbd431439e5e19f763653b3aa7217096c8b16c8ec2512a97666643fa9f90e","src/index/init.rs":"d25b0865859f505fba4b02437faad6f02b264585906c9cdc1743b64f7f238148","src/index/mod.rs":"40bdc4fc5ae804abe993c73f5c44fa2709aa63f93b01426cf012fb0167820c71","src/index/traverse/error.rs":"d520a384e3737ac973a9d84cf6dbd7ebda6f459d26560e6e40555eacede8c7f6","src/index/traverse/mod.rs":"64cf4783cab2572c164e0400519854cb6797d020cf4fb8047f48ab45fa0a7de5","src/index/traverse/reduce.rs":"0f3614232888f66c5ad13552875ced211c79dad656093e080b16bfc25ff5d7b1","src/index/traverse/types.rs":"a9ad1e3b6944d62c38fcde4823a46809736516e9753dc16d15f38ac2bb74041e","src/index/traverse/with_index.rs":"105461d0787e55df10208b217b30f1dd307a8adaac841616fa9c37597adfeb97","src/index/traverse/with_lookup.rs":"4689744f43581b3f3dd462c8a9e64b89f2e13d7d26dcce72310d514b68d0e0c6","src/index/util.rs":"de1ab39c296dc74c01edcd5ec7a9ce872acbd07aed796b8b06310d1d6e0893ea","src/index/verify.rs":"050a10ec67594fda1bf28f9d7593a58725c3d59fafa61ef5f63e522bd9408e7d","src/index/write/error.rs":"5294efe0508252d5942183fa5ab5139dc10e33ccfb28698a6101fc360990d688","src/index/write/mod.rs":"8117fe768a417b1d8fe7643fae0427be354c26d80d787c62cd6e714599207e3f","src/lib.rs":"7b72df6596c2501d8bb9c0bde09620e6b497ce4561191c59eae0d4229356d97b","src/multi_index/access.rs":"53ce7098eb88d4f847477941cd75bf01ab2
7f71e260f449eceb7f52ff9f95b61","src/multi_index/chunk.rs":"842f8b85b7c01293dad0eb6e786880e1acf89c9ea570bcd8f890776d9b4be0ac","src/multi_index/init.rs":"290daf86cfe21127a27c5bea49d3b1ef4812bde968ff30b36e4cef278bc513c9","src/multi_index/mod.rs":"38ac324b03c7ed2a7a3d169ff1997a7799f343c86bf9b5b026be00def7685fc9","src/multi_index/verify.rs":"ce63542ff4b8ee35a04051813bc1a19187b78a72f60d958787d6316deb822592","src/multi_index/write.rs":"3168e8a2c5100942b0f694719cf16fa55a7201e4ad436d61b285133d2ab415c1","src/verify.rs":"74a644a2db5b643f21a0927b0c75c99359b9d42242ed4089fe10069b17067df7"},"package":"7536203a45b31e1bc5694bbf90ba8da1b736c77040dd6a520db369f371eb1ab3"} \ No newline at end of file
diff --git a/vendor/gix-pack/CHANGELOG.md b/vendor/gix-pack/CHANGELOG.md
index dad7f017f..eb29fb276 100644
--- a/vendor/gix-pack/CHANGELOG.md
+++ b/vendor/gix-pack/CHANGELOG.md
@@ -5,6 +5,358 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## 0.43.0 (2023-09-24)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 1 commit contributed to the release.
+ - 16 days passed between releases.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Prepare changelogs prior to release ([`8a60d5b`](https://github.com/Byron/gitoxide/commit/8a60d5b80877c213c3b646d3061e8a33e0e433ec))
+</details>
+
+## 0.42.0 (2023-09-08)
+
+<csr-id-ed327f6163f54756e58c20f86a563a97efb256ca/>
+
+### New Features
+
+ - <csr-id-0357b6cdb11b098fd54cd0c3df6c617d0b44a1c2/> Add `generate` and `streaming-input` feature toggles.
+ That way, it's possible to not compile a bunch of code in `gix` if
+ the writing of packs isn't required.
+
+### Bug Fixes
+
+ - <csr-id-96a07e08e6090222cf398b46aa8d46b56f81f14d/> Use `Vec::resize()` instead of set_len()
+ Otherwise it's possible for uninitialized memory to be used as if it was initialized,
+ which can lead to strange behaviour.
+
+ As the buffer is re-used, it's not actually zeroing that much memory either.
+
+### Chore (BREAKING)
+
+ - <csr-id-ed327f6163f54756e58c20f86a563a97efb256ca/> update to the latest `prodash`
+ It makes proper usage of `Progress` types easier and allows them to be used
+ as `dyn` traits as well.
+
+### New Features (BREAKING)
+
+ - <csr-id-24dd870919ba444aa8099c63a78ea120d47ec28e/> use `prodash::Count` to indicate that nothing more than counting is performed, in place of `prodash::Progress`
+ - <csr-id-4b74996b19176cb0f00860b3db5a27819d63e7d0/> Make usage of decompression context explicit.
+ That way, the context can be reused which is more efficient than recreating
+ it from scratch for every little delta to decompress.
+
+ This leads to a performance gain of 1.3%.
+
+### Bug Fixes (BREAKING)
+
+ - <csr-id-072ee32f693a31161cd6a843da6582d13efbb20b/> use `dyn` trait where possible.
+ This reduces compile time due to avoiding duplication.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 13 commits contributed to the release over the course of 17 calendar days.
+ - 17 days passed between releases.
+ - 6 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-date v0.8.0, gix-hash v0.13.0, gix-features v0.34.0, gix-actor v0.26.0, gix-object v0.36.0, gix-path v0.10.0, gix-glob v0.12.0, gix-attributes v0.18.0, gix-packetline-blocking v0.16.6, gix-filter v0.4.0, gix-fs v0.6.0, gix-commitgraph v0.20.0, gix-hashtable v0.4.0, gix-revwalk v0.7.0, gix-traverse v0.32.0, gix-worktree-stream v0.4.0, gix-archive v0.4.0, gix-config-value v0.14.0, gix-tempfile v9.0.0, gix-lock v9.0.0, gix-ref v0.36.0, gix-sec v0.10.0, gix-config v0.29.0, gix-prompt v0.7.0, gix-url v0.23.0, gix-credentials v0.19.0, gix-diff v0.35.0, gix-discover v0.24.0, gix-ignore v0.7.0, gix-index v0.24.0, gix-macros v0.1.0, gix-mailmap v0.18.0, gix-negotiate v0.7.0, gix-pack v0.42.0, gix-odb v0.52.0, gix-pathspec v0.2.0, gix-packetline v0.16.6, gix-transport v0.36.0, gix-protocol v0.39.0, gix-revision v0.21.0, gix-refspec v0.17.0, gix-submodule v0.3.0, gix-worktree v0.25.0, gix-worktree-state v0.2.0, gix v0.53.0, safety bump 39 crates ([`8bd0456`](https://github.com/Byron/gitoxide/commit/8bd045676bb2cdc02624ab93e73ff8518064ca38))
+ - Prepare changelogs for release ([`375db06`](https://github.com/Byron/gitoxide/commit/375db06a8442378c3f7a922fae38e2a6694d9d04))
+ - Merge branch 'optimizations' ([`6135a5e`](https://github.com/Byron/gitoxide/commit/6135a5ea8709646f01da62939a59dd3a9750e007))
+ - Add `generate` and `streaming-input` feature toggles. ([`0357b6c`](https://github.com/Byron/gitoxide/commit/0357b6cdb11b098fd54cd0c3df6c617d0b44a1c2))
+ - Merge branch `dyn`ification ([`f658fcc`](https://github.com/Byron/gitoxide/commit/f658fcc52dc2200ae34ca53dc10be97fb9012057))
+ - Use `dyn` trait where possible. ([`072ee32`](https://github.com/Byron/gitoxide/commit/072ee32f693a31161cd6a843da6582d13efbb20b))
+ - Use `prodash::Count` to indicate that nothing more than counting is performed, in place of `prodash::Progress` ([`24dd870`](https://github.com/Byron/gitoxide/commit/24dd870919ba444aa8099c63a78ea120d47ec28e))
+ - Update to the latest `prodash` ([`ed327f6`](https://github.com/Byron/gitoxide/commit/ed327f6163f54756e58c20f86a563a97efb256ca))
+ - Switch the last crate (gix-package-tests) to edition 2021 ([`04494c6`](https://github.com/Byron/gitoxide/commit/04494c65df05eef6b24acb68faae939a3d85f510))
+ - Merge branch 'perf-and-safety' ([`9ad9c5b`](https://github.com/Byron/gitoxide/commit/9ad9c5b1cfa3afff5273558b6ef98ca4714d4272))
+ - Use `Vec::resize()` instead of set_len() ([`96a07e0`](https://github.com/Byron/gitoxide/commit/96a07e08e6090222cf398b46aa8d46b56f81f14d))
+ - Make usage of decompression context explicit. ([`4b74996`](https://github.com/Byron/gitoxide/commit/4b74996b19176cb0f00860b3db5a27819d63e7d0))
+ - Merge branch 'gix-submodule' ([`363ee77`](https://github.com/Byron/gitoxide/commit/363ee77400805f473c9ad66eadad9214e7ab66f4))
+</details>
+
+## 0.41.0 (2023-08-22)
+
+<csr-id-93feea269eebd114e866e6f29f4a73c0096df9e0/>
+
+### Chore
+
+ - <csr-id-93feea269eebd114e866e6f29f4a73c0096df9e0/> split tests off into their own crate to allow feature toggles.
+ That way we can test with the `parallel` feature and won't have to
+ create bogus feature toggles that are only used for testing, yet visbible
+ to users.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 7 commits contributed to the release over the course of 15 calendar days.
+ - 29 days passed between releases.
+ - 1 commit was understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-url v0.22.0, gix-credentials v0.18.0, gix-diff v0.34.0, gix-discover v0.23.0, gix-ignore v0.6.0, gix-bitmap v0.2.7, gix-index v0.22.0, gix-mailmap v0.17.0, gix-negotiate v0.6.0, gix-pack v0.41.0, gix-odb v0.51.0, gix-pathspec v0.1.0, gix-packetline v0.16.5, gix-transport v0.35.0, gix-protocol v0.38.0, gix-revision v0.20.0, gix-refspec v0.16.0, gix-submodule v0.2.0, gix-worktree v0.24.0, gix-worktree-state v0.1.0, gix v0.52.0, gitoxide-core v0.31.0, gitoxide v0.29.0 ([`6c62e74`](https://github.com/Byron/gitoxide/commit/6c62e748240ac0980fc23fdf30f8477dea8b9bc3))
+ - Release gix-date v0.7.3, gix-hash v0.12.0, gix-features v0.33.0, gix-actor v0.25.0, gix-object v0.35.0, gix-path v0.9.0, gix-glob v0.11.0, gix-quote v0.4.7, gix-attributes v0.17.0, gix-command v0.2.9, gix-packetline-blocking v0.16.5, gix-filter v0.3.0, gix-fs v0.5.0, gix-commitgraph v0.19.0, gix-hashtable v0.3.0, gix-revwalk v0.6.0, gix-traverse v0.31.0, gix-worktree-stream v0.3.0, gix-archive v0.3.0, gix-config-value v0.13.0, gix-tempfile v8.0.0, gix-lock v8.0.0, gix-ref v0.35.0, gix-sec v0.9.0, gix-config v0.28.0, gix-prompt v0.6.0, gix-url v0.22.0, gix-credentials v0.18.0, gix-diff v0.34.0, gix-discover v0.23.0, gix-ignore v0.6.0, gix-bitmap v0.2.7, gix-index v0.22.0, gix-mailmap v0.17.0, gix-negotiate v0.6.0, gix-pack v0.41.0, gix-odb v0.51.0, gix-pathspec v0.1.0, gix-packetline v0.16.5, gix-transport v0.35.0, gix-protocol v0.38.0, gix-revision v0.20.0, gix-refspec v0.16.0, gix-submodule v0.2.0, gix-worktree v0.24.0, gix-worktree-state v0.1.0, gix v0.52.0, gitoxide-core v0.31.0, gitoxide v0.29.0, safety bump 41 crates ([`30b2761`](https://github.com/Byron/gitoxide/commit/30b27615047692d3ced1b2d9c2ac15a80f79fbee))
+ - Update changelogs prior to release ([`f23ea88`](https://github.com/Byron/gitoxide/commit/f23ea8828f2d9ba7559973daca388c9591bcc5fc))
+ - Merge branch 'gix-submodule' ([`8f3f358`](https://github.com/Byron/gitoxide/commit/8f3f358800f1fe77d7ba7ebd396a90b692d3c0c1))
+ - More cleanup of test crates ([`73c685a`](https://github.com/Byron/gitoxide/commit/73c685a67debcfa26a940f37bbca69cb3a4af57e))
+ - Split tests off into their own crate to allow feature toggles. ([`93feea2`](https://github.com/Byron/gitoxide/commit/93feea269eebd114e866e6f29f4a73c0096df9e0))
+ - Release gix-glob v0.10.2, gix-date v0.7.2, gix-validate v0.8.0, gix-object v0.34.0, gix-ref v0.34.0, gix-config v0.27.0, gix-commitgraph v0.18.2, gix-revwalk v0.5.0, gix-revision v0.19.0, gix-refspec v0.15.0, gix-submodule v0.1.0, safety bump 18 crates ([`4604f83`](https://github.com/Byron/gitoxide/commit/4604f83ef238dc07c85aaeae097399b67f3cfd0c))
+</details>
+
+## 0.40.2 (2023-07-24)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 3 commits contributed to the release.
+ - 1 day passed between releases.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-archive v0.2.1, gix-ref v0.33.2, gix-pack v0.40.2, gix v0.50.1 ([`13883e5`](https://github.com/Byron/gitoxide/commit/13883e5528385f892ee402e911298121e0c297c0))
+ - Prepare changelogs ([`735c206`](https://github.com/Byron/gitoxide/commit/735c2062625aaeffbdbca3c1395dbcf075661e3a))
+ - Does this fix CI? ([`a4e6e28`](https://github.com/Byron/gitoxide/commit/a4e6e28cf18766c625bfb03ed2eb607694b1371a))
+</details>
+
+## 0.40.1 (2023-07-22)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 7 commits contributed to the release over the course of 1 calendar day.
+ - 3 days passed between releases.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-diff v0.33.1, gix-discover v0.22.1, gix-ignore v0.5.1, gix-bitmap v0.2.6, gix-index v0.21.1, gix-mailmap v0.16.1, gix-negotiate v0.5.1, gix-pack v0.40.1, gix-odb v0.50.1, gix-packetline v0.16.4, gix-transport v0.34.1, gix-protocol v0.36.1, gix-revision v0.18.1, gix-refspec v0.14.1, gix-worktree v0.23.0, gix v0.50.0 ([`0062971`](https://github.com/Byron/gitoxide/commit/00629710dffeb10fda340665530353703cf5d129))
+ - Release gix-tempfile v7.0.2, gix-utils v0.1.5, gix-lock v7.0.2, gix-ref v0.33.1, gix-sec v0.8.4, gix-prompt v0.5.4, gix-url v0.21.1, gix-credentials v0.17.1, gix-diff v0.33.1, gix-discover v0.22.1, gix-ignore v0.5.1, gix-bitmap v0.2.6, gix-index v0.21.1, gix-mailmap v0.16.1, gix-negotiate v0.5.1, gix-pack v0.40.1, gix-odb v0.50.1, gix-packetline v0.16.4, gix-transport v0.34.1, gix-protocol v0.36.1, gix-revision v0.18.1, gix-refspec v0.14.1, gix-worktree v0.23.0, gix v0.50.0 ([`107a64e`](https://github.com/Byron/gitoxide/commit/107a64e734580ad9e2c4142db96394529d8072df))
+ - Release gix-features v0.32.1, gix-actor v0.24.1, gix-validate v0.7.7, gix-object v0.33.1, gix-path v0.8.4, gix-glob v0.10.1, gix-quote v0.4.6, gix-attributes v0.16.0, gix-command v0.2.8, gix-packetline-blocking v0.16.4, gix-filter v0.2.0, gix-fs v0.4.1, gix-chunk v0.4.4, gix-commitgraph v0.18.1, gix-hashtable v0.2.4, gix-revwalk v0.4.1, gix-traverse v0.30.1, gix-worktree-stream v0.2.0, gix-archive v0.2.0, gix-config-value v0.12.5, gix-tempfile v7.0.1, gix-utils v0.1.5, gix-lock v7.0.2, gix-ref v0.33.1, gix-sec v0.8.4, gix-prompt v0.5.4, gix-url v0.21.1, gix-credentials v0.17.1, gix-diff v0.33.1, gix-discover v0.22.1, gix-ignore v0.5.1, gix-bitmap v0.2.6, gix-index v0.21.1, gix-mailmap v0.16.1, gix-negotiate v0.5.1, gix-pack v0.40.1, gix-odb v0.50.1, gix-packetline v0.16.4, gix-transport v0.34.1, gix-protocol v0.36.1, gix-revision v0.18.1, gix-refspec v0.14.1, gix-worktree v0.23.0, gix v0.50.0, safety bump 5 crates ([`16295b5`](https://github.com/Byron/gitoxide/commit/16295b58e2581d2e8b8b762816f52baabe871c75))
+ - Prepare more changelogs ([`c4cc5f2`](https://github.com/Byron/gitoxide/commit/c4cc5f261d29f712a101033a18293a97a9d4ae85))
+ - Release gix-date v0.7.1, gix-hash v0.11.4, gix-trace v0.1.3, gix-features v0.32.0, gix-actor v0.24.0, gix-validate v0.7.7, gix-object v0.33.0, gix-path v0.8.4, gix-glob v0.10.0, gix-quote v0.4.6, gix-attributes v0.15.0, gix-command v0.2.7, gix-packetline-blocking v0.16.3, gix-filter v0.1.0, gix-fs v0.4.0, gix-chunk v0.4.4, gix-commitgraph v0.18.0, gix-hashtable v0.2.4, gix-revwalk v0.4.0, gix-traverse v0.30.0, gix-worktree-stream v0.2.0, gix-archive v0.2.0, gix-config-value v0.12.4, gix-tempfile v7.0.1, gix-utils v0.1.5, gix-lock v7.0.2, gix-ref v0.33.0, gix-sec v0.8.4, gix-prompt v0.5.3, gix-url v0.21.0, gix-credentials v0.17.0, gix-diff v0.33.0, gix-discover v0.22.0, gix-ignore v0.5.0, gix-bitmap v0.2.6, gix-index v0.21.0, gix-mailmap v0.16.0, gix-negotiate v0.5.0, gix-pack v0.40.0, gix-odb v0.50.0, gix-packetline v0.16.4, gix-transport v0.34.0, gix-protocol v0.36.0, gix-revision v0.18.0, gix-refspec v0.14.0, gix-worktree v0.22.0, gix v0.49.1 ([`5cb3589`](https://github.com/Byron/gitoxide/commit/5cb3589b74fc5376e02cbfe151e71344e1c417fe))
+ - Update changelogs prior to release ([`2fc66b5`](https://github.com/Byron/gitoxide/commit/2fc66b55097ed494b72d1af939ba5561f71fde97))
+ - Update license field following SPDX 2.1 license expression standard ([`9064ea3`](https://github.com/Byron/gitoxide/commit/9064ea31fae4dc59a56bdd3a06c0ddc990ee689e))
+</details>
+
+## 0.40.0 (2023-07-19)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 9 commits contributed to the release over the course of 10 calendar days.
+ - 19 days passed between releases.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Thanks Clippy
+
+<csr-read-only-do-not-edit/>
+
+[Clippy](https://github.com/rust-lang/rust-clippy) helped 1 time to make code idiomatic.
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-filter v0.1.0, gix-ignore v0.5.0, gix-revwalk v0.4.0, gix-traverse v0.30.0, gix-index v0.21.0, gix-mailmap v0.16.0, gix-negotiate v0.5.0, gix-pack v0.40.0, gix-odb v0.50.0, gix-transport v0.34.0, gix-protocol v0.36.0, gix-revision v0.18.0, gix-refspec v0.14.0, gix-worktree v0.22.0, gix v0.49.0 ([`4aca8c2`](https://github.com/Byron/gitoxide/commit/4aca8c2ae2ec588fb65ec4faa0c07c19d219569f))
+ - Release gix-features v0.32.0, gix-actor v0.24.0, gix-glob v0.10.0, gix-attributes v0.15.0, gix-commitgraph v0.18.0, gix-config-value v0.12.4, gix-fs v0.4.0, gix-object v0.33.0, gix-ref v0.33.0, gix-config v0.26.0, gix-command v0.2.7, gix-url v0.21.0, gix-credentials v0.17.0, gix-diff v0.33.0, gix-discover v0.22.0, gix-filter v0.1.0, gix-ignore v0.5.0, gix-revwalk v0.4.0, gix-traverse v0.30.0, gix-index v0.21.0, gix-mailmap v0.16.0, gix-negotiate v0.5.0, gix-pack v0.40.0, gix-odb v0.50.0, gix-transport v0.34.0, gix-protocol v0.36.0, gix-revision v0.18.0, gix-refspec v0.14.0, gix-worktree v0.22.0, gix v0.49.0 ([`68ae3ff`](https://github.com/Byron/gitoxide/commit/68ae3ff9d642ec56f088a6a682a073dc16f4e8ca))
+ - Adjust package versions (by cargo-smart-release) ([`c70e54f`](https://github.com/Byron/gitoxide/commit/c70e54f163c312c87753a506eeaad462e8579bfb))
+ - Prepare changelogs prior to release ([`e4dded0`](https://github.com/Byron/gitoxide/commit/e4dded05138562f9737a7dcfb60570c55769486d))
+ - Merge branch 'refactor-pack-streaming' ([`8a46a7e`](https://github.com/Byron/gitoxide/commit/8a46a7ef3efc22241b67f3447223b2505e205442))
+ - Simplify decompressor and hash handling when streaming packs ([`33f95ba`](https://github.com/Byron/gitoxide/commit/33f95ba45885b10c8ca73d8180caa1b303dc091c))
+ - Thanks clippy ([`3ef32af`](https://github.com/Byron/gitoxide/commit/3ef32af9bf477cbc60d24da8bb3f15d20976e9e0))
+ - Merge branch 'integrate-filtering' ([`b19a56d`](https://github.com/Byron/gitoxide/commit/b19a56dcfa9bea86332a84aa4e8fad445e7d1724))
+ - Assure we run all tests in nextest ([`3821089`](https://github.com/Byron/gitoxide/commit/3821089b6b02c933770705b19fc3126d61beb5a7))
+</details>
+
+## 0.39.1 (2023-06-29)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 3 commits contributed to the release.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-glob v0.9.1, gix-attributes v0.14.1, gix-config-value v0.12.3, gix-ref v0.32.1, gix-sec v0.8.3, gix-config v0.25.1, gix-url v0.20.1, gix-credentials v0.16.1, gix-discover v0.21.1, gix-ignore v0.4.1, gix-pack v0.39.1, gix-odb v0.49.1, gix-worktree v0.21.1, gix v0.48.0 ([`69c6a36`](https://github.com/Byron/gitoxide/commit/69c6a36ba14cbef129deebda9fd8870005fefa17))
+ - Prepare changelogs prior to release ([`c143cf4`](https://github.com/Byron/gitoxide/commit/c143cf48ee1885467e3e9262a3f8823a1247bfe0))
+ - Align usage of `gix-path` across all crates ([`73c1292`](https://github.com/Byron/gitoxide/commit/73c1292be393986c4a1adde1400abf551e850da0))
+</details>
+
+## 0.39.0 (2023-06-29)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 3 commits contributed to the release over the course of 6 calendar days.
+ - 6 days passed between releases.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-date v0.7.0, gix-trace v0.1.2, gix-actor v0.23.0, gix-commitgraph v0.17.1, gix-utils v0.1.4, gix-object v0.32.0, gix-ref v0.32.0, gix-config v0.25.0, gix-diff v0.32.0, gix-discover v0.21.0, gix-hashtable v0.2.3, gix-revwalk v0.3.0, gix-traverse v0.29.0, gix-index v0.20.0, gix-mailmap v0.15.0, gix-negotiate v0.4.0, gix-pack v0.39.0, gix-odb v0.49.0, gix-protocol v0.35.0, gix-revision v0.17.0, gix-refspec v0.13.0, gix-worktree v0.21.0, gix v0.48.0, safety bump 20 crates ([`27e8c18`](https://github.com/Byron/gitoxide/commit/27e8c18db5a9a21843381c116a8ed6d9f681b3f8))
+ - Prepare changelogs prior to release ([`00f96fb`](https://github.com/Byron/gitoxide/commit/00f96fb3110a8f81a1bd0d74c757c15b8773c6f6))
+ - Upgrade memmap2 and fastrand dependencies ([`6fc7497`](https://github.com/Byron/gitoxide/commit/6fc74971ac6838cbfd9c869ba3746713001d7a38))
+</details>
+
+## 0.38.0 (2023-06-22)
+
+<csr-id-bcad5c22049d56a25ef69d6c7a3344e78f9a1d4d/>
+
+### Chore
+
+ - <csr-id-bcad5c22049d56a25ef69d6c7a3344e78f9a1d4d/> Add `clippy::redundant-closure-for-method-calls` lint
+
+### New Features
+
+ - <csr-id-3cffa268460eb2d41bd6a30d45778b88db4ec602/> provide basic `tracing` spans for common operations.
+ This is just the beginning and more crates will integrate with it over time.
+
+### Bug Fixes
+
+ - <csr-id-b9eb40730b53f788d2e4bffe4ef6d9028440782e/> revert 3a2d5286084597d4c68549903709cda77dda4357 to fix 'incorrect data check' error.
+   This error could occur in heavily threaded code for unknown reason.
+ But maybe it's due to threads somehow not cleaning up their reused decompressor properly
+   (maybe related to the zlib-ng version). It's strange and sad as this really costs performance
+ for no good reason.
+ - <csr-id-8817c248dd7c6453ced654d4df304f98ff18ecda/> don't crash when object validation failed during verification.
+ When objects can't be serialized, they will trigger an error that manifests as IO error.
+   Previously we didn't think of the possibility that writing to an in-memory buffer could fail,
+   and would indeed panic during verification.
+
+ This is now fixed.
+ - <csr-id-cd747f9292fb034e7fd5ee6c5a94a4e53df42e86/> ScopedJoinHandle is not always available
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 12 commits contributed to the release over the course of 10 calendar days.
+ - 12 days passed between releases.
+ - 5 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-date v0.6.0, gix-hash v0.11.3, gix-trace v0.1.1, gix-features v0.31.0, gix-actor v0.22.0, gix-path v0.8.2, gix-glob v0.9.0, gix-quote v0.4.5, gix-attributes v0.14.0, gix-chunk v0.4.3, gix-commitgraph v0.17.0, gix-config-value v0.12.2, gix-fs v0.3.0, gix-tempfile v7.0.0, gix-utils v0.1.3, gix-lock v7.0.0, gix-validate v0.7.6, gix-object v0.31.0, gix-ref v0.31.0, gix-sec v0.8.2, gix-config v0.24.0, gix-command v0.2.6, gix-prompt v0.5.2, gix-url v0.20.0, gix-credentials v0.16.0, gix-diff v0.31.0, gix-discover v0.20.0, gix-hashtable v0.2.2, gix-ignore v0.4.0, gix-bitmap v0.2.5, gix-revwalk v0.2.0, gix-traverse v0.28.0, gix-index v0.19.0, gix-mailmap v0.14.0, gix-negotiate v0.3.0, gix-pack v0.38.0, gix-odb v0.48.0, gix-packetline v0.16.3, gix-transport v0.33.0, gix-protocol v0.34.0, gix-revision v0.16.0, gix-refspec v0.12.0, gix-worktree v0.20.0, gix v0.47.0, gitoxide-core v0.29.0, gitoxide v0.27.0, safety bump 30 crates ([`ea9f942`](https://github.com/Byron/gitoxide/commit/ea9f9424e777f10da0e33bb9ffbbefd01c4c5a74))
+ - Prepare changelogs prior to release ([`18b0a37`](https://github.com/Byron/gitoxide/commit/18b0a371941aa2d4d62512437d5daa351ba99ffd))
+ - Try tree-traversal without thread_local! ([`5a9a7a3`](https://github.com/Byron/gitoxide/commit/5a9a7a31c1537aa54cd81a5145ca319a040b9951))
+ - Revert 3a2d5286084597d4c68549903709cda77dda4357 to fix 'incorrect data check' error. ([`b9eb407`](https://github.com/Byron/gitoxide/commit/b9eb40730b53f788d2e4bffe4ef6d9028440782e))
+ - Merge branch 'gix-corpus' ([`5861afb`](https://github.com/Byron/gitoxide/commit/5861afb45f32c16eefcd8e7b7480309bf44b6edc))
+ - Don't crash when object validation failed during verification. ([`8817c24`](https://github.com/Byron/gitoxide/commit/8817c248dd7c6453ced654d4df304f98ff18ecda))
+ - Merge branch 'corpus' ([`aa16c8c`](https://github.com/Byron/gitoxide/commit/aa16c8ce91452a3e3063cf1cf0240b6014c4743f))
+ - Change MSRV to 1.65 ([`4f635fc`](https://github.com/Byron/gitoxide/commit/4f635fc4429350bae2582d25de86429969d28f30))
+ - Provide basic `tracing` spans for common operations. ([`3cffa26`](https://github.com/Byron/gitoxide/commit/3cffa268460eb2d41bd6a30d45778b88db4ec602))
+ - Merge branch 'help-874-redundant-closures' ([`fe59956`](https://github.com/Byron/gitoxide/commit/fe59956ad667303a923d7cfd9ffd72283df41d78))
+ - ScopedJoinHandle is not always available ([`cd747f9`](https://github.com/Byron/gitoxide/commit/cd747f9292fb034e7fd5ee6c5a94a4e53df42e86))
+ - Add `clippy::redundant-closure-for-method-calls` lint ([`bcad5c2`](https://github.com/Byron/gitoxide/commit/bcad5c22049d56a25ef69d6c7a3344e78f9a1d4d))
+</details>
+
+## 0.37.0 (2023-06-10)
+
+A maintenance release without user-facing changes.
+
+### Commit Statistics
+
+<csr-read-only-do-not-edit/>
+
+ - 4 commits contributed to the release.
+ - 3 days passed between releases.
+ - 0 commits were understood as [conventional](https://www.conventionalcommits.org).
+ - 0 issues like '(#ID)' were seen in commit messages
+
+### Commit Details
+
+<csr-read-only-do-not-edit/>
+
+<details><summary>view details</summary>
+
+ * **Uncategorized**
+ - Release gix-attributes v0.13.1, gix-diff v0.30.1, gix-revwalk v0.1.0, gix-traverse v0.27.0, gix-index v0.18.0, gix-revision v0.15.2, gix-negotiate v0.2.1, gix-pack v0.37.0, gix-odb v0.47.0, gix-protocol v0.33.2, gix-worktree v0.19.0, gix v0.46.0, safety bump 7 crates ([`2560a2c`](https://github.com/Byron/gitoxide/commit/2560a2cc3e1d8c60cd812e15696fa4761d036e19))
+ - Prepare changelogs prior to release ([`298f3d7`](https://github.com/Byron/gitoxide/commit/298f3d7359c5b183314d8c584e45dcdd559d88b3))
+ - Merge branch 'walk-with-commitgraph' ([`fdee9a2`](https://github.com/Byron/gitoxide/commit/fdee9a22873a13ae644d3dc92f8fe93f8f0266c0))
+ - Adapt to changes in `gix-traverse` ([`1f682fd`](https://github.com/Byron/gitoxide/commit/1f682fd991b9b76a8d37e6852567ff239c0ac0db))
+</details>
+
## 0.36.0 (2023-06-06)
### New Features
@@ -65,7 +417,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<csr-read-only-do-not-edit/>
- - 28 commits contributed to the release over the course of 25 calendar days.
+ - 29 commits contributed to the release over the course of 25 calendar days.
- 40 days passed between releases.
- 7 commits were understood as [conventional](https://www.conventionalcommits.org).
- 1 unique issue was worked on: [#851](https://github.com/Byron/gitoxide/issues/851)
@@ -92,6 +444,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Check for interrupt more often ([`f89cbc6`](https://github.com/Byron/gitoxide/commit/f89cbc675b0acc67322e289e7b3a17288b9eae27))
- Memory capped hashmap as pack delta cache won't trash memory as much. ([`923692b`](https://github.com/Byron/gitoxide/commit/923692bcda698b45d3d1ad518b29f6d30b23fbc0))
* **Uncategorized**
+ - Release gix-date v0.5.1, gix-hash v0.11.2, gix-features v0.30.0, gix-actor v0.21.0, gix-path v0.8.1, gix-glob v0.8.0, gix-quote v0.4.4, gix-attributes v0.13.0, gix-chunk v0.4.2, gix-commitgraph v0.16.0, gix-config-value v0.12.1, gix-fs v0.2.0, gix-tempfile v6.0.0, gix-utils v0.1.2, gix-lock v6.0.0, gix-validate v0.7.5, gix-object v0.30.0, gix-ref v0.30.0, gix-sec v0.8.1, gix-config v0.23.0, gix-command v0.2.5, gix-prompt v0.5.1, gix-url v0.19.0, gix-credentials v0.15.0, gix-diff v0.30.0, gix-discover v0.19.0, gix-hashtable v0.2.1, gix-ignore v0.3.0, gix-bitmap v0.2.4, gix-traverse v0.26.0, gix-index v0.17.0, gix-mailmap v0.13.0, gix-revision v0.15.0, gix-negotiate v0.2.0, gix-pack v0.36.0, gix-odb v0.46.0, gix-packetline v0.16.2, gix-transport v0.32.0, gix-protocol v0.33.0, gix-refspec v0.11.0, gix-worktree v0.18.0, gix v0.45.0, safety bump 29 crates ([`9a9fa96`](https://github.com/Byron/gitoxide/commit/9a9fa96fa8a722bddc5c3b2270b0edf8f6615141))
- `just fmt` ([`ffc1276`](https://github.com/Byron/gitoxide/commit/ffc1276e0c991ac33ce842f5dca0b45ac69680c0))
- Prepare changelogs prior to release ([`8f15cec`](https://github.com/Byron/gitoxide/commit/8f15cec1ec7d5a9d56bb158f155011ef2bb3539b))
- Merge branch 'integrate-gix-negotiate' ([`ae845de`](https://github.com/Byron/gitoxide/commit/ae845dea6cee6523c88a23d7a14293589cf8092f))
diff --git a/vendor/gix-pack/Cargo.toml b/vendor/gix-pack/Cargo.toml
index 783b411ce..d6e0fa0b4 100644
--- a/vendor/gix-pack/Cargo.toml
+++ b/vendor/gix-pack/Cargo.toml
@@ -11,9 +11,9 @@
[package]
edition = "2021"
-rust-version = "1.64"
+rust-version = "1.65"
name = "gix-pack"
-version = "0.36.0"
+version = "0.43.0"
authors = ["Sebastian Thiel <sebastian.thiel@icloud.com>"]
include = [
"src/**/*",
@@ -22,7 +22,7 @@ include = [
]
autotests = false
description = "Implements git packs and related data structures"
-license = "MIT/Apache-2.0"
+license = "MIT OR Apache-2.0"
repository = "https://github.com/Byron/gitoxide"
[package.metadata.docs.rs]
@@ -50,13 +50,15 @@ version = "0.2.0"
optional = true
[dependencies.gix-chunk]
-version = "^0.4.2"
+version = "^0.4.4"
[dependencies.gix-diff]
-version = "^0.30.0"
+version = "^0.36.0"
+optional = true
+default-features = false
[dependencies.gix-features]
-version = "^0.30.0"
+version = "^0.35.0"
features = [
"crc32",
"rustsha1",
@@ -65,22 +67,23 @@ features = [
]
[dependencies.gix-hash]
-version = "^0.11.2"
+version = "^0.13.0"
[dependencies.gix-hashtable]
-version = "^0.2.1"
+version = "^0.4.0"
[dependencies.gix-object]
-version = "^0.30.0"
+version = "^0.37.0"
[dependencies.gix-path]
-version = "^0.8.1"
+version = "^0.10.0"
[dependencies.gix-traverse]
-version = "^0.26.0"
+version = "^0.33.0"
+optional = true
[dependencies.memmap2]
-version = "0.5.0"
+version = "0.7.0"
[dependencies.parking_lot]
version = "0.12.0"
@@ -105,6 +108,14 @@ optional = true
[dev-dependencies]
[features]
+default = [
+ "generate",
+ "streaming-input",
+]
+generate = [
+ "dep:gix-traverse",
+ "dep:gix-diff",
+]
object-cache-dynamic = ["dep:clru"]
pack-cache-lru-dynamic = ["dep:clru"]
pack-cache-lru-static = ["dep:uluru"]
@@ -112,8 +123,9 @@ serde = [
"dep:serde",
"gix-object/serde",
]
-wasm = ["gix-diff/wasm"]
+streaming-input = []
+wasm = ["gix-diff?/wasm"]
[target."cfg(not(target_arch = \"wasm32\"))".dependencies.gix-tempfile]
-version = "^6.0.0"
+version = "^10.0.0"
default-features = false
diff --git a/vendor/gix-pack/src/bundle/find.rs b/vendor/gix-pack/src/bundle/find.rs
index 2fc335721..98e28333d 100644
--- a/vendor/gix-pack/src/bundle/find.rs
+++ b/vendor/gix-pack/src/bundle/find.rs
@@ -1,25 +1,30 @@
+use gix_features::zlib;
+
impl crate::Bundle {
- /// Find an object with the given [`ObjectId`][gix_hash::ObjectId] and place its data into `out`.
+ /// Find an object with the given [`ObjectId`](gix_hash::ObjectId) and place its data into `out`.
+ /// `inflate` is used to decompress objects, and will be reset before first use, but not after the last use.
///
- /// [`cache`][crate::cache::DecodeEntry] is used to accelerate the lookup.
+ /// [`cache`](crate::cache::DecodeEntry) is used to accelerate the lookup.
///
/// **Note** that ref deltas are automatically resolved within this pack only, which makes this implementation unusable
/// for thin packs, which by now are expected to be resolved already.
pub fn find<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
out: &'a mut Vec<u8>,
- cache: &mut impl crate::cache::DecodeEntry,
+ inflate: &mut zlib::Inflate,
+ cache: &mut dyn crate::cache::DecodeEntry,
) -> Result<Option<(gix_object::Data<'a>, crate::data::entry::Location)>, crate::data::decode::Error> {
let idx = match self.index.lookup(id) {
Some(idx) => idx,
None => return Ok(None),
};
- self.get_object_by_index(idx, out, cache).map(Some)
+ self.get_object_by_index(idx, out, inflate, cache).map(Some)
}
/// Special-use function to get an object given an index previously returned from
- /// `internal_find_pack_index`.
+ /// [index::File::](crate::index::File::lookup()).
+ /// `inflate` is used to decompress objects, and will be reset before first use, but not after the last use.
///
/// # Panics
///
@@ -28,7 +33,8 @@ impl crate::Bundle {
&self,
idx: u32,
out: &'a mut Vec<u8>,
- cache: &mut impl crate::cache::DecodeEntry,
+ inflate: &mut zlib::Inflate,
+ cache: &mut dyn crate::cache::DecodeEntry,
) -> Result<(gix_object::Data<'a>, crate::data::entry::Location), crate::data::decode::Error> {
let ofs = self.index.pack_offset_at_index(idx);
let pack_entry = self.pack.entry(ofs);
@@ -37,7 +43,8 @@ impl crate::Bundle {
.decode_entry(
pack_entry,
out,
- |id, _out| {
+ inflate,
+ &|id, _out| {
self.index.lookup(id).map(|idx| {
crate::data::decode::entry::ResolvedBase::InPack(
self.pack.entry(self.index.pack_offset_at_index(idx)),
diff --git a/vendor/gix-pack/src/bundle/init.rs b/vendor/gix-pack/src/bundle/init.rs
index 3ba5257ed..bde90bde1 100644
--- a/vendor/gix-pack/src/bundle/init.rs
+++ b/vendor/gix-pack/src/bundle/init.rs
@@ -29,7 +29,7 @@ impl Bundle {
fn at_inner(path: &Path, object_hash: gix_hash::Kind) -> Result<Self, Error> {
let ext = path
.extension()
- .and_then(|e| e.to_str())
+ .and_then(std::ffi::OsStr::to_str)
.ok_or_else(|| Error::InvalidPath(path.to_owned()))?;
Ok(match ext {
"idx" => Self {
diff --git a/vendor/gix-pack/src/bundle/mod.rs b/vendor/gix-pack/src/bundle/mod.rs
index 076b355d9..d8ef1107d 100644
--- a/vendor/gix-pack/src/bundle/mod.rs
+++ b/vendor/gix-pack/src/bundle/mod.rs
@@ -3,25 +3,23 @@ pub mod init;
mod find;
///
-#[cfg(not(feature = "wasm"))]
+#[cfg(all(not(feature = "wasm"), feature = "streaming-input"))]
pub mod write;
///
pub mod verify {
use std::sync::atomic::AtomicBool;
- use gix_features::progress::Progress;
+ use gix_features::progress::DynNestedProgress;
///
pub mod integrity {
/// Returned by [`Bundle::verify_integrity()`][crate::Bundle::verify_integrity()].
- pub struct Outcome<P> {
+ pub struct Outcome {
/// The computed checksum of the index which matched the stored one.
pub actual_index_checksum: gix_hash::ObjectId,
/// The packs traversal outcome
pub pack_traverse_outcome: crate::index::traverse::Statistics,
- /// The provided progress instance.
- pub progress: P,
}
}
@@ -30,14 +28,13 @@ pub mod verify {
impl Bundle {
/// Similar to [`crate::index::File::verify_integrity()`] but more convenient to call as the presence of the
/// pack file is a given.
- pub fn verify_integrity<C, P, F>(
+ pub fn verify_integrity<C, F>(
&self,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
options: crate::index::verify::integrity::Options<F>,
- ) -> Result<integrity::Outcome<P>, crate::index::traverse::Error<crate::index::verify::integrity::Error>>
+ ) -> Result<integrity::Outcome, crate::index::traverse::Error<crate::index::verify::integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
@@ -53,7 +50,6 @@ pub mod verify {
.map(|o| integrity::Outcome {
actual_index_checksum: o.actual_index_checksum,
pack_traverse_outcome: o.pack_traverse_statistics.expect("pack is set"),
- progress: o.progress,
})
}
}
diff --git a/vendor/gix-pack/src/bundle/write/mod.rs b/vendor/gix-pack/src/bundle/write/mod.rs
index 103a0034b..8983cb92f 100644
--- a/vendor/gix-pack/src/bundle/write/mod.rs
+++ b/vendor/gix-pack/src/bundle/write/mod.rs
@@ -13,6 +13,7 @@ use crate::data;
mod error;
pub use error::Error;
+use gix_features::progress::prodash::DynNestedProgress;
mod types;
use types::{LockWriter, PassThrough};
@@ -63,14 +64,15 @@ impl crate::Bundle {
/// be accounted for.
/// - Empty packs always have the same name and not handling this case will result in at most one superfluous pack.
pub fn write_to_directory(
- pack: impl io::BufRead,
- directory: Option<impl AsRef<Path>>,
- mut progress: impl Progress,
+ pack: &mut dyn io::BufRead,
+ directory: Option<&Path>,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
thin_pack_base_object_lookup_fn: Option<ThinPackLookupFn>,
options: Options,
) -> Result<Outcome, Error> {
- let mut read_progress = progress.add_child_with_id("read pack", ProgressId::ReadPackBytes.into());
+ let _span = gix_features::trace::coarse!("gix_pack::Bundle::write_to_directory()");
+ let mut read_progress = progress.add_child_with_id("read pack".into(), ProgressId::ReadPackBytes.into());
read_progress.init(None, progress::bytes());
let pack = progress::Read {
inner: pack,
@@ -170,20 +172,17 @@ impl crate::Bundle {
/// As it sends portions of the input to a thread it requires the 'static lifetime for the interrupt flags. This can only
/// be satisfied by a static `AtomicBool` which is only suitable for programs that only run one of these operations at a time
/// or don't mind that all of them abort when the flag is set.
- pub fn write_to_directory_eagerly<P>(
- pack: impl io::Read + Send + 'static,
+ pub fn write_to_directory_eagerly(
+ pack: Box<dyn io::Read + Send + 'static>,
pack_size: Option<u64>,
directory: Option<impl AsRef<Path>>,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &'static AtomicBool,
thin_pack_base_object_lookup_fn: Option<ThinPackLookupFnSend>,
options: Options,
- ) -> Result<Outcome, Error>
- where
- P: Progress,
- P::SubProgress: 'static,
- {
- let mut read_progress = progress.add_child_with_id("read pack", ProgressId::ReadPackBytes.into()); /* Bundle Write Read pack Bytes*/
+ ) -> Result<Outcome, Error> {
+ let _span = gix_features::trace::coarse!("gix_pack::Bundle::write_to_directory_eagerly()");
+ let mut read_progress = progress.add_child_with_id("read pack".into(), ProgressId::ReadPackBytes.into()); /* Bundle Write Read pack Bytes*/
read_progress.init(pack_size.map(|s| s as usize), progress::bytes());
let pack = progress::Read {
inner: pack,
@@ -251,7 +250,7 @@ impl crate::Bundle {
progress,
options,
data_file,
- pack_entries_iter,
+ Box::new(pack_entries_iter),
should_interrupt,
pack_version,
)?;
@@ -266,9 +265,9 @@ impl crate::Bundle {
})
}
- fn inner_write(
+ fn inner_write<'a>(
directory: Option<impl AsRef<Path>>,
- mut progress: impl Progress,
+ progress: &mut dyn DynNestedProgress,
Options {
thread_limit,
iteration_mode: _,
@@ -276,12 +275,12 @@ impl crate::Bundle {
object_hash,
}: Options,
data_file: SharedTempFile,
- pack_entries_iter: impl Iterator<Item = Result<data::input::Entry, data::input::Error>>,
+ mut pack_entries_iter: Box<dyn Iterator<Item = Result<data::input::Entry, data::input::Error>> + 'a>,
should_interrupt: &AtomicBool,
pack_version: data::Version,
) -> Result<WriteOutcome, Error> {
- let indexing_progress = progress.add_child_with_id(
- "create index file",
+ let mut indexing_progress = progress.add_child_with_id(
+ "create index file".into(),
ProgressId::IndexingSteps(Default::default()).into(),
);
Ok(match directory {
@@ -295,14 +294,15 @@ impl crate::Bundle {
let data_file = Arc::clone(&data_file);
move || new_pack_file_resolver(data_file)
},
- pack_entries_iter,
+ &mut pack_entries_iter,
thread_limit,
- indexing_progress,
+ &mut indexing_progress,
&mut index_file,
should_interrupt,
object_hash,
pack_version,
)?;
+ drop(pack_entries_iter);
let data_path = directory.join(format!("pack-{}.pack", outcome.data_hash.to_hex()));
let index_path = data_path.with_extension("idx");
@@ -335,10 +335,10 @@ impl crate::Bundle {
outcome: crate::index::File::write_data_iter_to_stream(
index_kind,
move || new_pack_file_resolver(data_file),
- pack_entries_iter,
+ &mut pack_entries_iter,
thread_limit,
- indexing_progress,
- io::sink(),
+ &mut indexing_progress,
+ &mut io::sink(),
should_interrupt,
object_hash,
pack_version,
diff --git a/vendor/gix-pack/src/cache/delta/from_offsets.rs b/vendor/gix-pack/src/cache/delta/from_offsets.rs
index 065b1ca20..d790dcc0f 100644
--- a/vendor/gix-pack/src/cache/delta/from_offsets.rs
+++ b/vendor/gix-pack/src/cache/delta/from_offsets.rs
@@ -42,11 +42,11 @@ impl<T> Tree<T> {
///
/// Note that the sort order is ascending. The given pack file path must match the provided offsets.
pub fn from_offsets_in_pack(
- pack_path: impl AsRef<std::path::Path>,
+ pack_path: &std::path::Path,
data_sorted_by_offsets: impl Iterator<Item = T>,
- get_pack_offset: impl Fn(&T) -> data::Offset,
- resolve_in_pack_id: impl Fn(&gix_hash::oid) -> Option<data::Offset>,
- mut progress: impl Progress,
+ get_pack_offset: &dyn Fn(&T) -> data::Offset,
+ resolve_in_pack_id: &dyn Fn(&gix_hash::oid) -> Option<data::Offset>,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
object_hash: gix_hash::Kind,
) -> Result<Self, Error> {
diff --git a/vendor/gix-pack/src/cache/delta/mod.rs b/vendor/gix-pack/src/cache/delta/mod.rs
index f4c1b6fc6..64b392f76 100644
--- a/vendor/gix-pack/src/cache/delta/mod.rs
+++ b/vendor/gix-pack/src/cache/delta/mod.rs
@@ -179,11 +179,11 @@ mod tests {
fn tree(index_path: &str, pack_path: &str) -> Result<(), Box<dyn std::error::Error>> {
let idx = pack::index::File::at(fixture_path(index_path), gix_hash::Kind::Sha1)?;
crate::cache::delta::Tree::from_offsets_in_pack(
- fixture_path(pack_path),
+ &fixture_path(pack_path),
idx.sorted_offsets().into_iter(),
- |ofs| *ofs,
- |id| idx.lookup(id).map(|index| idx.pack_offset_at_index(index)),
- gix_features::progress::Discard,
+ &|ofs| *ofs,
+ &|id| idx.lookup(id).map(|index| idx.pack_offset_at_index(index)),
+ &mut gix_features::progress::Discard,
&AtomicBool::new(false),
gix_hash::Kind::Sha1,
)?;
diff --git a/vendor/gix-pack/src/cache/delta/traverse/mod.rs b/vendor/gix-pack/src/cache/delta/traverse/mod.rs
index e933af838..0598cf92e 100644
--- a/vendor/gix-pack/src/cache/delta/traverse/mod.rs
+++ b/vendor/gix-pack/src/cache/delta/traverse/mod.rs
@@ -1,5 +1,6 @@
use std::sync::atomic::{AtomicBool, Ordering};
+use gix_features::progress::DynNestedProgress;
use gix_features::{
parallel::in_parallel_with_slice,
progress::{self, Progress},
@@ -55,11 +56,11 @@ pub struct Context<'a> {
}
/// Options for [`Tree::traverse()`].
-pub struct Options<'a, P1, P2> {
+pub struct Options<'a, 's> {
/// is a progress instance to track progress for each object in the traversal.
- pub object_progress: P1,
+ pub object_progress: Box<dyn DynNestedProgress>,
/// is a progress instance to track the overall progress.
- pub size_progress: P2,
+ pub size_progress: &'s mut dyn Progress,
/// If `Some`, only use the given amount of threads. Otherwise, the amount of threads to use will be selected based on
/// the amount of available logical cores.
pub thread_limit: Option<usize>,
@@ -99,7 +100,7 @@ where
/// This method returns a vector of all tree items, along with their potentially modified custom node data.
///
/// _Note_ that this method consumed the Tree to assure safe parallel traversal with mutation support.
- pub fn traverse<F, P1, P2, MBFN, E, R>(
+ pub fn traverse<F, MBFN, E, R>(
mut self,
resolve: F,
resolve_data: &R,
@@ -108,17 +109,15 @@ where
Options {
thread_limit,
mut object_progress,
- mut size_progress,
+ size_progress,
should_interrupt,
object_hash,
- }: Options<'_, P1, P2>,
+ }: Options<'_, '_>,
) -> Result<Outcome<T>, Error>
where
F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
R: Send + Sync,
- P1: Progress,
- P2: Progress,
- MBFN: FnMut(&mut T, &<P1 as Progress>::SubProgress, Context<'_>) -> Result<(), E> + Send + Clone,
+ MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
self.set_pack_entries_end_and_resolve_ref_offsets(pack_entries_end)?;
@@ -150,7 +149,9 @@ where
resolve::State {
delta_bytes: Vec::<u8>::with_capacity(4096),
fully_resolved_delta_bytes: Vec::<u8>::with_capacity(4096),
- progress: threading::lock(&object_progress).add_child(format!("thread {thread_index}")),
+ progress: Box::new(
+ threading::lock(&object_progress).add_child(format!("thread {thread_index}")),
+ ),
resolve: resolve.clone(),
modify_base: inspect_object.clone(),
child_items: child_items.clone(),
diff --git a/vendor/gix-pack/src/cache/delta/traverse/resolve.rs b/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
index 0a4d29191..daf6f273e 100644
--- a/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
+++ b/vendor/gix-pack/src/cache/delta/traverse/resolve.rs
@@ -17,19 +17,19 @@ use crate::{
data::EntryRange,
};
-pub(crate) struct State<P, F, MBFN, T: Send> {
+pub(crate) struct State<F, MBFN, T: Send> {
pub delta_bytes: Vec<u8>,
pub fully_resolved_delta_bytes: Vec<u8>,
- pub progress: P,
+ pub progress: Box<dyn Progress>,
pub resolve: F,
pub modify_base: MBFN,
pub child_items: ItemSliceSend<Item<T>>,
}
#[allow(clippy::too_many_arguments)]
-pub(crate) fn deltas<T, F, MBFN, E, R, P>(
- object_counter: Option<gix_features::progress::StepShared>,
- size_counter: Option<gix_features::progress::StepShared>,
+pub(crate) fn deltas<T, F, MBFN, E, R>(
+ objects: gix_features::progress::StepShared,
+ size: gix_features::progress::StepShared,
node: &mut Item<T>,
State {
delta_bytes,
@@ -38,7 +38,7 @@ pub(crate) fn deltas<T, F, MBFN, E, R, P>(
resolve,
modify_base,
child_items,
- }: &mut State<P, F, MBFN, T>,
+ }: &mut State<F, MBFN, T>,
resolve_data: &R,
hash_len: usize,
threads_left: &AtomicIsize,
@@ -47,20 +47,20 @@ pub(crate) fn deltas<T, F, MBFN, E, R, P>(
where
T: Send,
R: Send + Sync,
- P: Progress,
F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
- MBFN: FnMut(&mut T, &P, Context<'_>) -> Result<(), E> + Send + Clone,
+ MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
let mut decompressed_bytes_by_pack_offset = BTreeMap::new();
- let decompress_from_resolver = |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
+ let mut inflate = zlib::Inflate::default();
+ let mut decompress_from_resolver = |slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
let bytes = resolve(slice.clone(), resolve_data).ok_or(Error::ResolveFailed {
pack_offset: slice.start,
})?;
let entry = data::Entry::from_bytes(bytes, slice.start, hash_len);
let compressed = &bytes[entry.header_size()..];
let decompressed_len = entry.decompressed_size as usize;
- decompress_all_at_once_with(compressed, decompressed_len, out)?;
+ decompress_all_at_once_with(&mut inflate, compressed, decompressed_len, out)?;
Ok((entry, slice.end))
};
@@ -103,10 +103,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
for mut child in base.into_child_iter() {
@@ -121,7 +119,7 @@ where
let (result_size, consumed) = data::delta::decode_header_size(&delta_bytes[consumed..]);
header_ofs += consumed;
- set_len(fully_resolved_delta_bytes, result_size as usize);
+ fully_resolved_delta_bytes.resize(result_size as usize, 0);
data::delta::apply(&base_bytes, fully_resolved_delta_bytes, &delta_bytes[header_ofs..]);
// FIXME: this actually invalidates the "pack_offset()" computation, which is not obvious to consumers
@@ -136,7 +134,7 @@ where
} else {
modify_base(
child.data(),
- progress,
+ &progress,
Context {
entry: &child_entry,
entry_end,
@@ -145,10 +143,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
}
@@ -168,9 +164,9 @@ where
return deltas_mt(
initial_threads,
decompressed_bytes_by_pack_offset,
- object_counter,
- size_counter,
- progress,
+ objects,
+ size,
+ &progress,
nodes,
resolve.clone(),
resolve_data,
@@ -190,12 +186,12 @@ where
/// system. Since this thread will take a controlling function, we may spawn one more than that. In threaded mode, we will finish
/// all remaining work.
#[allow(clippy::too_many_arguments)]
-pub(crate) fn deltas_mt<T, F, MBFN, E, R, P>(
+pub(crate) fn deltas_mt<T, F, MBFN, E, R>(
mut threads_to_create: isize,
decompressed_bytes_by_pack_offset: BTreeMap<u64, (data::Entry, u64, Vec<u8>)>,
- object_counter: Option<gix_features::progress::StepShared>,
- size_counter: Option<gix_features::progress::StepShared>,
- progress: &P,
+ objects: gix_features::progress::StepShared,
+ size: gix_features::progress::StepShared,
+ progress: &dyn Progress,
nodes: Vec<(u16, Node<'_, T>)>,
resolve: F,
resolve_data: &R,
@@ -207,9 +203,8 @@ pub(crate) fn deltas_mt<T, F, MBFN, E, R, P>(
where
T: Send,
R: Send + Sync,
- P: Progress,
F: for<'r> Fn(EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
- MBFN: FnMut(&mut T, &P, Context<'_>) -> Result<(), E> + Send + Clone,
+ MBFN: FnMut(&mut T, &dyn Progress, Context<'_>) -> Result<(), E> + Send + Clone,
E: std::error::Error + Send + Sync + 'static,
{
let nodes = gix_features::threading::Mutable::new(nodes);
@@ -229,13 +224,14 @@ where
let decompressed_bytes_by_pack_offset = &decompressed_bytes_by_pack_offset;
let resolve = resolve.clone();
let mut modify_base = modify_base.clone();
- let object_counter = object_counter.as_ref();
- let size_counter = size_counter.as_ref();
+ let objects = &objects;
+ let size = &size;
move || -> Result<(), Error> {
let mut fully_resolved_delta_bytes = Vec::new();
let mut delta_bytes = Vec::new();
- let decompress_from_resolver =
+ let mut inflate = zlib::Inflate::default();
+ let mut decompress_from_resolver =
|slice: EntryRange, out: &mut Vec<u8>| -> Result<(data::Entry, u64), Error> {
let bytes = resolve(slice.clone(), resolve_data).ok_or(Error::ResolveFailed {
pack_offset: slice.start,
@@ -243,7 +239,7 @@ where
let entry = data::Entry::from_bytes(bytes, slice.start, hash_len);
let compressed = &bytes[entry.header_size()..];
let decompressed_len = entry.decompressed_size as usize;
- decompress_all_at_once_with(compressed, decompressed_len, out)?;
+ decompress_all_at_once_with(&mut inflate, compressed, decompressed_len, out)?;
Ok((entry, slice.end))
};
@@ -280,10 +276,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
for mut child in base.into_child_iter() {
@@ -328,10 +322,8 @@ where
},
)
.map_err(|err| Box::new(err) as Box<dyn std::error::Error + Send + Sync>)?;
- object_counter.as_ref().map(|c| c.fetch_add(1, Ordering::SeqCst));
- size_counter
- .as_ref()
- .map(|c| c.fetch_add(base_bytes.len(), Ordering::SeqCst));
+ objects.fetch_add(1, Ordering::Relaxed);
+ size.fetch_add(base_bytes.len(), Ordering::Relaxed);
}
}
}
@@ -357,6 +349,9 @@ where
// but may instead find a good way to set the polling interval instead of hard-coding it.
std::thread::sleep(poll_interval);
// Get out of threads are already starving or they would be starving soon as no work is left.
+ //
+ // Lint: ScopedJoinHandle is not the same depending on active features and is not exposed in some cases.
+ #[allow(clippy::redundant_closure_for_method_calls)]
if threads.iter().any(|t| t.is_finished()) {
let mut running_threads = Vec::new();
for thread in threads.drain(..) {
@@ -389,35 +384,17 @@ where
})
}
-fn set_len(v: &mut Vec<u8>, new_len: usize) {
- if new_len > v.len() {
- v.reserve_exact(new_len.saturating_sub(v.capacity()) + (v.capacity() - v.len()));
- // SAFETY:
- // 1. we have reserved enough capacity to fit `new_len`
- // 2. the caller is trusted to write into `v` to completely fill `new_len`.
- #[allow(unsafe_code, clippy::uninit_vec)]
- unsafe {
- v.set_len(new_len);
- }
- } else {
- v.truncate(new_len)
- }
-}
-
-fn decompress_all_at_once_with(b: &[u8], decompressed_len: usize, out: &mut Vec<u8>) -> Result<(), Error> {
- set_len(out, decompressed_len);
- use std::cell::RefCell;
- thread_local! {
- pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
- }
-
- INFLATE.with(|inflate| {
- let mut inflate = inflate.borrow_mut();
- inflate.reset();
- inflate.once(b, out).map_err(|err| Error::ZlibInflate {
- source: err,
- message: "Failed to decompress entry",
- })
+fn decompress_all_at_once_with(
+ inflate: &mut zlib::Inflate,
+ b: &[u8],
+ decompressed_len: usize,
+ out: &mut Vec<u8>,
+) -> Result<(), Error> {
+ out.resize(decompressed_len, 0);
+ inflate.reset();
+ inflate.once(b, out).map_err(|err| Error::ZlibInflate {
+ source: err,
+ message: "Failed to decompress entry",
})?;
Ok(())
}
diff --git a/vendor/gix-pack/src/cache/object.rs b/vendor/gix-pack/src/cache/object.rs
index 26896bf89..af1f7d0a4 100644
--- a/vendor/gix-pack/src/cache/object.rs
+++ b/vendor/gix-pack/src/cache/object.rs
@@ -43,7 +43,7 @@ mod memory {
MemoryCappedHashmap {
inner: clru::CLruCache::with_config(
clru::CLruCacheConfig::new(NonZeroUsize::new(memory_cap_in_bytes).expect("non zero"))
- .with_hasher(gix_hashtable::hash::Builder::default())
+ .with_hasher(gix_hashtable::hash::Builder)
.with_scale(CustomScale),
),
free_list: Vec::new(),
diff --git a/vendor/gix-pack/src/data/entry/decode.rs b/vendor/gix-pack/src/data/entry/decode.rs
index 79d7aecff..b81320319 100644
--- a/vendor/gix-pack/src/data/entry/decode.rs
+++ b/vendor/gix-pack/src/data/entry/decode.rs
@@ -47,16 +47,16 @@ impl data::Entry {
/// Instantiate an `Entry` from the reader `r`, providing the `pack_offset` to allow tracking the start of the entry data section.
pub fn from_read(
- mut r: impl io::Read,
+ r: &mut dyn io::Read,
pack_offset: data::Offset,
hash_len: usize,
) -> Result<data::Entry, io::Error> {
- let (type_id, size, mut consumed) = streaming_parse_header_info(&mut r)?;
+ let (type_id, size, mut consumed) = streaming_parse_header_info(r)?;
use crate::data::entry::Header::*;
let object = match type_id {
OFS_DELTA => {
- let (distance, leb_bytes) = leb64_from_read(&mut r)?;
+ let (distance, leb_bytes) = leb64_from_read(r)?;
let delta = OfsDelta {
base_distance: distance,
};
@@ -89,7 +89,7 @@ impl data::Entry {
}
#[inline]
-fn streaming_parse_header_info(mut read: impl io::Read) -> Result<(u8, u64, usize), io::Error> {
+fn streaming_parse_header_info(read: &mut dyn io::Read) -> Result<(u8, u64, usize), io::Error> {
let mut byte = [0u8; 1];
read.read_exact(&mut byte)?;
let mut c = byte[0];
diff --git a/vendor/gix-pack/src/data/entry/header.rs b/vendor/gix-pack/src/data/entry/header.rs
index 4d0dbf4d2..358bd743c 100644
--- a/vendor/gix-pack/src/data/entry/header.rs
+++ b/vendor/gix-pack/src/data/entry/header.rs
@@ -83,7 +83,7 @@ impl Header {
///
/// Returns the amount of bytes written to `out`.
/// `decompressed_size_in_bytes` is the full size in bytes of the object that this header represents
- pub fn write_to(&self, decompressed_size_in_bytes: u64, mut out: impl io::Write) -> io::Result<usize> {
+ pub fn write_to(&self, decompressed_size_in_bytes: u64, out: &mut dyn io::Write) -> io::Result<usize> {
let mut size = decompressed_size_in_bytes;
let mut written = 1;
let mut c: u8 = (self.as_type_id() << 4) | (size as u8 & 0b0000_1111);
@@ -115,7 +115,7 @@ impl Header {
/// The size of the header in bytes when serialized
pub fn size(&self, decompressed_size: u64) -> usize {
- self.write_to(decompressed_size, io::sink())
+ self.write_to(decompressed_size, &mut io::sink())
.expect("io::sink() to never fail")
}
}
diff --git a/vendor/gix-pack/src/data/file/decode/entry.rs b/vendor/gix-pack/src/data/file/decode/entry.rs
index f82e33a7b..d5dd121f8 100644
--- a/vendor/gix-pack/src/data/file/decode/entry.rs
+++ b/vendor/gix-pack/src/data/file/decode/entry.rs
@@ -75,6 +75,7 @@ impl Outcome {
/// Decompression of objects
impl File {
/// Decompress the given `entry` into `out` and return the amount of bytes read from the pack data.
+ /// Note that `inflate` is not reset after usage, but will be reset before using it.
///
/// _Note_ that this method does not resolve deltified objects, but merely decompresses their content
/// `out` is expected to be large enough to hold `entry.size` bytes.
@@ -82,7 +83,12 @@ impl File {
/// # Panics
///
/// If `out` isn't large enough to hold the decompressed `entry`
- pub fn decompress_entry(&self, entry: &data::Entry, out: &mut [u8]) -> Result<usize, Error> {
+ pub fn decompress_entry(
+ &self,
+ entry: &data::Entry,
+ inflate: &mut zlib::Inflate,
+ out: &mut [u8],
+ ) -> Result<usize, Error> {
assert!(
out.len() as u64 >= entry.decompressed_size,
"output buffer isn't large enough to hold decompressed result, want {}, have {}",
@@ -90,7 +96,7 @@ impl File {
out.len()
);
- self.decompress_entry_from_data_offset(entry.data_offset, out)
+ self.decompress_entry_from_data_offset(entry.data_offset, inflate, out)
.map_err(Into::into)
}
@@ -121,53 +127,39 @@ impl File {
pub(crate) fn decompress_entry_from_data_offset(
&self,
data_offset: data::Offset,
+ inflate: &mut zlib::Inflate,
out: &mut [u8],
) -> Result<usize, zlib::inflate::Error> {
let offset: usize = data_offset.try_into().expect("offset representable by machine");
assert!(offset < self.data.len(), "entry offset out of bounds");
- use std::cell::RefCell;
- thread_local! {
- pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
- }
- INFLATE.with(|inflate| {
- let mut inflate = inflate.borrow_mut();
- let res = inflate
- .once(&self.data[offset..], out)
- .map(|(_status, consumed_in, _consumed_out)| consumed_in);
- inflate.reset();
- res
- })
+ inflate.reset();
+ inflate
+ .once(&self.data[offset..], out)
+ .map(|(_status, consumed_in, _consumed_out)| consumed_in)
}
/// Like `decompress_entry_from_data_offset`, but returns consumed input and output.
pub(crate) fn decompress_entry_from_data_offset_2(
&self,
data_offset: data::Offset,
+ inflate: &mut zlib::Inflate,
out: &mut [u8],
) -> Result<(usize, usize), zlib::inflate::Error> {
let offset: usize = data_offset.try_into().expect("offset representable by machine");
assert!(offset < self.data.len(), "entry offset out of bounds");
- use std::cell::RefCell;
- thread_local! {
- pub static INFLATE: RefCell<zlib::Inflate> = RefCell::new(zlib::Inflate::default());
- }
-
- INFLATE.with(|inflate| {
- let mut inflate = inflate.borrow_mut();
- let res = inflate
- .once(&self.data[offset..], out)
- .map(|(_status, consumed_in, consumed_out)| (consumed_in, consumed_out));
- inflate.reset();
- res
- })
+ inflate.reset();
+ inflate
+ .once(&self.data[offset..], out)
+ .map(|(_status, consumed_in, consumed_out)| (consumed_in, consumed_out))
}
/// Decode an entry, resolving delta's as needed, while growing the `out` vector if there is not enough
/// space to hold the result object.
///
/// The `entry` determines which object to decode, and is commonly obtained with the help of a pack index file or through pack iteration.
+ /// `inflate` will be used for decompressing entries, and will not be reset after usage, but before first using it.
///
/// `resolve` is a function to lookup objects with the given [`ObjectId`][gix_hash::ObjectId], in case the full object id is used to refer to
/// a base object, instead of an in-pack offset.
@@ -178,8 +170,9 @@ impl File {
&self,
entry: data::Entry,
out: &mut Vec<u8>,
- resolve: impl Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
- delta_cache: &mut impl cache::DecodeEntry,
+ inflate: &mut zlib::Inflate,
+ resolve: &dyn Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
+ delta_cache: &mut dyn cache::DecodeEntry,
) -> Result<Outcome, Error> {
use crate::data::entry::Header::*;
match entry.header {
@@ -191,15 +184,16 @@ impl File {
.expect("size representable by machine"),
0,
);
- self.decompress_entry(&entry, out.as_mut_slice()).map(|consumed_input| {
- Outcome::from_object_entry(
- entry.header.as_kind().expect("a non-delta entry"),
- &entry,
- consumed_input,
- )
- })
+ self.decompress_entry(&entry, inflate, out.as_mut_slice())
+ .map(|consumed_input| {
+ Outcome::from_object_entry(
+ entry.header.as_kind().expect("a non-delta entry"),
+ &entry,
+ consumed_input,
+ )
+ })
}
- OfsDelta { .. } | RefDelta { .. } => self.resolve_deltas(entry, resolve, out, delta_cache),
+ OfsDelta { .. } | RefDelta { .. } => self.resolve_deltas(entry, resolve, inflate, out, delta_cache),
}
}
@@ -209,9 +203,10 @@ impl File {
fn resolve_deltas(
&self,
last: data::Entry,
- resolve: impl Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
+ resolve: &dyn Fn(&gix_hash::oid, &mut Vec<u8>) -> Option<ResolvedBase>,
+ inflate: &mut zlib::Inflate,
out: &mut Vec<u8>,
- cache: &mut impl cache::DecodeEntry,
+ cache: &mut dyn cache::DecodeEntry,
) -> Result<Outcome, Error> {
// all deltas, from the one that produces the desired object (first) to the oldest at the end of the chain
let mut chain = SmallVec::<[Delta; 10]>::default();
@@ -297,6 +292,7 @@ impl File {
for (delta_idx, delta) in chain.iter_mut().rev().enumerate() {
let consumed_from_data_offset = self.decompress_entry_from_data_offset(
delta.data_offset,
+ inflate,
&mut instructions[..delta.decompressed_size],
)?;
let is_last_delta_to_be_applied = delta_idx + 1 == chain_len;
@@ -357,7 +353,7 @@ impl File {
let base_entry = cursor;
debug_assert!(!base_entry.header.is_delta());
object_kind = base_entry.header.as_kind();
- self.decompress_entry_from_data_offset(base_entry.data_offset, out)?;
+ self.decompress_entry_from_data_offset(base_entry.data_offset, inflate, out)?;
}
(first_buffer_size, second_buffer_end)
diff --git a/vendor/gix-pack/src/data/file/decode/header.rs b/vendor/gix-pack/src/data/file/decode/header.rs
index 0afd6e52a..3a6e40f8a 100644
--- a/vendor/gix-pack/src/data/file/decode/header.rs
+++ b/vendor/gix-pack/src/data/file/decode/header.rs
@@ -2,6 +2,7 @@ use crate::{
data,
data::{delta, file::decode::Error, File},
};
+use gix_features::zlib;
/// A return value of a resolve function, which given an [`ObjectId`][gix_hash::ObjectId] determines where an object can be found.
#[derive(Debug, PartialEq, Eq, Hash, Ord, PartialOrd, Clone)]
@@ -37,13 +38,15 @@ impl File {
/// Resolve the object header information starting at `entry`, following the chain of entries as needed.
///
/// The `entry` determines which object to decode, and is commonly obtained with the help of a pack index file or through pack iteration.
+ /// `inflate` will be used for (partially) decompressing entries, and will be reset before first use, but not after the last use.
///
/// `resolve` is a function to lookup objects with the given [`ObjectId`][gix_hash::ObjectId], in case the full object id
/// is used to refer to a base object, instead of an in-pack offset.
pub fn decode_header(
&self,
mut entry: data::Entry,
- resolve: impl Fn(&gix_hash::oid) -> Option<ResolvedBase>,
+ inflate: &mut zlib::Inflate,
+ resolve: &dyn Fn(&gix_hash::oid) -> Option<ResolvedBase>,
) -> Result<Outcome, Error> {
use crate::data::entry::Header::*;
let mut num_deltas = 0;
@@ -60,14 +63,14 @@ impl File {
OfsDelta { base_distance } => {
num_deltas += 1;
if first_delta_decompressed_size.is_none() {
- first_delta_decompressed_size = Some(self.decode_delta_object_size(&entry)?);
+ first_delta_decompressed_size = Some(self.decode_delta_object_size(inflate, &entry)?);
}
entry = self.entry(entry.base_pack_offset(base_distance))
}
RefDelta { base_id } => {
num_deltas += 1;
if first_delta_decompressed_size.is_none() {
- first_delta_decompressed_size = Some(self.decode_delta_object_size(&entry)?);
+ first_delta_decompressed_size = Some(self.decode_delta_object_size(inflate, &entry)?);
}
match resolve(base_id.as_ref()) {
Some(ResolvedBase::InPack(base_entry)) => entry = base_entry,
@@ -89,9 +92,11 @@ impl File {
}
#[inline]
- fn decode_delta_object_size(&self, entry: &data::Entry) -> Result<u64, Error> {
+ fn decode_delta_object_size(&self, inflate: &mut zlib::Inflate, entry: &data::Entry) -> Result<u64, Error> {
let mut buf = [0_u8; 32];
- let used = self.decompress_entry_from_data_offset_2(entry.data_offset, &mut buf)?.1;
+ let used = self
+ .decompress_entry_from_data_offset_2(entry.data_offset, inflate, &mut buf)?
+ .1;
let buf = &buf[..used];
let (_base_size, offset) = delta::decode_header_size(buf);
let (result_size, _offset) = delta::decode_header_size(&buf[offset..]);
diff --git a/vendor/gix-pack/src/data/file/verify.rs b/vendor/gix-pack/src/data/file/verify.rs
index afec20826..11cec041d 100644
--- a/vendor/gix-pack/src/data/file/verify.rs
+++ b/vendor/gix-pack/src/data/file/verify.rs
@@ -1,6 +1,5 @@
-use std::sync::atomic::AtomicBool;
-
use gix_features::progress::Progress;
+use std::sync::atomic::AtomicBool;
use crate::data::File;
@@ -27,7 +26,7 @@ impl File {
/// even more thorough integrity check.
pub fn verify_checksum(
&self,
- progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
crate::verify::checksum_on_disk_or_mmap(
diff --git a/vendor/gix-pack/src/data/input/bytes_to_entries.rs b/vendor/gix-pack/src/data/input/bytes_to_entries.rs
index 995c8df2c..7450e9134 100644
--- a/vendor/gix-pack/src/data/input/bytes_to_entries.rs
+++ b/vendor/gix-pack/src/data/input/bytes_to_entries.rs
@@ -1,10 +1,6 @@
use std::{fs, io};
-use gix_features::{
- hash,
- hash::Sha1,
- zlib::{stream::inflate::ReadBoxed, Decompress},
-};
+use gix_features::{hash::Sha1, zlib::Decompress};
use gix_hash::ObjectId;
use crate::data::input;
@@ -14,7 +10,7 @@ use crate::data::input;
/// The iterator used as part of [`Bundle::write_to_directory(…)`][crate::Bundle::write_to_directory()].
pub struct BytesToEntriesIter<BR> {
read: BR,
- decompressor: Option<Box<Decompress>>,
+ decompressor: Decompress,
offset: u64,
had_error: bool,
version: crate::data::Version,
@@ -66,7 +62,7 @@ where
);
Ok(BytesToEntriesIter {
read,
- decompressor: None,
+ decompressor: Decompress::new(true),
compressed,
offset: 12,
had_error: false,
@@ -88,31 +84,25 @@ where
self.objects_left -= 1; // even an error counts as objects
// Read header
- let entry = match self.hash.take() {
+ let entry = match self.hash.as_mut() {
Some(hash) => {
let mut read = read_and_pass_to(
&mut self.read,
- hash::Write {
+ HashWrite {
inner: io::sink(),
hash,
},
);
- let res = crate::data::Entry::from_read(&mut read, self.offset, self.hash_len);
- self.hash = Some(read.write.hash);
- res
+ crate::data::Entry::from_read(&mut read, self.offset, self.hash_len)
}
None => crate::data::Entry::from_read(&mut self.read, self.offset, self.hash_len),
}
.map_err(input::Error::from)?;
// Decompress object to learn its compressed bytes
- let mut decompressor = self
- .decompressor
- .take()
- .unwrap_or_else(|| Box::new(Decompress::new(true)));
let compressed_buf = self.compressed_buf.take().unwrap_or_else(|| Vec::with_capacity(4096));
- decompressor.reset(true);
- let mut decompressed_reader = ReadBoxed {
+ self.decompressor.reset(true);
+ let mut decompressed_reader = DecompressRead {
inner: read_and_pass_to(
&mut self.read,
if self.compressed.keep() {
@@ -121,7 +111,7 @@ where
compressed_buf
},
),
- decompressor,
+ decompressor: &mut self.decompressor,
};
let bytes_copied = io::copy(&mut decompressed_reader, &mut io::sink())?;
@@ -135,7 +125,6 @@ where
let pack_offset = self.offset;
let compressed_size = decompressed_reader.decompressor.total_in();
self.offset += entry.header_size() as u64 + compressed_size;
- self.decompressor = Some(decompressed_reader.decompressor);
let mut compressed = decompressed_reader.inner.write;
debug_assert_eq!(
@@ -149,7 +138,7 @@ where
let crc32 = if self.compressed.crc32() {
let mut header_buf = [0u8; 12 + gix_hash::Kind::longest().len_in_bytes()];
- let header_len = entry.header.write_to(bytes_copied, header_buf.as_mut())?;
+ let header_len = entry.header.write_to(bytes_copied, &mut header_buf.as_mut())?;
let state = gix_features::hash::crc32_update(0, &header_buf[..header_len]);
Some(gix_features::hash::crc32_update(state, &compressed))
} else {
@@ -293,3 +282,43 @@ impl crate::data::File {
)
}
}
+
+/// The boxed variant is faster for what we do (moving the decompressor in and out a lot)
+pub struct DecompressRead<'a, R> {
+ /// The reader from which bytes should be decompressed.
+ pub inner: R,
+ /// The decompressor doing all the work.
+ pub decompressor: &'a mut Decompress,
+}
+
+impl<'a, R> io::Read for DecompressRead<'a, R>
+where
+ R: io::BufRead,
+{
+ fn read(&mut self, into: &mut [u8]) -> io::Result<usize> {
+ gix_features::zlib::stream::inflate::read(&mut self.inner, self.decompressor, into)
+ }
+}
+
+/// A utility to automatically generate a hash while writing into an inner writer.
+pub struct HashWrite<'a, T> {
+ /// The hash implementation.
+ pub hash: &'a mut Sha1,
+ /// The inner writer.
+ pub inner: T,
+}
+
+impl<'a, T> std::io::Write for HashWrite<'a, T>
+where
+ T: std::io::Write,
+{
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+ let written = self.inner.write(buf)?;
+ self.hash.update(&buf[..written]);
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> std::io::Result<()> {
+ self.inner.flush()
+ }
+}
diff --git a/vendor/gix-pack/src/data/input/entries_to_bytes.rs b/vendor/gix-pack/src/data/input/entries_to_bytes.rs
index a8c21e653..27cd04648 100644
--- a/vendor/gix-pack/src/data/input/entries_to_bytes.rs
+++ b/vendor/gix-pack/src/data/input/entries_to_bytes.rs
@@ -73,12 +73,11 @@ where
}
self.num_entries += 1;
entry.header.write_to(entry.decompressed_size, &mut self.output)?;
- std::io::copy(
- &mut entry
+ self.output.write_all(
+ entry
.compressed
.as_deref()
.expect("caller must configure generator to keep compressed bytes"),
- &mut self.output,
)?;
Ok(entry)
}
diff --git a/vendor/gix-pack/src/data/input/entry.rs b/vendor/gix-pack/src/data/input/entry.rs
index 74d4800a0..7d3d9b3cb 100644
--- a/vendor/gix-pack/src/data/input/entry.rs
+++ b/vendor/gix-pack/src/data/input/entry.rs
@@ -33,7 +33,7 @@ impl input::Entry {
let mut header_buf = [0u8; 12 + gix_hash::Kind::longest().len_in_bytes()];
let header_len = self
.header
- .write_to(self.decompressed_size, header_buf.as_mut())
+ .write_to(self.decompressed_size, &mut header_buf.as_mut())
.expect("write to memory will not fail");
let state = gix_features::hash::crc32_update(0, &header_buf[..header_len]);
gix_features::hash::crc32_update(state, self.compressed.as_ref().expect("we always set it"))
diff --git a/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs b/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs
index d95e6176d..60f686d3a 100644
--- a/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs
+++ b/vendor/gix-pack/src/data/input/lookup_ref_delta_objects.rs
@@ -47,13 +47,7 @@ where
/// positive `size_change` values mean an object grew or was more commonly, was inserted. Negative values
/// mean the object shrunk, usually because there header changed from ref-deltas to ofs deltas.
- fn track_change(
- &mut self,
- shifted_pack_offset: u64,
- pack_offset: u64,
- size_change: i64,
- oid: impl Into<Option<ObjectId>>,
- ) {
+ fn track_change(&mut self, shifted_pack_offset: u64, pack_offset: u64, size_change: i64, oid: Option<ObjectId>) {
if size_change == 0 {
return;
}
@@ -61,7 +55,7 @@ where
shifted_pack_offset,
pack_offset,
size_change_in_bytes: size_change,
- oid: oid.into().unwrap_or_else(||
+ oid: oid.unwrap_or_else(||
// NOTE: this value acts as sentinel and the actual hash kind doesn't matter.
gix_hash::Kind::Sha1.null()),
});
@@ -112,7 +106,7 @@ where
entry.pack_offset,
current_pack_offset,
entry.bytes_in_pack() as i64,
- base_id,
+ Some(base_id),
);
entry
}
diff --git a/vendor/gix-pack/src/data/mod.rs b/vendor/gix-pack/src/data/mod.rs
index 36e01d779..9808ae853 100644
--- a/vendor/gix-pack/src/data/mod.rs
+++ b/vendor/gix-pack/src/data/mod.rs
@@ -37,9 +37,11 @@ pub mod init {
pub mod entry;
///
+#[cfg(feature = "streaming-input")]
pub mod input;
/// Utilities to encode pack data entries and write them to a `Write` implementation to resemble a pack data file.
+#[cfg(feature = "generate")]
pub mod output;
/// A slice into a pack file denoting a pack entry.
diff --git a/vendor/gix-pack/src/data/output/count/mod.rs b/vendor/gix-pack/src/data/output/count/mod.rs
index 0c33abd97..481ff65d3 100644
--- a/vendor/gix-pack/src/data/output/count/mod.rs
+++ b/vendor/gix-pack/src/data/output/count/mod.rs
@@ -45,5 +45,5 @@ pub use objects_impl::{objects, objects_unthreaded};
///
pub mod objects {
- pub use super::objects_impl::{Error, ObjectExpansion, Options, Outcome, Result};
+ pub use super::objects_impl::{Error, ObjectExpansion, Options, Outcome};
}
diff --git a/vendor/gix-pack/src/data/output/count/objects/mod.rs b/vendor/gix-pack/src/data/output/count/objects/mod.rs
index a13e41146..24810577c 100644
--- a/vendor/gix-pack/src/data/output/count/objects/mod.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/mod.rs
@@ -1,12 +1,9 @@
-use std::{
- cell::RefCell,
- sync::{atomic::AtomicBool, Arc},
-};
+use std::{cell::RefCell, sync::atomic::AtomicBool};
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
use gix_hash::ObjectId;
-use crate::{data::output, find};
+use crate::data::output;
pub(in crate::data::output::count::objects_impl) mod reduce;
mod util;
@@ -16,9 +13,6 @@ pub use types::{Error, ObjectExpansion, Options, Outcome};
mod tree;
-/// The return type used by [`objects()`].
-pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Error<E1, E2>>;
-
/// Generate [`Count`][output::Count]s from input `objects` with object expansion based on [`options`][Options]
/// to learn which objects would would constitute a pack. This step is required to know exactly how many objects would
/// be in a pack while keeping data around to avoid minimize object database access.
@@ -29,29 +23,25 @@ pub type Result<E1, E2> = std::result::Result<(Vec<output::Count>, Outcome), Err
/// * `objects_ids`
/// * A list of objects ids to add to the pack. Duplication checks are performed so no object is ever added to a pack twice.
/// * Objects may be expanded based on the provided [`options`][Options]
-/// * `progress`
-/// * a way to obtain progress information
+/// * `objects`
+/// * count the amount of objects we encounter
/// * `should_interrupt`
/// * A flag that is set to true if the operation should stop
/// * `options`
/// * more configuration
-pub fn objects<Find, Iter, IterErr, Oid>(
+pub fn objects<Find>(
db: Find,
- objects_ids: Iter,
- progress: impl Progress,
+ objects_ids: Box<dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>> + Send>,
+ objects: &dyn gix_features::progress::Count,
should_interrupt: &AtomicBool,
Options {
thread_limit,
input_object_expansion,
chunk_size,
}: Options,
-) -> Result<find::existing::Error<Find::Error>, IterErr>
+) -> Result<(Vec<output::Count>, Outcome), Error>
where
Find: crate::Find + Send + Clone,
- <Find as crate::Find>::Error: Send,
- Iter: Iterator<Item = std::result::Result<Oid, IterErr>> + Send,
- Oid: Into<ObjectId> + Send,
- IterErr: std::error::Error + Send,
{
let lower_bound = objects_ids.size_hint().0;
let (chunk_size, thread_limit, _) = parallel::optimize_chunk_size_and_thread_limit(
@@ -65,71 +55,59 @@ where
size: chunk_size,
};
let seen_objs = gix_hashtable::sync::ObjectIdMap::default();
- let progress = Arc::new(parking_lot::Mutex::new(progress));
+ let objects = objects.counter();
parallel::in_parallel(
chunks,
thread_limit,
{
- let progress = Arc::clone(&progress);
- move |n| {
+ move |_| {
(
Vec::new(), // object data buffer
Vec::new(), // object data buffer 2 to hold two objects at a time
- {
- let mut p = progress
- .lock()
- .add_child_with_id(format!("thread {n}"), gix_features::progress::UNKNOWN);
- p.init(None, gix_features::progress::count("objects"));
- p
- },
+ objects.clone(),
)
}
},
{
let seen_objs = &seen_objs;
- move |oids: Vec<std::result::Result<Oid, IterErr>>, (buf1, buf2, progress)| {
+ move |oids: Vec<_>, (buf1, buf2, objects)| {
expand::this(
&db,
input_object_expansion,
seen_objs,
- oids,
+ &mut oids.into_iter(),
buf1,
buf2,
- progress,
+ objects,
should_interrupt,
true, /*allow pack lookups*/
)
}
},
- reduce::Statistics::new(progress),
+ reduce::Statistics::new(),
)
}
/// Like [`objects()`] but using a single thread only to mostly save on the otherwise required overhead.
-pub fn objects_unthreaded<Find, IterErr, Oid>(
- db: Find,
- object_ids: impl Iterator<Item = std::result::Result<Oid, IterErr>>,
- mut progress: impl Progress,
+pub fn objects_unthreaded(
+ db: &dyn crate::Find,
+ object_ids: &mut dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>>,
+ objects: &dyn gix_features::progress::Count,
should_interrupt: &AtomicBool,
input_object_expansion: ObjectExpansion,
-) -> Result<find::existing::Error<Find::Error>, IterErr>
-where
- Find: crate::Find,
- Oid: Into<ObjectId>,
- IterErr: std::error::Error,
-{
+) -> Result<(Vec<output::Count>, Outcome), Error> {
let seen_objs = RefCell::new(gix_hashtable::HashSet::default());
let (mut buf1, mut buf2) = (Vec::new(), Vec::new());
expand::this(
- &db,
+ db,
input_object_expansion,
&seen_objs,
object_ids,
&mut buf1,
&mut buf2,
- &mut progress,
+ &objects.counter(),
should_interrupt,
false, /*allow pack lookups*/
)
@@ -138,7 +116,6 @@ where
mod expand {
use std::sync::atomic::{AtomicBool, Ordering};
- use gix_features::progress::Progress;
use gix_hash::{oid, ObjectId};
use gix_object::{CommitRefIter, TagRefIter};
@@ -149,26 +126,21 @@ mod expand {
};
use crate::{
data::{output, output::count::PackLocation},
- find, FindExt,
+ FindExt,
};
#[allow(clippy::too_many_arguments)]
- pub fn this<Find, IterErr, Oid>(
- db: &Find,
+ pub fn this(
+ db: &dyn crate::Find,
input_object_expansion: ObjectExpansion,
seen_objs: &impl util::InsertImmutable,
- oids: impl IntoIterator<Item = std::result::Result<Oid, IterErr>>,
+ oids: &mut dyn Iterator<Item = Result<ObjectId, Box<dyn std::error::Error + Send + Sync + 'static>>>,
buf1: &mut Vec<u8>,
#[allow(clippy::ptr_arg)] buf2: &mut Vec<u8>,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
should_interrupt: &AtomicBool,
allow_pack_lookups: bool,
- ) -> super::Result<find::existing::Error<Find::Error>, IterErr>
- where
- Find: crate::Find,
- Oid: Into<ObjectId>,
- IterErr: std::error::Error,
- {
+ ) -> Result<(Vec<output::Count>, Outcome), Error> {
use ObjectExpansion::*;
let mut out = Vec::new();
@@ -180,13 +152,13 @@ mod expand {
let mut outcome = Outcome::default();
let stats = &mut outcome;
- for id in oids.into_iter() {
+ for id in oids {
if should_interrupt.load(Ordering::Relaxed) {
return Err(Error::Interrupted);
}
- let id = id.map(|oid| oid.into()).map_err(Error::InputIteration)?;
- let (obj, location) = db.find(id, buf1)?;
+ let id = id.map_err(Error::InputIteration)?;
+ let (obj, location) = db.find(&id, buf1)?;
stats.input_objects += 1;
match input_object_expansion {
TreeAdditionsComparedToAncestor => {
@@ -196,14 +168,14 @@ mod expand {
let mut id = id.to_owned();
loop {
- push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false);
+ push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false);
match obj.kind {
Tree | Blob => break,
Tag => {
id = TagRefIter::from_bytes(obj.data)
.target_id()
.expect("every tag has a target");
- let tmp = db.find(id, buf1)?;
+ let tmp = db.find(&id, buf1)?;
obj = tmp.0;
location = tmp.1;
@@ -225,14 +197,14 @@ mod expand {
Err(err) => return Err(Error::CommitDecode(err)),
}
}
- let (obj, location) = db.find(tree_id, buf1)?;
+ let (obj, location) = db.find(&tree_id, buf1)?;
push_obj_count_unique(
- &mut out, seen_objs, &tree_id, location, progress, stats, true,
+ &mut out, seen_objs, &tree_id, location, objects, stats, true,
);
gix_object::TreeRefIter::from_bytes(obj.data)
};
- let objects = if parent_commit_ids.is_empty() {
+ let objects_ref = if parent_commit_ids.is_empty() {
traverse_delegate.clear();
gix_traverse::tree::breadthfirst(
current_tree_iter,
@@ -241,7 +213,7 @@ mod expand {
stats.decoded_objects += 1;
match db.find(oid, buf).ok() {
Some((obj, location)) => {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
stats.expanded_objects += 1;
out.push(output::Count::from_data(oid, location));
obj.try_into_tree_iter()
@@ -259,20 +231,20 @@ mod expand {
let (parent_commit_obj, location) = db.find(commit_id, buf2)?;
push_obj_count_unique(
- &mut out, seen_objs, commit_id, location, progress, stats, true,
+ &mut out, seen_objs, commit_id, location, objects, stats, true,
);
CommitRefIter::from_bytes(parent_commit_obj.data)
.tree_id()
.expect("every commit has a tree")
};
let parent_tree = {
- let (parent_tree_obj, location) = db.find(parent_tree_id, buf2)?;
+ let (parent_tree_obj, location) = db.find(&parent_tree_id, buf2)?;
push_obj_count_unique(
&mut out,
seen_objs,
&parent_tree_id,
location,
- progress,
+ objects,
stats,
true,
);
@@ -294,8 +266,8 @@ mod expand {
}
&changes_delegate.objects
};
- for id in objects.iter() {
- out.push(id_to_count(db, buf2, id, progress, stats, allow_pack_lookups));
+ for id in objects_ref.iter() {
+ out.push(id_to_count(db, buf2, id, objects, stats, allow_pack_lookups));
}
break;
}
@@ -307,7 +279,7 @@ mod expand {
let mut id = id;
let mut obj = (obj, location);
loop {
- push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), progress, stats, false);
+ push_obj_count_unique(&mut out, seen_objs, &id, obj.1.clone(), objects, stats, false);
match obj.0.kind {
Tree => {
traverse_delegate.clear();
@@ -318,7 +290,7 @@ mod expand {
stats.decoded_objects += 1;
match db.find(oid, buf).ok() {
Some((obj, location)) => {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
stats.expanded_objects += 1;
out.push(output::Count::from_data(oid, location));
obj.try_into_tree_iter()
@@ -330,7 +302,7 @@ mod expand {
)
.map_err(Error::TreeTraverse)?;
for id in &traverse_delegate.non_trees {
- out.push(id_to_count(db, buf1, id, progress, stats, allow_pack_lookups));
+ out.push(id_to_count(db, buf1, id, objects, stats, allow_pack_lookups));
}
break;
}
@@ -339,7 +311,7 @@ mod expand {
.tree_id()
.expect("every commit has a tree");
stats.expanded_objects += 1;
- obj = db.find(id, buf1)?;
+ obj = db.find(&id, buf1)?;
continue;
}
Blob => break,
@@ -348,13 +320,13 @@ mod expand {
.target_id()
.expect("every tag has a target");
stats.expanded_objects += 1;
- obj = db.find(id, buf1)?;
+ obj = db.find(&id, buf1)?;
continue;
}
}
}
}
- AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, progress, stats, false),
+ AsIs => push_obj_count_unique(&mut out, seen_objs, &id, location, objects, stats, false),
}
}
outcome.total_objects = out.len();
@@ -367,13 +339,13 @@ mod expand {
all_seen: &impl util::InsertImmutable,
id: &oid,
location: Option<crate::data::entry::Location>,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
statistics: &mut Outcome,
count_expanded: bool,
) {
let inserted = all_seen.insert(id.to_owned());
if inserted {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
statistics.decoded_objects += 1;
if count_expanded {
statistics.expanded_objects += 1;
@@ -383,15 +355,15 @@ mod expand {
}
#[inline]
- fn id_to_count<Find: crate::Find>(
- db: &Find,
+ fn id_to_count(
+ db: &dyn crate::Find,
buf: &mut Vec<u8>,
id: &oid,
- progress: &mut impl Progress,
+ objects: &gix_features::progress::AtomicStep,
statistics: &mut Outcome,
allow_pack_lookups: bool,
) -> output::Count {
- progress.inc();
+ objects.fetch_add(1, Ordering::Relaxed);
statistics.expanded_objects += 1;
output::Count {
id: id.to_owned(),
diff --git a/vendor/gix-pack/src/data/output/count/objects/reduce.rs b/vendor/gix-pack/src/data/output/count/objects/reduce.rs
index c6a61d467..03144b60f 100644
--- a/vendor/gix-pack/src/data/output/count/objects/reduce.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/reduce.rs
@@ -1,35 +1,27 @@
-use std::{marker::PhantomData, sync::Arc};
+use std::marker::PhantomData;
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
use super::Outcome;
use crate::data::output;
-pub struct Statistics<E, P> {
+pub struct Statistics<E> {
total: Outcome,
counts: Vec<output::Count>,
- progress: Arc<parking_lot::Mutex<P>>,
_err: PhantomData<E>,
}
-impl<E, P> Statistics<E, P>
-where
- P: Progress,
-{
- pub fn new(progress: Arc<parking_lot::Mutex<P>>) -> Self {
+impl<E> Statistics<E> {
+ pub fn new() -> Self {
Statistics {
total: Default::default(),
counts: Default::default(),
- progress,
- _err: PhantomData::default(),
+ _err: PhantomData,
}
}
}
-impl<E, P> parallel::Reduce for Statistics<E, P>
-where
- P: Progress,
-{
+impl<E> parallel::Reduce for Statistics<E> {
type Input = Result<(Vec<output::Count>, Outcome), E>;
type FeedProduce = ();
type Output = (Vec<output::Count>, Outcome);
@@ -38,7 +30,6 @@ where
fn feed(&mut self, item: Self::Input) -> Result<Self::FeedProduce, Self::Error> {
let (counts, stats) = item?;
self.total.aggregate(stats);
- self.progress.lock().inc_by(counts.len());
self.counts.extend(counts);
Ok(())
}
diff --git a/vendor/gix-pack/src/data/output/count/objects/types.rs b/vendor/gix-pack/src/data/output/count/objects/types.rs
index f39a24ee4..4b9ecea20 100644
--- a/vendor/gix-pack/src/data/output/count/objects/types.rs
+++ b/vendor/gix-pack/src/data/output/count/objects/types.rs
@@ -80,17 +80,13 @@ impl Default for Options {
/// The error returned by the pack generation iterator [`bytes::FromEntriesIter`][crate::data::output::bytes::FromEntriesIter].
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
-pub enum Error<FindErr, IterErr>
-where
- FindErr: std::error::Error + 'static,
- IterErr: std::error::Error + 'static,
-{
+pub enum Error {
#[error(transparent)]
CommitDecode(gix_object::decode::Error),
#[error(transparent)]
- FindExisting(#[from] FindErr),
+ FindExisting(#[from] crate::find::existing::Error),
#[error(transparent)]
- InputIteration(IterErr),
+ InputIteration(Box<dyn std::error::Error + Send + Sync + 'static>),
#[error(transparent)]
TreeTraverse(gix_traverse::tree::breadthfirst::Error),
#[error(transparent)]
diff --git a/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs b/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
index dbe8b0b95..2bebf5b20 100644
--- a/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
+++ b/vendor/gix-pack/src/data/output/entry/iter_from_counts.rs
@@ -1,6 +1,7 @@
pub(crate) mod function {
use std::{cmp::Ordering, sync::Arc};
+ use gix_features::progress::prodash::{Count, DynNestedProgress};
use gix_features::{parallel, parallel::SequenceId, progress::Progress};
use super::{reduce, util, Error, Mode, Options, Outcome, ProgressId};
@@ -38,7 +39,7 @@ pub(crate) mod function {
pub fn iter_from_counts<Find>(
mut counts: Vec<output::Count>,
db: Find,
- mut progress: impl Progress + 'static,
+ mut progress: Box<dyn DynNestedProgress + 'static>,
Options {
version,
mode,
@@ -46,11 +47,10 @@ pub(crate) mod function {
thread_limit,
chunk_size,
}: Options,
- ) -> impl Iterator<Item = Result<(SequenceId, Vec<output::Entry>), Error<Find::Error>>>
- + parallel::reduce::Finalize<Reduce = reduce::Statistics<Error<Find::Error>>>
+ ) -> impl Iterator<Item = Result<(SequenceId, Vec<output::Entry>), Error>>
+ + parallel::reduce::Finalize<Reduce = reduce::Statistics<Error>>
where
Find: crate::Find + Send + Clone + 'static,
- <Find as crate::Find>::Error: Send,
{
assert!(
matches!(version, crate::data::Version::V2),
@@ -60,7 +60,7 @@ pub(crate) mod function {
parallel::optimize_chunk_size_and_thread_limit(chunk_size, Some(counts.len()), thread_limit, None);
{
let progress = Arc::new(parking_lot::Mutex::new(
- progress.add_child_with_id("resolving", ProgressId::ResolveCounts.into()),
+ progress.add_child_with_id("resolving".into(), ProgressId::ResolveCounts.into()),
));
progress.lock().init(None, gix_features::progress::count("counts"));
let enough_counts_present = counts.len() > 4_000;
@@ -79,7 +79,7 @@ pub(crate) mod function {
use crate::data::output::count::PackLocation::*;
match count.entry_pack_location {
LookedUp(_) => continue,
- NotLookedUp => count.entry_pack_location = LookedUp(db.location_by_oid(count.id, buf)),
+ NotLookedUp => count.entry_pack_location = LookedUp(db.location_by_oid(&count.id, buf)),
}
}
progress.lock().inc_by(chunk_size);
@@ -93,7 +93,7 @@ pub(crate) mod function {
}
let counts_range_by_pack_id = match mode {
Mode::PackCopyAndBaseObjects => {
- let mut progress = progress.add_child_with_id("sorting", ProgressId::SortEntries.into());
+ let mut progress = progress.add_child_with_id("sorting".into(), ProgressId::SortEntries.into());
progress.init(Some(counts.len()), gix_features::progress::count("counts"));
let start = std::time::Instant::now();
@@ -204,7 +204,7 @@ pub(crate) mod function {
stats.objects_copied_from_pack += 1;
entry
}
- None => match db.try_find(count.id, buf).map_err(Error::FindExisting)? {
+ None => match db.try_find(&count.id, buf).map_err(Error::FindExisting)? {
Some((obj, _location)) => {
stats.decoded_and_recompressed_objects += 1;
output::Entry::from_data(count, &obj)
@@ -216,7 +216,7 @@ pub(crate) mod function {
},
}
}
- None => match db.try_find(count.id, buf).map_err(Error::FindExisting)? {
+ None => match db.try_find(&count.id, buf).map_err(Error::FindExisting)? {
Some((obj, _location)) => {
stats.decoded_and_recompressed_objects += 1;
output::Entry::from_data(count, &obj)
@@ -288,7 +288,7 @@ mod reduce {
fn default() -> Self {
Statistics {
total: Default::default(),
- _err: PhantomData::default(),
+ _err: PhantomData,
}
}
}
@@ -395,12 +395,9 @@ mod types {
/// The error returned by the pack generation function [`iter_from_counts()`][crate::data::output::entry::iter_from_counts()].
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<FindErr>
- where
- FindErr: std::error::Error + 'static,
- {
+ pub enum Error {
#[error(transparent)]
- FindExisting(FindErr),
+ FindExisting(crate::find::Error),
#[error(transparent)]
NewEntry(#[from] entry::Error),
}
diff --git a/vendor/gix-pack/src/data/output/entry/mod.rs b/vendor/gix-pack/src/data/output/entry/mod.rs
index a94720047..4ab4879eb 100644
--- a/vendor/gix-pack/src/data/output/entry/mod.rs
+++ b/vendor/gix-pack/src/data/output/entry/mod.rs
@@ -66,15 +66,14 @@ impl output::Entry {
potential_bases: &[output::Count],
bases_index_offset: usize,
pack_offset_to_oid: Option<impl FnMut(u32, u64) -> Option<ObjectId>>,
- target_version: crate::data::Version,
+ target_version: data::Version,
) -> Option<Result<Self, Error>> {
if entry.version != target_version {
return None;
};
let pack_offset_must_be_zero = 0;
- let pack_entry =
- crate::data::Entry::from_bytes(&entry.data, pack_offset_must_be_zero, count.id.as_slice().len());
+ let pack_entry = data::Entry::from_bytes(&entry.data, pack_offset_must_be_zero, count.id.as_slice().len());
use crate::data::entry::Header::*;
match pack_entry.header {
@@ -153,9 +152,9 @@ impl output::Entry {
/// This information is known to the one calling the method.
pub fn to_entry_header(
&self,
- version: crate::data::Version,
+ version: data::Version,
index_to_base_distance: impl FnOnce(usize) -> u64,
- ) -> crate::data::entry::Header {
+ ) -> data::entry::Header {
assert!(
matches!(version, data::Version::V2),
"we can only write V2 pack entries for now"
diff --git a/vendor/gix-pack/src/find.rs b/vendor/gix-pack/src/find.rs
index 2908669a2..b049d4d78 100644
--- a/vendor/gix-pack/src/find.rs
+++ b/vendor/gix-pack/src/find.rs
@@ -1,13 +1,16 @@
+/// The error returned by methods of the [Find](crate::Find) trait.
+pub type Error = Box<dyn std::error::Error + Send + Sync + 'static>;
+
///
pub mod existing {
use gix_hash::ObjectId;
- /// The error returned by the [`find(…)`][crate::FindExt::find()] trait methods.
+ /// The error returned by the [`find(…)`](crate::FindExt::find()) trait methods.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<T: std::error::Error + 'static> {
+ pub enum Error {
#[error(transparent)]
- Find(T),
+ Find(crate::find::Error),
#[error("An object with id {} could not be found", .oid)]
NotFound { oid: ObjectId },
}
@@ -17,12 +20,12 @@ pub mod existing {
pub mod existing_object {
use gix_hash::ObjectId;
- /// The error returned by the various [`find_*`][crate::FindExt::find_commit()] trait methods.
+ /// The error returned by the various [`find_*`](crate::FindExt::find_commit()) trait methods.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<T: std::error::Error + 'static> {
+ pub enum Error {
#[error(transparent)]
- Find(T),
+ Find(crate::find::Error),
#[error(transparent)]
Decode(gix_object::decode::Error),
#[error("An object with id {} could not be found", .oid)]
@@ -36,12 +39,12 @@ pub mod existing_object {
pub mod existing_iter {
use gix_hash::ObjectId;
- /// The error returned by the various [`find_*`][crate::FindExt::find_commit()] trait methods.
+ /// The error returned by the various [`find_*`](crate::FindExt::find_commit()) trait methods.
#[derive(Debug, thiserror::Error)]
#[allow(missing_docs)]
- pub enum Error<T: std::error::Error + 'static> {
+ pub enum Error {
#[error(transparent)]
- Find(T),
+ Find(crate::find::Error),
#[error("An object with id {} could not be found", .oid)]
NotFound { oid: ObjectId },
#[error("Expected object of kind {} something else", .expected)]
diff --git a/vendor/gix-pack/src/find_traits.rs b/vendor/gix-pack/src/find_traits.rs
index 6f828afbf..7c4821d81 100644
--- a/vendor/gix-pack/src/find_traits.rs
+++ b/vendor/gix-pack/src/find_traits.rs
@@ -12,11 +12,8 @@ use crate::{data, find};
///
/// [issue]: https://github.com/rust-lang/rust/issues/44265
pub trait Find {
- /// The error returned by [`try_find()`][Find::try_find()]
- type Error: std::error::Error + Send + Sync + 'static;
-
/// Returns true if the object exists in the database.
- fn contains(&self, id: impl AsRef<gix_hash::oid>) -> bool;
+ fn contains(&self, id: &gix_hash::oid) -> bool;
/// Find an object matching `id` in the database while placing its raw, decoded data into `buffer`.
/// A `pack_cache` can be used to speed up subsequent lookups, set it to [`crate::cache::Never`] if the
@@ -26,9 +23,9 @@ pub trait Find {
/// or the error that occurred during lookup or object retrieval.
fn try_find<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.try_find_cached(id, buffer, &mut crate::cache::Never)
}
@@ -40,16 +37,16 @@ pub trait Find {
/// or the error that occurred during lookup or object retrieval.
fn try_find_cached<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error>;
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error>;
/// Find the packs location where an object with `id` can be found in the database, or `None` if there is no pack
/// holding the object.
///
/// _Note_ that this is always None if the object isn't packed even though it exists as loose object.
- fn location_by_oid(&self, id: impl AsRef<gix_hash::oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location>;
+ fn location_by_oid(&self, id: &gix_hash::oid, buf: &mut Vec<u8>) -> Option<data::entry::Location>;
/// Obtain a vector of all offsets, in index order, along with their object id.
fn pack_offsets_and_oid(&self, pack_id: u32) -> Option<Vec<(data::Offset, gix_hash::ObjectId)>>;
@@ -77,10 +74,9 @@ mod ext {
/// while returning the desired object type.
fn $method<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_object::Error<Self::Error>>
- {
+ ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_object::Error> {
let id = id.as_ref();
self.try_find(id, buffer)
.map_err(find::existing_object::Error::Find)?
@@ -108,9 +104,9 @@ mod ext {
/// while returning the desired iterator type.
fn $method<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_iter::Error<Self::Error>> {
+ ) -> Result<($object_type, Option<crate::data::entry::Location>), find::existing_iter::Error> {
let id = id.as_ref();
self.try_find(id, buffer)
.map_err(find::existing_iter::Error::Find)?
@@ -133,11 +129,9 @@ mod ext {
/// Like [`try_find(…)`][super::Find::try_find()], but flattens the `Result<Option<_>>` into a single `Result` making a non-existing object an error.
fn find<'a>(
&self,
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
buffer: &'a mut Vec<u8>,
- ) -> Result<(gix_object::Data<'a>, Option<crate::data::entry::Location>), find::existing::Error<Self::Error>>
- {
- let id = id.as_ref();
+ ) -> Result<(gix_object::Data<'a>, Option<crate::data::entry::Location>), find::existing::Error> {
self.try_find(id, buffer)
.map_err(find::existing::Error::Find)?
.ok_or_else(|| find::existing::Error::NotFound {
@@ -154,7 +148,7 @@ mod ext {
make_iter_lookup!(find_tag_iter, Kind::Tag, TagRefIter<'a>, try_into_tag_iter);
}
- impl<T: super::Find> FindExt for T {}
+ impl<T: super::Find + ?Sized> FindExt for T {}
}
pub use ext::FindExt;
@@ -169,22 +163,20 @@ mod find_impls {
where
T: crate::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
(*self).contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, crate::find::Error> {
(*self).try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
(*self).location_by_oid(id, buf)
}
@@ -201,22 +193,20 @@ mod find_impls {
where
T: super::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
self.deref().contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.deref().try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
self.deref().location_by_oid(id, buf)
}
@@ -233,22 +223,20 @@ mod find_impls {
where
T: super::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
self.deref().contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.deref().try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
self.deref().location_by_oid(id, buf)
}
@@ -265,22 +253,20 @@ mod find_impls {
where
T: super::Find,
{
- type Error = T::Error;
-
- fn contains(&self, id: impl AsRef<oid>) -> bool {
+ fn contains(&self, id: &oid) -> bool {
self.deref().contains(id)
}
fn try_find_cached<'a>(
&self,
- id: impl AsRef<oid>,
+ id: &oid,
buffer: &'a mut Vec<u8>,
- pack_cache: &mut impl crate::cache::DecodeEntry,
- ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, Self::Error> {
+ pack_cache: &mut dyn crate::cache::DecodeEntry,
+ ) -> Result<Option<(gix_object::Data<'a>, Option<data::entry::Location>)>, find::Error> {
self.deref().try_find_cached(id, buffer, pack_cache)
}
- fn location_by_oid(&self, id: impl AsRef<oid>, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
+ fn location_by_oid(&self, id: &oid, buf: &mut Vec<u8>) -> Option<data::entry::Location> {
self.deref().location_by_oid(id, buf)
}
diff --git a/vendor/gix-pack/src/index/access.rs b/vendor/gix-pack/src/index/access.rs
index 18fb70e2a..3b748e110 100644
--- a/vendor/gix-pack/src/index/access.rs
+++ b/vendor/gix-pack/src/index/access.rs
@@ -119,7 +119,7 @@ impl index::File {
// NOTE: pretty much the same things as in `multi_index::File::lookup`, change things there
// as well.
pub fn lookup(&self, id: impl AsRef<gix_hash::oid>) -> Option<EntryIndex> {
- lookup(id, &self.fan, |idx| self.oid_at_index(idx))
+ lookup(id.as_ref(), &self.fan, &|idx| self.oid_at_index(idx))
}
/// Given a `prefix`, find an object that matches it uniquely within this index and return `Some(Ok(entry_index))`.
@@ -141,7 +141,7 @@ impl index::File {
prefix,
candidates,
&self.fan,
- |idx| self.oid_at_index(idx),
+ &|idx| self.oid_at_index(idx),
self.num_objects,
)
}
@@ -206,7 +206,7 @@ pub(crate) fn lookup_prefix<'a>(
prefix: gix_hash::Prefix,
candidates: Option<&mut Range<EntryIndex>>,
fan: &[u32; FAN_LEN],
- oid_at_index: impl Fn(EntryIndex) -> &'a gix_hash::oid,
+ oid_at_index: &dyn Fn(EntryIndex) -> &'a gix_hash::oid,
num_objects: u32,
) -> Option<PrefixLookupResult> {
let first_byte = prefix.as_oid().first_byte() as usize;
@@ -266,11 +266,10 @@ pub(crate) fn lookup_prefix<'a>(
}
pub(crate) fn lookup<'a>(
- id: impl AsRef<gix_hash::oid>,
+ id: &gix_hash::oid,
fan: &[u32; FAN_LEN],
- oid_at_index: impl Fn(EntryIndex) -> &'a gix_hash::oid,
+ oid_at_index: &dyn Fn(EntryIndex) -> &'a gix_hash::oid,
) -> Option<EntryIndex> {
- let id = id.as_ref();
let first_byte = id.first_byte() as usize;
let mut upper_bound = fan[first_byte];
let mut lower_bound = if first_byte != 0 { fan[first_byte - 1] } else { 0 };
diff --git a/vendor/gix-pack/src/index/encode.rs b/vendor/gix-pack/src/index/encode.rs
new file mode 100644
index 000000000..d9dad68ce
--- /dev/null
+++ b/vendor/gix-pack/src/index/encode.rs
@@ -0,0 +1,158 @@
+use std::cmp::Ordering;
+
+pub(crate) const LARGE_OFFSET_THRESHOLD: u64 = 0x7fff_ffff;
+pub(crate) const HIGH_BIT: u32 = 0x8000_0000;
+
+pub(crate) fn fanout(iter: &mut dyn ExactSizeIterator<Item = u8>) -> [u32; 256] {
+ let mut fan_out = [0u32; 256];
+ let entries_len = iter.len() as u32;
+ let mut iter = iter.enumerate();
+ let mut idx_and_entry = iter.next();
+ let mut upper_bound = 0;
+
+ for (offset_be, byte) in fan_out.iter_mut().zip(0u8..=255) {
+ *offset_be = match idx_and_entry.as_ref() {
+ Some((_idx, first_byte)) => match first_byte.cmp(&byte) {
+ Ordering::Less => unreachable!("ids should be ordered, and we make sure to keep ahead with them"),
+ Ordering::Greater => upper_bound,
+ Ordering::Equal => {
+ if byte == 255 {
+ entries_len
+ } else {
+ idx_and_entry = iter.find(|(_, first_byte)| *first_byte != byte);
+ upper_bound = idx_and_entry.as_ref().map_or(entries_len, |(idx, _)| *idx as u32);
+ upper_bound
+ }
+ }
+ },
+ None => entries_len,
+ };
+ }
+
+ fan_out
+}
+
+#[cfg(feature = "streaming-input")]
+mod function {
+ use gix_features::{
+ hash,
+ progress::{self, DynNestedProgress},
+ };
+ use std::io;
+
+ use super::{fanout, HIGH_BIT, LARGE_OFFSET_THRESHOLD};
+
+ use crate::index::V2_SIGNATURE;
+
+ struct Count<W> {
+ bytes: u64,
+ inner: W,
+ }
+
+ impl<W> Count<W> {
+ fn new(inner: W) -> Self {
+ Count { bytes: 0, inner }
+ }
+ }
+
+ impl<W> io::Write for Count<W>
+ where
+ W: io::Write,
+ {
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ let written = self.inner.write(buf)?;
+ self.bytes += written as u64;
+ Ok(written)
+ }
+
+ fn flush(&mut self) -> io::Result<()> {
+ self.inner.flush()
+ }
+ }
+
+ pub(crate) fn write_to(
+ out: &mut dyn io::Write,
+ entries_sorted_by_oid: Vec<crate::cache::delta::Item<crate::index::write::TreeEntry>>,
+ pack_hash: &gix_hash::ObjectId,
+ kind: crate::index::Version,
+ progress: &mut dyn DynNestedProgress,
+ ) -> io::Result<gix_hash::ObjectId> {
+ use io::Write;
+ assert_eq!(kind, crate::index::Version::V2, "Can only write V2 packs right now");
+ assert!(
+ entries_sorted_by_oid.len() <= u32::MAX as usize,
+ "a pack cannot have more than u32::MAX objects"
+ );
+
+ // Write header
+ let mut out = Count::new(std::io::BufWriter::with_capacity(
+ 8 * 4096,
+ hash::Write::new(out, kind.hash()),
+ ));
+ out.write_all(V2_SIGNATURE)?;
+ out.write_all(&(kind as u32).to_be_bytes())?;
+
+ progress.init(Some(4), progress::steps());
+ let start = std::time::Instant::now();
+ let _info = progress.add_child_with_id("writing fan-out table".into(), gix_features::progress::UNKNOWN);
+ let fan_out = fanout(&mut entries_sorted_by_oid.iter().map(|e| e.data.id.first_byte()));
+
+ for value in fan_out.iter() {
+ out.write_all(&value.to_be_bytes())?;
+ }
+
+ progress.inc();
+ let _info = progress.add_child_with_id("writing ids".into(), gix_features::progress::UNKNOWN);
+ for entry in &entries_sorted_by_oid {
+ out.write_all(entry.data.id.as_slice())?;
+ }
+
+ progress.inc();
+ let _info = progress.add_child_with_id("writing crc32".into(), gix_features::progress::UNKNOWN);
+ for entry in &entries_sorted_by_oid {
+ out.write_all(&entry.data.crc32.to_be_bytes())?;
+ }
+
+ progress.inc();
+ let _info = progress.add_child_with_id("writing offsets".into(), gix_features::progress::UNKNOWN);
+ {
+ let mut offsets64 = Vec::<u64>::new();
+ for entry in &entries_sorted_by_oid {
+ let offset: u32 = if entry.offset > LARGE_OFFSET_THRESHOLD {
+ assert!(
+ offsets64.len() < LARGE_OFFSET_THRESHOLD as usize,
+ "Encoding breakdown - way too many 64bit offsets"
+ );
+ offsets64.push(entry.offset);
+ ((offsets64.len() - 1) as u32) | HIGH_BIT
+ } else {
+ entry.offset as u32
+ };
+ out.write_all(&offset.to_be_bytes())?;
+ }
+ for value in offsets64 {
+ out.write_all(&value.to_be_bytes())?;
+ }
+ }
+
+ out.write_all(pack_hash.as_slice())?;
+
+ let bytes_written_without_trailer = out.bytes;
+ let out = out.inner.into_inner()?;
+ let index_hash: gix_hash::ObjectId = out.hash.digest().into();
+ out.inner.write_all(index_hash.as_slice())?;
+ out.inner.flush()?;
+
+ progress.inc();
+ progress.show_throughput_with(
+ start,
+ (bytes_written_without_trailer + 20) as usize,
+ progress::bytes().expect("unit always set"),
+ progress::MessageLevel::Success,
+ );
+
+ Ok(index_hash)
+ }
+}
+#[cfg(feature = "streaming-input")]
+pub(crate) use function::write_to;
diff --git a/vendor/gix-pack/src/index/mod.rs b/vendor/gix-pack/src/index/mod.rs
index 36be2d429..8d8807442 100644
--- a/vendor/gix-pack/src/index/mod.rs
+++ b/vendor/gix-pack/src/index/mod.rs
@@ -141,10 +141,12 @@ pub mod init;
pub(crate) mod access;
pub use access::Entry;
+pub(crate) mod encode;
///
pub mod traverse;
mod util;
///
pub mod verify;
///
+#[cfg(feature = "streaming-input")]
pub mod write;
diff --git a/vendor/gix-pack/src/index/traverse/mod.rs b/vendor/gix-pack/src/index/traverse/mod.rs
index 83173f904..1edf0b1d5 100644
--- a/vendor/gix-pack/src/index/traverse/mod.rs
+++ b/vendor/gix-pack/src/index/traverse/mod.rs
@@ -1,9 +1,6 @@
use std::sync::atomic::AtomicBool;
-use gix_features::{
- parallel,
- progress::{Progress, RawProgress},
-};
+use gix_features::{parallel, progress::Progress, zlib};
use crate::index;
@@ -16,6 +13,7 @@ use reduce::Reducer;
mod error;
pub use error::Error;
+use gix_features::progress::DynNestedProgress;
mod types;
pub use types::{Algorithm, ProgressId, SafetyCheck, Statistics};
@@ -46,13 +44,11 @@ impl Default for Options<fn() -> crate::cache::Never> {
}
/// The outcome of the [`traverse()`][index::File::traverse()] method.
-pub struct Outcome<P> {
+pub struct Outcome {
/// The checksum obtained when hashing the file, which matched the checksum contained within the file.
pub actual_index_checksum: gix_hash::ObjectId,
/// The statistics obtained during traversal.
pub statistics: Statistics,
- /// The input progress to allow reuse.
- pub progress: P,
}
/// Traversal of pack data files using an index file
@@ -77,10 +73,10 @@ impl index::File {
///
/// Use [`thread_limit`][Options::thread_limit] to further control parallelism and [`check`][SafetyCheck] to define how much the passed
/// objects shall be verified beforehand.
- pub fn traverse<P, C, Processor, E, F>(
+ pub fn traverse<C, Processor, E, F>(
&self,
pack: &crate::data::File,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
processor: Processor,
Options {
@@ -89,12 +85,11 @@ impl index::File {
check,
make_pack_lookup_cache,
}: Options<F>,
- ) -> Result<Outcome<P>, Error<E>>
+ ) -> Result<Outcome, Error<E>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
E: std::error::Error + Send + Sync + 'static,
- Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn RawProgress) -> Result<(), E> + Send + Clone,
+ Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E> + Send + Clone,
F: Fn() -> C + Send + Clone,
{
match traversal {
@@ -123,8 +118,8 @@ impl index::File {
&self,
pack: &crate::data::File,
check: SafetyCheck,
- pack_progress: impl Progress,
- index_progress: impl Progress,
+ pack_progress: &mut dyn Progress,
+ index_progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, Error<E>>
where
@@ -155,9 +150,10 @@ impl index::File {
pack: &crate::data::File,
cache: &mut C,
buf: &mut Vec<u8>,
- progress: &mut dyn RawProgress,
+ inflate: &mut zlib::Inflate,
+ progress: &mut dyn Progress,
index_entry: &index::Entry,
- processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn RawProgress) -> Result<(), E>,
+ processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E>,
) -> Result<crate::data::decode::entry::Outcome, Error<E>>
where
C: crate::cache::DecodeEntry,
@@ -169,7 +165,8 @@ impl index::File {
.decode_entry(
pack_entry,
buf,
- |id, _| {
+ inflate,
+ &|id, _| {
self.lookup(id).map(|index| {
crate::data::decode::entry::ResolvedBase::InPack(pack.entry(self.pack_offset_at_index(index)))
})
@@ -205,8 +202,8 @@ fn process_entry<E>(
decompressed: &[u8],
index_entry: &index::Entry,
pack_entry_crc32: impl FnOnce() -> u32,
- progress: &dyn RawProgress,
- processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn RawProgress) -> Result<(), E>,
+ progress: &dyn Progress,
+ processor: &mut impl FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E>,
) -> Result<(), Error<E>>
where
E: std::error::Error + Send + Sync + 'static,
diff --git a/vendor/gix-pack/src/index/traverse/with_index.rs b/vendor/gix-pack/src/index/traverse/with_index.rs
index 884277c9d..91382034c 100644
--- a/vendor/gix-pack/src/index/traverse/with_index.rs
+++ b/vendor/gix-pack/src/index/traverse/with_index.rs
@@ -1,6 +1,7 @@
use std::sync::atomic::{AtomicBool, Ordering};
-use gix_features::{parallel, progress::Progress};
+use gix_features::parallel;
+use gix_features::progress::DynNestedProgress;
use super::Error;
use crate::{
@@ -56,31 +57,30 @@ impl index::File {
/// at the cost of memory.
///
/// For more details, see the documentation on the [`traverse()`][index::File::traverse()] method.
- pub fn traverse_with_index<P, Processor, E>(
+ pub fn traverse_with_index<Processor, E>(
&self,
pack: &crate::data::File,
mut processor: Processor,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
Options { check, thread_limit }: Options,
- ) -> Result<Outcome<P>, Error<E>>
+ ) -> Result<Outcome, Error<E>>
where
- P: Progress,
- Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::RawProgress) -> Result<(), E>
+ Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::Progress) -> Result<(), E>
+ Send
+ Clone,
E: std::error::Error + Send + Sync + 'static,
{
let (verify_result, traversal_result) = parallel::join(
{
- let pack_progress = progress.add_child_with_id(
+ let mut pack_progress = progress.add_child_with_id(
format!(
"Hash of pack '{}'",
pack.path().file_name().expect("pack has filename").to_string_lossy()
),
ProgressId::HashPackDataBytes.into(),
);
- let index_progress = progress.add_child_with_id(
+ let mut index_progress = progress.add_child_with_id(
format!(
"Hash of index '{}'",
self.path.file_name().expect("index has filename").to_string_lossy()
@@ -88,7 +88,8 @@ impl index::File {
ProgressId::HashPackIndexBytes.into(),
);
move || {
- let res = self.possibly_verify(pack, check, pack_progress, index_progress, should_interrupt);
+ let res =
+ self.possibly_verify(pack, check, &mut pack_progress, &mut index_progress, should_interrupt);
if res.is_err() {
should_interrupt.store(true, Ordering::SeqCst);
}
@@ -98,14 +99,17 @@ impl index::File {
|| -> Result<_, Error<_>> {
let sorted_entries = index_entries_sorted_by_offset_ascending(
self,
- progress.add_child_with_id("collecting sorted index", ProgressId::CollectSortedIndexEntries.into()),
+ &mut progress.add_child_with_id(
+ "collecting sorted index".into(),
+ ProgressId::CollectSortedIndexEntries.into(),
+ ),
); /* Pack Traverse Collect sorted Entries */
let tree = crate::cache::delta::Tree::from_offsets_in_pack(
pack.path(),
sorted_entries.into_iter().map(Entry::from),
- |e| e.index_entry.pack_offset,
- |id| self.lookup(id).map(|idx| self.pack_offset_at_index(idx)),
- progress.add_child_with_id("indexing", ProgressId::TreeFromOffsetsObjects.into()),
+ &|e| e.index_entry.pack_offset,
+ &|id| self.lookup(id).map(|idx| self.pack_offset_at_index(idx)),
+ &mut progress.add_child_with_id("indexing".into(), ProgressId::TreeFromOffsetsObjects.into()),
should_interrupt,
self.object_hash,
)?;
@@ -153,8 +157,11 @@ impl index::File {
}
},
traverse::Options {
- object_progress: progress.add_child_with_id("Resolving", ProgressId::DecodedObjects.into()),
- size_progress: progress.add_child_with_id("Decoding", ProgressId::DecodedBytes.into()),
+ object_progress: Box::new(
+ progress.add_child_with_id("Resolving".into(), ProgressId::DecodedObjects.into()),
+ ),
+ size_progress:
+ &mut progress.add_child_with_id("Decoding".into(), ProgressId::DecodedBytes.into()),
thread_limit,
should_interrupt,
object_hash: self.object_hash,
@@ -167,7 +174,6 @@ impl index::File {
Ok(Outcome {
actual_index_checksum: verify_result?,
statistics: traversal_result?,
- progress,
})
}
}
diff --git a/vendor/gix-pack/src/index/traverse/with_lookup.rs b/vendor/gix-pack/src/index/traverse/with_lookup.rs
index 0165e4e01..3759dae5e 100644
--- a/vendor/gix-pack/src/index/traverse/with_lookup.rs
+++ b/vendor/gix-pack/src/index/traverse/with_lookup.rs
@@ -1,9 +1,11 @@
use std::sync::atomic::{AtomicBool, Ordering};
+use gix_features::progress::{Count, DynNestedProgress};
use gix_features::{
parallel::{self, in_parallel_if},
progress::{self, Progress},
threading::{lock, Mutable, OwnShared},
+ zlib,
};
use super::{Error, Reducer};
@@ -65,37 +67,34 @@ impl index::File {
/// waste while decoding objects.
///
/// For more details, see the documentation on the [`traverse()`][index::File::traverse()] method.
- pub fn traverse_with_lookup<P, C, Processor, E, F>(
+ pub fn traverse_with_lookup<C, Processor, E, F>(
&self,
mut processor: Processor,
pack: &data::File,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
Options {
thread_limit,
check,
make_pack_lookup_cache,
}: Options<F>,
- ) -> Result<Outcome<P>, Error<E>>
+ ) -> Result<Outcome, Error<E>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
E: std::error::Error + Send + Sync + 'static,
- Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn gix_features::progress::RawProgress) -> Result<(), E>
- + Send
- + Clone,
+ Processor: FnMut(gix_object::Kind, &[u8], &index::Entry, &dyn Progress) -> Result<(), E> + Send + Clone,
F: Fn() -> C + Send + Clone,
{
let (verify_result, traversal_result) = parallel::join(
{
- let pack_progress = progress.add_child_with_id(
+ let mut pack_progress = progress.add_child_with_id(
format!(
"Hash of pack '{}'",
pack.path().file_name().expect("pack has filename").to_string_lossy()
),
ProgressId::HashPackDataBytes.into(),
);
- let index_progress = progress.add_child_with_id(
+ let mut index_progress = progress.add_child_with_id(
format!(
"Hash of index '{}'",
self.path.file_name().expect("index has filename").to_string_lossy()
@@ -103,7 +102,8 @@ impl index::File {
ProgressId::HashPackIndexBytes.into(),
);
move || {
- let res = self.possibly_verify(pack, check, pack_progress, index_progress, should_interrupt);
+ let res =
+ self.possibly_verify(pack, check, &mut pack_progress, &mut index_progress, should_interrupt);
if res.is_err() {
should_interrupt.store(true, Ordering::SeqCst);
}
@@ -113,7 +113,10 @@ impl index::File {
|| {
let index_entries = util::index_entries_sorted_by_offset_ascending(
self,
- progress.add_child_with_id("collecting sorted index", ProgressId::CollectSortedIndexEntries.into()),
+ &mut progress.add_child_with_id(
+ "collecting sorted index".into(),
+ ProgressId::CollectSortedIndexEntries.into(),
+ ),
);
let (chunk_size, thread_limit, available_cores) =
@@ -121,7 +124,7 @@ impl index::File {
let there_are_enough_entries_to_process = || index_entries.len() > chunk_size * available_cores;
let input_chunks = index_entries.chunks(chunk_size.max(chunk_size));
let reduce_progress = OwnShared::new(Mutable::new({
- let mut p = progress.add_child_with_id("Traversing", ProgressId::DecodedObjects.into());
+ let mut p = progress.add_child_with_id("Traversing".into(), ProgressId::DecodedObjects.into());
p.init(Some(self.num_objects() as usize), progress::count("objects"));
p
}));
@@ -131,6 +134,7 @@ impl index::File {
(
make_pack_lookup_cache(),
Vec::with_capacity(2048), // decode buffer
+ zlib::Inflate::default(),
lock(&reduce_progress)
.add_child_with_id(format!("thread {index}"), gix_features::progress::UNKNOWN), // per thread progress
)
@@ -143,7 +147,7 @@ impl index::File {
thread_limit,
state_per_thread,
move |entries: &[index::Entry],
- (cache, buf, progress)|
+ (cache, buf, inflate, progress)|
-> Result<Vec<data::decode::entry::Outcome>, Error<_>> {
progress.init(
Some(entries.len()),
@@ -157,6 +161,7 @@ impl index::File {
pack,
cache,
buf,
+ inflate,
progress,
index_entry,
&mut processor,
@@ -183,7 +188,6 @@ impl index::File {
Ok(Outcome {
actual_index_checksum: verify_result?,
statistics: traversal_result?,
- progress,
})
}
}
diff --git a/vendor/gix-pack/src/index/util.rs b/vendor/gix-pack/src/index/util.rs
index 284ee6158..2549429f9 100644
--- a/vendor/gix-pack/src/index/util.rs
+++ b/vendor/gix-pack/src/index/util.rs
@@ -1,10 +1,10 @@
-use std::{io, time::Instant};
+use std::time::Instant;
use gix_features::progress::{self, Progress};
pub(crate) fn index_entries_sorted_by_offset_ascending(
idx: &crate::index::File,
- mut progress: impl Progress,
+ progress: &mut dyn Progress,
) -> Vec<crate::index::Entry> {
progress.init(Some(idx.num_objects as usize), progress::count("entries"));
let start = Instant::now();
@@ -19,29 +19,3 @@ pub(crate) fn index_entries_sorted_by_offset_ascending(
progress.show_throughput(start);
v
}
-
-pub(crate) struct Count<W> {
- pub bytes: u64,
- pub inner: W,
-}
-
-impl<W> Count<W> {
- pub fn new(inner: W) -> Self {
- Count { bytes: 0, inner }
- }
-}
-
-impl<W> io::Write for Count<W>
-where
- W: io::Write,
-{
- fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
- let written = self.inner.write(buf)?;
- self.bytes += written as u64;
- Ok(written)
- }
-
- fn flush(&mut self) -> io::Result<()> {
- self.inner.flush()
- }
-}
diff --git a/vendor/gix-pack/src/index/verify.rs b/vendor/gix-pack/src/index/verify.rs
index 6af352ac9..d259a6a38 100644
--- a/vendor/gix-pack/src/index/verify.rs
+++ b/vendor/gix-pack/src/index/verify.rs
@@ -1,6 +1,6 @@
use std::sync::atomic::AtomicBool;
-use gix_features::progress::Progress;
+use gix_features::progress::{DynNestedProgress, Progress};
use gix_object::{bstr::ByteSlice, WriteTo};
use crate::index;
@@ -15,6 +15,8 @@ pub mod integrity {
#[derive(thiserror::Error, Debug)]
#[allow(missing_docs)]
pub enum Error {
+ #[error("Reserialization of an object failed")]
+ Io(#[from] std::io::Error),
#[error("The fan at index {index} is out of order as it's larger then the following value.")]
Fan { index: usize },
#[error("{kind} object {id} could not be decoded")]
@@ -33,13 +35,11 @@ pub mod integrity {
}
/// Returned by [`index::File::verify_integrity()`][crate::index::File::verify_integrity()].
- pub struct Outcome<P> {
+ pub struct Outcome {
/// The computed checksum of the index which matched the stored one.
pub actual_index_checksum: gix_hash::ObjectId,
/// The packs traversal outcome, if one was provided
pub pack_traverse_statistics: Option<crate::index::traverse::Statistics>,
- /// The provided progress instance.
- pub progress: P,
}
/// Additional options to define how the integrity should be verified.
@@ -136,7 +136,7 @@ impl index::File {
/// of this index file, and return it if it does.
pub fn verify_checksum(
&self,
- progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
crate::verify::checksum_on_disk_or_mmap(
@@ -166,14 +166,13 @@ impl index::File {
///
/// The given `progress` is inevitably consumed if there is an error, which is a tradeoff chosen to easily allow using `?` in the
/// error case.
- pub fn verify_integrity<P, C, F>(
+ pub fn verify_integrity<C, F>(
&self,
pack: Option<PackContext<'_, F>>,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
- ) -> Result<integrity::Outcome<P>, index::traverse::Error<index::verify::integrity::Error>>
+ ) -> Result<integrity::Outcome, index::traverse::Error<index::verify::integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
@@ -214,18 +213,17 @@ impl index::File {
.map(|o| integrity::Outcome {
actual_index_checksum: o.actual_index_checksum,
pack_traverse_statistics: Some(o.statistics),
- progress: o.progress,
}),
None => self
.verify_checksum(
- progress.add_child_with_id("Sha1 of index", integrity::ProgressId::ChecksumBytes.into()),
+ &mut progress
+ .add_child_with_id("Sha1 of index".into(), integrity::ProgressId::ChecksumBytes.into()),
should_interrupt,
)
.map_err(Into::into)
.map(|id| integrity::Outcome {
actual_index_checksum: id,
pack_traverse_statistics: None,
- progress,
}),
}
}
@@ -237,7 +235,7 @@ impl index::File {
object_kind: gix_object::Kind,
buf: &[u8],
index_entry: &index::Entry,
- progress: &dyn gix_features::progress::RawProgress,
+ progress: &dyn gix_features::progress::Progress,
) -> Result<(), integrity::Error> {
if let Mode::HashCrc32Decode | Mode::HashCrc32DecodeEncode = verify_mode {
use gix_object::Kind::*;
@@ -252,9 +250,7 @@ impl index::File {
})?;
if let Mode::HashCrc32DecodeEncode = verify_mode {
encode_buf.clear();
- object
- .write_to(&mut *encode_buf)
- .expect("writing to a memory buffer never fails");
+ object.write_to(&mut *encode_buf)?;
if encode_buf.as_slice() != buf {
let mut should_return_error = true;
if let Tree = object_kind {
diff --git a/vendor/gix-pack/src/index/write/encode.rs b/vendor/gix-pack/src/index/write/encode.rs
deleted file mode 100644
index f1195875c..000000000
--- a/vendor/gix-pack/src/index/write/encode.rs
+++ /dev/null
@@ -1,124 +0,0 @@
-use std::{cmp::Ordering, io};
-
-pub(crate) const LARGE_OFFSET_THRESHOLD: u64 = 0x7fff_ffff;
-pub(crate) const HIGH_BIT: u32 = 0x8000_0000;
-
-use gix_features::{
- hash,
- progress::{self, Progress},
-};
-
-use crate::index::{util::Count, V2_SIGNATURE};
-
-pub(crate) fn write_to(
- out: impl io::Write,
- entries_sorted_by_oid: Vec<crate::cache::delta::Item<crate::index::write::TreeEntry>>,
- pack_hash: &gix_hash::ObjectId,
- kind: crate::index::Version,
- mut progress: impl Progress,
-) -> io::Result<gix_hash::ObjectId> {
- use io::Write;
- assert_eq!(kind, crate::index::Version::V2, "Can only write V2 packs right now");
- assert!(
- entries_sorted_by_oid.len() <= u32::MAX as usize,
- "a pack cannot have more than u32::MAX objects"
- );
-
- // Write header
- let mut out = Count::new(std::io::BufWriter::with_capacity(
- 8 * 4096,
- hash::Write::new(out, kind.hash()),
- ));
- out.write_all(V2_SIGNATURE)?;
- out.write_all(&(kind as u32).to_be_bytes())?;
-
- progress.init(Some(4), progress::steps());
- let start = std::time::Instant::now();
- let _info = progress.add_child_with_id("writing fan-out table", gix_features::progress::UNKNOWN);
- let fan_out = fanout(entries_sorted_by_oid.iter().map(|e| e.data.id.first_byte()));
-
- for value in fan_out.iter() {
- out.write_all(&value.to_be_bytes())?;
- }
-
- progress.inc();
- let _info = progress.add_child_with_id("writing ids", gix_features::progress::UNKNOWN);
- for entry in &entries_sorted_by_oid {
- out.write_all(entry.data.id.as_slice())?;
- }
-
- progress.inc();
- let _info = progress.add_child_with_id("writing crc32", gix_features::progress::UNKNOWN);
- for entry in &entries_sorted_by_oid {
- out.write_all(&entry.data.crc32.to_be_bytes())?;
- }
-
- progress.inc();
- let _info = progress.add_child_with_id("writing offsets", gix_features::progress::UNKNOWN);
- {
- let mut offsets64 = Vec::<u64>::new();
- for entry in &entries_sorted_by_oid {
- let offset: u32 = if entry.offset > LARGE_OFFSET_THRESHOLD {
- assert!(
- offsets64.len() < LARGE_OFFSET_THRESHOLD as usize,
- "Encoding breakdown - way too many 64bit offsets"
- );
- offsets64.push(entry.offset);
- ((offsets64.len() - 1) as u32) | HIGH_BIT
- } else {
- entry.offset as u32
- };
- out.write_all(&offset.to_be_bytes())?;
- }
- for value in offsets64 {
- out.write_all(&value.to_be_bytes())?;
- }
- }
-
- out.write_all(pack_hash.as_slice())?;
-
- let bytes_written_without_trailer = out.bytes;
- let mut out = out.inner.into_inner()?;
- let index_hash: gix_hash::ObjectId = out.hash.digest().into();
- out.inner.write_all(index_hash.as_slice())?;
- out.inner.flush()?;
-
- progress.inc();
- progress.show_throughput_with(
- start,
- (bytes_written_without_trailer + 20) as usize,
- progress::bytes().expect("unit always set"),
- progress::MessageLevel::Success,
- );
-
- Ok(index_hash)
-}
-
-pub(crate) fn fanout(iter: impl ExactSizeIterator<Item = u8>) -> [u32; 256] {
- let mut fan_out = [0u32; 256];
- let entries_len = iter.len() as u32;
- let mut iter = iter.enumerate();
- let mut idx_and_entry = iter.next();
- let mut upper_bound = 0;
-
- for (offset_be, byte) in fan_out.iter_mut().zip(0u8..=255) {
- *offset_be = match idx_and_entry.as_ref() {
- Some((_idx, first_byte)) => match first_byte.cmp(&byte) {
- Ordering::Less => unreachable!("ids should be ordered, and we make sure to keep ahead with them"),
- Ordering::Greater => upper_bound,
- Ordering::Equal => {
- if byte == 255 {
- entries_len
- } else {
- idx_and_entry = iter.find(|(_, first_byte)| *first_byte != byte);
- upper_bound = idx_and_entry.as_ref().map_or(entries_len, |(idx, _)| *idx as u32);
- upper_bound
- }
- }
- },
- None => entries_len,
- };
- }
-
- fan_out
-}
diff --git a/vendor/gix-pack/src/index/write/mod.rs b/vendor/gix-pack/src/index/write/mod.rs
index 72a076a85..d1402fa86 100644
--- a/vendor/gix-pack/src/index/write/mod.rs
+++ b/vendor/gix-pack/src/index/write/mod.rs
@@ -1,11 +1,11 @@
use std::{convert::TryInto, io, sync::atomic::AtomicBool};
pub use error::Error;
-use gix_features::progress::{self, Progress};
+use gix_features::progress::prodash::DynNestedProgress;
+use gix_features::progress::{self, Count, Progress};
use crate::cache::delta::{traverse, Tree};
-pub(crate) mod encode;
mod error;
pub(crate) struct TreeEntry {
@@ -83,13 +83,13 @@ impl crate::index::File {
/// It should return `None` if the entry cannot be resolved from the pack that produced the `entries` iterator, causing
/// the write operation to fail.
#[allow(clippy::too_many_arguments)]
- pub fn write_data_iter_to_stream<F, F2, R, P>(
+ pub fn write_data_iter_to_stream<F, F2, R>(
version: crate::index::Version,
make_resolver: F,
- entries: impl Iterator<Item = Result<crate::data::input::Entry, crate::data::input::Error>>,
+ entries: &mut dyn Iterator<Item = Result<crate::data::input::Entry, crate::data::input::Error>>,
thread_limit: Option<usize>,
- mut root_progress: P,
- out: impl io::Write,
+ root_progress: &mut dyn DynNestedProgress,
+ out: &mut dyn io::Write,
should_interrupt: &AtomicBool,
object_hash: gix_hash::Kind,
pack_version: crate::data::Version,
@@ -98,7 +98,6 @@ impl crate::index::File {
F: FnOnce() -> io::Result<(F2, R)>,
R: Send + Sync,
F2: for<'r> Fn(crate::data::EntryRange, &'r R) -> Option<&'r [u8]> + Send + Clone,
- P: Progress,
{
if version != crate::index::Version::default() {
return Err(Error::Unsupported(version));
@@ -111,10 +110,10 @@ impl crate::index::File {
let indexing_start = std::time::Instant::now();
root_progress.init(Some(4), progress::steps());
- let mut objects_progress = root_progress.add_child_with_id("indexing", ProgressId::IndexObjects.into());
+ let mut objects_progress = root_progress.add_child_with_id("indexing".into(), ProgressId::IndexObjects.into());
objects_progress.init(Some(anticipated_num_objects), progress::count("objects"));
let mut decompressed_progress =
- root_progress.add_child_with_id("decompressing", ProgressId::DecompressedBytes.into());
+ root_progress.add_child_with_id("decompressing".into(), ProgressId::DecompressedBytes.into());
decompressed_progress.init(None, progress::bytes());
let mut pack_entries_end: u64 = 0;
@@ -199,8 +198,11 @@ impl crate::index::File {
Ok::<_, Error>(())
},
traverse::Options {
- object_progress: root_progress.add_child_with_id("Resolving", ProgressId::ResolveObjects.into()),
- size_progress: root_progress.add_child_with_id("Decoding", ProgressId::DecodedBytes.into()),
+ object_progress: Box::new(
+ root_progress.add_child_with_id("Resolving".into(), ProgressId::ResolveObjects.into()),
+ ),
+ size_progress: &mut root_progress
+ .add_child_with_id("Decoding".into(), ProgressId::DecodedBytes.into()),
thread_limit,
should_interrupt,
object_hash,
@@ -211,7 +213,8 @@ impl crate::index::File {
let mut items = roots;
items.extend(children);
{
- let _progress = root_progress.add_child_with_id("sorting by id", gix_features::progress::UNKNOWN);
+ let _progress =
+ root_progress.add_child_with_id("sorting by id".into(), gix_features::progress::UNKNOWN);
items.sort_by_key(|e| e.data.id);
}
@@ -229,12 +232,12 @@ impl crate::index::File {
}
None => return Err(Error::IteratorInvariantTrailer),
};
- let index_hash = encode::write_to(
+ let index_hash = crate::index::encode::write_to(
out,
sorted_pack_offsets_by_oid,
&pack_hash,
version,
- root_progress.add_child_with_id("writing index file", ProgressId::IndexBytesWritten.into()),
+ &mut root_progress.add_child_with_id("writing index file".into(), ProgressId::IndexBytesWritten.into()),
)?;
root_progress.show_throughput_with(
indexing_start,
diff --git a/vendor/gix-pack/src/multi_index/access.rs b/vendor/gix-pack/src/multi_index/access.rs
index 488f996d3..0150c7206 100644
--- a/vendor/gix-pack/src/multi_index/access.rs
+++ b/vendor/gix-pack/src/multi_index/access.rs
@@ -89,7 +89,7 @@ impl File {
prefix,
candidates,
&self.fan,
- |idx| self.oid_at_index(idx),
+ &|idx| self.oid_at_index(idx),
self.num_objects,
)
}
@@ -98,7 +98,7 @@ impl File {
///
/// Use this index for finding additional information via [`File::pack_id_and_pack_offset_at_index()`].
pub fn lookup(&self, id: impl AsRef<gix_hash::oid>) -> Option<EntryIndex> {
- crate::index::access::lookup(id, &self.fan, |idx| self.oid_at_index(idx))
+ crate::index::access::lookup(id.as_ref(), &self.fan, &|idx| self.oid_at_index(idx))
}
/// Given the `index` ranging from 0 to [File::num_objects()], return the pack index and its absolute offset into the pack.
diff --git a/vendor/gix-pack/src/multi_index/chunk.rs b/vendor/gix-pack/src/multi_index/chunk.rs
index 48a003ca0..86e43714d 100644
--- a/vendor/gix-pack/src/multi_index/chunk.rs
+++ b/vendor/gix-pack/src/multi_index/chunk.rs
@@ -82,7 +82,7 @@ pub mod index_names {
/// Write all `paths` in order to `out`, including padding.
pub fn write(
paths: impl IntoIterator<Item = impl AsRef<Path>>,
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
let mut written_bytes = 0;
for path in paths {
@@ -130,9 +130,9 @@ pub mod fanout {
/// Write the fanout for the given entries, which must be sorted by oid
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
- let fanout = crate::index::write::encode::fanout(sorted_entries.iter().map(|e| e.id.first_byte()));
+ let fanout = crate::index::encode::fanout(&mut sorted_entries.iter().map(|e| e.id.first_byte()));
for value in fanout.iter() {
out.write_all(&value.to_be_bytes())?;
@@ -157,7 +157,7 @@ pub mod lookup {
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
for entry in sorted_entries {
out.write_all(entry.id.as_slice())?;
@@ -188,9 +188,9 @@ pub mod offsets {
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
large_offsets_needed: bool,
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
- use crate::index::write::encode::{HIGH_BIT, LARGE_OFFSET_THRESHOLD};
+ use crate::index::encode::{HIGH_BIT, LARGE_OFFSET_THRESHOLD};
let mut num_large_offsets = 0u32;
for entry in sorted_entries {
@@ -226,7 +226,7 @@ pub mod offsets {
pub mod large_offsets {
use std::ops::Range;
- use crate::{index::write::encode::LARGE_OFFSET_THRESHOLD, multi_index};
+ use crate::{index::encode::LARGE_OFFSET_THRESHOLD, multi_index};
/// The id uniquely identifying the large offsets table (with 64 bit offsets)
pub const ID: gix_chunk::Id = *b"LOFF";
@@ -254,7 +254,7 @@ pub mod large_offsets {
pub(crate) fn write(
sorted_entries: &[multi_index::write::Entry],
mut num_large_offsets: usize,
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
) -> std::io::Result<()> {
for offset in sorted_entries
.iter()
diff --git a/vendor/gix-pack/src/multi_index/verify.rs b/vendor/gix-pack/src/multi_index/verify.rs
index 856a48501..0903b3568 100644
--- a/vendor/gix-pack/src/multi_index/verify.rs
+++ b/vendor/gix-pack/src/multi_index/verify.rs
@@ -1,6 +1,6 @@
use std::{cmp::Ordering, sync::atomic::AtomicBool, time::Instant};
-use gix_features::progress::Progress;
+use gix_features::progress::{Count, DynNestedProgress, Progress};
use crate::{index, multi_index::File};
@@ -39,13 +39,11 @@ pub mod integrity {
}
/// Returned by [`multi_index::File::verify_integrity()`][crate::multi_index::File::verify_integrity()].
- pub struct Outcome<P> {
+ pub struct Outcome {
/// The computed checksum of the multi-index which matched the stored one.
pub actual_index_checksum: gix_hash::ObjectId,
/// The for each entry in [`index_names()`][super::File::index_names()] provide the corresponding pack traversal outcome.
pub pack_traverse_statistics: Vec<crate::index::traverse::Statistics>,
- /// The provided progress instance.
- pub progress: P,
}
/// The progress ids used in [`multi_index::File::verify_integrity()`][crate::multi_index::File::verify_integrity()].
@@ -80,7 +78,7 @@ impl File {
/// of this index file, and return it if it does.
pub fn verify_checksum(
&self,
- progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
crate::verify::checksum_on_disk_or_mmap(
@@ -96,14 +94,11 @@ impl File {
/// Similar to [`verify_integrity()`][File::verify_integrity()] but without any deep inspection of objects.
///
/// Instead we only validate the contents of the multi-index itself.
- pub fn verify_integrity_fast<P>(
+ pub fn verify_integrity_fast(
&self,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
- ) -> Result<(gix_hash::ObjectId, P), integrity::Error>
- where
- P: Progress,
- {
+ ) -> Result<gix_hash::ObjectId, integrity::Error> {
self.verify_integrity_inner(
progress,
should_interrupt,
@@ -114,35 +109,33 @@ impl File {
index::traverse::Error::Processor(err) => err,
_ => unreachable!("BUG: no other error type is possible"),
})
- .map(|o| (o.actual_index_checksum, o.progress))
+ .map(|o| o.actual_index_checksum)
}
/// Similar to [`crate::Bundle::verify_integrity()`] but checks all contained indices and their packs.
///
/// Note that it's considered a failure if an index doesn't have a corresponding pack.
- pub fn verify_integrity<C, P, F>(
+ pub fn verify_integrity<C, F>(
&self,
- progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
options: index::verify::integrity::Options<F>,
- ) -> Result<integrity::Outcome<P>, index::traverse::Error<integrity::Error>>
+ ) -> Result<integrity::Outcome, index::traverse::Error<integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
self.verify_integrity_inner(progress, should_interrupt, true, options)
}
- fn verify_integrity_inner<C, P, F>(
+ fn verify_integrity_inner<C, F>(
&self,
- mut progress: P,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
deep_check: bool,
options: index::verify::integrity::Options<F>,
- ) -> Result<integrity::Outcome<P>, index::traverse::Error<integrity::Error>>
+ ) -> Result<integrity::Outcome, index::traverse::Error<integrity::Error>>
where
- P: Progress,
C: crate::cache::DecodeEntry,
F: Fn() -> C + Send + Clone,
{
@@ -150,7 +143,7 @@ impl File {
let actual_index_checksum = self
.verify_checksum(
- progress.add_child_with_id(
+ &mut progress.add_child_with_id(
format!("{}: checksum", self.path.display()),
integrity::ProgressId::ChecksumBytes.into(),
),
@@ -176,7 +169,7 @@ impl File {
let mut pack_ids_and_offsets = Vec::with_capacity(self.num_objects as usize);
{
let order_start = Instant::now();
- let mut progress = progress.add_child_with_id("checking oid order", gix_features::progress::UNKNOWN);
+ let mut progress = progress.add_child_with_id("checking oid order".into(), gix_features::progress::UNKNOWN);
progress.init(
Some(self.num_objects as usize),
gix_features::progress::count("objects"),
@@ -238,8 +231,10 @@ impl File {
let multi_index_entries_to_check = &pack_ids_slice[..slice_end];
{
let offset_start = Instant::now();
- let mut offsets_progress =
- progress.add_child_with_id("verify object offsets", integrity::ProgressId::ObjectOffsets.into());
+ let mut offsets_progress = progress.add_child_with_id(
+ "verify object offsets".into(),
+ integrity::ProgressId::ObjectOffsets.into(),
+ );
offsets_progress.init(
Some(pack_ids_and_offsets.len()),
gix_features::progress::count("objects"),
@@ -278,7 +273,6 @@ impl File {
let crate::bundle::verify::integrity::Outcome {
actual_index_checksum: _,
pack_traverse_outcome,
- progress: returned_progress,
} = bundle
.verify_integrity(progress, should_interrupt, options.clone())
.map_err(|err| {
@@ -315,7 +309,6 @@ impl File {
Interrupted => Interrupted,
}
})?;
- progress = returned_progress;
pack_traverse_statistics.push(pack_traverse_outcome);
}
}
@@ -325,13 +318,12 @@ impl File {
"BUG: our slicing should allow to visit all objects"
);
- progress.set_name("Validating multi-pack");
+ progress.set_name("Validating multi-pack".into());
progress.show_throughput(operation_start);
Ok(integrity::Outcome {
actual_index_checksum,
pack_traverse_statistics,
- progress,
})
}
}
diff --git a/vendor/gix-pack/src/multi_index/write.rs b/vendor/gix-pack/src/multi_index/write.rs
index 9002af9eb..881033091 100644
--- a/vendor/gix-pack/src/multi_index/write.rs
+++ b/vendor/gix-pack/src/multi_index/write.rs
@@ -5,7 +5,7 @@ use std::{
time::{Instant, SystemTime},
};
-use gix_features::progress::Progress;
+use gix_features::progress::{Count, DynNestedProgress, Progress};
use crate::multi_index;
@@ -40,11 +40,9 @@ pub struct Options {
}
/// The result of [`multi_index::File::write_from_index_paths()`].
-pub struct Outcome<P> {
+pub struct Outcome {
/// The calculated multi-index checksum of the file at `multi_index_path`.
pub multi_index_checksum: gix_hash::ObjectId,
- /// The input progress
- pub progress: P,
}
/// The progress ids used in [`write_from_index_paths()`][multi_index::File::write_from_index_paths()].
@@ -79,16 +77,13 @@ impl multi_index::File {
/// Create a new multi-index file for writing to `out` from the pack index files at `index_paths`.
///
/// Progress is sent to `progress` and interruptions checked via `should_interrupt`.
- pub fn write_from_index_paths<P>(
+ pub fn write_from_index_paths(
mut index_paths: Vec<PathBuf>,
- out: impl std::io::Write,
- mut progress: P,
+ out: &mut dyn std::io::Write,
+ progress: &mut dyn DynNestedProgress,
should_interrupt: &AtomicBool,
Options { object_hash }: Options,
- ) -> Result<Outcome<P>, Error>
- where
- P: Progress,
- {
+ ) -> Result<Outcome, Error> {
let out = gix_features::hash::Write::new(out, object_hash);
let (index_paths_sorted, index_filenames_sorted) = {
index_paths.sort();
@@ -102,8 +97,10 @@ impl multi_index::File {
let entries = {
let mut entries = Vec::new();
let start = Instant::now();
- let mut progress =
- progress.add_child_with_id("Collecting entries", ProgressId::FromPathsCollectingEntries.into());
+ let mut progress = progress.add_child_with_id(
+ "Collecting entries".into(),
+ ProgressId::FromPathsCollectingEntries.into(),
+ );
progress.init(Some(index_paths_sorted.len()), gix_features::progress::count("indices"));
// This could be parallelized… but it's probably not worth it unless you have 500mio objects.
@@ -129,7 +126,7 @@ impl multi_index::File {
progress.show_throughput(start);
let start = Instant::now();
- progress.set_name("Deduplicate");
+ progress.set_name("Deduplicate".into());
progress.init(Some(entries.len()), gix_features::progress::count("entries"));
entries.sort_by(|l, r| {
l.id.cmp(&r.id)
@@ -168,7 +165,8 @@ impl multi_index::File {
);
}
- let mut write_progress = progress.add_child_with_id("Writing multi-index", ProgressId::BytesWritten.into());
+ let mut write_progress =
+ progress.add_child_with_id("Writing multi-index".into(), ProgressId::BytesWritten.into());
let write_start = Instant::now();
write_progress.init(
Some(cf.planned_storage_size() as usize + Self::HEADER_LEN),
@@ -187,7 +185,7 @@ impl multi_index::File {
)?;
{
- progress.set_name("Writing chunks");
+ progress.set_name("Writing chunks".into());
progress.init(Some(cf.num_chunks()), gix_features::progress::count("chunks"));
let mut chunk_write = cf.into_write(&mut out, bytes_written)?;
@@ -220,14 +218,11 @@ impl multi_index::File {
out.inner.inner.write_all(multi_index_checksum.as_slice())?;
out.progress.show_throughput(write_start);
- Ok(Outcome {
- multi_index_checksum,
- progress,
- })
+ Ok(Outcome { multi_index_checksum })
}
fn write_header(
- mut out: impl std::io::Write,
+ out: &mut dyn std::io::Write,
num_chunks: u8,
num_indices: u32,
object_hash: gix_hash::Kind,
diff --git a/vendor/gix-pack/src/verify.rs b/vendor/gix-pack/src/verify.rs
index f985c8657..d502ada38 100644
--- a/vendor/gix-pack/src/verify.rs
+++ b/vendor/gix-pack/src/verify.rs
@@ -33,7 +33,7 @@ pub fn checksum_on_disk_or_mmap(
data: &[u8],
expected: gix_hash::ObjectId,
object_hash: gix_hash::Kind,
- mut progress: impl Progress,
+ progress: &mut dyn Progress,
should_interrupt: &AtomicBool,
) -> Result<gix_hash::ObjectId, checksum::Error> {
let data_len_without_trailer = data.len() - object_hash.len_in_bytes();
@@ -41,7 +41,7 @@ pub fn checksum_on_disk_or_mmap(
data_path,
data_len_without_trailer,
object_hash,
- &mut progress,
+ progress,
should_interrupt,
) {
Ok(id) => id,